repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
devopshq/artifactory | artifactory.py | ArtifactoryPath.del_properties | def del_properties(self, properties, recursive=None):
"""
Delete properties listed in properties
properties - iterable contains the property names to delete. If it is an
str it will be casted to tuple.
recursive - on folders property attachment is recursive by default. It is
possible to force recursive behavior.
"""
return self._accessor.del_properties(self, properties, recursive) | python | def del_properties(self, properties, recursive=None):
"""
Delete properties listed in properties
properties - iterable contains the property names to delete. If it is an
str it will be casted to tuple.
recursive - on folders property attachment is recursive by default. It is
possible to force recursive behavior.
"""
return self._accessor.del_properties(self, properties, recursive) | [
"def",
"del_properties",
"(",
"self",
",",
"properties",
",",
"recursive",
"=",
"None",
")",
":",
"return",
"self",
".",
"_accessor",
".",
"del_properties",
"(",
"self",
",",
"properties",
",",
"recursive",
")"
] | Delete properties listed in properties
properties - iterable contains the property names to delete. If it is an
str it will be casted to tuple.
recursive - on folders property attachment is recursive by default. It is
possible to force recursive behavior. | [
"Delete",
"properties",
"listed",
"in",
"properties"
] | b9ec08cd72527d7d43159fe45c3a98a0b0838534 | https://github.com/devopshq/artifactory/blob/b9ec08cd72527d7d43159fe45c3a98a0b0838534/artifactory.py#L1329-L1338 | train | 198,600 |
devopshq/artifactory | artifactory.py | ArtifactoryPath.create_aql_text | def create_aql_text(*args):
"""
Create AQL querty from string or list or dict arguments
"""
aql_query_text = ""
for arg in args:
if isinstance(arg, dict):
arg = "({})".format(json.dumps(arg))
elif isinstance(arg, list):
arg = "({})".format(json.dumps(arg)).replace("[", "").replace("]", "")
aql_query_text += arg
return aql_query_text | python | def create_aql_text(*args):
"""
Create AQL querty from string or list or dict arguments
"""
aql_query_text = ""
for arg in args:
if isinstance(arg, dict):
arg = "({})".format(json.dumps(arg))
elif isinstance(arg, list):
arg = "({})".format(json.dumps(arg)).replace("[", "").replace("]", "")
aql_query_text += arg
return aql_query_text | [
"def",
"create_aql_text",
"(",
"*",
"args",
")",
":",
"aql_query_text",
"=",
"\"\"",
"for",
"arg",
"in",
"args",
":",
"if",
"isinstance",
"(",
"arg",
",",
"dict",
")",
":",
"arg",
"=",
"\"({})\"",
".",
"format",
"(",
"json",
".",
"dumps",
"(",
"arg",... | Create AQL querty from string or list or dict arguments | [
"Create",
"AQL",
"querty",
"from",
"string",
"or",
"list",
"or",
"dict",
"arguments"
] | b9ec08cd72527d7d43159fe45c3a98a0b0838534 | https://github.com/devopshq/artifactory/blob/b9ec08cd72527d7d43159fe45c3a98a0b0838534/artifactory.py#L1354-L1365 | train | 198,601 |
dsoprea/PyInotify | inotify/adapters.py | Inotify.remove_watch | def remove_watch(self, path, superficial=False):
"""Remove our tracking information and call inotify to stop watching
the given path. When a directory is removed, we'll just have to remove
our tracking since inotify already cleans-up the watch.
"""
wd = self.__watches.get(path)
if wd is None:
return
_LOGGER.debug("Removing watch for watch-handle (%d): [%s]",
wd, path)
del self.__watches[path]
self.remove_watch_with_id(wd) | python | def remove_watch(self, path, superficial=False):
"""Remove our tracking information and call inotify to stop watching
the given path. When a directory is removed, we'll just have to remove
our tracking since inotify already cleans-up the watch.
"""
wd = self.__watches.get(path)
if wd is None:
return
_LOGGER.debug("Removing watch for watch-handle (%d): [%s]",
wd, path)
del self.__watches[path]
self.remove_watch_with_id(wd) | [
"def",
"remove_watch",
"(",
"self",
",",
"path",
",",
"superficial",
"=",
"False",
")",
":",
"wd",
"=",
"self",
".",
"__watches",
".",
"get",
"(",
"path",
")",
"if",
"wd",
"is",
"None",
":",
"return",
"_LOGGER",
".",
"debug",
"(",
"\"Removing watch for... | Remove our tracking information and call inotify to stop watching
the given path. When a directory is removed, we'll just have to remove
our tracking since inotify already cleans-up the watch. | [
"Remove",
"our",
"tracking",
"information",
"and",
"call",
"inotify",
"to",
"stop",
"watching",
"the",
"given",
"path",
".",
"When",
"a",
"directory",
"is",
"removed",
"we",
"ll",
"just",
"have",
"to",
"remove",
"our",
"tracking",
"since",
"inotify",
"alread... | 288a2b01fd4a726dc68d960f8351630aee788a13 | https://github.com/dsoprea/PyInotify/blob/288a2b01fd4a726dc68d960f8351630aee788a13/inotify/adapters.py#L103-L118 | train | 198,602 |
dsoprea/PyInotify | inotify/adapters.py | Inotify._handle_inotify_event | def _handle_inotify_event(self, wd):
"""Handle a series of events coming-in from inotify."""
b = os.read(wd, 1024)
if not b:
return
self.__buffer += b
while 1:
length = len(self.__buffer)
if length < _STRUCT_HEADER_LENGTH:
_LOGGER.debug("Not enough bytes for a header.")
return
# We have, at least, a whole-header in the buffer.
peek_slice = self.__buffer[:_STRUCT_HEADER_LENGTH]
header_raw = struct.unpack(
_HEADER_STRUCT_FORMAT,
peek_slice)
header = _INOTIFY_EVENT(*header_raw)
type_names = self._get_event_names(header.mask)
_LOGGER.debug("Events received in stream: {}".format(type_names))
event_length = (_STRUCT_HEADER_LENGTH + header.len)
if length < event_length:
return
filename = self.__buffer[_STRUCT_HEADER_LENGTH:event_length]
# Our filename is 16-byte aligned and right-padded with NULs.
filename_bytes = filename.rstrip(b'\0')
self.__buffer = self.__buffer[event_length:]
path = self.__watches_r.get(header.wd)
if path is not None:
filename_unicode = filename_bytes.decode('utf8')
yield (header, type_names, path, filename_unicode)
buffer_length = len(self.__buffer)
if buffer_length < _STRUCT_HEADER_LENGTH:
break | python | def _handle_inotify_event(self, wd):
"""Handle a series of events coming-in from inotify."""
b = os.read(wd, 1024)
if not b:
return
self.__buffer += b
while 1:
length = len(self.__buffer)
if length < _STRUCT_HEADER_LENGTH:
_LOGGER.debug("Not enough bytes for a header.")
return
# We have, at least, a whole-header in the buffer.
peek_slice = self.__buffer[:_STRUCT_HEADER_LENGTH]
header_raw = struct.unpack(
_HEADER_STRUCT_FORMAT,
peek_slice)
header = _INOTIFY_EVENT(*header_raw)
type_names = self._get_event_names(header.mask)
_LOGGER.debug("Events received in stream: {}".format(type_names))
event_length = (_STRUCT_HEADER_LENGTH + header.len)
if length < event_length:
return
filename = self.__buffer[_STRUCT_HEADER_LENGTH:event_length]
# Our filename is 16-byte aligned and right-padded with NULs.
filename_bytes = filename.rstrip(b'\0')
self.__buffer = self.__buffer[event_length:]
path = self.__watches_r.get(header.wd)
if path is not None:
filename_unicode = filename_bytes.decode('utf8')
yield (header, type_names, path, filename_unicode)
buffer_length = len(self.__buffer)
if buffer_length < _STRUCT_HEADER_LENGTH:
break | [
"def",
"_handle_inotify_event",
"(",
"self",
",",
"wd",
")",
":",
"b",
"=",
"os",
".",
"read",
"(",
"wd",
",",
"1024",
")",
"if",
"not",
"b",
":",
"return",
"self",
".",
"__buffer",
"+=",
"b",
"while",
"1",
":",
"length",
"=",
"len",
"(",
"self",... | Handle a series of events coming-in from inotify. | [
"Handle",
"a",
"series",
"of",
"events",
"coming",
"-",
"in",
"from",
"inotify",
"."
] | 288a2b01fd4a726dc68d960f8351630aee788a13 | https://github.com/dsoprea/PyInotify/blob/288a2b01fd4a726dc68d960f8351630aee788a13/inotify/adapters.py#L143-L189 | train | 198,603 |
dsoprea/PyInotify | inotify/adapters.py | Inotify.event_gen | def event_gen(
self, timeout_s=None, yield_nones=True, filter_predicate=None,
terminal_events=_DEFAULT_TERMINAL_EVENTS):
"""Yield one event after another. If `timeout_s` is provided, we'll
break when no event is received for that many seconds.
"""
# We will either return due to the optional filter or because of a
# timeout. The former will always set this. The latter will never set
# this.
self.__last_success_return = None
last_hit_s = time.time()
while True:
block_duration_s = self.__get_block_duration()
# Poll, but manage signal-related errors.
try:
events = self.__epoll.poll(block_duration_s)
except IOError as e:
if e.errno != EINTR:
raise
if timeout_s is not None:
time_since_event_s = time.time() - last_hit_s
if time_since_event_s > timeout_s:
break
continue
# Process events.
for fd, event_type in events:
# (fd) looks to always match the inotify FD.
names = self._get_event_names(event_type)
_LOGGER.debug("Events received from epoll: {}".format(names))
for (header, type_names, path, filename) \
in self._handle_inotify_event(fd):
last_hit_s = time.time()
e = (header, type_names, path, filename)
for type_name in type_names:
if filter_predicate is not None and \
filter_predicate(type_name, e) is False:
self.__last_success_return = (type_name, e)
return
elif type_name in terminal_events:
raise TerminalEventException(type_name, e)
yield e
if timeout_s is not None:
time_since_event_s = time.time() - last_hit_s
if time_since_event_s > timeout_s:
break
if yield_nones is True:
yield None | python | def event_gen(
self, timeout_s=None, yield_nones=True, filter_predicate=None,
terminal_events=_DEFAULT_TERMINAL_EVENTS):
"""Yield one event after another. If `timeout_s` is provided, we'll
break when no event is received for that many seconds.
"""
# We will either return due to the optional filter or because of a
# timeout. The former will always set this. The latter will never set
# this.
self.__last_success_return = None
last_hit_s = time.time()
while True:
block_duration_s = self.__get_block_duration()
# Poll, but manage signal-related errors.
try:
events = self.__epoll.poll(block_duration_s)
except IOError as e:
if e.errno != EINTR:
raise
if timeout_s is not None:
time_since_event_s = time.time() - last_hit_s
if time_since_event_s > timeout_s:
break
continue
# Process events.
for fd, event_type in events:
# (fd) looks to always match the inotify FD.
names = self._get_event_names(event_type)
_LOGGER.debug("Events received from epoll: {}".format(names))
for (header, type_names, path, filename) \
in self._handle_inotify_event(fd):
last_hit_s = time.time()
e = (header, type_names, path, filename)
for type_name in type_names:
if filter_predicate is not None and \
filter_predicate(type_name, e) is False:
self.__last_success_return = (type_name, e)
return
elif type_name in terminal_events:
raise TerminalEventException(type_name, e)
yield e
if timeout_s is not None:
time_since_event_s = time.time() - last_hit_s
if time_since_event_s > timeout_s:
break
if yield_nones is True:
yield None | [
"def",
"event_gen",
"(",
"self",
",",
"timeout_s",
"=",
"None",
",",
"yield_nones",
"=",
"True",
",",
"filter_predicate",
"=",
"None",
",",
"terminal_events",
"=",
"_DEFAULT_TERMINAL_EVENTS",
")",
":",
"# We will either return due to the optional filter or because of a",
... | Yield one event after another. If `timeout_s` is provided, we'll
break when no event is received for that many seconds. | [
"Yield",
"one",
"event",
"after",
"another",
".",
"If",
"timeout_s",
"is",
"provided",
"we",
"ll",
"break",
"when",
"no",
"event",
"is",
"received",
"for",
"that",
"many",
"seconds",
"."
] | 288a2b01fd4a726dc68d960f8351630aee788a13 | https://github.com/dsoprea/PyInotify/blob/288a2b01fd4a726dc68d960f8351630aee788a13/inotify/adapters.py#L191-L251 | train | 198,604 |
aliyun/aliyun-log-python-sdk | aliyun/log/logclient_operator.py | copy_project | def copy_project(from_client, to_client, from_project, to_project, copy_machine_group=False):
"""
copy project, logstore, machine group and logtail config to target project,
will create the target project if it doesn't exist
:type from_client: LogClient
:param from_client: logclient instance
:type to_client: LogClient
:param to_client: logclient instance
:type from_project: string
:param from_project: project name
:type to_project: string
:param to_project: project name
:type copy_machine_group: bool
:param copy_machine_group: if copy machine group resources, False by default.
:return:
"""
# copy project
ret = from_client.get_project(from_project)
try:
ret = to_client.create_project(to_project, ret.get_description())
except LogException as ex:
if ex.get_error_code() == 'ProjectAlreadyExist':
# don't create the project as it already exists
pass
else:
raise
default_fetch_size = 100
# list logstore and copy them
offset, size = 0, default_fetch_size
while True:
ret = from_client.list_logstore(from_project, offset=offset, size=size)
count = ret.get_logstores_count()
total = ret.get_logstores_total()
for logstore_name in ret.get_logstores():
# copy logstore
ret = from_client.get_logstore(from_project, logstore_name)
res_shard = from_client.list_shards(from_project, logstore_name)
expected_rwshard_count = len([shard for shard in res_shard.shards if shard['status'].lower() == 'readwrite'])
ret = to_client.create_logstore(to_project, logstore_name, ret.get_ttl(),
min(expected_rwshard_count, MAX_INIT_SHARD_COUNT),
enable_tracking=ret.get_enable_tracking(),
append_meta=ret.append_meta,
auto_split=ret.auto_split,
max_split_shard=ret.max_split_shard,
preserve_storage=ret.preserve_storage
)
# copy index
try:
ret = from_client.get_index_config(from_project, logstore_name)
ret = to_client.create_index(to_project, logstore_name, ret.get_index_config())
except LogException as ex:
if ex.get_error_code() == 'IndexConfigNotExist':
pass
else:
raise
offset += count
if count < size or offset >= total:
break
# list logtail config and copy them
offset, size = 0, default_fetch_size
while True:
ret = from_client.list_logtail_config(from_project, offset=offset, size=size)
count = ret.get_configs_count()
total = ret.get_configs_total()
for config_name in ret.get_configs():
ret = from_client.get_logtail_config(from_project, config_name)
ret = to_client.create_logtail_config(to_project, ret.logtail_config)
offset += count
if count < size or offset >= total:
break
# list machine group and copy them
offset, size = 0, default_fetch_size
while copy_machine_group:
ret = from_client.list_machine_group(from_project, offset=offset, size=size)
count = ret.get_machine_group_count()
total = ret.get_machine_group_total()
for group_name in ret.get_machine_group():
ret = from_client.get_machine_group(from_project, group_name)
ret = to_client.create_machine_group(to_project, ret.get_machine_group())
# list all applied config and copy the relationship
ret = from_client.get_machine_group_applied_configs(from_project, group_name)
for config_name in ret.get_configs():
to_client.apply_config_to_machine_group(to_project, config_name, group_name)
offset += count
if count < size or offset >= total:
break | python | def copy_project(from_client, to_client, from_project, to_project, copy_machine_group=False):
"""
copy project, logstore, machine group and logtail config to target project,
will create the target project if it doesn't exist
:type from_client: LogClient
:param from_client: logclient instance
:type to_client: LogClient
:param to_client: logclient instance
:type from_project: string
:param from_project: project name
:type to_project: string
:param to_project: project name
:type copy_machine_group: bool
:param copy_machine_group: if copy machine group resources, False by default.
:return:
"""
# copy project
ret = from_client.get_project(from_project)
try:
ret = to_client.create_project(to_project, ret.get_description())
except LogException as ex:
if ex.get_error_code() == 'ProjectAlreadyExist':
# don't create the project as it already exists
pass
else:
raise
default_fetch_size = 100
# list logstore and copy them
offset, size = 0, default_fetch_size
while True:
ret = from_client.list_logstore(from_project, offset=offset, size=size)
count = ret.get_logstores_count()
total = ret.get_logstores_total()
for logstore_name in ret.get_logstores():
# copy logstore
ret = from_client.get_logstore(from_project, logstore_name)
res_shard = from_client.list_shards(from_project, logstore_name)
expected_rwshard_count = len([shard for shard in res_shard.shards if shard['status'].lower() == 'readwrite'])
ret = to_client.create_logstore(to_project, logstore_name, ret.get_ttl(),
min(expected_rwshard_count, MAX_INIT_SHARD_COUNT),
enable_tracking=ret.get_enable_tracking(),
append_meta=ret.append_meta,
auto_split=ret.auto_split,
max_split_shard=ret.max_split_shard,
preserve_storage=ret.preserve_storage
)
# copy index
try:
ret = from_client.get_index_config(from_project, logstore_name)
ret = to_client.create_index(to_project, logstore_name, ret.get_index_config())
except LogException as ex:
if ex.get_error_code() == 'IndexConfigNotExist':
pass
else:
raise
offset += count
if count < size or offset >= total:
break
# list logtail config and copy them
offset, size = 0, default_fetch_size
while True:
ret = from_client.list_logtail_config(from_project, offset=offset, size=size)
count = ret.get_configs_count()
total = ret.get_configs_total()
for config_name in ret.get_configs():
ret = from_client.get_logtail_config(from_project, config_name)
ret = to_client.create_logtail_config(to_project, ret.logtail_config)
offset += count
if count < size or offset >= total:
break
# list machine group and copy them
offset, size = 0, default_fetch_size
while copy_machine_group:
ret = from_client.list_machine_group(from_project, offset=offset, size=size)
count = ret.get_machine_group_count()
total = ret.get_machine_group_total()
for group_name in ret.get_machine_group():
ret = from_client.get_machine_group(from_project, group_name)
ret = to_client.create_machine_group(to_project, ret.get_machine_group())
# list all applied config and copy the relationship
ret = from_client.get_machine_group_applied_configs(from_project, group_name)
for config_name in ret.get_configs():
to_client.apply_config_to_machine_group(to_project, config_name, group_name)
offset += count
if count < size or offset >= total:
break | [
"def",
"copy_project",
"(",
"from_client",
",",
"to_client",
",",
"from_project",
",",
"to_project",
",",
"copy_machine_group",
"=",
"False",
")",
":",
"# copy project",
"ret",
"=",
"from_client",
".",
"get_project",
"(",
"from_project",
")",
"try",
":",
"ret",
... | copy project, logstore, machine group and logtail config to target project,
will create the target project if it doesn't exist
:type from_client: LogClient
:param from_client: logclient instance
:type to_client: LogClient
:param to_client: logclient instance
:type from_project: string
:param from_project: project name
:type to_project: string
:param to_project: project name
:type copy_machine_group: bool
:param copy_machine_group: if copy machine group resources, False by default.
:return: | [
"copy",
"project",
"logstore",
"machine",
"group",
"and",
"logtail",
"config",
"to",
"target",
"project",
"will",
"create",
"the",
"target",
"project",
"if",
"it",
"doesn",
"t",
"exist"
] | ac383db0a16abf1e5ef7df36074374184b43516e | https://github.com/aliyun/aliyun-log-python-sdk/blob/ac383db0a16abf1e5ef7df36074374184b43516e/aliyun/log/logclient_operator.py#L26-L129 | train | 198,605 |
aliyun/aliyun-log-python-sdk | aliyun/log/logclient_operator.py | copy_logstore | def copy_logstore(from_client, from_project, from_logstore, to_logstore, to_project=None, to_client=None):
"""
copy logstore, index, logtail config to target logstore, machine group are not included yet.
the target logstore will be crated if not existing
:type from_client: LogClient
:param from_client: logclient instance
:type from_project: string
:param from_project: project name
:type from_logstore: string
:param from_logstore: logstore name
:type to_logstore: string
:param to_logstore: target logstore name
:type to_project: string
:param to_project: project name, copy to same project if not being specified, will try to create it if not being specified
:type to_client: LogClient
:param to_client: logclient instance, use it to operate on the "to_project" if being specified
:return:
"""
# check client
if to_project is not None:
# copy to a different project in different client
to_client = to_client or from_client
# check if target project exists or not
ret = from_client.get_project(from_project)
try:
ret = to_client.create_project(to_project, ret.get_description())
except LogException as ex:
if ex.get_error_code() == 'ProjectAlreadyExist':
# don't create the project as it already exists
pass
else:
raise
to_project = to_project or from_project
to_client = to_client or from_client
# return if logstore are the same one
if from_client is to_client and from_project == to_project and from_logstore == to_logstore:
return
# copy logstore
ret = from_client.get_logstore(from_project, from_logstore)
res_shard = from_client.list_shards(from_project, from_logstore)
expected_rwshard_count = len([shard for shard in res_shard.shards if shard['status'].lower() == 'readwrite'])
try:
ret = to_client.create_logstore(to_project, to_logstore,
ttl=ret.get_ttl(),
shard_count=min(expected_rwshard_count, MAX_INIT_SHARD_COUNT),
enable_tracking=ret.get_enable_tracking(),
append_meta=ret.append_meta,
auto_split=ret.auto_split,
max_split_shard=ret.max_split_shard,
preserve_storage=ret.preserve_storage)
except LogException as ex:
if ex.get_error_code() == 'LogStoreAlreadyExist':
# update logstore's settings
ret = to_client.update_logstore(to_project, to_logstore,
ttl=ret.get_ttl(),
enable_tracking=ret.get_enable_tracking(),
append_meta=ret.append_meta,
auto_split=ret.auto_split,
max_split_shard=ret.max_split_shard,
preserve_storage=ret.preserve_storage
)
# arrange shard to expected count
res = arrange_shard(to_client, to_project, to_logstore, min(expected_rwshard_count, MAX_INIT_SHARD_COUNT))
else:
raise
# copy index
try:
ret = from_client.get_index_config(from_project, from_logstore)
ret = to_client.create_index(to_project, to_logstore, ret.get_index_config())
except LogException as ex:
if ex.get_error_code() == 'IndexConfigNotExist':
# source has no index
pass
elif ex.get_error_code() == 'IndexAlreadyExist':
# target already has index, overwrite it
ret = to_client.update_index(to_project, to_logstore, ret.get_index_config())
pass
else:
raise
# list logtail config linked to the logstore and copy them
default_fetch_size = 100
offset, size = 0, default_fetch_size
while True:
ret = from_client.list_logtail_config(from_project, offset=offset, size=size)
count = ret.get_configs_count()
total = ret.get_configs_total()
for config_name in ret.get_configs():
ret = from_client.get_logtail_config(from_project, config_name)
config = ret.logtail_config
if config.logstore_name != from_logstore:
continue
config.config_name = to_logstore + '_' + config_name
config.logstore_name = to_logstore
ret = to_client.create_logtail_config(to_project, config)
offset += count
if count < size or offset >= total:
break | python | def copy_logstore(from_client, from_project, from_logstore, to_logstore, to_project=None, to_client=None):
"""
copy logstore, index, logtail config to target logstore, machine group are not included yet.
the target logstore will be crated if not existing
:type from_client: LogClient
:param from_client: logclient instance
:type from_project: string
:param from_project: project name
:type from_logstore: string
:param from_logstore: logstore name
:type to_logstore: string
:param to_logstore: target logstore name
:type to_project: string
:param to_project: project name, copy to same project if not being specified, will try to create it if not being specified
:type to_client: LogClient
:param to_client: logclient instance, use it to operate on the "to_project" if being specified
:return:
"""
# check client
if to_project is not None:
# copy to a different project in different client
to_client = to_client or from_client
# check if target project exists or not
ret = from_client.get_project(from_project)
try:
ret = to_client.create_project(to_project, ret.get_description())
except LogException as ex:
if ex.get_error_code() == 'ProjectAlreadyExist':
# don't create the project as it already exists
pass
else:
raise
to_project = to_project or from_project
to_client = to_client or from_client
# return if logstore are the same one
if from_client is to_client and from_project == to_project and from_logstore == to_logstore:
return
# copy logstore
ret = from_client.get_logstore(from_project, from_logstore)
res_shard = from_client.list_shards(from_project, from_logstore)
expected_rwshard_count = len([shard for shard in res_shard.shards if shard['status'].lower() == 'readwrite'])
try:
ret = to_client.create_logstore(to_project, to_logstore,
ttl=ret.get_ttl(),
shard_count=min(expected_rwshard_count, MAX_INIT_SHARD_COUNT),
enable_tracking=ret.get_enable_tracking(),
append_meta=ret.append_meta,
auto_split=ret.auto_split,
max_split_shard=ret.max_split_shard,
preserve_storage=ret.preserve_storage)
except LogException as ex:
if ex.get_error_code() == 'LogStoreAlreadyExist':
# update logstore's settings
ret = to_client.update_logstore(to_project, to_logstore,
ttl=ret.get_ttl(),
enable_tracking=ret.get_enable_tracking(),
append_meta=ret.append_meta,
auto_split=ret.auto_split,
max_split_shard=ret.max_split_shard,
preserve_storage=ret.preserve_storage
)
# arrange shard to expected count
res = arrange_shard(to_client, to_project, to_logstore, min(expected_rwshard_count, MAX_INIT_SHARD_COUNT))
else:
raise
# copy index
try:
ret = from_client.get_index_config(from_project, from_logstore)
ret = to_client.create_index(to_project, to_logstore, ret.get_index_config())
except LogException as ex:
if ex.get_error_code() == 'IndexConfigNotExist':
# source has no index
pass
elif ex.get_error_code() == 'IndexAlreadyExist':
# target already has index, overwrite it
ret = to_client.update_index(to_project, to_logstore, ret.get_index_config())
pass
else:
raise
# list logtail config linked to the logstore and copy them
default_fetch_size = 100
offset, size = 0, default_fetch_size
while True:
ret = from_client.list_logtail_config(from_project, offset=offset, size=size)
count = ret.get_configs_count()
total = ret.get_configs_total()
for config_name in ret.get_configs():
ret = from_client.get_logtail_config(from_project, config_name)
config = ret.logtail_config
if config.logstore_name != from_logstore:
continue
config.config_name = to_logstore + '_' + config_name
config.logstore_name = to_logstore
ret = to_client.create_logtail_config(to_project, config)
offset += count
if count < size or offset >= total:
break | [
"def",
"copy_logstore",
"(",
"from_client",
",",
"from_project",
",",
"from_logstore",
",",
"to_logstore",
",",
"to_project",
"=",
"None",
",",
"to_client",
"=",
"None",
")",
":",
"# check client",
"if",
"to_project",
"is",
"not",
"None",
":",
"# copy to a diffe... | copy logstore, index, logtail config to target logstore, machine group are not included yet.
the target logstore will be crated if not existing
:type from_client: LogClient
:param from_client: logclient instance
:type from_project: string
:param from_project: project name
:type from_logstore: string
:param from_logstore: logstore name
:type to_logstore: string
:param to_logstore: target logstore name
:type to_project: string
:param to_project: project name, copy to same project if not being specified, will try to create it if not being specified
:type to_client: LogClient
:param to_client: logclient instance, use it to operate on the "to_project" if being specified
:return: | [
"copy",
"logstore",
"index",
"logtail",
"config",
"to",
"target",
"logstore",
"machine",
"group",
"are",
"not",
"included",
"yet",
".",
"the",
"target",
"logstore",
"will",
"be",
"crated",
"if",
"not",
"existing"
] | ac383db0a16abf1e5ef7df36074374184b43516e | https://github.com/aliyun/aliyun-log-python-sdk/blob/ac383db0a16abf1e5ef7df36074374184b43516e/aliyun/log/logclient_operator.py#L132-L247 | train | 198,606 |
aliyun/aliyun-log-python-sdk | aliyun/log/logclient_operator.py | _split_one_shard_to_multiple | def _split_one_shard_to_multiple(client, project, logstore, shard_info, count, current_shard_count):
"""return new_rw_shards_list, increased_shard_count """
distance = shard_info['length'] // count
if distance <= 0 or count <= 1:
return [shard_info['info']], 0
rw_shards, increased_shard_count = {shard_info['id']: shard_info['info']}, 0
for x in range(1, count):
new_hash = shard_info['start'] + distance * x
new_hash = hex(new_hash)[2:].strip('lL')
new_hash = '0' * (TOTAL_HASH_LENGTH - len(new_hash)) + new_hash
try:
if x == 1:
res = client.split_shard(project, logstore, shard_info['id'], new_hash)
else:
res = client.split_shard(project, logstore, current_shard_count - 1, new_hash)
# new rw_shards
for shard in res.shards:
if shard['status'] == 'readonly':
del rw_shards[shard['shardID']]
else:
rw_shards[shard['shardID']] = shard
current_shard_count += res.count - 1
increased_shard_count += res.count - 1
logger.info("split shard: project={0}, logstore={1}, shard_info={2}, count={3}, current_shard_count={4}".format(project, logstore, shard_info, count, current_shard_count))
except Exception as ex:
print(ex)
print(x, project, logstore, shard_info, count, current_shard_count)
raise
return rw_shards.values(), increased_shard_count | python | def _split_one_shard_to_multiple(client, project, logstore, shard_info, count, current_shard_count):
"""return new_rw_shards_list, increased_shard_count """
distance = shard_info['length'] // count
if distance <= 0 or count <= 1:
return [shard_info['info']], 0
rw_shards, increased_shard_count = {shard_info['id']: shard_info['info']}, 0
for x in range(1, count):
new_hash = shard_info['start'] + distance * x
new_hash = hex(new_hash)[2:].strip('lL')
new_hash = '0' * (TOTAL_HASH_LENGTH - len(new_hash)) + new_hash
try:
if x == 1:
res = client.split_shard(project, logstore, shard_info['id'], new_hash)
else:
res = client.split_shard(project, logstore, current_shard_count - 1, new_hash)
# new rw_shards
for shard in res.shards:
if shard['status'] == 'readonly':
del rw_shards[shard['shardID']]
else:
rw_shards[shard['shardID']] = shard
current_shard_count += res.count - 1
increased_shard_count += res.count - 1
logger.info("split shard: project={0}, logstore={1}, shard_info={2}, count={3}, current_shard_count={4}".format(project, logstore, shard_info, count, current_shard_count))
except Exception as ex:
print(ex)
print(x, project, logstore, shard_info, count, current_shard_count)
raise
return rw_shards.values(), increased_shard_count | [
"def",
"_split_one_shard_to_multiple",
"(",
"client",
",",
"project",
",",
"logstore",
",",
"shard_info",
",",
"count",
",",
"current_shard_count",
")",
":",
"distance",
"=",
"shard_info",
"[",
"'length'",
"]",
"//",
"count",
"if",
"distance",
"<=",
"0",
"or",... | return new_rw_shards_list, increased_shard_count | [
"return",
"new_rw_shards_list",
"increased_shard_count"
] | ac383db0a16abf1e5ef7df36074374184b43516e | https://github.com/aliyun/aliyun-log-python-sdk/blob/ac383db0a16abf1e5ef7df36074374184b43516e/aliyun/log/logclient_operator.py#L505-L537 | train | 198,607 |
aliyun/aliyun-log-python-sdk | aliyun/log/util.py | Util.get_host_ip | def get_host_ip(logHost):
""" If it is not match your local ip, you should fill the PutLogsRequest
parameter source by yourself.
"""
s = None
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect((logHost, 80))
ip = s.getsockname()[0]
return ip
except Exception:
return '127.0.0.1'
finally:
if s:
s.close() | python | def get_host_ip(logHost):
""" If it is not match your local ip, you should fill the PutLogsRequest
parameter source by yourself.
"""
s = None
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect((logHost, 80))
ip = s.getsockname()[0]
return ip
except Exception:
return '127.0.0.1'
finally:
if s:
s.close() | [
"def",
"get_host_ip",
"(",
"logHost",
")",
":",
"s",
"=",
"None",
"try",
":",
"s",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_DGRAM",
")",
"s",
".",
"connect",
"(",
"(",
"logHost",
",",
"80",
")",
")",
... | If it is not match your local ip, you should fill the PutLogsRequest
parameter source by yourself. | [
"If",
"it",
"is",
"not",
"match",
"your",
"local",
"ip",
"you",
"should",
"fill",
"the",
"PutLogsRequest",
"parameter",
"source",
"by",
"yourself",
"."
] | ac383db0a16abf1e5ef7df36074374184b43516e | https://github.com/aliyun/aliyun-log-python-sdk/blob/ac383db0a16abf1e5ef7df36074374184b43516e/aliyun/log/util.py#L52-L66 | train | 198,608 |
aliyun/aliyun-log-python-sdk | aliyun/log/logtail_config_detail.py | LogtailConfigHelper.generate_common_reg_log_config | def generate_common_reg_log_config(json_value):
"""Generate common logtail config from loaded json value
:param json_value:
:return:
"""
input_detail = copy.deepcopy(json_value['inputDetail'])
output_detail = json_value['outputDetail']
logSample = json_value.get('logSample', '')
config_name = json_value['configName']
logstore_name = output_detail['logstoreName']
endpoint = output_detail.get('endpoint', '')
log_path = input_detail['logPath']
file_pattern = input_detail['filePattern']
time_format = input_detail['timeFormat']
log_begin_regex = input_detail.get('logBeginRegex', '')
log_parse_regex = input_detail.get('regex', '')
reg_keys = input_detail['key']
topic_format = input_detail['topicFormat']
filter_keys = input_detail['filterKey']
filter_keys_reg = input_detail['filterRegex']
log_type = input_detail.get('logType')
for item in ('logPath', 'filePattern', 'timeFormat', 'logBeginRegex', 'regex', 'key',
'topicFormat', 'filterKey', 'filterRegex', 'logType'):
if item in input_detail:
del input_detail[item]
config = CommonRegLogConfigDetail(config_name, logstore_name, endpoint, log_path, file_pattern, time_format,
log_begin_regex, log_parse_regex, reg_keys,
topic_format, filter_keys, filter_keys_reg, logSample,
log_type, **input_detail)
return config | python | def generate_common_reg_log_config(json_value):
"""Generate common logtail config from loaded json value
:param json_value:
:return:
"""
input_detail = copy.deepcopy(json_value['inputDetail'])
output_detail = json_value['outputDetail']
logSample = json_value.get('logSample', '')
config_name = json_value['configName']
logstore_name = output_detail['logstoreName']
endpoint = output_detail.get('endpoint', '')
log_path = input_detail['logPath']
file_pattern = input_detail['filePattern']
time_format = input_detail['timeFormat']
log_begin_regex = input_detail.get('logBeginRegex', '')
log_parse_regex = input_detail.get('regex', '')
reg_keys = input_detail['key']
topic_format = input_detail['topicFormat']
filter_keys = input_detail['filterKey']
filter_keys_reg = input_detail['filterRegex']
log_type = input_detail.get('logType')
for item in ('logPath', 'filePattern', 'timeFormat', 'logBeginRegex', 'regex', 'key',
'topicFormat', 'filterKey', 'filterRegex', 'logType'):
if item in input_detail:
del input_detail[item]
config = CommonRegLogConfigDetail(config_name, logstore_name, endpoint, log_path, file_pattern, time_format,
log_begin_regex, log_parse_regex, reg_keys,
topic_format, filter_keys, filter_keys_reg, logSample,
log_type, **input_detail)
return config | [
"def",
"generate_common_reg_log_config",
"(",
"json_value",
")",
":",
"input_detail",
"=",
"copy",
".",
"deepcopy",
"(",
"json_value",
"[",
"'inputDetail'",
"]",
")",
"output_detail",
"=",
"json_value",
"[",
"'outputDetail'",
"]",
"logSample",
"=",
"json_value",
"... | Generate common logtail config from loaded json value
:param json_value:
:return: | [
"Generate",
"common",
"logtail",
"config",
"from",
"loaded",
"json",
"value"
] | ac383db0a16abf1e5ef7df36074374184b43516e | https://github.com/aliyun/aliyun-log-python-sdk/blob/ac383db0a16abf1e5ef7df36074374184b43516e/aliyun/log/logtail_config_detail.py#L281-L314 | train | 198,609 |
aliyun/aliyun-log-python-sdk | aliyun/log/logtail_config_detail.py | LogtailConfigHelper.generate_apsara_log_config | def generate_apsara_log_config(json_value):
"""Generate apsara logtail config from loaded json value
:param json_value:
:return:
"""
input_detail = json_value['inputDetail']
output_detail = json_value['outputDetail']
config_name = json_value['configName']
logSample = json_value.get('logSample', '')
logstore_name = output_detail['logstoreName']
endpoint = output_detail.get('endpoint', '')
log_path = input_detail['logPath']
file_pattern = input_detail['filePattern']
log_begin_regex = input_detail.get('logBeginRegex', '')
topic_format = input_detail['topicFormat']
filter_keys = input_detail['filterKey']
filter_keys_reg = input_detail['filterRegex']
config = ApsaraLogConfigDetail(config_name, logstore_name, endpoint, log_path, file_pattern,
log_begin_regex, topic_format, filter_keys, filter_keys_reg, logSample)
return config | python | def generate_apsara_log_config(json_value):
"""Generate apsara logtail config from loaded json value
:param json_value:
:return:
"""
input_detail = json_value['inputDetail']
output_detail = json_value['outputDetail']
config_name = json_value['configName']
logSample = json_value.get('logSample', '')
logstore_name = output_detail['logstoreName']
endpoint = output_detail.get('endpoint', '')
log_path = input_detail['logPath']
file_pattern = input_detail['filePattern']
log_begin_regex = input_detail.get('logBeginRegex', '')
topic_format = input_detail['topicFormat']
filter_keys = input_detail['filterKey']
filter_keys_reg = input_detail['filterRegex']
config = ApsaraLogConfigDetail(config_name, logstore_name, endpoint, log_path, file_pattern,
log_begin_regex, topic_format, filter_keys, filter_keys_reg, logSample)
return config | [
"def",
"generate_apsara_log_config",
"(",
"json_value",
")",
":",
"input_detail",
"=",
"json_value",
"[",
"'inputDetail'",
"]",
"output_detail",
"=",
"json_value",
"[",
"'outputDetail'",
"]",
"config_name",
"=",
"json_value",
"[",
"'configName'",
"]",
"logSample",
"... | Generate apsara logtail config from loaded json value
:param json_value:
:return: | [
"Generate",
"apsara",
"logtail",
"config",
"from",
"loaded",
"json",
"value"
] | ac383db0a16abf1e5ef7df36074374184b43516e | https://github.com/aliyun/aliyun-log-python-sdk/blob/ac383db0a16abf1e5ef7df36074374184b43516e/aliyun/log/logtail_config_detail.py#L317-L340 | train | 198,610 |
aliyun/aliyun-log-python-sdk | aliyun/log/logtail_config_detail.py | LogtailConfigHelper.generate_logtail_config | def generate_logtail_config(json_value):
"""Generate logtail config from loaded json value
:param json_value:
:return:
"""
logger.warning("aliyun.log.LogtailConfigHelper is deprecated and will be removed in future version."
"Use LogtailConfigGenerator instead")
if json_value['inputDetail']['logType'] == 'apsara_log':
return LogtailConfigHelper.generate_apsara_log_config(json_value)
return LogtailConfigHelper.generate_common_reg_log_config(json_value) | python | def generate_logtail_config(json_value):
"""Generate logtail config from loaded json value
:param json_value:
:return:
"""
logger.warning("aliyun.log.LogtailConfigHelper is deprecated and will be removed in future version."
"Use LogtailConfigGenerator instead")
if json_value['inputDetail']['logType'] == 'apsara_log':
return LogtailConfigHelper.generate_apsara_log_config(json_value)
return LogtailConfigHelper.generate_common_reg_log_config(json_value) | [
"def",
"generate_logtail_config",
"(",
"json_value",
")",
":",
"logger",
".",
"warning",
"(",
"\"aliyun.log.LogtailConfigHelper is deprecated and will be removed in future version.\"",
"\"Use LogtailConfigGenerator instead\"",
")",
"if",
"json_value",
"[",
"'inputDetail'",
"]",
"... | Generate logtail config from loaded json value
:param json_value:
:return: | [
"Generate",
"logtail",
"config",
"from",
"loaded",
"json",
"value"
] | ac383db0a16abf1e5ef7df36074374184b43516e | https://github.com/aliyun/aliyun-log-python-sdk/blob/ac383db0a16abf1e5ef7df36074374184b43516e/aliyun/log/logtail_config_detail.py#L343-L354 | train | 198,611 |
aliyun/aliyun-log-python-sdk | aliyun/log/logger_hanlder.py | QueuedLogHandler._get_batch_requests | def _get_batch_requests(self, timeout=None):
"""try to get request as fast as possible, once empty and stop falg or time-out, just return Empty"""
reqs = []
s = time()
while len(reqs) < self.batch_size and (time() - s) < timeout:
try:
req = self.queue.get(block=False)
self.queue.task_done()
reqs.append(req)
except Empty as ex:
if self.stop_flag:
break
else:
sleep(0.1)
if not reqs:
raise Empty
elif len(reqs) <= 1:
return reqs[0]
else:
logitems = []
req = reqs[0]
for req in reqs:
logitems.extend(req.get_log_items())
ret = PutLogsRequest(self.project, self.log_store, req.topic, logitems=logitems)
ret.__record__ = req.__record__
return ret | python | def _get_batch_requests(self, timeout=None):
"""try to get request as fast as possible, once empty and stop falg or time-out, just return Empty"""
reqs = []
s = time()
while len(reqs) < self.batch_size and (time() - s) < timeout:
try:
req = self.queue.get(block=False)
self.queue.task_done()
reqs.append(req)
except Empty as ex:
if self.stop_flag:
break
else:
sleep(0.1)
if not reqs:
raise Empty
elif len(reqs) <= 1:
return reqs[0]
else:
logitems = []
req = reqs[0]
for req in reqs:
logitems.extend(req.get_log_items())
ret = PutLogsRequest(self.project, self.log_store, req.topic, logitems=logitems)
ret.__record__ = req.__record__
return ret | [
"def",
"_get_batch_requests",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"reqs",
"=",
"[",
"]",
"s",
"=",
"time",
"(",
")",
"while",
"len",
"(",
"reqs",
")",
"<",
"self",
".",
"batch_size",
"and",
"(",
"time",
"(",
")",
"-",
"s",
")",
... | try to get request as fast as possible, once empty and stop falg or time-out, just return Empty | [
"try",
"to",
"get",
"request",
"as",
"fast",
"as",
"possible",
"once",
"empty",
"and",
"stop",
"falg",
"or",
"time",
"-",
"out",
"just",
"return",
"Empty"
] | ac383db0a16abf1e5ef7df36074374184b43516e | https://github.com/aliyun/aliyun-log-python-sdk/blob/ac383db0a16abf1e5ef7df36074374184b43516e/aliyun/log/logger_hanlder.py#L378-L407 | train | 198,612 |
numenta/htmresearch | htmresearch/support/union_temporal_pooler_monitor_mixin.py | UnionTemporalPoolerMonitorMixin.mmGetPlotUnionSDRActivity | def mmGetPlotUnionSDRActivity(self, title="Union SDR Activity Raster",
showReset=False, resetShading=0.25):
""" Returns plot of the activity of union SDR bits.
@param title an optional title for the figure
@param showReset if true, the first set of activities after a reset
will have a gray background
@param resetShading If showReset is true, this float specifies the
intensity of the reset background with 0.0 being white and 1.0 being black
@return (Plot) plot
"""
unionSDRTrace = self.mmGetTraceUnionSDR().data
columnCount = self.getNumColumns()
activityType = "Union SDR Activity"
return self.mmGetCellTracePlot(unionSDRTrace, columnCount, activityType,
title=title, showReset=showReset,
resetShading=resetShading) | python | def mmGetPlotUnionSDRActivity(self, title="Union SDR Activity Raster",
showReset=False, resetShading=0.25):
""" Returns plot of the activity of union SDR bits.
@param title an optional title for the figure
@param showReset if true, the first set of activities after a reset
will have a gray background
@param resetShading If showReset is true, this float specifies the
intensity of the reset background with 0.0 being white and 1.0 being black
@return (Plot) plot
"""
unionSDRTrace = self.mmGetTraceUnionSDR().data
columnCount = self.getNumColumns()
activityType = "Union SDR Activity"
return self.mmGetCellTracePlot(unionSDRTrace, columnCount, activityType,
title=title, showReset=showReset,
resetShading=resetShading) | [
"def",
"mmGetPlotUnionSDRActivity",
"(",
"self",
",",
"title",
"=",
"\"Union SDR Activity Raster\"",
",",
"showReset",
"=",
"False",
",",
"resetShading",
"=",
"0.25",
")",
":",
"unionSDRTrace",
"=",
"self",
".",
"mmGetTraceUnionSDR",
"(",
")",
".",
"data",
"colu... | Returns plot of the activity of union SDR bits.
@param title an optional title for the figure
@param showReset if true, the first set of activities after a reset
will have a gray background
@param resetShading If showReset is true, this float specifies the
intensity of the reset background with 0.0 being white and 1.0 being black
@return (Plot) plot | [
"Returns",
"plot",
"of",
"the",
"activity",
"of",
"union",
"SDR",
"bits",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/support/union_temporal_pooler_monitor_mixin.py#L176-L191 | train | 198,613 |
numenta/htmresearch | htmresearch/support/union_temporal_pooler_monitor_mixin.py | UnionTemporalPoolerMonitorMixin.mmGetMetricStabilityConfusion | def mmGetMetricStabilityConfusion(self):
"""
For each iteration that doesn't follow a reset, looks at every other
iteration for the same world that doesn't follow a reset, and computes the
number of bits that show up in one or the other set of active cells for
that iteration, but not both. This metric returns the distribution of those
numbers.
@return (Metric) Stability confusion metric
"""
self._mmComputeSequenceRepresentationData()
numbers = self._mmData["stabilityConfusion"]
return Metric(self, "stability confusion", numbers) | python | def mmGetMetricStabilityConfusion(self):
"""
For each iteration that doesn't follow a reset, looks at every other
iteration for the same world that doesn't follow a reset, and computes the
number of bits that show up in one or the other set of active cells for
that iteration, but not both. This metric returns the distribution of those
numbers.
@return (Metric) Stability confusion metric
"""
self._mmComputeSequenceRepresentationData()
numbers = self._mmData["stabilityConfusion"]
return Metric(self, "stability confusion", numbers) | [
"def",
"mmGetMetricStabilityConfusion",
"(",
"self",
")",
":",
"self",
".",
"_mmComputeSequenceRepresentationData",
"(",
")",
"numbers",
"=",
"self",
".",
"_mmData",
"[",
"\"stabilityConfusion\"",
"]",
"return",
"Metric",
"(",
"self",
",",
"\"stability confusion\"",
... | For each iteration that doesn't follow a reset, looks at every other
iteration for the same world that doesn't follow a reset, and computes the
number of bits that show up in one or the other set of active cells for
that iteration, but not both. This metric returns the distribution of those
numbers.
@return (Metric) Stability confusion metric | [
"For",
"each",
"iteration",
"that",
"doesn",
"t",
"follow",
"a",
"reset",
"looks",
"at",
"every",
"other",
"iteration",
"for",
"the",
"same",
"world",
"that",
"doesn",
"t",
"follow",
"a",
"reset",
"and",
"computes",
"the",
"number",
"of",
"bits",
"that",
... | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/support/union_temporal_pooler_monitor_mixin.py#L276-L288 | train | 198,614 |
numenta/htmresearch | htmresearch/support/union_temporal_pooler_monitor_mixin.py | UnionTemporalPoolerMonitorMixin.mmGetPlotStability | def mmGetPlotStability(self, title="Stability", showReset=False,
resetShading=0.25):
"""
Returns plot of the overlap metric between union SDRs within a sequence.
@param title an optional title for the figure
@return (Plot) plot
"""
plot = Plot(self, title)
self._mmComputeSequenceRepresentationData()
data = self._mmData["stabilityConfusion"]
plot.addGraph(sorted(data, reverse=True),
position=211,
xlabel="Time steps", ylabel="Overlap")
plot.addHistogram(data,
position=212,
bins=100,
xlabel="Overlap", ylabel="# time steps")
return plot | python | def mmGetPlotStability(self, title="Stability", showReset=False,
resetShading=0.25):
"""
Returns plot of the overlap metric between union SDRs within a sequence.
@param title an optional title for the figure
@return (Plot) plot
"""
plot = Plot(self, title)
self._mmComputeSequenceRepresentationData()
data = self._mmData["stabilityConfusion"]
plot.addGraph(sorted(data, reverse=True),
position=211,
xlabel="Time steps", ylabel="Overlap")
plot.addHistogram(data,
position=212,
bins=100,
xlabel="Overlap", ylabel="# time steps")
return plot | [
"def",
"mmGetPlotStability",
"(",
"self",
",",
"title",
"=",
"\"Stability\"",
",",
"showReset",
"=",
"False",
",",
"resetShading",
"=",
"0.25",
")",
":",
"plot",
"=",
"Plot",
"(",
"self",
",",
"title",
")",
"self",
".",
"_mmComputeSequenceRepresentationData",
... | Returns plot of the overlap metric between union SDRs within a sequence.
@param title an optional title for the figure
@return (Plot) plot | [
"Returns",
"plot",
"of",
"the",
"overlap",
"metric",
"between",
"union",
"SDRs",
"within",
"a",
"sequence",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/support/union_temporal_pooler_monitor_mixin.py#L291-L308 | train | 198,615 |
numenta/htmresearch | htmresearch/support/union_temporal_pooler_monitor_mixin.py | UnionTemporalPoolerMonitorMixin.mmGetMetricDistinctnessConfusion | def mmGetMetricDistinctnessConfusion(self):
"""
For each iteration that doesn't follow a reset, looks at every other
iteration for every other world that doesn't follow a reset, and computes
the number of bits that show up in both sets of active cells those that
iteration. This metric returns the distribution of those numbers.
@return (Metric) Distinctness confusion metric
"""
self._mmComputeSequenceRepresentationData()
numbers = self._mmData["distinctnessConfusion"]
return Metric(self, "distinctness confusion", numbers) | python | def mmGetMetricDistinctnessConfusion(self):
"""
For each iteration that doesn't follow a reset, looks at every other
iteration for every other world that doesn't follow a reset, and computes
the number of bits that show up in both sets of active cells those that
iteration. This metric returns the distribution of those numbers.
@return (Metric) Distinctness confusion metric
"""
self._mmComputeSequenceRepresentationData()
numbers = self._mmData["distinctnessConfusion"]
return Metric(self, "distinctness confusion", numbers) | [
"def",
"mmGetMetricDistinctnessConfusion",
"(",
"self",
")",
":",
"self",
".",
"_mmComputeSequenceRepresentationData",
"(",
")",
"numbers",
"=",
"self",
".",
"_mmData",
"[",
"\"distinctnessConfusion\"",
"]",
"return",
"Metric",
"(",
"self",
",",
"\"distinctness confus... | For each iteration that doesn't follow a reset, looks at every other
iteration for every other world that doesn't follow a reset, and computes
the number of bits that show up in both sets of active cells those that
iteration. This metric returns the distribution of those numbers.
@return (Metric) Distinctness confusion metric | [
"For",
"each",
"iteration",
"that",
"doesn",
"t",
"follow",
"a",
"reset",
"looks",
"at",
"every",
"other",
"iteration",
"for",
"every",
"other",
"world",
"that",
"doesn",
"t",
"follow",
"a",
"reset",
"and",
"computes",
"the",
"number",
"of",
"bits",
"that"... | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/support/union_temporal_pooler_monitor_mixin.py#L311-L322 | train | 198,616 |
numenta/htmresearch | htmresearch/support/union_temporal_pooler_monitor_mixin.py | UnionTemporalPoolerMonitorMixin._mmUpdateDutyCycles | def _mmUpdateDutyCycles(self):
"""
Update the duty cycle variables internally tracked by the TM mixin.
"""
period = self.getDutyCyclePeriod()
unionSDRArray = numpy.zeros(self.getNumColumns())
unionSDRArray[list(self._mmTraces["unionSDR"].data[-1])] = 1
self._mmData["unionSDRDutyCycle"] = \
UnionTemporalPoolerMonitorMixin._mmUpdateDutyCyclesHelper(
self._mmData["unionSDRDutyCycle"], unionSDRArray, period)
self._mmData["persistenceDutyCycle"] = \
UnionTemporalPoolerMonitorMixin._mmUpdateDutyCyclesHelper(
self._mmData["persistenceDutyCycle"], self._poolingActivation, period) | python | def _mmUpdateDutyCycles(self):
"""
Update the duty cycle variables internally tracked by the TM mixin.
"""
period = self.getDutyCyclePeriod()
unionSDRArray = numpy.zeros(self.getNumColumns())
unionSDRArray[list(self._mmTraces["unionSDR"].data[-1])] = 1
self._mmData["unionSDRDutyCycle"] = \
UnionTemporalPoolerMonitorMixin._mmUpdateDutyCyclesHelper(
self._mmData["unionSDRDutyCycle"], unionSDRArray, period)
self._mmData["persistenceDutyCycle"] = \
UnionTemporalPoolerMonitorMixin._mmUpdateDutyCyclesHelper(
self._mmData["persistenceDutyCycle"], self._poolingActivation, period) | [
"def",
"_mmUpdateDutyCycles",
"(",
"self",
")",
":",
"period",
"=",
"self",
".",
"getDutyCyclePeriod",
"(",
")",
"unionSDRArray",
"=",
"numpy",
".",
"zeros",
"(",
"self",
".",
"getNumColumns",
"(",
")",
")",
"unionSDRArray",
"[",
"list",
"(",
"self",
".",
... | Update the duty cycle variables internally tracked by the TM mixin. | [
"Update",
"the",
"duty",
"cycle",
"variables",
"internally",
"tracked",
"by",
"the",
"TM",
"mixin",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/support/union_temporal_pooler_monitor_mixin.py#L417-L432 | train | 198,617 |
numenta/htmresearch | htmresearch/support/union_temporal_pooler_monitor_mixin.py | UnionTemporalPoolerMonitorMixin._mmComputeSequenceRepresentationData | def _mmComputeSequenceRepresentationData(self):
"""
Calculates values for the overlap distance matrix, stability within a
sequence, and distinctness between sequences. These values are cached so
that they do need to be recomputed for calls to each of several accessor
methods that use these values.
"""
if not self._sequenceRepresentationDataStale:
return
unionSDRTrace = self.mmGetTraceUnionSDR()
sequenceLabelsTrace = self.mmGetTraceSequenceLabels()
resetsTrace = self.mmGetTraceResets()
n = len(unionSDRTrace.data)
overlapMatrix = numpy.empty((n, n), dtype=uintType)
stabilityConfusionUnionSDR = []
distinctnessConfusionUnionSDR = []
for i in xrange(n):
for j in xrange(i+1):
overlapUnionSDR = len(unionSDRTrace.data[i] & unionSDRTrace.data[j])
overlapMatrix[i][j] = overlapUnionSDR
overlapMatrix[j][i] = overlapUnionSDR
if (i != j and
sequenceLabelsTrace.data[i] is not None and
not resetsTrace.data[i] and
sequenceLabelsTrace.data[j] is not None and
not resetsTrace.data[j]):
if sequenceLabelsTrace.data[i] == sequenceLabelsTrace.data[j]:
stabilityConfusionUnionSDR.append(overlapUnionSDR)
else:
distinctnessConfusionUnionSDR.append(overlapUnionSDR)
self._mmData["overlap"] = overlapMatrix
self._mmData["stabilityConfusion"] = stabilityConfusionUnionSDR
self._mmData["distinctnessConfusion"] = distinctnessConfusionUnionSDR
self._sequenceRepresentationDataStale = False | python | def _mmComputeSequenceRepresentationData(self):
"""
Calculates values for the overlap distance matrix, stability within a
sequence, and distinctness between sequences. These values are cached so
that they do need to be recomputed for calls to each of several accessor
methods that use these values.
"""
if not self._sequenceRepresentationDataStale:
return
unionSDRTrace = self.mmGetTraceUnionSDR()
sequenceLabelsTrace = self.mmGetTraceSequenceLabels()
resetsTrace = self.mmGetTraceResets()
n = len(unionSDRTrace.data)
overlapMatrix = numpy.empty((n, n), dtype=uintType)
stabilityConfusionUnionSDR = []
distinctnessConfusionUnionSDR = []
for i in xrange(n):
for j in xrange(i+1):
overlapUnionSDR = len(unionSDRTrace.data[i] & unionSDRTrace.data[j])
overlapMatrix[i][j] = overlapUnionSDR
overlapMatrix[j][i] = overlapUnionSDR
if (i != j and
sequenceLabelsTrace.data[i] is not None and
not resetsTrace.data[i] and
sequenceLabelsTrace.data[j] is not None and
not resetsTrace.data[j]):
if sequenceLabelsTrace.data[i] == sequenceLabelsTrace.data[j]:
stabilityConfusionUnionSDR.append(overlapUnionSDR)
else:
distinctnessConfusionUnionSDR.append(overlapUnionSDR)
self._mmData["overlap"] = overlapMatrix
self._mmData["stabilityConfusion"] = stabilityConfusionUnionSDR
self._mmData["distinctnessConfusion"] = distinctnessConfusionUnionSDR
self._sequenceRepresentationDataStale = False | [
"def",
"_mmComputeSequenceRepresentationData",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_sequenceRepresentationDataStale",
":",
"return",
"unionSDRTrace",
"=",
"self",
".",
"mmGetTraceUnionSDR",
"(",
")",
"sequenceLabelsTrace",
"=",
"self",
".",
"mmGetTraceSe... | Calculates values for the overlap distance matrix, stability within a
sequence, and distinctness between sequences. These values are cached so
that they do need to be recomputed for calls to each of several accessor
methods that use these values. | [
"Calculates",
"values",
"for",
"the",
"overlap",
"distance",
"matrix",
"stability",
"within",
"a",
"sequence",
"and",
"distinctness",
"between",
"sequences",
".",
"These",
"values",
"are",
"cached",
"so",
"that",
"they",
"do",
"need",
"to",
"be",
"recomputed",
... | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/support/union_temporal_pooler_monitor_mixin.py#L435-L475 | train | 198,618 |
numenta/htmresearch | htmresearch/support/register_regions.py | registerResearchRegion | def registerResearchRegion(regionTypeName, moduleName=None):
"""
Register this region so that NuPIC can later find it.
@param regionTypeName: (str) type name of the region. E.g LanguageSensor.
@param moduleName: (str) location of the region class, only needed if
registering a region that is outside the expected "regions/" dir.
"""
global _PY_REGIONS
if moduleName is None:
# the region is located in the regions/ directory
moduleName = "htmresearch.regions." + regionTypeName
if regionTypeName not in _PY_REGIONS:
# Add new region class to the network.
module = __import__(moduleName, {}, {}, regionTypeName)
unregisteredClass = getattr(module, regionTypeName)
Network.registerRegion(unregisteredClass)
# Add region to list of registered PyRegions
_PY_REGIONS.append(regionTypeName) | python | def registerResearchRegion(regionTypeName, moduleName=None):
"""
Register this region so that NuPIC can later find it.
@param regionTypeName: (str) type name of the region. E.g LanguageSensor.
@param moduleName: (str) location of the region class, only needed if
registering a region that is outside the expected "regions/" dir.
"""
global _PY_REGIONS
if moduleName is None:
# the region is located in the regions/ directory
moduleName = "htmresearch.regions." + regionTypeName
if regionTypeName not in _PY_REGIONS:
# Add new region class to the network.
module = __import__(moduleName, {}, {}, regionTypeName)
unregisteredClass = getattr(module, regionTypeName)
Network.registerRegion(unregisteredClass)
# Add region to list of registered PyRegions
_PY_REGIONS.append(regionTypeName) | [
"def",
"registerResearchRegion",
"(",
"regionTypeName",
",",
"moduleName",
"=",
"None",
")",
":",
"global",
"_PY_REGIONS",
"if",
"moduleName",
"is",
"None",
":",
"# the region is located in the regions/ directory",
"moduleName",
"=",
"\"htmresearch.regions.\"",
"+",
"regi... | Register this region so that NuPIC can later find it.
@param regionTypeName: (str) type name of the region. E.g LanguageSensor.
@param moduleName: (str) location of the region class, only needed if
registering a region that is outside the expected "regions/" dir. | [
"Register",
"this",
"region",
"so",
"that",
"NuPIC",
"can",
"later",
"find",
"it",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/support/register_regions.py#L46-L65 | train | 198,619 |
numenta/htmresearch | htmresearch/algorithms/image_sparse_net.py | ImageSparseNet.loadNumpyImages | def loadNumpyImages(self, path, key=None):
"""
Loads images using numpy.
:param path: (string) Path to data file
:param key: (string) Object key in data file if it's a dict
Also stores image dimensions to later the original images. If there are
multiple channels, self.numChannels will store the number of channels,
otherwise it will be set to None.
"""
data = np.load(path)
if isinstance(data, dict):
if key is None:
raise ValueError("Images are stored as a dict, a key must be provided!")
try:
data = data[key]
except KeyError:
raise KeyError("Wrong key for provided data.")
if not isinstance(data, np.ndarray):
raise TypeError("Data must be stored as a dict or numpy array.")
self._initializeDimensions(data)
return data | python | def loadNumpyImages(self, path, key=None):
"""
Loads images using numpy.
:param path: (string) Path to data file
:param key: (string) Object key in data file if it's a dict
Also stores image dimensions to later the original images. If there are
multiple channels, self.numChannels will store the number of channels,
otherwise it will be set to None.
"""
data = np.load(path)
if isinstance(data, dict):
if key is None:
raise ValueError("Images are stored as a dict, a key must be provided!")
try:
data = data[key]
except KeyError:
raise KeyError("Wrong key for provided data.")
if not isinstance(data, np.ndarray):
raise TypeError("Data must be stored as a dict or numpy array.")
self._initializeDimensions(data)
return data | [
"def",
"loadNumpyImages",
"(",
"self",
",",
"path",
",",
"key",
"=",
"None",
")",
":",
"data",
"=",
"np",
".",
"load",
"(",
"path",
")",
"if",
"isinstance",
"(",
"data",
",",
"dict",
")",
":",
"if",
"key",
"is",
"None",
":",
"raise",
"ValueError",
... | Loads images using numpy.
:param path: (string) Path to data file
:param key: (string) Object key in data file if it's a dict
Also stores image dimensions to later the original images. If there are
multiple channels, self.numChannels will store the number of channels,
otherwise it will be set to None. | [
"Loads",
"images",
"using",
"numpy",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/algorithms/image_sparse_net.py#L76-L102 | train | 198,620 |
numenta/htmresearch | htmresearch/algorithms/image_sparse_net.py | ImageSparseNet._initializeDimensions | def _initializeDimensions(self, inputData):
"""
Stores the training images' dimensions, for convenience.
"""
if len(inputData.shape) == 2:
self.imageHeight, self.numImages = inputData.shape
self.imageWidth, self.numChannels = None, None
elif len(inputData.shape) == 3:
self.imageHeight, \
self.imageWidth, \
self.numImages = inputData.shape
self.numChannels = None
elif len(inputData.shape) == 4:
self.imageHeight, \
self.imageWidth, \
self.numChannels, \
self.numImages = inputData.shape
else:
raise ValueError("The provided image set has more than 4 dimensions.") | python | def _initializeDimensions(self, inputData):
"""
Stores the training images' dimensions, for convenience.
"""
if len(inputData.shape) == 2:
self.imageHeight, self.numImages = inputData.shape
self.imageWidth, self.numChannels = None, None
elif len(inputData.shape) == 3:
self.imageHeight, \
self.imageWidth, \
self.numImages = inputData.shape
self.numChannels = None
elif len(inputData.shape) == 4:
self.imageHeight, \
self.imageWidth, \
self.numChannels, \
self.numImages = inputData.shape
else:
raise ValueError("The provided image set has more than 4 dimensions.") | [
"def",
"_initializeDimensions",
"(",
"self",
",",
"inputData",
")",
":",
"if",
"len",
"(",
"inputData",
".",
"shape",
")",
"==",
"2",
":",
"self",
".",
"imageHeight",
",",
"self",
".",
"numImages",
"=",
"inputData",
".",
"shape",
"self",
".",
"imageWidth... | Stores the training images' dimensions, for convenience. | [
"Stores",
"the",
"training",
"images",
"dimensions",
"for",
"convenience",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/algorithms/image_sparse_net.py#L105-L126 | train | 198,621 |
numenta/htmresearch | htmresearch/support/temporal_pooler_monitor_mixin.py | TemporalPoolerMonitorMixin.mmGetPermanencesPlot | def mmGetPermanencesPlot(self, title=None):
""" Returns plot of column permanences.
@param title an optional title for the figure
@return (Plot) plot
"""
plot = Plot(self, title)
data = numpy.zeros((self.getNumColumns(), self.getNumInputs()))
for i in xrange(self.getNumColumns()):
self.getPermanence(i, data[i])
plot.add2DArray(data, xlabel="Permanences", ylabel="Column")
return plot | python | def mmGetPermanencesPlot(self, title=None):
""" Returns plot of column permanences.
@param title an optional title for the figure
@return (Plot) plot
"""
plot = Plot(self, title)
data = numpy.zeros((self.getNumColumns(), self.getNumInputs()))
for i in xrange(self.getNumColumns()):
self.getPermanence(i, data[i])
plot.add2DArray(data, xlabel="Permanences", ylabel="Column")
return plot | [
"def",
"mmGetPermanencesPlot",
"(",
"self",
",",
"title",
"=",
"None",
")",
":",
"plot",
"=",
"Plot",
"(",
"self",
",",
"title",
")",
"data",
"=",
"numpy",
".",
"zeros",
"(",
"(",
"self",
".",
"getNumColumns",
"(",
")",
",",
"self",
".",
"getNumInput... | Returns plot of column permanences.
@param title an optional title for the figure
@return (Plot) plot | [
"Returns",
"plot",
"of",
"column",
"permanences",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/support/temporal_pooler_monitor_mixin.py#L155-L167 | train | 198,622 |
numenta/htmresearch | htmresearch/frameworks/pytorch/sparse_speech_experiment.py | SparseSpeechExperiment.finalize | def finalize(self, params, rep):
"""
Save the full model once we are done.
"""
if params.get("saveNet", True):
saveDir = os.path.join(params["path"], params["name"],
"model_{}.pt".format(rep))
torch.save(self.model, saveDir) | python | def finalize(self, params, rep):
"""
Save the full model once we are done.
"""
if params.get("saveNet", True):
saveDir = os.path.join(params["path"], params["name"],
"model_{}.pt".format(rep))
torch.save(self.model, saveDir) | [
"def",
"finalize",
"(",
"self",
",",
"params",
",",
"rep",
")",
":",
"if",
"params",
".",
"get",
"(",
"\"saveNet\"",
",",
"True",
")",
":",
"saveDir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"params",
"[",
"\"path\"",
"]",
",",
"params",
"[",
"... | Save the full model once we are done. | [
"Save",
"the",
"full",
"model",
"once",
"we",
"are",
"done",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/pytorch/sparse_speech_experiment.py#L231-L238 | train | 198,623 |
numenta/htmresearch | htmresearch/frameworks/pytorch/sparse_speech_experiment.py | SparseSpeechExperiment.loadDatasets | def loadDatasets(self, params):
"""
The GSC dataset specifies specific files to be used as training, test,
and validation. We assume the data has already been processed according
to those files into separate train, test, and valid directories.
For our experiment we use a subset of the data (10 categories out of 30),
just like the Kaggle competition.
"""
n_mels = 32
# Check if using pre-processed data or raw data
self.use_preprocessed_dataset = PreprocessedSpeechDataset.isValid(self.dataDir)
if self.use_preprocessed_dataset:
trainDataset = PreprocessedSpeechDataset(self.dataDir, subset="train")
validationDataset = PreprocessedSpeechDataset(self.dataDir, subset="valid",
silence_percentage=0)
testDataset = PreprocessedSpeechDataset(self.dataDir, subset="test",
silence_percentage=0)
bgNoiseDataset = PreprocessedSpeechDataset(self.dataDir, subset="noise",
silence_percentage=0)
else:
trainDataDir = os.path.join(self.dataDir, "train")
testDataDir = os.path.join(self.dataDir, "test")
validationDataDir = os.path.join(self.dataDir, "valid")
backgroundNoiseDir = os.path.join(self.dataDir, params["background_noise_dir"])
dataAugmentationTransform = transforms.Compose([
ChangeAmplitude(),
ChangeSpeedAndPitchAudio(),
FixAudioLength(),
ToSTFT(),
StretchAudioOnSTFT(),
TimeshiftAudioOnSTFT(),
FixSTFTDimension(),
])
featureTransform = transforms.Compose(
[
ToMelSpectrogramFromSTFT(n_mels=n_mels),
DeleteSTFT(),
ToTensor('mel_spectrogram', 'input')
])
trainDataset = SpeechCommandsDataset(
trainDataDir,
transforms.Compose([
dataAugmentationTransform,
# add_bg_noise, # Uncomment to allow adding BG noise
# during training
featureTransform
]))
testFeatureTransform = transforms.Compose([
FixAudioLength(),
ToMelSpectrogram(n_mels=n_mels),
ToTensor('mel_spectrogram', 'input')
])
validationDataset = SpeechCommandsDataset(
validationDataDir,
testFeatureTransform,
silence_percentage=0,
)
testDataset = SpeechCommandsDataset(
testDataDir,
testFeatureTransform,
silence_percentage=0,
)
bg_dataset = BackgroundNoiseDataset(
backgroundNoiseDir,
transforms.Compose([FixAudioLength(), ToSTFT()]),
)
bgNoiseTransform = transforms.Compose([
FixAudioLength(),
ToSTFT(),
AddBackgroundNoiseOnSTFT(bg_dataset),
ToMelSpectrogramFromSTFT(n_mels=n_mels),
DeleteSTFT(),
ToTensor('mel_spectrogram', 'input')
])
bgNoiseDataset = SpeechCommandsDataset(
testDataDir,
bgNoiseTransform,
silence_percentage=0,
)
weights = trainDataset.make_weights_for_balanced_classes()
sampler = WeightedRandomSampler(weights, len(weights))
# print("Number of training samples=",len(trainDataset))
# print("Number of validation samples=",len(validationDataset))
# print("Number of test samples=",len(testDataset))
self.train_loader = DataLoader(trainDataset,
batch_size=params["batch_size"],
sampler=sampler
)
self.validation_loader = DataLoader(validationDataset,
batch_size=params["batch_size"],
shuffle=False
)
self.test_loader = DataLoader(testDataset,
batch_size=params["batch_size"],
sampler=None,
shuffle=False
)
self.bg_noise_loader = DataLoader(bgNoiseDataset,
batch_size=params["batch_size"],
sampler=None,
shuffle=False
) | python | def loadDatasets(self, params):
"""
The GSC dataset specifies specific files to be used as training, test,
and validation. We assume the data has already been processed according
to those files into separate train, test, and valid directories.
For our experiment we use a subset of the data (10 categories out of 30),
just like the Kaggle competition.
"""
n_mels = 32
# Check if using pre-processed data or raw data
self.use_preprocessed_dataset = PreprocessedSpeechDataset.isValid(self.dataDir)
if self.use_preprocessed_dataset:
trainDataset = PreprocessedSpeechDataset(self.dataDir, subset="train")
validationDataset = PreprocessedSpeechDataset(self.dataDir, subset="valid",
silence_percentage=0)
testDataset = PreprocessedSpeechDataset(self.dataDir, subset="test",
silence_percentage=0)
bgNoiseDataset = PreprocessedSpeechDataset(self.dataDir, subset="noise",
silence_percentage=0)
else:
trainDataDir = os.path.join(self.dataDir, "train")
testDataDir = os.path.join(self.dataDir, "test")
validationDataDir = os.path.join(self.dataDir, "valid")
backgroundNoiseDir = os.path.join(self.dataDir, params["background_noise_dir"])
dataAugmentationTransform = transforms.Compose([
ChangeAmplitude(),
ChangeSpeedAndPitchAudio(),
FixAudioLength(),
ToSTFT(),
StretchAudioOnSTFT(),
TimeshiftAudioOnSTFT(),
FixSTFTDimension(),
])
featureTransform = transforms.Compose(
[
ToMelSpectrogramFromSTFT(n_mels=n_mels),
DeleteSTFT(),
ToTensor('mel_spectrogram', 'input')
])
trainDataset = SpeechCommandsDataset(
trainDataDir,
transforms.Compose([
dataAugmentationTransform,
# add_bg_noise, # Uncomment to allow adding BG noise
# during training
featureTransform
]))
testFeatureTransform = transforms.Compose([
FixAudioLength(),
ToMelSpectrogram(n_mels=n_mels),
ToTensor('mel_spectrogram', 'input')
])
validationDataset = SpeechCommandsDataset(
validationDataDir,
testFeatureTransform,
silence_percentage=0,
)
testDataset = SpeechCommandsDataset(
testDataDir,
testFeatureTransform,
silence_percentage=0,
)
bg_dataset = BackgroundNoiseDataset(
backgroundNoiseDir,
transforms.Compose([FixAudioLength(), ToSTFT()]),
)
bgNoiseTransform = transforms.Compose([
FixAudioLength(),
ToSTFT(),
AddBackgroundNoiseOnSTFT(bg_dataset),
ToMelSpectrogramFromSTFT(n_mels=n_mels),
DeleteSTFT(),
ToTensor('mel_spectrogram', 'input')
])
bgNoiseDataset = SpeechCommandsDataset(
testDataDir,
bgNoiseTransform,
silence_percentage=0,
)
weights = trainDataset.make_weights_for_balanced_classes()
sampler = WeightedRandomSampler(weights, len(weights))
# print("Number of training samples=",len(trainDataset))
# print("Number of validation samples=",len(validationDataset))
# print("Number of test samples=",len(testDataset))
self.train_loader = DataLoader(trainDataset,
batch_size=params["batch_size"],
sampler=sampler
)
self.validation_loader = DataLoader(validationDataset,
batch_size=params["batch_size"],
shuffle=False
)
self.test_loader = DataLoader(testDataset,
batch_size=params["batch_size"],
sampler=None,
shuffle=False
)
self.bg_noise_loader = DataLoader(bgNoiseDataset,
batch_size=params["batch_size"],
sampler=None,
shuffle=False
) | [
"def",
"loadDatasets",
"(",
"self",
",",
"params",
")",
":",
"n_mels",
"=",
"32",
"# Check if using pre-processed data or raw data",
"self",
".",
"use_preprocessed_dataset",
"=",
"PreprocessedSpeechDataset",
".",
"isValid",
"(",
"self",
".",
"dataDir",
")",
"if",
"s... | The GSC dataset specifies specific files to be used as training, test,
and validation. We assume the data has already been processed according
to those files into separate train, test, and valid directories.
For our experiment we use a subset of the data (10 categories out of 30),
just like the Kaggle competition. | [
"The",
"GSC",
"dataset",
"specifies",
"specific",
"files",
"to",
"be",
"used",
"as",
"training",
"test",
"and",
"validation",
".",
"We",
"assume",
"the",
"data",
"has",
"already",
"been",
"processed",
"according",
"to",
"those",
"files",
"into",
"separate",
... | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/pytorch/sparse_speech_experiment.py#L432-L550 | train | 198,624 |
numenta/htmresearch | htmresearch/frameworks/layers/physical_objects.py | Sphere.contains | def contains(self, location):
"""
Checks that the provided point is on the sphere.
"""
return self.almostEqual(
sum([coord ** 2 for coord in location]), self.radius ** 2
) | python | def contains(self, location):
"""
Checks that the provided point is on the sphere.
"""
return self.almostEqual(
sum([coord ** 2 for coord in location]), self.radius ** 2
) | [
"def",
"contains",
"(",
"self",
",",
"location",
")",
":",
"return",
"self",
".",
"almostEqual",
"(",
"sum",
"(",
"[",
"coord",
"**",
"2",
"for",
"coord",
"in",
"location",
"]",
")",
",",
"self",
".",
"radius",
"**",
"2",
")"
] | Checks that the provided point is on the sphere. | [
"Checks",
"that",
"the",
"provided",
"point",
"is",
"on",
"the",
"sphere",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/layers/physical_objects.py#L94-L100 | train | 198,625 |
numenta/htmresearch | htmresearch/frameworks/layers/physical_objects.py | Cylinder.contains | def contains(self, location):
"""
Checks that the provided point is on the cylinder.
"""
if self.almostEqual(location[0] ** 2 + location[1] ** 2, self.radius ** 2):
return abs(location[2]) < self.height / 2.
if self.almostEqual(location[2], self.height / 2.):
return location[0] ** 2 + location[1] ** 2 < self.radius ** 2
return False | python | def contains(self, location):
"""
Checks that the provided point is on the cylinder.
"""
if self.almostEqual(location[0] ** 2 + location[1] ** 2, self.radius ** 2):
return abs(location[2]) < self.height / 2.
if self.almostEqual(location[2], self.height / 2.):
return location[0] ** 2 + location[1] ** 2 < self.radius ** 2
return False | [
"def",
"contains",
"(",
"self",
",",
"location",
")",
":",
"if",
"self",
".",
"almostEqual",
"(",
"location",
"[",
"0",
"]",
"**",
"2",
"+",
"location",
"[",
"1",
"]",
"**",
"2",
",",
"self",
".",
"radius",
"**",
"2",
")",
":",
"return",
"abs",
... | Checks that the provided point is on the cylinder. | [
"Checks",
"that",
"the",
"provided",
"point",
"is",
"on",
"the",
"cylinder",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/layers/physical_objects.py#L229-L237 | train | 198,626 |
numenta/htmresearch | htmresearch/frameworks/layers/physical_objects.py | Cylinder.sampleLocation | def sampleLocation(self):
"""
Simple method to sample uniformly from a cylinder.
"""
areaRatio = self.radius / (self.radius + self.height)
if random.random() < areaRatio:
return self._sampleLocationOnDisc()
else:
return self._sampleLocationOnSide() | python | def sampleLocation(self):
"""
Simple method to sample uniformly from a cylinder.
"""
areaRatio = self.radius / (self.radius + self.height)
if random.random() < areaRatio:
return self._sampleLocationOnDisc()
else:
return self._sampleLocationOnSide() | [
"def",
"sampleLocation",
"(",
"self",
")",
":",
"areaRatio",
"=",
"self",
".",
"radius",
"/",
"(",
"self",
".",
"radius",
"+",
"self",
".",
"height",
")",
"if",
"random",
".",
"random",
"(",
")",
"<",
"areaRatio",
":",
"return",
"self",
".",
"_sample... | Simple method to sample uniformly from a cylinder. | [
"Simple",
"method",
"to",
"sample",
"uniformly",
"from",
"a",
"cylinder",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/layers/physical_objects.py#L240-L248 | train | 198,627 |
numenta/htmresearch | htmresearch/frameworks/layers/physical_objects.py | Cylinder.sampleLocationFromFeature | def sampleLocationFromFeature(self, feature):
"""
Samples a location from the provided specific features.
"""
if feature == "topDisc":
return self._sampleLocationOnDisc(top=True)
elif feature == "topEdge":
return self._sampleLocationOnEdge(top=True)
elif feature == "bottomDisc":
return self._sampleLocationOnDisc(top=False)
elif feature == "bottomEdge":
return self._sampleLocationOnEdge(top=False)
elif feature == "side":
return self._sampleLocationOnSide()
elif feature == "random":
return self.sampleLocation()
else:
raise NameError("No such feature in {}: {}".format(self, feature)) | python | def sampleLocationFromFeature(self, feature):
"""
Samples a location from the provided specific features.
"""
if feature == "topDisc":
return self._sampleLocationOnDisc(top=True)
elif feature == "topEdge":
return self._sampleLocationOnEdge(top=True)
elif feature == "bottomDisc":
return self._sampleLocationOnDisc(top=False)
elif feature == "bottomEdge":
return self._sampleLocationOnEdge(top=False)
elif feature == "side":
return self._sampleLocationOnSide()
elif feature == "random":
return self.sampleLocation()
else:
raise NameError("No such feature in {}: {}".format(self, feature)) | [
"def",
"sampleLocationFromFeature",
"(",
"self",
",",
"feature",
")",
":",
"if",
"feature",
"==",
"\"topDisc\"",
":",
"return",
"self",
".",
"_sampleLocationOnDisc",
"(",
"top",
"=",
"True",
")",
"elif",
"feature",
"==",
"\"topEdge\"",
":",
"return",
"self",
... | Samples a location from the provided specific features. | [
"Samples",
"a",
"location",
"from",
"the",
"provided",
"specific",
"features",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/layers/physical_objects.py#L251-L268 | train | 198,628 |
numenta/htmresearch | htmresearch/frameworks/layers/physical_objects.py | Cylinder._sampleLocationOnDisc | def _sampleLocationOnDisc(self, top=None):
"""
Helper method to sample from the top and bottom discs of a cylinder.
If top is set to True, samples only from top disc. If top is set to False,
samples only from bottom disc. If not set (defaults to None), samples from
both discs.
"""
if top is None:
z = random.choice([-1, 1]) * self.height / 2.
else:
z = self.height / 2. if top else - self.height / 2.
sampledAngle = 2 * random.random() * pi
sampledRadius = self.radius * sqrt(random.random())
x, y = sampledRadius * cos(sampledAngle), sampledRadius * sin(sampledAngle)
return [x, y, z] | python | def _sampleLocationOnDisc(self, top=None):
"""
Helper method to sample from the top and bottom discs of a cylinder.
If top is set to True, samples only from top disc. If top is set to False,
samples only from bottom disc. If not set (defaults to None), samples from
both discs.
"""
if top is None:
z = random.choice([-1, 1]) * self.height / 2.
else:
z = self.height / 2. if top else - self.height / 2.
sampledAngle = 2 * random.random() * pi
sampledRadius = self.radius * sqrt(random.random())
x, y = sampledRadius * cos(sampledAngle), sampledRadius * sin(sampledAngle)
return [x, y, z] | [
"def",
"_sampleLocationOnDisc",
"(",
"self",
",",
"top",
"=",
"None",
")",
":",
"if",
"top",
"is",
"None",
":",
"z",
"=",
"random",
".",
"choice",
"(",
"[",
"-",
"1",
",",
"1",
"]",
")",
"*",
"self",
".",
"height",
"/",
"2.",
"else",
":",
"z",
... | Helper method to sample from the top and bottom discs of a cylinder.
If top is set to True, samples only from top disc. If top is set to False,
samples only from bottom disc. If not set (defaults to None), samples from
both discs. | [
"Helper",
"method",
"to",
"sample",
"from",
"the",
"top",
"and",
"bottom",
"discs",
"of",
"a",
"cylinder",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/layers/physical_objects.py#L271-L286 | train | 198,629 |
numenta/htmresearch | htmresearch/frameworks/layers/physical_objects.py | Cylinder._sampleLocationOnEdge | def _sampleLocationOnEdge(self, top=None):
"""
Helper method to sample from the top and bottom edges of a cylinder.
If top is set to True, samples only from top edge. If top is set to False,
samples only from bottom edge. If not set (defaults to None), samples from
both edges.
"""
if top is None:
z = random.choice([-1, 1]) * self.height / 2.
else:
z = self.height / 2. if top else - self.height / 2.
sampledAngle = 2 * random.random() * pi
x, y = self.radius * cos(sampledAngle), self.radius * sin(sampledAngle)
return [x, y, z] | python | def _sampleLocationOnEdge(self, top=None):
"""
Helper method to sample from the top and bottom edges of a cylinder.
If top is set to True, samples only from top edge. If top is set to False,
samples only from bottom edge. If not set (defaults to None), samples from
both edges.
"""
if top is None:
z = random.choice([-1, 1]) * self.height / 2.
else:
z = self.height / 2. if top else - self.height / 2.
sampledAngle = 2 * random.random() * pi
x, y = self.radius * cos(sampledAngle), self.radius * sin(sampledAngle)
return [x, y, z] | [
"def",
"_sampleLocationOnEdge",
"(",
"self",
",",
"top",
"=",
"None",
")",
":",
"if",
"top",
"is",
"None",
":",
"z",
"=",
"random",
".",
"choice",
"(",
"[",
"-",
"1",
",",
"1",
"]",
")",
"*",
"self",
".",
"height",
"/",
"2.",
"else",
":",
"z",
... | Helper method to sample from the top and bottom edges of a cylinder.
If top is set to True, samples only from top edge. If top is set to False,
samples only from bottom edge. If not set (defaults to None), samples from
both edges. | [
"Helper",
"method",
"to",
"sample",
"from",
"the",
"top",
"and",
"bottom",
"edges",
"of",
"a",
"cylinder",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/layers/physical_objects.py#L289-L303 | train | 198,630 |
numenta/htmresearch | htmresearch/frameworks/layers/physical_objects.py | Cylinder._sampleLocationOnSide | def _sampleLocationOnSide(self):
"""
Helper method to sample from the lateral surface of a cylinder.
"""
z = random.uniform(-1, 1) * self.height / 2.
sampledAngle = 2 * random.random() * pi
x, y = self.radius * cos(sampledAngle), self.radius * sin(sampledAngle)
return [x, y, z] | python | def _sampleLocationOnSide(self):
"""
Helper method to sample from the lateral surface of a cylinder.
"""
z = random.uniform(-1, 1) * self.height / 2.
sampledAngle = 2 * random.random() * pi
x, y = self.radius * cos(sampledAngle), self.radius * sin(sampledAngle)
return [x, y, z] | [
"def",
"_sampleLocationOnSide",
"(",
"self",
")",
":",
"z",
"=",
"random",
".",
"uniform",
"(",
"-",
"1",
",",
"1",
")",
"*",
"self",
".",
"height",
"/",
"2.",
"sampledAngle",
"=",
"2",
"*",
"random",
".",
"random",
"(",
")",
"*",
"pi",
"x",
",",... | Helper method to sample from the lateral surface of a cylinder. | [
"Helper",
"method",
"to",
"sample",
"from",
"the",
"lateral",
"surface",
"of",
"a",
"cylinder",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/layers/physical_objects.py#L306-L313 | train | 198,631 |
numenta/htmresearch | htmresearch/frameworks/layers/physical_objects.py | Box.contains | def contains(self, location):
"""
A location is on the box if one of the dimension is "satured").
"""
for i, coord in enumerate(location):
if self.almostEqual(abs(coord), self.dimensions[i] / 2.):
return True
return False | python | def contains(self, location):
"""
A location is on the box if one of the dimension is "satured").
"""
for i, coord in enumerate(location):
if self.almostEqual(abs(coord), self.dimensions[i] / 2.):
return True
return False | [
"def",
"contains",
"(",
"self",
",",
"location",
")",
":",
"for",
"i",
",",
"coord",
"in",
"enumerate",
"(",
"location",
")",
":",
"if",
"self",
".",
"almostEqual",
"(",
"abs",
"(",
"coord",
")",
",",
"self",
".",
"dimensions",
"[",
"i",
"]",
"/",
... | A location is on the box if one of the dimension is "satured"). | [
"A",
"location",
"is",
"on",
"the",
"box",
"if",
"one",
"of",
"the",
"dimension",
"is",
"satured",
")",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/layers/physical_objects.py#L412-L419 | train | 198,632 |
numenta/htmresearch | htmresearch/frameworks/layers/physical_objects.py | Box.sampleLocationFromFeature | def sampleLocationFromFeature(self, feature):
"""
Samples a location from one specific feature.
This is only supported with three dimensions.
"""
if feature == "face":
return self._sampleFromFaces()
elif feature == "edge":
return self._sampleFromEdges()
elif feature == "vertex":
return self._sampleFromVertices()
elif feature == "random":
return self.sampleLocation()
else:
raise NameError("No such feature in {}: {}".format(self, feature)) | python | def sampleLocationFromFeature(self, feature):
"""
Samples a location from one specific feature.
This is only supported with three dimensions.
"""
if feature == "face":
return self._sampleFromFaces()
elif feature == "edge":
return self._sampleFromEdges()
elif feature == "vertex":
return self._sampleFromVertices()
elif feature == "random":
return self.sampleLocation()
else:
raise NameError("No such feature in {}: {}".format(self, feature)) | [
"def",
"sampleLocationFromFeature",
"(",
"self",
",",
"feature",
")",
":",
"if",
"feature",
"==",
"\"face\"",
":",
"return",
"self",
".",
"_sampleFromFaces",
"(",
")",
"elif",
"feature",
"==",
"\"edge\"",
":",
"return",
"self",
".",
"_sampleFromEdges",
"(",
... | Samples a location from one specific feature.
This is only supported with three dimensions. | [
"Samples",
"a",
"location",
"from",
"one",
"specific",
"feature",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/layers/physical_objects.py#L429-L444 | train | 198,633 |
numenta/htmresearch | htmresearch/frameworks/layers/physical_objects.py | Box._sampleFromFaces | def _sampleFromFaces(self):
"""
We start by sampling a dimension to "max out", then sample the sign and
the other dimensions' values.
"""
coordinates = [random.uniform(-1, 1) * dim / 2. for dim in self.dimensions]
dim = random.choice(range(self.dimension))
coordinates[dim] = self.dimensions[dim] / 2. * random.choice([-1, 1])
return coordinates | python | def _sampleFromFaces(self):
"""
We start by sampling a dimension to "max out", then sample the sign and
the other dimensions' values.
"""
coordinates = [random.uniform(-1, 1) * dim / 2. for dim in self.dimensions]
dim = random.choice(range(self.dimension))
coordinates[dim] = self.dimensions[dim] / 2. * random.choice([-1, 1])
return coordinates | [
"def",
"_sampleFromFaces",
"(",
"self",
")",
":",
"coordinates",
"=",
"[",
"random",
".",
"uniform",
"(",
"-",
"1",
",",
"1",
")",
"*",
"dim",
"/",
"2.",
"for",
"dim",
"in",
"self",
".",
"dimensions",
"]",
"dim",
"=",
"random",
".",
"choice",
"(",
... | We start by sampling a dimension to "max out", then sample the sign and
the other dimensions' values. | [
"We",
"start",
"by",
"sampling",
"a",
"dimension",
"to",
"max",
"out",
"then",
"sample",
"the",
"sign",
"and",
"the",
"other",
"dimensions",
"values",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/layers/physical_objects.py#L447-L455 | train | 198,634 |
numenta/htmresearch | htmresearch/frameworks/layers/physical_objects.py | Box.plot | def plot(self, numPoints=100):
"""
Specific plotting method for boxes.
Only supports 3-dimensional objects.
"""
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# generate cylinder
x = np.linspace(- self.dimensions[0]/2., self.dimensions[0]/2., numPoints)
y = np.linspace(- self.dimensions[1]/2., self.dimensions[1]/2., numPoints)
z = np.linspace(- self.dimensions[2]/2., self.dimensions[2]/2., numPoints)
# plot
Xc, Yc = np.meshgrid(x, y)
ax.plot_surface(Xc, Yc, -self.dimensions[2]/2,
alpha=0.2, rstride=20, cstride=10)
ax.plot_surface(Xc, Yc, self.dimensions[2]/2,
alpha=0.2, rstride=20, cstride=10)
Yc, Zc = np.meshgrid(y, z)
ax.plot_surface(-self.dimensions[0]/2, Yc, Zc,
alpha=0.2, rstride=20, cstride=10)
ax.plot_surface(self.dimensions[0]/2, Yc, Zc,
alpha=0.2, rstride=20, cstride=10)
Xc, Zc = np.meshgrid(x, z)
ax.plot_surface(Xc, -self.dimensions[1]/2, Zc,
alpha=0.2, rstride=20, cstride=10)
ax.plot_surface(Xc, self.dimensions[1]/2, Zc,
alpha=0.2, rstride=20, cstride=10)
ax.set_xlabel("X")
ax.set_ylabel("Y")
ax.set_zlabel("Z")
plt.title("{}".format(self))
return fig, ax | python | def plot(self, numPoints=100):
"""
Specific plotting method for boxes.
Only supports 3-dimensional objects.
"""
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# generate cylinder
x = np.linspace(- self.dimensions[0]/2., self.dimensions[0]/2., numPoints)
y = np.linspace(- self.dimensions[1]/2., self.dimensions[1]/2., numPoints)
z = np.linspace(- self.dimensions[2]/2., self.dimensions[2]/2., numPoints)
# plot
Xc, Yc = np.meshgrid(x, y)
ax.plot_surface(Xc, Yc, -self.dimensions[2]/2,
alpha=0.2, rstride=20, cstride=10)
ax.plot_surface(Xc, Yc, self.dimensions[2]/2,
alpha=0.2, rstride=20, cstride=10)
Yc, Zc = np.meshgrid(y, z)
ax.plot_surface(-self.dimensions[0]/2, Yc, Zc,
alpha=0.2, rstride=20, cstride=10)
ax.plot_surface(self.dimensions[0]/2, Yc, Zc,
alpha=0.2, rstride=20, cstride=10)
Xc, Zc = np.meshgrid(x, z)
ax.plot_surface(Xc, -self.dimensions[1]/2, Zc,
alpha=0.2, rstride=20, cstride=10)
ax.plot_surface(Xc, self.dimensions[1]/2, Zc,
alpha=0.2, rstride=20, cstride=10)
ax.set_xlabel("X")
ax.set_ylabel("Y")
ax.set_zlabel("Z")
plt.title("{}".format(self))
return fig, ax | [
"def",
"plot",
"(",
"self",
",",
"numPoints",
"=",
"100",
")",
":",
"fig",
"=",
"plt",
".",
"figure",
"(",
")",
"ax",
"=",
"fig",
".",
"add_subplot",
"(",
"111",
",",
"projection",
"=",
"'3d'",
")",
"# generate cylinder",
"x",
"=",
"np",
".",
"lins... | Specific plotting method for boxes.
Only supports 3-dimensional objects. | [
"Specific",
"plotting",
"method",
"for",
"boxes",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/layers/physical_objects.py#L484-L520 | train | 198,635 |
numenta/htmresearch | htmresearch/frameworks/layers/physical_objects.py | PlyModel.visualize | def visualize(self, numPoints=100):
"""
Visualization utility for models.
Helps to debug the math and logic.
Helps to monitor complex objects with difficult to define boundaries.
Only supports 3-dimensional objects.
TODO: center the objects using scale, rotate and translate operations on mesh objects.
"""
try:
import pyqtgraph as pg
import pyqtgraph.multiprocess as mp
import pyqtgraph.opengl as gl
except ImportError as e:
print("PyQtGraph needs to be installed.")
return (None, None, None, None, None)
class PlyVisWindow:
"""
The pyqtgraph visualization utility window class
Creates a remote process with viewbox frame for visualizations
Provided access to mesh and scatter for realtime update to view.
"""
def __init__(self):
self.proc = mp.QtProcess()
self.rpg = self.proc._import('pyqtgraph')
self.rgl = self.proc._import('pyqtgraph.opengl')
self.rview = self.rgl.GLViewWidget()
self.rview.setBackgroundColor('k')
self.rview.setCameraPosition(distance=10)
self.grid = self.rgl.GLGridItem()
self.rview.addItem(self.grid)
self.rpg.setConfigOption('background', 'w')
self.rpg.setConfigOption('foreground', 'k')
def snapshot(self, name=""):
"""
utility to grabframe of the visualization window.
@param name (string) helps to avoid overwriting grabbed images programmatically.
"""
self.rview.grabFrameBuffer().save("{}.png".format(name))
# We might need this for future purposes Dont Delete
# class MeshUpdate:
# def __init__(self, proc):
# self.data_x = proc.transfer([])
# self.data_y = proc.transfer([])
# self._t = None
# @property
# def t(self):
# return self._t
# def update(self,x):
# self.data_y.extend([x], _callSync='async')
# self.data_x.extend([self.t], _callSync='async',)
# self.curve.setData(y=self.data_y, _callSync='async')
pg.mkQApp()
self.graphicsWindow = PlyVisWindow()
self.graphicsWindow.rview.setWindowTitle(self.file)
vertices = self.vertices.data
vertices = np.array(vertices.tolist())
faces = np.array([self.faces[i]['vertex_indices'] for i in range(self.faces.count)])
self.mesh = self.graphicsWindow.rgl.GLMeshItem(vertexes=vertices, faces=faces,
shader='normalColor', drawEdges=True,
drawFaces=True, computeNormals=False,
smooth=False)
self.graphicsWindow.rview.addItem(self.mesh)
self.graphicsWindow.rview.show()
pos = np.empty((numPoints,3))
size = np.ones((numPoints,))
color = np.ones((numPoints,4))
self.scatter = self.graphicsWindow.rgl.GLScatterPlotItem(pos=pos, size=size, color=color, pxMode=True)
self.graphicsWindow.rview.addItem(self.scatter)
return self.scatter, self.mesh, pos, size, color | python | def visualize(self, numPoints=100):
"""
Visualization utility for models.
Helps to debug the math and logic.
Helps to monitor complex objects with difficult to define boundaries.
Only supports 3-dimensional objects.
TODO: center the objects using scale, rotate and translate operations on mesh objects.
"""
try:
import pyqtgraph as pg
import pyqtgraph.multiprocess as mp
import pyqtgraph.opengl as gl
except ImportError as e:
print("PyQtGraph needs to be installed.")
return (None, None, None, None, None)
class PlyVisWindow:
"""
The pyqtgraph visualization utility window class
Creates a remote process with viewbox frame for visualizations
Provided access to mesh and scatter for realtime update to view.
"""
def __init__(self):
self.proc = mp.QtProcess()
self.rpg = self.proc._import('pyqtgraph')
self.rgl = self.proc._import('pyqtgraph.opengl')
self.rview = self.rgl.GLViewWidget()
self.rview.setBackgroundColor('k')
self.rview.setCameraPosition(distance=10)
self.grid = self.rgl.GLGridItem()
self.rview.addItem(self.grid)
self.rpg.setConfigOption('background', 'w')
self.rpg.setConfigOption('foreground', 'k')
def snapshot(self, name=""):
"""
utility to grabframe of the visualization window.
@param name (string) helps to avoid overwriting grabbed images programmatically.
"""
self.rview.grabFrameBuffer().save("{}.png".format(name))
# We might need this for future purposes Dont Delete
# class MeshUpdate:
# def __init__(self, proc):
# self.data_x = proc.transfer([])
# self.data_y = proc.transfer([])
# self._t = None
# @property
# def t(self):
# return self._t
# def update(self,x):
# self.data_y.extend([x], _callSync='async')
# self.data_x.extend([self.t], _callSync='async',)
# self.curve.setData(y=self.data_y, _callSync='async')
pg.mkQApp()
self.graphicsWindow = PlyVisWindow()
self.graphicsWindow.rview.setWindowTitle(self.file)
vertices = self.vertices.data
vertices = np.array(vertices.tolist())
faces = np.array([self.faces[i]['vertex_indices'] for i in range(self.faces.count)])
self.mesh = self.graphicsWindow.rgl.GLMeshItem(vertexes=vertices, faces=faces,
shader='normalColor', drawEdges=True,
drawFaces=True, computeNormals=False,
smooth=False)
self.graphicsWindow.rview.addItem(self.mesh)
self.graphicsWindow.rview.show()
pos = np.empty((numPoints,3))
size = np.ones((numPoints,))
color = np.ones((numPoints,4))
self.scatter = self.graphicsWindow.rgl.GLScatterPlotItem(pos=pos, size=size, color=color, pxMode=True)
self.graphicsWindow.rview.addItem(self.scatter)
return self.scatter, self.mesh, pos, size, color | [
"def",
"visualize",
"(",
"self",
",",
"numPoints",
"=",
"100",
")",
":",
"try",
":",
"import",
"pyqtgraph",
"as",
"pg",
"import",
"pyqtgraph",
".",
"multiprocess",
"as",
"mp",
"import",
"pyqtgraph",
".",
"opengl",
"as",
"gl",
"except",
"ImportError",
"as",... | Visualization utility for models.
Helps to debug the math and logic.
Helps to monitor complex objects with difficult to define boundaries.
Only supports 3-dimensional objects.
TODO: center the objects using scale, rotate and translate operations on mesh objects. | [
"Visualization",
"utility",
"for",
"models",
".",
"Helps",
"to",
"debug",
"the",
"math",
"and",
"logic",
".",
"Helps",
"to",
"monitor",
"complex",
"objects",
"with",
"difficult",
"to",
"define",
"boundaries",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/layers/physical_objects.py#L757-L835 | train | 198,636 |
numenta/htmresearch | htmresearch/frameworks/layers/sequence_object_machine.py | SequenceObjectMachine.createRandomSequences | def createRandomSequences(self,
numSequences,
sequenceLength):
"""
Creates a set of random sequences, each with sequenceLength elements,
and adds them to the machine.
"""
for _ in xrange(numSequences):
self.addObject(
[numpy.random.randint(0, self.numFeatures)
for _ in xrange(sequenceLength)]
) | python | def createRandomSequences(self,
numSequences,
sequenceLength):
"""
Creates a set of random sequences, each with sequenceLength elements,
and adds them to the machine.
"""
for _ in xrange(numSequences):
self.addObject(
[numpy.random.randint(0, self.numFeatures)
for _ in xrange(sequenceLength)]
) | [
"def",
"createRandomSequences",
"(",
"self",
",",
"numSequences",
",",
"sequenceLength",
")",
":",
"for",
"_",
"in",
"xrange",
"(",
"numSequences",
")",
":",
"self",
".",
"addObject",
"(",
"[",
"numpy",
".",
"random",
".",
"randint",
"(",
"0",
",",
"self... | Creates a set of random sequences, each with sequenceLength elements,
and adds them to the machine. | [
"Creates",
"a",
"set",
"of",
"random",
"sequences",
"each",
"with",
"sequenceLength",
"elements",
"and",
"adds",
"them",
"to",
"the",
"machine",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/layers/sequence_object_machine.py#L185-L196 | train | 198,637 |
numenta/htmresearch | htmresearch/frameworks/layers/sequence_object_machine.py | SequenceObjectMachine._addNoise | def _addNoise(self, pattern, noiseLevel, inputSize):
"""
Adds noise to the given pattern and returns the new one.
A noiseLevel of 0.1 means that 10% of the ON bits will be replaced by
other randomly chosen ON bits. The returned SDR will still contain the
same number of bits.
"""
if pattern is None:
return None
# Bits that could be noise. These can't be from the original set.
candidateBits = list(set(range(inputSize)) - set(pattern))
random.shuffle(candidateBits)
newBits = set()
for bit in pattern:
if random.random() < noiseLevel:
newBits.add(candidateBits.pop())
else:
newBits.add(bit)
return newBits | python | def _addNoise(self, pattern, noiseLevel, inputSize):
"""
Adds noise to the given pattern and returns the new one.
A noiseLevel of 0.1 means that 10% of the ON bits will be replaced by
other randomly chosen ON bits. The returned SDR will still contain the
same number of bits.
"""
if pattern is None:
return None
# Bits that could be noise. These can't be from the original set.
candidateBits = list(set(range(inputSize)) - set(pattern))
random.shuffle(candidateBits)
newBits = set()
for bit in pattern:
if random.random() < noiseLevel:
newBits.add(candidateBits.pop())
else:
newBits.add(bit)
return newBits | [
"def",
"_addNoise",
"(",
"self",
",",
"pattern",
",",
"noiseLevel",
",",
"inputSize",
")",
":",
"if",
"pattern",
"is",
"None",
":",
"return",
"None",
"# Bits that could be noise. These can't be from the original set.",
"candidateBits",
"=",
"list",
"(",
"set",
"(",
... | Adds noise to the given pattern and returns the new one.
A noiseLevel of 0.1 means that 10% of the ON bits will be replaced by
other randomly chosen ON bits. The returned SDR will still contain the
same number of bits. | [
"Adds",
"noise",
"to",
"the",
"given",
"pattern",
"and",
"returns",
"the",
"new",
"one",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/layers/sequence_object_machine.py#L242-L265 | train | 198,638 |
numenta/htmresearch | htmresearch/frameworks/layers/sequence_object_machine.py | SequenceObjectMachine._generateFeatures | def _generateFeatures(self):
"""
Generates a pool of features to be used for the experiments.
For each index, numColumns SDR's are created, as locations for the same
feature should be different for each column.
"""
size = self.sensorInputSize
bits = self.numInputBits
self.features = []
for _ in xrange(self.numColumns):
self.features.append(
[self._generatePattern(bits, size) for _ in xrange(self.numFeatures)]
) | python | def _generateFeatures(self):
"""
Generates a pool of features to be used for the experiments.
For each index, numColumns SDR's are created, as locations for the same
feature should be different for each column.
"""
size = self.sensorInputSize
bits = self.numInputBits
self.features = []
for _ in xrange(self.numColumns):
self.features.append(
[self._generatePattern(bits, size) for _ in xrange(self.numFeatures)]
) | [
"def",
"_generateFeatures",
"(",
"self",
")",
":",
"size",
"=",
"self",
".",
"sensorInputSize",
"bits",
"=",
"self",
".",
"numInputBits",
"self",
".",
"features",
"=",
"[",
"]",
"for",
"_",
"in",
"xrange",
"(",
"self",
".",
"numColumns",
")",
":",
"sel... | Generates a pool of features to be used for the experiments.
For each index, numColumns SDR's are created, as locations for the same
feature should be different for each column. | [
"Generates",
"a",
"pool",
"of",
"features",
"to",
"be",
"used",
"for",
"the",
"experiments",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/layers/sequence_object_machine.py#L268-L282 | train | 198,639 |
numenta/htmresearch | projects/l2_pooling/single_column_sp.py | createThreeObjects | def createThreeObjects():
"""
Helper function that creates a set of three objects used for basic
experiments.
:return: (list(list(tuple)) List of lists of feature / location pairs.
"""
objectA = zip(range(10), range(10))
objectB = [(0, 0), (2, 2), (1, 1), (1, 4), (4, 2), (4, 1)]
objectC = [(0, 0), (1, 1), (3, 1), (0, 1)]
return [objectA, objectB, objectC] | python | def createThreeObjects():
"""
Helper function that creates a set of three objects used for basic
experiments.
:return: (list(list(tuple)) List of lists of feature / location pairs.
"""
objectA = zip(range(10), range(10))
objectB = [(0, 0), (2, 2), (1, 1), (1, 4), (4, 2), (4, 1)]
objectC = [(0, 0), (1, 1), (3, 1), (0, 1)]
return [objectA, objectB, objectC] | [
"def",
"createThreeObjects",
"(",
")",
":",
"objectA",
"=",
"zip",
"(",
"range",
"(",
"10",
")",
",",
"range",
"(",
"10",
")",
")",
"objectB",
"=",
"[",
"(",
"0",
",",
"0",
")",
",",
"(",
"2",
",",
"2",
")",
",",
"(",
"1",
",",
"1",
")",
... | Helper function that creates a set of three objects used for basic
experiments.
:return: (list(list(tuple)) List of lists of feature / location pairs. | [
"Helper",
"function",
"that",
"creates",
"a",
"set",
"of",
"three",
"objects",
"used",
"for",
"basic",
"experiments",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/l2_pooling/single_column_sp.py#L32-L42 | train | 198,640 |
numenta/htmresearch | projects/l2_pooling/single_column_sp.py | runSharedFeatures | def runSharedFeatures(noiseLevel=None, profile=False):
"""
Runs a simple experiment where three objects share a number of location,
feature pairs.
Parameters:
----------------------------
@param noiseLevel (float)
Noise level to add to the locations and features during inference
@param profile (bool)
If True, the network will be profiled after learning and inference
"""
exp = L4L2Experiment(
"shared_features",
enableLateralSP=True,
enableFeedForwardSP=True
)
pairs = createThreeObjects()
objects = createObjectMachine(
machineType="simple",
numInputBits=20,
sensorInputSize=1024,
externalInputSize=1024
)
for object in pairs:
objects.addObject(object)
exp.learnObjects(objects.provideObjectsToLearn())
if profile:
exp.printProfile()
inferConfig = {
"numSteps": 10,
"noiseLevel": noiseLevel,
"pairs": {
0: zip(range(10), range(10))
}
}
exp.infer(objects.provideObjectToInfer(inferConfig), objectName=0)
if profile:
exp.printProfile()
exp.plotInferenceStats(
fields=["L2 Representation",
"Overlap L2 with object",
"L4 Representation"],
) | python | def runSharedFeatures(noiseLevel=None, profile=False):
"""
Runs a simple experiment where three objects share a number of location,
feature pairs.
Parameters:
----------------------------
@param noiseLevel (float)
Noise level to add to the locations and features during inference
@param profile (bool)
If True, the network will be profiled after learning and inference
"""
exp = L4L2Experiment(
"shared_features",
enableLateralSP=True,
enableFeedForwardSP=True
)
pairs = createThreeObjects()
objects = createObjectMachine(
machineType="simple",
numInputBits=20,
sensorInputSize=1024,
externalInputSize=1024
)
for object in pairs:
objects.addObject(object)
exp.learnObjects(objects.provideObjectsToLearn())
if profile:
exp.printProfile()
inferConfig = {
"numSteps": 10,
"noiseLevel": noiseLevel,
"pairs": {
0: zip(range(10), range(10))
}
}
exp.infer(objects.provideObjectToInfer(inferConfig), objectName=0)
if profile:
exp.printProfile()
exp.plotInferenceStats(
fields=["L2 Representation",
"Overlap L2 with object",
"L4 Representation"],
) | [
"def",
"runSharedFeatures",
"(",
"noiseLevel",
"=",
"None",
",",
"profile",
"=",
"False",
")",
":",
"exp",
"=",
"L4L2Experiment",
"(",
"\"shared_features\"",
",",
"enableLateralSP",
"=",
"True",
",",
"enableFeedForwardSP",
"=",
"True",
")",
"pairs",
"=",
"crea... | Runs a simple experiment where three objects share a number of location,
feature pairs.
Parameters:
----------------------------
@param noiseLevel (float)
Noise level to add to the locations and features during inference
@param profile (bool)
If True, the network will be profiled after learning and inference | [
"Runs",
"a",
"simple",
"experiment",
"where",
"three",
"objects",
"share",
"a",
"number",
"of",
"location",
"feature",
"pairs",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/l2_pooling/single_column_sp.py#L46-L96 | train | 198,641 |
numenta/htmresearch | projects/l2_pooling/single_column_sp.py | runStretchExperiment | def runStretchExperiment(numObjects=25):
"""
Generates a lot of random objects to profile the network.
Parameters:
----------------------------
@param numObjects (int)
Number of objects to create and learn.
"""
exp = L4L2Experiment(
"profiling_experiment",
enableLateralSP = True,
enableFeedForwardSP=True
)
objects = createObjectMachine(
machineType="simple",
numInputBits=20,
sensorInputSize=1024,
externalInputSize=1024
)
objects.createRandomObjects(numObjects=numObjects, numPoints=10)
exp.learnObjects(objects.provideObjectsToLearn())
exp.printProfile()
inferConfig = {
"numSteps": len(objects[0]),
"pairs": {
0: objects[0]
}
}
exp.infer(objects.provideObjectToInfer(inferConfig), objectName=0)
exp.printProfile()
exp.plotInferenceStats(
fields=["L2 Representation",
"Overlap L2 with object",
"L4 Representation"]
) | python | def runStretchExperiment(numObjects=25):
"""
Generates a lot of random objects to profile the network.
Parameters:
----------------------------
@param numObjects (int)
Number of objects to create and learn.
"""
exp = L4L2Experiment(
"profiling_experiment",
enableLateralSP = True,
enableFeedForwardSP=True
)
objects = createObjectMachine(
machineType="simple",
numInputBits=20,
sensorInputSize=1024,
externalInputSize=1024
)
objects.createRandomObjects(numObjects=numObjects, numPoints=10)
exp.learnObjects(objects.provideObjectsToLearn())
exp.printProfile()
inferConfig = {
"numSteps": len(objects[0]),
"pairs": {
0: objects[0]
}
}
exp.infer(objects.provideObjectToInfer(inferConfig), objectName=0)
exp.printProfile()
exp.plotInferenceStats(
fields=["L2 Representation",
"Overlap L2 with object",
"L4 Representation"]
) | [
"def",
"runStretchExperiment",
"(",
"numObjects",
"=",
"25",
")",
":",
"exp",
"=",
"L4L2Experiment",
"(",
"\"profiling_experiment\"",
",",
"enableLateralSP",
"=",
"True",
",",
"enableFeedForwardSP",
"=",
"True",
")",
"objects",
"=",
"createObjectMachine",
"(",
"ma... | Generates a lot of random objects to profile the network.
Parameters:
----------------------------
@param numObjects (int)
Number of objects to create and learn. | [
"Generates",
"a",
"lot",
"of",
"random",
"objects",
"to",
"profile",
"the",
"network",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/l2_pooling/single_column_sp.py#L161-L201 | train | 198,642 |
numenta/htmresearch | htmresearch/frameworks/layers/physical_object_base.py | PhysicalObject.plot | def plot(self, numPoints=100):
"""
Plots the object in a 3D scatter.
This method should be overriden when possible. This default behavior simply
samples numPoints points from the object and plots them in a 3d scatter.
"""
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for feature in self._FEATURES:
for _ in xrange(numPoints):
x, y, z = tuple(self.sampleLocationFromFeature(feature))
ax.scatter(x, y, z, marker=".")
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.title("{}".format(self))
return fig, ax | python | def plot(self, numPoints=100):
"""
Plots the object in a 3D scatter.
This method should be overriden when possible. This default behavior simply
samples numPoints points from the object and plots them in a 3d scatter.
"""
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for feature in self._FEATURES:
for _ in xrange(numPoints):
x, y, z = tuple(self.sampleLocationFromFeature(feature))
ax.scatter(x, y, z, marker=".")
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.title("{}".format(self))
return fig, ax | [
"def",
"plot",
"(",
"self",
",",
"numPoints",
"=",
"100",
")",
":",
"fig",
"=",
"plt",
".",
"figure",
"(",
")",
"ax",
"=",
"fig",
".",
"add_subplot",
"(",
"111",
",",
"projection",
"=",
"'3d'",
")",
"for",
"feature",
"in",
"self",
".",
"_FEATURES",... | Plots the object in a 3D scatter.
This method should be overriden when possible. This default behavior simply
samples numPoints points from the object and plots them in a 3d scatter. | [
"Plots",
"the",
"object",
"in",
"a",
"3D",
"scatter",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/layers/physical_object_base.py#L124-L145 | train | 198,643 |
numenta/htmresearch | htmresearch/data/sm_sequences.py | printSequence | def printSequence(x, formatString="%d"):
"""
Compact print a list or numpy array.
"""
numElements = len(x)
s = ""
for j in range(numElements):
s += formatString % x[j]
print s | python | def printSequence(x, formatString="%d"):
"""
Compact print a list or numpy array.
"""
numElements = len(x)
s = ""
for j in range(numElements):
s += formatString % x[j]
print s | [
"def",
"printSequence",
"(",
"x",
",",
"formatString",
"=",
"\"%d\"",
")",
":",
"numElements",
"=",
"len",
"(",
"x",
")",
"s",
"=",
"\"\"",
"for",
"j",
"in",
"range",
"(",
"numElements",
")",
":",
"s",
"+=",
"formatString",
"%",
"x",
"[",
"j",
"]",... | Compact print a list or numpy array. | [
"Compact",
"print",
"a",
"list",
"or",
"numpy",
"array",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/data/sm_sequences.py#L46-L54 | train | 198,644 |
numenta/htmresearch | htmresearch/data/sm_sequences.py | printSequences | def printSequences(x, formatString="%d"):
"""
Print a bunch of sequences stored in a 2D numpy array.
"""
[seqLen, numElements] = x.shape
for i in range(seqLen):
s = ""
for j in range(numElements):
s += formatString % x[i][j]
print s | python | def printSequences(x, formatString="%d"):
"""
Print a bunch of sequences stored in a 2D numpy array.
"""
[seqLen, numElements] = x.shape
for i in range(seqLen):
s = ""
for j in range(numElements):
s += formatString % x[i][j]
print s | [
"def",
"printSequences",
"(",
"x",
",",
"formatString",
"=",
"\"%d\"",
")",
":",
"[",
"seqLen",
",",
"numElements",
"]",
"=",
"x",
".",
"shape",
"for",
"i",
"in",
"range",
"(",
"seqLen",
")",
":",
"s",
"=",
"\"\"",
"for",
"j",
"in",
"range",
"(",
... | Print a bunch of sequences stored in a 2D numpy array. | [
"Print",
"a",
"bunch",
"of",
"sequences",
"stored",
"in",
"a",
"2D",
"numpy",
"array",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/data/sm_sequences.py#L56-L65 | train | 198,645 |
numenta/htmresearch | htmresearch/data/sm_sequences.py | SMSequences.initialize | def initialize(self, useRandomEncoder):
"""
Initialize the various data structures.
"""
self.setRandomSeed(self.seed)
self.dim = numpy.shape(self.spatialConfig)[-1]
self.spatialMap = dict( zip( map(tuple, list(self.spatialConfig)),
self.sensoryInputElements))
self.lengthMotorInput1D = (2*self.maxDisplacement + 1) * \
self.numActiveBitsMotorInput
uniqueSensoryElements = list(set(self.sensoryInputElementsPool))
if useRandomEncoder:
self.sensoryEncoder = SDRCategoryEncoder(n=1024,
w=self.numActiveBitsSensoryInput,
categoryList=uniqueSensoryElements,
forced=True)
self.lengthSensoryInput = self.sensoryEncoder.getWidth()
else:
self.lengthSensoryInput = (len(self.sensoryInputElementsPool)+1) * \
self.numActiveBitsSensoryInput
self.sensoryEncoder = CategoryEncoder(w=self.numActiveBitsSensoryInput,
categoryList=uniqueSensoryElements, forced=True)
motorEncoder1D = ScalarEncoder(n=self.lengthMotorInput1D,
w=self.numActiveBitsMotorInput,
minval=-self.maxDisplacement,
maxval=self.maxDisplacement,
clipInput=True,
forced=True)
self.motorEncoder = VectorEncoder(length=self.dim, encoder=motorEncoder1D) | python | def initialize(self, useRandomEncoder):
"""
Initialize the various data structures.
"""
self.setRandomSeed(self.seed)
self.dim = numpy.shape(self.spatialConfig)[-1]
self.spatialMap = dict( zip( map(tuple, list(self.spatialConfig)),
self.sensoryInputElements))
self.lengthMotorInput1D = (2*self.maxDisplacement + 1) * \
self.numActiveBitsMotorInput
uniqueSensoryElements = list(set(self.sensoryInputElementsPool))
if useRandomEncoder:
self.sensoryEncoder = SDRCategoryEncoder(n=1024,
w=self.numActiveBitsSensoryInput,
categoryList=uniqueSensoryElements,
forced=True)
self.lengthSensoryInput = self.sensoryEncoder.getWidth()
else:
self.lengthSensoryInput = (len(self.sensoryInputElementsPool)+1) * \
self.numActiveBitsSensoryInput
self.sensoryEncoder = CategoryEncoder(w=self.numActiveBitsSensoryInput,
categoryList=uniqueSensoryElements, forced=True)
motorEncoder1D = ScalarEncoder(n=self.lengthMotorInput1D,
w=self.numActiveBitsMotorInput,
minval=-self.maxDisplacement,
maxval=self.maxDisplacement,
clipInput=True,
forced=True)
self.motorEncoder = VectorEncoder(length=self.dim, encoder=motorEncoder1D) | [
"def",
"initialize",
"(",
"self",
",",
"useRandomEncoder",
")",
":",
"self",
".",
"setRandomSeed",
"(",
"self",
".",
"seed",
")",
"self",
".",
"dim",
"=",
"numpy",
".",
"shape",
"(",
"self",
".",
"spatialConfig",
")",
"[",
"-",
"1",
"]",
"self",
".",... | Initialize the various data structures. | [
"Initialize",
"the",
"various",
"data",
"structures",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/data/sm_sequences.py#L142-L179 | train | 198,646 |
numenta/htmresearch | htmresearch/data/sm_sequences.py | SMSequences.generateSensorimotorSequence | def generateSensorimotorSequence(self, sequenceLength):
"""
Generate sensorimotor sequences of length sequenceLength.
@param sequenceLength (int)
Length of the sensorimotor sequence.
@return (tuple) Contains:
sensorySequence (list)
Encoded sensory input for whole sequence.
motorSequence (list)
Encoded motor input for whole sequence.
sensorimotorSequence (list)
Encoder sensorimotor input for whole sequence. This is useful
when you want to give external input to temporal memory.
"""
motorSequence = []
sensorySequence = []
sensorimotorSequence = []
currentEyeLoc = self.nupicRandomChoice(self.spatialConfig)
for i in xrange(sequenceLength):
currentSensoryInput = self.spatialMap[tuple(currentEyeLoc)]
nextEyeLoc, currentEyeV = self.getNextEyeLocation(currentEyeLoc)
if self.verbosity:
print "sensory input = ", currentSensoryInput, \
"eye location = ", currentEyeLoc, \
" motor command = ", currentEyeV
sensoryInput = self.encodeSensoryInput(currentSensoryInput)
motorInput = self.encodeMotorInput(list(currentEyeV))
sensorimotorInput = numpy.concatenate((sensoryInput, motorInput))
sensorySequence.append(sensoryInput)
motorSequence.append(motorInput)
sensorimotorSequence.append(sensorimotorInput)
currentEyeLoc = nextEyeLoc
return (sensorySequence, motorSequence, sensorimotorSequence) | python | def generateSensorimotorSequence(self, sequenceLength):
"""
Generate sensorimotor sequences of length sequenceLength.
@param sequenceLength (int)
Length of the sensorimotor sequence.
@return (tuple) Contains:
sensorySequence (list)
Encoded sensory input for whole sequence.
motorSequence (list)
Encoded motor input for whole sequence.
sensorimotorSequence (list)
Encoder sensorimotor input for whole sequence. This is useful
when you want to give external input to temporal memory.
"""
motorSequence = []
sensorySequence = []
sensorimotorSequence = []
currentEyeLoc = self.nupicRandomChoice(self.spatialConfig)
for i in xrange(sequenceLength):
currentSensoryInput = self.spatialMap[tuple(currentEyeLoc)]
nextEyeLoc, currentEyeV = self.getNextEyeLocation(currentEyeLoc)
if self.verbosity:
print "sensory input = ", currentSensoryInput, \
"eye location = ", currentEyeLoc, \
" motor command = ", currentEyeV
sensoryInput = self.encodeSensoryInput(currentSensoryInput)
motorInput = self.encodeMotorInput(list(currentEyeV))
sensorimotorInput = numpy.concatenate((sensoryInput, motorInput))
sensorySequence.append(sensoryInput)
motorSequence.append(motorInput)
sensorimotorSequence.append(sensorimotorInput)
currentEyeLoc = nextEyeLoc
return (sensorySequence, motorSequence, sensorimotorSequence) | [
"def",
"generateSensorimotorSequence",
"(",
"self",
",",
"sequenceLength",
")",
":",
"motorSequence",
"=",
"[",
"]",
"sensorySequence",
"=",
"[",
"]",
"sensorimotorSequence",
"=",
"[",
"]",
"currentEyeLoc",
"=",
"self",
".",
"nupicRandomChoice",
"(",
"self",
"."... | Generate sensorimotor sequences of length sequenceLength.
@param sequenceLength (int)
Length of the sensorimotor sequence.
@return (tuple) Contains:
sensorySequence (list)
Encoded sensory input for whole sequence.
motorSequence (list)
Encoded motor input for whole sequence.
sensorimotorSequence (list)
Encoder sensorimotor input for whole sequence. This is useful
when you want to give external input to temporal memory. | [
"Generate",
"sensorimotor",
"sequences",
"of",
"length",
"sequenceLength",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/data/sm_sequences.py#L182-L226 | train | 198,647 |
numenta/htmresearch | htmresearch/data/sm_sequences.py | SMSequences.getNextEyeLocation | def getNextEyeLocation(self, currentEyeLoc):
"""
Generate next eye location based on current eye location.
@param currentEyeLoc (numpy.array)
Current coordinate describing the eye location in the world.
@return (tuple) Contains:
nextEyeLoc (numpy.array)
Coordinate of the next eye location.
eyeDiff (numpy.array)
Vector describing change from currentEyeLoc to nextEyeLoc.
"""
possibleEyeLocs = []
for loc in self.spatialConfig:
shift = abs(max(loc - currentEyeLoc))
if self.minDisplacement <= shift <= self.maxDisplacement:
possibleEyeLocs.append(loc)
nextEyeLoc = self.nupicRandomChoice(possibleEyeLocs)
eyeDiff = nextEyeLoc - currentEyeLoc
return nextEyeLoc, eyeDiff | python | def getNextEyeLocation(self, currentEyeLoc):
"""
Generate next eye location based on current eye location.
@param currentEyeLoc (numpy.array)
Current coordinate describing the eye location in the world.
@return (tuple) Contains:
nextEyeLoc (numpy.array)
Coordinate of the next eye location.
eyeDiff (numpy.array)
Vector describing change from currentEyeLoc to nextEyeLoc.
"""
possibleEyeLocs = []
for loc in self.spatialConfig:
shift = abs(max(loc - currentEyeLoc))
if self.minDisplacement <= shift <= self.maxDisplacement:
possibleEyeLocs.append(loc)
nextEyeLoc = self.nupicRandomChoice(possibleEyeLocs)
eyeDiff = nextEyeLoc - currentEyeLoc
return nextEyeLoc, eyeDiff | [
"def",
"getNextEyeLocation",
"(",
"self",
",",
"currentEyeLoc",
")",
":",
"possibleEyeLocs",
"=",
"[",
"]",
"for",
"loc",
"in",
"self",
".",
"spatialConfig",
":",
"shift",
"=",
"abs",
"(",
"max",
"(",
"loc",
"-",
"currentEyeLoc",
")",
")",
"if",
"self",
... | Generate next eye location based on current eye location.
@param currentEyeLoc (numpy.array)
Current coordinate describing the eye location in the world.
@return (tuple) Contains:
nextEyeLoc (numpy.array)
Coordinate of the next eye location.
eyeDiff (numpy.array)
Vector describing change from currentEyeLoc to nextEyeLoc. | [
"Generate",
"next",
"eye",
"location",
"based",
"on",
"current",
"eye",
"location",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/data/sm_sequences.py#L279-L303 | train | 198,648 |
numenta/htmresearch | htmresearch/data/sm_sequences.py | SMSequences.setRandomSeed | def setRandomSeed(self, seed):
"""
Reset the nupic random generator. This is necessary to reset random seed to
generate new sequences.
@param seed (int)
Seed for nupic.bindings.Random.
"""
self.seed = seed
self._random = Random()
self._random.setSeed(seed) | python | def setRandomSeed(self, seed):
"""
Reset the nupic random generator. This is necessary to reset random seed to
generate new sequences.
@param seed (int)
Seed for nupic.bindings.Random.
"""
self.seed = seed
self._random = Random()
self._random.setSeed(seed) | [
"def",
"setRandomSeed",
"(",
"self",
",",
"seed",
")",
":",
"self",
".",
"seed",
"=",
"seed",
"self",
".",
"_random",
"=",
"Random",
"(",
")",
"self",
".",
"_random",
".",
"setSeed",
"(",
"seed",
")"
] | Reset the nupic random generator. This is necessary to reset random seed to
generate new sequences.
@param seed (int)
Seed for nupic.bindings.Random. | [
"Reset",
"the",
"nupic",
"random",
"generator",
".",
"This",
"is",
"necessary",
"to",
"reset",
"random",
"seed",
"to",
"generate",
"new",
"sequences",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/data/sm_sequences.py#L306-L316 | train | 198,649 |
numenta/htmresearch | htmresearch/data/sm_sequences.py | SMSequences.encodeMotorInput | def encodeMotorInput(self, motorInput):
"""
Encode motor command to bit vector.
@param motorInput (1D numpy.array)
Motor command to be encoded.
@return (1D numpy.array)
Encoded motor command.
"""
if not hasattr(motorInput, "__iter__"):
motorInput = list([motorInput])
return self.motorEncoder.encode(motorInput) | python | def encodeMotorInput(self, motorInput):
"""
Encode motor command to bit vector.
@param motorInput (1D numpy.array)
Motor command to be encoded.
@return (1D numpy.array)
Encoded motor command.
"""
if not hasattr(motorInput, "__iter__"):
motorInput = list([motorInput])
return self.motorEncoder.encode(motorInput) | [
"def",
"encodeMotorInput",
"(",
"self",
",",
"motorInput",
")",
":",
"if",
"not",
"hasattr",
"(",
"motorInput",
",",
"\"__iter__\"",
")",
":",
"motorInput",
"=",
"list",
"(",
"[",
"motorInput",
"]",
")",
"return",
"self",
".",
"motorEncoder",
".",
"encode"... | Encode motor command to bit vector.
@param motorInput (1D numpy.array)
Motor command to be encoded.
@return (1D numpy.array)
Encoded motor command. | [
"Encode",
"motor",
"command",
"to",
"bit",
"vector",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/data/sm_sequences.py#L333-L346 | train | 198,650 |
numenta/htmresearch | htmresearch/data/sm_sequences.py | SMSequences.decodeMotorInput | def decodeMotorInput(self, motorInputPattern):
"""
Decode motor command from bit vector.
@param motorInputPattern (1D numpy.array)
Encoded motor command.
@return (1D numpy.array)
Decoded motor command.
"""
key = self.motorEncoder.decode(motorInputPattern)[0].keys()[0]
motorCommand = self.motorEncoder.decode(motorInputPattern)[0][key][1][0]
return motorCommand | python | def decodeMotorInput(self, motorInputPattern):
"""
Decode motor command from bit vector.
@param motorInputPattern (1D numpy.array)
Encoded motor command.
@return (1D numpy.array)
Decoded motor command.
"""
key = self.motorEncoder.decode(motorInputPattern)[0].keys()[0]
motorCommand = self.motorEncoder.decode(motorInputPattern)[0][key][1][0]
return motorCommand | [
"def",
"decodeMotorInput",
"(",
"self",
",",
"motorInputPattern",
")",
":",
"key",
"=",
"self",
".",
"motorEncoder",
".",
"decode",
"(",
"motorInputPattern",
")",
"[",
"0",
"]",
".",
"keys",
"(",
")",
"[",
"0",
"]",
"motorCommand",
"=",
"self",
".",
"m... | Decode motor command from bit vector.
@param motorInputPattern (1D numpy.array)
Encoded motor command.
@return (1D numpy.array)
Decoded motor command. | [
"Decode",
"motor",
"command",
"from",
"bit",
"vector",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/data/sm_sequences.py#L349-L362 | train | 198,651 |
numenta/htmresearch | htmresearch/data/sm_sequences.py | SMSequences.printSensoryCodingScheme | def printSensoryCodingScheme(self):
"""
Print sensory inputs along with their encoded versions.
"""
print "\nsensory coding scheme: "
for loc in self.spatialConfig:
sensoryElement = self.spatialMap[tuple(loc)]
print sensoryElement, "%s : " % loc,
printSequence(self.encodeSensoryInput(sensoryElement)) | python | def printSensoryCodingScheme(self):
"""
Print sensory inputs along with their encoded versions.
"""
print "\nsensory coding scheme: "
for loc in self.spatialConfig:
sensoryElement = self.spatialMap[tuple(loc)]
print sensoryElement, "%s : " % loc,
printSequence(self.encodeSensoryInput(sensoryElement)) | [
"def",
"printSensoryCodingScheme",
"(",
"self",
")",
":",
"print",
"\"\\nsensory coding scheme: \"",
"for",
"loc",
"in",
"self",
".",
"spatialConfig",
":",
"sensoryElement",
"=",
"self",
".",
"spatialMap",
"[",
"tuple",
"(",
"loc",
")",
"]",
"print",
"sensoryEle... | Print sensory inputs along with their encoded versions. | [
"Print",
"sensory",
"inputs",
"along",
"with",
"their",
"encoded",
"versions",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/data/sm_sequences.py#L391-L399 | train | 198,652 |
numenta/htmresearch | htmresearch/data/sm_sequences.py | SMSequences.build | def build(self, n, vec):
"""
Recursive function to help print motor coding scheme.
"""
for i in range(-self.maxDisplacement, self.maxDisplacement+1):
next = vec + [i]
if n == 1:
print '{:>5}\t'.format(next), " = ",
printSequence(self.encodeMotorInput(next))
else:
self.build(n-1, next) | python | def build(self, n, vec):
"""
Recursive function to help print motor coding scheme.
"""
for i in range(-self.maxDisplacement, self.maxDisplacement+1):
next = vec + [i]
if n == 1:
print '{:>5}\t'.format(next), " = ",
printSequence(self.encodeMotorInput(next))
else:
self.build(n-1, next) | [
"def",
"build",
"(",
"self",
",",
"n",
",",
"vec",
")",
":",
"for",
"i",
"in",
"range",
"(",
"-",
"self",
".",
"maxDisplacement",
",",
"self",
".",
"maxDisplacement",
"+",
"1",
")",
":",
"next",
"=",
"vec",
"+",
"[",
"i",
"]",
"if",
"n",
"==",
... | Recursive function to help print motor coding scheme. | [
"Recursive",
"function",
"to",
"help",
"print",
"motor",
"coding",
"scheme",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/data/sm_sequences.py#L411-L421 | train | 198,653 |
numenta/htmresearch | htmresearch/frameworks/layers/combined_sequence_experiment.py | L4TMExperiment.getDefaultTMParams | def getDefaultTMParams(self, inputSize, numInputBits):
"""
Returns a good default set of parameters to use in the TM region.
"""
sampleSize = int(1.5 * numInputBits)
if numInputBits == 20:
activationThreshold = 18
minThreshold = 18
elif numInputBits == 10:
activationThreshold = 8
minThreshold = 8
else:
activationThreshold = int(numInputBits * .6)
minThreshold = activationThreshold
return {
"columnCount": inputSize,
"cellsPerColumn": 16,
"learn": True,
"learnOnOneCell": False,
"initialPermanence": 0.41,
"connectedPermanence": 0.6,
"permanenceIncrement": 0.1,
"permanenceDecrement": 0.03,
"minThreshold": minThreshold,
"basalPredictedSegmentDecrement": 0.003,
"apicalPredictedSegmentDecrement": 0.0,
"reducedBasalThreshold": int(activationThreshold*0.6),
"activationThreshold": activationThreshold,
"sampleSize": sampleSize,
"implementation": "ApicalTiebreak",
"seed": self.seed
} | python | def getDefaultTMParams(self, inputSize, numInputBits):
"""
Returns a good default set of parameters to use in the TM region.
"""
sampleSize = int(1.5 * numInputBits)
if numInputBits == 20:
activationThreshold = 18
minThreshold = 18
elif numInputBits == 10:
activationThreshold = 8
minThreshold = 8
else:
activationThreshold = int(numInputBits * .6)
minThreshold = activationThreshold
return {
"columnCount": inputSize,
"cellsPerColumn": 16,
"learn": True,
"learnOnOneCell": False,
"initialPermanence": 0.41,
"connectedPermanence": 0.6,
"permanenceIncrement": 0.1,
"permanenceDecrement": 0.03,
"minThreshold": minThreshold,
"basalPredictedSegmentDecrement": 0.003,
"apicalPredictedSegmentDecrement": 0.0,
"reducedBasalThreshold": int(activationThreshold*0.6),
"activationThreshold": activationThreshold,
"sampleSize": sampleSize,
"implementation": "ApicalTiebreak",
"seed": self.seed
} | [
"def",
"getDefaultTMParams",
"(",
"self",
",",
"inputSize",
",",
"numInputBits",
")",
":",
"sampleSize",
"=",
"int",
"(",
"1.5",
"*",
"numInputBits",
")",
"if",
"numInputBits",
"==",
"20",
":",
"activationThreshold",
"=",
"18",
"minThreshold",
"=",
"18",
"el... | Returns a good default set of parameters to use in the TM region. | [
"Returns",
"a",
"good",
"default",
"set",
"of",
"parameters",
"to",
"use",
"in",
"the",
"TM",
"region",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/layers/combined_sequence_experiment.py#L178-L211 | train | 198,654 |
numenta/htmresearch | projects/energy_based_pooling/energy_based_models/utils.py | create_movie | def create_movie(fig, update_figure, filename, title, fps=15, dpi=100):
"""Helps us to create a movie."""
FFMpegWriter = manimation.writers['ffmpeg']
metadata = dict(title=title)
writer = FFMpegWriter(fps=fps, metadata=metadata)
with writer.saving(fig, filename, dpi):
t = 0
while True:
if update_figure(t):
writer.grab_frame()
t += 1
else:
break | python | def create_movie(fig, update_figure, filename, title, fps=15, dpi=100):
"""Helps us to create a movie."""
FFMpegWriter = manimation.writers['ffmpeg']
metadata = dict(title=title)
writer = FFMpegWriter(fps=fps, metadata=metadata)
with writer.saving(fig, filename, dpi):
t = 0
while True:
if update_figure(t):
writer.grab_frame()
t += 1
else:
break | [
"def",
"create_movie",
"(",
"fig",
",",
"update_figure",
",",
"filename",
",",
"title",
",",
"fps",
"=",
"15",
",",
"dpi",
"=",
"100",
")",
":",
"FFMpegWriter",
"=",
"manimation",
".",
"writers",
"[",
"'ffmpeg'",
"]",
"metadata",
"=",
"dict",
"(",
"tit... | Helps us to create a movie. | [
"Helps",
"us",
"to",
"create",
"a",
"movie",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/energy_based_pooling/energy_based_models/utils.py#L61-L74 | train | 198,655 |
numenta/htmresearch | projects/sdr_paper/run_simulations.py | createExperimentArgs | def createExperimentArgs():
"""Run the basic probability of false positives experiment."""
experimentArguments = []
# for n in [300, 500, 700, 900, 1100, 1300, 1500, 1700, 1900, 2100, 2300,
# 2500, 2700, 2900, 3100, 3300, 3500, 3700, 3900]:
for n in [1500, 1700, 1900, 2100]:
for a in [128]:
# Some parameter combinations are just not worth running!
if ( a==64 and n<=1500 ) or ( a==128 and n<= 1900 ) or ( a==256 ):
experimentArguments.append(
("./sdr_calculations2", "results_errorbars/temp_"+str(n)+"_"+str(a)+".csv",
"200000", str(n), str(a), "0"),
)
return experimentArguments | python | def createExperimentArgs():
"""Run the basic probability of false positives experiment."""
experimentArguments = []
# for n in [300, 500, 700, 900, 1100, 1300, 1500, 1700, 1900, 2100, 2300,
# 2500, 2700, 2900, 3100, 3300, 3500, 3700, 3900]:
for n in [1500, 1700, 1900, 2100]:
for a in [128]:
# Some parameter combinations are just not worth running!
if ( a==64 and n<=1500 ) or ( a==128 and n<= 1900 ) or ( a==256 ):
experimentArguments.append(
("./sdr_calculations2", "results_errorbars/temp_"+str(n)+"_"+str(a)+".csv",
"200000", str(n), str(a), "0"),
)
return experimentArguments | [
"def",
"createExperimentArgs",
"(",
")",
":",
"experimentArguments",
"=",
"[",
"]",
"# for n in [300, 500, 700, 900, 1100, 1300, 1500, 1700, 1900, 2100, 2300,",
"# 2500, 2700, 2900, 3100, 3300, 3500, 3700, 3900]:",
"for",
"n",
"in",
"[",
"1500",
",",
"1700",
",",
"190... | Run the basic probability of false positives experiment. | [
"Run",
"the",
"basic",
"probability",
"of",
"false",
"positives",
"experiment",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/sdr_paper/run_simulations.py#L31-L44 | train | 198,656 |
numenta/htmresearch | projects/sdr_paper/run_simulations.py | createNoiseExperimentArgs | def createNoiseExperimentArgs():
"""Run the probability of false negatives with noise experiment."""
experimentArguments = []
n = 6000
for a in [128]:
noisePct = 0.75
while noisePct <= 0.85:
noise = int(round(noisePct*a,0))
# Some parameter combinations are just not worth running!
experimentArguments.append(
("./sdr_calculations2",
"results_noise_10m/temp_"+str(n)+"_"+str(a)+"_"+str(noise)+"_30.csv",
"200000", str(n), str(a), str(noise))
)
noisePct += 0.05
return experimentArguments | python | def createNoiseExperimentArgs():
"""Run the probability of false negatives with noise experiment."""
experimentArguments = []
n = 6000
for a in [128]:
noisePct = 0.75
while noisePct <= 0.85:
noise = int(round(noisePct*a,0))
# Some parameter combinations are just not worth running!
experimentArguments.append(
("./sdr_calculations2",
"results_noise_10m/temp_"+str(n)+"_"+str(a)+"_"+str(noise)+"_30.csv",
"200000", str(n), str(a), str(noise))
)
noisePct += 0.05
return experimentArguments | [
"def",
"createNoiseExperimentArgs",
"(",
")",
":",
"experimentArguments",
"=",
"[",
"]",
"n",
"=",
"6000",
"for",
"a",
"in",
"[",
"128",
"]",
":",
"noisePct",
"=",
"0.75",
"while",
"noisePct",
"<=",
"0.85",
":",
"noise",
"=",
"int",
"(",
"round",
"(",
... | Run the probability of false negatives with noise experiment. | [
"Run",
"the",
"probability",
"of",
"false",
"negatives",
"with",
"noise",
"experiment",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/sdr_paper/run_simulations.py#L47-L62 | train | 198,657 |
numenta/htmresearch | projects/location_layer/memorize_math_pooled_tm/generate_sdrs.py | generateMinicolumnSDRs | def generateMinicolumnSDRs(n, w, threshold):
"""
Wraps enumerateDistantSDRsBruteForce, caching its result on the filesystem.
"""
if not os.path.exists("sdrs"):
os.makedirs("sdrs")
filename = "sdrs/{}_{}_{}.json".format(n, w, threshold)
if len(glob.glob(filename)) > 0:
with open(filename, "r") as fIn:
sdrs = json.load(fIn)
else:
begin = time.time()
sdrs = enumerateDistantSDRsBruteForce(n, w, threshold)
end = time.time()
with open(filename, "w") as fOut:
json.dump([sdr.tolist() for sdr in sdrs], fOut)
print("Saved", filename)
print("Elapsed time: {:.2f} seconds".format(end - begin))
return sdrs | python | def generateMinicolumnSDRs(n, w, threshold):
"""
Wraps enumerateDistantSDRsBruteForce, caching its result on the filesystem.
"""
if not os.path.exists("sdrs"):
os.makedirs("sdrs")
filename = "sdrs/{}_{}_{}.json".format(n, w, threshold)
if len(glob.glob(filename)) > 0:
with open(filename, "r") as fIn:
sdrs = json.load(fIn)
else:
begin = time.time()
sdrs = enumerateDistantSDRsBruteForce(n, w, threshold)
end = time.time()
with open(filename, "w") as fOut:
json.dump([sdr.tolist() for sdr in sdrs], fOut)
print("Saved", filename)
print("Elapsed time: {:.2f} seconds".format(end - begin))
return sdrs | [
"def",
"generateMinicolumnSDRs",
"(",
"n",
",",
"w",
",",
"threshold",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"\"sdrs\"",
")",
":",
"os",
".",
"makedirs",
"(",
"\"sdrs\"",
")",
"filename",
"=",
"\"sdrs/{}_{}_{}.json\"",
".",
"forma... | Wraps enumerateDistantSDRsBruteForce, caching its result on the filesystem. | [
"Wraps",
"enumerateDistantSDRsBruteForce",
"caching",
"its",
"result",
"on",
"the",
"filesystem",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/location_layer/memorize_math_pooled_tm/generate_sdrs.py#L43-L64 | train | 198,658 |
numenta/htmresearch | projects/location_layer/memorize_math_pooled_tm/generate_sdrs.py | createEvenlySpreadSDRs | def createEvenlySpreadSDRs(numSDRs, n, w):
"""
Return a set of ~random SDRs that use every available bit
an equal number of times, +- 1.
"""
assert w <= n
available = np.arange(n)
np.random.shuffle(available)
SDRs = []
for _ in xrange(numSDRs):
selected = available[:w]
available = available[w:]
if available.size == 0:
remainderSelected = np.random.choice(
np.setdiff1d(np.arange(n), selected),
size=(w - selected.size),
replace= False)
selected = np.append(selected, remainderSelected)
available = np.setdiff1d(np.arange(n), remainderSelected)
np.random.shuffle(available)
selected.sort()
SDRs.append(selected)
return SDRs | python | def createEvenlySpreadSDRs(numSDRs, n, w):
"""
Return a set of ~random SDRs that use every available bit
an equal number of times, +- 1.
"""
assert w <= n
available = np.arange(n)
np.random.shuffle(available)
SDRs = []
for _ in xrange(numSDRs):
selected = available[:w]
available = available[w:]
if available.size == 0:
remainderSelected = np.random.choice(
np.setdiff1d(np.arange(n), selected),
size=(w - selected.size),
replace= False)
selected = np.append(selected, remainderSelected)
available = np.setdiff1d(np.arange(n), remainderSelected)
np.random.shuffle(available)
selected.sort()
SDRs.append(selected)
return SDRs | [
"def",
"createEvenlySpreadSDRs",
"(",
"numSDRs",
",",
"n",
",",
"w",
")",
":",
"assert",
"w",
"<=",
"n",
"available",
"=",
"np",
".",
"arange",
"(",
"n",
")",
"np",
".",
"random",
".",
"shuffle",
"(",
"available",
")",
"SDRs",
"=",
"[",
"]",
"for",... | Return a set of ~random SDRs that use every available bit
an equal number of times, +- 1. | [
"Return",
"a",
"set",
"of",
"~random",
"SDRs",
"that",
"use",
"every",
"available",
"bit",
"an",
"equal",
"number",
"of",
"times",
"+",
"-",
"1",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/location_layer/memorize_math_pooled_tm/generate_sdrs.py#L67-L96 | train | 198,659 |
numenta/htmresearch | projects/location_layer/memorize_math_pooled_tm/generate_sdrs.py | carefullyCollideContexts | def carefullyCollideContexts(numContexts, numCells, numMinicolumns):
"""
Use a greedy algorithm to choose how each minicolumn should distribute
contexts between its cells.
@return (list of lists of lists of ints)
iContext integers for each cell, grouped by minicolumn. For example,
[[[1, 3], [2,4]],
[[1, 2]]]
would specify that cell 0 connects to location 1 and location 3, while cell
1 connects to locations 2 and 4, and cell 2 (in the second minicolumn)
connects to locations 1 and 2.
"""
minicolumns = []
for _ in xrange(numMinicolumns):
contextsForCell = [set() for _ in xrange(numCells)]
contexts = range(numContexts)
random.shuffle(contexts)
while len(contexts) > 0:
eligibleCells = range(len(contextsForCell))
while len(contexts) > 0 and len(eligibleCells) > 0:
candidateAdditions = [(context, cell)
for context in contexts
for cell in eligibleCells]
# How many new duplicate collisions will come from this addition?
#
# For every other context in on this cell, check how many times this
# pair occurs elsewhere.
badness = [sum(sum(1 if (context in otherCellContexts and
otherContext in otherCellContexts) else 0
for minicolumn in minicolumns
for otherCellContexts in minicolumn)
for otherContext in contextsForCell[cell])
for context, cell in candidateAdditions]
selectedContext, selectedCell = candidateAdditions[
badness.index(min(badness))]
contextsForCell[selectedCell].add(selectedContext)
eligibleCells.remove(selectedCell)
contexts.remove(selectedContext)
minicolumns.append(contextsForCell)
return minicolumns | python | def carefullyCollideContexts(numContexts, numCells, numMinicolumns):
"""
Use a greedy algorithm to choose how each minicolumn should distribute
contexts between its cells.
@return (list of lists of lists of ints)
iContext integers for each cell, grouped by minicolumn. For example,
[[[1, 3], [2,4]],
[[1, 2]]]
would specify that cell 0 connects to location 1 and location 3, while cell
1 connects to locations 2 and 4, and cell 2 (in the second minicolumn)
connects to locations 1 and 2.
"""
minicolumns = []
for _ in xrange(numMinicolumns):
contextsForCell = [set() for _ in xrange(numCells)]
contexts = range(numContexts)
random.shuffle(contexts)
while len(contexts) > 0:
eligibleCells = range(len(contextsForCell))
while len(contexts) > 0 and len(eligibleCells) > 0:
candidateAdditions = [(context, cell)
for context in contexts
for cell in eligibleCells]
# How many new duplicate collisions will come from this addition?
#
# For every other context in on this cell, check how many times this
# pair occurs elsewhere.
badness = [sum(sum(1 if (context in otherCellContexts and
otherContext in otherCellContexts) else 0
for minicolumn in minicolumns
for otherCellContexts in minicolumn)
for otherContext in contextsForCell[cell])
for context, cell in candidateAdditions]
selectedContext, selectedCell = candidateAdditions[
badness.index(min(badness))]
contextsForCell[selectedCell].add(selectedContext)
eligibleCells.remove(selectedCell)
contexts.remove(selectedContext)
minicolumns.append(contextsForCell)
return minicolumns | [
"def",
"carefullyCollideContexts",
"(",
"numContexts",
",",
"numCells",
",",
"numMinicolumns",
")",
":",
"minicolumns",
"=",
"[",
"]",
"for",
"_",
"in",
"xrange",
"(",
"numMinicolumns",
")",
":",
"contextsForCell",
"=",
"[",
"set",
"(",
")",
"for",
"_",
"i... | Use a greedy algorithm to choose how each minicolumn should distribute
contexts between its cells.
@return (list of lists of lists of ints)
iContext integers for each cell, grouped by minicolumn. For example,
[[[1, 3], [2,4]],
[[1, 2]]]
would specify that cell 0 connects to location 1 and location 3, while cell
1 connects to locations 2 and 4, and cell 2 (in the second minicolumn)
connects to locations 1 and 2. | [
"Use",
"a",
"greedy",
"algorithm",
"to",
"choose",
"how",
"each",
"minicolumn",
"should",
"distribute",
"contexts",
"between",
"its",
"cells",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/location_layer/memorize_math_pooled_tm/generate_sdrs.py#L99-L147 | train | 198,660 |
numenta/htmresearch | projects/nik/hello_ik.py | printSegmentForCell | def printSegmentForCell(tm, cell):
"""Print segment information for this cell"""
print "Segments for cell", cell, ":"
for seg in tm.basalConnections._cells[cell]._segments:
print " ",
synapses = seg._synapses
for s in synapses:
print "%d:%g" %(s.presynapticCell,s.permanence),
print | python | def printSegmentForCell(tm, cell):
"""Print segment information for this cell"""
print "Segments for cell", cell, ":"
for seg in tm.basalConnections._cells[cell]._segments:
print " ",
synapses = seg._synapses
for s in synapses:
print "%d:%g" %(s.presynapticCell,s.permanence),
print | [
"def",
"printSegmentForCell",
"(",
"tm",
",",
"cell",
")",
":",
"print",
"\"Segments for cell\"",
",",
"cell",
",",
"\":\"",
"for",
"seg",
"in",
"tm",
".",
"basalConnections",
".",
"_cells",
"[",
"cell",
"]",
".",
"_segments",
":",
"print",
"\" \"",
","... | Print segment information for this cell | [
"Print",
"segment",
"information",
"for",
"this",
"cell"
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/nik/hello_ik.py#L36-L44 | train | 198,661 |
numenta/htmresearch | htmresearch/regions/RawValues.py | RawValues.compute | def compute(self, inputs, outputs):
"""
Get the next record from the queue and outputs it.
"""
if len(self.queue) > 0:
# Take the top element of the data queue
data = self.queue.pop()
else:
raise Exception("RawValues: No data: queue is empty ")
# Copy data into output vectors
outputs["resetOut"][0] = data["reset"]
outputs["dataOut"][:] = data["dataOut"] | python | def compute(self, inputs, outputs):
"""
Get the next record from the queue and outputs it.
"""
if len(self.queue) > 0:
# Take the top element of the data queue
data = self.queue.pop()
else:
raise Exception("RawValues: No data: queue is empty ")
# Copy data into output vectors
outputs["resetOut"][0] = data["reset"]
outputs["dataOut"][:] = data["dataOut"] | [
"def",
"compute",
"(",
"self",
",",
"inputs",
",",
"outputs",
")",
":",
"if",
"len",
"(",
"self",
".",
"queue",
")",
">",
"0",
":",
"# Take the top element of the data queue",
"data",
"=",
"self",
".",
"queue",
".",
"pop",
"(",
")",
"else",
":",
"raise... | Get the next record from the queue and outputs it. | [
"Get",
"the",
"next",
"record",
"from",
"the",
"queue",
"and",
"outputs",
"it",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/regions/RawValues.py#L80-L92 | train | 198,662 |
numenta/htmresearch | htmresearch/regions/RawValues.py | RawValues.addDataToQueue | def addDataToQueue(self, displacement, reset=False):
"""
Add the given displacement to the region's internal queue. Calls to compute
will cause items in the queue to be dequeued in FIFO order.
:param displacement: Two floats representing translation vector [dx, dy] to
be passed to the linked regions via 'dataOut'
:type displacement: list
:param reset: Reset flag to be passed to the linked regions via 'resetOut'
:type reset: bool
"""
self.queue.appendleft({
"dataOut": list(displacement),
"reset": bool(reset)
}) | python | def addDataToQueue(self, displacement, reset=False):
"""
Add the given displacement to the region's internal queue. Calls to compute
will cause items in the queue to be dequeued in FIFO order.
:param displacement: Two floats representing translation vector [dx, dy] to
be passed to the linked regions via 'dataOut'
:type displacement: list
:param reset: Reset flag to be passed to the linked regions via 'resetOut'
:type reset: bool
"""
self.queue.appendleft({
"dataOut": list(displacement),
"reset": bool(reset)
}) | [
"def",
"addDataToQueue",
"(",
"self",
",",
"displacement",
",",
"reset",
"=",
"False",
")",
":",
"self",
".",
"queue",
".",
"appendleft",
"(",
"{",
"\"dataOut\"",
":",
"list",
"(",
"displacement",
")",
",",
"\"reset\"",
":",
"bool",
"(",
"reset",
")",
... | Add the given displacement to the region's internal queue. Calls to compute
will cause items in the queue to be dequeued in FIFO order.
:param displacement: Two floats representing translation vector [dx, dy] to
be passed to the linked regions via 'dataOut'
:type displacement: list
:param reset: Reset flag to be passed to the linked regions via 'resetOut'
:type reset: bool | [
"Add",
"the",
"given",
"displacement",
"to",
"the",
"region",
"s",
"internal",
"queue",
".",
"Calls",
"to",
"compute",
"will",
"cause",
"items",
"in",
"the",
"queue",
"to",
"be",
"dequeued",
"in",
"FIFO",
"order",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/regions/RawValues.py#L94-L108 | train | 198,663 |
numenta/htmresearch | htmresearch/frameworks/union_temporal_pooling/union_temporal_pooler_experiment.py | UnionTemporalPoolerExperiment.runNetworkOnSequences | def runNetworkOnSequences(self, inputSequences, inputCategories, tmLearn=True,
upLearn=None, classifierLearn=False, verbosity=0,
progressInterval=None):
"""
Runs Union Temporal Pooler network on specified sequence.
@param inputSequences One or more sequences of input patterns.
Each should be terminated with None.
@param inputCategories A sequence of category representations
for each element in inputSequences
Each should be terminated with None.
@param tmLearn: (bool) Temporal Memory learning mode
@param upLearn: (None, bool) Union Temporal Pooler learning mode. If None,
Union Temporal Pooler will not be run.
@param classifierLearn: (bool) Classifier learning mode
@param progressInterval: (int) Interval of console progress updates
in terms of timesteps.
"""
currentTime = time.time()
for i in xrange(len(inputSequences)):
sensorPattern = inputSequences[i]
inputCategory = inputCategories[i]
self.runNetworkOnPattern(sensorPattern,
tmLearn=tmLearn,
upLearn=upLearn,
sequenceLabel=inputCategory)
if classifierLearn and sensorPattern is not None:
unionSDR = self.up.getUnionSDR()
upCellCount = self.up.getColumnDimensions()
self.classifier.learn(unionSDR, inputCategory, isSparse=upCellCount)
if verbosity > 1:
pprint.pprint("{0} is category {1}".format(unionSDR, inputCategory))
if progressInterval is not None and i > 0 and i % progressInterval == 0:
elapsed = (time.time() - currentTime) / 60.0
print ("Ran {0} / {1} elements of sequence in "
"{2:0.2f} minutes.".format(i, len(inputSequences), elapsed))
currentTime = time.time()
print MonitorMixinBase.mmPrettyPrintMetrics(
self.tm.mmGetDefaultMetrics())
if verbosity >= 2:
traces = self.tm.mmGetDefaultTraces(verbosity=verbosity)
print MonitorMixinBase.mmPrettyPrintTraces(traces,
breakOnResets=
self.tm.mmGetTraceResets())
if upLearn is not None:
traces = self.up.mmGetDefaultTraces(verbosity=verbosity)
print MonitorMixinBase.mmPrettyPrintTraces(traces,
breakOnResets=
self.up.mmGetTraceResets())
print | python | def runNetworkOnSequences(self, inputSequences, inputCategories, tmLearn=True,
upLearn=None, classifierLearn=False, verbosity=0,
progressInterval=None):
"""
Runs Union Temporal Pooler network on specified sequence.
@param inputSequences One or more sequences of input patterns.
Each should be terminated with None.
@param inputCategories A sequence of category representations
for each element in inputSequences
Each should be terminated with None.
@param tmLearn: (bool) Temporal Memory learning mode
@param upLearn: (None, bool) Union Temporal Pooler learning mode. If None,
Union Temporal Pooler will not be run.
@param classifierLearn: (bool) Classifier learning mode
@param progressInterval: (int) Interval of console progress updates
in terms of timesteps.
"""
currentTime = time.time()
for i in xrange(len(inputSequences)):
sensorPattern = inputSequences[i]
inputCategory = inputCategories[i]
self.runNetworkOnPattern(sensorPattern,
tmLearn=tmLearn,
upLearn=upLearn,
sequenceLabel=inputCategory)
if classifierLearn and sensorPattern is not None:
unionSDR = self.up.getUnionSDR()
upCellCount = self.up.getColumnDimensions()
self.classifier.learn(unionSDR, inputCategory, isSparse=upCellCount)
if verbosity > 1:
pprint.pprint("{0} is category {1}".format(unionSDR, inputCategory))
if progressInterval is not None and i > 0 and i % progressInterval == 0:
elapsed = (time.time() - currentTime) / 60.0
print ("Ran {0} / {1} elements of sequence in "
"{2:0.2f} minutes.".format(i, len(inputSequences), elapsed))
currentTime = time.time()
print MonitorMixinBase.mmPrettyPrintMetrics(
self.tm.mmGetDefaultMetrics())
if verbosity >= 2:
traces = self.tm.mmGetDefaultTraces(verbosity=verbosity)
print MonitorMixinBase.mmPrettyPrintTraces(traces,
breakOnResets=
self.tm.mmGetTraceResets())
if upLearn is not None:
traces = self.up.mmGetDefaultTraces(verbosity=verbosity)
print MonitorMixinBase.mmPrettyPrintTraces(traces,
breakOnResets=
self.up.mmGetTraceResets())
print | [
"def",
"runNetworkOnSequences",
"(",
"self",
",",
"inputSequences",
",",
"inputCategories",
",",
"tmLearn",
"=",
"True",
",",
"upLearn",
"=",
"None",
",",
"classifierLearn",
"=",
"False",
",",
"verbosity",
"=",
"0",
",",
"progressInterval",
"=",
"None",
")",
... | Runs Union Temporal Pooler network on specified sequence.
@param inputSequences One or more sequences of input patterns.
Each should be terminated with None.
@param inputCategories A sequence of category representations
for each element in inputSequences
Each should be terminated with None.
@param tmLearn: (bool) Temporal Memory learning mode
@param upLearn: (None, bool) Union Temporal Pooler learning mode. If None,
Union Temporal Pooler will not be run.
@param classifierLearn: (bool) Classifier learning mode
@param progressInterval: (int) Interval of console progress updates
in terms of timesteps. | [
"Runs",
"Union",
"Temporal",
"Pooler",
"network",
"on",
"specified",
"sequence",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/union_temporal_pooling/union_temporal_pooler_experiment.py#L134-L192 | train | 198,664 |
def getUnionTemporalPoolerInput(self):
  """
  Build the three input vectors the Union Temporal Pooler consumes from the
  current Temporal Memory state.

  @return (tuple) three dense float arrays:
          activeCells          -- 1 at every currently active TM cell
          predictedActiveCells -- 1 at every correctly predicted active cell
          burstingColumns      -- 1 at every unpredicted (bursting) column
  """
  numCells = self.tm.numberOfCells()

  activeCells = numpy.zeros(numCells).astype(realDType)
  activeCells[list(self.tm.activeCellsIndices())] = 1

  predictedActiveCells = numpy.zeros(numCells).astype(realDType)
  predictedActiveCells[list(self.tm.predictedActiveCellsIndices())] = 1

  burstingColumns = numpy.zeros(self.tm.numberOfColumns()).astype(realDType)
  burstingColumns[list(self.tm.unpredictedActiveColumns)] = 1

  return activeCells, predictedActiveCells, burstingColumns
"""
Gets the Union Temporal Pooler input from the Temporal Memory
"""
activeCells = numpy.zeros(self.tm.numberOfCells()).astype(realDType)
activeCells[list(self.tm.activeCellsIndices())] = 1
predictedActiveCells = numpy.zeros(self.tm.numberOfCells()).astype(
realDType)
predictedActiveCells[list(self.tm.predictedActiveCellsIndices())] = 1
burstingColumns = numpy.zeros(self.tm.numberOfColumns()).astype(realDType)
burstingColumns[list(self.tm.unpredictedActiveColumns)] = 1
return activeCells, predictedActiveCells, burstingColumns | [
"def",
"getUnionTemporalPoolerInput",
"(",
"self",
")",
":",
"activeCells",
"=",
"numpy",
".",
"zeros",
"(",
"self",
".",
"tm",
".",
"numberOfCells",
"(",
")",
")",
".",
"astype",
"(",
"realDType",
")",
"activeCells",
"[",
"list",
"(",
"self",
".",
"tm",... | Gets the Union Temporal Pooler input from the Temporal Memory | [
"Gets",
"the",
"Union",
"Temporal",
"Pooler",
"input",
"from",
"the",
"Temporal",
"Memory"
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/union_temporal_pooling/union_temporal_pooler_experiment.py#L213-L227 | train | 198,665 |
def compute(self, inputVector, learn, activeArray):
  """
  Mirror of the SpatialPooler `compute` interface: encode one input vector,
  optionally learn from it, and report the indices of the active columns.

  @param inputVector (array) input pattern
  @param learn       (bool)  when True, update the activity statistics and
                             then the weights for this pattern
  @param activeArray (array) output buffer, modified in place: entries of
                             the winning columns are set to 1
  @return (array) indices of the active columns
  """
  encoding = self.encode(inputVector)
  winners = np.where(encoding == 1.)[0]

  if learn:
    # Statistics first, then weights -- same order as learn().
    self.update_statistics([encoding])
    self.update_weights([inputVector], [encoding])

  activeArray[winners] = 1.
  return winners
"""This method resembles the primary public method of the SpatialPooler class.
It takes a input vector and outputs the indices of the active columns. If 'learn'
is set to True, this method also performs weight updates and updates to the activity
statistics according to the respective methods implemented below."""
x = inputVector
y = self.encode(x)
active_units = np.where(y==1.)[0]
if learn:
self.update_statistics([y])
self.update_weights([x],[y])
activeArray[active_units] = 1.
return active_units | [
"def",
"compute",
"(",
"self",
",",
"inputVector",
",",
"learn",
",",
"activeArray",
")",
":",
"x",
"=",
"inputVector",
"y",
"=",
"self",
".",
"encode",
"(",
"x",
")",
"active_units",
"=",
"np",
".",
"where",
"(",
"y",
"==",
"1.",
")",
"[",
"0",
... | This method resembles the primary public method of the SpatialPooler class.
It takes a input vector and outputs the indices of the active columns. If 'learn'
is set to True, this method also performs weight updates and updates to the activity
statistics according to the respective methods implemented below. | [
"This",
"method",
"resembles",
"the",
"primary",
"public",
"method",
"of",
"the",
"SpatialPooler",
"class",
".",
"It",
"takes",
"a",
"input",
"vector",
"and",
"outputs",
"the",
"indices",
"of",
"the",
"active",
"columns",
".",
"If",
"learn",
"is",
"set",
"... | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/energy_based_pooling/energy_based_models/energy_based_pooler.py#L178-L192 | train | 198,666 |
def encode_batch(self, inputBatch):
  """
  Encode every input array in a batch; no learning is performed.

  @param inputBatch (iterable) batch of input arrays
  @return (array) stacked encodings, one row per input
  """
  return np.array([self.encode(pattern) for pattern in inputBatch])
"""Encodes a whole batch of input arrays, without learning."""
X = inputBatch
encode = self.encode
Y = np.array([ encode(x) for x in X])
return Y | [
"def",
"encode_batch",
"(",
"self",
",",
"inputBatch",
")",
":",
"X",
"=",
"inputBatch",
"encode",
"=",
"self",
".",
"encode",
"Y",
"=",
"np",
".",
"array",
"(",
"[",
"encode",
"(",
"x",
")",
"for",
"x",
"in",
"X",
"]",
")",
"return",
"Y"
] | Encodes a whole batch of input arrays, without learning. | [
"Encodes",
"a",
"whole",
"batch",
"of",
"input",
"arrays",
"without",
"learning",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/energy_based_pooling/energy_based_models/energy_based_pooler.py#L208-L213 | train | 198,667 |
def learn(self, x):
  """
  Encode a single input array and apply the learning updates: activity
  statistics first, then the weight update.

  @param x (array) input pattern
  @return (array) the encoding produced for x
  """
  encoding = self.encode(x)
  self.update_statistics([encoding])
  self.update_weights([x], [encoding])
  return encoding
"""Encodes an input array, and performs weight updates and updates to the activity
statistics according to the respective methods implemented below."""
y = self.encode(x)
self.update_statistics([y])
self.update_weights([x],[y])
return y | [
"def",
"learn",
"(",
"self",
",",
"x",
")",
":",
"y",
"=",
"self",
".",
"encode",
"(",
"x",
")",
"self",
".",
"update_statistics",
"(",
"[",
"y",
"]",
")",
"self",
".",
"update_weights",
"(",
"[",
"x",
"]",
",",
"[",
"y",
"]",
")",
"return",
... | Encodes an input array, and performs weight updates and updates to the activity
statistics according to the respective methods implemented below. | [
"Encodes",
"an",
"input",
"array",
"and",
"performs",
"weight",
"updates",
"and",
"updates",
"to",
"the",
"activity",
"statistics",
"according",
"to",
"the",
"respective",
"methods",
"implemented",
"below",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/energy_based_pooling/energy_based_models/energy_based_pooler.py#L215-L221 | train | 198,668 |
def learn_batch(self, inputBatch):
  """
  Encode a whole batch of inputs and apply the learning updates for the
  batch: activity statistics first, then the weight update.

  @param inputBatch (array) batch of input arrays
  @return (array) stacked encodings, one row per input
  """
  encodings = self.encode_batch(inputBatch)
  self.update_statistics(encodings)
  self.update_weights(inputBatch, encodings)
  return encodings
"""Encodes a whole batch of input arrays, and performs weight updates and updates to the activity
statistics according to the respective methods implemented below."""
X = inputBatch
Y = self.encode_batch(X)
self.update_statistics(Y)
self.update_weights(X,Y)
return Y | [
"def",
"learn_batch",
"(",
"self",
",",
"inputBatch",
")",
":",
"X",
"=",
"inputBatch",
"Y",
"=",
"self",
".",
"encode_batch",
"(",
"X",
")",
"self",
".",
"update_statistics",
"(",
"Y",
")",
"self",
".",
"update_weights",
"(",
"X",
",",
"Y",
")",
"re... | Encodes a whole batch of input arrays, and performs weight updates and updates to the activity
statistics according to the respective methods implemented below. | [
"Encodes",
"a",
"whole",
"batch",
"of",
"input",
"arrays",
"and",
"performs",
"weight",
"updates",
"and",
"updates",
"to",
"the",
"activity",
"statistics",
"according",
"to",
"the",
"respective",
"methods",
"implemented",
"below",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/energy_based_pooling/energy_based_models/energy_based_pooler.py#L223-L230 | train | 198,669 |
def update_statistics(self, activityVectors):
  """
  Update the exponential moving averages of individual and pairwise unit
  activity.

  For each 0/1 activity vector y, entry (i, j) of the co-activation matrix
  counts whether units i and j were both active; the diagonal therefore
  holds individual unit activity. The batch mean of these matrices is
  folded into `self.average_activity` through the configured exponential
  moving average with period `self.smoothing_period`.

  @param activityVectors (iterable) batch of binary activity vectors of
         length `self.output_size`
  """
  n = self.output_size
  A = np.zeros((n, n))
  batchSize = len(activityVectors)

  for y in activityVectors:
    # For a 0/1 vector (as produced by encode()), the outer product is
    # exactly the pairwise co-activation indicator; this replaces the
    # previous O(k^2) Python loop over the active units.
    A += np.outer(y, y)

  A = A / batchSize
  self.average_activity = self.exponential_moving_average(
      self.average_activity, A, self.smoothing_period)
self.average_activity = self.exponential_moving_average(self.average_activity, A, self.smoothing_period) | python | def update_statistics(self, activityVectors):
"""Updates the variable that maintains exponential moving averages of
individual and pairwise unit activiy"""
Y = activityVectors
n = self.output_size
A = np.zeros((n, n))
batchSize = len(Y)
for y in Y:
active_units = np.where( y == 1 )[0]
for i in active_units:
for j in active_units:
A[i,j] += 1.
A = A/batchSize
self.average_activity = self.exponential_moving_average(self.average_activity, A, self.smoothing_period) | [
"def",
"update_statistics",
"(",
"self",
",",
"activityVectors",
")",
":",
"Y",
"=",
"activityVectors",
"n",
"=",
"self",
".",
"output_size",
"A",
"=",
"np",
".",
"zeros",
"(",
"(",
"n",
",",
"n",
")",
")",
"batchSize",
"=",
"len",
"(",
"Y",
")",
"... | Updates the variable that maintains exponential moving averages of
individual and pairwise unit activiy | [
"Updates",
"the",
"variable",
"that",
"maintains",
"exponential",
"moving",
"averages",
"of",
"individual",
"and",
"pairwise",
"unit",
"activiy"
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/energy_based_pooling/energy_based_models/energy_based_pooler.py#L232-L247 | train | 198,670 |
def getSparseWeights(weightSparsity, inputSize, outputSize):
  """
  Return a randomly initialized weight matrix with a fixed number of zeroed
  entries per output unit.

  The matrix has shape (outputSize, inputSize). Entries are drawn uniformly
  from [-1/sqrt(inputSize), 1/sqrt(inputSize)], then
  round((1 - weightSparsity) * inputSize) randomly chosen entries of each
  row are forced to zero, so roughly weightSparsity of each row remains
  nonzero.

  @param weightSparsity (float) fraction of weights to keep per row
  @param inputSize      (int)   number of columns (input dimension)
  @param outputSize     (int)   number of rows (output dimension)
  @return (torch.Tensor) the sparsified weight matrix
  """
  # Initialize weights in the typical fashion.
  w = torch.Tensor(outputSize, inputSize)
  stdv = 1. / math.sqrt(w.size(1))
  w.data.uniform_(-stdv, stdv)

  numZeros = int(round((1.0 - weightSparsity) * inputSize))

  outputIndices = np.arange(outputSize)
  # np.long was removed in NumPy 1.24; np.int64 is the equivalent index
  # dtype and is what torch.LongTensor expects.
  inputIndices = np.array([np.random.permutation(inputSize)[:numZeros]
                           for _ in outputIndices], dtype=np.int64)

  # Create (row, col) tensor indices for every weight that gets zeroed.
  zeroIndices = np.empty((outputSize, numZeros, 2), dtype=np.int64)
  zeroIndices[:, :, 0] = outputIndices[:, None]
  zeroIndices[:, :, 1] = inputIndices
  zeroIndices = torch.LongTensor(zeroIndices.reshape(-1, 2))

  zeroWts = (zeroIndices[:, 0], zeroIndices[:, 1])
  w.data[zeroWts] = 0.0
  return w
return w | python | def getSparseWeights(weightSparsity, inputSize, outputSize):
"""
Return a randomly initialized weight matrix
Size is outputSize X inputSize, with sparsity weightSparsity%
"""
# Initialize weights in the typical fashion.
w = torch.Tensor(outputSize, inputSize)
stdv = 1. / math.sqrt(w.size(1))
w.data.uniform_(-stdv, stdv)
numZeros = int(round((1.0 - weightSparsity) * inputSize))
outputIndices = np.arange(outputSize)
inputIndices = np.array([np.random.permutation(inputSize)[:numZeros]
for _ in outputIndices], dtype=np.long)
# Create tensor indices for all non-zero weights
zeroIndices = np.empty((outputSize, numZeros, 2), dtype=np.long)
zeroIndices[:, :, 0] = outputIndices[:, None]
zeroIndices[:, :, 1] = inputIndices
zeroIndices = torch.LongTensor(zeroIndices.reshape(-1, 2))
zeroWts = (zeroIndices[:, 0], zeroIndices[:, 1])
w.data[zeroWts] = 0.0
return w | [
"def",
"getSparseWeights",
"(",
"weightSparsity",
",",
"inputSize",
",",
"outputSize",
")",
":",
"# Initialize weights in the typical fashion.",
"w",
"=",
"torch",
".",
"Tensor",
"(",
"outputSize",
",",
"inputSize",
")",
"stdv",
"=",
"1.",
"/",
"math",
".",
"sqr... | Return a randomly initialized weight matrix
Size is outputSize X inputSize, with sparsity weightSparsity% | [
"Return",
"a",
"randomly",
"initialized",
"weight",
"matrix",
"Size",
"is",
"outputSize",
"X",
"inputSize",
"with",
"sparsity",
"weightSparsity%"
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/sdr_paper/pytorch_experiments/tensor_sdr_properties.py#L34-L59 | train | 198,671 |
def plotOverlapHistogram(v, w, title, base="random"):
  """
  Compute the overlap of vector v with every row of the weight matrix w and
  save a histogram of the overlap scores to "<base>_1".

  @param v     (tensor) input vector
  @param w     (tensor) weight matrix; overlaps are v . w^T
  @param title (str)    plot title
  @param base  (str)    file-name prefix for the saved figure
  @return (tensor) the overlap scores
  """
  overlaps = v.matmul(w.t())

  lo = float(overlaps.min())
  hi = float(overlaps.max())
  edges = np.linspace(lo, hi, 28)

  plt.hist(overlaps, edges, alpha=0.5, label='All cols')
  plt.legend(loc='upper right')
  plt.xlabel("Overlap scores")
  plt.ylabel("Frequency")
  plt.title(title)
  plt.savefig(base + "_1")
  plt.close()

  return overlaps
"""
Given a vector v, compute the overlap with the weight matrix w and save
the histogram of overlaps.
"""
overlaps = v.matmul(w.t())
# Plot histogram of overlaps
bins = np.linspace(float(overlaps.min()), float(overlaps.max()), 28)
plt.hist(overlaps, bins, alpha=0.5, label='All cols')
plt.legend(loc='upper right')
plt.xlabel("Overlap scores")
plt.ylabel("Frequency")
plt.title(title)
plt.savefig(base+"_1")
plt.close()
return overlaps | [
"def",
"plotOverlapHistogram",
"(",
"v",
",",
"w",
",",
"title",
",",
"base",
"=",
"\"random\"",
")",
":",
"overlaps",
"=",
"v",
".",
"matmul",
"(",
"w",
".",
"t",
"(",
")",
")",
"# Plot histogram of overlaps",
"bins",
"=",
"np",
".",
"linspace",
"(",
... | Given a vector v, compute the overlap with the weight matrix w and save
the histogram of overlaps. | [
"Given",
"a",
"vector",
"v",
"compute",
"the",
"overlap",
"with",
"the",
"weight",
"matrix",
"w",
"and",
"save",
"the",
"histogram",
"of",
"overlaps",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/sdr_paper/pytorch_experiments/tensor_sdr_properties.py#L62-L79 | train | 198,672 |
def plotOverlaps(vList, w, base="random", k=20):
  """
  Plot sorted unit-overlap curves for a list of vectors against the weight
  matrix w, shade the k first (most active) units, and save the figure to
  "<base>_2".

  The first vector is drawn in magenta labeled "Random vector"; the second
  in cyan labeled "Test images"; any further vectors are cyan, unlabeled.

  @param vList (list)   vectors to compare against w
  @param w     (tensor) weight matrix; overlaps are v . w^T
  @param base  (str)    file-name prefix for the saved figure
  @param k     (int)    number of units shaded as "Active units"
  """
  for idx, vec in enumerate(vList):
    if idx == 0:
      color, label = "m", "Random vector"
    elif idx == 1:
      color, label = "c", "Test images"
    else:
      color, label = "c", ""

    # Sorted list of overlap values, in decreasing order.
    overlaps = vec.matmul(w.t())
    sortedOverlaps = overlaps.sort()[0].tolist()[0][::-1]
    plt.plot(sortedOverlaps, color, label=label)

  plt.axvspan(0, k, facecolor="g", alpha=0.3, label="Active units")
  plt.legend(loc="upper right")
  plt.xlabel("Units")
  plt.ylabel("Overlap scores")
  plt.title("Sorted unit overlaps of a sparse net.")
  plt.savefig(base + "_2")
  plt.close()
"""
Given a list of vectors v, compute the overlap of each with the weight matrix
w and plot the overlap curves.
"""
for i,v in enumerate(vList):
if i==0:
col = "m"
label = "Random vector"
else:
col="c"
label = ""
if i==1: label="Test images"
# Get a sorted list of overlap values, in decreasing order
overlaps = v.matmul(w.t())
sortedOverlaps = overlaps.sort()[0].tolist()[0][::-1]
plt.plot(sortedOverlaps,col,label=label)
plt.axvspan(0, k, facecolor="g", alpha=0.3, label="Active units")
plt.legend(loc="upper right")
plt.xlabel("Units")
plt.ylabel("Overlap scores")
plt.title("Sorted unit overlaps of a sparse net.")
plt.savefig(base+"_2")
plt.close() | [
"def",
"plotOverlaps",
"(",
"vList",
",",
"w",
",",
"base",
"=",
"\"random\"",
",",
"k",
"=",
"20",
")",
":",
"for",
"i",
",",
"v",
"in",
"enumerate",
"(",
"vList",
")",
":",
"if",
"i",
"==",
"0",
":",
"col",
"=",
"\"m\"",
"label",
"=",
"\"Rand... | Given a list of vectors v, compute the overlap of each with the weight matrix
w and plot the overlap curves. | [
"Given",
"a",
"list",
"of",
"vectors",
"v",
"compute",
"the",
"overlap",
"of",
"each",
"with",
"the",
"weight",
"matrix",
"w",
"and",
"plot",
"the",
"overlap",
"curves",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/sdr_paper/pytorch_experiments/tensor_sdr_properties.py#L82-L106 | train | 198,673 |
def random_mini_batches(X, Y, minibatch_size, seed=None):
  """
  Split inputs X and targets Y into a list of shuffled minibatches.

  Datapoints are columns of X and Y. One random column permutation is drawn
  and both matrices are sliced along it, so corresponding columns of X and
  Y stay paired. The last minibatch may be smaller than minibatch_size.

  @param X (array) input matrix, one datapoint per column
  @param Y (array or None) target matrix, one datapoint per column; when
         None, a dummy 1 x d zero matrix is used in its place
  @param minibatch_size (int) number of datapoints per minibatch
  @param seed (int or None) seed for the shuffle. A local RandomState is
         used so the global numpy RNG state is not mutated; for a given
         integer seed the permutation is identical to the previous
         np.random.seed(...) + np.random.permutation(...) behavior.
  @return (list) of (X_batch, Y_batch) tuples
  """
  d = X.shape[1]
  if Y is None:
    Y = np.zeros((1, d))

  perm = np.random.RandomState(seed).permutation(d)

  minibatches = []
  for start in range(0, d, minibatch_size):
    cols = perm[start:start + minibatch_size]
    minibatches.append((X[:, cols], Y[:, cols]))
  return minibatches
"""
Compute a list of minibatches from inputs X and targets Y.
A datapoint is expected to be represented as a column in
the data matrices X and Y.
"""
d = X.shape[1]
size = minibatch_size
minibatches = []
if Y is None:
Y = np.zeros((1, d))
np.random.seed(seed)
perm = np.random.permutation(d)
for t in range(0, d, size):
subset = perm[t: t+size]
minibatches.append((X[:, subset], Y[:, subset]))
return minibatches | [
"def",
"random_mini_batches",
"(",
"X",
",",
"Y",
",",
"minibatch_size",
",",
"seed",
"=",
"None",
")",
":",
"d",
"=",
"X",
".",
"shape",
"[",
"1",
"]",
"size",
"=",
"minibatch_size",
"minibatches",
"=",
"[",
"]",
"if",
"Y",
"is",
"None",
":",
"Y",... | Compute a list of minibatches from inputs X and targets Y.
A datapoint is expected to be represented as a column in
the data matrices X and Y. | [
"Compute",
"a",
"list",
"of",
"minibatches",
"from",
"inputs",
"X",
"and",
"targets",
"Y",
".",
"A",
"datapoint",
"is",
"expected",
"to",
"be",
"represented",
"as",
"a",
"column",
"in",
"the",
"data",
"matrices",
"X",
"and",
"Y",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/support/lateral_pooler/utils.py#L96-L116 | train | 198,674 |
def generateRandomSDR(numSDR, numDims, numActiveInputBits, seed=42):
  """
  Generate a set of random binary SDRs.

  @param numSDR             (int) number of SDRs to generate
  @param numDims            (int) dimensionality of each SDR
  @param numActiveInputBits (int) number of active (1) bits per SDR
  @param seed               (int) seed for the global numpy RNG
  @return (array) numSDR x numDims binary matrix, one SDR per row
  """
  np.random.seed(seed)
  randomSDRs = np.zeros((numSDR, numDims), dtype=uintType)
  allIndices = np.arange(numDims)

  for row in range(numSDR):
    # Pick numActiveInputBits distinct positions for this SDR.
    chosen = np.random.permutation(allIndices)[:numActiveInputBits]
    randomSDRs[row, chosen] = 1

  return randomSDRs
"""
Generate a set of random SDR's
@param numSDR:
@param nDim:
@param numActiveInputBits:
"""
randomSDRs = np.zeros((numSDR, numDims), dtype=uintType)
indices = np.array(range(numDims))
np.random.seed(seed)
for i in range(numSDR):
randomIndices = np.random.permutation(indices)
activeBits = randomIndices[:numActiveInputBits]
randomSDRs[i, activeBits] = 1
return randomSDRs | [
"def",
"generateRandomSDR",
"(",
"numSDR",
",",
"numDims",
",",
"numActiveInputBits",
",",
"seed",
"=",
"42",
")",
":",
"randomSDRs",
"=",
"np",
".",
"zeros",
"(",
"(",
"numSDR",
",",
"numDims",
")",
",",
"dtype",
"=",
"uintType",
")",
"indices",
"=",
... | Generate a set of random SDR's
@param numSDR:
@param nDim:
@param numActiveInputBits: | [
"Generate",
"a",
"set",
"of",
"random",
"SDR",
"s"
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/sp_paper/sp_metrics.py#L152-L167 | train | 198,675 |
def percentOverlap(x1, x2):
  """
  Compute the percentage of overlap between binary vectors x1 and x2:
  the number of shared active bits divided by the smaller of the two
  activation counts (0 when either vector has no active bits).

  @param x1 (array) binary vector
  @param x2 (array) binary vector
  @return (float) percentage overlap between x1 and x2
  """
  smallerCount = min(np.count_nonzero(x1), np.count_nonzero(x2))
  if smallerCount == 0:
    return 0
  return float(np.dot(x1.T, x2)) / smallerCount
"""
Computes the percentage of overlap between vectors x1 and x2.
@param x1 (array) binary vector
@param x2 (array) binary vector
@param size (int) length of binary vectors
@return percentOverlap (float) percentage overlap between x1 and x2
"""
nonZeroX1 = np.count_nonzero(x1)
nonZeroX2 = np.count_nonzero(x2)
percentOverlap = 0
minX1X2 = min(nonZeroX1, nonZeroX2)
if minX1X2 > 0:
overlap = float(np.dot(x1.T, x2))
percentOverlap = overlap / minX1X2
return percentOverlap | [
"def",
"percentOverlap",
"(",
"x1",
",",
"x2",
")",
":",
"nonZeroX1",
"=",
"np",
".",
"count_nonzero",
"(",
"x1",
")",
"nonZeroX2",
"=",
"np",
".",
"count_nonzero",
"(",
"x2",
")",
"percentOverlap",
"=",
"0",
"minX1X2",
"=",
"min",
"(",
"nonZeroX1",
",... | Computes the percentage of overlap between vectors x1 and x2.
@param x1 (array) binary vector
@param x2 (array) binary vector
@param size (int) length of binary vectors
@return percentOverlap (float) percentage overlap between x1 and x2 | [
"Computes",
"the",
"percentage",
"of",
"overlap",
"between",
"vectors",
"x1",
"and",
"x2",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/sp_paper/sp_metrics.py#L400-L419 | train | 198,676 |
def addNoiseToVector(inputVector, noiseLevel, vectorType):
  """
  Corrupt an SDR in place with the requested noise model.

  @param inputVector (array) binary vector, corrupted in place
  @param noiseLevel (float) amount of noise to apply
  @param vectorType (string) "sparse" (swap equal numbers of on/off bits)
         or "dense" (independently invert bits)
  @raises ValueError for any other vectorType
  """
  if vectorType == 'sparse':
    corruptSparseVector(inputVector, noiseLevel)
    return
  if vectorType == 'dense':
    corruptDenseVector(inputVector, noiseLevel)
    return
  raise ValueError("vectorType must be 'sparse' or 'dense' ")
"""
Add noise to SDRs
@param inputVector (array) binary vector to be corrupted
@param noiseLevel (float) amount of noise to be applied on the vector.
@param vectorType (string) "sparse" or "dense"
"""
if vectorType == 'sparse':
corruptSparseVector(inputVector, noiseLevel)
elif vectorType == 'dense':
corruptDenseVector(inputVector, noiseLevel)
else:
raise ValueError("vectorType must be 'sparse' or 'dense' ") | [
"def",
"addNoiseToVector",
"(",
"inputVector",
",",
"noiseLevel",
",",
"vectorType",
")",
":",
"if",
"vectorType",
"==",
"'sparse'",
":",
"corruptSparseVector",
"(",
"inputVector",
",",
"noiseLevel",
")",
"elif",
"vectorType",
"==",
"'dense'",
":",
"corruptDenseVe... | Add noise to SDRs
@param inputVector (array) binary vector to be corrupted
@param noiseLevel (float) amount of noise to be applied on the vector.
@param vectorType (string) "sparse" or "dense" | [
"Add",
"noise",
"to",
"SDRs"
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/sp_paper/sp_metrics.py#L423-L435 | train | 198,677 |
def corruptDenseVector(vector, noiseLevel):
  """
  Corrupt a binary vector in place by inverting each bit independently with
  probability noiseLevel.

  @param vector (sequence) binary vector, modified in place
  @param noiseLevel (float) per-bit inversion probability
  """
  for i, bit in enumerate(vector):
    if random.random() < noiseLevel:
      # Invert: a 1 becomes 0, anything else becomes 1 (matches the
      # original branch structure exactly).
      vector[i] = 0 if bit == 1 else 1
"""
Corrupts a binary vector by inverting noiseLevel percent of its bits.
@param vector (array) binary vector to be corrupted
@param noiseLevel (float) amount of noise to be applied on the vector.
"""
size = len(vector)
for i in range(size):
rnd = random.random()
if rnd < noiseLevel:
if vector[i] == 1:
vector[i] = 0
else:
vector[i] = 1 | [
"def",
"corruptDenseVector",
"(",
"vector",
",",
"noiseLevel",
")",
":",
"size",
"=",
"len",
"(",
"vector",
")",
"for",
"i",
"in",
"range",
"(",
"size",
")",
":",
"rnd",
"=",
"random",
".",
"random",
"(",
")",
"if",
"rnd",
"<",
"noiseLevel",
":",
"... | Corrupts a binary vector by inverting noiseLevel percent of its bits.
@param vector (array) binary vector to be corrupted
@param noiseLevel (float) amount of noise to be applied on the vector. | [
"Corrupts",
"a",
"binary",
"vector",
"by",
"inverting",
"noiseLevel",
"percent",
"of",
"its",
"bits",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/sp_paper/sp_metrics.py#L438-L452 | train | 198,678 |
def corruptSparseVector(sdr, noiseLevel):
  """
  Add noise to an SDR in place by turning off numNoiseBits of its active
  bits and turning on an equal number of inactive bits, where
  numNoiseBits = int(noiseLevel * number of active bits). The total number
  of active bits is therefore preserved (when enough inactive bits exist).

  @param sdr (array) numpy array holding the SDR, modified in place
  @param noiseLevel (float) fraction of active bits to flip
  """
  numNoiseBits = int(noiseLevel * np.sum(sdr))
  if numNoiseBits <= 0:
    return sdr

  onBits = np.where(sdr > 0)[0]
  offBits = np.where(sdr == 0)[0]

  # Choose random subsets of the on/off bits to swap.
  deactivate = np.random.permutation(onBits)[:numNoiseBits]
  activate = np.random.permutation(offBits)[:numNoiseBits]

  sdr[deactivate] = 0
  sdr[activate] = 1
"""
Add noise to sdr by turning off numNoiseBits active bits and turning on
numNoiseBits in active bits
@param sdr (array) Numpy array of the SDR
@param noiseLevel (float) amount of noise to be applied on the vector.
"""
numNoiseBits = int(noiseLevel * np.sum(sdr))
if numNoiseBits <= 0:
return sdr
activeBits = np.where(sdr > 0)[0]
inActiveBits = np.where(sdr == 0)[0]
turnOffBits = np.random.permutation(activeBits)
turnOnBits = np.random.permutation(inActiveBits)
turnOffBits = turnOffBits[:numNoiseBits]
turnOnBits = turnOnBits[:numNoiseBits]
sdr[turnOffBits] = 0
sdr[turnOnBits] = 1 | [
"def",
"corruptSparseVector",
"(",
"sdr",
",",
"noiseLevel",
")",
":",
"numNoiseBits",
"=",
"int",
"(",
"noiseLevel",
"*",
"np",
".",
"sum",
"(",
"sdr",
")",
")",
"if",
"numNoiseBits",
"<=",
"0",
":",
"return",
"sdr",
"activeBits",
"=",
"np",
".",
"whe... | Add noise to sdr by turning off numNoiseBits active bits and turning on
numNoiseBits in active bits
@param sdr (array) Numpy array of the SDR
@param noiseLevel (float) amount of noise to be applied on the vector. | [
"Add",
"noise",
"to",
"sdr",
"by",
"turning",
"off",
"numNoiseBits",
"active",
"bits",
"and",
"turning",
"on",
"numNoiseBits",
"in",
"active",
"bits"
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/sp_paper/sp_metrics.py#L456-L476 | train | 198,679 |
def calculateOverlapCurve(sp, inputVectors):
  """
  Evaluate the noise robustness of a spatial pooler over a set of SDRs.

  For each input SDR and each of 21 noise levels in [0, 1], a corrupted
  copy of the input is produced with corruptSparseVector, the clean and the
  corrupted inputs are both run through the SP without learning, and the
  input-side and output-side overlaps are recorded.

  @param sp a spatial pooler instance
  @param inputVectors (array) one SDR per row
  @return (tuple) (noiseLevelList, inputOverlapScore, outputOverlapScore)
  """
  columnNumber = np.prod(sp.getColumnDimensions())
  numInputVector, _ = inputVectors.shape

  outputColumns = np.zeros((numInputVector, columnNumber), dtype=uintType)
  outputColumnsCorrupted = np.zeros((numInputVector, columnNumber),
                                    dtype=uintType)

  noiseLevelList = np.linspace(0, 1.0, 21)
  inputOverlapScore = np.zeros((numInputVector, len(noiseLevelList)))
  outputOverlapScore = np.zeros((numInputVector, len(noiseLevelList)))

  for i in range(numInputVector):
    for j, noiseLevel in enumerate(noiseLevelList):
      corrupted = copy.deepcopy(inputVectors[i][:])
      corruptSparseVector(corrupted, noiseLevel)

      sp.compute(inputVectors[i][:], False, outputColumns[i][:])
      sp.compute(corrupted, False, outputColumnsCorrupted[i][:])

      inputOverlapScore[i][j] = percentOverlap(inputVectors[i][:], corrupted)
      outputOverlapScore[i][j] = percentOverlap(outputColumns[i][:],
                                                outputColumnsCorrupted[i][:])

  return noiseLevelList, inputOverlapScore, outputOverlapScore
return noiseLevelList, inputOverlapScore, outputOverlapScore | python | def calculateOverlapCurve(sp, inputVectors):
"""
Evalulate noise robustness of SP for a given set of SDRs
@param sp a spatial pooler instance
@param inputVectors list of arrays.
:return:
"""
columnNumber = np.prod(sp.getColumnDimensions())
numInputVector, inputSize = inputVectors.shape
outputColumns = np.zeros((numInputVector, columnNumber), dtype=uintType)
outputColumnsCorrupted = np.zeros((numInputVector, columnNumber),
dtype=uintType)
noiseLevelList = np.linspace(0, 1.0, 21)
inputOverlapScore = np.zeros((numInputVector, len(noiseLevelList)))
outputOverlapScore = np.zeros((numInputVector, len(noiseLevelList)))
for i in range(numInputVector):
for j in range(len(noiseLevelList)):
inputVectorCorrupted = copy.deepcopy(inputVectors[i][:])
corruptSparseVector(inputVectorCorrupted, noiseLevelList[j])
sp.compute(inputVectors[i][:], False, outputColumns[i][:])
sp.compute(inputVectorCorrupted, False,
outputColumnsCorrupted[i][:])
inputOverlapScore[i][j] = percentOverlap(inputVectors[i][:],
inputVectorCorrupted)
outputOverlapScore[i][j] = percentOverlap(outputColumns[i][:],
outputColumnsCorrupted[i][:])
return noiseLevelList, inputOverlapScore, outputOverlapScore | [
"def",
"calculateOverlapCurve",
"(",
"sp",
",",
"inputVectors",
")",
":",
"columnNumber",
"=",
"np",
".",
"prod",
"(",
"sp",
".",
"getColumnDimensions",
"(",
")",
")",
"numInputVector",
",",
"inputSize",
"=",
"inputVectors",
".",
"shape",
"outputColumns",
"=",... | Evalulate noise robustness of SP for a given set of SDRs
@param sp a spatial pooler instance
@param inputVectors list of arrays.
:return: | [
"Evalulate",
"noise",
"robustness",
"of",
"SP",
"for",
"a",
"given",
"set",
"of",
"SDRs"
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/sp_paper/sp_metrics.py#L480-L511 | train | 198,680 |
def classifySPoutput(targetOutputColumns, outputColumns):
  """
  Classify an SP output by nearest overlap against class templates.

  @param targetOutputColumns (array) one target output per row, one row
         per class
  @param outputColumns (array) the current SP output
  @return (int) index of the class whose target overlaps the output most
  """
  overlaps = np.array([percentOverlap(outputColumns, target)
                       for target in targetOutputColumns])
  return np.argmax(overlaps)
"""
Classify the SP output
@param targetOutputColumns (list) The target outputs, corresponding to
different classes
@param outputColumns (array) The current output
@return classLabel (int) classification outcome
"""
numTargets, numDims = targetOutputColumns.shape
overlap = np.zeros((numTargets,))
for i in range(numTargets):
overlap[i] = percentOverlap(outputColumns, targetOutputColumns[i, :])
classLabel = np.argmax(overlap)
return classLabel | [
"def",
"classifySPoutput",
"(",
"targetOutputColumns",
",",
"outputColumns",
")",
":",
"numTargets",
",",
"numDims",
"=",
"targetOutputColumns",
".",
"shape",
"overlap",
"=",
"np",
".",
"zeros",
"(",
"(",
"numTargets",
",",
")",
")",
"for",
"i",
"in",
"range... | Classify the SP output
@param targetOutputColumns (list) The target outputs, corresponding to
different classes
@param outputColumns (array) The current output
@return classLabel (int) classification outcome | [
"Classify",
"the",
"SP",
"output"
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/sp_paper/sp_metrics.py#L515-L528 | train | 198,681 |
numenta/htmresearch | htmresearch/frameworks/sp_paper/sp_metrics.py | classificationAccuracyVsNoise | def classificationAccuracyVsNoise(sp, inputVectors, noiseLevelList):
"""
Evaluate whether the SP output is classifiable, with varying amount of noise
@param sp a spatial pooler instance
@param inputVectors (list) list of input SDRs
@param noiseLevelList (list) list of noise levels
:return:
"""
numInputVector, inputSize = inputVectors.shape
if sp is None:
targetOutputColumns = copy.deepcopy(inputVectors)
else:
columnNumber = np.prod(sp.getColumnDimensions())
# calculate target output given the uncorrupted input vectors
targetOutputColumns = np.zeros((numInputVector, columnNumber),
dtype=uintType)
for i in range(numInputVector):
sp.compute(inputVectors[i][:], False, targetOutputColumns[i][:])
outcomes = np.zeros((len(noiseLevelList), numInputVector))
for i in range(len(noiseLevelList)):
for j in range(numInputVector):
corruptedInputVector = copy.deepcopy(inputVectors[j][:])
corruptSparseVector(corruptedInputVector, noiseLevelList[i])
if sp is None:
outputColumns = copy.deepcopy(corruptedInputVector)
else:
outputColumns = np.zeros((columnNumber, ), dtype=uintType)
sp.compute(corruptedInputVector, False, outputColumns)
predictedClassLabel = classifySPoutput(targetOutputColumns, outputColumns)
outcomes[i][j] = predictedClassLabel == j
predictionAccuracy = np.mean(outcomes, 1)
return predictionAccuracy | python | def classificationAccuracyVsNoise(sp, inputVectors, noiseLevelList):
"""
Evaluate whether the SP output is classifiable, with varying amount of noise
@param sp a spatial pooler instance
@param inputVectors (list) list of input SDRs
@param noiseLevelList (list) list of noise levels
:return:
"""
numInputVector, inputSize = inputVectors.shape
if sp is None:
targetOutputColumns = copy.deepcopy(inputVectors)
else:
columnNumber = np.prod(sp.getColumnDimensions())
# calculate target output given the uncorrupted input vectors
targetOutputColumns = np.zeros((numInputVector, columnNumber),
dtype=uintType)
for i in range(numInputVector):
sp.compute(inputVectors[i][:], False, targetOutputColumns[i][:])
outcomes = np.zeros((len(noiseLevelList), numInputVector))
for i in range(len(noiseLevelList)):
for j in range(numInputVector):
corruptedInputVector = copy.deepcopy(inputVectors[j][:])
corruptSparseVector(corruptedInputVector, noiseLevelList[i])
if sp is None:
outputColumns = copy.deepcopy(corruptedInputVector)
else:
outputColumns = np.zeros((columnNumber, ), dtype=uintType)
sp.compute(corruptedInputVector, False, outputColumns)
predictedClassLabel = classifySPoutput(targetOutputColumns, outputColumns)
outcomes[i][j] = predictedClassLabel == j
predictionAccuracy = np.mean(outcomes, 1)
return predictionAccuracy | [
"def",
"classificationAccuracyVsNoise",
"(",
"sp",
",",
"inputVectors",
",",
"noiseLevelList",
")",
":",
"numInputVector",
",",
"inputSize",
"=",
"inputVectors",
".",
"shape",
"if",
"sp",
"is",
"None",
":",
"targetOutputColumns",
"=",
"copy",
".",
"deepcopy",
"(... | Evaluate whether the SP output is classifiable, with varying amount of noise
@param sp a spatial pooler instance
@param inputVectors (list) list of input SDRs
@param noiseLevelList (list) list of noise levels
:return: | [
"Evaluate",
"whether",
"the",
"SP",
"output",
"is",
"classifiable",
"with",
"varying",
"amount",
"of",
"noise"
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/sp_paper/sp_metrics.py#L532-L568 | train | 198,682 |
numenta/htmresearch | htmresearch/frameworks/sp_paper/sp_metrics.py | plotExampleInputOutput | def plotExampleInputOutput(sp, inputVectors, saveFigPrefix=None):
"""
Plot example input & output
@param sp: an spatial pooler instance
@param inputVectors: a set of input vectors
"""
numInputVector, inputSize = inputVectors.shape
numColumns = np.prod(sp.getColumnDimensions())
outputColumns = np.zeros((numInputVector, numColumns), dtype=uintType)
inputOverlap = np.zeros((numInputVector, numColumns), dtype=uintType)
connectedCounts = np.zeros((numColumns,), dtype=uintType)
sp.getConnectedCounts(connectedCounts)
winnerInputOverlap = np.zeros(numInputVector)
for i in range(numInputVector):
sp.compute(inputVectors[i][:], False, outputColumns[i][:])
inputOverlap[i][:] = sp.getOverlaps()
activeColumns = np.where(outputColumns[i][:] > 0)[0]
if len(activeColumns) > 0:
winnerInputOverlap[i] = np.mean(
inputOverlap[i][np.where(outputColumns[i][:] > 0)[0]])
fig, axs = plt.subplots(2, 1)
axs[0].imshow(inputVectors[:, :200], cmap='gray', interpolation="nearest")
axs[0].set_ylabel('input #')
axs[0].set_title('input vectors')
axs[1].imshow(outputColumns[:, :200], cmap='gray', interpolation="nearest")
axs[1].set_ylabel('input #')
axs[1].set_title('output vectors')
if saveFigPrefix is not None:
plt.savefig('figures/{}_example_input_output.pdf'.format(saveFigPrefix))
inputDensity = np.sum(inputVectors, 1) / float(inputSize)
outputDensity = np.sum(outputColumns, 1) / float(numColumns)
fig, axs = plt.subplots(2, 1)
axs[0].plot(inputDensity)
axs[0].set_xlabel('input #')
axs[0].set_ylim([0, 0.2])
axs[1].plot(outputDensity)
axs[1].set_xlabel('input #')
axs[1].set_ylim([0, 0.05])
if saveFigPrefix is not None:
plt.savefig('figures/{}_example_input_output_density.pdf'.format(saveFigPrefix)) | python | def plotExampleInputOutput(sp, inputVectors, saveFigPrefix=None):
"""
Plot example input & output
@param sp: an spatial pooler instance
@param inputVectors: a set of input vectors
"""
numInputVector, inputSize = inputVectors.shape
numColumns = np.prod(sp.getColumnDimensions())
outputColumns = np.zeros((numInputVector, numColumns), dtype=uintType)
inputOverlap = np.zeros((numInputVector, numColumns), dtype=uintType)
connectedCounts = np.zeros((numColumns,), dtype=uintType)
sp.getConnectedCounts(connectedCounts)
winnerInputOverlap = np.zeros(numInputVector)
for i in range(numInputVector):
sp.compute(inputVectors[i][:], False, outputColumns[i][:])
inputOverlap[i][:] = sp.getOverlaps()
activeColumns = np.where(outputColumns[i][:] > 0)[0]
if len(activeColumns) > 0:
winnerInputOverlap[i] = np.mean(
inputOverlap[i][np.where(outputColumns[i][:] > 0)[0]])
fig, axs = plt.subplots(2, 1)
axs[0].imshow(inputVectors[:, :200], cmap='gray', interpolation="nearest")
axs[0].set_ylabel('input #')
axs[0].set_title('input vectors')
axs[1].imshow(outputColumns[:, :200], cmap='gray', interpolation="nearest")
axs[1].set_ylabel('input #')
axs[1].set_title('output vectors')
if saveFigPrefix is not None:
plt.savefig('figures/{}_example_input_output.pdf'.format(saveFigPrefix))
inputDensity = np.sum(inputVectors, 1) / float(inputSize)
outputDensity = np.sum(outputColumns, 1) / float(numColumns)
fig, axs = plt.subplots(2, 1)
axs[0].plot(inputDensity)
axs[0].set_xlabel('input #')
axs[0].set_ylim([0, 0.2])
axs[1].plot(outputDensity)
axs[1].set_xlabel('input #')
axs[1].set_ylim([0, 0.05])
if saveFigPrefix is not None:
plt.savefig('figures/{}_example_input_output_density.pdf'.format(saveFigPrefix)) | [
"def",
"plotExampleInputOutput",
"(",
"sp",
",",
"inputVectors",
",",
"saveFigPrefix",
"=",
"None",
")",
":",
"numInputVector",
",",
"inputSize",
"=",
"inputVectors",
".",
"shape",
"numColumns",
"=",
"np",
".",
"prod",
"(",
"sp",
".",
"getColumnDimensions",
"(... | Plot example input & output
@param sp: an spatial pooler instance
@param inputVectors: a set of input vectors | [
"Plot",
"example",
"input",
"&",
"output"
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/sp_paper/sp_metrics.py#L571-L617 | train | 198,683 |
numenta/htmresearch | htmresearch/frameworks/sp_paper/sp_metrics.py | inspectSpatialPoolerStats | def inspectSpatialPoolerStats(sp, inputVectors, saveFigPrefix=None):
"""
Inspect the statistics of a spatial pooler given a set of input vectors
@param sp: an spatial pooler instance
@param inputVectors: a set of input vectors
"""
numInputVector, inputSize = inputVectors.shape
numColumns = np.prod(sp.getColumnDimensions())
outputColumns = np.zeros((numInputVector, numColumns), dtype=uintType)
inputOverlap = np.zeros((numInputVector, numColumns), dtype=uintType)
connectedCounts = np.zeros((numColumns, ), dtype=uintType)
sp.getConnectedCounts(connectedCounts)
winnerInputOverlap = np.zeros(numInputVector)
for i in range(numInputVector):
sp.compute(inputVectors[i][:], False, outputColumns[i][:])
inputOverlap[i][:] = sp.getOverlaps()
activeColumns = np.where(outputColumns[i][:] > 0)[0]
if len(activeColumns) > 0:
winnerInputOverlap[i] = np.mean(
inputOverlap[i][np.where(outputColumns[i][:] > 0)[0]])
avgInputOverlap = np.mean(inputOverlap, 0)
entropy = calculateEntropy(outputColumns)
activationProb = np.mean(outputColumns.astype(realDType), 0)
dutyCycleDist, binEdge = np.histogram(activationProb,
bins=10, range=[-0.005, 0.095])
dutyCycleDist = dutyCycleDist.astype('float32') / np.sum(dutyCycleDist)
binCenter = (binEdge[1:] + binEdge[:-1])/2
fig, axs = plt.subplots(2, 2)
axs[0, 0].hist(connectedCounts)
axs[0, 0].set_xlabel('# Connected Synapse')
axs[0, 1].hist(winnerInputOverlap)
axs[0, 1].set_xlabel('# winner input overlap')
axs[1, 0].bar(binEdge[:-1]+0.001, dutyCycleDist, width=.008)
axs[1, 0].set_xlim([-0.005, .1])
axs[1, 0].set_xlabel('Activation Frequency')
axs[1, 0].set_title('Entropy: {}'.format(entropy))
axs[1, 1].plot(connectedCounts, activationProb, '.')
axs[1, 1].set_xlabel('connection #')
axs[1, 1].set_ylabel('activation freq')
plt.tight_layout()
if saveFigPrefix is not None:
plt.savefig('figures/{}_network_stats.pdf'.format(saveFigPrefix))
return fig | python | def inspectSpatialPoolerStats(sp, inputVectors, saveFigPrefix=None):
"""
Inspect the statistics of a spatial pooler given a set of input vectors
@param sp: an spatial pooler instance
@param inputVectors: a set of input vectors
"""
numInputVector, inputSize = inputVectors.shape
numColumns = np.prod(sp.getColumnDimensions())
outputColumns = np.zeros((numInputVector, numColumns), dtype=uintType)
inputOverlap = np.zeros((numInputVector, numColumns), dtype=uintType)
connectedCounts = np.zeros((numColumns, ), dtype=uintType)
sp.getConnectedCounts(connectedCounts)
winnerInputOverlap = np.zeros(numInputVector)
for i in range(numInputVector):
sp.compute(inputVectors[i][:], False, outputColumns[i][:])
inputOverlap[i][:] = sp.getOverlaps()
activeColumns = np.where(outputColumns[i][:] > 0)[0]
if len(activeColumns) > 0:
winnerInputOverlap[i] = np.mean(
inputOverlap[i][np.where(outputColumns[i][:] > 0)[0]])
avgInputOverlap = np.mean(inputOverlap, 0)
entropy = calculateEntropy(outputColumns)
activationProb = np.mean(outputColumns.astype(realDType), 0)
dutyCycleDist, binEdge = np.histogram(activationProb,
bins=10, range=[-0.005, 0.095])
dutyCycleDist = dutyCycleDist.astype('float32') / np.sum(dutyCycleDist)
binCenter = (binEdge[1:] + binEdge[:-1])/2
fig, axs = plt.subplots(2, 2)
axs[0, 0].hist(connectedCounts)
axs[0, 0].set_xlabel('# Connected Synapse')
axs[0, 1].hist(winnerInputOverlap)
axs[0, 1].set_xlabel('# winner input overlap')
axs[1, 0].bar(binEdge[:-1]+0.001, dutyCycleDist, width=.008)
axs[1, 0].set_xlim([-0.005, .1])
axs[1, 0].set_xlabel('Activation Frequency')
axs[1, 0].set_title('Entropy: {}'.format(entropy))
axs[1, 1].plot(connectedCounts, activationProb, '.')
axs[1, 1].set_xlabel('connection #')
axs[1, 1].set_ylabel('activation freq')
plt.tight_layout()
if saveFigPrefix is not None:
plt.savefig('figures/{}_network_stats.pdf'.format(saveFigPrefix))
return fig | [
"def",
"inspectSpatialPoolerStats",
"(",
"sp",
",",
"inputVectors",
",",
"saveFigPrefix",
"=",
"None",
")",
":",
"numInputVector",
",",
"inputSize",
"=",
"inputVectors",
".",
"shape",
"numColumns",
"=",
"np",
".",
"prod",
"(",
"sp",
".",
"getColumnDimensions",
... | Inspect the statistics of a spatial pooler given a set of input vectors
@param sp: an spatial pooler instance
@param inputVectors: a set of input vectors | [
"Inspect",
"the",
"statistics",
"of",
"a",
"spatial",
"pooler",
"given",
"a",
"set",
"of",
"input",
"vectors"
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/sp_paper/sp_metrics.py#L621-L673 | train | 198,684 |
numenta/htmresearch | htmresearch/frameworks/sp_paper/sp_metrics.py | calculateEntropy | def calculateEntropy(activeColumns, type='binary'):
"""
calculate the mean entropy given activation history
@param activeColumns (array) 2D numpy array of activation history
@return entropy (float) mean entropy
"""
activationProb = np.mean(activeColumns, 0)
if type == 'binary':
totalEntropy = np.sum(binaryEntropyVectorized(activationProb))
elif type == 'renyi':
totalEntropy = np.sum(renyiEntropyVectorized(activationProb))
else:
raise ValueError('unknown entropy type')
numberOfColumns = activeColumns.shape[1]
# return mean entropy
return totalEntropy/numberOfColumns | python | def calculateEntropy(activeColumns, type='binary'):
"""
calculate the mean entropy given activation history
@param activeColumns (array) 2D numpy array of activation history
@return entropy (float) mean entropy
"""
activationProb = np.mean(activeColumns, 0)
if type == 'binary':
totalEntropy = np.sum(binaryEntropyVectorized(activationProb))
elif type == 'renyi':
totalEntropy = np.sum(renyiEntropyVectorized(activationProb))
else:
raise ValueError('unknown entropy type')
numberOfColumns = activeColumns.shape[1]
# return mean entropy
return totalEntropy/numberOfColumns | [
"def",
"calculateEntropy",
"(",
"activeColumns",
",",
"type",
"=",
"'binary'",
")",
":",
"activationProb",
"=",
"np",
".",
"mean",
"(",
"activeColumns",
",",
"0",
")",
"if",
"type",
"==",
"'binary'",
":",
"totalEntropy",
"=",
"np",
".",
"sum",
"(",
"bina... | calculate the mean entropy given activation history
@param activeColumns (array) 2D numpy array of activation history
@return entropy (float) mean entropy | [
"calculate",
"the",
"mean",
"entropy",
"given",
"activation",
"history"
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/sp_paper/sp_metrics.py#L741-L757 | train | 198,685 |
numenta/htmresearch | htmresearch/frameworks/sp_paper/sp_metrics.py | meanMutualInformation | def meanMutualInformation(sp, activeColumnsCurrentEpoch, columnsUnderInvestigation = []):
"""
Computes the mean of the mutual information
of pairs taken from a list of columns.
"""
if len(columnsUnderInvestigation) == 0:
columns = range(np.prod(sp.getColumnDimensions()))
else:
columns = columnsUnderInvestigation
numCols = len(columns)
sumMutualInfo = 0
normalizingConst = numCols*(numCols - 1)/2
for i in range(numCols):
for j in range(i+1, numCols):
sumMutualInfo += mutualInformation(sp, activeColumnsCurrentEpoch, columns[i], columns[j])
return sumMutualInfo/normalizingConst | python | def meanMutualInformation(sp, activeColumnsCurrentEpoch, columnsUnderInvestigation = []):
"""
Computes the mean of the mutual information
of pairs taken from a list of columns.
"""
if len(columnsUnderInvestigation) == 0:
columns = range(np.prod(sp.getColumnDimensions()))
else:
columns = columnsUnderInvestigation
numCols = len(columns)
sumMutualInfo = 0
normalizingConst = numCols*(numCols - 1)/2
for i in range(numCols):
for j in range(i+1, numCols):
sumMutualInfo += mutualInformation(sp, activeColumnsCurrentEpoch, columns[i], columns[j])
return sumMutualInfo/normalizingConst | [
"def",
"meanMutualInformation",
"(",
"sp",
",",
"activeColumnsCurrentEpoch",
",",
"columnsUnderInvestigation",
"=",
"[",
"]",
")",
":",
"if",
"len",
"(",
"columnsUnderInvestigation",
")",
"==",
"0",
":",
"columns",
"=",
"range",
"(",
"np",
".",
"prod",
"(",
... | Computes the mean of the mutual information
of pairs taken from a list of columns. | [
"Computes",
"the",
"mean",
"of",
"the",
"mutual",
"information",
"of",
"pairs",
"taken",
"from",
"a",
"list",
"of",
"columns",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/sp_paper/sp_metrics.py#L924-L940 | train | 198,686 |
numenta/htmresearch | htmresearch/frameworks/thalamus/thalamus.py | Thalamus.learnL6Pattern | def learnL6Pattern(self, l6Pattern, cellsToLearnOn):
"""
Learn the given l6Pattern on TRN cell dendrites. The TRN cells to learn
are given in cellsTeLearnOn. Each of these cells will learn this pattern on
a single dendritic segment.
:param l6Pattern:
An SDR from L6. List of indices corresponding to L6 cells.
:param cellsToLearnOn:
Each cell index is (x,y) corresponding to the TRN cells that should learn
this pattern. For each cell, create a new dendrite that stores this
pattern. The SDR is stored on this dendrite
"""
cellIndices = [self.trnCellIndex(x) for x in cellsToLearnOn]
newSegments = self.trnConnections.createSegments(cellIndices)
self.trnConnections.growSynapses(newSegments, l6Pattern, 1.0) | python | def learnL6Pattern(self, l6Pattern, cellsToLearnOn):
"""
Learn the given l6Pattern on TRN cell dendrites. The TRN cells to learn
are given in cellsTeLearnOn. Each of these cells will learn this pattern on
a single dendritic segment.
:param l6Pattern:
An SDR from L6. List of indices corresponding to L6 cells.
:param cellsToLearnOn:
Each cell index is (x,y) corresponding to the TRN cells that should learn
this pattern. For each cell, create a new dendrite that stores this
pattern. The SDR is stored on this dendrite
"""
cellIndices = [self.trnCellIndex(x) for x in cellsToLearnOn]
newSegments = self.trnConnections.createSegments(cellIndices)
self.trnConnections.growSynapses(newSegments, l6Pattern, 1.0) | [
"def",
"learnL6Pattern",
"(",
"self",
",",
"l6Pattern",
",",
"cellsToLearnOn",
")",
":",
"cellIndices",
"=",
"[",
"self",
".",
"trnCellIndex",
"(",
"x",
")",
"for",
"x",
"in",
"cellsToLearnOn",
"]",
"newSegments",
"=",
"self",
".",
"trnConnections",
".",
"... | Learn the given l6Pattern on TRN cell dendrites. The TRN cells to learn
are given in cellsTeLearnOn. Each of these cells will learn this pattern on
a single dendritic segment.
:param l6Pattern:
An SDR from L6. List of indices corresponding to L6 cells.
:param cellsToLearnOn:
Each cell index is (x,y) corresponding to the TRN cells that should learn
this pattern. For each cell, create a new dendrite that stores this
pattern. The SDR is stored on this dendrite | [
"Learn",
"the",
"given",
"l6Pattern",
"on",
"TRN",
"cell",
"dendrites",
".",
"The",
"TRN",
"cells",
"to",
"learn",
"are",
"given",
"in",
"cellsTeLearnOn",
".",
"Each",
"of",
"these",
"cells",
"will",
"learn",
"this",
"pattern",
"on",
"a",
"single",
"dendri... | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/thalamus/thalamus.py#L143-L161 | train | 198,687 |
numenta/htmresearch | htmresearch/frameworks/thalamus/thalamus.py | Thalamus.computeFeedForwardActivity | def computeFeedForwardActivity(self, feedForwardInput):
"""
Activate trnCells according to the l6Input. These in turn will impact
bursting mode in relay cells that are connected to these trnCells.
Given the feedForwardInput, compute which cells will be silent, tonic,
or bursting.
:param feedForwardInput:
a numpy matrix of shape relayCellShape containing 0's and 1's
:return:
feedForwardInput is modified to contain 0, 1, or 2. A "2" indicates
bursting cells.
"""
ff = feedForwardInput.copy()
# For each relay cell, see if any of its FF inputs are active.
for x in range(self.relayWidth):
for y in range(self.relayHeight):
inputCells = self._preSynapticFFCells(x, y)
for idx in inputCells:
if feedForwardInput[idx] != 0:
ff[x, y] = 1.0
continue
# If yes, and it is in burst mode, this cell bursts
# If yes, and it is not in burst mode, then we just get tonic input.
# ff += self.burstReadyCells * ff
ff2 = ff * 0.4 + self.burstReadyCells * ff
return ff2 | python | def computeFeedForwardActivity(self, feedForwardInput):
"""
Activate trnCells according to the l6Input. These in turn will impact
bursting mode in relay cells that are connected to these trnCells.
Given the feedForwardInput, compute which cells will be silent, tonic,
or bursting.
:param feedForwardInput:
a numpy matrix of shape relayCellShape containing 0's and 1's
:return:
feedForwardInput is modified to contain 0, 1, or 2. A "2" indicates
bursting cells.
"""
ff = feedForwardInput.copy()
# For each relay cell, see if any of its FF inputs are active.
for x in range(self.relayWidth):
for y in range(self.relayHeight):
inputCells = self._preSynapticFFCells(x, y)
for idx in inputCells:
if feedForwardInput[idx] != 0:
ff[x, y] = 1.0
continue
# If yes, and it is in burst mode, this cell bursts
# If yes, and it is not in burst mode, then we just get tonic input.
# ff += self.burstReadyCells * ff
ff2 = ff * 0.4 + self.burstReadyCells * ff
return ff2 | [
"def",
"computeFeedForwardActivity",
"(",
"self",
",",
"feedForwardInput",
")",
":",
"ff",
"=",
"feedForwardInput",
".",
"copy",
"(",
")",
"# For each relay cell, see if any of its FF inputs are active.",
"for",
"x",
"in",
"range",
"(",
"self",
".",
"relayWidth",
")",... | Activate trnCells according to the l6Input. These in turn will impact
bursting mode in relay cells that are connected to these trnCells.
Given the feedForwardInput, compute which cells will be silent, tonic,
or bursting.
:param feedForwardInput:
a numpy matrix of shape relayCellShape containing 0's and 1's
:return:
feedForwardInput is modified to contain 0, 1, or 2. A "2" indicates
bursting cells. | [
"Activate",
"trnCells",
"according",
"to",
"the",
"l6Input",
".",
"These",
"in",
"turn",
"will",
"impact",
"bursting",
"mode",
"in",
"relay",
"cells",
"that",
"are",
"connected",
"to",
"these",
"trnCells",
".",
"Given",
"the",
"feedForwardInput",
"compute",
"w... | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/thalamus/thalamus.py#L202-L231 | train | 198,688 |
numenta/htmresearch | htmresearch/frameworks/thalamus/thalamus.py | Thalamus.reset | def reset(self):
"""
Set everything back to zero
"""
self.trnOverlaps = []
self.activeTRNSegments = []
self.activeTRNCellIndices = []
self.relayOverlaps = []
self.activeRelaySegments = []
self.burstReadyCellIndices = []
self.burstReadyCells = np.zeros((self.relayWidth, self.relayHeight)) | python | def reset(self):
"""
Set everything back to zero
"""
self.trnOverlaps = []
self.activeTRNSegments = []
self.activeTRNCellIndices = []
self.relayOverlaps = []
self.activeRelaySegments = []
self.burstReadyCellIndices = []
self.burstReadyCells = np.zeros((self.relayWidth, self.relayHeight)) | [
"def",
"reset",
"(",
"self",
")",
":",
"self",
".",
"trnOverlaps",
"=",
"[",
"]",
"self",
".",
"activeTRNSegments",
"=",
"[",
"]",
"self",
".",
"activeTRNCellIndices",
"=",
"[",
"]",
"self",
".",
"relayOverlaps",
"=",
"[",
"]",
"self",
".",
"activeRela... | Set everything back to zero | [
"Set",
"everything",
"back",
"to",
"zero"
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/thalamus/thalamus.py#L234-L244 | train | 198,689 |
numenta/htmresearch | htmresearch/frameworks/thalamus/thalamus.py | Thalamus._initializeTRNToRelayCellConnections | def _initializeTRNToRelayCellConnections(self):
"""
Initialize TRN to relay cell connectivity. For each relay cell, create a
dendritic segment for each TRN cell it connects to.
"""
for x in range(self.relayWidth):
for y in range(self.relayHeight):
# Create one dendrite for each trn cell that projects to this relay cell
# This dendrite contains one synapse corresponding to this TRN->relay
# connection.
relayCellIndex = self.relayCellIndex((x,y))
trnCells = self._preSynapticTRNCells(x, y)
for trnCell in trnCells:
newSegment = self.relayConnections.createSegments([relayCellIndex])
self.relayConnections.growSynapses(newSegment,
[self.trnCellIndex(trnCell)], 1.0) | python | def _initializeTRNToRelayCellConnections(self):
"""
Initialize TRN to relay cell connectivity. For each relay cell, create a
dendritic segment for each TRN cell it connects to.
"""
for x in range(self.relayWidth):
for y in range(self.relayHeight):
# Create one dendrite for each trn cell that projects to this relay cell
# This dendrite contains one synapse corresponding to this TRN->relay
# connection.
relayCellIndex = self.relayCellIndex((x,y))
trnCells = self._preSynapticTRNCells(x, y)
for trnCell in trnCells:
newSegment = self.relayConnections.createSegments([relayCellIndex])
self.relayConnections.growSynapses(newSegment,
[self.trnCellIndex(trnCell)], 1.0) | [
"def",
"_initializeTRNToRelayCellConnections",
"(",
"self",
")",
":",
"for",
"x",
"in",
"range",
"(",
"self",
".",
"relayWidth",
")",
":",
"for",
"y",
"in",
"range",
"(",
"self",
".",
"relayHeight",
")",
":",
"# Create one dendrite for each trn cell that projects ... | Initialize TRN to relay cell connectivity. For each relay cell, create a
dendritic segment for each TRN cell it connects to. | [
"Initialize",
"TRN",
"to",
"relay",
"cell",
"connectivity",
".",
"For",
"each",
"relay",
"cell",
"create",
"a",
"dendritic",
"segment",
"for",
"each",
"TRN",
"cell",
"it",
"connects",
"to",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/thalamus/thalamus.py#L295-L311 | train | 198,690 |
numenta/htmresearch | projects/cio_sdr/compare_words.py | countWordOverlapFrequencies | def countWordOverlapFrequencies(filename="goodOverlapPairs.pkl"):
"""
Count how many high overlaps each word has, and print it out
"""
with open(filename,"rb") as f:
goodOverlapPairs = pickle.load(f)
with open("word_bitmaps_40_bits_minimum.pkl","rb") as f:
bitmaps = pickle.load(f)
# Count how often each word has a highly overlapping match with other words
wordFrequencies = {}
for w1, w2, overlap in goodOverlapPairs:
wordFrequencies[w1] = wordFrequencies.get(w1, 0) + 1
printTemplate = PrettyTable(["Num High Overlaps", "Word", "On Bits"],
sortby="Num High Overlaps", reversesort=True)
for word in wordFrequencies.iterkeys():
printTemplate.add_row([wordFrequencies[word], word, len(bitmaps[word])])
print printTemplate | python | def countWordOverlapFrequencies(filename="goodOverlapPairs.pkl"):
"""
Count how many high overlaps each word has, and print it out
"""
with open(filename,"rb") as f:
goodOverlapPairs = pickle.load(f)
with open("word_bitmaps_40_bits_minimum.pkl","rb") as f:
bitmaps = pickle.load(f)
# Count how often each word has a highly overlapping match with other words
wordFrequencies = {}
for w1, w2, overlap in goodOverlapPairs:
wordFrequencies[w1] = wordFrequencies.get(w1, 0) + 1
printTemplate = PrettyTable(["Num High Overlaps", "Word", "On Bits"],
sortby="Num High Overlaps", reversesort=True)
for word in wordFrequencies.iterkeys():
printTemplate.add_row([wordFrequencies[word], word, len(bitmaps[word])])
print printTemplate | [
"def",
"countWordOverlapFrequencies",
"(",
"filename",
"=",
"\"goodOverlapPairs.pkl\"",
")",
":",
"with",
"open",
"(",
"filename",
",",
"\"rb\"",
")",
"as",
"f",
":",
"goodOverlapPairs",
"=",
"pickle",
".",
"load",
"(",
"f",
")",
"with",
"open",
"(",
"\"word... | Count how many high overlaps each word has, and print it out | [
"Count",
"how",
"many",
"high",
"overlaps",
"each",
"word",
"has",
"and",
"print",
"it",
"out"
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/cio_sdr/compare_words.py#L119-L141 | train | 198,691 |
numenta/htmresearch | htmresearch/algorithms/apical_dependent_temporal_memory.py | ApicalDependentTemporalMemory.activateCells | def activateCells(self,
activeColumns,
basalReinforceCandidates,
apicalReinforceCandidates,
basalGrowthCandidates,
apicalGrowthCandidates,
learn=True):
"""
Activate cells in the specified columns, using the result of the previous
'depolarizeCells' as predictions. Then learn.
@param activeColumns (numpy array)
List of active columns
@param basalReinforceCandidates (numpy array)
List of bits that the active cells may reinforce basal synapses to.
@param apicalReinforceCandidates (numpy array)
List of bits that the active cells may reinforce apical synapses to.
@param basalGrowthCandidates (numpy array or None)
List of bits that the active cells may grow new basal synapses to.
@param apicalGrowthCandidates (numpy array or None)
List of bits that the active cells may grow new apical synapses to
@param learn (bool)
Whether to grow / reinforce / punish synapses
"""
# Calculate active cells
(correctPredictedCells,
burstingColumns) = np2.setCompare(self.predictedCells, activeColumns,
self.predictedCells / self.cellsPerColumn,
rightMinusLeft=True)
newActiveCells = np.concatenate((correctPredictedCells,
np2.getAllCellsInColumns(
burstingColumns, self.cellsPerColumn)))
# Calculate learning
(learningActiveBasalSegments,
learningActiveApicalSegments,
learningMatchingBasalSegments,
learningMatchingApicalSegments,
basalSegmentsToPunish,
apicalSegmentsToPunish,
newSegmentCells,
learningCells) = self._calculateLearning(activeColumns,
burstingColumns,
correctPredictedCells,
self.activeBasalSegments,
self.activeApicalSegments,
self.matchingBasalSegments,
self.matchingApicalSegments,
self.basalPotentialOverlaps,
self.apicalPotentialOverlaps)
if learn:
# Learn on existing segments
for learningSegments in (learningActiveBasalSegments,
learningMatchingBasalSegments):
self._learn(self.basalConnections, self.rng, learningSegments,
basalReinforceCandidates, basalGrowthCandidates,
self.basalPotentialOverlaps,
self.initialPermanence, self.sampleSize,
self.permanenceIncrement, self.permanenceDecrement,
self.maxSynapsesPerSegment)
for learningSegments in (learningActiveApicalSegments,
learningMatchingApicalSegments):
self._learn(self.apicalConnections, self.rng, learningSegments,
apicalReinforceCandidates, apicalGrowthCandidates,
self.apicalPotentialOverlaps, self.initialPermanence,
self.sampleSize, self.permanenceIncrement,
self.permanenceDecrement, self.maxSynapsesPerSegment)
# Punish incorrect predictions
if self.basalPredictedSegmentDecrement != 0.0:
self.basalConnections.adjustActiveSynapses(
basalSegmentsToPunish, basalReinforceCandidates,
-self.basalPredictedSegmentDecrement)
if self.apicalPredictedSegmentDecrement != 0.0:
self.apicalConnections.adjustActiveSynapses(
apicalSegmentsToPunish, apicalReinforceCandidates,
-self.apicalPredictedSegmentDecrement)
# Only grow segments if there is basal *and* apical input.
if len(basalGrowthCandidates) > 0 and len(apicalGrowthCandidates) > 0:
self._learnOnNewSegments(self.basalConnections, self.rng,
newSegmentCells, basalGrowthCandidates,
self.initialPermanence, self.sampleSize,
self.maxSynapsesPerSegment)
self._learnOnNewSegments(self.apicalConnections, self.rng,
newSegmentCells, apicalGrowthCandidates,
self.initialPermanence, self.sampleSize,
self.maxSynapsesPerSegment)
# Save the results
newActiveCells.sort()
learningCells.sort()
self.activeCells = newActiveCells
self.winnerCells = learningCells
self.predictedActiveCells = correctPredictedCells | python | def activateCells(self,
activeColumns,
basalReinforceCandidates,
apicalReinforceCandidates,
basalGrowthCandidates,
apicalGrowthCandidates,
learn=True):
"""
Activate cells in the specified columns, using the result of the previous
'depolarizeCells' as predictions. Then learn.
@param activeColumns (numpy array)
List of active columns
@param basalReinforceCandidates (numpy array)
List of bits that the active cells may reinforce basal synapses to.
@param apicalReinforceCandidates (numpy array)
List of bits that the active cells may reinforce apical synapses to.
@param basalGrowthCandidates (numpy array or None)
List of bits that the active cells may grow new basal synapses to.
@param apicalGrowthCandidates (numpy array or None)
List of bits that the active cells may grow new apical synapses to
@param learn (bool)
Whether to grow / reinforce / punish synapses
"""
# Calculate active cells
(correctPredictedCells,
burstingColumns) = np2.setCompare(self.predictedCells, activeColumns,
self.predictedCells / self.cellsPerColumn,
rightMinusLeft=True)
newActiveCells = np.concatenate((correctPredictedCells,
np2.getAllCellsInColumns(
burstingColumns, self.cellsPerColumn)))
# Calculate learning
(learningActiveBasalSegments,
learningActiveApicalSegments,
learningMatchingBasalSegments,
learningMatchingApicalSegments,
basalSegmentsToPunish,
apicalSegmentsToPunish,
newSegmentCells,
learningCells) = self._calculateLearning(activeColumns,
burstingColumns,
correctPredictedCells,
self.activeBasalSegments,
self.activeApicalSegments,
self.matchingBasalSegments,
self.matchingApicalSegments,
self.basalPotentialOverlaps,
self.apicalPotentialOverlaps)
if learn:
# Learn on existing segments
for learningSegments in (learningActiveBasalSegments,
learningMatchingBasalSegments):
self._learn(self.basalConnections, self.rng, learningSegments,
basalReinforceCandidates, basalGrowthCandidates,
self.basalPotentialOverlaps,
self.initialPermanence, self.sampleSize,
self.permanenceIncrement, self.permanenceDecrement,
self.maxSynapsesPerSegment)
for learningSegments in (learningActiveApicalSegments,
learningMatchingApicalSegments):
self._learn(self.apicalConnections, self.rng, learningSegments,
apicalReinforceCandidates, apicalGrowthCandidates,
self.apicalPotentialOverlaps, self.initialPermanence,
self.sampleSize, self.permanenceIncrement,
self.permanenceDecrement, self.maxSynapsesPerSegment)
# Punish incorrect predictions
if self.basalPredictedSegmentDecrement != 0.0:
self.basalConnections.adjustActiveSynapses(
basalSegmentsToPunish, basalReinforceCandidates,
-self.basalPredictedSegmentDecrement)
if self.apicalPredictedSegmentDecrement != 0.0:
self.apicalConnections.adjustActiveSynapses(
apicalSegmentsToPunish, apicalReinforceCandidates,
-self.apicalPredictedSegmentDecrement)
# Only grow segments if there is basal *and* apical input.
if len(basalGrowthCandidates) > 0 and len(apicalGrowthCandidates) > 0:
self._learnOnNewSegments(self.basalConnections, self.rng,
newSegmentCells, basalGrowthCandidates,
self.initialPermanence, self.sampleSize,
self.maxSynapsesPerSegment)
self._learnOnNewSegments(self.apicalConnections, self.rng,
newSegmentCells, apicalGrowthCandidates,
self.initialPermanence, self.sampleSize,
self.maxSynapsesPerSegment)
# Save the results
newActiveCells.sort()
learningCells.sort()
self.activeCells = newActiveCells
self.winnerCells = learningCells
self.predictedActiveCells = correctPredictedCells | [
"def",
"activateCells",
"(",
"self",
",",
"activeColumns",
",",
"basalReinforceCandidates",
",",
"apicalReinforceCandidates",
",",
"basalGrowthCandidates",
",",
"apicalGrowthCandidates",
",",
"learn",
"=",
"True",
")",
":",
"# Calculate active cells",
"(",
"correctPredict... | Activate cells in the specified columns, using the result of the previous
'depolarizeCells' as predictions. Then learn.
@param activeColumns (numpy array)
List of active columns
@param basalReinforceCandidates (numpy array)
List of bits that the active cells may reinforce basal synapses to.
@param apicalReinforceCandidates (numpy array)
List of bits that the active cells may reinforce apical synapses to.
@param basalGrowthCandidates (numpy array or None)
List of bits that the active cells may grow new basal synapses to.
@param apicalGrowthCandidates (numpy array or None)
List of bits that the active cells may grow new apical synapses to
@param learn (bool)
Whether to grow / reinforce / punish synapses | [
"Activate",
"cells",
"in",
"the",
"specified",
"columns",
"using",
"the",
"result",
"of",
"the",
"previous",
"depolarizeCells",
"as",
"predictions",
".",
"Then",
"learn",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/algorithms/apical_dependent_temporal_memory.py#L223-L328 | train | 198,692 |
numenta/htmresearch | htmresearch/algorithms/apical_dependent_temporal_memory.py | ApicalDependentTemporalMemory._calculateLearning | def _calculateLearning(self,
activeColumns,
burstingColumns,
correctPredictedCells,
activeBasalSegments,
activeApicalSegments,
matchingBasalSegments,
matchingApicalSegments,
basalPotentialOverlaps,
apicalPotentialOverlaps):
"""
Learning occurs on pairs of segments. Correctly predicted cells always have
active basal and apical segments, and we learn on these segments. In
bursting columns, we either learn on an existing segment pair, or we grow a
new pair of segments.
@param activeColumns (numpy array)
@param burstingColumns (numpy array)
@param correctPredictedCells (numpy array)
@param activeBasalSegments (numpy array)
@param activeApicalSegments (numpy array)
@param matchingBasalSegments (numpy array)
@param matchingApicalSegments (numpy array)
@param basalPotentialOverlaps (numpy array)
@param apicalPotentialOverlaps (numpy array)
@return (tuple)
- learningActiveBasalSegments (numpy array)
Active basal segments on correct predicted cells
- learningActiveApicalSegments (numpy array)
Active apical segments on correct predicted cells
- learningMatchingBasalSegments (numpy array)
Matching basal segments selected for learning in bursting columns
- learningMatchingApicalSegments (numpy array)
Matching apical segments selected for learning in bursting columns
- basalSegmentsToPunish (numpy array)
Basal segments that should be punished for predicting an inactive column
- apicalSegmentsToPunish (numpy array)
Apical segments that should be punished for predicting an inactive column
- newSegmentCells (numpy array)
Cells in bursting columns that were selected to grow new segments
- learningCells (numpy array)
Every cell that has a learning segment or was selected to grow a segment
"""
# Correctly predicted columns
learningActiveBasalSegments = self.basalConnections.filterSegmentsByCell(
activeBasalSegments, correctPredictedCells)
learningActiveApicalSegments = self.apicalConnections.filterSegmentsByCell(
activeApicalSegments, correctPredictedCells)
# Bursting columns
cellsForMatchingBasal = self.basalConnections.mapSegmentsToCells(
matchingBasalSegments)
cellsForMatchingApical = self.apicalConnections.mapSegmentsToCells(
matchingApicalSegments)
matchingCells = np.intersect1d(
cellsForMatchingBasal, cellsForMatchingApical)
(matchingCellsInBurstingColumns,
burstingColumnsWithNoMatch) = np2.setCompare(
matchingCells, burstingColumns, matchingCells / self.cellsPerColumn,
rightMinusLeft=True)
(learningMatchingBasalSegments,
learningMatchingApicalSegments) = self._chooseBestSegmentPairPerColumn(
matchingCellsInBurstingColumns, matchingBasalSegments,
matchingApicalSegments, basalPotentialOverlaps, apicalPotentialOverlaps)
newSegmentCells = self._getCellsWithFewestSegments(
burstingColumnsWithNoMatch)
# Incorrectly predicted columns
if self.basalPredictedSegmentDecrement > 0.0:
correctMatchingBasalMask = np.in1d(
cellsForMatchingBasal / self.cellsPerColumn, activeColumns)
basalSegmentsToPunish = matchingBasalSegments[~correctMatchingBasalMask]
else:
basalSegmentsToPunish = ()
if self.apicalPredictedSegmentDecrement > 0.0:
correctMatchingApicalMask = np.in1d(
cellsForMatchingApical / self.cellsPerColumn, activeColumns)
apicalSegmentsToPunish = matchingApicalSegments[~correctMatchingApicalMask]
else:
apicalSegmentsToPunish = ()
# Make a list of every cell that is learning
learningCells = np.concatenate(
(correctPredictedCells,
self.basalConnections.mapSegmentsToCells(learningMatchingBasalSegments),
newSegmentCells))
return (learningActiveBasalSegments,
learningActiveApicalSegments,
learningMatchingBasalSegments,
learningMatchingApicalSegments,
basalSegmentsToPunish,
apicalSegmentsToPunish,
newSegmentCells,
learningCells) | python | def _calculateLearning(self,
activeColumns,
burstingColumns,
correctPredictedCells,
activeBasalSegments,
activeApicalSegments,
matchingBasalSegments,
matchingApicalSegments,
basalPotentialOverlaps,
apicalPotentialOverlaps):
"""
Learning occurs on pairs of segments. Correctly predicted cells always have
active basal and apical segments, and we learn on these segments. In
bursting columns, we either learn on an existing segment pair, or we grow a
new pair of segments.
@param activeColumns (numpy array)
@param burstingColumns (numpy array)
@param correctPredictedCells (numpy array)
@param activeBasalSegments (numpy array)
@param activeApicalSegments (numpy array)
@param matchingBasalSegments (numpy array)
@param matchingApicalSegments (numpy array)
@param basalPotentialOverlaps (numpy array)
@param apicalPotentialOverlaps (numpy array)
@return (tuple)
- learningActiveBasalSegments (numpy array)
Active basal segments on correct predicted cells
- learningActiveApicalSegments (numpy array)
Active apical segments on correct predicted cells
- learningMatchingBasalSegments (numpy array)
Matching basal segments selected for learning in bursting columns
- learningMatchingApicalSegments (numpy array)
Matching apical segments selected for learning in bursting columns
- basalSegmentsToPunish (numpy array)
Basal segments that should be punished for predicting an inactive column
- apicalSegmentsToPunish (numpy array)
Apical segments that should be punished for predicting an inactive column
- newSegmentCells (numpy array)
Cells in bursting columns that were selected to grow new segments
- learningCells (numpy array)
Every cell that has a learning segment or was selected to grow a segment
"""
# Correctly predicted columns
learningActiveBasalSegments = self.basalConnections.filterSegmentsByCell(
activeBasalSegments, correctPredictedCells)
learningActiveApicalSegments = self.apicalConnections.filterSegmentsByCell(
activeApicalSegments, correctPredictedCells)
# Bursting columns
cellsForMatchingBasal = self.basalConnections.mapSegmentsToCells(
matchingBasalSegments)
cellsForMatchingApical = self.apicalConnections.mapSegmentsToCells(
matchingApicalSegments)
matchingCells = np.intersect1d(
cellsForMatchingBasal, cellsForMatchingApical)
(matchingCellsInBurstingColumns,
burstingColumnsWithNoMatch) = np2.setCompare(
matchingCells, burstingColumns, matchingCells / self.cellsPerColumn,
rightMinusLeft=True)
(learningMatchingBasalSegments,
learningMatchingApicalSegments) = self._chooseBestSegmentPairPerColumn(
matchingCellsInBurstingColumns, matchingBasalSegments,
matchingApicalSegments, basalPotentialOverlaps, apicalPotentialOverlaps)
newSegmentCells = self._getCellsWithFewestSegments(
burstingColumnsWithNoMatch)
# Incorrectly predicted columns
if self.basalPredictedSegmentDecrement > 0.0:
correctMatchingBasalMask = np.in1d(
cellsForMatchingBasal / self.cellsPerColumn, activeColumns)
basalSegmentsToPunish = matchingBasalSegments[~correctMatchingBasalMask]
else:
basalSegmentsToPunish = ()
if self.apicalPredictedSegmentDecrement > 0.0:
correctMatchingApicalMask = np.in1d(
cellsForMatchingApical / self.cellsPerColumn, activeColumns)
apicalSegmentsToPunish = matchingApicalSegments[~correctMatchingApicalMask]
else:
apicalSegmentsToPunish = ()
# Make a list of every cell that is learning
learningCells = np.concatenate(
(correctPredictedCells,
self.basalConnections.mapSegmentsToCells(learningMatchingBasalSegments),
newSegmentCells))
return (learningActiveBasalSegments,
learningActiveApicalSegments,
learningMatchingBasalSegments,
learningMatchingApicalSegments,
basalSegmentsToPunish,
apicalSegmentsToPunish,
newSegmentCells,
learningCells) | [
"def",
"_calculateLearning",
"(",
"self",
",",
"activeColumns",
",",
"burstingColumns",
",",
"correctPredictedCells",
",",
"activeBasalSegments",
",",
"activeApicalSegments",
",",
"matchingBasalSegments",
",",
"matchingApicalSegments",
",",
"basalPotentialOverlaps",
",",
"a... | Learning occurs on pairs of segments. Correctly predicted cells always have
active basal and apical segments, and we learn on these segments. In
bursting columns, we either learn on an existing segment pair, or we grow a
new pair of segments.
@param activeColumns (numpy array)
@param burstingColumns (numpy array)
@param correctPredictedCells (numpy array)
@param activeBasalSegments (numpy array)
@param activeApicalSegments (numpy array)
@param matchingBasalSegments (numpy array)
@param matchingApicalSegments (numpy array)
@param basalPotentialOverlaps (numpy array)
@param apicalPotentialOverlaps (numpy array)
@return (tuple)
- learningActiveBasalSegments (numpy array)
Active basal segments on correct predicted cells
- learningActiveApicalSegments (numpy array)
Active apical segments on correct predicted cells
- learningMatchingBasalSegments (numpy array)
Matching basal segments selected for learning in bursting columns
- learningMatchingApicalSegments (numpy array)
Matching apical segments selected for learning in bursting columns
- basalSegmentsToPunish (numpy array)
Basal segments that should be punished for predicting an inactive column
- apicalSegmentsToPunish (numpy array)
Apical segments that should be punished for predicting an inactive column
- newSegmentCells (numpy array)
Cells in bursting columns that were selected to grow new segments
- learningCells (numpy array)
Every cell that has a learning segment or was selected to grow a segment | [
"Learning",
"occurs",
"on",
"pairs",
"of",
"segments",
".",
"Correctly",
"predicted",
"cells",
"always",
"have",
"active",
"basal",
"and",
"apical",
"segments",
"and",
"we",
"learn",
"on",
"these",
"segments",
".",
"In",
"bursting",
"columns",
"we",
"either",
... | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/algorithms/apical_dependent_temporal_memory.py#L331-L437 | train | 198,693 |
numenta/htmresearch | htmresearch/algorithms/apical_dependent_temporal_memory.py | ApicalDependentTemporalMemory._learnOnNewSegments | def _learnOnNewSegments(connections, rng, newSegmentCells, growthCandidates,
initialPermanence, sampleSize, maxSynapsesPerSegment):
"""
Create new segments, and grow synapses on them.
@param connections (SparseMatrixConnections)
@param rng (Random)
@param newSegmentCells (numpy array)
@param growthCandidates (numpy array)
"""
numNewSynapses = len(growthCandidates)
if sampleSize != -1:
numNewSynapses = min(numNewSynapses, sampleSize)
if maxSynapsesPerSegment != -1:
numNewSynapses = min(numNewSynapses, maxSynapsesPerSegment)
newSegments = connections.createSegments(newSegmentCells)
connections.growSynapsesToSample(newSegments, growthCandidates,
numNewSynapses, initialPermanence,
rng) | python | def _learnOnNewSegments(connections, rng, newSegmentCells, growthCandidates,
initialPermanence, sampleSize, maxSynapsesPerSegment):
"""
Create new segments, and grow synapses on them.
@param connections (SparseMatrixConnections)
@param rng (Random)
@param newSegmentCells (numpy array)
@param growthCandidates (numpy array)
"""
numNewSynapses = len(growthCandidates)
if sampleSize != -1:
numNewSynapses = min(numNewSynapses, sampleSize)
if maxSynapsesPerSegment != -1:
numNewSynapses = min(numNewSynapses, maxSynapsesPerSegment)
newSegments = connections.createSegments(newSegmentCells)
connections.growSynapsesToSample(newSegments, growthCandidates,
numNewSynapses, initialPermanence,
rng) | [
"def",
"_learnOnNewSegments",
"(",
"connections",
",",
"rng",
",",
"newSegmentCells",
",",
"growthCandidates",
",",
"initialPermanence",
",",
"sampleSize",
",",
"maxSynapsesPerSegment",
")",
":",
"numNewSynapses",
"=",
"len",
"(",
"growthCandidates",
")",
"if",
"sam... | Create new segments, and grow synapses on them.
@param connections (SparseMatrixConnections)
@param rng (Random)
@param newSegmentCells (numpy array)
@param growthCandidates (numpy array) | [
"Create",
"new",
"segments",
"and",
"grow",
"synapses",
"on",
"them",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/algorithms/apical_dependent_temporal_memory.py#L530-L552 | train | 198,694 |
numenta/htmresearch | htmresearch/algorithms/apical_dependent_temporal_memory.py | ApicalDependentTemporalMemory._chooseBestSegmentPairPerColumn | def _chooseBestSegmentPairPerColumn(self,
matchingCellsInBurstingColumns,
matchingBasalSegments,
matchingApicalSegments,
basalPotentialOverlaps,
apicalPotentialOverlaps):
"""
Choose the best pair of matching segments - one basal and one apical - for
each column. Pairs are ranked by the sum of their potential overlaps.
When there's a tie, the first pair wins.
@param matchingCellsInBurstingColumns (numpy array)
Cells in bursting columns that have at least one matching basal segment and
at least one matching apical segment
@param matchingBasalSegments (numpy array)
@param matchingApicalSegments (numpy array)
@param basalPotentialOverlaps (numpy array)
@param apicalPotentialOverlaps (numpy array)
@return (tuple)
- learningBasalSegments (numpy array)
The selected basal segments
- learningApicalSegments (numpy array)
The selected apical segments
"""
basalCandidateSegments = self.basalConnections.filterSegmentsByCell(
matchingBasalSegments, matchingCellsInBurstingColumns)
apicalCandidateSegments = self.apicalConnections.filterSegmentsByCell(
matchingApicalSegments, matchingCellsInBurstingColumns)
# Sort everything once rather than inside of each call to argmaxMulti.
self.basalConnections.sortSegmentsByCell(basalCandidateSegments)
self.apicalConnections.sortSegmentsByCell(apicalCandidateSegments)
# Narrow it down to one pair per cell.
oneBasalPerCellFilter = np2.argmaxMulti(
basalPotentialOverlaps[basalCandidateSegments],
self.basalConnections.mapSegmentsToCells(basalCandidateSegments),
assumeSorted=True)
basalCandidateSegments = basalCandidateSegments[oneBasalPerCellFilter]
oneApicalPerCellFilter = np2.argmaxMulti(
apicalPotentialOverlaps[apicalCandidateSegments],
self.apicalConnections.mapSegmentsToCells(apicalCandidateSegments),
assumeSorted=True)
apicalCandidateSegments = apicalCandidateSegments[oneApicalPerCellFilter]
# Narrow it down to one pair per column.
cellScores = (basalPotentialOverlaps[basalCandidateSegments] +
apicalPotentialOverlaps[apicalCandidateSegments])
columnsForCandidates = (
self.basalConnections.mapSegmentsToCells(basalCandidateSegments) /
self.cellsPerColumn)
onePerColumnFilter = np2.argmaxMulti(cellScores, columnsForCandidates,
assumeSorted=True)
learningBasalSegments = basalCandidateSegments[onePerColumnFilter]
learningApicalSegments = apicalCandidateSegments[onePerColumnFilter]
return (learningBasalSegments,
learningApicalSegments) | python | def _chooseBestSegmentPairPerColumn(self,
matchingCellsInBurstingColumns,
matchingBasalSegments,
matchingApicalSegments,
basalPotentialOverlaps,
apicalPotentialOverlaps):
"""
Choose the best pair of matching segments - one basal and one apical - for
each column. Pairs are ranked by the sum of their potential overlaps.
When there's a tie, the first pair wins.
@param matchingCellsInBurstingColumns (numpy array)
Cells in bursting columns that have at least one matching basal segment and
at least one matching apical segment
@param matchingBasalSegments (numpy array)
@param matchingApicalSegments (numpy array)
@param basalPotentialOverlaps (numpy array)
@param apicalPotentialOverlaps (numpy array)
@return (tuple)
- learningBasalSegments (numpy array)
The selected basal segments
- learningApicalSegments (numpy array)
The selected apical segments
"""
basalCandidateSegments = self.basalConnections.filterSegmentsByCell(
matchingBasalSegments, matchingCellsInBurstingColumns)
apicalCandidateSegments = self.apicalConnections.filterSegmentsByCell(
matchingApicalSegments, matchingCellsInBurstingColumns)
# Sort everything once rather than inside of each call to argmaxMulti.
self.basalConnections.sortSegmentsByCell(basalCandidateSegments)
self.apicalConnections.sortSegmentsByCell(apicalCandidateSegments)
# Narrow it down to one pair per cell.
oneBasalPerCellFilter = np2.argmaxMulti(
basalPotentialOverlaps[basalCandidateSegments],
self.basalConnections.mapSegmentsToCells(basalCandidateSegments),
assumeSorted=True)
basalCandidateSegments = basalCandidateSegments[oneBasalPerCellFilter]
oneApicalPerCellFilter = np2.argmaxMulti(
apicalPotentialOverlaps[apicalCandidateSegments],
self.apicalConnections.mapSegmentsToCells(apicalCandidateSegments),
assumeSorted=True)
apicalCandidateSegments = apicalCandidateSegments[oneApicalPerCellFilter]
# Narrow it down to one pair per column.
cellScores = (basalPotentialOverlaps[basalCandidateSegments] +
apicalPotentialOverlaps[apicalCandidateSegments])
columnsForCandidates = (
self.basalConnections.mapSegmentsToCells(basalCandidateSegments) /
self.cellsPerColumn)
onePerColumnFilter = np2.argmaxMulti(cellScores, columnsForCandidates,
assumeSorted=True)
learningBasalSegments = basalCandidateSegments[onePerColumnFilter]
learningApicalSegments = apicalCandidateSegments[onePerColumnFilter]
return (learningBasalSegments,
learningApicalSegments) | [
"def",
"_chooseBestSegmentPairPerColumn",
"(",
"self",
",",
"matchingCellsInBurstingColumns",
",",
"matchingBasalSegments",
",",
"matchingApicalSegments",
",",
"basalPotentialOverlaps",
",",
"apicalPotentialOverlaps",
")",
":",
"basalCandidateSegments",
"=",
"self",
".",
"bas... | Choose the best pair of matching segments - one basal and one apical - for
each column. Pairs are ranked by the sum of their potential overlaps.
When there's a tie, the first pair wins.
@param matchingCellsInBurstingColumns (numpy array)
Cells in bursting columns that have at least one matching basal segment and
at least one matching apical segment
@param matchingBasalSegments (numpy array)
@param matchingApicalSegments (numpy array)
@param basalPotentialOverlaps (numpy array)
@param apicalPotentialOverlaps (numpy array)
@return (tuple)
- learningBasalSegments (numpy array)
The selected basal segments
- learningApicalSegments (numpy array)
The selected apical segments | [
"Choose",
"the",
"best",
"pair",
"of",
"matching",
"segments",
"-",
"one",
"basal",
"and",
"one",
"apical",
"-",
"for",
"each",
"column",
".",
"Pairs",
"are",
"ranked",
"by",
"the",
"sum",
"of",
"their",
"potential",
"overlaps",
".",
"When",
"there",
"s"... | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/algorithms/apical_dependent_temporal_memory.py#L555-L617 | train | 198,695 |
numenta/htmresearch | htmresearch/algorithms/apical_dependent_temporal_memory.py | TripleMemory.compute | def compute(self,
activeColumns,
basalInput,
apicalInput=(),
basalGrowthCandidates=None,
apicalGrowthCandidates=None,
learn=True):
"""
Perform one timestep. Use the basal and apical input to form a set of
predictions, then activate the specified columns, then learn.
@param activeColumns (numpy array)
List of active columns
@param basalInput (numpy array)
List of active input bits for the basal dendrite segments
@param apicalInput (numpy array)
List of active input bits for the apical dendrite segments
@param basalGrowthCandidates (numpy array or None)
List of bits that the active cells may grow new basal synapses to.
If None, the basalInput is assumed to be growth candidates.
@param apicalGrowthCandidates (numpy array or None)
List of bits that the active cells may grow new apical synapses to
If None, the apicalInput is assumed to be growth candidates.
@param learn (bool)
Whether to grow / reinforce / punish synapses
"""
activeColumns = np.asarray(activeColumns)
basalInput = np.asarray(basalInput)
apicalInput = np.asarray(apicalInput)
if basalGrowthCandidates is None:
basalGrowthCandidates = basalInput
basalGrowthCandidates = np.asarray(basalGrowthCandidates)
if apicalGrowthCandidates is None:
apicalGrowthCandidates = apicalInput
apicalGrowthCandidates = np.asarray(apicalGrowthCandidates)
self.depolarizeCells(basalInput, apicalInput, learn)
self.activateCells(activeColumns, basalInput, apicalInput,
basalGrowthCandidates, apicalGrowthCandidates, learn) | python | def compute(self,
activeColumns,
basalInput,
apicalInput=(),
basalGrowthCandidates=None,
apicalGrowthCandidates=None,
learn=True):
"""
Perform one timestep. Use the basal and apical input to form a set of
predictions, then activate the specified columns, then learn.
@param activeColumns (numpy array)
List of active columns
@param basalInput (numpy array)
List of active input bits for the basal dendrite segments
@param apicalInput (numpy array)
List of active input bits for the apical dendrite segments
@param basalGrowthCandidates (numpy array or None)
List of bits that the active cells may grow new basal synapses to.
If None, the basalInput is assumed to be growth candidates.
@param apicalGrowthCandidates (numpy array or None)
List of bits that the active cells may grow new apical synapses to
If None, the apicalInput is assumed to be growth candidates.
@param learn (bool)
Whether to grow / reinforce / punish synapses
"""
activeColumns = np.asarray(activeColumns)
basalInput = np.asarray(basalInput)
apicalInput = np.asarray(apicalInput)
if basalGrowthCandidates is None:
basalGrowthCandidates = basalInput
basalGrowthCandidates = np.asarray(basalGrowthCandidates)
if apicalGrowthCandidates is None:
apicalGrowthCandidates = apicalInput
apicalGrowthCandidates = np.asarray(apicalGrowthCandidates)
self.depolarizeCells(basalInput, apicalInput, learn)
self.activateCells(activeColumns, basalInput, apicalInput,
basalGrowthCandidates, apicalGrowthCandidates, learn) | [
"def",
"compute",
"(",
"self",
",",
"activeColumns",
",",
"basalInput",
",",
"apicalInput",
"=",
"(",
")",
",",
"basalGrowthCandidates",
"=",
"None",
",",
"apicalGrowthCandidates",
"=",
"None",
",",
"learn",
"=",
"True",
")",
":",
"activeColumns",
"=",
"np",... | Perform one timestep. Use the basal and apical input to form a set of
predictions, then activate the specified columns, then learn.
@param activeColumns (numpy array)
List of active columns
@param basalInput (numpy array)
List of active input bits for the basal dendrite segments
@param apicalInput (numpy array)
List of active input bits for the apical dendrite segments
@param basalGrowthCandidates (numpy array or None)
List of bits that the active cells may grow new basal synapses to.
If None, the basalInput is assumed to be growth candidates.
@param apicalGrowthCandidates (numpy array or None)
List of bits that the active cells may grow new apical synapses to
If None, the apicalInput is assumed to be growth candidates.
@param learn (bool)
Whether to grow / reinforce / punish synapses | [
"Perform",
"one",
"timestep",
".",
"Use",
"the",
"basal",
"and",
"apical",
"input",
"to",
"form",
"a",
"set",
"of",
"predictions",
"then",
"activate",
"the",
"specified",
"columns",
"then",
"learn",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/algorithms/apical_dependent_temporal_memory.py#L888-L933 | train | 198,696 |
numenta/htmresearch | htmresearch/algorithms/apical_dependent_temporal_memory.py | ApicalDependentSequenceMemory.compute | def compute(self,
activeColumns,
apicalInput=(),
apicalGrowthCandidates=None,
learn=True):
"""
Perform one timestep. Activate the specified columns, using the predictions
from the previous timestep, then learn. Then form a new set of predictions
using the new active cells and the apicalInput.
@param activeColumns (numpy array)
List of active columns
@param apicalInput (numpy array)
List of active input bits for the apical dendrite segments
@param apicalGrowthCandidates (numpy array or None)
List of bits that the active cells may grow new apical synapses to
If None, the apicalInput is assumed to be growth candidates.
@param learn (bool)
Whether to grow / reinforce / punish synapses
"""
activeColumns = np.asarray(activeColumns)
apicalInput = np.asarray(apicalInput)
if apicalGrowthCandidates is None:
apicalGrowthCandidates = apicalInput
apicalGrowthCandidates = np.asarray(apicalGrowthCandidates)
self.prevPredictedCells = self.predictedCells
self.activateCells(activeColumns, self.activeCells, self.prevApicalInput,
self.winnerCells, self.prevApicalGrowthCandidates, learn)
self.depolarizeCells(self.activeCells, apicalInput, learn)
self.prevApicalInput = apicalInput.copy()
self.prevApicalGrowthCandidates = apicalGrowthCandidates.copy() | python | def compute(self,
activeColumns,
apicalInput=(),
apicalGrowthCandidates=None,
learn=True):
"""
Perform one timestep. Activate the specified columns, using the predictions
from the previous timestep, then learn. Then form a new set of predictions
using the new active cells and the apicalInput.
@param activeColumns (numpy array)
List of active columns
@param apicalInput (numpy array)
List of active input bits for the apical dendrite segments
@param apicalGrowthCandidates (numpy array or None)
List of bits that the active cells may grow new apical synapses to
If None, the apicalInput is assumed to be growth candidates.
@param learn (bool)
Whether to grow / reinforce / punish synapses
"""
activeColumns = np.asarray(activeColumns)
apicalInput = np.asarray(apicalInput)
if apicalGrowthCandidates is None:
apicalGrowthCandidates = apicalInput
apicalGrowthCandidates = np.asarray(apicalGrowthCandidates)
self.prevPredictedCells = self.predictedCells
self.activateCells(activeColumns, self.activeCells, self.prevApicalInput,
self.winnerCells, self.prevApicalGrowthCandidates, learn)
self.depolarizeCells(self.activeCells, apicalInput, learn)
self.prevApicalInput = apicalInput.copy()
self.prevApicalGrowthCandidates = apicalGrowthCandidates.copy() | [
"def",
"compute",
"(",
"self",
",",
"activeColumns",
",",
"apicalInput",
"=",
"(",
")",
",",
"apicalGrowthCandidates",
"=",
"None",
",",
"learn",
"=",
"True",
")",
":",
"activeColumns",
"=",
"np",
".",
"asarray",
"(",
"activeColumns",
")",
"apicalInput",
"... | Perform one timestep. Activate the specified columns, using the predictions
from the previous timestep, then learn. Then form a new set of predictions
using the new active cells and the apicalInput.
@param activeColumns (numpy array)
List of active columns
@param apicalInput (numpy array)
List of active input bits for the apical dendrite segments
@param apicalGrowthCandidates (numpy array or None)
List of bits that the active cells may grow new apical synapses to
If None, the apicalInput is assumed to be growth candidates.
@param learn (bool)
Whether to grow / reinforce / punish synapses | [
"Perform",
"one",
"timestep",
".",
"Activate",
"the",
"specified",
"columns",
"using",
"the",
"predictions",
"from",
"the",
"previous",
"timestep",
"then",
"learn",
".",
"Then",
"form",
"a",
"new",
"set",
"of",
"predictions",
"using",
"the",
"new",
"active",
... | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/algorithms/apical_dependent_temporal_memory.py#L1023-L1060 | train | 198,697 |
numenta/htmresearch | htmresearch/frameworks/thalamus/thalamus_utils.py | createLocationEncoder | def createLocationEncoder(t, w=15):
"""
A default coordinate encoder for encoding locations into sparse
distributed representations.
"""
encoder = CoordinateEncoder(name="positionEncoder", n=t.l6CellCount, w=w)
return encoder | python | def createLocationEncoder(t, w=15):
"""
A default coordinate encoder for encoding locations into sparse
distributed representations.
"""
encoder = CoordinateEncoder(name="positionEncoder", n=t.l6CellCount, w=w)
return encoder | [
"def",
"createLocationEncoder",
"(",
"t",
",",
"w",
"=",
"15",
")",
":",
"encoder",
"=",
"CoordinateEncoder",
"(",
"name",
"=",
"\"positionEncoder\"",
",",
"n",
"=",
"t",
".",
"l6CellCount",
",",
"w",
"=",
"w",
")",
"return",
"encoder"
] | A default coordinate encoder for encoding locations into sparse
distributed representations. | [
"A",
"default",
"coordinate",
"encoder",
"for",
"encoding",
"locations",
"into",
"sparse",
"distributed",
"representations",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/thalamus/thalamus_utils.py#L36-L42 | train | 198,698 |
numenta/htmresearch | htmresearch/frameworks/thalamus/thalamus_utils.py | getUnionLocations | def getUnionLocations(encoder, x, y, r, step=1):
"""
Return a union of location encodings that correspond to the union of all locations
within the specified circle.
"""
output = np.zeros(encoder.getWidth(), dtype=defaultDtype)
locations = set()
for dx in range(-r, r+1, step):
for dy in range(-r, r+1, step):
if dx*dx + dy*dy <= r*r:
e = encodeLocation(encoder, x+dx, y+dy, output)
locations = locations.union(set(e))
return locations | python | def getUnionLocations(encoder, x, y, r, step=1):
"""
Return a union of location encodings that correspond to the union of all locations
within the specified circle.
"""
output = np.zeros(encoder.getWidth(), dtype=defaultDtype)
locations = set()
for dx in range(-r, r+1, step):
for dy in range(-r, r+1, step):
if dx*dx + dy*dy <= r*r:
e = encodeLocation(encoder, x+dx, y+dy, output)
locations = locations.union(set(e))
return locations | [
"def",
"getUnionLocations",
"(",
"encoder",
",",
"x",
",",
"y",
",",
"r",
",",
"step",
"=",
"1",
")",
":",
"output",
"=",
"np",
".",
"zeros",
"(",
"encoder",
".",
"getWidth",
"(",
")",
",",
"dtype",
"=",
"defaultDtype",
")",
"locations",
"=",
"set"... | Return a union of location encodings that correspond to the union of all locations
within the specified circle. | [
"Return",
"a",
"union",
"of",
"location",
"encodings",
"that",
"correspond",
"to",
"the",
"union",
"of",
"all",
"locations",
"within",
"the",
"specified",
"circle",
"."
] | 70c096b09a577ea0432c3f3bfff4442d4871b7aa | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/thalamus/thalamus_utils.py#L63-L76 | train | 198,699 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.