from typing import Optional
from streamlit import config, net_util, url_util
def is_url_from_allowed_origins(url: str) -> bool:
"""Return True if URL is from allowed origins (for CORS purpose).
Allowed origins:
1. localhost
2. The internal and external IP addresses of the machine where this
function was called from.
If `server.enableCORS` is False, this allows all origins.
"""
if not config.get_option("server.enableCORS"):
# Allow everything when CORS is disabled.
return True
hostname = url_util.get_hostname(url)
allowed_domains = [ # List[Union[str, Callable[[], Optional[str]]]]
# Check localhost first.
"localhost",
"0.0.0.0",
"127.0.0.1",
# Try to avoid making unnecessary HTTP requests by checking if the user
# manually specified a server address.
_get_server_address_if_manually_set,
# Then try the options that depend on HTTP requests or opening sockets.
net_util.get_internal_ip,
net_util.get_external_ip,
]
for allowed_domain in allowed_domains:
if callable(allowed_domain):
allowed_domain = allowed_domain()
if allowed_domain is None:
continue
if hostname == allowed_domain:
return True
return False
def _get_server_address_if_manually_set() -> Optional[str]:
if config.is_manually_set("browser.serverAddress"):
return url_util.get_hostname(config.get_option("browser.serverAddress"))
return None
def make_url_path_regex(*path, **kwargs) -> str:
"""Get a regex of the form ^/foo/bar/baz/?$ for a path (foo, bar, baz)."""
    path = [x.strip("/") for x in path if x]  # Filter out falsy components.
path_format = r"^/%s/?$" if kwargs.get("trailing_slash", True) else r"^/%s$"
return path_format % "/".join(path)
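# Illustrative usage of the helper above (not part of the original module;
# the expected strings simply follow from the regex templates shown):
#
#     >>> make_url_path_regex("foo", "bar", "baz")
#     '^/foo/bar/baz/?$'
#     >>> make_url_path_regex("healthz", trailing_slash=False)
#     '^/healthz$'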
def get_url(host_ip: str) -> str:
"""Get the URL for any app served at the given host_ip.
Parameters
----------
host_ip : str
The IP address of the machine that is running the Streamlit Server.
Returns
-------
str
The URL.
"""
port = _get_browser_address_bar_port()
base_path = config.get_option("server.baseUrlPath").strip("/")
if base_path:
base_path = "/" + base_path
host_ip = host_ip.strip("/")
return f"http://{host_ip}:{port}{base_path}"
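# Hedged example for get_url, assuming the default config values
# server.baseUrlPath="" and browser.serverPort=8501 (assumptions, not
# guaranteed by this file):
#
#     >>> get_url("192.168.1.4")
#     'http://192.168.1.4:8501'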
def _get_browser_address_bar_port() -> int:
"""Get the app URL that will be shown in the browser's address bar.
That is, this is the port where static assets will be served from. In dev,
this is different from the URL that will be used to connect to the
server-browser websocket.
"""
if config.get_option("global.developmentMode"):
return 3000
    return int(config.get_option("browser.serverPort"))

# --- source: /repl_streamlit-1.16.2.tar.gz/repl_streamlit-1.16.2/streamlit/web/server/server_util.py ---
from typing import List
import tornado.web
from streamlit.proto.openmetrics_data_model_pb2 import GAUGE
from streamlit.proto.openmetrics_data_model_pb2 import MetricSet as MetricSetProto
from streamlit.runtime.stats import CacheStat, StatsManager
class StatsRequestHandler(tornado.web.RequestHandler):
def initialize(self, stats_manager: StatsManager) -> None:
self._manager = stats_manager
def set_default_headers(self):
# Avoid a circular import
from streamlit.web.server import allow_cross_origin_requests
if allow_cross_origin_requests():
self.set_header("Access-Control-Allow-Origin", "*")
def options(self):
"""/OPTIONS handler for preflight CORS checks."""
self.set_status(204)
self.finish()
def get(self) -> None:
stats = self._manager.get_stats()
# If the request asked for protobuf output, we return a serialized
# protobuf. Else we return text.
if "application/x-protobuf" in self.request.headers.get_list("Accept"):
self.write(self._stats_to_proto(stats).SerializeToString())
self.set_header("Content-Type", "application/x-protobuf")
self.set_status(200)
else:
            self.write(self._stats_to_text(stats))
self.set_header("Content-Type", "application/openmetrics-text")
self.set_status(200)
@staticmethod
def _stats_to_text(stats: List[CacheStat]) -> str:
metric_type = "# TYPE cache_memory_bytes gauge"
metric_unit = "# UNIT cache_memory_bytes bytes"
        metric_help = "# HELP cache_memory_bytes Total memory consumed by a cache."
openmetrics_eof = "# EOF\n"
# Format: header, stats, EOF
result = [metric_type, metric_unit, metric_help]
result.extend(stat.to_metric_str() for stat in stats)
result.append(openmetrics_eof)
return "\n".join(result)
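    # For illustration only (the exact per-stat line format comes from
    # CacheStat.to_metric_str() and is assumed here, not defined in this
    # file), the text exposition produced above looks roughly like:
    #
    #     # TYPE cache_memory_bytes gauge
    #     # UNIT cache_memory_bytes bytes
    #     # HELP cache_memory_bytes Total memory consumed by a cache.
    #     cache_memory_bytes{cache_type="st.cache",cache_name="foo"} 1024
    #     # EOF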
@staticmethod
def _stats_to_proto(stats: List[CacheStat]) -> MetricSetProto:
metric_set = MetricSetProto()
metric_family = metric_set.metric_families.add()
metric_family.name = "cache_memory_bytes"
metric_family.type = GAUGE
metric_family.unit = "bytes"
metric_family.help = "Total memory consumed by a cache."
for stat in stats:
metric_proto = metric_family.metrics.add()
stat.marshall_metric_proto(metric_proto)
        return metric_set

# --- source: /repl_streamlit-1.16.2.tar.gz/repl_streamlit-1.16.2/streamlit/web/server/stats_request_handler.py ---
import requests
import re
from datetime import datetime
from tqdm import tqdm
from typing import Tuple, List, Union
from pathlib import Path
from base64 import b16decode
def time_stp():
return int(datetime.now().timestamp()*1000)
class ImgUpload(object):
token_re = 'PF.obj.config.auth_token = "(.*?)"'
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36",
"Referer":"https://imgtu.com/login"
}
images_url = "https://imgtu.com/i/%s"
is_login = False
@property
def data_dict(self):
return {
"type": "file",
"action": "upload",
"timestamp": str(time_stp()),
"auth_token": self.token,
"nsfw": "0",
}
def __login(self):
print("login....")
wel_page = self.sess.get("https://imgtu.com/login")
item = re.search(self.token_re, wel_page.text)
        assert item is not None, "Failed to find the auth token on the login page"
self.token = item.group(1)
self.__load_username_password_from_local()
        # Send the fields in the form below so the request Content-Type is multipart/form-data
login_dict = {
'login-subject': (None,self.username),
"password": (None,self.passwd),
'auth_token': (None,self.token),
}
self.sess.post("https://imgtu.com/login",
files=login_dict, headers=self.headers)
self.is_login = True
    def __load_username_password_from_local(self) -> None:
this_file_path = Path(__file__).parent
this_file = this_file_path.joinpath(".info")
        assert this_file.exists(), "Login info not saved; run `replace_img store` and enter the username and password when prompted"
with this_file.open("rb") as f:
content = f.read()
decode_content = b16decode(content).decode("utf-8")
username, passwd = decode_content.split("\1")
self.username = username
self.passwd = passwd
def __init__(self) -> None:
self.sess = requests.Session()
        # Fetch the initial auth token, which is tied to the account
def uploadImage(self, name, path) -> Tuple[str, str, str]:
if not self.is_login:
self.__login()
with open(path, 'rb') as f:
file_dict = {'source': (name, f)}
res = self.sess.post("https://imgtu.com/json",
data=self.data_dict, files=file_dict)
return_res = None
try:
this_json = res.json()
return_res = (
name, this_json['image']['url'], self.images_url % this_json['image']['name'])
        except Exception:
            print("###### %s %s" % (str(path), res.text))
import traceback
traceback.print_exc()
return return_res
def uploadMulImages(self, names, paths) -> List[Tuple[str, str, str]]:
if not self.is_login:
self.__login()
        assert len(names) == len(paths), "names and paths must have the same length"
res_list = list()
for name, path in tqdm(zip(names, paths), total=len(names), desc="uploading..."):
res_list.append(self.uploadImage(name, path))
return res_list
def __login_out(self):
print("logout....")
if hasattr(self,"token"):
self.sess.get("https://imgtu.com/logout/?auth_token=%s" % self.token)
def close(self):
        print("closing...")
self.__login_out()
self.sess.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
        self.close()

# --- source: /replace_md_img-0.0.14-py3-none-any.whl/img_url_replace/img_upload.py ---
from inspect import getframeinfo, stack
from pprint import pformat
import re
def replace_me(value, as_comment=False):
"""
** ATTENTION **
CALLING THIS FUNCTION WILL MODIFY YOUR SOURCE CODE. KEEP BACKUPS.
    Replaces the current source code line with the given `value`, while keeping
the indentation level. If `as_comment` is True, then `value` is inserted
as a Python comment and pretty-printed.
Because inserting multi-line values changes the following line numbers,
don't mix multiple calls to `replace_me` with multi-line values.
"""
caller = getframeinfo(stack()[1][0])
if caller.filename == '<stdin>':
raise ValueError("Can't use `replace_me` module in interactive interpreter.")
with open(caller.filename, 'r+') as f:
lines = f.read().split('\n')
spaces, = re.match(r'^(\s*)', lines[caller.lineno-1]).groups()
if as_comment:
if not isinstance(value, str):
value = pformat(value, indent=4)
value_lines = value.rstrip().split('\n')
value_lines = (spaces + '# ' + l for l in value_lines)
else:
value_lines = (spaces + l for l in str(value).split('\n'))
lines[caller.lineno-1] = '\n'.join(value_lines)
f.seek(0)
f.truncate()
f.write('\n'.join(lines))
def insert_comment(comment):
"""
** ATTENTION **
CALLING THIS FUNCTION WILL MODIFY YOUR SOURCE CODE. KEEP BACKUPS.
    Inserts a Python comment in the next source code line. If a comment already
exists, it'll be replaced. The current indentation level will be maintained,
multi-line values will be inserted as multiple comments, and non-str values
will be pretty-printed.
Because inserting multi-line comments changes the following line numbers,
don't mix multiple calls to `insert_comment` with multi-line comments.
"""
caller = getframeinfo(stack()[1][0])
if caller.filename == '<stdin>':
raise ValueError("Can't use `replace_me` module in interactive interpreter.")
line_number = caller.lineno-1
comment_line = line_number + 1
with open(caller.filename, 'r+') as f:
lines = f.read().split('\n')
spaces, = re.match(r'^(\s*)', lines[line_number]).groups()
while comment_line < len(lines) and lines[comment_line].startswith(spaces + '#'):
lines.pop(comment_line)
if not isinstance(comment, str):
comment = pformat(comment, indent=4)
comment_lines = [spaces + '# ' + l for l in comment.rstrip().split('\n')]
lines = lines[:comment_line] + comment_lines + lines[comment_line:]
f.seek(0)
f.truncate()
f.write('\n'.join(lines))
if __name__ == '__main__':
# If you run this program, the following examples will change.
# These two lines will become the same:
# Hello World
replace_me("Hello World", as_comment=True)
# Code generation. Creates a hard coded list of 100 numbers.
replace_me('numbers = ' + str(list(range(100))))
import random
# The next comment will be replaced with a random number.
insert_comment(random.randint(1, 10))
# ??
# Pseudo-quine, replaces the line with itself.
quine = 'replace_me(quine)'
    replace_me(quine)

# --- source: /replace_me-0.1.4.tar.gz/replace_me-0.1.4/replace_me.py ---
import base64
import re
import uuid
from typing import List, Callable, Dict, TypedDict, Any
class FunctionArguments(TypedDict, total=False):
args: List[Any]
kwargs: Dict[str, Any]
class Assignment(FunctionArguments, total=True):
name: str
type: str
replace_pattern = re.compile(r"\${[A-Za-z_][A-Za-z0-9_]*}")
data_retrievers: Dict[str, Callable[..., str]] = {}
def retriever(func: Callable[..., str]):
name = func.__name__
assert name.startswith("retriever_")
name = name[len("retriever_"):]
assert name not in data_retrievers, "no duplicate retriever names!"
data_retrievers[name] = func
return func
@retriever
def retriever_identity(s: str) -> str:
return s
@retriever
def retriever_base64uuid4() -> str:
return base64.urlsafe_b64encode(uuid.uuid4().bytes).strip(b"=").decode()
@retriever
def retriever_localfile(*args, **kwargs):
with open(*args, mode='r', **kwargs) as f:
return f.read()
@retriever
def retriever_fsspec(*args, **kwargs):
import fsspec
with fsspec.open(*args, **kwargs, mode='r').open() as f:
return f.read()
@retriever
def retriever_awssecret(region_name, secret_id):
import boto3
return (
boto3.client("secretsmanager", region_name=region_name)
.get_secret_value(SecretId=secret_id)
['SecretString']
)
@retriever
def retriever_env(name):
import os
res = os.getenv(name)
assert res is not None
return res
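# Illustrative sketch (not part of the original module): any function named
# `retriever_<type>` and decorated with @retriever is registered under
# "<type>" and can then be referenced from an assignment such as
# {"name": "host", "type": "hostname"}. The retriever below is hypothetical
# and only demonstrates the registration mechanism:
#
#     @retriever
#     def retriever_hostname() -> str:
#         import socket
#         return socket.gethostname()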
class Replacer:
def __init__(
self,
assignments: List[Assignment],
allow_missing: bool=False
) -> None:
"""
An object for replacing strings with other strings in json-like objects
Assignments is a list of dictionaries, each containing a `name`, a
`type`, and optionally `args`, `kwargs` and `default`.
If `allow_missing` is `False` (default), and a string to be replaced
has a "${name}" with no assignment to "name", a KeyError exception will
be thrown.
There are currently 6 implemented types:
- identity: returns the argument passed to it.
- localfile: passes the `args` and `kwargs` to `open` and then reads
the file object. The mode is always 'r'.
- fsspec: passes the `args` and `kwargs` to `fsspec.open`, opens
that, and reads. The mode is always 'r'. Requires fsspec to be
installed. fsspec has multiple protocols installed, e.g.
http(s), (s)ftp and zip. This can also be used for data on S3,
if s3fs is installed.
- awssecret: takes two arguments: `region_name` and `secret_id`.
Uses boto to call secretsmanager, and returns the returned
`SecretString`.
- env: takes a single `name` argument, and looks for this
environment variable. If the environment variable doesn't exist
and no default value was passed, an AssertionError will be
raised.
        - base64uuid4: the base64 (the url safe "-_" variant) of a
`uuid.uuid4` call. Use this to create a unique id that can be
used in multiple derived replacements.
Example:
>>> Replacer([{
... "name": "name",
... "type": "identity",
... "args": ["World"]
... }])("Hello, ${name}!")
        'Hello, World!'
There can be dependencies between the assignments. They are resolved
linearly using the list order:
>>> Replacer([
... {
... "name": "name",
... "type": "identity",
... "args": ["World"]
... },
... {
... "name": "greeting",
... "type": "identity",
... "args": ["Hello, ${name}!"]
... }
... ])("${greeting}")
        'Hello, World!'
"""
self.variables: Dict[str, str] = {}
self.allow_missing = allow_missing
for assignment in assignments:
assignment = self.replace(assignment)
variable = assignment['name']
data_retriever = data_retrievers[assignment['type']]
try:
self.variables[variable] = data_retriever(
*assignment.get('args', []),
**assignment.get('kwargs', {})
)
except KeyboardInterrupt:
raise
            except Exception:
if "default" in assignment:
self.variables[variable] = assignment["default"]
else:
raise
def replace(self, s):
if isinstance(s, dict):
return {self.replace(k): self.replace(v) for k, v in s.items()}
elif isinstance(s, (list, tuple)):
return [self.replace(v) for v in s]
elif not isinstance(s, str):
return s
names = set(n[2:-1] for n in re.findall(replace_pattern, s))
for name in names:
if self.allow_missing and name not in self.variables:
continue
s = s.replace(f"${{{name}}}", self.variables[name])
return s
__call__ = replace
tests: List[Callable[[], None]] = []
def test(func: Callable[[], None]) -> Callable[[], None]:
tests.append(func)
return func
def runtests():
for test in tests:
test()
@test
def test_empty():
assert Replacer([])("hello") == "hello"
@test
def test_allow_missing():
try:
Replacer([])("${hello}")
except KeyError:
pass
else:
raise Exception("allow_missing incorrect behavior")
assert Replacer([], allow_missing=True)("${hello}") == "${hello}"
@test
def test_identity_args():
replacer = Replacer([{
"name": "hello",
"type": "identity",
"args": ["world"]
}])
assert replacer("hello") == "hello"
assert replacer("${hello}") == "world"
@test
def test_identity_kwargs():
replacer = Replacer([{
"name": "hello",
"type": "identity",
"kwargs": {"s": "world"}
}])
assert replacer("hello") == "hello"
assert replacer("${hello}") == "world"
@test
def test_dependency():
replacer = Replacer([
{
"name": "name",
"type": "identity",
"args": ["World"]
},
{
"name": "greeting",
"type": "identity",
"args": ["Hello, ${name}!"]
}
])
assert replacer("${greeting}") == "Hello, World!"
@test
def test_localfile():
import tempfile
with tempfile.NamedTemporaryFile('w') as f:
f.write("Hello, World!")
f.flush()
replacer = Replacer([
{
"name": "a",
"type": "localfile",
"args": [f.name]
}
])
assert replacer("${a}") == "Hello, World!"
@test
def test_fsspec():
import tempfile
with tempfile.NamedTemporaryFile('w') as f:
f.write("Hello, World!")
f.flush()
replacer = Replacer([
{
"name": "a",
"type": "fsspec",
"args": [f.name]
}
])
assert replacer("${a}") == "Hello, World!"
@test
def test_fsspec_google():
replacer = Replacer([
{
"name": "a",
"type": "fsspec",
"args": ["https://www.google.com"]
}
])
assert "google" in replacer("${a}")
@test
def test_default():
    replacer = Replacer([{
        "name": "name",
        "type": "localfile",
        "args": ["/best/path/ever/yo"],
        "default": "World"
    }])
assert replacer("Hello, ${name}!") == "Hello, World!"
@test
def test_base64uuid4():
replacer = Replacer([{
"name": "myuuid",
"type": "base64uuid4"
}])
assert len(replacer("${myuuid}")) == 22
@test
def test_env():
import os
os.environ["shimi"] = "Hello"
replacer = Replacer([
{
"name": "greeting",
"type": "env",
"args": ["shimi"]
},
{
"name": "name",
"type": "env",
"args": ["noshimi"],
"default": "World"
}
])
assert replacer("${greeting}, ${name}!") == "Hello, World!"
@test
def test_no_env():
try:
Replacer([
{
"name": "a",
"type": "env",
"args": ["noshimi"]
}
])
except AssertionError:
pass
else:
raise Exception("missing env didn't raise")
if __name__ == "__main__":
    runtests()

# --- source: /replacements-1.0-py3-none-any.whl/replacements.py ---
from typing import List
import inflect
from spacy.tokens import Span
inflector = inflect.engine()
class ReplacyPipelineOrderError(RuntimeError):
pass
class ArticleAgreer:
def __init__(self, name="ArticleAgreer"):
self.name = name
@staticmethod
def _remove_article_prefix_with_whitespace(text: str):
if not isinstance(text, str):
            raise ReplacyPipelineOrderError(
                "ArticleAgreer replaCy component must be added after joiner "
                "component in order to operate on text suggestions"
            )
if text.startswith("a "):
return text[2:]
elif text.startswith("an "):
return text[3:]
else:
return text
@staticmethod
def fix_double_article(s: Span) -> Span:
potential_problem = (
s.doc[s.start - 1].lower_ in ["a", "an"] if s.start > 0 else False
)
if potential_problem:
new_suggestions = [
ArticleAgreer._remove_article_prefix_with_whitespace(suggestion)
for suggestion in s._.suggestions
]
s._.suggestions = new_suggestions
return s
@staticmethod
def fix_a_an(s: Span) -> Span:
"""
Always call after fix_double_article, so we can assume
that suggestion starts with a/an xor suggestion is preceded by a/an
"""
a_an_before_suggestion = (
s.doc[s.start - 1].lower_ in ["a", "an"] if s.start > 0 else False
)
suggestions_starting_with_a_an = []
for i, suggestion in enumerate(s._.suggestions):
if suggestion.startswith("a ") or suggestion.startswith("an "):
suggestions_starting_with_a_an.append(i)
if a_an_before_suggestion:
update_span = True
# need to make the new span extend 1 back to catch the a/an
# then modify each suggestion
new_span = s.doc[s.start - 1 : s.end]
for k in s._.span_extensions.keys():
setattr(new_span._, k, getattr(s._, k))
new_span._.suggestions = [
inflector.a(suggestion) for suggestion in s._.suggestions
]
if len(new_span._.suggestions):
orig_and_corr_start_with_a = (
new_span.text[:2] == "a " and new_span._.suggestions[0][:2] == "a "
)
orig_and_corr_start_with_an = (
new_span.text[:3] == "an "
and new_span._.suggestions[0][:3] == "an "
)
same_article = orig_and_corr_start_with_a or orig_and_corr_start_with_an
if same_article:
update_span = False
s = new_span if update_span else s
elif len(suggestions_starting_with_a_an):
new_suggestions = [
inflector.a(suggestion) for suggestion in s._.suggestions
]
s._.suggestions = new_suggestions
return s
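    # Hedged illustration of the inflect behaviour relied on above (typical
    # return values of that library, not asserted by this module):
    #
    #     >>> inflector.a("apple")
    #     'an apple'
    #     >>> inflector.a("banana")
    #     'a banana'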
@staticmethod
def fix(s: Span) -> Span:
s = ArticleAgreer.fix_double_article(s)
s = ArticleAgreer.fix_a_an(s)
return s
def __call__(self, spans: List[Span]) -> List[Span]:
        return [self.fix(s) for s in spans]

# --- source: /replacy_article_agreer-0.2.2-py3-none-any.whl/replacy_article_agreer/article.py ---
from typing import List
from spacy.tokens import Span
class ReplacyPipelineOrderError(RuntimeError):
pass
class ESpan(Span):
@staticmethod
def create_instance(span: Span):
espan = ESpan(
span.doc,
span.start,
span.end,
span.label,
span.vector,
span.vector_norm,
span.kb_id,
)
espan.start_character = espan.start_char
espan.end_character = espan.end_char
espan.fixed_text = espan.text
espan.original_text = espan.text
if Span.has_extension("suggestions"):
espan.original_suggestions = span._.suggestions
espan.is_deletion = len(span._.suggestions) and span._.suggestions[0] == ""
else:
espan.original_suggestions = []
espan.is_deletion = False
return espan
def __getattribute__(self, item):
if item == "start_char" and hasattr(self, "start_character"):
return self.start_character
elif item == "end_char" and hasattr(self, "end_character"):
return self.end_character
elif item == "text" and hasattr(self, "fixed_text"):
return self.fixed_text
else:
return object.__getattribute__(self, item)
def __setattr__(self, key, value):
if key == "start_char":
self.start_character = value
self.fixed_text = self.doc.text[self.start_character : self.end_character]
elif key == "end_char":
self.end_character = value
self.fixed_text = self.doc.text[self.start_character : self.end_character]
else:
object.__setattr__(self, key, value)
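# Hedged illustration of the ESpan offset trick above: reassigning start_char
# or end_char recomputes `text` from the underlying doc, e.g.
#
#     espan = ESpan.create_instance(span)   # suppose span covers "the big dog"
#     espan.start_char += 4                 # espan.text is now "big dog"
#
# (the concrete strings here are hypothetical, not taken from this module)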
class IssueBoundary:
def __init__(self, name="IssueBoundary"):
self.name = name
@staticmethod
def _would_cause_lowercase_first_letter(span: Span) -> bool:
return (
span.start_char == 0
and len(span._.suggestions) > 0
and span._.suggestions[0] == ""
)
@staticmethod
def _would_cause_double_space(span: Span) -> bool:
return (
span.start_char > 0
and span.end_char < len(span.doc.text)
and len(span._.suggestions) > 0
and span.doc.text[span.start_char - 1] == " "
and span.doc.text[span.end_char] == " "
and span._.suggestions[0] in ["", ","]
)
@staticmethod
def _would_cause_space_at_start(span: Span) -> bool:
return (
span.start_char == 0
and span.end_char < len(span.doc.text)
and span.doc.text[span.end_char] == " "
and len(span._.suggestions) > 0
and span._.suggestions[0] == ""
)
@staticmethod
def _would_cause_double_comma(span: Span) -> bool:
return (
(
(span.start_char > 0 and span.doc.text[span.start_char - 1] == ",")
or (
span.start_char > 1
and span.doc.text[span.start_char - 1] == " "
and span.doc.text[span.start_char - 2] == ","
)
)
and (
span.end_char < len(span.doc.text)
and span.doc.text[span.end_char] == ","
)
and len(span._.suggestions) > 0
and span._.suggestions[0] == ""
)
@staticmethod
def _would_cause_comma_start(span: Span) -> bool:
return (
span.start_char == 0
and span.end_char < len(span.doc.text)
and span.doc.text[span.end_char] == ","
and len(span._.suggestions) > 0
and span._.suggestions[0] == ""
)
@staticmethod
def _is_comma_replacement_that_could_be_deletion(span: Span) -> bool:
return (
(span.text[0] == "," or span.text[-1] == ",")
and len(span._.suggestions) > 0
and span._.suggestions[0] == ","
)
@staticmethod
def _comma_replacement_to_deletion(span: Span):
"""
        Rather than suggest replacing 'some long string,' with ',', it looks
        nicer in the front end to replace 'some long string' with '', because
        the empty string is a magic "delete" replacement. This method figures
        out how to move the span boundaries to make that happen.
"""
first_char = span.doc.text[span.start_char]
second_char = span.doc.text[span.start_char + 1]
second_to_last_char = span.doc.text[span.end_char - 2]
last_char = span.doc.text[span.end_char - 1]
if first_char == ",":
span.start_char += 1
elif first_char == " " and second_char == ",":
span.start_char += 2
elif second_to_last_char == "," and last_char == " ":
span.end_char -= 2
elif last_char == ",":
span.end_char -= 1
span._.suggestions.remove(",")
if "" not in span._.suggestions:
span._.suggestions.insert(0, "")
return span
@staticmethod
def _would_cause_parenthesis_space(span: Span) -> bool:
return (
span.start_char > 0
and span.end_char < len(span.doc.text)
and len(span._.suggestions) > 0
and span.doc.text[span.start_char - 1] == "("
and span.doc.text[span.end_char] == " "
and span._.suggestions[0] in [""]
)
@staticmethod
def _would_cause_space_parenthesis(span: Span) -> bool:
return (
span.start_char > 0
and span.end_char < len(span.doc.text)
and len(span._.suggestions) > 0
and span.doc.text[span.start_char - 1] == " "
and span.doc.text[span.end_char] == ")"
and span._.suggestions[0] in [""]
)
@staticmethod
def _sanity_check(span: Span) -> Span:
if span[0].is_sent_start or span.doc.text[span.start_char - 1] == "(":
# the language model gets confused and sometimes thinks
# that it makes sense to start a sentence (or a parenthetical) with a comma
# don't allow this
if "," in span._.suggestions:
span._.suggestions.remove(",")
            # now maybe no suggestions are left,
# so add DELETE operation (i.e. suggest replacement with "")
if not len(span._.suggestions):
span._.suggestions.append("")
return span
@staticmethod
def _drop_comma_when_empty_top(span: Span) -> Span:
if (
len(span._.suggestions) > 0
and span._.suggestions[0] == ""
and "," in span._.suggestions
):
span._.suggestions.remove(",")
return span
@staticmethod
def starts_with_contraction(s: str) -> bool:
contractions_list = ["n't", "'ll", "'s", "'d", "'re", "'m"]
contractions_list = contractions_list + [
c.replace("'", "’") for c in contractions_list
]
return any(s.startswith(c) for c in contractions_list)
@staticmethod
def extend_span_with_contraction(spans: List[Span]) -> List[Span]:
result = []
for span in spans:
if len(span.doc) > span.start + 1:
next_token = span.doc[span.start + 1]
if IssueBoundary.starts_with_contraction(next_token.text):
s = span.doc[span.start: span.start + 2]
s._.suggestions = [sug + next_token.text for sug in span._.suggestions]
result.append(s)
else:
result.append(span)
else:
result.append(span)
return result
def __call__(self, spans: List[Span]) -> List[Span]:
result = []
for span in spans:
span = ESpan.create_instance(span)
span = IssueBoundary._sanity_check(span)
if IssueBoundary._would_cause_double_comma(span):
span.end_char += 1
if IssueBoundary._would_cause_comma_start(span):
span.end_char += 1
if IssueBoundary._would_cause_double_space(span):
# double space issue, extending issue back one character
span.start_char -= 1
elif IssueBoundary._would_cause_space_at_start(span):
# space at start, extending issue forward one character
span.end_char += 1
if IssueBoundary._would_cause_lowercase_first_letter(span):
# casing issue, extending issue forward one word and uppercasing that word
doc_text_without_issue = span.doc.text[span.end_char :]
first_space_index = doc_text_without_issue.find(" ")
if first_space_index > 0:
replacement = doc_text_without_issue[0:first_space_index]
span.end_char += first_space_index
span._.suggestions = [replacement[0].upper() + replacement[1:]]
# order matters, must come after _would_cause_lowercase_first_letter
if IssueBoundary._is_comma_replacement_that_could_be_deletion(span):
span = IssueBoundary._comma_replacement_to_deletion(span)
# fix parentheses
if self._would_cause_parenthesis_space(span):
span.end_char += 1
if self._would_cause_space_parenthesis(span):
span.start_char -= 1
# only "" or "," can ever be right; so drop comma suggestion, when first one is empty
span = IssueBoundary._drop_comma_when_empty_top(span)
result.append(span)
        return sorted(result, key=lambda span: span.start)

# --- source: /replacy_issue_boundary-0.2.12-py3-none-any.whl/replacy_issue_boundary/boundary.py ---
import numpy as np
import xarray as xr
from .core import atleast_kd
from scipy.ndimage.filters import gaussian_filter
from statsmodels.api import GLM, families
def estimate_movement_std(position):
'''Estimates the movement standard deviation based on position.
WARNING: Need to use on original position, not interpolated position.
Parameters
----------
position : ndarray, shape (n_time, n_position_dim)
Returns
-------
movement_std : ndarray, shape (n_position_dim,)
'''
position = atleast_kd(position, 2)
is_nan = np.any(np.isnan(position), axis=1)
position = position[~is_nan]
movement_std = []
for p in position.T:
fit = GLM(p[:-1], p[1:], family=families.Gaussian()).fit()
movement_std.append(np.sqrt(fit.scale))
return np.array(movement_std)
def empirical_movement_transition_matrix(place, lagged_place, place_bin_edges,
replay_speed=20,
movement_std=0.5):
    '''Estimate the probability of the next position based on the movement
    data, given that the movement is sped up by `replay_speed`.
Place cell firing during a hippocampal replay event is a "sped-up"
version of place cell firing when the animal is actually moving.
Here we use the animal's actual movements to constrain which place
cell is likely to fire next.
Parameters
----------
place : array_like, shape (n_time,)
Linearized position of the animal over time
lagged_place : array_like, shape (n_time,)
        Linearized position of the previous time step
place_bin_edges : array_like, shape (n_bins,)
replay_speed : int, optional
How much the movement is sped-up during a replay event
Returns
-------
empirical_movement_transition_matrix : array_like,
shape=(n_bin_edges-1,
n_bin_edges-1)
'''
movement_bins, _, _ = np.histogram2d(
place, lagged_place,
bins=(place_bin_edges, place_bin_edges))
movement_bins = _fix_zero_bins(movement_bins)
movement_bins = _normalize_row_probability(movement_bins)
movement_bins = gaussian_filter(movement_bins, sigma=movement_std)
return np.linalg.matrix_power(movement_bins, replay_speed)
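# Hedged usage sketch with synthetic data (not part of the original module):
#
#     >>> rng = np.random.RandomState(0)
#     >>> position = np.cumsum(rng.randn(10000))
#     >>> edges = np.linspace(position.min(), position.max(), 31)
#     >>> transition = empirical_movement_transition_matrix(
#     ...     position[1:], position[:-1], edges,
#     ...     replay_speed=20, movement_std=0.5)
#     >>> transition.shape
#     (30, 30)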
def _normalize_row_probability(x):
'''Ensure the state transition matrix rows sum to 1
'''
return x / x.sum(axis=1, keepdims=True)
def _fix_zero_bins(movement_bins):
'''If there is no data observed for a column, set everything to 1 so
that it will have equal probability
'''
movement_bins[:, movement_bins.sum(axis=0) == 0] = 1
return movement_bins
def fit_state_transition(position_info, place_bin_edges, place_bin_centers,
replay_sequence_orders='Forward', replay_speed=20,
movement_std=0.5):
order_to_df_column = {'Forward': 'lagged_position',
'Reverse': 'future_position',
'Stay': 'position'}
if isinstance(replay_sequence_orders, str):
replay_sequence_orders = [replay_sequence_orders]
state_transition = []
state_names = []
for order in replay_sequence_orders:
column_name = order_to_df_column[order]
for condition, df in position_info.groupby('experimental_condition'):
state_names.append('-'.join((condition, order)))
state_transition.append(
empirical_movement_transition_matrix(
df.position, df[column_name], place_bin_edges,
replay_speed=replay_speed,
movement_std=movement_std))
state_transition = np.stack(state_transition)
return xr.DataArray(
state_transition,
dims=['state', 'position_t', 'position_t_1'],
coords=dict(
state=state_names,
position_t=place_bin_centers,
position_t_1=place_bin_centers),
        name='state_transition_probability')

# --- source: /replay_classification-0.6.1.tar.gz/replay_classification-0.6.1/replay_classification/state_transition.py ---
from sklearn.base import BaseEstimator
import numpy as np
from numba import jit
@jit(nogil=True, nopython=True, cache=True)
def _normal_pdf(x, mean=0, std_deviation=1):
'''Evaluate the normal probability density function at specified points.
Unlike the `scipy.stats.norm.pdf`, this function is not general and does
not do any sanity checking of the inputs. As a result it is a much faster
function, but you should be sure of your inputs before using.
This function only computes the one-dimensional pdf.
Parameters
----------
x : array_like
The normal probability function will be evaluated
mean : float or array_like, optional
std_deviation : float or array_like
Returns
-------
probability_density
The normal probability density function evaluated at `x`
'''
return (np.exp(-0.5 * ((x - mean) / std_deviation) ** 2) /
(np.sqrt(2.0 * np.pi) * std_deviation))
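# Hedged sanity check for the helper above (the standard normal density at
# its mean is 1 / sqrt(2 * pi)):
#
#     >>> round(float(_normal_pdf(0.0)), 5)
#     0.39894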
class IsotropicKernelDensity(BaseEstimator):
def __init__(self, bandwidth):
self.bandwidth = np.array(bandwidth)
def fit(self, X, y=None):
"""Fit the Kernel Density model on the data.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
"""
self.training_data = X
return self
def score_samples(self, X):
"""Evaluate the density model on the data.
Parameters
----------
X : array_like, shape (n_samples, n_features)
An array of points to query. Last dimension should match dimension
of training data (n_features).
Returns
-------
density : ndarray, shape (n_samples,)
The array of log(density) evaluations.
"""
bandwidth = self.bandwidth[-X.shape[1]:]
return np.log(np.mean(np.prod(_normal_pdf(
X[:, np.newaxis, :], mean=self.training_data[np.newaxis, ...],
std_deviation=bandwidth), axis=2), axis=1))
def score(self, X, y=None):
"""Compute the total log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : float
Total log-likelihood of the data in X.
"""
return np.sum(self.score_samples(X))
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
        Not implemented for this estimator; calling it raises NotImplementedError.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
random_state : int, RandomState instance or None. default to None
If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random
number generator; If None, the random number generator is the
RandomState instance used by `np.random`.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples.
"""
        raise NotImplementedError()

# --- source: /replay_classification-0.6.1.tar.gz/replay_classification-0.6.1/replay_classification/misc.py ---
from functools import wraps
from logging import getLogger
import numpy as np
logger = getLogger(__name__)
def filter(initial_conditions, state_transition, likelihood, bin_size):
'''Adaptive filter to iteratively calculate the posterior probability
of a state variable using past information.
Parameters
----------
initial_conditions : ndarray, shape (n_states, n_bins)
state_transition : ndarray, shape (n_states, n_bins, n_bins)
likelihood : ndarray, shape (n_time, n_states, n_bins)
bin_size : float
Returns
-------
results : dict
'''
likelihood = likelihood[..., np.newaxis]
n_time = likelihood.shape[0]
shape = (n_time, *initial_conditions.shape, 1)
posterior = np.zeros(shape)
prior = np.zeros(shape)
posterior[0] = initial_conditions.copy()[..., np.newaxis]
for time_ind in np.arange(1, n_time):
prior[time_ind] = predict_state(
posterior[time_ind - 1], state_transition)
posterior[time_ind] = update_posterior(
prior[time_ind], likelihood[time_ind], bin_size)
return {'posterior_density': posterior.squeeze(axis=-1),
'likelihood': likelihood.squeeze(axis=-1),
'prior': prior.squeeze(axis=-1)}
def smooth(filter_posterior, state_transition, bin_size):
'''Uses past and future information to estimate the state.
Parameters
----------
filter_posterior : ndarray, shape (n_time, n_bins)
state_transition : ndarray, shape (n_states, n_bins, n_bins)
bin_size : float
Return
------
results : dict
'''
filter_posterior = filter_posterior[..., np.newaxis]
smoother_posterior = np.zeros_like(filter_posterior)
smoother_posterior[-1] = filter_posterior[-1].copy()
smoother_prior = np.zeros_like(filter_posterior)
n_time = filter_posterior.shape[0]
for time_ind in np.arange(n_time - 2, -1, -1):
smoother_prior[time_ind] = predict_state(
filter_posterior[time_ind], state_transition)
smoother_posterior[time_ind] = update_backwards_posterior(
filter_posterior[time_ind], state_transition,
smoother_posterior[time_ind + 1], smoother_prior[time_ind],
bin_size)
return {'filter_posterior': filter_posterior.squeeze(axis=-1),
'posterior_density': smoother_posterior.squeeze(axis=-1),
'prior': smoother_prior.squeeze(axis=-1)}
def update_backwards_posterior(filter_posterior, state_transition,
smoother_posterior, prior, bin_size):
'''
Parameters
----------
filter_posterior : ndarray, shape (n_states, n_bins, 1)
state_transition : ndarray, shape (n_states, n_bins, n_bins)
smoother_posterior : ndarray, shape (n_states, n_bins, 1)
prior : ndarray, shape (n_states, n_bins, 1)
bin_size : float
Returns
-------
updated_posterior : ndarray, shape (n_states, n_bins)
'''
log_ratio = (np.log(smoother_posterior + np.spacing(1)) -
np.log(prior + np.spacing(1))).swapaxes(1, 2)
weights = np.exp(log_ratio) @ state_transition * bin_size
weights = weights.squeeze()[..., np.newaxis]
return normalize_to_probability(weights * filter_posterior, bin_size)
def update_posterior(prior, likelihood, bin_size):
    '''The posterior density given the prior state weighted by the
    observed instantaneous likelihood.
Parameters
----------
prior : ndarray, shape (n_states, n_bins, 1)
likelihood : ndarray, shape (n_states, n_bins, 1)
Returns
-------
updated_posterior : ndarray, shape (n_states, n_bins, 1)
'''
return normalize_to_probability(prior * likelihood, bin_size)
def normalize_to_probability(distribution, bin_size):
'''Ensure the distribution integrates to 1 so that it is a probability
distribution
'''
return distribution / np.nansum(distribution) / bin_size
def predict_state(posterior, state_transition):
'''The prior given the previous posterior density and a transition
matrix indicating the state at the next time step.
'''
return state_transition @ posterior
def scaled_likelihood(log_likelihood_func):
'''Converts a log likelihood to a scaled likelihood with its max value at
1.
Used primarily to keep the likelihood numerically stable because more
observations at a time point will lead to a smaller overall likelihood
    and this can exceed the floating point accuracy of a machine.
Parameters
----------
log_likelihood_func : function
Returns
-------
scaled_likelihood : function
'''
@wraps(log_likelihood_func)
def decorated_function(*args, **kwargs):
log_likelihood = log_likelihood_func(*args, **kwargs)
axis = tuple(range(log_likelihood.ndim)[1:])
return np.exp(log_likelihood - np.max(
log_likelihood, axis=axis, keepdims=True))
return decorated_function
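# Hedged numeric illustration of the scaling above: for log likelihoods of
# [-1000., -1001.] at one time point, the scaled values are
# exp([0., -1.]) ~= [1., 0.368], which stay representable even though
# exp(-1000.) itself underflows to 0 in float64.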
@scaled_likelihood
def combined_likelihood(data, log_likelihood_function=None,
likelihood_kwargs={}):
'''Applies likelihood function to each signal and returns their product
If there isn't a column dimension, just returns the likelihood.
Parameters
----------
data : ndarray, shape (n_signals, ...)
log_likelihood_function : function
Log Likelihood function to be applied to each signal.
The likelihood function must take data as its first argument.
All other arguments for the likelihood should be passed
via `likelihood_kwargs`
likelihood_kwargs : dict
Keyword arguments for the likelihood function
Returns
-------
likelihood : ndarray, shape (n_time, n_states, n_bins)
'''
try:
return np.sum(
log_likelihood_function(data, **likelihood_kwargs), axis=0)
except ValueError:
return log_likelihood_function(data, **likelihood_kwargs)
def get_bin_edges(position, n_bins=None, place_bin_size=None):
not_nan_position = position[~np.isnan(position)]
if place_bin_size is not None:
n_bins = (np.round(np.ceil(np.ptp(not_nan_position) / place_bin_size))
                ).astype(int)
return np.linspace(
np.min(not_nan_position), np.max(not_nan_position), n_bins + 1,
endpoint=True)
def get_bin_centers(bin_edges):
'''Given the outer-points of bins, find their center
'''
return bin_edges[:-1] + np.diff(bin_edges) / 2
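# Hedged example of the two binning helpers above:
#
#     >>> edges = get_bin_edges(np.array([0.0, 10.0]), n_bins=5)
#     >>> edges
#     array([ 0.,  2.,  4.,  6.,  8., 10.])
#     >>> get_bin_centers(edges)
#     array([1., 3., 5., 7., 9.])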
def atleast_kd(array, k):
'''
https://stackoverflow.com/questions/42516569/numpy-add-variable-number-of-dimensions-to-an-array
'''
new_shape = array.shape + (1,) * (k - array.ndim)
    return array.reshape(new_shape)

# --- source: /replay_classification-0.6.1.tar.gz/replay_classification-0.6.1/replay_classification/core.py ---
import numpy as np
from scipy.stats import norm, multivariate_normal
def simulate_time(n_samples, sampling_frequency):
return np.arange(n_samples) / sampling_frequency
def simulate_linear_distance(time, track_height):
return ((track_height / 2) * np.cos(2 * np.pi * time) +
(track_height / 2))
def get_trajectory_direction(linear_distance):
is_inbound = np.insert(np.diff(linear_distance) < 0, 0, False)
return np.where(is_inbound, 'Inbound', 'Outbound'), is_inbound
def simulate_poisson_spikes(rate, sampling_frequency):
return 1.0 * (np.random.poisson(rate / sampling_frequency) > 0)
def create_place_field(
place_field_mean, linear_distance, sampling_frequency, is_condition=None,
place_field_std_deviation=12.5, max_firing_rate=20,
baseline_firing_rate=0.5):
if is_condition is None:
is_condition = np.ones_like(linear_distance, dtype=bool)
field_firing_rate = norm(
place_field_mean, place_field_std_deviation).pdf(linear_distance)
field_firing_rate /= field_firing_rate.max()
field_firing_rate[~is_condition] = 0
return baseline_firing_rate + max_firing_rate * field_firing_rate
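# Hedged usage sketch chaining the simulation helpers above (all values are
# illustrative assumptions):
#
#     >>> time = simulate_time(1500, sampling_frequency=1500)        # 1 second
#     >>> linear_distance = simulate_linear_distance(time, track_height=180)
#     >>> rate = create_place_field(90, linear_distance, 1500)
#     >>> rate.shape
#     (1500,)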
def generate_marks(spikes, mark_mean, mark_std_deviation, n_marks=4):
    '''Generate mark vectors for each spike from a multivariate normal.'''
spikes[spikes == 0] = np.nan
marks = multivariate_normal(
mean=[mark_mean] * n_marks,
cov=[mark_std_deviation] * n_marks).rvs(size=(spikes.size,))
return marks * spikes[:, np.newaxis]
def simulate_multiunit(
place_field_means, mark_means, linear_distance, sampling_frequency,
mark_std_deviation=20, n_marks=4, **kwargs):
'''Simulate a single tetrode assuming each tetrode picks up several
neurons with different place fields with distinguishing marks.'''
unit = []
for place_field_mean, mark_mean in zip(place_field_means, mark_means):
rate = create_place_field(
place_field_mean, linear_distance, sampling_frequency, **kwargs)
spikes = simulate_poisson_spikes(rate, sampling_frequency)
marks = generate_marks(
spikes, mark_mean, mark_std_deviation, n_marks=n_marks)
unit.append(marks)
    return np.nanmean(np.stack(unit, axis=0), axis=0)

# --- source: /replay_classification-0.6.1.tar.gz/replay_classification-0.6.1/replay_classification/simulate.py ---
from logging import getLogger
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import xarray as xr
from sklearn.base import BaseEstimator
import joblib
from sklearn.neighbors import KernelDensity
import holoviews as hv
from .core import (combined_likelihood, filter, get_bin_centers, get_bin_edges,
smooth)
from .initial_conditions import (fit_initial_conditions,
inbound_outbound_initial_conditions,
uniform_initial_conditions)
from .multiunit import (fit_multiunit_observation_model,
poisson_mark_log_likelihood)
from .sorted_spikes import fit_spike_observation_model, poisson_log_likelihood
from .state_transition import fit_state_transition
logger = getLogger(__name__)
_DEFAULT_REPLAY_ORDERS = ['Forward', 'Reverse', 'Stay']
_INITIAL_CONDITIONS_MAP = {
'Inbound-Outbound': inbound_outbound_initial_conditions,
'Uniform': uniform_initial_conditions,
'Empirical': fit_initial_conditions
}
_DEFAULT_MULTIUNIT_MODEL_KWARGS = dict(bandwidth=10, leaf_size=1000,
rtol=1E-3)
hv.extension('bokeh', 'matplotlib')
class _DecoderBase(BaseEstimator):
def __init__(
self, n_place_bins=None, place_bin_size=2.8,
replay_speedup_factor=20,
replay_orders=_DEFAULT_REPLAY_ORDERS,
time_bin_size=1, confidence_threshold=0.8, movement_std=None):
'''
Attributes
----------
n_place_bins : None or int, optional
place_bin_size : None or int, optional
replay_speedup_factor : int, optional
state_names : list of str, optional
observation_state_order : list of str, optional
replay_orders : list of str, optional
time_bin_size : float, optional
confidence_threshold : float, optional
movement_std : None or float, optional
'''
if n_place_bins is not None and place_bin_size is not None:
            logger.warning('Both place_bin_size and n_place_bins are set. Using'
' place_bin_size.')
self.n_place_bins = n_place_bins
self.place_bin_size = place_bin_size
self.replay_speedup_factor = replay_speedup_factor
self.replay_orders = replay_orders
self.time_bin_size = time_bin_size
self.confidence_threshold = confidence_threshold
self.movement_std = movement_std
def fit_place_bins(self, position, place_bin_edges=None):
if place_bin_edges is not None:
self.place_bin_edges = place_bin_edges
else:
self.place_bin_edges = get_bin_edges(
position, self.n_place_bins, self.place_bin_size)
self.place_bin_centers = get_bin_centers(self.place_bin_edges)
def fit_initial_conditions(self, initial_conditions='Inbound-Outbound',
position=None, is_training=None,
experimental_condition=None, trial_id=None):
'''
Parameters
----------
experimental_condition : ndarray, shape (n_time,)
initial_conditions : str or dict of ndarray, optional
'''
df = pd.DataFrame(
{'position': position,
'is_training': is_training,
'experimental_condition': experimental_condition,
'trial_id': trial_id})
df['lagged_position'] = df.position.shift(1)
df = df.loc[df.is_training].dropna()
if isinstance(initial_conditions, str):
self.initial_conditions_ = (
_INITIAL_CONDITIONS_MAP[initial_conditions](
df, self.place_bin_edges, self.place_bin_centers,
self.replay_orders))
else:
self.initial_conditions_ = xr.DataArray(
initial_conditions, dims=['state', 'position'],
coords=dict(position=self.place_bin_centers,
state=self.state_names),
name='probability')
def fit_state_transition(self, position, is_training,
experimental_condition,
replay_speedup_factor=20):
logger.info('Fitting state transition model...')
self.replay_speedup_factor = replay_speedup_factor
df = pd.DataFrame({'position': position,
'is_training': is_training,
'experimental_condition': experimental_condition})
df['lagged_position'] = df.position.shift(1)
df['future_position'] = df.position.shift(-1)
df = df.loc[df.is_training].dropna()
self.state_transition_ = fit_state_transition(
df, self.place_bin_edges, self.place_bin_centers,
replay_sequence_orders=self.replay_orders,
replay_speed=self.replay_speedup_factor,
movement_std=self.movement_std)
observation_order = [state.split('-')[0] for state in
self.state_transition_.state.values.tolist()]
self.observation_state_order = observation_order
self.state_names = self.state_transition_.state.values
def save_model(self, filename='model.pkl'):
joblib.dump(self, filename)
@staticmethod
def load_model(filename='model.pkl'):
return joblib.load(filename)
def plot_initial_conditions(self, **kwargs):
return self.initial_conditions_.plot(x='position', col='state',
**kwargs)
def plot_state_transition_model(self, **kwargs):
try:
return (self.state_transition_
.plot(x='position_t', y='position_t_1', col='state',
robust=True, **kwargs))
except ValueError:
return (self.state_transition_
.plot(x='position_t', y='position_t_1',
robust=True, **kwargs))
def plot_observation_model(self, sampling_frequency=1):
raise NotImplementedError
class ClusterlessDecoder(_DecoderBase):
'''
Attributes
----------
n_place_bins : None or int, optional
place_bin_size : None or int, optional
replay_speedup_factor : int, optional
state_names : list of str, optional
observation_state_order : list of str, optional
replay_orders : list of str, optional
time_bin_size : float, optional
confidence_threshold : float, optional
'''
def __init__(
self, n_place_bins=None, place_bin_size=1,
replay_speedup_factor=20,
replay_orders=_DEFAULT_REPLAY_ORDERS,
time_bin_size=1, confidence_threshold=0.8, movement_std=0.5,
model=KernelDensity, model_kwargs=_DEFAULT_MULTIUNIT_MODEL_KWARGS):
super().__init__(n_place_bins, place_bin_size,
replay_speedup_factor,
replay_orders,
time_bin_size, confidence_threshold,
movement_std)
self.model = model
self.model_kwargs = model_kwargs
def fit(self, position, multiunits, experimental_condition=None,
is_training=None, trial_id=None,
initial_conditions='Uniform', place_bin_edges=None):
'''Fits the decoder model for each experimental_condition
and replay order.
Relates the position and multiunits to the experimental_condition.
Parameters
----------
position : ndarray, shape (n_time,)
multiunits : ndarray, shape (n_signals, n_time, n_features)
experimental_condition : None or ndarray, shape (n_time,)
is_training : None or ndarray, bool, shape (n_time, ), optional
trial_id : None or ndarray, shape (n_time,), optional
initial_conditions : str or ndarray, shape (n_states, n_place_bin),
optional
('Inbound-Outbound' | 'Uniform' | 'Empirical')
place_bin_edges : None or ndarray, optional
Returns
-------
self : class instance
'''
position = np.asarray(position.copy()).squeeze()
multiunits = np.asarray(multiunits.copy())
if is_training is None:
            is_training = np.ones_like(position, dtype=bool)
else:
is_training = np.asarray(is_training).squeeze()
if trial_id is None:
            trial_id = np.ones_like(position, dtype=bool)
else:
trial_id = np.asarray(trial_id).squeeze()
if experimental_condition is None:
experimental_condition = np.full_like(
position, 'All', dtype=object)
else:
experimental_condition = np.asarray(
experimental_condition.copy()).squeeze()
self.fit_place_bins(position, place_bin_edges)
self.fit_state_transition(
position, is_training, experimental_condition,
self.replay_speedup_factor)
self.fit_initial_conditions(initial_conditions, position,
is_training, experimental_condition,
trial_id)
logger.info('Fitting observation model...')
joint_mark_intensity_functions, ground_process_intensity = (
fit_multiunit_observation_model(
position[is_training], experimental_condition[is_training],
multiunits[:, is_training, :], self.place_bin_centers,
self.model, self.model_kwargs, self.observation_state_order))
likelihood_kwargs = dict(
joint_mark_intensity_functions=joint_mark_intensity_functions,
ground_process_intensity=ground_process_intensity,
time_bin_size=self.time_bin_size)
self.combined_likelihood_kwargs_ = dict(
log_likelihood_function=poisson_mark_log_likelihood,
likelihood_kwargs=likelihood_kwargs)
return self
def predict(self, multiunits, time=None, use_smoother=True):
'''Predicts the state from multiunits.
Parameters
----------
multiunits : ndarray, shape (n_signals, n_time, n_marks)
If spike does not occur, the row must be marked with np.nan.
time : ndarray, optional, shape (n_time,)
use_smoother : bool
Use future information to compute state
Returns
-------
DecodingResults : DecodingResults class instance
'''
multiunits = np.asarray(multiunits.copy())
likelihood = combined_likelihood(
multiunits, **self.combined_likelihood_kwargs_)
place_bin_size = np.diff(self.place_bin_edges)[0]
state_transition = self.state_transition_.values
results = filter(
initial_conditions=self.initial_conditions_.values,
state_transition=state_transition,
likelihood=likelihood,
bin_size=place_bin_size)
if use_smoother:
results = smooth(
filter_posterior=results['posterior_density'],
state_transition=self.state_transition_.values,
bin_size=place_bin_size)
results['likelihood'] = likelihood
coords = dict(
time=(time if time is not None
else np.arange(results['posterior_density'].shape[0])),
position=self.place_bin_centers,
state=self.state_names
)
DIMS = ['time', 'state', 'position']
results = xr.Dataset(
{key: (DIMS, value) for key, value in results.items()},
coords=coords)
return DecodingResults(
results=results,
spikes=multiunits,
place_bin_size=place_bin_size,
confidence_threshold=self.confidence_threshold,
)
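# Hedged end-to-end sketch of the clusterless decoder (array shapes, the bin
# size, and the use of random data are assumptions for illustration, not
# taken from this module):
#
#     >>> position = np.random.rand(5000) * 180               # (n_time,)
#     >>> multiunits = np.full((4, 5000, 4), np.nan)           # (n_signals, n_time, n_marks)
#     >>> spike_times = np.random.rand(4, 5000) < 0.01         # ~1% spike probability
#     >>> multiunits[spike_times] = np.random.randn(spike_times.sum(), 4)
#     >>> decoder = ClusterlessDecoder(place_bin_size=5).fit(position, multiunits)
#     >>> results = decoder.predict(multiunits[:, :100, :])
#     >>> results.state_probability().shape                    # (n_time, n_states)
#     (100, 3)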
class SortedSpikeDecoder(_DecoderBase):
def __init__(
self, n_place_bins=None, place_bin_size=1,
replay_speedup_factor=20,
replay_orders=_DEFAULT_REPLAY_ORDERS,
time_bin_size=1, confidence_threshold=0.8, movement_std=0.5,
knot_spacing=15, spike_model_penalty=1E-1):
'''
Attributes
----------
n_place_bins : None or int, optional
place_bin_size : None or int, optional
replay_speedup_factor : int, optional
replay_orders : list of str, optional
time_bin_size : float, optional
confidence_threshold : float, optional
knot_spacing : float, optional
spike_model_penalty : float, optional
'''
super().__init__(n_place_bins, place_bin_size,
replay_speedup_factor,
replay_orders,
time_bin_size, confidence_threshold,
movement_std)
self.knot_spacing = knot_spacing
self.spike_model_penalty = spike_model_penalty
def fit(self, position, spikes, experimental_condition=None,
is_training=None, trial_id=None,
initial_conditions='Uniform', place_bin_edges=None):
'''Fits the decoder model by state
Relates the position and spikes to the state.
Parameters
----------
position : ndarray, shape (n_time,)
spikes : ndarray, shape (n_time, n_neurons)
experimental_condition : None or ndarray, shape (n_time,)
is_training : None or ndarray, bool, shape (n_time, ), optional
trial_id : None or ndarray, shape (n_time,), optional
initial_conditions : str or ndarray, shape (n_states, n_place_bin),
optional
('Inbound-Outbound' | 'Uniform' | 'Empirical')
place_bin_edges : None or ndarray, optional
Returns
-------
self : class instance
'''
position = np.asarray(position.copy()).squeeze()
spikes = np.asarray(spikes.copy())
if is_training is None:
            is_training = np.ones_like(position, dtype=bool)
else:
is_training = np.asarray(is_training).squeeze()
if trial_id is None:
            trial_id = np.ones_like(position, dtype=bool)
else:
trial_id = np.asarray(trial_id).squeeze()
if experimental_condition is None:
experimental_condition = np.full_like(
position, 'All', dtype=object)
else:
experimental_condition = np.asarray(
experimental_condition.copy()).squeeze()
self.fit_place_bins(position, place_bin_edges)
self.fit_state_transition(
position, is_training, experimental_condition,
self.replay_speedup_factor)
logger.info('Fitting observation model...')
conditional_intensity = fit_spike_observation_model(
position[is_training], experimental_condition[is_training],
spikes[is_training], self.place_bin_centers,
self.knot_spacing, self.observation_state_order,
self.spike_model_penalty)
self.fit_initial_conditions(initial_conditions, position,
is_training, experimental_condition,
trial_id)
self.combined_likelihood_kwargs_ = dict(
log_likelihood_function=poisson_log_likelihood,
likelihood_kwargs=dict(conditional_intensity=conditional_intensity,
time_bin_size=self.time_bin_size)
)
return self
def plot_observation_model(self, sampling_frequency=1):
'''
Parmameters
-----------
sampling_frequency : float, optional
'''
conditional_intensity = self.combined_likelihood_kwargs_[
'likelihood_kwargs']['conditional_intensity'].squeeze(axis=-3)
coords = dict(
state=self.state_names,
position=self.place_bin_centers)
conditional_intensity = xr.DataArray(
conditional_intensity * sampling_frequency,
coords=coords,
dims=['signal', 'state', 'position'],
name='firing_rate').to_dataframe().reset_index()
g = sns.FacetGrid(
conditional_intensity,
row='signal', col='state')
return g.map(plt.plot, 'position', 'firing_rate')
def predict(self, spikes, time=None, use_smoother=True):
'''Predicts the state from multiunits.
Parameters
----------
spikes : ndarray, shape (n_time, n_neurons)
If spike does not occur, the row must be marked with np.nan.
time : ndarray, optional, shape (n_time,)
use_smoother : bool
Use future information to compute state
Returns
-------
DecodingResults : DecodingResults class instance
'''
spikes = np.asarray(spikes.copy())
place_bin_size = np.diff(self.place_bin_edges)[0]
likelihood = combined_likelihood(
spikes.T[..., np.newaxis, np.newaxis],
**self.combined_likelihood_kwargs_)
state_transition = self.state_transition_.values
results = filter(
initial_conditions=self.initial_conditions_.values,
state_transition=state_transition,
likelihood=likelihood,
bin_size=place_bin_size)
if use_smoother:
results = smooth(
filter_posterior=results['posterior_density'],
state_transition=self.state_transition_.values,
bin_size=place_bin_size)
results['likelihood'] = likelihood
coords = dict(
time=(time if time is not None
else np.arange(results['posterior_density'].shape[0])),
position=self.place_bin_centers,
state=self.state_names
)
DIMS = ['time', 'state', 'position']
results = xr.Dataset(
{key: (DIMS, value) for key, value in results.items()},
coords=coords)
return DecodingResults(
results=results,
spikes=spikes,
place_bin_size=place_bin_size,
confidence_threshold=self.confidence_threshold,
)
class DecodingResults():
def __init__(self, results, place_bin_size, spikes=None,
confidence_threshold=0.8):
self.results = results
self.spikes = spikes
self.place_bin_size = place_bin_size
self.confidence_threshold = confidence_threshold
    def __dir__(self):
        return list(self.results.keys())
def state_probability(self):
return (self.results['posterior_density'].sum('position')
.to_series().unstack()) * self.place_bin_size
def predicted_state(self):
state_probability = self.state_probability()
is_threshold = np.sum(
(state_probability > self.confidence_threshold), axis=1)
if np.any(is_threshold):
return state_probability.iloc[
is_threshold.values.argmax()].idxmax()
else:
return 'Unclassified'
def predicted_state_probability(self):
state_probability = self.state_probability()
is_threshold = np.sum(
(state_probability > self.confidence_threshold), axis=1)
if np.any(is_threshold):
            return state_probability.iloc[is_threshold.values.argmax()].max()
else:
return np.nan
def plot_posterior_density(self, **kwargs):
try:
return self.results['posterior_density'].plot(
x='time', y='position', col='state', col_wrap=2,
robust=True, **kwargs)
except ValueError:
return self.results['posterior_density'].plot(
x='time', y='position', robust=True, **kwargs)
def plot_interactive(self):
ds = hv.Dataset(self.results)
likelihood_plot = ds.to(hv.Curve, 'position', 'likelihood')
posterior_plot = ds.to(hv.Curve, 'position', 'posterior_density')
prior_plot = ds.to(hv.Curve, 'position', 'prior')
plot_opts = dict(shared_yaxis=True, shared_xaxis=True)
norm_opts = dict(framewise=True)
hv.opts({'Curve': dict(norm=norm_opts)}, likelihood_plot)
return (prior_plot.grid('state').opts(plot=plot_opts) +
likelihood_plot.grid('state').opts(plot=plot_opts) +
posterior_plot.grid('state').opts(plot=plot_opts)).cols(1)
def plot_state_probability(self, **kwargs):
return self.state_probability().plot(**kwargs) | /replay_classification-0.6.1.tar.gz/replay_classification-0.6.1/replay_classification/decoders.py | 0.839537 | 0.230638 | decoders.py | pypi |
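A minimal usage sketch (added for illustration) of DecodingResults from the module above, fed with a synthetic posterior in place of the decoder's predict() output; the state names and bin layout are arbitrary assumptions.
import numpy as np
import xarray as xr
n_time, n_states, n_bins, place_bin_size = 5, 2, 10, 1.0
# Random posterior densities standing in for real decoder output.
posterior = np.random.dirichlet(np.ones(n_states * n_bins), size=n_time)
posterior = posterior.reshape((n_time, n_states, n_bins)) / place_bin_size
results = xr.Dataset(
    {'posterior_density': (['time', 'state', 'position'], posterior)},
    coords=dict(time=np.arange(n_time),
                state=['Inbound-Forward', 'Outbound-Forward'],
                position=np.arange(n_bins, dtype=float)))
decoding = DecodingResults(results=results, place_bin_size=place_bin_size)
print(decoding.state_probability())  # time x state probabilities
print(decoding.predicted_state())    # a state name or 'Unclassified'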
from functools import partial
import numpy as np
import pandas as pd
try:
from IPython import get_ipython
if get_ipython() is not None:
from tqdm import tqdm_notebook as tqdm
else:
from tqdm import tqdm
except ImportError:
def tqdm(*args, **kwargs):
if args:
return args[0]
return kwargs.get('iterable', None)
def poisson_mark_log_likelihood(marks, joint_mark_intensity_functions=None,
ground_process_intensity=None,
time_bin_size=1):
'''Probability of parameters given spiking indicator at a particular
time and associated marks.
Parameters
----------
marks : array, shape (n_signals, n_marks)
    joint_mark_intensity_functions : list of lists of functions, per signal and state
Instantaneous probability of observing a spike given mark vector
from data. The parameters for this function should already be set,
before it is passed to `poisson_mark_log_likelihood`.
ground_process_intensity : array, shape (n_signals, n_states,
n_place_bins)
Probability of observing a spike regardless of marks.
time_bin_size : float, optional
Returns
-------
poisson_mark_log_likelihood : array_like, shape (n_signals, n_states,
n_time, n_place_bins)
'''
ground_process_intensity += np.spacing(1)
probability_no_spike = -ground_process_intensity * time_bin_size
joint_mark_intensity = np.stack(
[np.stack([jmi(signal_marks) for jmi in jmi_by_state], axis=-2)
for signal_marks, jmi_by_state
in zip(tqdm(marks, 'electrodes'), joint_mark_intensity_functions)])
joint_mark_intensity += np.spacing(1)
return np.log(joint_mark_intensity) + probability_no_spike
def joint_mark_intensity(
marks, place_bin_centers, place_occupancy, fitted_model, mean_rate):
marks = np.atleast_2d(marks)
n_place_bins = place_bin_centers.shape[0]
n_time = marks.shape[0]
is_nan = np.any(np.isnan(marks), axis=1)
n_spikes = np.sum(~is_nan)
density = np.zeros((n_time, n_place_bins))
if n_spikes > 0:
for bin_ind, bin in enumerate(place_bin_centers):
bin = bin * np.ones((n_spikes, 1))
predict_data = np.concatenate((marks[~is_nan], bin), axis=1)
density[~is_nan, bin_ind] = np.exp(
fitted_model.score_samples(predict_data))
joint_mark_intensity = mean_rate * density / place_occupancy
joint_mark_intensity[is_nan] = 0.0
return joint_mark_intensity
def estimate_place_occupancy(position, place_bin_centers, model, model_kwargs):
return np.exp(model(**model_kwargs).fit(position)
.score_samples(place_bin_centers[:, np.newaxis]))
def estimate_ground_process_intensity(
position, marks, place_bin_centers, model, model_kwargs):
is_spike = np.any(~np.isnan(marks), axis=1)
position = atleast_2d(position)
place_field = np.exp(model(**model_kwargs).fit(position[is_spike])
.score_samples(place_bin_centers[:, np.newaxis]))
place_occupancy = estimate_place_occupancy(
position, place_bin_centers, model, model_kwargs)
mean_rate = np.mean(is_spike)
return np.atleast_2d(mean_rate * place_field / place_occupancy)
def build_joint_mark_intensity(
position, training_marks, place_bin_centers, model, model_kwargs):
training_marks = atleast_2d(training_marks)[~np.isnan(position)]
position = atleast_2d(position)[~np.isnan(position)]
is_spike = np.any(~np.isnan(training_marks), axis=1)
    mean_rate = np.mean(is_spike, dtype=float)
training_data = np.concatenate(
(training_marks[is_spike], position[is_spike]), axis=1)
fitted_model = model(**model_kwargs).fit(training_data)
place_occupancy = estimate_place_occupancy(
position, place_bin_centers, model, model_kwargs)
return partial(joint_mark_intensity,
place_bin_centers=place_bin_centers,
place_occupancy=place_occupancy,
fitted_model=fitted_model,
mean_rate=mean_rate)
def fit_multiunit_observation_model(position, trajectory_direction,
spike_marks, place_bin_centers,
model, model_kwargs,
observation_state_order):
joint_mark_intensity_functions = []
ground_process_intensity = []
trajectory_directions = np.unique(
trajectory_direction[pd.notnull(trajectory_direction)])
for marks in tqdm(spike_marks, desc='electrodes'):
jmi_by_state = {
direction: build_joint_mark_intensity(
position[
np.in1d(trajectory_direction, direction)],
marks[np.in1d(trajectory_direction, direction)],
place_bin_centers, model, model_kwargs)
for direction in trajectory_directions}
joint_mark_intensity_functions.append(
[jmi_by_state[state]
for state in observation_state_order])
gpi_by_state = {
direction: estimate_ground_process_intensity(
position[
np.in1d(trajectory_direction, direction)],
marks[np.in1d(trajectory_direction, direction)],
place_bin_centers, model, model_kwargs)
for direction in trajectory_directions}
ground_process_intensity.append(
np.stack([gpi_by_state[state]
for state in observation_state_order], axis=-2))
ground_process_intensity = np.stack(ground_process_intensity)
return joint_mark_intensity_functions, ground_process_intensity
def atleast_2d(x):
return np.atleast_2d(x).T if x.ndim < 2 else x | /replay_classification-0.6.1.tar.gz/replay_classification-0.6.1/replay_classification/multiunit.py | 0.884975 | 0.408159 | multiunit.py | pypi |
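A hedged sketch of fitting the mark-based observation model above for a single electrode with synthetic data; scikit-learn's KernelDensity is only an assumed stand-in for the density model (anything exposing fit/score_samples should work).
import numpy as np
from sklearn.neighbors import KernelDensity
rng = np.random.RandomState(0)
n_time, n_marks = 200, 4
position = rng.uniform(0, 100, size=n_time)
marks = np.full((n_time, n_marks), np.nan)  # NaN rows mean "no spike"
has_spike = rng.rand(n_time) < 0.2
marks[has_spike] = rng.normal(50, 10, size=(has_spike.sum(), n_marks))
place_bin_centers = np.linspace(0, 100, 25)
model_kwargs = dict(bandwidth=10.0)
jmi = build_joint_mark_intensity(
    position, marks, place_bin_centers, KernelDensity, model_kwargs)
intensity = jmi(marks[:5])  # shape (5, n_place_bins)
gpi = estimate_ground_process_intensity(
    position, marks, place_bin_centers, KernelDensity, model_kwargs)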
import numpy as np
import xarray as xr
from patsy import dmatrices
from scipy.ndimage import gaussian_filter
from scipy.stats import norm
from statsmodels.api import GLM, families
from .core import normalize_to_probability
def estimate_movement_std(position_info):
MODEL_FORMULA = 'position ~ lagged_position - 1'
response, design_matrix = dmatrices(MODEL_FORMULA, position_info)
fit = GLM(response, design_matrix, family=families.Gaussian()).fit()
return np.sqrt(fit.scale)
def uniform_initial_conditions(
position_info, place_bin_edges, place_bin_centers,
replay_sequence_orders='Forward'):
if isinstance(replay_sequence_orders, str):
replay_sequence_orders = [replay_sequence_orders]
bin_size = place_bin_centers[1] - place_bin_centers[0]
uniform = normalize_to_probability(
np.ones_like(place_bin_centers), bin_size)
state_names = []
initial_conditions = []
for order in replay_sequence_orders:
for condition, df in position_info.groupby('experimental_condition'):
state_names.append('-'.join((condition, order)))
initial_conditions.append(uniform)
initial_conditions = normalize_to_probability(
np.stack(initial_conditions), bin_size)
return xr.DataArray(
initial_conditions, dims=['state', 'position'],
coords=dict(position=place_bin_centers,
state=state_names),
name='probability')
def inbound_outbound_initial_conditions(
position_info, place_bin_edges, place_bin_centers,
replay_sequence_orders='Forward'):
'''Sets the prior for each state (Outbound-Forward, Outbound-Reverse,
Inbound-Forward, Inbound-Reverse).
Inbound states have greater weight on starting at the center arm.
Outbound states have weight everywhere else.
Parameters
----------
place_bin_centers : ndarray, shape (n_bins,)
Histogram bin centers of the place measure
Returns
-------
initial_conditions : dict
'''
CENTER_WELL_LOCATION = 0.0
if isinstance(replay_sequence_orders, str):
replay_sequence_orders = [replay_sequence_orders]
bin_size = place_bin_centers[1] - place_bin_centers[0]
outbound_initial_conditions = normalize_to_probability(
norm.pdf(place_bin_centers, loc=CENTER_WELL_LOCATION,
scale=2 * bin_size), bin_size)
# Everywhere but the center well
inbound_initial_conditions = normalize_to_probability(
(np.max(outbound_initial_conditions) *
np.ones(place_bin_centers.shape)) -
outbound_initial_conditions, bin_size)
uniform = normalize_to_probability(
np.ones_like(place_bin_centers), bin_size)
conditions_map = {
('Inbound', 'Forward'): inbound_initial_conditions,
('Inbound', 'Reverse'): outbound_initial_conditions,
('Outbound', 'Forward'): outbound_initial_conditions,
('Outbound', 'Reverse'): inbound_initial_conditions,
('Inbound', 'Stay'): uniform,
('Outbound', 'Stay'): uniform}
state_names = []
initial_conditions = []
for order in replay_sequence_orders:
for condition, df in position_info.groupby('experimental_condition'):
state_names.append('-'.join((condition, order)))
initial_conditions.append(conditions_map[(condition, order)])
initial_conditions = normalize_to_probability(
np.stack(initial_conditions), bin_size)
return xr.DataArray(
initial_conditions, dims=['state', 'position'],
coords=dict(position=place_bin_centers,
state=state_names),
name='probability')
def fit_initial_conditions(position_info, place_bin_edges, place_bin_centers,
replay_sequence_orders='Forward'):
if isinstance(replay_sequence_orders, str):
replay_sequence_orders = [replay_sequence_orders]
order_to_position = {'Forward': lambda s: s.iloc[0],
'Reverse': lambda s: s.iloc[-1],
'Stay': lambda s: None}
order_to_position = {order_name: func
for order_name, func in order_to_position.items()
if order_name in replay_sequence_orders}
grouper = (position_info
.groupby(['experimental_condition', 'trial_id'])
.position.agg(order_to_position)
.stack(dropna=False)
.reset_index()
.rename(columns={'level_2': 'order', 0: 'position'})
.groupby(['order', 'experimental_condition']))
state_names = []
initial_conditions = []
bin_size = np.diff(place_bin_edges)[0]
for (order, condition), df in grouper:
state_names.append('-'.join((condition, order)))
movement_std = estimate_movement_std(df)
initial_conditions.append(empirical_inital_conditions(
            df.position, place_bin_edges, movement_std))
initial_conditions = normalize_to_probability(
np.stack(initial_conditions), bin_size)
return xr.DataArray(
initial_conditions, dims=['state', 'position'],
coords=dict(position=place_bin_centers,
state=state_names),
name='probability')
def empirical_inital_conditions(position, place_bin_edges, movement_std=0.5):
try:
movement_bins, _ = np.histogram(position, bins=place_bin_edges)
except ValueError:
movement_bins = np.zeros((place_bin_edges.size - 1, ))
if movement_bins.sum() == 0:
movement_bins[:] = 1
bin_size = np.diff(place_bin_edges)[0]
movement_bins = normalize_to_probability(movement_bins, bin_size)
return gaussian_filter(movement_bins, sigma=movement_std) | /replay_classification-0.6.1.tar.gz/replay_classification-0.6.1/replay_classification/initial_conditions.py | 0.811713 | 0.406803 | initial_conditions.py | pypi |
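A small illustrative example of building a uniform prior with the functions above; the column names follow what the code expects and the data are random.
import numpy as np
import pandas as pd
place_bin_edges = np.linspace(0, 100, 21)
place_bin_centers = place_bin_edges[:-1] + np.diff(place_bin_edges) / 2
position_info = pd.DataFrame({
    'position': np.random.uniform(0, 100, size=500),
    'experimental_condition': np.repeat(['Inbound', 'Outbound'], 250),
    'trial_id': np.tile(np.repeat(np.arange(10), 25), 2)})
prior = uniform_initial_conditions(
    position_info, place_bin_edges, place_bin_centers,
    replay_sequence_orders=['Forward', 'Reverse'])
print(prior.shape)  # (n_states, n_place_bins) = (4, 20)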
from logging import getLogger
import numpy as np
import pandas as pd
from patsy import build_design_matrices, dmatrix
from statsmodels.api import families
from regularized_glm import penalized_IRLS
from .core import atleast_kd
try:
from IPython import get_ipython
if get_ipython() is not None:
from tqdm import tqdm_notebook as tqdm
else:
from tqdm import tqdm
except ImportError:
def tqdm(*args, **kwargs):
if args:
return args[0]
return kwargs.get('iterable', None)
logger = getLogger(__name__)
def fit_glm_model(spikes, design_matrix, penalty=3):
'''Fits the Poisson model to the spikes from a neuron.
Parameters
----------
spikes : array_like
design_matrix : array_like or pandas DataFrame
penalty : float, optional
Returns
-------
    coefficients : ndarray
        Coefficients of the fitted penalized Poisson GLM
'''
regularization_weights = np.ones((design_matrix.shape[1],)) * penalty
regularization_weights[0] = 0.0
return np.squeeze(
penalized_IRLS(
np.array(design_matrix), np.array(spikes),
family=families.Poisson(),
penalty=regularization_weights).coefficients)
def predictors_by_experimental_condition(experimental_condition,
place_bin_centers,
design_matrix):
'''The design matrix for a given trajectory direction.
'''
predictors = {'position': place_bin_centers,
'experimental_condition': [experimental_condition] *
len(place_bin_centers)}
return build_design_matrices(
[design_matrix.design_info], predictors)[0]
def get_conditional_intensity(fit_coefficients, predict_design_matrix):
'''The conditional intensity for each model
'''
intensity = np.exp(np.dot(predict_design_matrix, fit_coefficients)).T
intensity[np.isnan(intensity)] = np.spacing(1)
return intensity
def poisson_log_likelihood(is_spike, conditional_intensity=None,
time_bin_size=1):
'''Probability of parameters given spiking at a particular time
Parameters
----------
is_spike : array_like with values in {0, 1}, shape (n_signals,)
Indicator of spike or no spike at current time.
conditional_intensity : array_like, shape (n_signals, n_states,
n_place_bins)
Instantaneous probability of observing a spike
time_bin_size : float, optional
Returns
-------
poisson_log_likelihood : array_like, shape (n_signals, n_states,
n_place_bins)
'''
conditional_intensity += np.spacing(1)
probability_no_spike = -conditional_intensity * time_bin_size
is_spike = atleast_kd(is_spike, conditional_intensity.ndim)
return (np.log(conditional_intensity) * is_spike +
probability_no_spike)
def fit_spike_observation_model(position, experimental_condition, spikes,
place_bin_centers,
knot_spacing, observation_state_order,
spike_model_penalty=1E-1):
min_position, max_position = np.nanmin(position), np.nanmax(position)
n_steps = (max_position - min_position) // knot_spacing
position_knots = min_position + np.arange(1, n_steps) * knot_spacing # noqa: F841, E501
formula = ('1 + experimental_condition * '
'cr(position, knots=position_knots, constraints="center")')
training_data = pd.DataFrame(dict(
position=position,
experimental_condition=experimental_condition)).dropna()
design_matrix = dmatrix(
formula, training_data, return_type='dataframe')
fit_coefficients = np.stack(
[fit_glm_model(
pd.DataFrame(s).loc[design_matrix.index], design_matrix,
spike_model_penalty)
for s in tqdm(spikes.T, desc='neurons')], axis=1)
ci_by_state = {
condition: get_conditional_intensity(
fit_coefficients, predictors_by_experimental_condition(
condition, place_bin_centers, design_matrix))
for condition in np.unique(observation_state_order)}
conditional_intensity = np.stack(
[ci_by_state[state] for state in observation_state_order],
axis=1)
return conditional_intensity[:, np.newaxis, ...] | /replay_classification-0.6.1.tar.gz/replay_classification-0.6.1/replay_classification/sorted_spikes.py | 0.909378 | 0.429429 | sorted_spikes.py | pypi |
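A hedged sketch of evaluating poisson_log_likelihood above for one time bin with random intensities; it assumes atleast_kd (imported from .core) appends trailing singleton dimensions so the spike indicator broadcasts against the intensities.
import numpy as np
n_signals, n_states, n_place_bins = 3, 2, 50
conditional_intensity = np.random.uniform(
    0.1, 5.0, size=(n_signals, n_states, n_place_bins))
is_spike = np.array([1, 0, 1])  # one indicator per signal for this time bin
log_like = poisson_log_likelihood(
    is_spike, conditional_intensity=conditional_intensity, time_bin_size=0.02)
print(log_like.shape)  # (n_signals, n_states, n_place_bins)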
import datetime
import gym
import tensorflow as tf
import os
from abc import ABC, abstractmethod
from typing import List
from copy import deepcopy
from replay_monitor.db import DBWriter, PosInTrajectory
class PerformanceMetric(ABC):
@abstractmethod
def track(self, *args, **kwargs):
pass
@property
@abstractmethod
def value(self):
"""
Compute and return the value of the performance metric.
:return:
"""
pass
@abstractmethod
def log_metric_to_tf_summary(self):
"""
        Assumes it is called with a default TF summary writer set; it should output the relevant tf.summary calls.
:return:
"""
pass
@abstractmethod
def reset_state(self):
pass
def new_episode(self):
"""
        Optional. A method that is called whenever a new episode starts.
:return:
"""
pass
class PerStepPerformanceMetric(PerformanceMetric):
"""
This class is intended for metrics that rely on 'per step' data such as observation, action, reward, info, etc.
"""
@abstractmethod
def track(self, observation, action, next_observation, reward, done, info, *args, **kwargs):
pass
def get_space_shape(space: gym.spaces.Space):
if isinstance(space, gym.spaces.Tuple):
        return tuple(get_space_shape(sub_space) for sub_space in space.spaces)
else:
return space.shape
DEFAULT_LOGS_DIR = 'monitor-logs'
DEFAULT_DB_FILENAME = 'monitor_db.h5'
class Monitor(gym.core.Wrapper):
"""
This class is intended to provide a convenient way of tracking the training / running process of a Reinforcement
Learning algorithm over an environment that follows the OpenAI's Gym conventions.
"""
# TODO: Support Vector Envs.
def __init__(self,
env: gym.Env,
performance_metrics: List[PerformanceMetric] = None,
log_to_db: bool = False,
logging_directory: str = DEFAULT_LOGS_DIR,
*arg, **kwargs):
# initialize the directory for the logs:
self.run_id = datetime.datetime.now().strftime("%d%m%Y-%H%M%S")
logging_path = self._generate_logging_path(logging_directory=logging_directory, run_id=self.run_id)
self.summary_writer = tf.summary.create_file_writer(logging_path)
self.performance_metrics: List[PerformanceMetric] = performance_metrics \
if performance_metrics is not None else []
self.current_observation = None
self.log_to_db = log_to_db
self.db_file_path = os.path.join(logging_directory, DEFAULT_DB_FILENAME)
self._db_writer = DBWriter(
state_shape=get_space_shape(env.observation_space),
action_shape=get_space_shape(env.action_space),
is_action_discrete=isinstance(env.action_space, (gym.spaces.Discrete, gym.spaces.MultiDiscrete)),
db_file=self.db_file_path,
total_steps_estimate=None
)
self._last_observation = None
self._is_first_observation = False
super().__init__(env=env)
def add_performance_metrics(self, performance_metrics: List[PerformanceMetric]):
self.performance_metrics.extend(performance_metrics)
def step(self, action):
next_observation, reward, done, info = self.env.step(action)
if self.log_to_db:
# determine position in trajectory:
self._db_writer.store_transition(
state=self._last_observation,
action=action,
reward=reward,
next_state=next_observation,
info=info,
pos_in_trajectory=self._determine_pos_in_trajectory(done)
)
self._is_first_observation = False
self._track_performance(action=action, next_observation=next_observation, reward=reward, done=done, info=info)
self.current_observation = deepcopy(next_observation)
return next_observation, reward, done, info
def reset(self, **kwargs):
observation = self.env.reset(**kwargs)
self._last_observation = observation
self._is_first_observation = True
self.current_observation = deepcopy(observation)
for metric in self.performance_metrics:
metric.new_episode()
return observation
def log_performance_metrics(self):
with self.summary_writer.as_default():
for metric in self.performance_metrics:
metric.log_metric_to_tf_summary()
def log_hparams(self, hparams: dict):
"""
Logs the hyper parameters to a text table in Tensorboard.
:param hparams: dictionary where the keys are strings with the names of the hyper parameters and the values
are the corresponding values of the hyper parameters.
"""
with self.summary_writer.as_default():
tf.summary.text(name='Hyper Parameters', data=self.generate_hparams_md_table(hparams), step=0)
def reset_metrics_states(self):
for metric in self.performance_metrics:
metric.reset_state()
def _track_performance(self, action, next_observation, reward, done, info):
for metric in self.performance_metrics:
metric.track(self.current_observation,
action,
next_observation,
reward,
done,
info)
@staticmethod
def _generate_logging_path(logging_directory: str, run_id: str):
logging_path = os.path.join(logging_directory, f'run_{run_id}')
if not os.path.exists(logging_path):
os.makedirs(logging_path)
return logging_path
@staticmethod
def generate_hparams_md_table(hparams: dict):
header = '| Hyper Parameter Name | Value | \n | --- | --- | \n'
lines = [f'| {hp_k} | {hp_v} | \n' for hp_k, hp_v in hparams.items()]
return header + ' '.join(lines)
def _determine_pos_in_trajectory(self, done) -> PosInTrajectory:
if self._is_first_observation:
if done:
pos = PosInTrajectory.ONLY
else:
pos = PosInTrajectory.FIRST
else:
if done:
pos = PosInTrajectory.LAST
else:
pos = PosInTrajectory.MID
return pos | /replay-monitor-0.0.5.tar.gz/replay-monitor-0.0.5/replay_monitor/monitor.py | 0.838184 | 0.350977 | monitor.py | pypi |
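A hypothetical wrapping of a classic-control environment with the Monitor above; 'CartPole-v1' and the old-style 4-tuple step API are assumptions consistent with how the wrapper calls the environment, and TensorFlow plus PyTables must be installed.
import gym
env = Monitor(gym.make('CartPole-v1'), log_to_db=True)
obs = env.reset()
done = False
while not done:
    action = env.action_space.sample()  # random policy, purely illustrative
    obs, reward, done, info = env.step(action)
env.log_hparams({'policy': 'random'})  # writes a text table to TensorBoard
env.log_performance_metrics()          # no-op here since no metrics were passed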
from dataclasses import dataclass
import tables as tb
import time
import numpy as np
from typing import Dict, Tuple, Union, List
import os
from enum import Enum, auto
@dataclass
class StateData:
observation: Tuple[np.ndarray]
additional_data: Dict[str, np.ndarray]
terminal: bool
LOGS_GROUP = 'logs'
LOG_ID_GROUP = 'log_id'
TRAJECTORIES_ARRAY = 'trajectories_start_idx'
LOG_STATES_GROUP = 'states'
STATE_ELEM = 'state_elem_'
LOG_ACTIONS_ARRAY = 'actions'
LOG_REWARDS_ARRAY = 'rewards'
DB_STRUCTURE = {
LOGS_GROUP: {
LOG_ID_GROUP: {
LOG_STATES_GROUP: {
STATE_ELEM: np.ndarray
}
}
}
}
class PosInTrajectory(Enum):
FIRST = auto()
LAST = auto()
MID = auto()
ONLY = auto()
class DBWriter:
"""
Intended for easy to use interface with the DB to perform the necessary DB write transactions.
An object of this class records all interactions as a single run.
To record different runs, use different objects.
    * Avoid messy function signatures by not requiring configuration objects as parameters.
"""
def __init__(
self,
state_shape: Union[Tuple[int], Tuple[Tuple[int]]],
action_shape: Tuple[int],
is_action_discrete: bool = True,
db_file: str = 'monitor_db.h5',
total_steps_estimate: int = None
):
self.state_shape = state_shape
if isinstance(self.state_shape[0], int):
self.state_shape = (self.state_shape,)
self.action_shape = action_shape
self.is_action_discrete = is_action_discrete
self.total_steps_estimate = total_steps_estimate
self.db_file = db_file
open_mode = 'a' if os.path.exists(self.db_file) else 'w'
self.db = tb.open_file(self.db_file, mode=open_mode, title="RL Monitor DB")
self.run_id = self._generate_run_id()
self._initialize_monitor_run()
self._start_new_trajectory_flag = False
def __del__(self):
self.db.close()
def store_transition(
self,
state: Union[np.ndarray, Tuple[np.ndarray]],
action,
reward: float,
next_state: Union[np.ndarray, Tuple[np.ndarray]],
info = None,
pos_in_trajectory: PosInTrajectory = PosInTrajectory.MID
):
# If this state begins a new trajectory, store that information as well:
if pos_in_trajectory == PosInTrajectory.FIRST or pos_in_trajectory == PosInTrajectory.ONLY:
if self._start_new_trajectory_flag:
self._begin_new_trajectory()
self._store_state(state)
self._start_new_trajectory_flag = False
# self._store_state(state)
self._store_action(action)
self._store_reward(reward)
self._store_state(next_state)
if pos_in_trajectory == PosInTrajectory.LAST or pos_in_trajectory == PosInTrajectory.ONLY:
self._start_new_trajectory_flag = True
def _store_state(self, env_state: Union[np.ndarray, Tuple[np.ndarray]]):
# wrap single obs states in a tuple for uniform handling:
if not isinstance(env_state, tuple):
assert isinstance(env_state, np.ndarray)
env_state = (env_state,)
states_group_node = self.db.get_node(f'/{LOGS_GROUP}/{self.run_id}/{LOG_STATES_GROUP}')
assert states_group_node._v_nchildren == len(env_state)
for i, state_elem in enumerate(env_state):
state_elem_arr = self.db.get_node(f'/{LOGS_GROUP}/{self.run_id}/{LOG_STATES_GROUP}/{STATE_ELEM}{i}')
state_elem_arr.append(state_elem)
def _store_action(self, agent_action):
if isinstance(agent_action, (int, float)):
agent_action = np.array([agent_action])
actions_arr = self.db.get_node(f'/{LOGS_GROUP}/{self.run_id}/{LOG_ACTIONS_ARRAY}')
actions_arr.append(agent_action)
def _store_reward(self, reward: float):
rewards_arr = self.db.get_node(f'/{LOGS_GROUP}/{self.run_id}/{LOG_REWARDS_ARRAY}')
rewards_arr.append(np.array([reward]))
def _begin_new_trajectory(self):
n_states = self.db.get_node(f'/{LOGS_GROUP}/{self.run_id}/{LOG_STATES_GROUP}/{STATE_ELEM}{0}').nrows
n_transitions = self.db.get_node(f'/{LOGS_GROUP}/{self.run_id}/{LOG_REWARDS_ARRAY}').nrows
self.db.get_node(f'/{LOGS_GROUP}/{self.run_id}/{TRAJECTORIES_ARRAY}').append((n_states, n_transitions))
@staticmethod
def _generate_run_id() -> str:
return LOG_ID_GROUP + str(time.time()).replace('.', '_')
def _initialize_monitor_run(self):
root_children = [v._v_name for v in self.db.get_node('/')]
if LOGS_GROUP not in root_children:
self.db.create_group('/', LOGS_GROUP)
log_root = self.db.create_group(f'/{LOGS_GROUP}', f"{self.run_id}")
# create inner structure to store the data of the current run:
self.db.create_vlarray(
log_root,
TRAJECTORIES_ARRAY,
tb.UInt64Atom(shape=(2,), dflt=0),
'An array of the indexes of the first state of each trajectory'
)
log_states_group = self.db.create_group(log_root, LOG_STATES_GROUP)
for i, state_elem_shape in enumerate(self.state_shape):
self.db.create_vlarray(
log_states_group,
f"{STATE_ELEM}{i}",
tb.Atom.from_dtype(np.dtype((np.float32, state_elem_shape))),
f'Element #{i} of the observation (state)',
expectedrows=self.total_steps_estimate
)
action_atom = tb.UInt64Atom(shape=self.action_shape, dflt=0) if self.is_action_discrete else \
tb.Atom.from_dtype(np.dtype((np.float32, self.action_shape)))
self.db.create_vlarray(
log_root,
LOG_ACTIONS_ARRAY,
atom=action_atom
)
self.db.create_vlarray(
log_root,
LOG_REWARDS_ARRAY,
atom=tb.Float32Atom(shape=(), dflt=0.0)
)
class DBReader:
def __init__(self, db_file: str = 'monitor_db.h5'):
self.db_file = db_file
@property
def db(self):
return tb.open_file(self.db_file, mode='r')
def get_logs_ids(self) -> List[str]:
with self.db as db:
logs_node = db.get_node(f'/{LOGS_GROUP}')
return [v._v_name for v in logs_node._f_list_nodes()]
def get_num_of_trajectories(self, log_id: str):
if self._get_num_of_states(log_id=log_id) == 0:
return 0
with self.db as db:
n_traj_rows = db.get_node(f'/{LOGS_GROUP}/{log_id}/{TRAJECTORIES_ARRAY}').nrows
return n_traj_rows + 1
def get_trajectories_lengths(self, log_id: str) -> List[int]:
"""
        Useful for knowing how many trajectories there are and their lengths.
Here, trajectory length is defined as the number of transitions (= number of actions).
:return:
"""
n_states = self._get_num_of_states(log_id=log_id)
if n_states == 0:
return []
trajectories_start_indices = self._get_log_trajectories(log_id=log_id)
trajectories_lengths = []
for i in range(len(trajectories_start_indices)-1):
trajectories_lengths.append((trajectories_start_indices[i+1][0]-1) - trajectories_start_indices[i][0])
trajectories_lengths.append((n_states-1) - trajectories_start_indices[-1][0])
return trajectories_lengths
def get_transition_data(self, log_id: str, trajectory_index: int, transition_index: int):
trajectories_start_indices = self._get_log_trajectories(log_id=log_id)
with self.db as db:
states_group_node = db.get_node(f'/{LOGS_GROUP}/{log_id}/{LOG_STATES_GROUP}')
state_elements_nodes = states_group_node._f_list_nodes()
state_idx = trajectories_start_indices[trajectory_index][0] + transition_index
state = tuple(s[state_idx] for s in state_elements_nodes)
next_state = tuple(s[state_idx+1] for s in state_elements_nodes)
action_idx = trajectories_start_indices[trajectory_index][1] + transition_index
action = db.get_node(f'/{LOGS_GROUP}/{log_id}/{LOG_ACTIONS_ARRAY}')[action_idx]
reward = db.get_node(f'/{LOGS_GROUP}/{log_id}/{LOG_REWARDS_ARRAY}')[action_idx]
return state, action, reward, next_state
def get_trajectory_rewards(self, log_id: str, trajectory_index: int):
trajectories_start_indices = self._get_log_trajectories(log_id=log_id)
trajectories_lengths = self.get_trajectories_lengths(log_id=log_id)
if len(trajectories_lengths) == 0:
return None
with self.db as db:
start_index = trajectories_start_indices[trajectory_index][1]
end_index = trajectories_start_indices[trajectory_index][1] + trajectories_lengths[trajectory_index]
rewards = db.get_node(f'/{LOGS_GROUP}/{log_id}/{LOG_REWARDS_ARRAY}')[start_index:end_index]
return rewards
def get_num_of_state_elements(self, log_id: str):
with self.db as db:
return db.get_node(f'/{LOGS_GROUP}/{log_id}/{LOG_STATES_GROUP}')._v_nchildren
def _get_num_of_states(self, log_id: str):
with self.db as db:
states_group_node = db.get_node(f'/{LOGS_GROUP}/{log_id}/{LOG_STATES_GROUP}')
state_elements_nodes = states_group_node._f_list_nodes()
if len(state_elements_nodes) == 0:
return 0
return db.get_node(f'/{LOGS_GROUP}/{log_id}/{LOG_STATES_GROUP}/{STATE_ELEM}{0}').nrows
def _get_log_trajectories(self, log_id: str) -> List[Tuple[int, int]]:
if self._get_num_of_states(log_id=log_id) == 0:
return []
trajectories = [(0, 0)]
with self.db as db:
stored_data = db.get_node(f'/{LOGS_GROUP}/{log_id}/{TRAJECTORIES_ARRAY}').read()
if len(stored_data) > 0:
trajectories.extend(np.array(stored_data, dtype=int).squeeze(1))
return trajectories | /replay-monitor-0.0.5.tar.gz/replay-monitor-0.0.5/replay_monitor/db.py | 0.763219 | 0.317109 | db.py | pypi |
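A hedged round-trip sketch: write one short trajectory with DBWriter, then read it back with DBReader; the state shape, discrete scalar action and file name are arbitrary assumptions.
import numpy as np
writer = DBWriter(state_shape=(4,), action_shape=(),
                  is_action_discrete=True, db_file='example_db.h5')
state = np.zeros(4, dtype=np.float32)
positions = [PosInTrajectory.FIRST, PosInTrajectory.MID, PosInTrajectory.LAST]
for pos in positions:
    next_state = np.random.rand(4).astype(np.float32)
    writer.store_transition(state, action=1, reward=1.0,
                            next_state=next_state, pos_in_trajectory=pos)
    state = next_state
del writer  # closes the underlying HDF5 file
reader = DBReader(db_file='example_db.h5')
log_id = reader.get_logs_ids()[0]
print(reader.get_trajectories_lengths(log_id))  # transitions per trajectory
s, a, r, s2 = reader.get_transition_data(log_id, trajectory_index=0,
                                         transition_index=0)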
import os
from enum import Enum, auto
from typing import Tuple
import numpy as np
from bokeh.models import Panel, Tabs, Slider, ColumnDataSource, Select, Spinner, Div
from bokeh.plotting import figure
from bokeh.layouts import column, row
from bokeh.server.server import Server
from replay_monitor.db import DBReader
from .monitor import DEFAULT_LOGS_DIR, DEFAULT_DB_FILENAME
def create_state_data_dict_from_state(state: Tuple[np.ndarray]):
state_data_dicts = []
for element in state:
if element.shape[0] == 1:
element = element[0]
data = {}
plot_type = determine_state_element_plot_type(element)
if plot_type == StateElementPlotType.BAR:
values = element.flatten()
data['x'] = list(range(len(values)))
data['state_element'] = values
# data['state_x_range_1d'] = range(element.shape[0])
elif plot_type == StateElementPlotType.MATRIX:
data['state_element'] = [element]
# data['state_x_range_2d'] = element.shape[1]
elif plot_type == StateElementPlotType.COLOR_IMAGE:
assert element.shape[2] == 3 # Assume color channel is last!
xdim, ydim = element.shape[0:2]
img = np.empty((xdim, ydim), dtype=np.uint32)
view = img.view(dtype=np.uint8).reshape((xdim, ydim, 4))
view[:, :, :-1] = np.flipud(element)
view[:, :, -1] = 255
data['state_element'] = [img]
state_data_dicts.append(data)
return state_data_dicts
def create_rewards_data_dict(rewards: np.ndarray):
return {
'rewards': rewards,
        'x': list(range(len(rewards)))
}
def step_slider_change_handler(attr, old, new):
global data_manager, ui_manager
data_manager.change_transition(new)
ui_manager.step_spinner.value = new
ui_manager.step_slider.value = new
ui_manager.update_ui_due_to_transition_change()
def trajectory_changed_handler(attr, old, new):
global data_manager, ui_manager
data_manager.change_transition(0, new)
ui_manager.update_ui_due_to_trajectory_change()
def log_select_change_handler(attr, old, new):
global data_manager, ui_manager
data_manager.change_transition(0, 0, new)
ui_manager.update_ui_due_to_log_change()
class StateElementPlotType(Enum):
BAR = auto()
MATRIX = auto()
COLOR_IMAGE = auto()
def determine_state_element_plot_type(state_elem: np.ndarray) -> StateElementPlotType:
if len(state_elem.shape) > 1 and state_elem.shape[0] == 1:
state_elem = state_elem[0]
if len(state_elem.shape) == 1:
return StateElementPlotType.BAR
elif len(state_elem.shape) == 2:
if 1 in state_elem.shape:
return StateElementPlotType.BAR
else:
return StateElementPlotType.MATRIX
elif len(state_elem.shape) == 3 and state_elem.shape[2] == 3:
return StateElementPlotType.COLOR_IMAGE
def create_state_layout(state, data_sources, title):
tabs_list = []
for i, element in enumerate(state):
if len(element.shape) > 1 and element.shape[0] == 1:
element = element[0]
plot_type = determine_state_element_plot_type(element)
if plot_type == StateElementPlotType.BAR:
fig = figure(plot_width=600, plot_height=600)
fig.vbar(x='x', width=0.5, bottom=0, top='state_element', source=data_sources[i])
elif plot_type == StateElementPlotType.MATRIX:
range_max = max(element.shape[1], element.shape[0])
fig = figure(plot_width=600, plot_height=600, x_range=(0, range_max), y_range=(0, range_max))
fig.image(image='state_element', x=0, y=0, dw=element.shape[1], dh=element.shape[0], palette="Spectral11",
source=data_sources[i])
elif plot_type == StateElementPlotType.COLOR_IMAGE:
range_max = max(element.shape[1], element.shape[0])
fig = figure(plot_width=600, plot_height=600, x_range=(0, range_max), y_range=(0, range_max))
fig.image_rgba(image='state_element', x=0, y=0, dw=element.shape[1], dh=element.shape[0],
source=data_sources[i])
else:
continue
tabs_list.append(Panel(child=fig, title=f"State Element {i}"))
state_layout = column(
Div(text=f"<h2>{title}</h2>"),
Tabs(tabs=tabs_list)
)
return state_layout
class DataManager:
def __init__(self, db_file_path: str):
self.db_file_path = db_file_path
db_reader = DBReader(db_file=db_file_path)
self.log_ids = db_reader.get_logs_ids()
self.current_log = self.log_ids[0]
self.trajectory_index = 0
self.transition_index = 0
self.trajectories_lengths = db_reader.get_trajectories_lengths(self.current_log)
self.n_state_elements = db_reader.get_num_of_state_elements(self.current_log)
self.trajectory_rewards = db_reader.get_trajectory_rewards(log_id=self.current_log,
trajectory_index=self.trajectory_index)
self.s, self.a, self.r, self.s2 = self.get_transition(self.current_log, self.trajectory_index,
self.transition_index)
self.state_elements_sizes = [state_elem.size for state_elem in self.s]
def get_transition(self, log_id: str, trajectory_index: int, transition_index: int):
db_reader = DBReader(db_file=self.db_file_path)
data = db_reader.get_transition_data(log_id, trajectory_index=trajectory_index,
transition_index=transition_index)
return data
def change_transition(self, transition_index: int, trajectory_index: int = None, log_id: str = None):
is_log_changed = False
if log_id is not None:
is_log_changed = log_id != self.current_log
self.current_log = log_id
if trajectory_index is not None:
is_different_trajectory = self.trajectory_index != trajectory_index
self.trajectory_index = trajectory_index
if is_different_trajectory or is_log_changed:
db_reader = DBReader(db_file=self.db_file_path)
self.trajectory_rewards = db_reader.get_trajectory_rewards(log_id=self.current_log,
trajectory_index=self.trajectory_index)
self.transition_index = transition_index
self.s, self.a, self.r, self.s2 = self.get_transition(self.current_log, self.trajectory_index,
self.transition_index)
if log_id is not None and is_log_changed:
db_reader = DBReader(db_file=self.db_file_path)
self.trajectories_lengths = db_reader.get_trajectories_lengths(self.current_log)
self.n_state_elements = db_reader.get_num_of_state_elements(self.current_log)
self.state_elements_sizes = [state_elem.size for state_elem in self.s]
def get_current_trajectory_length(self):
return self.trajectories_lengths[self.trajectory_index]
class UIManager:
def __init__(self, data_manager: DataManager):
self.data_manager = data_manager
self.log_select = Select(title="Choose a log:", value=data_manager.current_log, options=data_manager.log_ids)
self.log_select.on_change('value', log_select_change_handler)
self.step_slider = None
self._generate_step_slider()
self.trajectory_select = None
self._generate_trajectory_select()
self.state_data_sources = None
self.next_state_data_sources = None
self.state_layout = None
self.next_state_layout = None
self.states_layout = None
self._create_states_ui()
self.rewards_data_source = ColumnDataSource(data=create_rewards_data_dict(self.data_manager.trajectory_rewards))
self.trajectory_rewards_fig = None
self._create_trajectory_rewards_ui()
self.textual_action_reward = Div(text=self._generate_textual_action_reward_str())
self.states_layout = row(self.state_layout, self.next_state_layout)
self.sliders_layout = column(row(self.step_slider, self.step_spinner), self.trajectory_select, sizing_mode='stretch_width')
self.layout = column(self.log_select,
self.trajectory_rewards_fig,
self.textual_action_reward,
self.states_layout,
self.sliders_layout)
def update_ui_due_to_log_change(self):
self._create_states_ui()
self.states_layout.children = [self.state_layout, self.next_state_layout]
self.rewards_data_source.stream(new_data=create_rewards_data_dict(self.data_manager.trajectory_rewards),
rollover=len(self.data_manager.trajectory_rewards))
self.trajectory_select.high = len(self.data_manager.trajectories_lengths)-1
self.trajectory_select.value = self.data_manager.trajectory_index
self.step_slider.value = self.data_manager.transition_index
self.step_slider.end = self.data_manager.get_current_trajectory_length()-1
self.step_spinner.high = self.step_slider.end
self.step_spinner.value = self.step_slider.value
def update_ui_due_to_transition_change(self):
state_data_dicts = create_state_data_dict_from_state(self.data_manager.s)
next_state_data_dicts = create_state_data_dict_from_state(self.data_manager.s2)
for i, element in enumerate(self.data_manager.s):
self.state_data_sources[i].stream(new_data=state_data_dicts[i],
rollover=self.data_manager.state_elements_sizes[i])
self.next_state_data_sources[i].stream(new_data=next_state_data_dicts[i],
rollover=self.data_manager.state_elements_sizes[i])
self.textual_action_reward.text = self._generate_textual_action_reward_str()
def update_ui_due_to_trajectory_change(self):
self.update_ui_due_to_transition_change()
self.rewards_data_source.stream(new_data=create_rewards_data_dict(self.data_manager.trajectory_rewards),
rollover=len(self.data_manager.trajectory_rewards))
self.step_slider.value = self.data_manager.transition_index
self.step_slider.end = self.data_manager.get_current_trajectory_length() - 1
self.step_spinner.high = self.step_slider.end
self.step_spinner.value = self.step_slider.value
def _generate_step_slider(self):
self.step_slider = Slider(start=0, end=self.data_manager.get_current_trajectory_length()-1,
value=self.data_manager.transition_index, step=1, title="Time Step",
sizing_mode='stretch_width')
self.step_spinner = Spinner(low=0, high=self.step_slider.end, step=1, value=self.step_slider.value, width=100)
self.step_slider.on_change('value', step_slider_change_handler)
self.step_spinner.on_change('value', step_slider_change_handler)
def _generate_trajectory_select(self):
self.trajectory_select = Spinner(
title="Choose a trajectory:",
low=0,
high=len(self.data_manager.trajectories_lengths)-1,
step=1,
value=self.data_manager.trajectory_index,
)
self.trajectory_select.on_change('value', trajectory_changed_handler)
def _create_states_ui(self):
self.state_data_sources = [ColumnDataSource(data=data_dict)
for data_dict in create_state_data_dict_from_state(self.data_manager.s)]
self.next_state_data_sources = [ColumnDataSource(data=data_dict)
for data_dict in create_state_data_dict_from_state(self.data_manager.s2)]
self.state_layout = create_state_layout(self.data_manager.s, self.state_data_sources, 'State')
self.next_state_layout = create_state_layout(self.data_manager.s2, self.next_state_data_sources, 'Next State')
def _create_trajectory_rewards_ui(self):
fig = figure(plot_width=600, plot_height=300, sizing_mode='stretch_width')
fig.circle(x='x', y='rewards', source=self.rewards_data_source, name='trajectory_rewards_plot')
self.trajectory_rewards_fig = fig
return fig
def _generate_textual_action_reward_str(self):
text = f'<h4>Reward: {self.data_manager.r[0]}<br>Action: {self.data_manager.a[0]}</h4>'
return text
db_file_path = os.path.join(DEFAULT_LOGS_DIR, DEFAULT_DB_FILENAME)
data_manager = None
ui_manager = None
def start_app(doc):
global data_manager, ui_manager, db_file_path
data_manager = DataManager(db_file_path)
ui_manager = UIManager(data_manager=data_manager)
doc.add_root(ui_manager.layout)
def _start_server(db_path: str = None):
global db_file_path
if db_path is not None:
db_file_path = db_path
server = Server({'/': start_app}, num_procs=1)
server.start()
print('Opening Bokeh application on http://localhost:5006/')
server.io_loop.add_callback(server.show, "/")
server.io_loop.start()
def start_server():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--db_path', required=False)
args = parser.parse_args()
_start_server(args.db_path)
if __name__ == '__main__':
start_server() | /replay-monitor-0.0.5.tar.gz/replay-monitor-0.0.5/replay_monitor/server.py | 0.636805 | 0.458167 | server.py | pypi |
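A minimal launch sketch for the viewer above: point it at a previously recorded database (the path mirrors the Monitor defaults); start_server() is the argparse entry point shown above and accepts the same path via --db_path on the command line.
_start_server(db_path='monitor-logs/monitor_db.h5')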
from typing import Any, Dict, Optional
import pandas as pd
from replay.constants import AnyDataFrame, IntOrList, NumType
from replay.utils import convert2spark
from replay.metrics.base_metric import (
get_enriched_recommendations,
Metric,
NCISMetric,
RecOnlyMetric,
)
# pylint: disable=too-few-public-methods
class Experiment:
"""
This class calculates and stores metric values.
Initialize it with test data and a dictionary mapping metrics to their depth cut-offs.
Results are available with ``pandas_df`` attribute.
Example:
>>> import pandas as pd
>>> from replay.metrics import Coverage, NDCG, Precision, Surprisal
>>> log = pd.DataFrame({"user_idx": [2, 2, 2, 1], "item_idx": [1, 2, 3, 3], "relevance": [5, 5, 5, 5]})
>>> test = pd.DataFrame({"user_idx": [1, 1, 1], "item_idx": [1, 2, 3], "relevance": [5, 3, 4]})
>>> pred = pd.DataFrame({"user_idx": [1, 1, 1], "item_idx": [4, 1, 3], "relevance": [5, 4, 5]})
>>> recs = pd.DataFrame({"user_idx": [1, 1, 1], "item_idx": [1, 4, 5], "relevance": [5, 4, 5]})
>>> ex = Experiment(test, {NDCG(): [2, 3], Surprisal(log): 3})
>>> ex.add_result("baseline", recs)
>>> ex.add_result("baseline_gt_users", recs, ground_truth_users=pd.DataFrame({"user_idx": [1, 3]}))
>>> ex.add_result("model", pred)
>>> ex.results
NDCG@2 NDCG@3 Surprisal@3
baseline 0.613147 0.469279 1.000000
baseline_gt_users 0.306574 0.234639 0.500000
model 0.386853 0.530721 0.666667
>>> ex.compare("baseline")
NDCG@2 NDCG@3 Surprisal@3
baseline – – –
baseline_gt_users -50.0% -50.0% -50.0%
model -36.91% 13.09% -33.33%
>>> ex = Experiment(test, {Precision(): [3]}, calc_median=True, calc_conf_interval=0.95)
>>> ex.add_result("baseline", recs)
>>> ex.add_result("model", pred)
>>> ex.results
Precision@3 Precision@3_median Precision@3_0.95_conf_interval
baseline 0.333333 0.333333 0.0
model 0.666667 0.666667 0.0
>>> ex = Experiment(test, {Coverage(log): 3}, calc_median=True, calc_conf_interval=0.95)
>>> ex.add_result("baseline", recs)
>>> ex.add_result("model", pred)
>>> ex.results
Coverage@3 Coverage@3_median Coverage@3_0.95_conf_interval
baseline 1.0 1.0 0.0
model 1.0 1.0 0.0
"""
# pylint: disable=too-many-arguments
def __init__(
self,
test: Any,
metrics: Dict[Metric, IntOrList],
calc_median: bool = False,
calc_conf_interval: Optional[float] = None,
):
"""
:param test: test DataFrame
:param metrics: Dictionary of metrics to calculate.
Key -- metric, value -- ``int`` or a list of ints.
:param calc_median: flag to calculate median value across users
:param calc_conf_interval: quantile value for the calculation of the confidence interval.
            The resulting value is half of the confidence interval.
"""
self.test = convert2spark(test)
self.results = pd.DataFrame()
self.metrics = metrics
self.calc_median = calc_median
self.calc_conf_interval = calc_conf_interval
def add_result(
self,
name: str,
pred: AnyDataFrame,
ground_truth_users: Optional[AnyDataFrame] = None,
) -> None:
"""
Calculate metrics for predictions
:param name: name of the run to store in the resulting DataFrame
:param pred: model recommendations
:param ground_truth_users: list of users to consider in metric calculation.
if None, only the users from ground_truth are considered.
"""
max_k = 0
for current_k in self.metrics.values():
max_k = max(
(*current_k, max_k)
if isinstance(current_k, list)
else (current_k, max_k)
)
recs = get_enriched_recommendations(
pred, self.test, max_k, ground_truth_users
).cache()
for metric, k_list in sorted(
self.metrics.items(), key=lambda x: str(x[0])
):
enriched = None
if isinstance(metric, (RecOnlyMetric, NCISMetric)):
enriched = metric._get_enriched_recommendations(
pred, self.test, max_k, ground_truth_users
)
values, median, conf_interval = self._calculate(
metric, enriched or recs, k_list
)
if isinstance(k_list, int):
self._add_metric( # type: ignore
name,
metric,
k_list,
values, # type: ignore
median, # type: ignore
conf_interval, # type: ignore
)
else:
for k, val in sorted(values.items(), key=lambda x: x[0]):
self._add_metric(
name,
metric,
k,
val,
None if median is None else median[k],
None if conf_interval is None else conf_interval[k],
)
recs.unpersist()
def _calculate(self, metric, enriched, k_list):
median = None
conf_interval = None
values = metric._mean(enriched, k_list)
if self.calc_median:
median = metric._median(enriched, k_list)
if self.calc_conf_interval is not None:
conf_interval = metric._conf_interval(
enriched, k_list, self.calc_conf_interval
)
return values, median, conf_interval
# pylint: disable=too-many-arguments
def _add_metric(
self,
name: str,
metric: Metric,
k: int,
value: NumType,
median: Optional[NumType],
conf_interval: Optional[NumType],
):
"""
Add metric for a specific k
:param name: name to save results
:param metric: metric object
:param k: length of the recommendation list
:param value: metric value
:param median: median value
:param conf_interval: confidence interval value
"""
self.results.at[name, f"{metric}@{k}"] = value # type: ignore
if median is not None:
self.results.at[
name, f"{metric}@{k}_median"
] = median # type: ignore
if conf_interval is not None:
self.results.at[
name, f"{metric}@{k}_{self.calc_conf_interval}_conf_interval"
] = conf_interval
# pylint: disable=not-an-iterable
def compare(self, name: str) -> pd.DataFrame:
"""
Show results as a percentage difference to record ``name``.
:param name: name of the baseline record
:return: results table in a percentage format
"""
if name not in self.results.index:
raise ValueError(f"No results for model {name}")
columns = [
column for column in self.results.columns if column[-1].isdigit()
]
data_frame = self.results[columns].copy()
baseline = data_frame.loc[name]
for idx in data_frame.index:
if idx != name:
diff = data_frame.loc[idx] / baseline - 1
data_frame.loc[idx] = [
str(round(v * 100, 2)) + "%" for v in diff
]
else:
data_frame.loc[name] = ["–"] * len(baseline)
return data_frame | /replay_rec-0.11.0-py3-none-any.whl/replay/experiment.py | 0.952915 | 0.437223 | experiment.py | pypi |
import pyspark.sql.functions as sf
import numpy as np
from replay.utils import convert2spark
from replay.constants import AnyDataFrame
def get_item_recency(
log: AnyDataFrame,
decay: float = 30,
limit: float = 0.1,
kind: str = "exp",
):
"""
Calculate item weight showing when the majority of interactions with this item happened.
:param log: interactions log
    :param decay: number of days after which the weight is reduced by half, must be greater than 1
:param limit: minimal value the weight can reach
:param kind: type of smoothing, one of [power, exp, linear]
Corresponding functions are ``power``: ``age^c``,
``exp``: ``c^age``, ``linear``: ``1-c*age``
:return: DataFrame with item weights
>>> import pandas as pd
>>> d = {}
>>> d["item_idx"] = [1, 1, 2, 3, 3]
>>> d["timestamp"] = ["2099-03-19", "2099-03-20", "2099-03-22", "2099-03-27", "2099-03-25"]
>>> d["relevance"] = [1, 1, 1, 1, 1]
>>> df = pd.DataFrame(d)
>>> df
item_idx timestamp relevance
0 1 2099-03-19 1
1 1 2099-03-20 1
2 2 2099-03-22 1
3 3 2099-03-27 1
4 3 2099-03-25 1
Age in days is calculated for every item,
which is transformed into a weight using some function.
There are three types of smoothing types available: power, exp and linear.
Each type calculates a parameter ``c`` based on the ``decay`` argument,
so that an item with ``age==decay`` has weight 0.5.
Power smoothing falls quickly in the beginning but decays slowly afterwards as ``age^c``.
>>> get_item_recency(df, kind="power").orderBy("item_idx").show()
+--------+-------------------+------------------+
|item_idx| timestamp| relevance|
+--------+-------------------+------------------+
| 1|2099-03-19 12:00:00|0.6632341020947187|
| 2|2099-03-22 00:00:00|0.7203662792445817|
| 3|2099-03-26 00:00:00| 1.0|
+--------+-------------------+------------------+
<BLANKLINE>
Exponential smoothing is the other way around. Old objects decay more quickly as ``c^age``.
>>> get_item_recency(df, kind="exp").orderBy("item_idx").show()
+--------+-------------------+------------------+
|item_idx| timestamp| relevance|
+--------+-------------------+------------------+
| 1|2099-03-19 12:00:00|0.8605514372443298|
| 2|2099-03-22 00:00:00|0.9117224885582166|
| 3|2099-03-26 00:00:00| 1.0|
+--------+-------------------+------------------+
<BLANKLINE>
Last type is a linear smoothing: ``1 - c*age``.
>>> get_item_recency(df, kind="linear").orderBy("item_idx").show()
+--------+-------------------+------------------+
|item_idx| timestamp| relevance|
+--------+-------------------+------------------+
| 1|2099-03-19 12:00:00|0.8916666666666666|
| 2|2099-03-22 00:00:00|0.9333333333333333|
| 3|2099-03-26 00:00:00| 1.0|
+--------+-------------------+------------------+
<BLANKLINE>
This function **does not** take relevance values of interactions into account.
Only item age is used.
"""
log = convert2spark(log)
items = log.select(
"item_idx",
sf.unix_timestamp(sf.to_timestamp("timestamp")).alias("timestamp"),
)
items = items.groupBy("item_idx").agg(
sf.mean("timestamp").alias("timestamp")
)
items = items.withColumn("relevance", sf.lit(1))
items = smoothe_time(items, decay, limit, kind)
return items
def smoothe_time(
log: AnyDataFrame,
decay: float = 30,
limit: float = 0.1,
kind: str = "exp",
):
"""
Weighs ``relevance`` column with a time-dependent weight.
:param log: interactions log
    :param decay: number of days after which the weight is reduced by half, must be greater than 1
:param limit: minimal value the weight can reach
:param kind: type of smoothing, one of [power, exp, linear].
Corresponding functions are ``power``: ``age^c``,
``exp``: ``c^age``, ``linear``: ``1-c*age``
:return: modified DataFrame
>>> import pandas as pd
>>> d = {}
>>> d["item_idx"] = [1, 1, 2, 3, 3]
>>> d["timestamp"] = ["2099-03-19", "2099-03-20", "2099-03-22", "2099-03-27", "2099-03-25"]
>>> d["relevance"] = [1, 1, 1, 1, 1]
>>> df = pd.DataFrame(d)
>>> df
item_idx timestamp relevance
0 1 2099-03-19 1
1 1 2099-03-20 1
2 2 2099-03-22 1
3 3 2099-03-27 1
4 3 2099-03-25 1
Power smoothing falls quickly in the beginning but decays slowly afterwards as ``age^c``.
>>> smoothe_time(df, kind="power").orderBy("timestamp").show()
+--------+-------------------+------------------+
|item_idx| timestamp| relevance|
+--------+-------------------+------------------+
| 1|2099-03-19 00:00:00|0.6390430306850825|
| 1|2099-03-20 00:00:00| 0.654567945027101|
| 2|2099-03-22 00:00:00|0.6940913454809814|
| 3|2099-03-25 00:00:00|0.7994016704292545|
| 3|2099-03-27 00:00:00| 1.0|
+--------+-------------------+------------------+
<BLANKLINE>
Exponential smoothing is the other way around. Old objects decay more quickly as ``c^age``.
>>> smoothe_time(df, kind="exp").orderBy("timestamp").show()
+--------+-------------------+------------------+
|item_idx| timestamp| relevance|
+--------+-------------------+------------------+
| 1|2099-03-19 00:00:00|0.8312378961427874|
| 1|2099-03-20 00:00:00|0.8506671609508554|
| 2|2099-03-22 00:00:00| 0.890898718140339|
| 3|2099-03-25 00:00:00|0.9548416039104165|
| 3|2099-03-27 00:00:00| 1.0|
+--------+-------------------+------------------+
<BLANKLINE>
Last type is a linear smoothing: ``1 - c*age``.
>>> smoothe_time(df, kind="linear").orderBy("timestamp").show()
+--------+-------------------+------------------+
|item_idx| timestamp| relevance|
+--------+-------------------+------------------+
| 1|2099-03-19 00:00:00|0.8666666666666667|
| 1|2099-03-20 00:00:00|0.8833333333333333|
| 2|2099-03-22 00:00:00|0.9166666666666666|
| 3|2099-03-25 00:00:00|0.9666666666666667|
| 3|2099-03-27 00:00:00| 1.0|
+--------+-------------------+------------------+
<BLANKLINE>
These examples use constant relevance 1, so resulting weight equals the time dependent weight.
But actually this value is an updated relevance.
>>> d = {}
>>> d["item_idx"] = [1, 2, 3]
>>> d["timestamp"] = ["2099-03-19", "2099-03-20", "2099-03-22"]
>>> d["relevance"] = [10, 3, 0.1]
>>> df = pd.DataFrame(d)
>>> df
item_idx timestamp relevance
0 1 2099-03-19 10.0
1 2 2099-03-20 3.0
2 3 2099-03-22 0.1
>>> smoothe_time(df).orderBy("timestamp").show()
+--------+-------------------+------------------+
|item_idx| timestamp| relevance|
+--------+-------------------+------------------+
| 1|2099-03-19 00:00:00| 9.330329915368074|
| 2|2099-03-20 00:00:00|2.8645248117312496|
| 3|2099-03-22 00:00:00| 0.1|
+--------+-------------------+------------------+
<BLANKLINE>
"""
log = convert2spark(log)
log = log.withColumn(
"timestamp", sf.unix_timestamp(sf.to_timestamp("timestamp"))
)
last_date = (
log.agg({"timestamp": "max"}).collect()[0].asDict()["max(timestamp)"]
)
day_in_secs = 86400
log = log.withColumn(
"age", (last_date - sf.col("timestamp")) / day_in_secs
)
if kind == "power":
power = np.log(0.5) / np.log(decay)
log = log.withColumn("age", sf.pow(sf.col("age") + 1, power))
elif kind == "exp":
base = np.exp(np.log(0.5) / decay)
log = log.withColumn("age", sf.pow(base, "age"))
elif kind == "linear":
k = 0.5 / decay
log = log.withColumn("age", 1 - k * sf.col("age"))
else:
raise ValueError(
f"parameter kind must be one of [power, exp, linear], got {kind}"
)
log = log.withColumn(
"age", sf.when(sf.col("age") < limit, limit).otherwise(sf.col("age"))
)
log = log.withColumn(
"relevance", sf.col("relevance") * sf.col("age")
).drop("age")
log = log.withColumn("timestamp", sf.to_timestamp("timestamp"))
return log | /replay_rec-0.11.0-py3-none-any.whl/replay/time.py | 0.88557 | 0.673608 | time.py | pypi |
from typing import Dict, Optional, List
import pyspark.sql.functions as sf
from datetime import datetime
from pyspark.sql import DataFrame
from pyspark.sql.types import TimestampType
from replay.utils import (
join_or_return,
join_with_col_renaming,
unpersist_if_exists,
)
class EmptyFeatureProcessor:
"""Do not perform any transformations on the dataframe"""
def fit(self, log: DataFrame, features: Optional[DataFrame]) -> None:
"""
:param log: input DataFrame ``[user_idx, item_idx, timestamp, relevance]``
:param features: DataFrame with ``user_idx/item_idx`` and feature columns
"""
# pylint: disable=no-self-use
def transform(self, log: DataFrame) -> DataFrame:
"""
Return log without any transformations
:param log: spark DataFrame
"""
return log
class LogStatFeaturesProcessor(EmptyFeatureProcessor):
"""
Calculate user and item features based on interactions log:
Based on the number of interactions:
- log number of interactions (1)
    - average log number of interactions of users who interacted with the item, and vice versa (2)
- difference between number of interactions by user/item (1)
and average number of interactions (2)
- cold user/item flag
Based on timestamp (if present and has a TimestampType):
- min and max interaction timestamp for user/item
- history length (max - min timestamp)
- log number of interactions' days
- difference in days between last date in log and last interaction of the user/item
    Based on ratings/relevance:
- relevance mean and std
- relevance approximate quantiles (0.05, 0.5, 0.95)
- abnormality of user's preferences https://hal.inria.fr/hal-01254172/document
"""
calc_timestamp_based: bool = False
calc_relevance_based: bool = False
user_log_features: Optional[DataFrame] = None
item_log_features: Optional[DataFrame] = None
def _create_log_aggregates(self, agg_col: str = "user_idx") -> List:
"""
Create features based on relevance type
(binary or not) and whether timestamp is present.
:param agg_col: column to create features for, user_idx or item_idx
:return: list of columns to pass into pyspark agg
"""
prefix = agg_col[:1]
aggregates = [
sf.log(sf.count(sf.col("relevance"))).alias(
f"{prefix}_log_num_interact"
)
]
if self.calc_timestamp_based:
aggregates.extend(
[
sf.log(
sf.countDistinct(
sf.date_trunc("dd", sf.col("timestamp"))
)
).alias(f"{prefix}_log_interact_days_count"),
sf.min(sf.col("timestamp")).alias(
f"{prefix}_min_interact_date"
),
sf.max(sf.col("timestamp")).alias(
f"{prefix}_max_interact_date"
),
]
)
if self.calc_relevance_based:
aggregates.extend(
[
(
sf.when(
sf.stddev(sf.col("relevance")).isNull()
| sf.isnan(sf.stddev(sf.col("relevance"))),
0,
)
.otherwise(sf.stddev(sf.col("relevance")))
.alias(f"{prefix}_std")
),
sf.mean(sf.col("relevance")).alias(f"{prefix}_mean"),
]
)
for percentile in [0.05, 0.5, 0.95]:
aggregates.append(
sf.expr(
f"percentile_approx(relevance, {percentile})"
).alias(f"{prefix}_quantile_{str(percentile)[2:]}")
)
return aggregates
@staticmethod
def _add_ts_based(
features: DataFrame, max_log_date: datetime, prefix: str
) -> DataFrame:
"""
Add history length (max - min timestamp) and difference in days between
last date in log and last interaction of the user/item
:param features: dataframe with calculated log-based features
:param max_log_date: max timestamp in log used for features calculation
:param prefix: identifier used as a part of column name
:return: features dataframe with new timestamp-based columns
"""
return features.withColumn(
f"{prefix}_history_length_days",
sf.datediff(
sf.col(f"{prefix}_max_interact_date"),
sf.col(f"{prefix}_min_interact_date"),
),
).withColumn(
f"{prefix}_last_interaction_gap_days",
sf.datediff(
sf.lit(max_log_date), sf.col(f"{prefix}_max_interact_date")
),
)
@staticmethod
def _cals_cross_interactions_count(
log: DataFrame, features: DataFrame
) -> DataFrame:
"""
        Calculate the average log number of interactions of users who interacted
        with each item (or, symmetrically, of items each user interacted with).
        The difference with the entity's own log count is computed later in ``transform``.
        :param log: interactions log
        :param features: dataframe with log-based features for the other entity
:return: features dataframe with new columns
"""
if "user_idx" in features.columns:
new_feature_entity, calc_by_entity = "item_idx", "user_idx"
else:
new_feature_entity, calc_by_entity = "user_idx", "item_idx"
mean_log_num_interact = log.join(
features.select(
calc_by_entity, f"{calc_by_entity[0]}_log_num_interact"
),
on=calc_by_entity,
how="left",
)
return mean_log_num_interact.groupBy(new_feature_entity).agg(
sf.mean(f"{calc_by_entity[0]}_log_num_interact").alias(
f"{new_feature_entity[0]}_mean_{calc_by_entity[0]}_log_num_interact"
)
)
@staticmethod
def _calc_abnormality(
log: DataFrame, item_features: DataFrame
) -> DataFrame:
"""
Calculate discrepancy between a rating on a resource
and the average rating of this resource (Abnormality) and
abnormality taking controversy of the item into account (AbnormalityCR).
https://hal.inria.fr/hal-01254172/document
        :param log: interactions log
        :param item_features: dataframe with calculated log-based item features
:return: features dataframe with new columns
"""
# Abnormality
abnormality_df = join_with_col_renaming(
left=log,
right=item_features.select("item_idx", "i_mean", "i_std"),
on_col_name="item_idx",
how="left",
)
abnormality_df = abnormality_df.withColumn(
"abnormality", sf.abs(sf.col("relevance") - sf.col("i_mean"))
)
abnormality_aggs = [
sf.mean(sf.col("abnormality")).alias("abnormality")
]
# Abnormality CR:
max_std = item_features.select(sf.max("i_std")).collect()[0][0]
min_std = item_features.select(sf.min("i_std")).collect()[0][0]
if max_std - min_std != 0:
abnormality_df = abnormality_df.withColumn(
"controversy",
1
- (sf.col("i_std") - sf.lit(min_std))
/ (sf.lit(max_std - min_std)),
)
abnormality_df = abnormality_df.withColumn(
"abnormalityCR",
(sf.col("abnormality") * sf.col("controversy")) ** 2,
)
abnormality_aggs.append(
sf.mean(sf.col("abnormalityCR")).alias("abnormalityCR")
)
return abnormality_df.groupBy("user_idx").agg(*abnormality_aggs)
def fit(
self, log: DataFrame, features: Optional[DataFrame] = None
) -> None:
"""
Calculate log-based features for users and items
:param log: input DataFrame ``[user_idx, item_idx, timestamp, relevance]``
:param features: not required
"""
self.calc_timestamp_based = (
isinstance(log.schema["timestamp"].dataType, TimestampType)
) & (
log.select(sf.countDistinct(sf.col("timestamp"))).collect()[0][0]
> 1
)
self.calc_relevance_based = (
log.select(sf.countDistinct(sf.col("relevance"))).collect()[0][0]
> 1
)
user_log_features = log.groupBy("user_idx").agg(
*self._create_log_aggregates(agg_col="user_idx")
)
item_log_features = log.groupBy("item_idx").agg(
*self._create_log_aggregates(agg_col="item_idx")
)
if self.calc_timestamp_based:
last_date = log.select(sf.max("timestamp")).collect()[0][0]
user_log_features = self._add_ts_based(
features=user_log_features, max_log_date=last_date, prefix="u"
)
item_log_features = self._add_ts_based(
features=item_log_features, max_log_date=last_date, prefix="i"
)
if self.calc_relevance_based:
user_log_features = user_log_features.join(
self._calc_abnormality(
log=log, item_features=item_log_features
),
on="user_idx",
how="left",
).cache()
self.user_log_features = join_with_col_renaming(
left=user_log_features,
right=self._cals_cross_interactions_count(
log=log, features=item_log_features
),
on_col_name="user_idx",
how="left",
).cache()
self.item_log_features = join_with_col_renaming(
left=item_log_features,
right=self._cals_cross_interactions_count(
log=log, features=user_log_features
),
on_col_name="item_idx",
how="left",
).cache()
def transform(self, log: DataFrame) -> DataFrame:
"""
Add log-based features for users and items
:param log: input DataFrame with
``[user_idx, item_idx, <features columns>]`` columns
:return: log with log-based feature columns
"""
joined = (
log.join(
self.user_log_features,
on="user_idx",
how="left",
)
.join(
self.item_log_features,
on="item_idx",
how="left",
)
.withColumn(
"na_u_log_features",
sf.when(sf.col("u_log_num_interact").isNull(), 1.0).otherwise(
0.0
),
)
.withColumn(
"na_i_log_features",
sf.when(sf.col("i_log_num_interact").isNull(), 1.0).otherwise(
0.0
),
)
            # TODO: std and date diff are to be replaced with inf - will the date features
            # work correctly? And will they work correctly if we do not replace them?
.fillna(
{
col_name: 0
for col_name in self.user_log_features.columns
+ self.item_log_features.columns
}
)
)
joined = joined.withColumn(
"u_i_log_num_interact_diff",
sf.col("u_log_num_interact") - sf.col("i_mean_u_log_num_interact"),
).withColumn(
"i_u_log_num_interact_diff",
sf.col("i_log_num_interact") - sf.col("u_mean_i_log_num_interact"),
)
return joined
def __del__(self):
unpersist_if_exists(self.user_log_features)
unpersist_if_exists(self.item_log_features)
class ConditionalPopularityProcessor(EmptyFeatureProcessor):
"""
Calculate popularity based on user or item categorical features
(for example movie popularity among users of the same age group).
If user features are provided, item features will be generated and vice versa.
"""
conditional_pop_dict: Optional[Dict[str, DataFrame]]
entity_name: str
def __init__(
self,
cat_features_list: List,
):
"""
:param cat_features_list: List of columns with categorical features to use
for conditional popularity calculation
"""
self.cat_features_list = cat_features_list
def fit(self, log: DataFrame, features: DataFrame) -> None:
"""
Calculate conditional popularity for id and categorical features
defined in `cat_features_list`
:param log: input DataFrame ``[user_idx, item_idx, timestamp, relevance]``
:param features: DataFrame with ``user_idx/item_idx`` and feature columns
"""
if len(
set(self.cat_features_list).intersection(features.columns)
) != len(self.cat_features_list):
raise ValueError(
f"Columns {set(self.cat_features_list).difference(features.columns)} "
f"defined in `cat_features_list` are absent in features. "
f"features columns are: {features.columns}."
)
join_col, self.entity_name = (
("item_idx", "user_idx")
if "item_idx" in features.columns
else ("user_idx", "item_idx")
)
self.conditional_pop_dict = {}
log_with_features = log.join(features, on=join_col, how="left")
count_by_entity_col_name = f"count_by_{self.entity_name}"
count_by_entity_col = log_with_features.groupBy(self.entity_name).agg(
sf.count("relevance").alias(count_by_entity_col_name)
)
for cat_col in self.cat_features_list:
col_name = f"{self.entity_name[0]}_pop_by_{cat_col}"
intermediate_df = log_with_features.groupBy(
self.entity_name, cat_col
).agg(sf.count("relevance").alias(col_name))
intermediate_df = intermediate_df.join(
sf.broadcast(count_by_entity_col),
on=self.entity_name,
how="left",
)
self.conditional_pop_dict[cat_col] = intermediate_df.withColumn(
col_name, sf.col(col_name) / sf.col(count_by_entity_col_name)
).drop(count_by_entity_col_name)
self.conditional_pop_dict[cat_col].cache()
def transform(self, log: DataFrame) -> DataFrame:
"""
Add conditional popularity features
:param log: input DataFrame with
``[user_idx, item_idx, <features columns>]`` columns
:return: log with conditional popularity feature columns
"""
joined = log
for (
key,
value,
) in self.conditional_pop_dict.items():
joined = join_or_return(
joined,
sf.broadcast(value),
on=[self.entity_name, key],
how="left",
).withColumn(
f"na_{self.entity_name[0]}_pop_by_{key}",
sf.when(
sf.col(f"{self.entity_name[0]}_pop_by_{key}").isNull(),
True,
).otherwise(False),
)
joined = joined.fillna({f"{self.entity_name[0]}_pop_by_{key}": 0})
return joined
def __del__(self):
for df in self.conditional_pop_dict.values():
unpersist_if_exists(df)
# pylint: disable=too-many-instance-attributes, too-many-arguments
class HistoryBasedFeaturesProcessor:
"""
Calculate user and item features based on interactions history (log).
    Calculated features include the number of interactions, rating and timestamp distribution features
and conditional popularity for pairs `user_idx/item_idx - categorical feature`.
See LogStatFeaturesProcessor and ConditionalPopularityProcessor documentation
for detailed description of generated features.
"""
log_processor = EmptyFeatureProcessor()
user_cond_pop_proc = EmptyFeatureProcessor()
item_cond_pop_proc = EmptyFeatureProcessor()
def __init__(
self,
use_log_features: bool = True,
use_conditional_popularity: bool = True,
user_cat_features_list: Optional[List] = None,
item_cat_features_list: Optional[List] = None,
):
"""
:param use_log_features: if add statistical log-based features
generated by LogStatFeaturesProcessor
:param use_conditional_popularity: if add conditional popularity
features generated by ConditionalPopularityProcessor
:param user_cat_features_list: list of user categorical features
used to calculate item conditional popularity features
:param item_cat_features_list: list of item categorical features
used to calculate user conditional popularity features
"""
if use_log_features:
self.log_processor = LogStatFeaturesProcessor()
        if use_conditional_popularity:
if user_cat_features_list:
self.user_cond_pop_proc = ConditionalPopularityProcessor(
cat_features_list=user_cat_features_list
)
if item_cat_features_list:
self.item_cond_pop_proc = ConditionalPopularityProcessor(
cat_features_list=item_cat_features_list
)
self.fitted: bool = False
def fit(
self,
log: DataFrame,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
) -> None:
"""
Calculate log and conditional popularity features.
:param log: input DataFrame ``[user_idx, item_idx, timestamp, relevance]``
:param user_features: DataFrame with ``user_idx`` and feature columns
:param item_features: DataFrame with ``item_idx`` and feature columns
"""
log = log.cache()
self.log_processor.fit(log=log, features=user_features)
self.user_cond_pop_proc.fit(log=log, features=user_features)
self.item_cond_pop_proc.fit(log=log, features=item_features)
self.fitted = True
log.unpersist()
def transform(
self,
log: DataFrame,
):
"""
Add features
:param log: input DataFrame with
``[user_idx, item_idx, <features columns>]`` columns
:return: augmented DataFrame
"""
if not self.fitted:
raise AttributeError("Call fit before running transform")
joined = self.log_processor.transform(log)
joined = self.user_cond_pop_proc.transform(joined)
joined = self.item_cond_pop_proc.transform(joined)
return joined | /replay_rec-0.11.0-py3-none-any.whl/replay/history_based_fp.py | 0.927511 | 0.585042 | history_based_fp.py | pypi |
from datetime import datetime, timedelta
from pyspark.sql import DataFrame, Window, functions as sf
from pyspark.sql.functions import col
from pyspark.sql.types import TimestampType
from typing import Union, Optional
from replay.constants import AnyDataFrame
from replay.utils import convert2spark
from replay.session_handler import State
def filter_by_min_count(
data_frame: AnyDataFrame, num_entries: int, group_by: str = "user_idx"
) -> DataFrame:
"""
    Remove entries with entities (e.g. users, items) which appear in `data_frame`
    fewer than `num_entries` times. The `data_frame` is grouped by the `group_by` column,
which is entry column name, to calculate counts.
>>> import pandas as pd
>>> data_frame = pd.DataFrame({"user_idx": [1, 1, 2]})
>>> filter_by_min_count(data_frame, 2).toPandas()
user_idx
0 1
1 1
:param data_frame: spark or pandas dataframe to apply filter
:param num_entries: minimal number of times the entry should appear in dataset
in order to remain
    :param group_by: entity column, which is used to calculate entity occurrence counts
    :return: filtered `data_frame`
"""
data_frame = convert2spark(data_frame)
input_count = data_frame.count()
count_by_group = data_frame.groupBy(group_by).agg(
sf.count(group_by).alias(f"{group_by}_temp_count")
)
remaining_entities = count_by_group.filter(
count_by_group[f"{group_by}_temp_count"] >= num_entries
).select(group_by)
data_frame = data_frame.join(remaining_entities, on=group_by, how="inner")
output_count = data_frame.count()
diff = (input_count - output_count) / input_count
if diff > 0.5:
logger_level = State().logger.warning
else:
logger_level = State().logger.info
logger_level(
"current threshold removes %s%% of data",
diff,
)
return data_frame
def filter_out_low_ratings(
data_frame: AnyDataFrame, value: float, rating_column="relevance"
) -> DataFrame:
"""
    Remove records with rating less than ``value`` in ``rating_column``.
>>> import pandas as pd
>>> data_frame = pd.DataFrame({"relevance": [1, 5, 3.5, 4]})
>>> filter_out_low_ratings(data_frame, 3.5).show()
+---------+
|relevance|
+---------+
| 5.0|
| 3.5|
| 4.0|
+---------+
<BLANKLINE>
"""
data_frame = convert2spark(data_frame)
data_frame = data_frame.filter(data_frame[rating_column] >= value)
return data_frame
# pylint: disable=too-many-arguments,
def take_num_user_interactions(
log: DataFrame,
num_interactions: int = 10,
first: bool = True,
date_col: str = "timestamp",
user_col: str = "user_idx",
item_col: Optional[str] = "item_idx",
) -> DataFrame:
"""
Get first/last ``num_interactions`` interactions for each user.
>>> import pandas as pd
>>> from replay.utils import convert2spark
>>> log_pd = pd.DataFrame({"user_idx": ["u1", "u2", "u2", "u3", "u3", "u3"],
... "item_idx": ["i1", "i2","i3", "i1", "i2","i3"],
... "rel": [1., 0.5, 3, 1, 0, 1],
... "timestamp": ["2020-01-01 23:59:59", "2020-02-01",
... "2020-02-01", "2020-01-01 00:04:15",
... "2020-01-02 00:04:14", "2020-01-05 23:59:59"]},
... )
>>> log_pd["timestamp"] = pd.to_datetime(log_pd["timestamp"])
>>> log_sp = convert2spark(log_pd)
>>> log_sp.show()
+--------+--------+---+-------------------+
|user_idx|item_idx|rel| timestamp|
+--------+--------+---+-------------------+
| u1| i1|1.0|2020-01-01 23:59:59|
| u2| i2|0.5|2020-02-01 00:00:00|
| u2| i3|3.0|2020-02-01 00:00:00|
| u3| i1|1.0|2020-01-01 00:04:15|
| u3| i2|0.0|2020-01-02 00:04:14|
| u3| i3|1.0|2020-01-05 23:59:59|
+--------+--------+---+-------------------+
<BLANKLINE>
Only first interaction:
>>> take_num_user_interactions(log_sp, 1, True).orderBy('user_idx').show()
+--------+--------+---+-------------------+
|user_idx|item_idx|rel| timestamp|
+--------+--------+---+-------------------+
| u1| i1|1.0|2020-01-01 23:59:59|
| u2| i2|0.5|2020-02-01 00:00:00|
| u3| i1|1.0|2020-01-01 00:04:15|
+--------+--------+---+-------------------+
<BLANKLINE>
Only last interaction:
>>> take_num_user_interactions(log_sp, 1, False, item_col=None).orderBy('user_idx').show()
+--------+--------+---+-------------------+
|user_idx|item_idx|rel| timestamp|
+--------+--------+---+-------------------+
| u1| i1|1.0|2020-01-01 23:59:59|
| u2| i2|0.5|2020-02-01 00:00:00|
| u3| i3|1.0|2020-01-05 23:59:59|
+--------+--------+---+-------------------+
<BLANKLINE>
>>> take_num_user_interactions(log_sp, 1, False).orderBy('user_idx').show()
+--------+--------+---+-------------------+
|user_idx|item_idx|rel| timestamp|
+--------+--------+---+-------------------+
| u1| i1|1.0|2020-01-01 23:59:59|
| u2| i3|3.0|2020-02-01 00:00:00|
| u3| i3|1.0|2020-01-05 23:59:59|
+--------+--------+---+-------------------+
<BLANKLINE>
:param log: historical interactions DataFrame
:param num_interactions: number of interactions to leave per user
:param first: take either first ``num_interactions`` or last.
:param date_col: date column
:param user_col: user column
:param item_col: item column to help sort simultaneous interactions.
If None, it is ignored.
:return: filtered DataFrame
"""
sorting_order = [col(date_col)]
if item_col is not None:
sorting_order.append(col(item_col))
if not first:
sorting_order = [col_.desc() for col_ in sorting_order]
window = Window().orderBy(*sorting_order).partitionBy(col(user_col))
return (
log.withColumn("temp_rank", sf.row_number().over(window))
.filter(col("temp_rank") <= num_interactions)
.drop("temp_rank")
)
def take_num_days_of_user_hist(
log: DataFrame,
days: int = 10,
first: bool = True,
date_col: str = "timestamp",
user_col: str = "user_idx",
) -> DataFrame:
"""
Get first/last ``days`` of users' interactions.
>>> import pandas as pd
>>> from replay.utils import convert2spark
>>> log_pd = pd.DataFrame({"user_idx": ["u1", "u2", "u2", "u3", "u3", "u3"],
... "item_idx": ["i1", "i2","i3", "i1", "i2","i3"],
... "rel": [1., 0.5, 3, 1, 0, 1],
... "timestamp": ["2020-01-01 23:59:59", "2020-02-01",
... "2020-02-01", "2020-01-01 00:04:15",
... "2020-01-02 00:04:14", "2020-01-05 23:59:59"]},
... )
>>> log_pd["timestamp"] = pd.to_datetime(log_pd["timestamp"])
>>> log_sp = convert2spark(log_pd)
>>> log_sp.orderBy('user_idx', 'item_idx').show()
+--------+--------+---+-------------------+
|user_idx|item_idx|rel| timestamp|
+--------+--------+---+-------------------+
| u1| i1|1.0|2020-01-01 23:59:59|
| u2| i2|0.5|2020-02-01 00:00:00|
| u2| i3|3.0|2020-02-01 00:00:00|
| u3| i1|1.0|2020-01-01 00:04:15|
| u3| i2|0.0|2020-01-02 00:04:14|
| u3| i3|1.0|2020-01-05 23:59:59|
+--------+--------+---+-------------------+
<BLANKLINE>
Get first day:
>>> take_num_days_of_user_hist(log_sp, 1, True).orderBy('user_idx', 'item_idx').show()
+--------+--------+---+-------------------+
|user_idx|item_idx|rel| timestamp|
+--------+--------+---+-------------------+
| u1| i1|1.0|2020-01-01 23:59:59|
| u2| i2|0.5|2020-02-01 00:00:00|
| u2| i3|3.0|2020-02-01 00:00:00|
| u3| i1|1.0|2020-01-01 00:04:15|
| u3| i2|0.0|2020-01-02 00:04:14|
+--------+--------+---+-------------------+
<BLANKLINE>
Get last day:
>>> take_num_days_of_user_hist(log_sp, 1, False).orderBy('user_idx', 'item_idx').show()
+--------+--------+---+-------------------+
|user_idx|item_idx|rel| timestamp|
+--------+--------+---+-------------------+
| u1| i1|1.0|2020-01-01 23:59:59|
| u2| i2|0.5|2020-02-01 00:00:00|
| u2| i3|3.0|2020-02-01 00:00:00|
| u3| i3|1.0|2020-01-05 23:59:59|
+--------+--------+---+-------------------+
<BLANKLINE>
:param log: historical DataFrame
:param days: how many days to return per user
:param first: take either first ``days`` or last
:param date_col: date column
:param user_col: user column
"""
window = Window.partitionBy(user_col)
if first:
return (
log.withColumn("min_date", sf.min(col(date_col)).over(window))
.filter(
col(date_col)
< col("min_date") + sf.expr(f"INTERVAL {days} days")
)
.drop("min_date")
)
return (
log.withColumn("max_date", sf.max(col(date_col)).over(window))
.filter(
col(date_col) > col("max_date") - sf.expr(f"INTERVAL {days} days")
)
.drop("max_date")
)
def take_time_period(
log: DataFrame,
start_date: Optional[Union[str, datetime]] = None,
end_date: Optional[Union[str, datetime]] = None,
date_column: str = "timestamp",
) -> DataFrame:
"""
Select a part of data between ``[start_date, end_date)``.
>>> import pandas as pd
>>> from replay.utils import convert2spark
>>> log_pd = pd.DataFrame({"user_idx": ["u1", "u2", "u2", "u3", "u3", "u3"],
... "item_idx": ["i1", "i2","i3", "i1", "i2","i3"],
... "rel": [1., 0.5, 3, 1, 0, 1],
... "timestamp": ["2020-01-01 23:59:59", "2020-02-01",
... "2020-02-01", "2020-01-01 00:04:15",
... "2020-01-02 00:04:14", "2020-01-05 23:59:59"]},
... )
>>> log_pd["timestamp"] = pd.to_datetime(log_pd["timestamp"])
>>> log_sp = convert2spark(log_pd)
>>> log_sp.show()
+--------+--------+---+-------------------+
|user_idx|item_idx|rel| timestamp|
+--------+--------+---+-------------------+
| u1| i1|1.0|2020-01-01 23:59:59|
| u2| i2|0.5|2020-02-01 00:00:00|
| u2| i3|3.0|2020-02-01 00:00:00|
| u3| i1|1.0|2020-01-01 00:04:15|
| u3| i2|0.0|2020-01-02 00:04:14|
| u3| i3|1.0|2020-01-05 23:59:59|
+--------+--------+---+-------------------+
<BLANKLINE>
>>> take_time_period(log_sp, start_date="2020-01-01 14:00:00", end_date=datetime(2020, 1, 3, 0, 0, 0)).show()
+--------+--------+---+-------------------+
|user_idx|item_idx|rel| timestamp|
+--------+--------+---+-------------------+
| u1| i1|1.0|2020-01-01 23:59:59|
| u3| i2|0.0|2020-01-02 00:04:14|
+--------+--------+---+-------------------+
<BLANKLINE>
:param log: historical DataFrame
:param start_date: datetime or str with format "yyyy-MM-dd HH:mm:ss".
:param end_date: datetime or str with format "yyyy-MM-dd HH:mm:ss".
:param date_column: date column
"""
if start_date is None:
start_date = log.agg(sf.min(date_column)).first()[0]
if end_date is None:
end_date = log.agg(sf.max(date_column)).first()[0] + timedelta(
seconds=1
)
return log.filter(
(col(date_column) >= sf.lit(start_date).cast(TimestampType()))
& (col(date_column) < sf.lit(end_date).cast(TimestampType()))
)
def take_num_days_of_global_hist(
log: DataFrame,
duration_days: int,
first: bool = True,
date_column: str = "timestamp",
) -> DataFrame:
"""
Select first/last days from ``log``.
>>> import pandas as pd
>>> from replay.utils import convert2spark
>>> log_pd = pd.DataFrame({"user_idx": ["u1", "u2", "u2", "u3", "u3", "u3"],
... "item_idx": ["i1", "i2","i3", "i1", "i2","i3"],
... "rel": [1., 0.5, 3, 1, 0, 1],
... "timestamp": ["2020-01-01 23:59:59", "2020-02-01",
... "2020-02-01", "2020-01-01 00:04:15",
... "2020-01-02 00:04:14", "2020-01-05 23:59:59"]},
... )
>>> log_pd["timestamp"] = pd.to_datetime(log_pd["timestamp"])
>>> log_sp = convert2spark(log_pd)
>>> log_sp.show()
+--------+--------+---+-------------------+
|user_idx|item_idx|rel| timestamp|
+--------+--------+---+-------------------+
| u1| i1|1.0|2020-01-01 23:59:59|
| u2| i2|0.5|2020-02-01 00:00:00|
| u2| i3|3.0|2020-02-01 00:00:00|
| u3| i1|1.0|2020-01-01 00:04:15|
| u3| i2|0.0|2020-01-02 00:04:14|
| u3| i3|1.0|2020-01-05 23:59:59|
+--------+--------+---+-------------------+
<BLANKLINE>
>>> take_num_days_of_global_hist(log_sp, 1).show()
+--------+--------+---+-------------------+
|user_idx|item_idx|rel| timestamp|
+--------+--------+---+-------------------+
| u1| i1|1.0|2020-01-01 23:59:59|
| u3| i1|1.0|2020-01-01 00:04:15|
| u3| i2|0.0|2020-01-02 00:04:14|
+--------+--------+---+-------------------+
<BLANKLINE>
>>> take_num_days_of_global_hist(log_sp, 1, first=False).show()
+--------+--------+---+-------------------+
|user_idx|item_idx|rel| timestamp|
+--------+--------+---+-------------------+
| u2| i2|0.5|2020-02-01 00:00:00|
| u2| i3|3.0|2020-02-01 00:00:00|
+--------+--------+---+-------------------+
<BLANKLINE>
:param log: historical DataFrame
:param duration_days: length of selected data in days
:param first: take either first ``duration_days`` or last
:param date_column: date column
"""
if first:
start_date = log.agg(sf.min(date_column)).first()[0]
end_date = sf.lit(start_date).cast(TimestampType()) + sf.expr(
f"INTERVAL {duration_days} days"
)
return log.filter(col(date_column) < end_date)
end_date = log.agg(sf.max(date_column)).first()[0]
start_date = sf.lit(end_date).cast(TimestampType()) - sf.expr(
f"INTERVAL {duration_days} days"
)
return log.filter(col(date_column) > start_date) | /replay_rec-0.11.0-py3-none-any.whl/replay/filters.py | 0.898915 | 0.573051 | filters.py | pypi |
import json
import logging
import string
from os.path import join
from typing import Dict, List, Optional
from pyspark.ml import Transformer, Estimator
from pyspark.ml.feature import StringIndexerModel, IndexToString, StringIndexer
from pyspark.ml.util import (
MLWriter,
MLWritable,
MLReader,
MLReadable,
DefaultParamsWriter,
)
from pyspark.sql import DataFrame
from pyspark.sql import functions as sf
from pyspark.sql.types import (
DoubleType,
NumericType,
StructField,
StructType,
IntegerType,
)
from replay.constants import AnyDataFrame
from replay.session_handler import State
from replay.utils import convert2spark, process_timestamp_column
LOG_COLUMNS = ["user_id", "item_id", "timestamp", "relevance"]
class Indexer: # pylint: disable=too-many-instance-attributes
"""
This class is used to convert arbitrary id to numerical idx and back.
"""
user_indexer: StringIndexerModel
item_indexer: StringIndexerModel
inv_user_indexer: IndexToString
inv_item_indexer: IndexToString
user_type: None
item_type: None
suffix = "inner"
def __init__(self, user_col="user_id", item_col="item_id"):
"""
Provide column names for indexer to use
"""
self.user_col = user_col
self.item_col = item_col
@property
def _init_args(self):
return {
"user_col": self.user_col,
"item_col": self.item_col,
}
def fit(
self,
users: DataFrame,
items: DataFrame,
) -> None:
"""
Creates indexers to map raw id to numerical idx so that spark can handle them.
:param users: DataFrame containing user column
:param items: DataFrame containing item column
:return:
"""
users = users.select(self.user_col).withColumnRenamed(
self.user_col, f"{self.user_col}_{self.suffix}"
)
items = items.select(self.item_col).withColumnRenamed(
self.item_col, f"{self.item_col}_{self.suffix}"
)
self.user_type = users.schema[
f"{self.user_col}_{self.suffix}"
].dataType
self.item_type = items.schema[
f"{self.item_col}_{self.suffix}"
].dataType
self.user_indexer = StringIndexer(
inputCol=f"{self.user_col}_{self.suffix}", outputCol="user_idx"
).fit(users)
self.item_indexer = StringIndexer(
inputCol=f"{self.item_col}_{self.suffix}", outputCol="item_idx"
).fit(items)
self.inv_user_indexer = IndexToString(
inputCol=f"{self.user_col}_{self.suffix}",
outputCol=self.user_col,
labels=self.user_indexer.labels,
)
self.inv_item_indexer = IndexToString(
inputCol=f"{self.item_col}_{self.suffix}",
outputCol=self.item_col,
labels=self.item_indexer.labels,
)
def transform(self, df: DataFrame) -> Optional[DataFrame]:
"""
Convert raw ``user_col`` and ``item_col`` to numerical ``user_idx`` and ``item_idx``
:param df: dataframe with raw indexes
:return: dataframe with converted indexes
"""
if self.item_col in df.columns:
remaining_cols = df.drop(self.item_col).columns
df = df.withColumnRenamed(
self.item_col, f"{self.item_col}_{self.suffix}"
)
self._reindex(df, "item")
df = self.item_indexer.transform(df).select(
sf.col("item_idx").cast("int").alias("item_idx"),
*remaining_cols,
)
if self.user_col in df.columns:
remaining_cols = df.drop(self.user_col).columns
df = df.withColumnRenamed(
self.user_col, f"{self.user_col}_{self.suffix}"
)
self._reindex(df, "user")
df = self.user_indexer.transform(df).select(
sf.col("user_idx").cast("int").alias("user_idx"),
*remaining_cols,
)
return df
def inverse_transform(self, df: DataFrame) -> DataFrame:
"""
Convert DataFrame to the initial indexes.
:param df: DataFrame with numerical ``user_idx/item_idx`` columns
:return: DataFrame with original user/item columns
"""
res = df
if "item_idx" in df.columns:
remaining_cols = res.drop("item_idx").columns
res = self.inv_item_indexer.transform(
res.withColumnRenamed(
"item_idx", f"{self.item_col}_{self.suffix}"
)
).select(
sf.col(self.item_col)
.cast(self.item_type)
.alias(self.item_col),
*remaining_cols,
)
if "user_idx" in df.columns:
remaining_cols = res.drop("user_idx").columns
res = self.inv_user_indexer.transform(
res.withColumnRenamed(
"user_idx", f"{self.user_col}_{self.suffix}"
)
).select(
sf.col(self.user_col)
.cast(self.user_type)
.alias(self.user_col),
*remaining_cols,
)
return res
def _reindex(self, df: DataFrame, entity: str):
"""
Update indexer with new entries.
:param df: DataFrame with users/items
:param entity: user or item
"""
indexer = getattr(self, f"{entity}_indexer")
inv_indexer = getattr(self, f"inv_{entity}_indexer")
new_objects = set(
map(
str,
df.select(indexer.getInputCol())
.distinct()
.toPandas()[indexer.getInputCol()],
)
).difference(indexer.labels)
if new_objects:
new_labels = indexer.labels + list(new_objects)
setattr(
self,
f"{entity}_indexer",
indexer.from_labels(
new_labels,
inputCol=indexer.getInputCol(),
outputCol=indexer.getOutputCol(),
handleInvalid="error",
),
)
inv_indexer.setLabels(new_labels)
# We need to inherit it from DefaultParamsWriter to make it being saved correctly within Pipeline
class JoinIndexerMLWriter(DefaultParamsWriter):
"""Implements saving the JoinIndexerTransformer instance to disk.
Used when saving a trained pipeline.
Implements MLWriter.saveImpl(path) method.
"""
def __init__(self, instance):
super().__init__(instance)
self.instance = instance
def saveImpl(self, path: str) -> None:
super().saveImpl(path)
spark = State().session
init_args = self.instance._init_args
sc = spark.sparkContext # pylint: disable=invalid-name
df = spark.read.json(sc.parallelize([json.dumps(init_args)]))
df.coalesce(1).write.mode("overwrite").json(join(path, "init_args.json"))
self.instance.user_col_2_index_map.write.mode("overwrite")\
.save(join(path, "user_col_2_index_map.parquet"))
self.instance.item_col_2_index_map.write.mode("overwrite")\
.save(join(path, "item_col_2_index_map.parquet"))
class JoinIndexerMLReader(MLReader):
"""Implements reading the JoinIndexerTransformer instance from disk.
Used when loading a trained pipeline.
"""
def load(self, path):
"""Load the ML instance from the input path."""
spark = State().session
args = spark.read.json(join(path, "init_args.json")).first().asDict(recursive=True)
user_col_2_index_map = spark.read.parquet(join(path, "user_col_2_index_map.parquet"))
item_col_2_index_map = spark.read.parquet(join(path, "item_col_2_index_map.parquet"))
indexer = JoinBasedIndexerTransformer(
user_col=args["user_col"],
user_type=args["user_type"],
user_col_2_index_map=user_col_2_index_map,
item_col=args["item_col"],
item_type=args["item_type"],
item_col_2_index_map=item_col_2_index_map,
)
return indexer
# pylint: disable=too-many-instance-attributes
class JoinBasedIndexerTransformer(Transformer, MLWritable, MLReadable):
"""
    JoinBasedIndexer that indexes the user and item columns of the input dataframe
"""
# pylint: disable=too-many-arguments
def __init__(
self,
user_col: str,
item_col: str,
user_type: str,
item_type: str,
user_col_2_index_map: DataFrame,
item_col_2_index_map: DataFrame,
update_map_on_transform: bool = True,
force_broadcast_on_mapping_joins: bool = True,
):
super().__init__()
self.user_col = user_col
self.item_col = item_col
self.user_type = user_type
self.item_type = item_type
self.user_col_2_index_map = user_col_2_index_map
self.item_col_2_index_map = item_col_2_index_map
self.update_map_on_transform = update_map_on_transform
self.force_broadcast_on_mapping_joins = force_broadcast_on_mapping_joins
@property
def _init_args(self):
return {
"user_col": self.user_col,
"item_col": self.item_col,
"user_type": self.user_type,
"item_type": self.item_type,
"update_map_on_transform": self.update_map_on_transform,
"force_broadcast_on_mapping_joins": self.force_broadcast_on_mapping_joins
}
def set_update_map_on_transform(self, value: bool):
"""Sets 'update_map_on_transform' flag"""
self.update_map_on_transform = value
def set_force_broadcast_on_mapping_joins(self, value: bool):
"""Sets 'force_broadcast_on_mapping_joins' flag"""
self.force_broadcast_on_mapping_joins = value
def _get_item_mapping(self) -> DataFrame:
if self.force_broadcast_on_mapping_joins:
mapping = sf.broadcast(self.item_col_2_index_map)
else:
mapping = self.item_col_2_index_map
return mapping
def _get_user_mapping(self) -> DataFrame:
if self.force_broadcast_on_mapping_joins:
mapping = sf.broadcast(self.user_col_2_index_map)
else:
mapping = self.user_col_2_index_map
return mapping
def write(self) -> MLWriter:
"""Returns MLWriter instance that can save the Transformer instance."""
return JoinIndexerMLWriter(self)
@classmethod
def read(cls):
"""Returns an MLReader instance for this class."""
return JoinIndexerMLReader()
def _update_maps(self, df: DataFrame):
new_items = (
df.join(self._get_item_mapping(), on=self.item_col, how="left_anti")
.select(self.item_col).distinct()
)
prev_item_count = self.item_col_2_index_map.count()
new_items_map = (
JoinBasedIndexerEstimator.get_map(new_items, self.item_col, "item_idx")
.select(self.item_col, (sf.col("item_idx") + prev_item_count).alias("item_idx"))
)
self.item_col_2_index_map = self.item_col_2_index_map.union(new_items_map)
new_users = (
df.join(self._get_user_mapping(), on=self.user_col, how="left_anti")
.select(self.user_col).distinct()
)
prev_user_count = self.user_col_2_index_map.count()
new_users_map = (
JoinBasedIndexerEstimator.get_map(new_users, self.user_col, "user_idx")
.select(self.user_col, (sf.col("user_idx") + prev_user_count).alias("user_idx"))
)
self.user_col_2_index_map = self.user_col_2_index_map.union(new_users_map)
def _transform(self, dataset: DataFrame) -> DataFrame:
if self.update_map_on_transform:
self._update_maps(dataset)
if self.item_col in dataset.columns:
remaining_cols = dataset.drop(self.item_col).columns
dataset = dataset.join(self._get_item_mapping(), on=self.item_col, how="left").select(
sf.col("item_idx").cast("int").alias("item_idx"),
*remaining_cols,
)
if self.user_col in dataset.columns:
remaining_cols = dataset.drop(self.user_col).columns
dataset = dataset.join(self._get_user_mapping(), on=self.user_col, how="left").select(
sf.col("user_idx").cast("int").alias("user_idx"),
*remaining_cols,
)
return dataset
def inverse_transform(self, df: DataFrame) -> DataFrame:
"""
Convert DataFrame to the initial indexes.
:param df: DataFrame with numerical ``user_idx/item_idx`` columns
:return: DataFrame with original user/item columns
"""
if "item_idx" in df.columns:
remaining_cols = df.drop("item_idx").columns
df = df.join(
self._get_item_mapping(), on="item_idx", how="left"
).select(
self.item_col,
*remaining_cols,
)
if "user_idx" in df.columns:
remaining_cols = df.drop("user_idx").columns
df = df.join(
self._get_user_mapping(), on="user_idx", how="left"
).select(
self.user_col,
*remaining_cols,
)
return df
class JoinBasedIndexerEstimator(Estimator):
"""
Estimator that produces JoinBasedIndexerTransformer
"""
# pylint: disable=super-init-not-called
def __init__(self, user_col="user_id", item_col="item_id"):
"""
Provide column names for indexer to use
"""
self.user_col = user_col
self.item_col = item_col
self.user_col_2_index_map = None
self.item_col_2_index_map = None
self.user_type = None
self.item_type = None
@staticmethod
def get_map(df: DataFrame, col_name: str, idx_col_name: str) -> DataFrame:
"""Creates indexes [0, .., k] for values from `col_name` column.
:param df: input dataframe
:param col_name: column name from `df` that need to index
:param idx_col_name: column name with indexes
:return: DataFrame with map "col_name" -> "idx_col_name"
"""
uid_rdd = (
df.select(col_name)
.distinct()
.rdd.map(lambda x: x[col_name])
.zipWithIndex()
)
return uid_rdd.toDF(
StructType(
[
df.schema[col_name],
StructField(idx_col_name, IntegerType(), False),
]
)
)
def _fit(self, dataset: DataFrame) -> Transformer:
"""
Creates indexers to map raw id to numerical idx so that spark can handle them.
        :param dataset: DataFrame containing user column and item column
:return:
"""
self.user_col_2_index_map = self.get_map(dataset, self.user_col, "user_idx")
self.item_col_2_index_map = self.get_map(dataset, self.item_col, "item_idx")
self.user_type = dataset.schema[
self.user_col
].dataType
self.item_type = dataset.schema[
self.item_col
].dataType
return JoinBasedIndexerTransformer(
user_col=self.user_col,
user_type=str(self.user_type),
item_col=self.item_col,
item_type=str(self.item_type),
user_col_2_index_map=self.user_col_2_index_map,
item_col_2_index_map=self.item_col_2_index_map
)
class DataPreparator:
"""Transforms data to a library format:
- read as a spark dataframe/ convert pandas dataframe to spark
- check for nulls
- create relevance/timestamp columns if absent
- convert dates to TimestampType
Examples:
Loading log DataFrame
>>> import pandas as pd
>>> from replay.data_preparator import DataPreparator
>>>
>>> log = pd.DataFrame({"user": [2, 2, 2, 1],
... "item_id": [1, 2, 3, 3],
... "rel": [5, 5, 5, 5]}
... )
>>> dp = DataPreparator()
>>> correct_log = dp.transform(data=log,
... columns_mapping={"user_id": "user",
... "item_id": "item_id",
... "relevance": "rel"}
... )
>>> correct_log.show(2)
+-------+-------+---------+-------------------+
|user_id|item_id|relevance| timestamp|
+-------+-------+---------+-------------------+
| 2| 1| 5.0|2099-01-01 00:00:00|
| 2| 2| 5.0|2099-01-01 00:00:00|
+-------+-------+---------+-------------------+
only showing top 2 rows
<BLANKLINE>
Loading user features
>>> import pandas as pd
>>> from replay.data_preparator import DataPreparator
>>>
>>> log = pd.DataFrame({"user": ["user1", "user1", "user2"],
... "f0": ["feature1","feature2","feature1"],
... "f1": ["left","left","center"],
... "ts": ["2019-01-01","2019-01-01","2019-01-01"]}
... )
>>> dp = DataPreparator()
>>> correct_log = dp.transform(data=log,
... columns_mapping={"user_id": "user"},
... )
>>> correct_log.show(3)
+-------+--------+------+----------+
|user_id| f0| f1| ts|
+-------+--------+------+----------+
| user1|feature1| left|2019-01-01|
| user1|feature2| left|2019-01-01|
| user2|feature1|center|2019-01-01|
+-------+--------+------+----------+
<BLANKLINE>
"""
_logger: Optional[logging.Logger] = None
@property
def logger(self) -> logging.Logger:
"""
:returns: get library logger
"""
if self._logger is None:
self._logger = logging.getLogger("replay")
return self._logger
@staticmethod
def read_as_spark_df(
data: Optional[AnyDataFrame] = None,
path: str = None,
format_type: str = None,
**kwargs,
) -> DataFrame:
"""
        Read a spark dataframe from a file or convert a pandas dataframe to spark.
        :param data: DataFrame to process (either ``path`` or ``data`` should be defined)
        :param path: path to data (either ``path`` or ``data`` should be defined)
:param format_type: file type, one of ``[csv , parquet , json , table]``
:param kwargs: extra arguments passed to
``spark.read.<format>(path, **reader_kwargs)``
:return: spark DataFrame
"""
if data is not None:
dataframe = convert2spark(data)
elif path and format_type:
spark = State().session
if format_type == "csv":
dataframe = spark.read.csv(path, inferSchema=True, **kwargs)
elif format_type == "parquet":
dataframe = spark.read.parquet(path)
elif format_type == "json":
dataframe = spark.read.json(path, **kwargs)
elif format_type == "table":
dataframe = spark.read.table(path)
else:
raise ValueError(
f"Invalid value of format_type='{format_type}'"
)
else:
raise ValueError("Either data or path parameters must not be None")
return dataframe
def check_df(
self, dataframe: DataFrame, columns_mapping: Dict[str, str]
) -> None:
"""
Check:
- if dataframe is not empty,
- if columns from ``columns_mapping`` are present in dataframe
- warn about nulls in columns from ``columns_mapping``
        - warn about absence of ``timestamp/relevance`` columns for interactions log
- warn about wrong relevance DataType
:param dataframe: spark DataFrame to process
:param columns_mapping: dictionary mapping "key: column name in input DataFrame".
        Possible keys: ``[user_id, item_id, timestamp, relevance]``
``columns_mapping`` values specifies the nature of the DataFrame:
- if both ``[user_id, item_id]`` are present,
then the dataframe is a log of interactions.
Specify ``timestamp, relevance`` columns in mapping if available.
        - if either ``user_id`` or ``item_id`` is present,
then the dataframe is a dataframe of user/item features
"""
if not dataframe.head(1):
raise ValueError("DataFrame is empty")
for value in columns_mapping.values():
if value not in dataframe.columns:
raise ValueError(
f"Column `{value}` stated in mapping is absent in dataframe"
)
for column in columns_mapping.values():
if dataframe.where(sf.col(column).isNull()).count() > 0:
self.logger.info(
"Column `%s` has NULL values. Handle NULL values before "
"the next data preprocessing/model training steps",
column,
)
if (
"user_id" in columns_mapping.keys()
and "item_id" in columns_mapping.keys()
):
absent_cols = set(LOG_COLUMNS).difference(columns_mapping.keys())
if len(absent_cols) > 0:
self.logger.info(
"Columns %s are absent, but may be required for models training. "
"Add them with DataPreparator().generate_absent_log_cols",
list(absent_cols),
)
if "relevance" in columns_mapping.keys():
if not isinstance(
dataframe.schema[columns_mapping["relevance"]].dataType,
NumericType,
):
self.logger.info(
"Relevance column `%s` should be numeric, but it is %s",
columns_mapping["relevance"],
dataframe.schema[columns_mapping["relevance"]].dataType,
)
@staticmethod
def add_absent_log_cols(
dataframe: DataFrame,
columns_mapping: Dict[str, str],
default_relevance: float = 1.0,
default_ts: str = "2099-01-01",
):
"""
Add ``relevance`` and ``timestamp`` columns with default values if
``relevance`` or ``timestamp`` is absent among mapping keys.
:param dataframe: interactions log to process
:param columns_mapping: dictionary mapping "key: column name in input DataFrame".
        Possible keys: ``[user_id, item_id, timestamp, relevance]``
:param default_relevance: default value for generated `relevance` column
:param default_ts: str, default value for generated `timestamp` column
:return: spark DataFrame with generated ``timestamp`` and ``relevance`` columns
if absent in original dataframe
"""
absent_cols = set(LOG_COLUMNS).difference(columns_mapping.keys())
if "relevance" in absent_cols:
dataframe = dataframe.withColumn(
"relevance", sf.lit(default_relevance).cast(DoubleType())
)
if "timestamp" in absent_cols:
dataframe = dataframe.withColumn(
"timestamp", sf.to_timestamp(sf.lit(default_ts))
)
return dataframe
@staticmethod
def _rename(df: DataFrame, mapping: Dict) -> Optional[DataFrame]:
"""
rename dataframe columns based on mapping
"""
if df is None or mapping is None:
return df
for out_col, in_col in mapping.items():
if in_col in df.columns:
df = df.withColumnRenamed(in_col, out_col)
return df
# pylint: disable=too-many-arguments
def transform(
self,
columns_mapping: Dict[str, str],
data: Optional[AnyDataFrame] = None,
path: Optional[str] = None,
format_type: Optional[str] = None,
date_format: Optional[str] = None,
reader_kwargs: Optional[Dict] = None,
) -> DataFrame:
"""
Transforms log, user or item features into a Spark DataFrame
        ``[user_id, item_id, timestamp, relevance]``,
``[user_id, *features]``, or ``[item_id, *features]``.
Input is either file of ``format_type``
at ``path``, or ``pandas.DataFrame`` or ``spark.DataFrame``.
Transform performs:
- dataframe reading/convert to spark DataFrame format
- check dataframe (nulls, columns_mapping)
        - rename columns from mapping to standard names (user_id, item_id, timestamp, relevance)
- for interactions log: create absent columns,
convert ``timestamp`` column to TimestampType and ``relevance`` to DoubleType
:param columns_mapping: dictionary mapping "key: column name in input DataFrame".
        Possible keys: ``[user_id, item_id, timestamp, relevance]``
``columns_mapping`` values specifies the nature of the DataFrame:
- if both ``[user_id, item_id]`` are present,
then the dataframe is a log of interactions.
Specify ``timestamp, relevance`` columns in mapping if present.
        - if either ``user_id`` or ``item_id`` is present,
then the dataframe is a dataframe of user/item features
:param data: DataFrame to process
:param path: path to data
:param format_type: file type, one of ``[csv , parquet , json , table]``
:param date_format: format for the ``timestamp`` column
:param reader_kwargs: extra arguments passed to
``spark.read.<format>(path, **reader_kwargs)``
:return: processed DataFrame
"""
is_log = False
if (
"user_id" in columns_mapping.keys()
and "item_id" in columns_mapping.keys()
):
self.logger.info(
"Columns with ids of users or items are present in mapping. "
"The dataframe will be treated as an interactions log."
)
is_log = True
elif (
"user_id" not in columns_mapping.keys()
and "item_id" not in columns_mapping.keys()
):
raise ValueError(
"Mapping either for user ids or for item ids is not stated in `columns_mapping`"
)
else:
self.logger.info(
"Column with ids of users or items is absent in mapping. "
"The dataframe will be treated as a users'/items' features dataframe."
)
reader_kwargs = {} if reader_kwargs is None else reader_kwargs
dataframe = self.read_as_spark_df(
data=data, path=path, format_type=format_type, **reader_kwargs
)
self.check_df(dataframe, columns_mapping=columns_mapping)
dataframe = self._rename(df=dataframe, mapping=columns_mapping)
if is_log:
dataframe = self.add_absent_log_cols(
dataframe=dataframe, columns_mapping=columns_mapping
)
dataframe = dataframe.withColumn(
"relevance", sf.col("relevance").cast(DoubleType())
)
dataframe = process_timestamp_column(
dataframe=dataframe,
column_name="timestamp",
date_format=date_format,
)
return dataframe
class CatFeaturesTransformer:
"""Transform categorical features in ``cat_cols_list``
with one-hot encoding and remove original columns."""
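    # A minimal usage sketch (illustrative comment only, not executed; ``item_features``
    # is assumed to be a Spark DataFrame with a categorical ``genre`` column):
    #
    #   ohe = CatFeaturesTransformer(cat_cols_list=["genre"])
    #   ohe.fit(item_features)
    #   item_features_ohe = ohe.transform(item_features)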
def __init__(
self,
cat_cols_list: List,
alias: str = "ohe",
):
"""
:param cat_cols_list: list of categorical columns
:param alias: prefix for one-hot encoding columns
"""
self.cat_cols_list = cat_cols_list
self.expressions_list = []
self.alias = alias
def fit(self, spark_df: Optional[DataFrame]) -> None:
"""
Save categories for each column
:param spark_df: Spark DataFrame with features
"""
if spark_df is None:
return
cat_feat_values_dict = {
name: (
spark_df.select(sf.collect_set(sf.col(name))).collect()[0][0]
)
for name in self.cat_cols_list
}
self.expressions_list = [
sf.when(sf.col(col_name) == cur_name, 1)
.otherwise(0)
.alias(
f"""{self.alias}_{col_name}_{str(cur_name).translate(
str.maketrans(
"", "", string.punctuation + string.whitespace
)
)[:30]}"""
)
for col_name, col_values in cat_feat_values_dict.items()
for cur_name in col_values
]
def transform(self, spark_df: Optional[DataFrame]):
"""
Transform categorical columns.
If there are any new categories that were not present at fit stage, they will be ignored.
:param spark_df: feature DataFrame
:return: transformed DataFrame
"""
if spark_df is None:
return None
return spark_df.select(*spark_df.columns, *self.expressions_list).drop(
*self.cat_cols_list
)
class ToNumericFeatureTransformer:
"""Transform user/item features to numeric types:
    - numeric features stay as is
- categorical features:
if threshold is defined:
            - all non-numeric columns with fewer unique values than the threshold are one-hot encoded
- remaining columns are dropped
else all non-numeric columns are one-hot encoded
"""
cat_feat_transformer: Optional[CatFeaturesTransformer]
cols_to_ohe: Optional[List]
cols_to_del: Optional[List]
all_columns: Optional[List]
def __init__(self, threshold: Optional[int] = 100):
self.threshold = threshold
self.fitted = False
def fit(self, features: Optional[DataFrame]) -> None:
"""
Determine categorical columns for one-hot encoding.
        Non-numeric columns with more unique values than the threshold will be deleted.
Saves categories for each column.
:param features: input DataFrame
"""
self.cat_feat_transformer = None
self.cols_to_del = []
self.fitted = True
if features is None:
self.all_columns = None
return
self.all_columns = sorted(features.columns)
spark_df_non_numeric_cols = [
col
for col in features.columns
if (not isinstance(features.schema[col].dataType, NumericType))
and (col not in {"user_idx", "item_idx"})
]
# numeric only
if len(spark_df_non_numeric_cols) == 0:
self.cols_to_ohe = []
return
if self.threshold is None:
self.cols_to_ohe = spark_df_non_numeric_cols
else:
counts_pd = (
features.agg(
*[
sf.approx_count_distinct(sf.col(c)).alias(c)
for c in spark_df_non_numeric_cols
]
)
.toPandas()
.T
)
self.cols_to_ohe = (
counts_pd[counts_pd[0] <= self.threshold]
).index.values
self.cols_to_del = [
col
for col in spark_df_non_numeric_cols
if col not in set(self.cols_to_ohe)
]
if self.cols_to_del:
State().logger.warning(
"%s columns contain more that threshold unique "
"values and will be deleted",
self.cols_to_del,
)
if len(self.cols_to_ohe) > 0:
self.cat_feat_transformer = CatFeaturesTransformer(
cat_cols_list=self.cols_to_ohe
)
self.cat_feat_transformer.fit(features.drop(*self.cols_to_del))
def transform(self, spark_df: Optional[DataFrame]) -> Optional[DataFrame]:
"""
Transform categorical features.
Use one hot encoding for columns with the amount of unique values smaller
than threshold and delete other columns.
:param spark_df: input DataFrame
:return: processed DataFrame
"""
if not self.fitted:
raise AttributeError("Call fit before running transform")
if spark_df is None or self.all_columns is None:
return None
if self.cat_feat_transformer is None:
return spark_df.drop(*self.cols_to_del)
if sorted(spark_df.columns) != self.all_columns:
raise ValueError(
f"Columns from fit do not match "
f"columns in transform. "
f"Fit columns: {self.all_columns},"
f"Transform columns: {spark_df.columns}"
)
return self.cat_feat_transformer.transform(
spark_df.drop(*self.cols_to_del)
)
def fit_transform(self, spark_df: DataFrame) -> DataFrame:
"""
:param spark_df: input DataFrame
:return: output DataFrame
"""
self.fit(spark_df)
return self.transform(spark_df) | /replay_rec-0.11.0-py3-none-any.whl/replay/data_preparator.py | 0.856947 | 0.322313 | data_preparator.py | pypi |
import json
from inspect import getfullargspec
from os.path import join
from pathlib import Path
from typing import Union
import pyspark.sql.types as st
from pyspark.ml.feature import StringIndexerModel, IndexToString
from pyspark.sql import SparkSession
from replay.data_preparator import Indexer
from replay.models import *
from replay.models.base_rec import BaseRecommender
from replay.session_handler import State
from replay.splitters import *
from replay.utils import save_picklable_to_parquet, load_pickled_from_parquet
def get_fs(spark: SparkSession):
"""
Gets `org.apache.hadoop.fs.FileSystem` instance from JVM gateway
:param spark: spark session
:return:
"""
fs = spark._jvm.org.apache.hadoop.fs.FileSystem.get(
spark._jsc.hadoopConfiguration()
)
return fs
def get_list_of_paths(spark: SparkSession, dir_path: str):
"""
Returns list of paths to files in the `dir_path`
:param spark: spark session
:param dir_path: path to dir in hdfs or local disk
:return: list of paths to files
"""
fs = get_fs(spark)
statuses = fs.listStatus(spark._jvm.org.apache.hadoop.fs.Path(dir_path))
return [str(f.getPath()) for f in statuses]
def save(
model: BaseRecommender, path: Union[str, Path], overwrite: bool = False
):
"""
Save fitted model to disk as a folder
:param model: Trained recommender
    :param path: destination where model files will be stored
    :param overwrite: overwrite files at ``path`` if they already exist
    :return:
"""
if isinstance(path, Path):
path = str(path)
spark = State().session
fs = get_fs(spark)
if not overwrite:
is_exists = fs.exists(spark._jvm.org.apache.hadoop.fs.Path(path))
if is_exists:
raise FileExistsError(
f"Path '{path}' already exists. Mode is 'overwrite = False'."
)
fs.mkdirs(spark._jvm.org.apache.hadoop.fs.Path(path))
model._save_model(join(path, "model"))
init_args = model._init_args
init_args["_model_name"] = str(model)
sc = spark.sparkContext
df = spark.read.json(sc.parallelize([json.dumps(init_args)]))
df.coalesce(1).write.mode("overwrite").option(
"ignoreNullFields", "false"
).json(join(path, "init_args.json"))
dataframes = model._dataframes
df_path = join(path, "dataframes")
for name, df in dataframes.items():
if df is not None:
df.write.mode("overwrite").parquet(join(df_path, name))
if hasattr(model, "fit_users"):
model.fit_users.write.mode("overwrite").parquet(
join(df_path, "fit_users")
)
if hasattr(model, "fit_items"):
model.fit_items.write.mode("overwrite").parquet(
join(df_path, "fit_items")
)
if hasattr(model, "study"):
save_picklable_to_parquet(model.study, join(path, "study"))
def load(path: str) -> BaseRecommender:
"""
Load saved model from disk
:param path: path to model folder
:return: Restored trained model
"""
spark = State().session
args = (
spark.read.json(join(path, "init_args.json"))
.first()
.asDict(recursive=True)
)
name = args["_model_name"]
del args["_model_name"]
model_class = globals()[name]
init_args = getfullargspec(model_class.__init__).args
init_args.remove("self")
extra_args = set(args) - set(init_args)
if len(extra_args) > 0:
        extra_args = {key: args[key] for key in extra_args}
init_args = {key: args[key] for key in init_args}
else:
init_args = args
extra_args = {}
model = model_class(**init_args)
for arg in extra_args:
        setattr(model, arg, extra_args[arg])
dataframes_paths = get_list_of_paths(spark, join(path, "dataframes"))
for dataframe_path in dataframes_paths:
df = spark.read.parquet(dataframe_path)
attr_name = dataframe_path.split("/")[-1]
setattr(model, attr_name, df)
model._load_model(join(path, "model"))
fs = get_fs(spark)
model.study = (
load_pickled_from_parquet(join(path, "study"))
if fs.exists(spark._jvm.org.apache.hadoop.fs.Path(join(path, "study")))
else None
)
return model
def save_indexer(
indexer: Indexer, path: Union[str, Path], overwrite: bool = False
):
"""
Save fitted indexer to disk as a folder
:param indexer: Trained indexer
:param path: destination where indexer files will be stored
"""
if isinstance(path, Path):
path = str(path)
spark = State().session
if not overwrite:
fs = get_fs(spark)
is_exists = fs.exists(spark._jvm.org.apache.hadoop.fs.Path(path))
if is_exists:
raise FileExistsError(
f"Path '{path}' already exists. Mode is 'overwrite = False'."
)
init_args = indexer._init_args
init_args["user_type"] = str(indexer.user_type)
init_args["item_type"] = str(indexer.item_type)
sc = spark.sparkContext
df = spark.read.json(sc.parallelize([json.dumps(init_args)]))
df.coalesce(1).write.mode("overwrite").json(join(path, "init_args.json"))
indexer.user_indexer.write().overwrite().save(join(path, "user_indexer"))
indexer.item_indexer.write().overwrite().save(join(path, "item_indexer"))
indexer.inv_user_indexer.write().overwrite().save(
join(path, "inv_user_indexer")
)
indexer.inv_item_indexer.write().overwrite().save(
join(path, "inv_item_indexer")
)
def load_indexer(path: str) -> Indexer:
"""
Load saved indexer from disk
:param path: path to folder
:return: restored Indexer
"""
spark = State().session
args = spark.read.json(join(path, "init_args.json")).first().asDict()
user_type = args["user_type"]
del args["user_type"]
item_type = args["item_type"]
del args["item_type"]
indexer = Indexer(**args)
indexer.user_type = getattr(st, user_type)()
indexer.item_type = getattr(st, item_type)()
indexer.user_indexer = StringIndexerModel.load(join(path, "user_indexer"))
indexer.item_indexer = StringIndexerModel.load(join(path, "item_indexer"))
indexer.inv_user_indexer = IndexToString.load(
join(path, "inv_user_indexer")
)
indexer.inv_item_indexer = IndexToString.load(
join(path, "inv_item_indexer")
)
return indexer
def save_splitter(splitter: Splitter, path: str, overwrite: bool = False):
"""
Save initialized splitter
:param splitter: Initialized splitter
:param path: destination where splitter files will be stored
"""
init_args = splitter._init_args
init_args["_splitter_name"] = str(splitter)
spark = State().session
sc = spark.sparkContext
df = spark.read.json(sc.parallelize([json.dumps(init_args)]))
if overwrite:
df.coalesce(1).write.mode("overwrite").json(
join(path, "init_args.json")
)
else:
df.coalesce(1).write.json(join(path, "init_args.json"))
def load_splitter(path: str) -> Splitter:
"""
Load splitter
:param path: path to folder
:return: restored Splitter
"""
spark = State().session
args = spark.read.json(join(path, "init_args.json")).first().asDict()
name = args["_splitter_name"]
del args["_splitter_name"]
splitter = globals()[name]
return splitter(**args) | /replay_rec-0.11.0-py3-none-any.whl/replay/model_handler.py | 0.822795 | 0.260407 | model_handler.py | pypi |
import pandas as pd
import seaborn as sns
from pyspark.sql import functions as sf
from replay.constants import AnyDataFrame
from replay.utils import convert2spark, get_top_k_recs
def plot_user_dist(
user_dist: pd.DataFrame, window: int = 1, title: str = ""
): # pragma: no cover
"""
Plot mean metric value by the number of user ratings
:param user_dist: output of ``user_distribution`` method for a metric
:param window: the number of closest values to average for smoothing
:param title: plot title
:return: plot object
"""
user_dist["smoothed"] = (
user_dist["value"].rolling(window, center=True).mean()
)
plot = sns.lineplot(x="count", y="smoothed", data=user_dist)
plot.set(
xlabel="# of ratings",
ylabel="smoothed value",
title=title,
xscale="log",
)
return plot
def plot_item_dist(
item_dist: pd.DataFrame, palette: str = "magma", col: str = "rec_count"
): # pragma: no cover
"""
Show the results of ``item_distribution`` method
:param item_dist: ``pd.DataFrame``
:param palette: colour scheme for seaborn
:param col: column to use for a plot
:return: plot
"""
limits = list(range(len(item_dist), 0, -len(item_dist) // 10))[::-1]
values = [(item_dist.iloc[:limit][col]).sum() for limit in limits]
# pylint: disable=too-many-function-args
plot = sns.barplot(
list(range(1, 11)),
        [value / max(values) for value in values],
palette=sns.color_palette(palette, 10),
)
plot.set(
xlabel="popularity decentile",
ylabel="proportion",
title="Popularity distribution",
)
return plot
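# A usage sketch tying the two helpers together (illustrative only): `log` and
# `recs` are assumed to be dataframes with `user_idx`/`item_idx` columns;
# `item_distribution` is defined just below.
#
#   dist = item_distribution(log, recs, k=10)
#   plot = plot_item_dist(dist, col="rec_count")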
def item_distribution(
log: AnyDataFrame, recommendations: AnyDataFrame, k: int
) -> pd.DataFrame:
"""
Calculate item distribution in ``log`` and ``recommendations``.
:param log: historical DataFrame used to calculate popularity
:param recommendations: model recommendations
:param k: length of a recommendation list
:return: DataFrame with results
"""
log = convert2spark(log)
res = (
log.groupBy("item_idx")
.agg(sf.countDistinct("user_idx").alias("user_count"))
.select("item_idx", "user_count")
)
rec = convert2spark(recommendations)
rec = get_top_k_recs(rec, k)
rec = (
rec.groupBy("item_idx")
.agg(sf.countDistinct("user_idx").alias("rec_count"))
.select("item_idx", "rec_count")
)
res = (
res.join(rec, on="item_idx", how="outer")
.fillna(0)
.orderBy(["user_count", "item_idx"])
.toPandas()
)
return res | /replay_rec-0.11.0-py3-none-any.whl/replay/distributions.py | 0.918322 | 0.598371 | distributions.py | pypi |
import logging
import os
import sys
from math import floor
from typing import Any, Dict, Optional
import psutil
import torch
from pyspark import __version__ as pyspark_version
from pyspark.sql import SparkSession
def get_spark_session(
spark_memory: Optional[int] = None,
shuffle_partitions: Optional[int] = None,
) -> SparkSession:
"""
Get default SparkSession
:param spark_memory: GB of memory allocated for Spark;
70% of RAM by default.
:param shuffle_partitions: number of partitions for Spark; triple CPU count by default
"""
if os.environ.get("SCRIPT_ENV", None) == "cluster":
return SparkSession.builder.getOrCreate()
os.environ["PYSPARK_PYTHON"] = sys.executable
os.environ["PYSPARK_DRIVER_PYTHON"] = sys.executable
if os.environ.get("REPLAY_JAR_PATH"):
path_to_replay_jar = os.environ.get("REPLAY_JAR_PATH")
else:
if pyspark_version.startswith("3.1"):
path_to_replay_jar = "jars/replay_2.12-0.1_spark_3.1.jar"
elif pyspark_version.startswith("3.2") or pyspark_version.startswith(
"3.3"
):
path_to_replay_jar = "jars/replay_2.12-0.1_spark_3.2.jar"
elif pyspark_version.startswith("3.4"):
path_to_replay_jar = "jars/replay_2.12-0.1_spark_3.4.jar"
else:
path_to_replay_jar = "jars/replay_2.12-0.1_spark_3.1.jar"
logging.warning(
"Replay ALS model support only spark 3.1-3.4 versions! "
"Replay will use 'jars/replay_2.12-0.1_spark_3.1.jar' in 'spark.jars' property."
)
if spark_memory is None:
spark_memory = floor(psutil.virtual_memory().total / 1024**3 * 0.7)
if shuffle_partitions is None:
shuffle_partitions = os.cpu_count() * 3
driver_memory = f"{spark_memory}g"
user_home = os.environ["HOME"]
spark = (
SparkSession.builder.config("spark.driver.memory", driver_memory)
.config(
"spark.driver.extraJavaOptions",
"-Dio.netty.tryReflectionSetAccessible=true",
)
.config("spark.jars", path_to_replay_jar)
.config("spark.sql.shuffle.partitions", str(shuffle_partitions))
.config("spark.local.dir", os.path.join(user_home, "tmp"))
.config("spark.driver.maxResultSize", "4g")
.config("spark.driver.bindAddress", "127.0.0.1")
.config("spark.driver.host", "localhost")
.config("spark.sql.execution.arrow.pyspark.enabled", "true")
.config("spark.kryoserializer.buffer.max", "256m")
.master("local[*]")
.enableHiveSupport()
.getOrCreate()
)
return spark
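# A minimal usage sketch (illustrative; the sizes are arbitrary, not defaults):
#
#   session = get_spark_session(spark_memory=8, shuffle_partitions=24)
#   State(session)           # register the session in the shared State (see below)
#   spark = State().session  # later calls anywhere in RePlay reuse the same session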
def logger_with_settings() -> logging.Logger:
"""Set up default logging"""
spark_logger = logging.getLogger("py4j")
spark_logger.setLevel(logging.WARN)
logger = logging.getLogger("replay")
formatter = logging.Formatter(
"%(asctime)s, %(name)s, %(levelname)s: %(message)s",
datefmt="%d-%b-%y %H:%M:%S",
)
hdlr = logging.StreamHandler()
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.INFO)
return logger
# pylint: disable=too-few-public-methods
class Borg:
"""
    This class allows sharing objects between instances.
"""
_shared_state: Dict[str, Any] = {}
def __init__(self):
self.__dict__ = self._shared_state
# pylint: disable=too-few-public-methods
class State(Borg):
"""
All modules look for Spark session via this class. You can put your own session here.
Other parameters are stored here too: ``default device`` for ``pytorch`` (CPU/CUDA)
"""
def __init__(
self,
session: Optional[SparkSession] = None,
device: Optional[torch.device] = None,
):
Borg.__init__(self)
if not hasattr(self, "logger_set"):
self.logger = logger_with_settings()
self.logger_set = True
if session is None:
if not hasattr(self, "session"):
self.session = get_spark_session()
else:
self.session = session
if device is None:
if not hasattr(self, "device"):
if torch.cuda.is_available():
self.device = torch.device(
f"cuda:{torch.cuda.current_device()}"
)
else:
self.device = torch.device("cpu")
else:
self.device = device | /replay_rec-0.11.0-py3-none-any.whl/replay/session_handler.py | 0.650356 | 0.17252 | session_handler.py | pypi |
from pyspark.ml import Transformer
from pyspark.ml.param import TypeConverters, Params, Param
from pyspark.ml.util import DefaultParamsWritable, DefaultParamsReadable
from pyspark.sql import DataFrame
from replay.model_handler import get_fs
from replay.session_handler import State
class DataframeBucketizer(
Transformer, DefaultParamsWritable, DefaultParamsReadable
):
"""
Buckets the input dataframe, dumps it to spark warehouse directory,
and returns a bucketed dataframe.
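    A minimal usage sketch (illustrative only; assumes an interaction dataframe
    ``log`` with a ``user_idx`` column and a writable spark warehouse dir)::
        with DataframeBucketizer(
            bucketing_key="user_idx",
            partition_num=16,
            spark_warehouse_dir="/tmp/spark-warehouse",
            table_name="bucketed_log",
        ) as bucketizer:
            bucketed_log = bucketizer.transform(log)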
"""
bucketingKey = Param(
Params._dummy(),
"bucketingKey",
"bucketing key (also used as sort key)",
typeConverter=TypeConverters.toString,
)
partitionNum = Param(
Params._dummy(),
"partitionNum",
"number of buckets",
typeConverter=TypeConverters.toInt,
)
tableName = Param(
Params._dummy(),
"tableName",
"parquet file name (for storage in 'spark-warehouse') and spark table name",
typeConverter=TypeConverters.toString,
)
sparkWarehouseDir = Param(
Params._dummy(),
"sparkWarehouseDir",
"sparkWarehouseDir",
typeConverter=TypeConverters.toString,
)
def __init__(
self,
bucketing_key: str,
partition_num: int,
spark_warehouse_dir: str,
table_name: str = "",
):
"""Makes bucketed dataframe from input dataframe.
Args:
bucketing_key: bucketing key (also used as sort key)
partition_num: number of buckets
table_name: parquet file name (for storage in 'spark-warehouse') and spark table name
spark_warehouse_dir: spark warehouse dir,
i.e. value of 'spark.sql.warehouse.dir' property
"""
super().__init__()
self.set(self.bucketingKey, bucketing_key)
self.set(self.partitionNum, partition_num)
self.set(self.tableName, table_name)
self.set(self.sparkWarehouseDir, spark_warehouse_dir)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.remove_parquet()
def remove_parquet(self):
"""Removes parquets where bucketed dataset is stored"""
spark = State().session
spark_warehouse_dir = self.getOrDefault(self.sparkWarehouseDir)
table_name = self.getOrDefault(self.tableName)
fs = get_fs(spark) # pylint: disable=invalid-name
fs_path = spark._jvm.org.apache.hadoop.fs.Path(
f"{spark_warehouse_dir}/{table_name}"
)
is_exists = fs.exists(fs_path)
if is_exists:
fs.delete(fs_path, True)
def set_table_name(self, table_name: str):
"""Sets table name"""
self.set(self.tableName, table_name)
def _transform(self, dataset: DataFrame):
bucketing_key = self.getOrDefault(self.bucketingKey)
partition_num = self.getOrDefault(self.partitionNum)
table_name = self.getOrDefault(self.tableName)
spark_warehouse_dir = self.getOrDefault(self.sparkWarehouseDir)
if not table_name:
raise ValueError(
"Parameter 'table_name' is not set! "
"Please set it via method 'set_table_name'."
)
(
dataset.repartition(partition_num, bucketing_key)
.write.mode("overwrite")
.bucketBy(partition_num, bucketing_key)
.sortBy(bucketing_key)
.saveAsTable(
table_name,
format="parquet",
path=f"{spark_warehouse_dir}/{table_name}",
)
)
spark = State().session
return spark.table(table_name) | /replay_rec-0.11.0-py3-none-any.whl/replay/dataframe_bucketizer.py | 0.844697 | 0.423577 | dataframe_bucketizer.py | pypi |
import collections
import pickle
import logging
import os
from typing import Any, Iterable, List, Optional, Set, Tuple, Union
import numpy as np
import pandas as pd
import pyspark.sql.types as st
from numpy.random import default_rng
from pyspark.ml.linalg import DenseVector, Vectors, VectorUDT
from pyspark.sql import SparkSession, Column, DataFrame, Window, functions as sf
from pyspark.sql.column import _to_java_column, _to_seq
from scipy.sparse import csr_matrix
from replay.constants import AnyDataFrame, NumType, REC_SCHEMA
from replay.session_handler import State
# pylint: disable=invalid-name
def convert2spark(data_frame: Optional[AnyDataFrame]) -> Optional[DataFrame]:
"""
Converts Pandas DataFrame to Spark DataFrame
:param data_frame: pandas DataFrame
:return: converted data
"""
if data_frame is None:
return None
if isinstance(data_frame, DataFrame):
return data_frame
spark = State().session
return spark.createDataFrame(data_frame) # type: ignore
def get_distinct_values_in_column(
dataframe: DataFrame, column: str
) -> Set[Any]:
"""
Get unique values from a column as a set.
:param dataframe: spark DataFrame
:param column: column name
:return: set of unique values
"""
return {
row[column] for row in (dataframe.select(column).distinct().collect())
}
def func_get(vector: np.ndarray, i: int) -> float:
"""
helper function for Spark UDF to get element by index
:param vector: Scala vector or numpy array
:param i: index in a vector
:returns: element value
"""
return float(vector[i])
def get_top_k(
dataframe: DataFrame,
partition_by_col: Column,
order_by_col: List[Column],
k: int,
) -> DataFrame:
"""
Return top ``k`` rows for each entity in ``partition_by_col`` ordered by
``order_by_col``.
>>> from replay.session_handler import State
>>> spark = State().session
>>> log = spark.createDataFrame([(1, 2, 1.), (1, 3, 1.), (1, 4, 0.5), (2, 1, 1.)]).toDF("user_id", "item_id", "relevance")
>>> log.show()
+-------+-------+---------+
|user_id|item_id|relevance|
+-------+-------+---------+
| 1| 2| 1.0|
| 1| 3| 1.0|
| 1| 4| 0.5|
| 2| 1| 1.0|
+-------+-------+---------+
<BLANKLINE>
>>> get_top_k(dataframe=log,
... partition_by_col=sf.col('user_id'),
... order_by_col=[sf.col('relevance').desc(), sf.col('item_id').desc()],
... k=1).orderBy('user_id').show()
+-------+-------+---------+
|user_id|item_id|relevance|
+-------+-------+---------+
| 1| 3| 1.0|
| 2| 1| 1.0|
+-------+-------+---------+
<BLANKLINE>
:param dataframe: spark dataframe to filter
:param partition_by_col: spark column to partition by
    :param order_by_col: list of spark columns to order by
:param k: number of first rows for each entity in ``partition_by_col`` to return
:return: filtered spark dataframe
"""
return (
dataframe.withColumn(
"temp_rank",
sf.row_number().over(
Window.partitionBy(partition_by_col).orderBy(*order_by_col)
),
)
.filter(sf.col("temp_rank") <= k)
.drop("temp_rank")
)
def get_top_k_recs(recs: DataFrame, k: int, id_type: str = "idx") -> DataFrame:
"""
Get top k recommendations by `relevance`.
:param recs: recommendations DataFrame
`[user_id, item_id, relevance]`
:param k: length of a recommendation list
:param id_type: id or idx
:return: top k recommendations `[user_id, item_id, relevance]`
"""
return get_top_k(
dataframe=recs,
partition_by_col=sf.col(f"user_{id_type}"),
order_by_col=[sf.col("relevance").desc()],
k=k,
)
@sf.udf(returnType=st.DoubleType())
def vector_dot(one: DenseVector, two: DenseVector) -> float:
"""
dot product of two column vectors
>>> from replay.session_handler import State
>>> from pyspark.ml.linalg import Vectors
>>> spark = State().session
>>> input_data = (
... spark.createDataFrame([(Vectors.dense([1.0, 2.0]), Vectors.dense([3.0, 4.0]))])
... .toDF("one", "two")
... )
>>> input_data.dtypes
[('one', 'vector'), ('two', 'vector')]
>>> input_data.show()
+---------+---------+
| one| two|
+---------+---------+
|[1.0,2.0]|[3.0,4.0]|
+---------+---------+
<BLANKLINE>
>>> output_data = input_data.select(vector_dot("one", "two").alias("dot"))
>>> output_data.schema
StructType(List(StructField(dot,DoubleType,true)))
>>> output_data.show()
+----+
| dot|
+----+
|11.0|
+----+
<BLANKLINE>
:param one: vector one
:param two: vector two
:returns: dot product
"""
return float(one.dot(two))
@sf.udf(returnType=VectorUDT()) # type: ignore
def vector_mult(
one: Union[DenseVector, NumType], two: DenseVector
) -> DenseVector:
"""
elementwise vector multiplication
>>> from replay.session_handler import State
>>> from pyspark.ml.linalg import Vectors
>>> spark = State().session
>>> input_data = (
... spark.createDataFrame([(Vectors.dense([1.0, 2.0]), Vectors.dense([3.0, 4.0]))])
... .toDF("one", "two")
... )
>>> input_data.dtypes
[('one', 'vector'), ('two', 'vector')]
>>> input_data.show()
+---------+---------+
| one| two|
+---------+---------+
|[1.0,2.0]|[3.0,4.0]|
+---------+---------+
<BLANKLINE>
>>> output_data = input_data.select(vector_mult("one", "two").alias("mult"))
>>> output_data.schema
StructType(List(StructField(mult,VectorUDT,true)))
>>> output_data.show()
+---------+
| mult|
+---------+
|[3.0,8.0]|
+---------+
<BLANKLINE>
:param one: vector one
:param two: vector two
:returns: result
"""
return one * two
def multiply_scala_udf(scalar, vector):
"""
Multiplies a scalar by a vector
:param scalar: column with scalars
:param vector: column with vectors
:return: column expression
"""
sc = SparkSession.getActiveSession().sparkContext
_f = sc._jvm.org.apache.spark.replay.utils.ScalaPySparkUDFs.multiplyUDF()
return Column(_f.apply(_to_seq(sc, [scalar, vector], _to_java_column)))
@sf.udf(returnType=st.ArrayType(st.DoubleType()))
def array_mult(first: st.ArrayType, second: st.ArrayType):
"""
elementwise array multiplication
>>> from replay.session_handler import State
>>> spark = State().session
>>> input_data = (
... spark.createDataFrame([([1.0, 2.0], [3.0, 4.0])])
... .toDF("one", "two")
... )
>>> input_data.dtypes
[('one', 'array<double>'), ('two', 'array<double>')]
>>> input_data.show()
+----------+----------+
| one| two|
+----------+----------+
|[1.0, 2.0]|[3.0, 4.0]|
+----------+----------+
<BLANKLINE>
>>> output_data = input_data.select(array_mult("one", "two").alias("mult"))
>>> output_data.schema
StructType(List(StructField(mult,ArrayType(DoubleType,true),true)))
>>> output_data.show()
+----------+
| mult|
+----------+
|[3.0, 8.0]|
+----------+
<BLANKLINE>
:param first: first array
:param second: second array
:returns: result
"""
return [first[i] * second[i] for i in range(len(first))]
def get_log_info(
log: DataFrame, user_col="user_idx", item_col="item_idx"
) -> str:
"""
Basic log statistics
>>> from replay.session_handler import State
>>> spark = State().session
>>> log = spark.createDataFrame([(1, 2), (3, 4), (5, 2)]).toDF("user_idx", "item_idx")
>>> log.show()
+--------+--------+
|user_idx|item_idx|
+--------+--------+
| 1| 2|
| 3| 4|
| 5| 2|
+--------+--------+
<BLANKLINE>
>>> get_log_info(log)
'total lines: 3, total users: 3, total items: 2'
:param log: interaction log containing ``user_idx`` and ``item_idx``
    :param user_col: name of the column containing user identifiers
    :param item_col: name of the column containing item identifiers
:returns: statistics string
"""
cnt = log.count()
user_cnt = log.select(user_col).distinct().count()
item_cnt = log.select(item_col).distinct().count()
return ", ".join(
[
f"total lines: {cnt}",
f"total users: {user_cnt}",
f"total items: {item_cnt}",
]
)
def get_stats(
log: DataFrame, group_by: str = "user_id", target_column: str = "relevance"
) -> DataFrame:
"""
Calculate log statistics: min, max, mean, median ratings, number of ratings.
>>> from replay.session_handler import get_spark_session, State
>>> spark = get_spark_session(1, 1)
>>> test_df = (spark.
... createDataFrame([(1, 2, 1), (1, 3, 3), (1, 1, 2), (2, 3, 2)])
... .toDF("user_id", "item_id", "rel")
... )
>>> get_stats(test_df, target_column='rel').show()
+-------+--------+-------+-------+---------+----------+
|user_id|mean_rel|max_rel|min_rel|count_rel|median_rel|
+-------+--------+-------+-------+---------+----------+
| 1| 2.0| 3| 1| 3| 2|
| 2| 2.0| 2| 2| 1| 2|
+-------+--------+-------+-------+---------+----------+
<BLANKLINE>
>>> get_stats(test_df, group_by='item_id', target_column='rel').show()
+-------+--------+-------+-------+---------+----------+
|item_id|mean_rel|max_rel|min_rel|count_rel|median_rel|
+-------+--------+-------+-------+---------+----------+
| 2| 1.0| 1| 1| 1| 1|
| 3| 2.5| 3| 2| 2| 2|
| 1| 2.0| 2| 2| 1| 2|
+-------+--------+-------+-------+---------+----------+
<BLANKLINE>
:param log: spark DataFrame with ``user_id``, ``item_id`` and ``relevance`` columns
:param group_by: column to group data by, ``user_id`` or ``item_id``
:param target_column: column with interaction ratings
:return: spark DataFrame with statistics
"""
agg_functions = {
"mean": sf.avg,
"max": sf.max,
"min": sf.min,
"count": sf.count,
}
agg_functions_list = [
func(target_column).alias(str(name + "_" + target_column))
for name, func in agg_functions.items()
]
agg_functions_list.append(
sf.expr(f"percentile_approx({target_column}, 0.5)").alias(
"median_" + target_column
)
)
return log.groupBy(group_by).agg(*agg_functions_list)
def check_numeric(feature_table: DataFrame) -> None:
"""
Check if spark DataFrame columns are of NumericType
:param feature_table: spark DataFrame
"""
for column in feature_table.columns:
if not isinstance(
feature_table.schema[column].dataType, st.NumericType
):
raise ValueError(
f"""Column {column} has type {feature_table.schema[
column].dataType}, that is not numeric."""
)
def to_csr(
log: DataFrame,
user_count: Optional[int] = None,
item_count: Optional[int] = None,
) -> csr_matrix:
"""
Convert DataFrame to csr matrix
>>> import pandas as pd
>>> from replay.utils import convert2spark
>>> data_frame = pd.DataFrame({"user_idx": [0, 1], "item_idx": [0, 2], "relevance": [1, 2]})
>>> data_frame = convert2spark(data_frame)
>>> m = to_csr(data_frame)
>>> m.toarray()
array([[1, 0, 0],
[0, 0, 2]])
:param log: interaction log with ``user_idx``, ``item_idx`` and
``relevance`` columns
:param user_count: number of rows in resulting matrix
:param item_count: number of columns in resulting matrix
"""
pandas_df = log.select("user_idx", "item_idx", "relevance").toPandas()
if pandas_df.empty:
return csr_matrix(
(
[],
([],[]),
),
shape=(0, 0),
)
row_count = int(
user_count
if user_count is not None
else pandas_df["user_idx"].max() + 1
)
col_count = int(
item_count
if item_count is not None
else pandas_df["item_idx"].max() + 1
)
return csr_matrix(
(
pandas_df["relevance"],
(pandas_df["user_idx"], pandas_df["item_idx"]),
),
shape=(row_count, col_count),
)
def horizontal_explode(
data_frame: DataFrame,
column_to_explode: str,
prefix: str,
other_columns: List[Column],
) -> DataFrame:
"""
Transform a column with an array of values into separate columns.
    Each array must contain the same number of values.
>>> from replay.session_handler import State
>>> spark = State().session
>>> input_data = (
... spark.createDataFrame([(5, [1.0, 2.0]), (6, [3.0, 4.0])])
... .toDF("id_col", "array_col")
... )
>>> input_data.show()
+------+----------+
|id_col| array_col|
+------+----------+
| 5|[1.0, 2.0]|
| 6|[3.0, 4.0]|
+------+----------+
<BLANKLINE>
>>> horizontal_explode(input_data, "array_col", "element", [sf.col("id_col")]).show()
+------+---------+---------+
|id_col|element_0|element_1|
+------+---------+---------+
| 5| 1.0| 2.0|
| 6| 3.0| 4.0|
+------+---------+---------+
<BLANKLINE>
:param data_frame: input DataFrame
:param column_to_explode: column with type ``array``
:param prefix: prefix used for new columns, suffix is an integer
:param other_columns: columns to select beside newly created
:returns: DataFrame with elements from ``column_to_explode``
"""
num_columns = len(data_frame.select(column_to_explode).head()[0])
return data_frame.select(
*other_columns,
*[
sf.element_at(column_to_explode, i + 1).alias(f"{prefix}_{i}")
for i in range(num_columns)
],
)
def join_or_return(first, second, on, how):
"""
    Safe wrapper for a join of two DataFrames: if the ``second`` parameter is None, ``first`` is returned.
:param first: Spark DataFrame
:param second: Spark DataFrame
:param on: name of the join column
:param how: type of join
:return: Spark DataFrame
"""
if second is None:
return first
return first.join(second, on=on, how=how)
def fallback(
base: DataFrame, fill: DataFrame, k: int, id_type: str = "idx"
) -> DataFrame:
"""
    Fill missing recommendations for users that have fewer than ``k`` recommended items.
Score values for the fallback model may be decreased to preserve sorting.
:param base: base recommendations that need to be completed
:param fill: extra recommendations
    :param k: desired recommendation list length for each user
:param id_type: id or idx
:return: augmented recommendations
"""
if fill is None:
return base
if base.count() == 0:
return get_top_k_recs(fill, k, id_type)
margin = 0.1
min_in_base = base.agg({"relevance": "min"}).collect()[0][0]
max_in_fill = fill.agg({"relevance": "max"}).collect()[0][0]
diff = max_in_fill - min_in_base
fill = fill.withColumnRenamed("relevance", "relevance_fallback")
if diff >= 0:
fill = fill.withColumn(
"relevance_fallback", sf.col("relevance_fallback") - diff - margin
)
recs = base.join(
fill, on=["user_" + id_type, "item_" + id_type], how="full_outer"
)
recs = recs.withColumn(
"relevance", sf.coalesce("relevance", "relevance_fallback")
).select("user_" + id_type, "item_" + id_type, "relevance")
recs = get_top_k_recs(recs, k, id_type)
return recs
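# Illustrative sketch (assumes `main_recs` and `pop_recs` are dataframes with
# [user_idx, item_idx, relevance] columns, e.g. outputs of two RePlay models):
#
#   full_recs = fallback(base=main_recs, fill=pop_recs, k=10)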
def cache_if_exists(dataframe: Optional[DataFrame]) -> Optional[DataFrame]:
"""
Cache a DataFrame
:param dataframe: Spark DataFrame or None
:return: DataFrame or None
"""
if dataframe is not None:
return dataframe.cache()
return dataframe
def unpersist_if_exists(dataframe: Optional[DataFrame]) -> None:
"""
:param dataframe: DataFrame or None
"""
if dataframe is not None and dataframe.is_cached:
dataframe.unpersist()
def join_with_col_renaming(
left: DataFrame,
right: DataFrame,
on_col_name: Union[str, List],
how: str = "inner",
suffix="join",
) -> DataFrame:
"""
There is a bug in some Spark versions (e.g. 3.0.2), which causes errors
    in joins of DataFrames derived from the same DataFrame on the columns with the same name:
https://issues.apache.org/jira/browse/SPARK-14948
https://issues.apache.org/jira/browse/SPARK-36815.
The function renames columns stated in `on_col_name` in one dataframe,
performs join and removes renamed columns.
:param left: left-side dataframe
:param right: right-side dataframe
:param on_col_name: column names to join on
:param how: join type
:param suffix: suffix added to `on_col_name` values to name temporary column
:return: join result
"""
if isinstance(on_col_name, str):
on_col_name = [on_col_name]
on_condition = sf.lit(True)
for name in on_col_name:
if how == "right":
left = left.withColumnRenamed(name, f"{name}_{suffix}")
else:
right = right.withColumnRenamed(name, f"{name}_{suffix}")
on_condition &= sf.col(name) == sf.col(f"{name}_{suffix}")
return (left.join(right, on=on_condition, how=how)).drop(
*[f"{name}_{suffix}" for name in on_col_name]
)
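# Illustrative sketch (assumes `left_df` and `right_df` share lineage and both
# contain a `user_idx` column):
#
#   joined = join_with_col_renaming(left_df, right_df, on_col_name="user_idx", how="inner")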
def add_to_date(
dataframe: DataFrame,
column_name: str,
base_date: str,
base_date_format: Optional[str] = None,
) -> DataFrame:
"""
Treats column ``column_name`` as a number of days after the ``base_date``.
Converts ``column_name`` to TimestampType with
``base_date`` + values of the ``column_name``.
>>> from replay.session_handler import State
>>> from pyspark.sql.types import IntegerType
>>> spark = State().session
>>> input_data = (
... spark.createDataFrame([5, 6], IntegerType())
... .toDF("days")
... )
>>> input_data.show()
+----+
|days|
+----+
| 5|
| 6|
+----+
<BLANKLINE>
>>> add_to_date(input_data, 'days', '2021/09/01', 'yyyy/MM/dd').show()
+-------------------+
| days|
+-------------------+
|2021-09-06 00:00:00|
|2021-09-07 00:00:00|
+-------------------+
<BLANKLINE>
:param dataframe: spark dataframe
:param column_name: name of a column with numbers
to add to the ``base_date``
:param base_date: str with the date to add to
:param base_date_format: base date pattern to parse
:return: dataframe with new ``column_name`` converted to TimestampType
"""
dataframe = (
dataframe.withColumn(
"tmp", sf.to_timestamp(sf.lit(base_date), format=base_date_format)
)
.withColumn(
column_name,
sf.to_timestamp(sf.expr(f"date_add(tmp, {column_name})")),
)
.drop("tmp")
)
return dataframe
def process_timestamp_column(
dataframe: DataFrame,
column_name: str,
date_format: Optional[str] = None,
) -> DataFrame:
"""
Convert ``column_name`` column of numeric/string/timestamp type
to TimestampType.
Return original ``dataframe`` if the column has TimestampType.
Treats numbers as unix timestamp, treats strings as
a string representation of dates in ``date_format``.
Date format is inferred by pyspark if not defined by ``date_format``.
:param dataframe: spark dataframe
:param column_name: name of ``dataframe`` column to convert
:param date_format: datetime pattern passed to
``to_timestamp`` pyspark sql function
:return: dataframe with updated column ``column_name``
"""
if column_name not in dataframe.columns:
raise ValueError(f"Column {column_name} not found")
# no conversion needed
if isinstance(dataframe.schema[column_name].dataType, st.TimestampType):
return dataframe
# unix timestamp
if isinstance(dataframe.schema[column_name].dataType, st.NumericType):
return dataframe.withColumn(
column_name, sf.to_timestamp(sf.from_unixtime(sf.col(column_name)))
)
# datetime in string format
dataframe = dataframe.withColumn(
column_name,
sf.to_timestamp(sf.col(column_name), format=date_format),
)
return dataframe
@sf.udf(returnType=VectorUDT())
def list_to_vector_udf(array: st.ArrayType) -> DenseVector:
"""
convert spark array to vector
:param array: spark Array to convert
:return: spark DenseVector
"""
return Vectors.dense(array)
@sf.udf(returnType=st.FloatType())
def vector_squared_distance(first: DenseVector, second: DenseVector) -> float:
"""
:param first: first vector
:param second: second vector
:returns: squared distance value
"""
return float(first.squared_distance(second))
@sf.udf(returnType=st.FloatType())
def vector_euclidean_distance_similarity(
first: DenseVector, second: DenseVector
) -> float:
"""
:param first: first vector
:param second: second vector
:returns: 1/(1 + euclidean distance value)
"""
return 1 / (1 + float(first.squared_distance(second)) ** 0.5)
@sf.udf(returnType=st.FloatType())
def cosine_similarity(first: DenseVector, second: DenseVector) -> float:
"""
:param first: first vector
:param second: second vector
:returns: cosine similarity value
"""
num = first.dot(second)
denom = first.dot(first) ** 0.5 * second.dot(second) ** 0.5
return float(num / denom)
def cache_temp_view(df: DataFrame, name: str) -> None:
"""
Create Spark SQL temporary view with `name` and cache it
"""
spark = State().session
df.createOrReplaceTempView(name)
spark.sql(f"cache table {name}")
def drop_temp_view(temp_view_name: str) -> None:
"""
Uncache and drop Spark SQL temporary view
"""
spark = State().session
spark.catalog.dropTempView(temp_view_name)
def sample_top_k_recs(pairs: DataFrame, k: int, seed: Optional[int] = None):
"""
Sample k items for each user with probability proportional to the relevance score.
Motivation: sometimes we have a pre-defined list of items for each user
and could use `predict_pairs` method of RePlay models to score them.
After that we could select top K most relevant items for each user
with `replay.utils.get_top_k_recs` or sample them with
probabilities proportional to their relevance score
with `replay.utils.sample_top_k_recs` to get more diverse recommendations.
:param pairs: spark dataframe with columns ``[user_idx, item_idx, relevance]``
:param k: number of items for each user to return
:param seed: random seed
:return: spark dataframe with columns ``[user_idx, item_idx, relevance]``
"""
pairs = pairs.withColumn(
"probability",
sf.col("relevance")
/ sf.sum("relevance").over(Window.partitionBy("user_idx")),
)
def grouped_map(pandas_df: pd.DataFrame) -> pd.DataFrame:
user_idx = pandas_df["user_idx"][0]
if seed is not None:
local_rng = default_rng(seed + user_idx)
else:
local_rng = default_rng()
items_positions = local_rng.choice(
np.arange(pandas_df.shape[0]),
size=min(k, pandas_df.shape[0]),
p=pandas_df["probability"].values,
replace=False,
)
return pd.DataFrame(
{
"user_idx": k * [user_idx],
"item_idx": pandas_df["item_idx"].values[items_positions],
"relevance": pandas_df["relevance"].values[items_positions],
}
)
recs = pairs.groupby("user_idx").applyInPandas(grouped_map, REC_SCHEMA)
return recs
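# Illustrative sketch (assumes `scored_pairs` has [user_idx, item_idx, relevance]
# columns, e.g. the output of a model's `predict_pairs`):
#
#   diverse_recs = sample_top_k_recs(scored_pairs, k=10, seed=42)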
def filter_cold(
df: Optional[DataFrame],
warm_df: DataFrame,
col_name: str,
) -> Tuple[int, Optional[DataFrame]]:
"""
Filter out new user/item ids absent in `warm_df`.
Return number of new users/items and filtered dataframe.
:param df: spark dataframe with columns ``[`col_name`, ...]``
:param warm_df: spark dataframe with column ``[`col_name`]``,
containing ids of `warm` users/items
:param col_name: name of a column
    :return: number of new user/item ids and the filtered spark dataframe with columns ``[`col_name`, ...]``
"""
if df is None:
return 0, df
num_cold = (
df.select(col_name)
.distinct()
.join(warm_df, on=col_name, how="anti")
.count()
)
if num_cold == 0:
return 0, df
return num_cold, df.join(
warm_df.select(col_name), on=col_name, how="inner"
)
def get_unique_entities(
df: Union[Iterable, DataFrame],
column: str,
) -> DataFrame:
"""
Get unique values from ``df`` and put them into dataframe with column ``column``.
:param df: spark dataframe with ``column`` or python iterable
:param column: name of a column
:return: spark dataframe with column ``[`column`]``
"""
spark = State().session
if isinstance(df, DataFrame):
unique = df.select(column).distinct()
elif isinstance(df, collections.abc.Iterable):
unique = spark.createDataFrame(
data=pd.DataFrame(pd.unique(list(df)), columns=[column])
)
else:
raise ValueError(f"Wrong type {type(df)}")
return unique
def return_recs(
recs: DataFrame, recs_file_path: Optional[str] = None
) -> Optional[DataFrame]:
"""
Save dataframe `recs` to `recs_file_path` if presents otherwise cache
and materialize the dataframe.
:param recs: dataframe with recommendations
:param recs_file_path: absolute path to save recommendations as a parquet file.
    :return: cached and materialized dataframe `recs` if `recs_file_path` is not provided, otherwise None
"""
if recs_file_path is None:
output = recs.cache()
output.count()
return output
recs.write.parquet(path=recs_file_path, mode="overwrite")
return None
def save_picklable_to_parquet(obj: Any, path: str) -> None:
"""
Function dumps object to disk or hdfs in parquet format.
:param obj: object to be saved
:param path: path to dump
:return:
"""
sc = State().session.sparkContext
# We can use `RDD.saveAsPickleFile`, but it has no "overwrite" parameter
pickled_instance = pickle.dumps(obj)
Record = collections.namedtuple("Record", ["data"])
rdd = sc.parallelize([Record(pickled_instance)])
instance_df = rdd.map(lambda rec: Record(bytearray(rec.data))).toDF()
instance_df.write.mode("overwrite").parquet(path)
def load_pickled_from_parquet(path: str) -> Any:
"""
Function loads object from disk or hdfs,
    that was dumped via the `save_picklable_to_parquet` function.
:param path: source path
:return: unpickled object
"""
spark = State().session
df = spark.read.parquet(path)
pickled_instance = df.rdd.map(lambda row: bytes(row.data)).first()
return pickle.loads(pickled_instance)
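# Illustrative round trip (any picklable Python object works; the path is arbitrary):
#
#   save_picklable_to_parquet({"alpha": 0.1}, "/tmp/obj.parquet")
#   restored = load_pickled_from_parquet("/tmp/obj.parquet")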
def assert_omp_single_thread():
"""
Check that OMP_NUM_THREADS is set to 1 and warn if not.
PyTorch uses multithreading for cpu math operations via OpenMP library. Sometimes this
leads to failures when OpenMP multithreading is mixed with multiprocessing.
"""
omp_num_threads = os.environ.get('OMP_NUM_THREADS', None)
if omp_num_threads != '1':
logging.getLogger("replay").warning(
'Environment variable "OMP_NUM_THREADS" is set to "%s". '
'Set it to 1 if the working process freezes.', omp_num_threads
) | /replay_rec-0.11.0-py3-none-any.whl/replay/utils.py | 0.876489 | 0.653168 | utils.py | pypi |
import collections
import logging
from functools import partial
from typing import Any, Dict, List, Optional, Callable, Union
from optuna import Trial
from pyspark.sql import functions as sf
from replay.metrics.base_metric import Metric
SplitData = collections.namedtuple(
"SplitData",
"train test users items user_features_train "
"user_features_test item_features_train item_features_test",
)
# pylint: disable=too-few-public-methods
class ObjectiveWrapper:
"""
This class is implemented according to
`instruction <https://optuna.readthedocs.io/en/stable/faq.html#how-to-define-objective-functions-that-have-own-arguments>`_
on integration with ``optuna``.
Criterion is calculated with ``__call__``,
other arguments are passed into ``__init__``.
"""
# pylint: disable=too-many-arguments,too-many-instance-attributes
def __init__(
self, objective_calculator: Callable[..., float], **kwargs: Any
):
self.objective_calculator = objective_calculator
self.kwargs = kwargs
def __call__(self, trial: Trial) -> float:
"""
Calculate criterion for ``optuna``.
:param trial: current trial
:return: criterion value
"""
return self.objective_calculator(trial=trial, **self.kwargs)
def suggest_params(
trial: Trial, search_space: Dict[str, Dict[str, Union[str, List[Any]]]],
) -> Dict[str, Any]:
"""
This function suggests params to try.
:param trial: optuna trial
:param search_space: hyper parameters and their bounds
:return: dict with parameter values
"""
suggest_dict = {
"uniform": trial.suggest_uniform,
"int": trial.suggest_int,
"loguniform": trial.suggest_loguniform,
"loguniform_int": partial(trial.suggest_int, log=True),
}
res = {}
for param in search_space:
border = search_space[param]["args"]
param_type = search_space[param]["type"]
if param_type == "categorical":
res[param] = trial.suggest_categorical(param, border)
else:
low, high = border
suggest_fn = suggest_dict[param_type]
res[param] = suggest_fn(param, low=low, high=high)
return res
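# A sketch of the expected `search_space` layout (parameter names and bounds are
# illustrative, not defaults of any particular model):
#
#   search_space = {
#       "rank": {"type": "loguniform_int", "args": [8, 256]},
#       "regularization": {"type": "loguniform", "args": [1e-6, 1e-1]},
#       "implicit_prefs": {"type": "categorical", "args": [True, False]},
#   }
#   params = suggest_params(trial, search_space)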
def eval_quality(
split_data: SplitData, recommender, criterion: Metric, k: int,
) -> float:
"""
Calculate criterion value for given parameters
:param split_data: data to train and test model
:param recommender: recommender model
:param criterion: optimization metric
:param k: length of a recommendation list
:return: criterion value
"""
logger = logging.getLogger("replay")
logger.debug("Fitting model inside optimization")
# pylint: disable=protected-access
recommender._fit_wrap(
split_data.train,
split_data.user_features_train,
split_data.item_features_train,
)
logger.debug("Predicting inside optimization")
recs = recommender._predict_wrap(
log=split_data.train,
k=k,
users=split_data.users,
items=split_data.items,
user_features=split_data.user_features_test,
item_features=split_data.item_features_test,
)
logger.debug("Calculating criterion")
criterion_value = criterion(recs, split_data.test, k)
logger.debug("%s=%.6f", criterion, criterion_value)
return criterion_value
# pylint: disable=too-many-arguments
def scenario_objective_calculator(
trial: Trial,
search_space: Dict[str, List[Optional[Any]]],
split_data: SplitData,
recommender,
criterion: Metric,
k: int,
) -> float:
"""
Sample parameters and calculate criterion value
:param trial: optuna trial
:param search_space: hyper parameter search space
:param split_data: data to train and test model
:param recommender: recommender model
:param criterion: optimization metric
:param k: length of a recommendation list
:return: criterion value
"""
params_for_trial = suggest_params(trial, search_space)
recommender.set_params(**params_for_trial)
return eval_quality(split_data, recommender, criterion, k)
MainObjective = partial(
ObjectiveWrapper, objective_calculator=scenario_objective_calculator
)
# pylint: disable=too-few-public-methods
class ItemKNNObjective:
"""
This class is implemented according to
`instruction <https://optuna.readthedocs.io/en/stable/faq.html#how-to-define-objective-functions-that-have-own-arguments>`_
on integration with ``optuna``.
Criterion is calculated with ``__call__``,
other arguments are passed into ``__init__``.
"""
# pylint: disable=too-many-arguments,too-many-instance-attributes
def __init__(self, **kwargs: Any):
self.kwargs = kwargs
max_neighbours = self.kwargs["search_space"]["num_neighbours"]["args"][
1
]
model = self.kwargs["recommender"]
split_data = self.kwargs["split_data"]
train = split_data.train
model.num_neighbours = max_neighbours
df = train.select("user_idx", "item_idx", "relevance")
if not model.use_relevance:
df = df.withColumn("relevance", sf.lit(1))
self.dot_products = model._get_products(df).cache()
def objective_calculator(
self,
trial: Trial,
search_space: Dict[str, List[Optional[Any]]],
split_data: SplitData,
recommender,
criterion: Metric,
k: int,
) -> float:
"""
Sample parameters and calculate criterion value
:param trial: optuna trial
:param search_space: hyper parameter search space
:param split_data: data to train and test model
:param recommender: recommender model
:param criterion: optimization metric
:param k: length of a recommendation list
:return: criterion value
"""
params_for_trial = suggest_params(trial, search_space)
recommender.set_params(**params_for_trial)
recommender.fit_users = split_data.train.select("user_idx").distinct()
recommender.fit_items = split_data.train.select("item_idx").distinct()
similarity = recommender._shrink(self.dot_products, recommender.shrink)
recommender.similarity = recommender._get_k_most_similar(
similarity
).cache()
recs = recommender._predict_wrap(
log=split_data.train,
k=k,
users=split_data.users,
items=split_data.items,
user_features=split_data.user_features_test,
item_features=split_data.item_features_test,
)
logger = logging.getLogger("replay")
logger.debug("Calculating criterion")
criterion_value = criterion(recs, split_data.test, k)
logger.debug("%s=%.6f", criterion, criterion_value)
return criterion_value
def __call__(self, trial: Trial) -> float:
"""
Calculate criterion for ``optuna``.
:param trial: current trial
:return: criterion value
"""
return self.objective_calculator(trial=trial, **self.kwargs) | /replay_rec-0.11.0-py3-none-any.whl/replay/optuna_objective.py | 0.90592 | 0.288134 | optuna_objective.py | pypi |
from abc import abstractmethod
from typing import Optional, Union, Iterable, Dict, List, Any, Tuple
from pyspark.sql import DataFrame
from replay.constants import AnyDataFrame
from replay.filters import filter_by_min_count
from replay.metrics import Metric, NDCG
from replay.models.base_rec import BaseRecommender
from replay.utils import convert2spark, get_unique_entities
class BaseScenario(BaseRecommender):
"""Base scenario class"""
can_predict_cold_users: bool = False
def __init__(self, cold_model, threshold=5):
self.threshold = threshold
self.cold_model = cold_model
self.hot_users = None
# TO DO: add save/load for scenarios
@property
def _init_args(self):
return {"threshold": self.threshold}
def fit(
self,
log: AnyDataFrame,
user_features: Optional[AnyDataFrame] = None,
item_features: Optional[AnyDataFrame] = None,
) -> None:
"""
:param log: input DataFrame ``[user_id, item_id, timestamp, relevance]``
:param user_features: user features ``[user_id, timestamp]`` + feature columns
:param item_features: item features ``[item_id, timestamp]`` + feature columns
:return:
"""
hot_data = filter_by_min_count(log, self.threshold, "user_idx")
self.hot_users = hot_data.select("user_idx").distinct()
self._fit_wrap(hot_data, user_features, item_features)
self.cold_model._fit_wrap(log, user_features, item_features)
# pylint: disable=too-many-arguments
def predict(
self,
log: AnyDataFrame,
k: int,
users: Optional[Union[AnyDataFrame, Iterable]] = None,
items: Optional[Union[AnyDataFrame, Iterable]] = None,
user_features: Optional[AnyDataFrame] = None,
item_features: Optional[AnyDataFrame] = None,
filter_seen_items: bool = True,
) -> DataFrame:
"""
Get recommendations
:param log: historical log of interactions
``[user_id, item_id, timestamp, relevance]``
        :param k: length of recommendation lists, should be less than the total number of ``items``
:param users: users to create recommendations for
dataframe containing ``[user_id]`` or ``array-like``;
if ``None``, recommend to all users from ``log``
:param items: candidate items for recommendations
dataframe containing ``[item_id]`` or ``array-like``;
if ``None``, take all items from ``log``.
            If it contains new items, ``relevance`` for them will be ``0``.
:param user_features: user features
``[user_id , timestamp]`` + feature columns
:param item_features: item features
``[item_id , timestamp]`` + feature columns
:param filter_seen_items: flag to remove seen items from recommendations based on ``log``.
:return: recommendation dataframe
``[user_id, item_id, relevance]``
"""
log = convert2spark(log)
users = users or log or user_features or self.fit_users
users = get_unique_entities(users, "user_idx")
hot_data = filter_by_min_count(log, self.threshold, "user_idx")
hot_users = hot_data.select("user_idx").distinct()
if not self.can_predict_cold_users:
            hot_users = hot_users.join(self.hot_users, on="user_idx", how="inner")
hot_users = hot_users.join(users, on="user_idx", how="inner")
hot_pred = self._predict_wrap(
log=hot_data,
k=k,
users=hot_users,
items=items,
user_features=user_features,
item_features=item_features,
filter_seen_items=filter_seen_items,
)
if log is not None:
cold_data = log.join(self.hot_users, how="anti", on="user_idx")
else:
cold_data = None
cold_users = users.join(self.hot_users, how="anti", on="user_idx")
cold_pred = self.cold_model._predict_wrap(
log=cold_data,
k=k,
users=cold_users,
items=items,
user_features=user_features,
item_features=item_features,
filter_seen_items=filter_seen_items,
)
return hot_pred.union(cold_pred)
def fit_predict(
self,
log: AnyDataFrame,
k: int,
users: Optional[Union[AnyDataFrame, Iterable]] = None,
items: Optional[Union[AnyDataFrame, Iterable]] = None,
user_features: Optional[AnyDataFrame] = None,
item_features: Optional[AnyDataFrame] = None,
filter_seen_items: bool = True,
) -> DataFrame:
"""
Train and get recommendations
:param log: historical log of interactions
``[user_id, item_id, timestamp, relevance]``
        :param k: length of recommendation lists, should be less than the total number of ``items``
:param users: users to create recommendations for
dataframe containing ``[user_id]`` or ``array-like``;
if ``None``, recommend to all users from ``log``
:param items: candidate items for recommendations
dataframe containing ``[item_id]`` or ``array-like``;
if ``None``, take all items from ``log``.
            If it contains new items, ``relevance`` for them will be ``0``.
:param user_features: user features
``[user_id , timestamp]`` + feature columns
:param item_features: item features
``[item_id , timestamp]`` + feature columns
:param filter_seen_items: flag to remove seen items from recommendations based on ``log``.
:return: recommendation dataframe
``[user_id, item_id, relevance]``
"""
self.fit(log, user_features, item_features)
return self.predict(
log,
k,
users,
items,
user_features,
item_features,
filter_seen_items,
)
# pylint: disable=too-many-arguments, too-many-locals
def optimize(
self,
train: AnyDataFrame,
test: AnyDataFrame,
user_features: Optional[AnyDataFrame] = None,
item_features: Optional[AnyDataFrame] = None,
param_borders: Optional[Dict[str, Dict[str, List[Any]]]] = None,
criterion: Metric = NDCG(),
k: int = 10,
budget: int = 10,
new_study: bool = True,
) -> Tuple[Dict[str, Any]]:
"""
Searches best parameters with optuna.
:param train: train data
:param test: test data
:param user_features: user features
:param item_features: item features
:param param_borders: a dictionary with search grid, where
key is the parameter name and value is
            the range of possible values ``{param: [low, high]}``.
:param criterion: metric to use for optimization
:param k: recommendation list length
:param budget: number of points to try
:param new_study: keep searching with previous study or start a new study
:return: dictionary with best parameters
"""
if param_borders is None:
param_borders = {"main": None, "cold": None}
self.logger.info("Optimizing main model...")
params = self._optimize(
train,
test,
user_features,
item_features,
param_borders["main"],
criterion,
k,
budget,
new_study,
)
if not isinstance(params, tuple):
self.set_params(**params)
if self.cold_model._search_space is not None:
self.logger.info("Optimizing cold model...")
cold_params = self.cold_model._optimize(
train,
test,
user_features,
item_features,
param_borders["cold"],
criterion,
k,
budget,
new_study,
)
if not isinstance(cold_params, tuple):
self.cold_model.set_params(**cold_params)
else:
cold_params = None
return params, cold_params
@abstractmethod
def _optimize(
self,
train: AnyDataFrame,
test: AnyDataFrame,
user_features: Optional[AnyDataFrame] = None,
item_features: Optional[AnyDataFrame] = None,
param_borders: Optional[Dict[str, Dict[str, List[Any]]]] = None,
criterion: Metric = NDCG(),
k: int = 10,
budget: int = 10,
new_study: bool = True,
):
pass | /replay_rec-0.11.0-py3-none-any.whl/replay/scenarios/basescenario.py | 0.935013 | 0.294245 | basescenario.py | pypi |
from typing import Optional, Dict, List, Any, Tuple, Union, Iterable
from pyspark.sql import DataFrame
from replay.constants import AnyDataFrame
from replay.filters import filter_by_min_count
from replay.metrics import Metric, NDCG
from replay.models import PopRec
from replay.models.base_rec import BaseRecommender
from replay.utils import fallback, get_unique_entities
class Fallback(BaseRecommender):
"""Fill missing recommendations using fallback model.
    Behaves like a recommender and has the same interface."""
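    # Illustrative usage (a sketch; `log` is an interaction dataframe with
    # user_idx/item_idx/relevance columns, the models are any RePlay recommenders):
    #
    #   scenario = Fallback(main_model=ALSWrap(rank=64), fallback_model=PopRec(), threshold=5)
    #   scenario.fit(log)
    #   recs = scenario.predict(log, k=10)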
can_predict_cold_users: bool = True
def __init__(
self,
main_model: BaseRecommender,
fallback_model: BaseRecommender = PopRec(),
threshold: int = 0,
):
"""Create recommendations with `main_model`, and fill missing with `fallback_model`.
        `relevance` of fallback_model will be decreased to keep main recommendations on top.
:param main_model: initialized model
:param fallback_model: initialized model
:param threshold: number of interactions by which users are divided into cold and hot
"""
self.threshold = threshold
self.hot_users = None
self.main_model = main_model
# pylint: disable=invalid-name
self.fb_model = fallback_model
# TO DO: add save/load for scenarios
@property
def _init_args(self):
return {"threshold": self.threshold}
def __str__(self):
return f"Fallback_{str(self.main_model)}_{str(self.fb_model)}"
def fit(
self,
log: AnyDataFrame,
user_features: Optional[AnyDataFrame] = None,
item_features: Optional[AnyDataFrame] = None,
) -> None:
"""
:param log: input DataFrame ``[user_id, item_id, timestamp, relevance]``
:param user_features: user features ``[user_id, timestamp]`` + feature columns
:param item_features: item features ``[item_id, timestamp]`` + feature columns
:return:
"""
hot_data = filter_by_min_count(log, self.threshold, "user_idx")
self.hot_users = hot_data.select("user_idx").distinct()
self._fit_wrap(hot_data, user_features, item_features)
self.fb_model._fit_wrap(log, user_features, item_features)
# pylint: disable=too-many-arguments
def predict(
self,
log: DataFrame,
k: int,
users: Optional[Union[DataFrame, Iterable]] = None,
items: Optional[Union[DataFrame, Iterable]] = None,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
filter_seen_items: bool = True,
) -> DataFrame:
"""
Get recommendations
:param log: historical log of interactions
``[user_idx, item_idx, timestamp, relevance]``
        :param k: length of recommendation lists, should be less than the total number of ``items``
:param users: users to create recommendations for
dataframe containing ``[user_idx]`` or ``array-like``;
if ``None``, recommend to all users from ``log``
:param items: candidate items for recommendations
dataframe containing ``[item_idx]`` or ``array-like``;
if ``None``, take all items from ``log``.
            If it contains new items, ``relevance`` for them will be ``0``.
:param user_features: user features
``[user_idx , timestamp]`` + feature columns
:param item_features: item features
``[item_idx , timestamp]`` + feature columns
:param filter_seen_items: flag to remove seen items from recommendations based on ``log``.
:return: recommendation dataframe
``[user_idx, item_idx, relevance]``
"""
users = users or log or user_features or self.fit_users
users = get_unique_entities(users, "user_idx")
hot_data = filter_by_min_count(log, self.threshold, "user_idx")
hot_users = hot_data.select("user_idx").distinct()
hot_users = hot_users.join(self.hot_users, on="user_idx")
hot_users = hot_users.join(users, on="user_idx", how="inner")
hot_pred = self._predict_wrap(
log=hot_data,
k=k,
users=hot_users,
items=items,
user_features=user_features,
item_features=item_features,
filter_seen_items=filter_seen_items,
)
cold_pred = self.fb_model._predict_wrap(
log=log,
k=k,
users=users,
items=items,
user_features=user_features,
item_features=item_features,
filter_seen_items=filter_seen_items,
)
pred = fallback(hot_pred, cold_pred, k)
return pred
# pylint: disable=too-many-arguments, too-many-locals
def optimize(
self,
train: AnyDataFrame,
test: AnyDataFrame,
user_features: Optional[AnyDataFrame] = None,
item_features: Optional[AnyDataFrame] = None,
param_borders: Optional[Dict[str, Dict[str, List[Any]]]] = None,
criterion: Metric = NDCG(),
k: int = 10,
budget: int = 10,
new_study: bool = True,
) -> Tuple[Dict[str, Any]]:
"""
Searches best parameters with optuna.
:param train: train data
:param test: test data
:param user_features: user features
:param item_features: item features
:param param_borders: a dictionary with keys main and
fallback containing dictionaries with search grid, where
key is the parameter name and value is the range of possible values
``{param: [low, high]}``.
:param criterion: metric to use for optimization
:param k: recommendation list length
:param budget: number of points to try
:param new_study: keep searching with previous study or start a new study
:return: tuple of dictionaries with best parameters
"""
if param_borders is None:
param_borders = {"main": None, "fallback": None}
self.logger.info("Optimizing main model...")
params = self.main_model.optimize(
train,
test,
user_features,
item_features,
param_borders["main"],
criterion,
k,
budget,
new_study,
)
self.main_model.set_params(**params)
if self.fb_model._search_space is not None:
self.logger.info("Optimizing fallback model...")
fb_params = self.fb_model.optimize(
train,
test,
user_features,
item_features,
param_borders["fallback"],
criterion,
k,
budget,
new_study,
)
self.fb_model.set_params(**fb_params)
else:
fb_params = None
return params, fb_params
def _fit(
self,
log: DataFrame,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
) -> None:
self.main_model._fit_wrap(log, user_features, item_features)
self.fb_model._fit_wrap(log, user_features, item_features)
def _predict(
self,
log: DataFrame,
k: int,
users: DataFrame,
items: DataFrame,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
filter_seen_items: bool = True,
) -> DataFrame:
pred = self.main_model._predict(
log,
k,
users,
items,
user_features,
item_features,
filter_seen_items,
)
return pred | /replay_rec-0.11.0-py3-none-any.whl/replay/scenarios/fallback.py | 0.958197 | 0.371251 | fallback.py | pypi |
import logging
from abc import abstractmethod
from typing import Dict, Optional
from lightautoml.automl.presets.tabular_presets import TabularAutoML
from lightautoml.tasks import Task
from pyspark.sql import DataFrame
from replay.utils import (
convert2spark,
get_top_k_recs,
)
class ReRanker:
"""
Base class for models which re-rank recommendations produced by other models.
    May be used as part of a two-stage recommendation pipeline.
"""
_logger: Optional[logging.Logger] = None
@property
def logger(self) -> logging.Logger:
"""
:returns: get library logger
"""
if self._logger is None:
self._logger = logging.getLogger("replay")
return self._logger
@abstractmethod
def fit(self, data: DataFrame, fit_params: Optional[Dict] = None) -> None:
"""
        Fit the model which re-ranks user-item pairs generated outside the model.
:param data: spark dataframe with obligatory ``[user_idx, item_idx, target]``
columns and features' columns
:param fit_params: dict of parameters to pass to model.fit()
"""
@abstractmethod
def predict(self, data, k) -> DataFrame:
"""
Re-rank data with the model and get top-k recommendations for each user.
:param data: spark dataframe with obligatory ``[user_idx, item_idx]``
columns and features' columns
:param k: number of recommendations for each user
"""
class LamaWrap(ReRanker):
"""
LightAutoML TabularPipeline binary classification model wrapper for recommendations re-ranking.
Read more: https://github.com/sberbank-ai-lab/LightAutoML
"""
def __init__(
self,
params: Optional[Dict] = None,
config_path: Optional[str] = None,
):
"""
Initialize LightAutoML TabularPipeline with passed params/configuration file.
:param params: dict of model parameters
:param config_path: path to configuration file
"""
self.model = TabularAutoML(
task=Task("binary"),
config_path=config_path,
**(params if params is not None else {}),
)
def fit(self, data: DataFrame, fit_params: Optional[Dict] = None) -> None:
"""
Fit the LightAutoML TabularPipeline model with binary classification task.
Data should include negative and positive user-item pairs.
:param data: spark dataframe with obligatory ``[user_idx, item_idx, target]``
columns and features' columns. `Target` column should consist of zeros and ones
as the model is a binary classification model.
:param fit_params: dict of parameters to pass to model.fit()
See LightAutoML TabularPipeline fit_predict parameters.
"""
params = {"roles": {"target": "target"}, "verbose": 1}
params.update({} if fit_params is None else fit_params)
data = data.drop("user_idx", "item_idx")
data_pd = data.toPandas()
self.model.fit_predict(data_pd, **params)
def predict(self, data: DataFrame, k: int) -> DataFrame:
"""
Re-rank data with the model and get top-k recommendations for each user.
:param data: spark dataframe with obligatory ``[user_idx, item_idx]``
columns and features' columns
:param k: number of recommendations for each user
:return: spark dataframe with top-k recommendations for each user
the dataframe columns are ``[user_idx, item_idx, relevance]``
"""
data_pd = data.toPandas()
candidates_ids = data_pd[["user_idx", "item_idx"]]
data_pd.drop(columns=["user_idx", "item_idx"], inplace=True)
self.logger.info("Starting re-ranking")
candidates_pred = self.model.predict(data_pd)
candidates_ids.loc[:, "relevance"] = candidates_pred.data[:, 0]
self.logger.info(
"%s candidates rated for %s users",
candidates_ids.shape[0],
candidates_ids["user_idx"].nunique(),
)
self.logger.info("top-k")
return get_top_k_recs(
recs=convert2spark(candidates_ids), k=k, id_type="idx"
) | /replay_rec-0.11.0-py3-none-any.whl/replay/scenarios/two_stages/reranker.py | 0.920763 | 0.544075 | reranker.py | pypi |
from collections.abc import Iterable
from typing import Dict, Optional, Tuple, List, Union, Any
import pyspark.sql.functions as sf
from pyspark.sql import DataFrame
from replay.constants import AnyDataFrame
from replay.data_preparator import ToNumericFeatureTransformer
from replay.history_based_fp import HistoryBasedFeaturesProcessor
from replay.metrics import Metric, Precision
from replay.models import ALSWrap, RandomRec, PopRec
from replay.models.base_rec import BaseRecommender, HybridRecommender
from replay.scenarios.two_stages.reranker import LamaWrap
from replay.session_handler import State
from replay.splitters import Splitter, UserSplitter
from replay.utils import (
array_mult,
cache_if_exists,
fallback,
get_log_info,
get_top_k_recs,
horizontal_explode,
join_or_return,
join_with_col_renaming,
unpersist_if_exists,
)
# pylint: disable=too-many-locals, too-many-arguments
def get_first_level_model_features(
model: DataFrame,
pairs: DataFrame,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
add_factors_mult: bool = True,
prefix: str = "",
) -> DataFrame:
"""
Get user and item embeddings from replay model.
Can also compute elementwise multiplication between them with ``add_factors_mult`` parameter.
Zero vectors are returned if a model does not have embeddings for specific users/items.
:param model: trained model
    :param pairs: user-item pairs to get vectors for `[user_id/user_idx, item_id/item_idx]`
:param user_features: user features `[user_id/user_idx, feature_1, ....]`
:param item_features: item features `[item_id/item_idx, feature_1, ....]`
:param add_factors_mult: flag to add elementwise multiplication
:param prefix: name to add to the columns
:return: DataFrame
"""
users = pairs.select("user_idx").distinct()
items = pairs.select("item_idx").distinct()
user_factors, user_vector_len = model._get_features_wrap(
users, user_features
)
item_factors, item_vector_len = model._get_features_wrap(
items, item_features
)
pairs_with_features = join_or_return(
pairs, user_factors, how="left", on="user_idx"
)
pairs_with_features = join_or_return(
pairs_with_features,
item_factors,
how="left",
on="item_idx",
)
factors_to_explode = []
if user_factors is not None:
pairs_with_features = pairs_with_features.withColumn(
"user_factors",
sf.coalesce(
sf.col("user_factors"),
sf.array([sf.lit(0.0)] * user_vector_len),
),
)
factors_to_explode.append(("user_factors", "uf"))
if item_factors is not None:
pairs_with_features = pairs_with_features.withColumn(
"item_factors",
sf.coalesce(
sf.col("item_factors"),
sf.array([sf.lit(0.0)] * item_vector_len),
),
)
factors_to_explode.append(("item_factors", "if"))
if model.__str__() == "LightFMWrap":
pairs_with_features = (
pairs_with_features.fillna({"user_bias": 0, "item_bias": 0})
.withColumnRenamed("user_bias", f"{prefix}_user_bias")
.withColumnRenamed("item_bias", f"{prefix}_item_bias")
)
if (
add_factors_mult
and user_factors is not None
and item_factors is not None
):
pairs_with_features = pairs_with_features.withColumn(
"factors_mult",
array_mult(sf.col("item_factors"), sf.col("user_factors")),
)
factors_to_explode.append(("factors_mult", "fm"))
for col_name, feature_prefix in factors_to_explode:
col_set = set(pairs_with_features.columns)
col_set.remove(col_name)
pairs_with_features = horizontal_explode(
data_frame=pairs_with_features,
column_to_explode=col_name,
other_columns=[sf.col(column) for column in sorted(list(col_set))],
prefix=f"{prefix}_{feature_prefix}",
)
return pairs_with_features
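# Illustrative sketch (not part of the library) of how this helper is used inside
# the two-stage scenario below; `als_model` and the DataFrames are assumptions:
#
#     features = get_first_level_model_features(
#         model=als_model,
#         pairs=pairs.select("user_idx", "item_idx"),
#         user_features=user_features,
#         item_features=item_features,
#         prefix="m_0",
#     )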
# pylint: disable=too-many-instance-attributes
class TwoStagesScenario(HybridRecommender):
"""
*train*:
1) take input ``log`` and split it into first_level_train and second_level_train
default splitter splits each user's data 50/50
2) train ``first_stage_models`` on ``first_stage_train``
3) create negative examples to train second stage model using one of:
- wrong recommendations from first stage
- random examples
use ``num_negatives`` to specify number of negatives per user
4) augment dataset with features:
- get 1 level recommendations for positive examples
from second_level_train and for generated negative examples
- add user and item features
- generate statistical and pair features
5) train ``TabularAutoML`` from LightAutoML
*inference*:
1) take ``log``
2) generate candidates, their number can be specified with ``num_candidates``
3) add features as in train
4) get recommendations
"""
can_predict_cold_users: bool = True
can_predict_cold_items: bool = True
# pylint: disable=too-many-arguments
def __init__(
self,
train_splitter: Splitter = UserSplitter(
item_test_size=0.5, shuffle=True, seed=42
),
first_level_models: Union[
List[BaseRecommender], BaseRecommender
] = ALSWrap(rank=128),
fallback_model: Optional[BaseRecommender] = PopRec(),
use_first_level_models_feat: Union[List[bool], bool] = False,
second_model_params: Optional[Union[Dict, str]] = None,
second_model_config_path: Optional[str] = None,
num_negatives: int = 100,
negatives_type: str = "first_level",
use_generated_features: bool = False,
user_cat_features_list: Optional[List] = None,
item_cat_features_list: Optional[List] = None,
custom_features_processor: HistoryBasedFeaturesProcessor = None,
seed: int = 123,
) -> None:
"""
:param train_splitter: splitter to get ``first_level_train`` and ``second_level_train``.
Default is random 50% split.
:param first_level_models: model or a list of models
:param fallback_model: model used to fill missing recommendations at first level models
:param use_first_level_models_feat: flag or a list of flags to use
features created by first level models
:param second_model_params: TabularAutoML parameters
:param second_model_config_path: path to config file for TabularAutoML
:param num_negatives: number of negative examples used during train
:param negatives_type: negative examples creation strategy, ``random``
or most relevant examples from ``first_level``
:param use_generated_features: flag to use generated features to train second level
:param user_cat_features_list: list of user categorical features
:param item_cat_features_list: list of item categorical features
:param custom_features_processor: you can pass custom feature processor
:param seed: random seed
"""
self.train_splitter = train_splitter
self.cached_list = []
self.first_level_models = (
first_level_models
if isinstance(first_level_models, Iterable)
else [first_level_models]
)
self.first_level_item_len = 0
self.first_level_user_len = 0
self.random_model = RandomRec(seed=seed)
self.fallback_model = fallback_model
self.first_level_user_features_transformer = (
ToNumericFeatureTransformer()
)
self.first_level_item_features_transformer = (
ToNumericFeatureTransformer()
)
if isinstance(use_first_level_models_feat, bool):
self.use_first_level_models_feat = [
use_first_level_models_feat
] * len(self.first_level_models)
else:
if len(self.first_level_models) != len(
use_first_level_models_feat
):
raise ValueError(
f"For each model from first_level_models specify "
f"flag to use first level features."
f"Length of first_level_models is {len(first_level_models)}, "
f"Length of use_first_level_models_feat is {len(use_first_level_models_feat)}"
)
self.use_first_level_models_feat = use_first_level_models_feat
self.second_stage_model = LamaWrap(
params=second_model_params, config_path=second_model_config_path
)
self.num_negatives = num_negatives
if negatives_type not in ["random", "first_level"]:
raise ValueError(
f"Invalid negatives_type value: {negatives_type}. Use 'random' or 'first_level'"
)
self.negatives_type = negatives_type
self.use_generated_features = use_generated_features
self.features_processor = (
custom_features_processor
if custom_features_processor
else HistoryBasedFeaturesProcessor(
user_cat_features_list=user_cat_features_list,
item_cat_features_list=item_cat_features_list,
)
)
self.seed = seed
# TO DO: add save/load for scenarios
@property
def _init_args(self):
return {}
# pylint: disable=too-many-locals
def _add_features_for_second_level(
self,
log_to_add_features: DataFrame,
log_for_first_level_models: DataFrame,
user_features: DataFrame,
item_features: DataFrame,
) -> DataFrame:
"""
Added features are:
- relevance from first level models
- user and item features from first level models
- dataset features
- FeatureProcessor features
:param log_to_add_features: input DataFrame``[user_idx, item_idx, timestamp, relevance]``
:param log_for_first_level_models: DataFrame``[user_idx, item_idx, timestamp, relevance]``
:param user_features: user features``[user_idx]`` + feature columns
:param item_features: item features``[item_idx]`` + feature columns
:return: DataFrame
"""
self.logger.info("Generating features")
full_second_level_train = log_to_add_features
first_level_item_features_cached = cache_if_exists(
self.first_level_item_features_transformer.transform(item_features)
)
first_level_user_features_cached = cache_if_exists(
self.first_level_user_features_transformer.transform(user_features)
)
pairs = log_to_add_features.select("user_idx", "item_idx")
for idx, model in enumerate(self.first_level_models):
current_pred = self._predict_pairs_with_first_level_model(
model=model,
log=log_for_first_level_models,
pairs=pairs,
user_features=first_level_user_features_cached,
item_features=first_level_item_features_cached,
).withColumnRenamed("relevance", f"rel_{idx}_{model}")
full_second_level_train = full_second_level_train.join(
sf.broadcast(current_pred),
on=["user_idx", "item_idx"],
how="left",
)
if self.use_first_level_models_feat[idx]:
features = get_first_level_model_features(
model=model,
pairs=full_second_level_train.select(
"user_idx", "item_idx"
),
user_features=first_level_user_features_cached,
item_features=first_level_item_features_cached,
prefix=f"m_{idx}",
)
full_second_level_train = join_with_col_renaming(
left=full_second_level_train,
right=features,
on_col_name=["user_idx", "item_idx"],
how="left",
)
unpersist_if_exists(first_level_user_features_cached)
unpersist_if_exists(first_level_item_features_cached)
full_second_level_train_cached = full_second_level_train.fillna(
0
).cache()
self.logger.info("Adding features from the dataset")
full_second_level_train = join_or_return(
full_second_level_train_cached,
user_features,
on="user_idx",
how="left",
)
full_second_level_train = join_or_return(
full_second_level_train,
item_features,
on="item_idx",
how="left",
)
if self.use_generated_features:
if not self.features_processor.fitted:
self.features_processor.fit(
log=log_for_first_level_models,
user_features=user_features,
item_features=item_features,
)
self.logger.info("Adding generated features")
full_second_level_train = self.features_processor.transform(
log=full_second_level_train
)
self.logger.info(
"Columns at second level: %s",
" ".join(full_second_level_train.columns),
)
full_second_level_train_cached.unpersist()
return full_second_level_train
def _split_data(self, log: DataFrame) -> Tuple[DataFrame, DataFrame]:
"""Write statistics"""
first_level_train, second_level_train = self.train_splitter.split(log)
State().logger.debug("Log info: %s", get_log_info(log))
State().logger.debug(
"first_level_train info: %s", get_log_info(first_level_train)
)
State().logger.debug(
"second_level_train info: %s", get_log_info(second_level_train)
)
return first_level_train, second_level_train
@staticmethod
def _filter_or_return(dataframe, condition):
if dataframe is None:
return dataframe
return dataframe.filter(condition)
def _predict_with_first_level_model(
self,
model: BaseRecommender,
log: DataFrame,
k: int,
users: DataFrame,
items: DataFrame,
user_features: DataFrame,
item_features: DataFrame,
log_to_filter: DataFrame,
):
"""
Filter users and items using can_predict_cold_items and can_predict_cold_users, and predict
"""
if not model.can_predict_cold_items:
log, items, item_features = [
self._filter_or_return(
dataframe=df,
condition=sf.col("item_idx") < self.first_level_item_len,
)
for df in [log, items, item_features]
]
if not model.can_predict_cold_users:
log, users, user_features = [
self._filter_or_return(
dataframe=df,
condition=sf.col("user_idx") < self.first_level_user_len,
)
for df in [log, users, user_features]
]
log_to_filter_cached = join_with_col_renaming(
left=log_to_filter,
right=users,
on_col_name="user_idx",
).cache()
max_positives_to_filter = 0
if log_to_filter_cached.count() > 0:
max_positives_to_filter = (
log_to_filter_cached.groupBy("user_idx")
.agg(sf.count("item_idx").alias("num_positives"))
.select(sf.max("num_positives"))
.collect()[0][0]
)
pred = model._predict(
log,
k=k + max_positives_to_filter,
users=users,
items=items,
user_features=user_features,
item_features=item_features,
filter_seen_items=False,
)
pred = pred.join(
log_to_filter_cached.select("user_idx", "item_idx"),
on=["user_idx", "item_idx"],
how="anti",
).drop("user", "item")
log_to_filter_cached.unpersist()
return get_top_k_recs(pred, k)
def _predict_pairs_with_first_level_model(
self,
model: BaseRecommender,
log: DataFrame,
pairs: DataFrame,
user_features: DataFrame,
item_features: DataFrame,
):
"""
Get relevance for selected user-item pairs.
"""
if not model.can_predict_cold_items:
log, pairs, item_features = [
self._filter_or_return(
dataframe=df,
condition=sf.col("item_idx") < self.first_level_item_len,
)
for df in [log, pairs, item_features]
]
if not model.can_predict_cold_users:
log, pairs, user_features = [
self._filter_or_return(
dataframe=df,
condition=sf.col("user_idx") < self.first_level_user_len,
)
for df in [log, pairs, user_features]
]
return model._predict_pairs(
pairs=pairs,
log=log,
user_features=user_features,
item_features=item_features,
)
# pylint: disable=unused-argument
def _get_first_level_candidates(
self,
model: BaseRecommender,
log: DataFrame,
k: int,
users: DataFrame,
items: DataFrame,
user_features: DataFrame,
item_features: DataFrame,
log_to_filter: DataFrame,
) -> DataFrame:
"""
Combining the base model predictions with the fallback model
predictions.
"""
passed_arguments = locals()
passed_arguments.pop("self")
candidates = self._predict_with_first_level_model(**passed_arguments)
if self.fallback_model is not None:
passed_arguments.pop("model")
fallback_candidates = self._predict_with_first_level_model(
model=self.fallback_model, **passed_arguments
)
candidates = fallback(
base=candidates,
fill=fallback_candidates,
k=self.num_negatives,
)
return candidates
# pylint: disable=too-many-locals,too-many-statements
def _fit(
self,
log: DataFrame,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
) -> None:
self.cached_list = []
self.logger.info("Data split")
first_level_train, second_level_positive = self._split_data(log)
# second_level_positive = second_level_positive
# .join(first_level_train.select("user_idx"), on="user_idx", how="left")
self.first_level_item_len = (
first_level_train.select("item_idx").distinct().count()
)
self.first_level_user_len = (
first_level_train.select("user_idx").distinct().count()
)
log.cache()
first_level_train.cache()
second_level_positive.cache()
self.cached_list.extend(
[log, first_level_train, second_level_positive]
)
if user_features is not None:
user_features.cache()
self.cached_list.append(user_features)
if item_features is not None:
item_features.cache()
self.cached_list.append(item_features)
self.first_level_item_features_transformer.fit(item_features)
self.first_level_user_features_transformer.fit(user_features)
first_level_item_features = cache_if_exists(
self.first_level_item_features_transformer.transform(item_features)
)
first_level_user_features = cache_if_exists(
self.first_level_user_features_transformer.transform(user_features)
)
for base_model in [
*self.first_level_models,
self.random_model,
self.fallback_model,
]:
base_model._fit_wrap(
log=first_level_train,
user_features=first_level_user_features.filter(
sf.col("user_idx") < self.first_level_user_len
),
item_features=first_level_item_features.filter(
sf.col("item_idx") < self.first_level_item_len
),
)
self.logger.info("Generate negative examples")
negatives_source = (
self.first_level_models[0]
if self.negatives_type == "first_level"
else self.random_model
)
first_level_candidates = self._get_first_level_candidates(
model=negatives_source,
log=first_level_train,
k=self.num_negatives,
users=log.select("user_idx").distinct(),
items=log.select("item_idx").distinct(),
user_features=first_level_user_features,
item_features=first_level_item_features,
log_to_filter=first_level_train,
).select("user_idx", "item_idx")
unpersist_if_exists(first_level_user_features)
unpersist_if_exists(first_level_item_features)
self.logger.info("Crate train dataset for second level")
second_level_train = (
first_level_candidates.join(
second_level_positive.select(
"user_idx", "item_idx"
).withColumn("target", sf.lit(1.0)),
on=["user_idx", "item_idx"],
how="left",
).fillna(0.0, subset="target")
).cache()
self.cached_list.append(second_level_train)
self.logger.info(
"Distribution of classes in second-level train dataset:/n %s",
(
second_level_train.groupBy("target")
.agg(sf.count(sf.col("target")).alias("count_for_class"))
.take(2)
),
)
self.features_processor.fit(
log=first_level_train,
user_features=user_features,
item_features=item_features,
)
self.logger.info("Adding features to second-level train dataset")
second_level_train_to_convert = self._add_features_for_second_level(
log_to_add_features=second_level_train,
log_for_first_level_models=first_level_train,
user_features=user_features,
item_features=item_features,
).cache()
self.cached_list.append(second_level_train_to_convert)
self.second_stage_model.fit(second_level_train_to_convert)
for dataframe in self.cached_list:
unpersist_if_exists(dataframe)
# pylint: disable=too-many-arguments
def _predict(
self,
log: DataFrame,
k: int,
users: DataFrame,
items: DataFrame,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
filter_seen_items: bool = True,
) -> DataFrame:
State().logger.debug(msg="Generating candidates to rerank")
first_level_user_features = cache_if_exists(
self.first_level_user_features_transformer.transform(user_features)
)
first_level_item_features = cache_if_exists(
self.first_level_item_features_transformer.transform(item_features)
)
candidates = self._get_first_level_candidates(
model=self.first_level_models[0],
log=log,
k=self.num_negatives,
users=users,
items=items,
user_features=first_level_user_features,
item_features=first_level_item_features,
log_to_filter=log,
).select("user_idx", "item_idx")
candidates_cached = candidates.cache()
unpersist_if_exists(first_level_user_features)
unpersist_if_exists(first_level_item_features)
self.logger.info("Adding features")
candidates_features = self._add_features_for_second_level(
log_to_add_features=candidates_cached,
log_for_first_level_models=log,
user_features=user_features,
item_features=item_features,
)
candidates_features.cache()
candidates_cached.unpersist()
self.logger.info(
"Generated %s candidates for %s users",
candidates_features.count(),
candidates_features.select("user_idx").distinct().count(),
)
return self.second_stage_model.predict(data=candidates_features, k=k)
def fit_predict(
self,
log: AnyDataFrame,
k: int,
users: Optional[Union[AnyDataFrame, Iterable]] = None,
items: Optional[Union[AnyDataFrame, Iterable]] = None,
user_features: Optional[AnyDataFrame] = None,
item_features: Optional[AnyDataFrame] = None,
filter_seen_items: bool = True,
) -> DataFrame:
"""
:param log: input DataFrame ``[user_id, item_id, timestamp, relevance]``
:param k: length of a recommendation list, must be smaller than the number of ``items``
:param users: users to get recommendations for
:param items: items to get recommendations for
:param user_features: user features``[user_id]`` + feature columns
:param item_features: item features``[item_id]`` + feature columns
:param filter_seen_items: flag to remove seen items from recommendations
:return: DataFrame ``[user_id, item_id, relevance]``
"""
self.fit(log, user_features, item_features)
return self.predict(
log,
k,
users,
items,
user_features,
item_features,
filter_seen_items,
)
@staticmethod
def _optimize_one_model(
model: BaseRecommender,
train: AnyDataFrame,
test: AnyDataFrame,
user_features: Optional[AnyDataFrame] = None,
item_features: Optional[AnyDataFrame] = None,
param_borders: Optional[Dict[str, List[Any]]] = None,
criterion: Metric = Precision(),
k: int = 10,
budget: int = 10,
new_study: bool = True,
):
params = model.optimize(
train,
test,
user_features,
item_features,
param_borders,
criterion,
k,
budget,
new_study,
)
return params
# pylint: disable=too-many-arguments, too-many-locals
def optimize(
self,
train: AnyDataFrame,
test: AnyDataFrame,
user_features: Optional[AnyDataFrame] = None,
item_features: Optional[AnyDataFrame] = None,
param_borders: Optional[List[Dict[str, List[Any]]]] = None,
criterion: Metric = Precision(),
k: int = 10,
budget: int = 10,
new_study: bool = True,
) -> Tuple[List[Dict[str, Any]], Optional[Dict[str, Any]]]:
"""
Optimize first level models with optuna.
:param train: train DataFrame ``[user_id, item_id, timestamp, relevance]``
:param test: test DataFrame ``[user_id, item_id, timestamp, relevance]``
:param user_features: user features ``[user_id]`` + feature columns
:param item_features: item features``[item_id]`` + feature columns
:param param_borders: list with param grids for first level models and a fallback model.
Empty dict skips optimization for that model.
Param grid is a dict ``{param: [low, high]}``.
:param criterion: metric to optimize
:param k: length of a recommendation list
:param budget: number of points to train each model
:param new_study: keep searching with previous study or start a new study
:return: list of dicts of parameters for the first level models and a dict of parameters for the fallback model
"""
number_of_models = len(self.first_level_models)
if self.fallback_model is not None:
number_of_models += 1
if number_of_models != len(param_borders):
raise ValueError(
"Provide search grid or None for every first level model"
)
first_level_user_features_tr = ToNumericFeatureTransformer()
first_level_user_features = first_level_user_features_tr.fit_transform(
user_features
)
first_level_item_features_tr = ToNumericFeatureTransformer()
first_level_item_features = first_level_item_features_tr.fit_transform(
item_features
)
first_level_user_features = cache_if_exists(first_level_user_features)
first_level_item_features = cache_if_exists(first_level_item_features)
params_found = []
for i, model in enumerate(self.first_level_models):
if param_borders[i] is None or (
isinstance(param_borders[i], dict) and param_borders[i]
):
self.logger.info(
"Optimizing first level model number %s, %s",
i,
model.__str__(),
)
params_found.append(
self._optimize_one_model(
model=model,
train=train,
test=test,
user_features=first_level_user_features,
item_features=first_level_item_features,
param_borders=param_borders[i],
criterion=criterion,
k=k,
budget=budget,
new_study=new_study,
)
)
else:
params_found.append(None)
if self.fallback_model is None or (
isinstance(param_borders[-1], dict) and not param_borders[-1]
):
return params_found, None
self.logger.info("Optimizing fallback-model")
fallback_params = self._optimize_one_model(
model=self.fallback_model,
train=train,
test=test,
user_features=first_level_user_features,
item_features=first_level_item_features,
param_borders=param_borders[-1],
criterion=criterion,
new_study=new_study,
)
unpersist_if_exists(first_level_item_features)
unpersist_if_exists(first_level_user_features)
return params_found, fallback_params | /replay_rec-0.11.0-py3-none-any.whl/replay/scenarios/two_stages/two_stages_scenario.py | 0.930592 | 0.397529 | two_stages_scenario.py | pypi |
import math
from typing import Any, Dict, List, Optional
from pyspark.sql import DataFrame
from pyspark.sql import functions as sf
from replay.metrics import Metric, NDCG
from replay.models.base_rec import NonPersonalizedRecommender
class UCB(NonPersonalizedRecommender):
"""Simple bandit model, which caclulate item relevance as upper confidence bound
(`UCB <https://medium.com/analytics-vidhya/multi-armed-bandit-analysis-of-upper-confidence-bound-algorithm-4b84be516047>`_)
for the confidence interval of true fraction of positive ratings.
Should be used in iterative (online) mode to achieve proper recommendation quality.
``relevance`` from log must be converted to binary 0-1 form.
.. math::
pred_i = ctr_i + \\sqrt{\\frac{c\\ln{n}}{n_i}}
:math:`pred_i` -- predicted relevance of item :math:`i`
:math:`c` -- exploration coefficient
:math:`n` -- number of interactions in log
:math:`n_i` -- number of interactions with item :math:`i`
>>> import pandas as pd
>>> data_frame = pd.DataFrame({"user_idx": [1, 2, 3, 3], "item_idx": [1, 2, 1, 2], "relevance": [1, 0, 0, 0]})
>>> from replay.utils import convert2spark
>>> data_frame = convert2spark(data_frame)
>>> model = UCB()
>>> model.fit(data_frame)
>>> model.predict(data_frame,k=2,users=[1,2,3,4], items=[1,2,3]
... ).toPandas().sort_values(["user_idx","relevance","item_idx"],
... ascending=[True,False,True]).reset_index(drop=True)
user_idx item_idx relevance
0 1 3 2.665109
1 1 2 1.177410
2 2 3 2.665109
3 2 1 1.677410
4 3 3 2.665109
5 4 3 2.665109
6 4 1 1.677410
"""
# attributes which are needed for refit method
full_count: int
items_counts_aggr: DataFrame
def __init__(
self,
exploration_coef: float = 2,
sample: bool = False,
seed: Optional[int] = None,
):
"""
:param exploration_coef: exploration coefficient
:param sample: flag to choose recommendation strategy.
If True, items are sampled with a probability proportional
to the calculated predicted relevance.
Could be changed after model training by setting the `sample` attribute.
:param seed: random seed. Provides reproducibility if fixed
"""
# pylint: disable=super-init-not-called
self.coef = exploration_coef
self.sample = sample
self.seed = seed
super().__init__(add_cold_items=True, cold_weight=1)
@property
def _init_args(self):
return {
"exploration_coef": self.coef,
"sample": self.sample,
"seed": self.seed,
}
# pylint: disable=too-many-arguments
def optimize(
self,
train: DataFrame,
test: DataFrame,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
param_borders: Optional[Dict[str, List[Any]]] = None,
criterion: Metric = NDCG(),
k: int = 10,
budget: int = 10,
new_study: bool = True,
) -> None:
"""
Searches best parameters with optuna.
:param train: train data
:param test: test data
:param user_features: user features
:param item_features: item features
:param param_borders: a dictionary with search borders, where
key is the parameter name and value is the range of possible values
``{param: [low, high]}``. In case of categorical parameters it is
all possible values: ``{cat_param: [cat_1, cat_2, cat_3]}``.
:param criterion: metric to use for optimization
:param k: recommendation list length
:param budget: number of points to try
:param new_study: keep searching with previous study or start a new study
:return: None, the UCB model has no hyperparameters to optimize
"""
self.logger.warning(
"The UCB model has only exploration coefficient parameter, "
"which cannot not be directly optimized"
)
def _fit(
self,
log: DataFrame,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
) -> None:
self._check_relevance(log)
# we save this dataframe for the refit() method
self.items_counts_aggr = log.groupby("item_idx").agg(
sf.sum("relevance").alias("pos"),
sf.count("relevance").alias("total"),
)
# we save this variable for the refit() method
self.full_count = log.count()
self._calc_item_popularity()
def refit(
self,
log: DataFrame,
) -> None:
"""Iteratively refit with new part of log.
:param log: historical log of interactions
``[user_idx, item_idx, timestamp, relevance]``
:return:
"""
self._check_relevance(log)
# aggregate new log part
items_counts_aggr = log.groupby("item_idx").agg(
sf.sum("relevance").alias("pos"),
sf.count("relevance").alias("total"),
)
# combine old and new aggregations and aggregate
self.items_counts_aggr = (
self.items_counts_aggr.union(items_counts_aggr)
.groupby("item_idx")
.agg(
sf.sum("pos").alias("pos"),
sf.sum("total").alias("total"),
)
)
# sum old and new log lengths
self.full_count += log.count()
self._calc_item_popularity()
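# Worked check of the score computed in _calc_item_popularity() below, using the
# data from the class docstring (exploration_coef = 2, full_count = 4):
#   item 1: pos = 1, total = 2 -> 1/2 + sqrt(2 * ln(4) / 2) ~= 1.677410
#   item 2: pos = 0, total = 2 -> 0   + sqrt(2 * ln(4) / 2) ~= 1.177410
#   unseen items get self.fill = 1 + sqrt(2 * ln(4)) ~= 2.665109,
# which matches the predicted relevance values in the doctest above.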
def _calc_item_popularity(self):
items_counts = self.items_counts_aggr.withColumn(
"relevance",
(
sf.col("pos") / sf.col("total")
+ sf.sqrt(
self.coef
* sf.log(sf.lit(self.full_count))
/ sf.col("total")
)
),
)
self.item_popularity = items_counts.drop("pos", "total")
self.item_popularity.cache().count()
self.fill = 1 + math.sqrt(self.coef * math.log(self.full_count)) | /replay_rec-0.11.0-py3-none-any.whl/replay/models/ucb.py | 0.926728 | 0.628635 | ucb.py | pypi |
from typing import Iterable, List, Optional, Union
import numpy as np
import pyspark.sql.functions as sf
from pyspark.sql import DataFrame
from pyspark.sql.window import Window
from replay.models.base_rec import NeighbourRec
class AssociationRulesItemRec(NeighbourRec):
"""
Item-to-item recommender based on association rules.
Calculate pairs confidence, lift and confidence_gain defined as
confidence(a, b)/confidence(!a, b) to get top-k associated items.
Predict items for users using lift, confidence or confidence_gain as the similarity metric.
The metric used as a similarity for prediction is specified explicitly
with the ``similarity_metric`` parameter during class initialization.
You can change the selection before calling `.predict()` or `.predict_pairs()`
by setting a new value for the `similarity_metric` attribute.
>>> import pandas as pd
>>> data_frame = pd.DataFrame({"user_idx": [1, 1, 2, 3], "item_idx": [1, 2, 2, 3], "relevance": [2, 1, 4, 1]})
>>> data_frame_for_predict = pd.DataFrame({"user_idx": [2], "item_idx": [1]})
>>> data_frame
user_idx item_idx relevance
0 1 1 2
1 1 2 1
2 2 2 4
3 3 3 1
>>> from replay.utils import convert2spark
>>> from replay.models import AssociationRulesItemRec
>>> data_frame = convert2spark(data_frame)
>>> data_frame_for_predict = convert2spark(data_frame_for_predict)
>>> model = AssociationRulesItemRec(min_item_count=1, min_pair_count=0)
>>> res = model.fit(data_frame)
>>> model.similarity.show()
+------------+------------+----------+----+---------------+
|item_idx_one|item_idx_two|confidence|lift|confidence_gain|
+------------+------------+----------+----+---------------+
| 1| 2| 1.0| 1.5| 2.0|
| 2| 1| 0.5| 1.5| Infinity|
+------------+------------+----------+----+---------------+
>>> model.similarity_metric = "confidence"
>>> model.predict_pairs(data_frame_for_predict, data_frame).show()
+--------+--------+---------+
|user_idx|item_idx|relevance|
+--------+--------+---------+
| 2| 1| 0.5|
+--------+--------+---------+
>>> model.similarity_metric = "lift"
>>> model.predict_pairs(data_frame_for_predict, data_frame).show()
+--------+--------+---------+
|user_idx|item_idx|relevance|
+--------+--------+---------+
| 2| 1| 1.5|
+--------+--------+---------+
The classical model uses item co-occurrence in sessions for
confidence, lift and confidence_gain calculation
but relevance could also be passed to the model, e.g.
if you want to apply time smoothing and treat old sessions as less important.
In this case all items in sessions should have the same relevance.
"""
can_predict_item_to_item = True
item_to_item_metrics: List[str] = ["lift", "confidence", "confidence_gain"]
similarity: DataFrame
can_change_metric = True
_search_space = {
"min_item_count": {"type": "int", "args": [3, 10]},
"min_pair_count": {"type": "int", "args": [3, 10]},
"num_neighbours": {"type": "int", "args": [300, 2000]},
"use_relevance": {"type": "categorical", "args": [True, False]},
"similarity_metric": {
"type": "categorical",
"args": ["confidence", "lift"],
},
}
# pylint: disable=too-many-arguments,
def __init__(
self,
session_col: Optional[str] = None,
min_item_count: int = 5,
min_pair_count: int = 5,
num_neighbours: Optional[int] = 1000,
use_relevance: bool = False,
similarity_metric: str = "confidence",
) -> None:
"""
:param session_col: name of column to group sessions.
Items are combined by the ``user_idx`` column if ``session_col`` is not defined.
:param min_item_count: items with fewer sessions will be filtered out
:param min_pair_count: pairs with fewer sessions will be filtered out
:param num_neighbours: maximal number of neighbours to save for each item
:param use_relevance: flag to use relevance values instead of co-occurrence count
If true, pair relevance in session is minimal relevance of item in pair.
Item relevance is sum of relevance in all sessions.
:param similarity_metric: the metric used as a similarity to calculate the prediction,
one of [``lift``, ``confidence``, ``confidence_gain``]
"""
self.session_col = (
session_col if session_col is not None else "user_idx"
)
self.min_item_count = min_item_count
self.min_pair_count = min_pair_count
self.num_neighbours = num_neighbours
self.use_relevance = use_relevance
self.similarity_metric = similarity_metric
@property
def _init_args(self):
return {
"session_col": self.session_col,
"min_item_count": self.min_item_count,
"min_pair_count": self.min_pair_count,
"num_neighbours": self.num_neighbours,
"use_relevance": self.use_relevance,
"similarity_metric": self.similarity_metric,
}
def _fit(
self,
log: DataFrame,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
) -> None:
"""
1) Filter log items by ``min_item_count`` threshold
2) Calculate items support, pairs confidence, lift and confidence_gain defined as
confidence(a, b)/confidence(!a, b).
"""
log = (
log.withColumn(
"relevance",
sf.col("relevance") if self.use_relevance else sf.lit(1),
)
.select(self.session_col, "item_idx", "relevance")
.distinct()
)
num_sessions = log.select(self.session_col).distinct().count()
frequent_items_cached = (
log.groupBy("item_idx")
.agg(
sf.count("item_idx").alias("item_count"),
sf.sum("relevance").alias("item_relevance"),
)
.filter(sf.col("item_count") >= self.min_item_count)
.drop("item_count")
).cache()
frequent_items_log = log.join(
frequent_items_cached.select("item_idx"), on="item_idx"
)
frequent_item_pairs = (
frequent_items_log.withColumnRenamed("item_idx", "antecedent")
.withColumnRenamed("relevance", "antecedent_rel")
.join(
frequent_items_log.withColumnRenamed(
self.session_col, self.session_col + "_cons"
)
.withColumnRenamed("item_idx", "consequent")
.withColumnRenamed("relevance", "consequent_rel"),
on=[
sf.col(self.session_col)
== sf.col(self.session_col + "_cons"),
sf.col("antecedent") < sf.col("consequent"),
],
)
# taking minimal relevance of item for pair
.withColumn(
"relevance",
sf.least(sf.col("consequent_rel"), sf.col("antecedent_rel")),
)
.drop(
self.session_col + "_cons", "consequent_rel", "antecedent_rel"
)
)
pairs_count = (
frequent_item_pairs.groupBy("antecedent", "consequent")
.agg(
sf.count("consequent").alias("pair_count"),
sf.sum("relevance").alias("pair_relevance"),
)
.filter(sf.col("pair_count") >= self.min_pair_count)
).drop("pair_count")
pairs_metrics = pairs_count.unionByName(
pairs_count.select(
sf.col("consequent").alias("antecedent"),
sf.col("antecedent").alias("consequent"),
sf.col("pair_relevance"),
)
)
pairs_metrics = pairs_metrics.join(
frequent_items_cached.withColumnRenamed(
"item_relevance", "antecedent_relevance"
),
on=[sf.col("antecedent") == sf.col("item_idx")],
).drop("item_idx")
pairs_metrics = pairs_metrics.join(
frequent_items_cached.withColumnRenamed(
"item_relevance", "consequent_relevance"
),
on=[sf.col("consequent") == sf.col("item_idx")],
).drop("item_idx")
pairs_metrics = pairs_metrics.withColumn(
"confidence",
sf.col("pair_relevance") / sf.col("antecedent_relevance"),
).withColumn(
"lift",
num_sessions
* sf.col("confidence")
/ sf.col("consequent_relevance"),
)
if self.num_neighbours is not None:
pairs_metrics = (
pairs_metrics.withColumn(
"similarity_order",
sf.row_number().over(
Window.partitionBy("antecedent").orderBy(
sf.col("lift").desc(),
sf.col("consequent").desc(),
)
),
)
.filter(sf.col("similarity_order") <= self.num_neighbours)
.drop("similarity_order")
)
self.similarity = pairs_metrics.withColumn(
"confidence_gain",
sf.when(
sf.col("consequent_relevance") - sf.col("pair_relevance") == 0,
sf.lit(np.inf),
).otherwise(
sf.col("confidence")
* (num_sessions - sf.col("antecedent_relevance"))
/ (sf.col("consequent_relevance") - sf.col("pair_relevance"))
),
).select(
sf.col("antecedent").alias("item_idx_one"),
sf.col("consequent").alias("item_idx_two"),
"confidence",
"lift",
"confidence_gain",
)
self.similarity.cache().count()
frequent_items_cached.unpersist()
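# Worked example of the measures computed above, using the data from the class
# docstring (sessions: {1, 2} for user 1, {2} for user 2, {3} for user 3,
# num_sessions = 3, use_relevance = False, so item counts are 1, 2, 1):
#   confidence(1 -> 2) = pair(1, 2) / count(1) = 1 / 1 = 1.0
#   lift(1 -> 2)       = num_sessions * confidence / count(2) = 3 * 1.0 / 2 = 1.5
#   confidence_gain(1 -> 2) = confidence * (num_sessions - count(1))
#                             / (count(2) - pair(1, 2)) = 1.0 * 2 / 1 = 2.0
#   confidence_gain(2 -> 1) is Infinity because count(1) - pair(1, 2) = 0,
# which matches the `model.similarity.show()` output in the class docstring.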
@property
def get_similarity(self):
"""
Return matrix with calculated confidence, lift and confidence gain.
:return: association rules measures calculated during ``fit`` stage
"""
return self.similarity
def get_nearest_items(
self,
items: Union[DataFrame, Iterable],
k: int,
metric: str = "lift",
candidates: Optional[Union[DataFrame, Iterable]] = None,
) -> DataFrame:
"""
Get k most similar items by the `metric` for each of the `items`.
:param items: spark dataframe or list of item ids to find neighbors
:param k: number of neighbors
:param metric: one of [``lift``, ``confidence``, ``confidence_gain``]
:param candidates: spark dataframe or list of items
to consider as similar, e.g. popular/new items. If None,
all items presented during model training are used.
:return: dataframe with the most similar items and their similarity values,
where a bigger value means greater similarity;
spark dataframe with columns ``[item_id, neighbour_item_id, similarity]``
"""
if metric not in self.item_to_item_metrics:
raise ValueError(
f"Select one of the valid distance metrics: "
f"{self.item_to_item_metrics}"
)
return self._get_nearest_items_wrap(
items=items,
k=k,
metric=metric,
candidates=candidates,
)
def _get_nearest_items(
self,
items: DataFrame,
metric: Optional[str] = None,
candidates: Optional[DataFrame] = None,
) -> DataFrame:
"""
Return metric for all available associated items filtered by `candidates`.
:param items: items to find associated
:param metric: one of [``lift``, ``confidence``, ``confidence_gain``]
:param candidates: items to consider as candidates
:return: associated items
"""
pairs_to_consider = self.similarity
if candidates is not None:
pairs_to_consider = self.similarity.join(
sf.broadcast(
candidates.withColumnRenamed("item_idx", "item_idx_two")
),
on="item_idx_two",
)
return pairs_to_consider.join(
sf.broadcast(items.withColumnRenamed("item_idx", "item_idx_one")),
on="item_idx_one",
)
@property
def _dataframes(self):
return {"similarity": self.similarity} | /replay_rec-0.11.0-py3-none-any.whl/replay/models/association_rules.py | 0.960777 | 0.63559 | association_rules.py | pypi |
from typing import Optional
import numpy as np
import pandas as pd
from pyspark.sql import DataFrame
from pyspark.sql import types as st
from scipy.sparse import csc_matrix
from sklearn.linear_model import ElasticNet
from replay.models.base_rec import NeighbourRec
from replay.session_handler import State
class SLIM(NeighbourRec):
"""`SLIM: Sparse Linear Methods for Top-N Recommender Systems
<http://glaros.dtc.umn.edu/gkhome/fetch/papers/SLIM2011icdm.pdf>`_"""
_search_space = {
"beta": {"type": "loguniform", "args": [1e-6, 5]},
"lambda_": {"type": "loguniform", "args": [1e-6, 2]},
}
def __init__(
self,
beta: float = 0.01,
lambda_: float = 0.01,
seed: Optional[int] = None,
):
"""
:param beta: l2 regularization
:param lambda_: l1 regularization
:param seed: random seed
"""
if beta < 0 or lambda_ <= 0:
raise ValueError("Invalid regularization parameters")
self.beta = beta
self.lambda_ = lambda_
self.seed = seed
@property
def _init_args(self):
return {"beta": self.beta, "lambda_": self.lambda_, "seed": self.seed}
def _fit(
self,
log: DataFrame,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
) -> None:
pandas_log = log.select("user_idx", "item_idx", "relevance").toPandas()
interactions_matrix = csc_matrix(
(pandas_log.relevance, (pandas_log.user_idx, pandas_log.item_idx)),
shape=(self._user_dim, self._item_dim),
)
similarity = (
State()
.session.createDataFrame(pandas_log.item_idx, st.IntegerType())
.withColumnRenamed("value", "item_idx_one")
)
alpha = self.beta + self.lambda_
l1_ratio = self.lambda_ / alpha
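# Mapping SLIM's (beta, lambda_) regularization onto sklearn's ElasticNet
# parameterization: sklearn minimizes (up to its 1 / (2 * n_samples) scaling of
# the squared loss)
#     alpha * l1_ratio * ||w||_1 + 0.5 * alpha * (1 - l1_ratio) * ||w||_2^2,
# so with alpha = beta + lambda_ and l1_ratio = lambda_ / alpha the l1 weight is
# lambda_ (sparsity) and the l2 weight is beta / 2 (shrinkage of item-item weights).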
regression = ElasticNet(
alpha=alpha,
l1_ratio=l1_ratio,
fit_intercept=False,
max_iter=5000,
random_state=self.seed,
selection="random",
positive=True,
)
def slim_column(pandas_df: pd.DataFrame) -> pd.DataFrame:
"""
fit similarity matrix with ElasticNet
:param pandas_df: pd.Dataframe
:return: pd.Dataframe
"""
idx = int(pandas_df["item_idx_one"][0])
column = interactions_matrix[:, idx]
column_arr = column.toarray().ravel()
interactions_matrix[
interactions_matrix[:, idx].nonzero()[0], idx
] = 0
regression.fit(interactions_matrix, column_arr)
interactions_matrix[:, idx] = column
good_idx = np.argwhere(regression.coef_ > 0).reshape(-1)
good_values = regression.coef_[good_idx]
similarity_row = {
"item_idx_one": good_idx,
"item_idx_two": idx,
"similarity": good_values,
}
return pd.DataFrame(data=similarity_row)
self.similarity = similarity.groupby("item_idx_one").applyInPandas(
slim_column,
"item_idx_one int, item_idx_two int, similarity double",
)
self.similarity.cache().count() | /replay_rec-0.11.0-py3-none-any.whl/replay/models/slim.py | 0.925188 | 0.485356 | slim.py | pypi |
from typing import Optional, Tuple
import numba as nb
import numpy as np
import pandas as pd
from pyspark.sql import DataFrame
from scipy.sparse import coo_matrix, csr_matrix
from replay.models.base_rec import NeighbourRec
from replay.session_handler import State
# pylint: disable=too-many-arguments, too-many-locals
@nb.njit(parallel=True)
def _main_iteration(
inv_matrix,
p_x,
mat_b,
mat_c,
mat_gamma,
rho,
eps_abs,
eps_rel,
lambda_1,
items_count,
threshold,
multiplicator,
): # pragma: no cover
# calculate mat_b
mat_b = p_x + np.dot(inv_matrix, rho * mat_c - mat_gamma)
vec_gamma = np.diag(mat_b) / np.diag(inv_matrix)
mat_b -= inv_matrix * vec_gamma
# calculate mat_c
prev_mat_c = mat_c
mat_c = mat_b + mat_gamma / rho
coef = lambda_1 / rho
mat_c = np.maximum(mat_c - coef, 0.0) - np.maximum(-mat_c - coef, 0.0)
# calculate mat_gamma
mat_gamma += rho * (mat_b - mat_c)
# calculate residuals
r_primal = np.linalg.norm(mat_b - mat_c)
r_dual = np.linalg.norm(-rho * (mat_c - prev_mat_c))
eps_primal = eps_abs * items_count + eps_rel * max(
np.linalg.norm(mat_b), np.linalg.norm(mat_c)
)
eps_dual = eps_abs * items_count + eps_rel * np.linalg.norm(mat_gamma)
if r_primal > threshold * r_dual:
rho *= multiplicator
elif threshold * r_primal < r_dual:
rho /= multiplicator
return (
mat_b,
mat_c,
mat_gamma,
rho,
r_primal,
r_dual,
eps_primal,
eps_dual,
)
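# Summary of the three ADMM blocks computed above (a reading aid, not extra logic):
#   B-step:    ridge-like closed-form update B = P_X + inv(X^T X + (lambda_2 + rho) I)
#              @ (rho * C - Gamma), with the diagonal of B forced to zero via vec_gamma;
#   C-step:    element-wise soft-thresholding of B + Gamma / rho with threshold
#              lambda_1 / rho, which applies the l1 penalty;
#   dual step: Gamma += rho * (B - C).
# rho is then rescaled by `multiplicator` whenever the primal and dual residuals
# become unbalanced (the standard adaptive-rho ADMM heuristic).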
# pylint: disable=too-many-instance-attributes
class ADMMSLIM(NeighbourRec):
"""`ADMM SLIM: Sparse Recommendations for Many Users
<http://www.cs.columbia.edu/~jebara/papers/wsdm20_ADMM.pdf>`_
This is a modification of the basic SLIM model.
Recommendations are improved with Alternating Direction Method of Multipliers.
"""
rho: float
threshold: float = 5
multiplicator: float = 2
eps_abs: float = 1.0e-3
eps_rel: float = 1.0e-3
max_iteration: int = 100
_mat_c: np.ndarray
_mat_b: np.ndarray
_mat_gamma: np.ndarray
_search_space = {
"lambda_1": {"type": "loguniform", "args": [1e-9, 50]},
"lambda_2": {"type": "loguniform", "args": [1e-9, 5000]},
}
def __init__(
self,
lambda_1: float = 5,
lambda_2: float = 5000,
seed: Optional[int] = None,
):
"""
:param lambda_1: l1 regularization term
:param lambda_2: l2 regularization term
:param seed: random seed
"""
if lambda_1 < 0 or lambda_2 <= 0:
raise ValueError("Invalid regularization parameters")
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.rho = lambda_2
self.seed = seed
@property
def _init_args(self):
return {
"lambda_1": self.lambda_1,
"lambda_2": self.lambda_2,
"seed": self.seed,
}
# pylint: disable=too-many-locals
def _fit(
self,
log: DataFrame,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
) -> None:
self.logger.debug("Fitting ADMM SLIM")
pandas_log = log.select("user_idx", "item_idx", "relevance").toPandas()
interactions_matrix = csr_matrix(
(
pandas_log["relevance"],
(pandas_log["user_idx"], pandas_log["item_idx"]),
),
shape=(self._user_dim, self._item_dim),
)
self.logger.debug("Gram matrix")
xtx = (interactions_matrix.T @ interactions_matrix).toarray()
self.logger.debug("Inverse matrix")
inv_matrix = np.linalg.inv(
xtx + (self.lambda_2 + self.rho) * np.eye(self._item_dim)
)
self.logger.debug("Main calculations")
p_x = inv_matrix @ xtx
mat_b, mat_c, mat_gamma = self._init_matrix(self._item_dim)
r_primal = np.linalg.norm(mat_b - mat_c)
r_dual = np.linalg.norm(self.rho * mat_c)
eps_primal, eps_dual = 0.0, 0.0
iteration = 0
while (
r_primal > eps_primal or r_dual > eps_dual
) and iteration < self.max_iteration:
iteration += 1
(
mat_b,
mat_c,
mat_gamma,
self.rho,
r_primal,
r_dual,
eps_primal,
eps_dual,
) = _main_iteration(
inv_matrix,
p_x,
mat_b,
mat_c,
mat_gamma,
self.rho,
self.eps_abs,
self.eps_rel,
self.lambda_1,
self._item_dim,
self.threshold,
self.multiplicator,
)
result_message = (
f"Iteration: {iteration}. primal gap: "
f"{r_primal - eps_primal:.5}; dual gap: "
f" {r_dual - eps_dual:.5}; rho: {self.rho}"
)
self.logger.debug(result_message)
mat_c_sparse = coo_matrix(mat_c)
mat_c_pd = pd.DataFrame(
{
"item_idx_one": mat_c_sparse.row.astype(np.int32),
"item_idx_two": mat_c_sparse.col.astype(np.int32),
"similarity": mat_c_sparse.data,
}
)
self.similarity = State().session.createDataFrame(
mat_c_pd,
schema="item_idx_one int, item_idx_two int, similarity double",
)
self.similarity.cache().count()
def _init_matrix(
self, size: int
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Matrix initialization"""
if self.seed is not None:
np.random.seed(self.seed)
mat_b = np.random.rand(size, size) # type: ignore
mat_c = np.random.rand(size, size) # type: ignore
mat_gamma = np.random.rand(size, size) # type: ignore
return mat_b, mat_c, mat_gamma | /replay_rec-0.11.0-py3-none-any.whl/replay/models/admm_slim.py | 0.898401 | 0.456591 | admm_slim.py | pypi |
from abc import abstractmethod
from typing import Any, Dict, Optional
import numpy as np
import pandas as pd
from pyspark.sql import DataFrame
from pyspark.sql import functions as sf
import torch
from torch import nn
from torch.optim.optimizer import Optimizer
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
from replay.constants import REC_SCHEMA
from replay.models.base_rec import Recommender
from replay.session_handler import State
class TorchRecommender(Recommender):
"""Base class for neural recommenders"""
model: Any
device: torch.device
def __init__(self):
self.logger.info(
"The model is neural network with non-distributed training"
)
self.checkpoint_path = State().session.conf.get("spark.local.dir")
self.device = State().device
def _run_train_step(self, batch, optimizer):
self.model.train()
optimizer.zero_grad()
model_result = self._batch_pass(batch, self.model)
loss = self._loss(**model_result)
loss.backward()
optimizer.step()
return loss.item()
def _run_validation(
self, valid_data_loader: DataLoader, epoch: int
) -> float:
self.model.eval()
valid_loss = 0
with torch.no_grad():
for batch in valid_data_loader:
model_result = self._batch_pass(batch, self.model)
valid_loss += self._loss(**model_result)
valid_loss /= len(valid_data_loader)
valid_debug_message = f"""Epoch[{epoch}] validation
average loss: {valid_loss:.5f}"""
self.logger.debug(valid_debug_message)
return valid_loss.item()
# pylint: disable=too-many-arguments
def train(
self,
train_data_loader: DataLoader,
valid_data_loader: DataLoader,
optimizer: Optimizer,
lr_scheduler: ReduceLROnPlateau,
epochs: int,
model_name: str,
) -> None:
"""
Run training loop
:param train_data_loader: data loader for training
:param valid_data_loader: data loader for validation
:param optimizer: optimizer
:param lr_scheduler: scheduler used to decrease learning rate
:param epochs: num training epochs
:param model_name: model name for checkpoint saving
:return:
"""
best_valid_loss = np.inf
for epoch in range(epochs):
for batch in train_data_loader:
train_loss = self._run_train_step(batch, optimizer)
train_debug_message = f"""Epoch[{epoch}] current loss:
{train_loss:.5f}"""
self.logger.debug(train_debug_message)
valid_loss = self._run_validation(valid_data_loader, epoch)
lr_scheduler.step(valid_loss)
if valid_loss < best_valid_loss:
best_checkpoint = "/".join(
[
self.checkpoint_path,
f"/best_{model_name}_{epoch+1}_loss={valid_loss}.pt",
]
)
self._save_model(best_checkpoint)
best_valid_loss = valid_loss
self._load_model(best_checkpoint)
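# Illustrative sketch (not part of the library) of how a subclass is expected to
# drive this loop; the data loaders and hyperparameter values are assumptions:
#
#     optimizer = torch.optim.Adam(self.model.parameters(), lr=1e-3)
#     lr_scheduler = ReduceLROnPlateau(optimizer, patience=3)
#     self.train(train_loader, valid_loader, optimizer, lr_scheduler,
#                epochs=10, model_name=str(self))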
@abstractmethod
def _batch_pass(self, batch, model) -> Dict[str, Any]:
"""
Apply model to a single batch.
:param batch: data batch
:param model: model object
:return: dictionary used to calculate loss.
"""
@abstractmethod
def _loss(self, **kwargs) -> torch.Tensor:
"""
Returns loss value
:param **kwargs: dictionary used to calculate loss
:return: 1x1 tensor
"""
# pylint: disable=too-many-arguments
def _predict(
self,
log: DataFrame,
k: int,
users: DataFrame,
items: DataFrame,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
filter_seen_items: bool = True,
) -> DataFrame:
items_consider_in_pred = items.toPandas()["item_idx"].values
items_count = self._item_dim
model = self.model.cpu()
agg_fn = self._predict_by_user
def grouped_map(pandas_df: pd.DataFrame) -> pd.DataFrame:
return agg_fn(
pandas_df, model, items_consider_in_pred, k, items_count
)[["user_idx", "item_idx", "relevance"]]
self.logger.debug("Predict started")
# do not apply map on cold users for MultVAE predict
join_type = "inner" if str(self) == "MultVAE" else "left"
recs = (
users.join(log, how=join_type, on="user_idx")
.select("user_idx", "item_idx")
.groupby("user_idx")
.applyInPandas(grouped_map, REC_SCHEMA)
)
return recs
def _predict_pairs(
self,
pairs: DataFrame,
log: Optional[DataFrame] = None,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
) -> DataFrame:
items_count = self._item_dim
model = self.model.cpu()
agg_fn = self._predict_by_user_pairs
users = pairs.select("user_idx").distinct()
def grouped_map(pandas_df: pd.DataFrame) -> pd.DataFrame:
return agg_fn(pandas_df, model, items_count)[
["user_idx", "item_idx", "relevance"]
]
self.logger.debug("Calculate relevance for user-item pairs")
user_history = (
users.join(log, how="inner", on="user_idx")
.groupBy("user_idx")
.agg(sf.collect_list("item_idx").alias("item_idx_history"))
)
user_pairs = pairs.groupBy("user_idx").agg(
sf.collect_list("item_idx").alias("item_idx_to_pred")
)
full_df = user_pairs.join(user_history, on="user_idx", how="inner")
recs = full_df.groupby("user_idx").applyInPandas(
grouped_map, REC_SCHEMA
)
return recs
@staticmethod
@abstractmethod
def _predict_by_user(
pandas_df: pd.DataFrame,
model: nn.Module,
items_np: np.ndarray,
k: int,
item_count: int,
) -> pd.DataFrame:
"""
Calculate predictions.
:param pandas_df: DataFrame with user-item interactions ``[user_idx, item_idx]``
:param model: trained model
:param items_np: items available for recommendations
:param k: length of recommendation list
:param item_count: total number of items
:return: DataFrame ``[user_idx , item_idx , relevance]``
"""
@staticmethod
@abstractmethod
def _predict_by_user_pairs(
pandas_df: pd.DataFrame,
model: nn.Module,
item_count: int,
) -> pd.DataFrame:
"""
Get relevance for provided pairs
:param pandas_df: DataFrame with rated items and items that need prediction
``[user_idx, item_idx_history, item_idx_to_pred]``
:param model: trained model
:param item_count: total number of items
:return: DataFrame ``[user_idx , item_idx , relevance]``
"""
def load_model(self, path: str) -> None:
"""
Load model from file
:param path: path to model
:return:
"""
self.logger.debug("-- Loading model from file")
self.model.load_state_dict(torch.load(path))
def _save_model(self, path: str) -> None:
torch.save(self.model.state_dict(), path) | /replay_rec-0.11.0-py3-none-any.whl/replay/models/base_torch_rec.py | 0.940216 | 0.368661 | base_torch_rec.py | pypi |
import tqdm
from collections import defaultdict
from pathlib import Path
from typing import Any, Dict, Optional
import numpy as np
import pandas as pd
import scipy.sparse as sp
import torch
from pandas import DataFrame
from pyspark.sql import functions as sf
from pytorch_ranger import Ranger
from torch import nn
from replay.constants import REC_SCHEMA
from replay.models.base_torch_rec import Recommender
def to_np(tensor: torch.Tensor) -> np.ndarray:
"""Converts torch.Tensor to numpy."""
return tensor.detach().cpu().numpy()
class ReplayBuffer:
"""
Stores transitions for training RL model.
Usually transition is (state, action, reward, next_state).
In this implementation we compute state using embedding of user
and embeddings of `memory_size` latest relevant items.
Therefore this ReplayBuffer stores (user, memory) instead of the state.
"""
# pylint: disable=too-many-arguments
def __init__(self, capacity: int = 1000000, prob_alpha: float = 0.6):
self.prob_alpha = prob_alpha
self.capacity = capacity
self.buffer = {
"user": [],
"memory": [],
"action": [],
"reward": [],
"next_user": [],
"next_memory": [],
"done": [],
}
self.pos = 0
self.priorities = np.zeros((capacity,), dtype=np.float32)
def push(self, user, memory, action, reward, next_user, next_memory, done):
"""Add transition to buffer."""
max_priority = (
self.priorities.max() if len(self.buffer["user"]) > 0 else 1.0
)
if len(self.buffer["user"]) < self.capacity:
self.buffer["user"].append(user)
self.buffer["memory"].append(memory)
self.buffer["action"].append(action)
self.buffer["reward"].append(reward)
self.buffer["next_user"].append(next_user)
self.buffer["next_memory"].append(next_memory)
self.buffer["done"].append(done)
else:
self.buffer["user"][self.pos] = user
self.buffer["memory"][self.pos] = memory
self.buffer["action"][self.pos] = action
self.buffer["reward"][self.pos] = reward
self.buffer["next_user"][self.pos] = next_user
self.buffer["next_memory"][self.pos] = next_memory
self.buffer["done"][self.pos] = done
self.priorities[self.pos] = max_priority
self.pos = (self.pos + 1) % self.capacity
# pylint: disable=too-many-locals
def sample(self, batch_size, beta=0.4):
"""Sample transition from buffer."""
current_buffer_len = len(self.buffer["user"])
if current_buffer_len == self.capacity:
priorities = self.priorities
else:
priorities = self.priorities[: self.pos]
probs = priorities ** self.prob_alpha
probs /= probs.sum()
indices = np.random.choice(current_buffer_len, batch_size, p=probs)
weights = (current_buffer_len * probs[indices]) ** (-beta)
weights /= weights.max()
weights = np.array(weights, dtype=np.float32)
return {
"user": np.concatenate([self.buffer["user"][i] for i in indices]),
"memory": np.concatenate(
[self.buffer["memory"][i] for i in indices]
),
"action": np.array([self.buffer["action"][i] for i in indices]),
"reward": np.array([self.buffer["reward"][i] for i in indices]),
"next_user": np.concatenate(
[self.buffer["next_user"][i] for i in indices]
),
"next_memory": np.concatenate(
[self.buffer["next_memory"][i] for i in indices]
),
"done": np.array([self.buffer["done"][i] for i in indices]),
}
def __len__(self):
return len(self.buffer["user"])
# pylint: disable=too-many-instance-attributes,too-many-arguments,not-callable
class OUNoise:
"""https://github.com/vitchyr/rlkit/blob/master/rlkit/exploration_strategies/ou_strategy.py"""
def __init__(
self,
action_dim,
theta=0.15,
max_sigma=0.4,
min_sigma=0.4,
noise_type="ou",
decay_period=10,
):
self.theta = theta
self.sigma = max_sigma
self.max_sigma = max_sigma
self.min_sigma = min_sigma
self.decay_period = decay_period
self.action_dim = action_dim
self.noise_type = noise_type
self.state = np.zeros(action_dim)
def reset(self):
"""Fill state with zeros."""
self.state = np.zeros(self.action_dim)
def evolve_state(self):
"""Perform OU discrete approximation step"""
x = self.state
d_x = -self.theta * x + self.sigma * np.random.randn(self.action_dim)
self.state = x + d_x
return self.state
def get_action(self, action, step=0):
"""Get state after applying noise."""
action = to_np(action)
self.sigma = self.max_sigma - (self.max_sigma - self.min_sigma) * min(
1.0, step / self.decay_period
)
if self.noise_type == "ou":
ou_state = self.evolve_state()
return torch.tensor([action + ou_state]).float()
elif self.noise_type == "gauss":
return torch.tensor(
[self.sigma * np.random.randn(self.action_dim)]
).float()
else:
raise ValueError("noise_type must be one of ['ou', 'gauss']")
class ActorDRR(nn.Module):
"""
DDPG Actor model (based on `DRR
<https://arxiv.org/pdf/1802.05814.pdf>`_).
"""
def __init__(
self, user_num, item_num, embedding_dim, hidden_dim, memory_size
):
super().__init__()
self.layers = nn.Sequential(
nn.Linear(embedding_dim * 3, hidden_dim),
nn.LayerNorm(hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, embedding_dim),
)
self.state_repr = StateReprModule(
user_num, item_num, embedding_dim, memory_size
)
self.initialize()
self.environment = Env(item_num, user_num, memory_size)
def initialize(self):
"""weight init"""
for layer in self.layers:
if isinstance(layer, nn.Linear):
nn.init.kaiming_uniform_(layer.weight)
def forward(self, user, memory):
"""
:param user: user batch
:param memory: memory batch
:return: output, vector of the size `embedding_dim`
"""
state = self.state_repr(user, memory)
return self.layers(state)
# pylint: disable=not-callable
def get_action(self, action_emb, items, return_scores=False):
"""
:param action_emb: output of the .forward()
:param items: items batch
:param return_scores: whether to return scores of items
:return: output, prediction (and scores if return_scores)
"""
items = torch.tensor(items).long()
scores = torch.bmm(
self.state_repr.item_embeddings(items).unsqueeze(0),
action_emb.T.unsqueeze(0),
).squeeze(0)
if return_scores:
return scores, torch.gather(items, 0, scores.argmax(0))
else:
return torch.gather(items, 0, scores.argmax(0))
class CriticDRR(nn.Module):
"""
DDPG Critic model (based on `DRR
<https://arxiv.org/pdf/1802.05814.pdf>`_).
"""
def __init__(self, state_repr_dim, action_emb_dim, hidden_dim):
super().__init__()
self.layers = nn.Sequential(
nn.Linear(state_repr_dim + action_emb_dim, hidden_dim),
nn.LayerNorm(hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, 1),
)
self.initialize()
def initialize(self):
"""weight init"""
for layer in self.layers:
if isinstance(layer, nn.Linear):
nn.init.kaiming_uniform_(layer.weight)
def forward(self, state, action):
"""
:param state: state batch
:param action: action batch
:return: x, Q values for given states and actions
"""
x = torch.cat([state, action], 1)
x = self.layers(x)
return x
# pylint: disable=too-many-instance-attributes, not-callable
class Env:
"""
RL environment for recommender systems.
Keep users' latest relevant items (memory).
"""
matrix: np.array
related_items: np.array
nonrelated_items: np.array
available_items: list
user_id: int
num_rele: int
def __init__(self, item_num, user_num, memory_size):
"""
Initialize each user's memory as ``[item_num] * memory_size``.
``item_num`` is the padding index in StateReprModule,
so it results in zero embeddings.
:param item_num: number of items
:param user_num: number of users
:param memory_size: maximum number of items in memory
Other attributes (set in ``__init__``, ``update_env`` and ``reset``):
``memory`` -- np.array with users' latest relevant items,
``matrix`` -- sparse matrix with user-item ratings,
``user_id`` -- id of the current user,
``related_items`` -- relevant items for ``user_id``,
``nonrelated_items`` -- sampled non-relevant items for ``user_id``,
``num_rele`` -- number of ``related_items``,
``available_items`` -- candidate items not yet recommended.
"""
self.item_count = item_num
self.user_count = user_num
self.memory_size = memory_size
self.memory = np.ones([user_num, memory_size]) * item_num
def update_env(self, matrix=None, item_count=None):
"""Update some of Env attributes."""
if item_count is not None:
self.item_count = item_count
if matrix is not None:
self.matrix = matrix.copy()
def reset(self, user_id):
"""
:param user_id: user_id number
:return: user, memory
"""
self.user_id = user_id
self.related_items = np.argwhere(self.matrix[self.user_id] > 0)[:, 1]
self.num_rele = len(self.related_items)
self.nonrelated_items = np.random.choice(
list(set(range(self.item_count)) - set(self.related_items)),
self.num_rele,
)
self.available_items = list(np.zeros(self.num_rele * 2))
self.available_items[::2] = self.related_items
self.available_items[1::2] = self.nonrelated_items
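# `available_items` now alternates relevant and sampled non-relevant items,
# giving the agent a balanced candidate pool to choose from.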
return torch.tensor([self.user_id]), torch.tensor(
self.memory[[self.user_id], :]
)
def step(self, action, action_emb=None, buffer=None):
"""Execute step and return (user, memory) for new state"""
initial_user = self.user_id
initial_memory = self.memory[[initial_user], :]
reward = float(to_np(action[0]) in self.related_items)
if reward:
self.memory[self.user_id] = list(self.memory[self.user_id][1:]) + [
action
]
self.available_items.remove(to_np(action[0]))
if buffer is not None:
buffer.push(
np.array([initial_user]),
np.array(initial_memory),
to_np(action_emb[0]),
np.array([reward]),
np.array([self.user_id]),
self.memory[[self.user_id], :],
np.array([reward]),
)
return (
torch.tensor([self.user_id]),
torch.tensor(self.memory[[self.user_id], :]),
reward,
0,
)
class StateReprModule(nn.Module):
"""
Compute state for RL environment. Based on `DRR paper
<https://arxiv.org/pdf/1810.12027.pdf>`_
The state is computed as a concatenation of the user embedding,
a weighted average pooling of the `memory_size` latest relevant items,
and their element-wise product.
"""
def __init__(
self,
user_num,
item_num,
embedding_dim,
memory_size,
):
super().__init__()
self.user_embeddings = nn.Embedding(user_num, embedding_dim)
self.item_embeddings = nn.Embedding(
item_num + 1, embedding_dim, padding_idx=int(item_num)
)
self.drr_ave = torch.nn.Conv1d(
in_channels=memory_size, out_channels=1, kernel_size=1
)
self.initialize()
def initialize(self):
"""weight init"""
nn.init.normal_(self.user_embeddings.weight, std=0.01)
nn.init.normal_(self.item_embeddings.weight, std=0.01)
# zero the padding row after the normal init so the padding index
# maps to a zero embedding
self.item_embeddings.weight.data[-1].zero_()
nn.init.uniform_(self.drr_ave.weight)
self.drr_ave.bias.data.zero_()
def forward(self, user, memory):
"""
:param user: user batch
:param memory: memory batch
:return: vector of dimension 3 * embedding_dim
"""
user_embedding = self.user_embeddings(user.long())
item_embeddings = self.item_embeddings(memory.long())
drr_ave = self.drr_ave(item_embeddings).squeeze(1)
return torch.cat(
(user_embedding, user_embedding * drr_ave, drr_ave), 1
)
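# Illustrative sketch (not part of the library): shape bookkeeping for the
# state representation above. For a batch of B users, each with `memory_size`
# item ids in memory, the module returns a (B, 3 * embedding_dim) state:
# user embedding, its element-wise product with the weighted average of the
# memory item embeddings, and that weighted average itself.
def _state_repr_shape_sketch():
    user_num, item_num, embedding_dim, memory_size = 4, 10, 8, 5
    state_repr = StateReprModule(user_num, item_num, embedding_dim, memory_size)
    user = torch.tensor([0, 1])                      # batch of two users
    memory = torch.full((2, memory_size), item_num)  # all-padding memory
    state = state_repr(user, memory)
    assert state.shape == (2, 3 * embedding_dim)
    return state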
# pylint: disable=too-many-arguments
class DDPG(Recommender):
"""
`Deep Deterministic Policy Gradient
<https://arxiv.org/pdf/1509.02971.pdf>`_
This implementation adds a configurable exploration noise strategy
(Ornstein-Uhlenbeck or Gaussian, see ``noise_type``).
"""
batch_size: int = 512
embedding_dim: int = 8
hidden_dim: int = 16
value_lr: float = 1e-5
value_decay: float = 1e-5
policy_lr: float = 1e-5
policy_decay: float = 1e-6
gamma: float = 0.8
memory_size: int = 5
min_value: int = -10
max_value: int = 10
buffer_size: int = 1000000
_search_space = {
"noise_sigma": {"type": "uniform", "args": [0.1, 0.6]},
"noise_theta": {"type": "uniform", "args": [0.05, 0.15]},
}
checkpoint_step: int = 10000
replay_buffer: ReplayBuffer
ou_noise: OUNoise
model: ActorDRR
target_model: ActorDRR
value_net: CriticDRR
target_value_net: CriticDRR
policy_optimizer: Ranger
value_optimizer: Ranger
# pylint: disable=too-many-arguments
def __init__(
self,
noise_sigma: float = 0.2,
noise_theta: float = 0.05,
noise_type: str = "ou",
seed: int = 9,
user_num: int = 10,
item_num: int = 10,
log_dir: str = "logs/tmp",
exact_embeddings_size=True,
):
"""
:param noise_sigma: Ornstein-Uhlenbeck noise sigma value
:param noise_theta: Ornstein-Uhlenbeck noise theta value
:param noise_type: type of action noise, one of ["ou", "gauss"]
:param seed: random seed
:param user_num: number of users, specify when using ``exact_embeddings_size``
:param item_num: number of items, specify when using ``exact_embeddings_size``
:param log_dir: dir to save models
:param exact_embeddings_size: flag whether to infer ``user_num``/``item_num`` from the training log
"""
super().__init__()
np.random.seed(seed)
torch.manual_seed(seed)
self.noise_theta = noise_theta
self.noise_sigma = noise_sigma
self.noise_type = noise_type
self.seed = seed
self.user_num = user_num
self.item_num = item_num
self.log_dir = Path(log_dir)
self.exact_embeddings_size = exact_embeddings_size
@property
def _init_args(self):
return {
"noise_sigma": self.noise_sigma,
"noise_theta": self.noise_theta,
"noise_type": self.noise_type,
"seed": self.seed,
"user_num": self.user_num,
"item_num": self.item_num,
"log_dir": self.log_dir,
"exact_embeddings_size": self.exact_embeddings_size,
}
# pylint: disable=too-many-locals
def _batch_pass(self, batch: dict) -> tuple:  # (policy_loss, value_loss)
user = torch.FloatTensor(batch["user"])
memory = torch.FloatTensor(batch["memory"])
action = torch.FloatTensor(batch["action"])
reward = torch.FloatTensor(batch["reward"])
next_user = torch.FloatTensor(batch["next_user"])
next_memory = torch.FloatTensor(batch["next_memory"])
done = torch.FloatTensor(batch["done"])
state = self.model.state_repr(user, memory)
policy_loss = self.value_net(state, self.model(user, memory))
policy_loss = -policy_loss.mean()
next_state = self.model.state_repr(next_user, next_memory)
next_action = self.target_model(next_user, next_memory)
target_value = self.target_value_net(next_state, next_action.detach())
expected_value = reward + (1.0 - done) * self.gamma * target_value
expected_value = torch.clamp(
expected_value, self.min_value, self.max_value
)
value = self.value_net(state, action)
value_loss = (value - expected_value.detach()).squeeze(1).pow(2).mean()
return policy_loss, value_loss
@staticmethod
# pylint: disable=not-callable
def _predict_pairs_inner(
model,
user_idx: int,
items_np: np.ndarray,
) -> DataFrame:
with torch.no_grad():
user_batch = torch.LongTensor([user_idx])
action_emb = model(
user_batch,
torch.tensor(model.environment.memory)[
to_np(user_batch).astype(int), :
],
)
user_recs, _ = model.get_action(action_emb, items_np, True)
user_recs = user_recs.squeeze(1)
return pd.DataFrame(
{
"user_idx": user_recs.shape[0] * [user_idx],
"item_idx": items_np,
"relevance": user_recs,
}
)
# pylint: disable=too-many-arguments
def _predict(
self,
log: DataFrame,
k: int,
users: DataFrame,
items: DataFrame,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
filter_seen_items: bool = True,
) -> DataFrame:
items_consider_in_pred = items.toPandas()["item_idx"].values
model = self.model.cpu()
def grouped_map(pandas_df: pd.DataFrame) -> pd.DataFrame:
return DDPG._predict_pairs_inner(
model=model,
user_idx=pandas_df["user_idx"][0],
items_np=items_consider_in_pred,
)[["user_idx", "item_idx", "relevance"]]
self.logger.debug("Predict started")
recs = (
users.join(log, how="left", on="user_idx")
.select("user_idx", "item_idx")
.groupby("user_idx")
.applyInPandas(grouped_map, REC_SCHEMA)
)
return recs
def _predict_pairs(
self,
pairs: DataFrame,
log: Optional[DataFrame] = None,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
) -> DataFrame:
model = self.model.cpu()
def grouped_map(pandas_df: pd.DataFrame) -> pd.DataFrame:
return DDPG._predict_pairs_inner(
model=model,
user_idx=pandas_df["user_idx"][0],
items_np=np.array(pandas_df["item_idx_to_pred"][0]),
)
self.logger.debug("Calculate relevance for user-item pairs")
recs = (
pairs.groupBy("user_idx")
.agg(sf.collect_list("item_idx").alias("item_idx_to_pred"))
.join(
log.select("user_idx").distinct(), on="user_idx", how="inner"
)
.groupby("user_idx")
.applyInPandas(grouped_map, REC_SCHEMA)
)
return recs
@staticmethod
def _get_beta(idx, beta_start=0.4, beta_steps=100000):
return min(1.0, beta_start + idx * (1.0 - beta_start) / beta_steps)
@staticmethod
def _preprocess_log(log):
"""
:param log: pyspark DataFrame
"""
data = log.toPandas()[["user_idx", "item_idx", "relevance"]]
train_data = data.values.tolist()
user_num = data["user_idx"].max() + 1
item_num = data["item_idx"].max() + 1
train_mat = defaultdict(float)
for user, item, rel in train_data:
train_mat[user, item] = rel
train_matrix = sp.dok_matrix((user_num, item_num), dtype=np.float32)
dict.update(train_matrix, train_mat)
appropriate_users = data["user_idx"].unique()
return train_matrix, user_num, item_num, appropriate_users
def _get_batch(self, step: int = 0) -> dict:
beta = self._get_beta(step)
batch = self.replay_buffer.sample(self.batch_size, beta)
return batch
def _run_train_step(self, batch: dict) -> None:
policy_loss, value_loss = self._batch_pass(batch)
self.policy_optimizer.zero_grad()
policy_loss.backward(retain_graph=True)
self.policy_optimizer.step()
self.value_optimizer.zero_grad()
value_loss.backward()
self.value_optimizer.step()
self._target_update(self.target_value_net, self.value_net)
self._target_update(self.target_model, self.model)
@staticmethod
def _target_update(target_net, net, soft_tau=1e-3):
for target_param, param in zip(
target_net.parameters(), net.parameters()
):
target_param.data.copy_(
target_param.data * (1.0 - soft_tau) + param.data * soft_tau
)
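# The update above is the standard Polyak averaging used in DDPG:
# theta_target <- (1 - soft_tau) * theta_target + soft_tau * theta.
# With soft_tau=1 (used in _init_inner and _load_model) it simply copies
# the online network into the target network.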
def _init_inner(self):
self.replay_buffer = ReplayBuffer(self.buffer_size)
self.ou_noise = OUNoise(
self.embedding_dim,
theta=self.noise_theta,
max_sigma=self.noise_sigma,
min_sigma=self.noise_sigma,
noise_type=self.noise_type,
)
self.model = ActorDRR(
self.user_num,
self.item_num,
self.embedding_dim,
self.hidden_dim,
self.memory_size,
)
self.target_model = ActorDRR(
self.user_num,
self.item_num,
self.embedding_dim,
self.hidden_dim,
self.memory_size,
)
self.value_net = CriticDRR(
self.embedding_dim * 3, self.embedding_dim, self.hidden_dim
)
self.target_value_net = CriticDRR(
self.embedding_dim * 3, self.embedding_dim, self.hidden_dim
)
self._target_update(self.target_value_net, self.value_net, soft_tau=1)
self._target_update(self.target_model, self.model, soft_tau=1)
self.policy_optimizer = Ranger(
self.model.parameters(),
lr=self.policy_lr,
weight_decay=self.policy_decay,
)
self.value_optimizer = Ranger(
self.value_net.parameters(),
lr=self.value_lr,
weight_decay=self.value_decay,
)
def _fit(
self,
log: DataFrame,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
) -> None:
train_matrix, user_num, item_num, users = self._preprocess_log(log)
if self.exact_embeddings_size:
self.user_num = user_num
self.item_num = item_num
self._init_inner()
self.model.environment.update_env(matrix=train_matrix)
users = np.random.permutation(users)
self.logger.debug("Training DDPG")
self.train(users)
def train(self, users: np.array) -> None:
"""
Run training loop
:param users: array with users for training
:return:
"""
self.log_dir.mkdir(parents=True, exist_ok=True)
step = 0
for user in tqdm.auto.tqdm(users):
user, memory = self.model.environment.reset(user)
self.ou_noise.reset()
for user_step in range(len(self.model.environment.related_items)):
action_emb = self.model(user, memory)
action_emb = self.ou_noise.get_action(action_emb[0], user_step)
action = self.model.get_action(
action_emb,
self.model.environment.available_items,
)
user, memory, _, _ = self.model.environment.step(
action, action_emb, self.replay_buffer
)
if len(self.replay_buffer) > self.batch_size:
batch = self._get_batch(step)
self._run_train_step(batch)
if step % self.checkpoint_step == 0 and step > 0:
self._save_model(self.log_dir / f"model_{step}.pt")
step += 1
self._save_model(self.log_dir / "model_final.pt")
def _save_model(self, path: str) -> None:
self.logger.debug(
"-- Saving model to file (user_num=%d, item_num=%d)",
self.user_num,
self.item_num,
)
torch.save(
{
"actor": self.model.state_dict(),
"critic": self.value_net.state_dict(),
"memory": self.model.environment.memory,
"policy_optimizer": self.policy_optimizer.state_dict(),
"value_optimizer": self.value_optimizer.state_dict(),
},
path,
)
def _load_model(self, path: str) -> None:
self.logger.debug("-- Loading model from file")
self._init_inner()
checkpoint = torch.load(path)
self.model.load_state_dict(checkpoint["actor"])
self.value_net.load_state_dict(checkpoint["critic"])
self.model.environment.memory = checkpoint["memory"]
self.policy_optimizer.load_state_dict(checkpoint["policy_optimizer"])
self.value_optimizer.load_state_dict(checkpoint["value_optimizer"])
self._target_update(self.target_value_net, self.value_net, soft_tau=1)
self._target_update(self.target_model, self.model, soft_tau=1) | /replay_rec-0.11.0-py3-none-any.whl/replay/models/ddpg.py | 0.937925 | 0.397003 | ddpg.py | pypi |
from typing import List, Optional
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from pyspark.sql import DataFrame
from sklearn.model_selection import train_test_split
from torch import LongTensor, Tensor, nn
from torch.optim import Adam
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader, TensorDataset
from replay.models.base_torch_rec import TorchRecommender
EMBED_DIM = 128
def xavier_init_(layer: nn.Module):
"""
Xavier initialization
:param layer: net layer
"""
if isinstance(layer, (nn.Embedding, nn.Linear)):
nn.init.xavier_normal_(layer.weight.data)
if isinstance(layer, nn.Linear):
layer.bias.data.normal_(0.0, 0.001)
class GMF(nn.Module):
"""Generalized Matrix Factorization"""
def __init__(self, user_count: int, item_count: int, embedding_dim: int):
"""
:param user_count: number of users
:param item_count: number of items
:param embedding_dim: embedding size
"""
super().__init__()
self.user_embedding = nn.Embedding(
num_embeddings=user_count, embedding_dim=embedding_dim
)
self.item_embedding = nn.Embedding(
num_embeddings=item_count, embedding_dim=embedding_dim
)
self.item_biases = nn.Embedding(
num_embeddings=item_count, embedding_dim=1
)
self.user_biases = nn.Embedding(
num_embeddings=user_count, embedding_dim=1
)
xavier_init_(self.user_embedding)
xavier_init_(self.item_embedding)
self.user_biases.weight.data.zero_()
self.item_biases.weight.data.zero_()
# pylint: disable=arguments-differ
def forward(self, user: Tensor, item: Tensor) -> Tensor: # type: ignore
"""
:param user: user id batch
:param item: item id batch
:return: model output
"""
user_emb = self.user_embedding(user) + self.user_biases(user)
item_emb = self.item_embedding(item) + self.item_biases(item)
element_product = torch.mul(user_emb, item_emb)
return element_product
class MLP(nn.Module):
"""Multi-Layer Perceptron"""
def __init__(
self,
user_count: int,
item_count: int,
embedding_dim: int,
hidden_dims: Optional[List[int]] = None,
):
"""
:param user_count: number of users
:param item_count: number of items
:param embedding_dim: embedding size
:param hidden_dims: list of hidden dimension sizes
"""
super().__init__()
self.user_embedding = nn.Embedding(
num_embeddings=user_count, embedding_dim=embedding_dim
)
self.item_embedding = nn.Embedding(
num_embeddings=item_count, embedding_dim=embedding_dim
)
self.item_biases = nn.Embedding(
num_embeddings=item_count, embedding_dim=1
)
self.user_biases = nn.Embedding(
num_embeddings=user_count, embedding_dim=1
)
if hidden_dims:
full_hidden_dims = [2 * embedding_dim] + hidden_dims
self.hidden_layers = nn.ModuleList(
[
nn.Linear(d_in, d_out)
for d_in, d_out in zip(
full_hidden_dims[:-1], full_hidden_dims[1:]
)
]
)
else:
self.hidden_layers = nn.ModuleList()
self.activation = nn.ReLU()
xavier_init_(self.user_embedding)
xavier_init_(self.item_embedding)
self.user_biases.weight.data.zero_()
self.item_biases.weight.data.zero_()
for layer in self.hidden_layers:
xavier_init_(layer)
# pylint: disable=arguments-differ
def forward(self, user: Tensor, item: Tensor) -> Tensor: # type: ignore
"""
:param user: user id batch
:param item: item id batch
:return: output
"""
user_emb = self.user_embedding(user) + self.user_biases(user)
item_emb = self.item_embedding(item) + self.item_biases(item)
hidden = torch.cat([user_emb, item_emb], dim=-1)
for layer in self.hidden_layers:
hidden = layer(hidden)
hidden = self.activation(hidden)
return hidden
class NMF(nn.Module):
"""NMF = MLP + GMF"""
# pylint: disable=too-many-arguments
def __init__(
self,
user_count: int,
item_count: int,
embedding_gmf_dim: Optional[int] = None,
embedding_mlp_dim: Optional[int] = None,
hidden_mlp_dims: Optional[List[int]] = None,
):
"""
:param user_count: number of users
:param item_count: number of items
:param embedding_gmf_dim: embedding size for gmf
:param embedding_mlp_dim: embedding size for mlp
:param hidden_mlp_dims: list of hidden dimension sizes for mlp
"""
self.gmf: Optional[GMF] = None
self.mlp: Optional[MLP] = None
super().__init__()
merged_dim = 0
if embedding_gmf_dim:
self.gmf = GMF(user_count, item_count, embedding_gmf_dim)
merged_dim += embedding_gmf_dim
if embedding_mlp_dim:
self.mlp = MLP(
user_count, item_count, embedding_mlp_dim, hidden_mlp_dims
)
merged_dim += (
hidden_mlp_dims[-1]
if hidden_mlp_dims
else 2 * embedding_mlp_dim
)
self.last_layer = nn.Linear(merged_dim, 1)
xavier_init_(self.last_layer)
# pylint: disable=arguments-differ
def forward(self, user: Tensor, item: Tensor) -> Tensor: # type: ignore
"""
:param user: user id batch
:param item: item id batch
:return: output
"""
batch_size = len(user)
if self.gmf:
gmf_vector = self.gmf(user, item)
else:
gmf_vector = torch.zeros(batch_size, 0).to(user.device)
if self.mlp:
mlp_vector = self.mlp(user, item)
else:
mlp_vector = torch.zeros(batch_size, 0).to(user.device)
merged_vector = torch.cat([gmf_vector, mlp_vector], dim=1)
merged_vector = self.last_layer(merged_vector).squeeze(dim=1)
merged_vector = torch.sigmoid(merged_vector)
return merged_vector
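# Illustrative sketch (not part of the library): forward pass of the hybrid
# model above on toy data. The vector fed into `last_layer` has size
# embedding_gmf_dim plus hidden_mlp_dims[-1] (or 2 * embedding_mlp_dim when no
# hidden layers are given), and the output is one sigmoid score per pair.
def _nmf_forward_sketch():
    model = NMF(
        user_count=5,
        item_count=7,
        embedding_gmf_dim=4,
        embedding_mlp_dim=4,
        hidden_mlp_dims=[16, 8],
    )
    user = torch.tensor([0, 1, 2])
    item = torch.tensor([3, 4, 5])
    scores = model(user, item)
    assert scores.shape == (3,)  # one relevance score per (user, item) pair
    assert bool(((scores >= 0) & (scores <= 1)).all())
    return scores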
# pylint: disable=too-many-instance-attributes
class NeuroMF(TorchRecommender):
"""
Neural Matrix Factorization model (NeuMF, NCF).
In this implementation MLP and GMF modules are optional.
"""
num_workers: int = 0
batch_size_users: int = 100000
patience: int = 3
n_saved: int = 2
valid_split_size: float = 0.1
seed: int = 42
_search_space = {
"embedding_gmf_dim": {"type": "int", "args": [EMBED_DIM, EMBED_DIM]},
"embedding_mlp_dim": {"type": "int", "args": [EMBED_DIM, EMBED_DIM]},
"learning_rate": {"type": "loguniform", "args": [0.0001, 0.5]},
"l2_reg": {"type": "loguniform", "args": [1e-9, 5]},
"count_negative_sample": {"type": "int", "args": [1, 20]},
"factor": {"type": "uniform", "args": [0.2, 0.2]},
"patience": {"type": "int", "args": [3, 3]},
}
# pylint: disable=too-many-arguments
def __init__(
self,
learning_rate: float = 0.05,
epochs: int = 20,
embedding_gmf_dim: Optional[int] = None,
embedding_mlp_dim: Optional[int] = None,
hidden_mlp_dims: Optional[List[int]] = None,
l2_reg: float = 0,
count_negative_sample: int = 1,
factor: float = 0.2,
patience: int = 3,
):
"""
MLP or GMF model can be ignored if
its embedding size (embedding_mlp_dim or embedding_gmf_dim) is set to ``None``.
Default variant is MLP + GMF with embedding size 128.
:param learning_rate: learning rate
:param epochs: number of epochs to train model
:param embedding_gmf_dim: embedding size for gmf
:param embedding_mlp_dim: embedding size for mlp
:param hidden_mlp_dims: list of hidden dimension sizes for mlp
:param l2_reg: l2 regularization term
:param count_negative_sample: number of negative samples to use
:param factor: ReduceLROnPlateau reducing factor. new_lr = lr * factor
:param patience: number of non-improved epochs before reducing lr
"""
super().__init__()
if not embedding_gmf_dim and not embedding_mlp_dim:
embedding_gmf_dim, embedding_mlp_dim = EMBED_DIM, EMBED_DIM
if (embedding_gmf_dim is None or embedding_gmf_dim < 0) and (
embedding_mlp_dim is None or embedding_mlp_dim < 0
):
raise ValueError(
"at least one of embedding_gmf_dim and embedding_mlp_dim must be positive"
)
self.learning_rate = learning_rate
self.epochs = epochs
self.embedding_gmf_dim = embedding_gmf_dim
self.embedding_mlp_dim = embedding_mlp_dim
self.hidden_mlp_dims = hidden_mlp_dims
self.l2_reg = l2_reg
self.count_negative_sample = count_negative_sample
self.factor = factor
self.patience = patience
@property
def _init_args(self):
return {
"learning_rate": self.learning_rate,
"epochs": self.epochs,
"embedding_gmf_dim": self.embedding_gmf_dim,
"embedding_mlp_dim": self.embedding_mlp_dim,
"hidden_mlp_dims": self.hidden_mlp_dims,
"l2_reg": self.l2_reg,
"count_negative_sample": self.count_negative_sample,
"factor": self.factor,
"patience": self.patience,
}
def _data_loader(
self, data: pd.DataFrame, shuffle: bool = True
) -> DataLoader:
user_batch = LongTensor(data["user_idx"].values) # type: ignore
item_batch = LongTensor(data["item_idx"].values) # type: ignore
dataset = TensorDataset(user_batch, item_batch)
loader = DataLoader(
dataset,
batch_size=self.batch_size_users,
shuffle=shuffle,
num_workers=self.num_workers,
)
return loader
def _get_neg_batch(self, batch: Tensor) -> Tensor:
return torch.from_numpy(
np.random.choice(
self._fit_items_np, batch.shape[0] * self.count_negative_sample
)
)
def _fit(
self,
log: DataFrame,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
) -> None:
self.logger.debug("Create DataLoaders")
tensor_data = log.select("user_idx", "item_idx").toPandas()
train_tensor_data, valid_tensor_data = train_test_split(
tensor_data,
test_size=self.valid_split_size,
random_state=self.seed,
)
train_data_loader = self._data_loader(train_tensor_data)
valid_data_loader = self._data_loader(valid_tensor_data)
# pylint: disable=attribute-defined-outside-init
self._fit_items_np = self.fit_items.toPandas().to_numpy().ravel()
self.logger.debug("Training NeuroMF")
self.model = NMF(
user_count=self._user_dim,
item_count=self._item_dim,
embedding_gmf_dim=self.embedding_gmf_dim,
embedding_mlp_dim=self.embedding_mlp_dim,
hidden_mlp_dims=self.hidden_mlp_dims,
).to(self.device)
optimizer = Adam(
self.model.parameters(),
lr=self.learning_rate,
weight_decay=self.l2_reg / self.batch_size_users,
)
lr_scheduler = ReduceLROnPlateau(
optimizer, factor=self.factor, patience=self.patience
)
self.train(
train_data_loader,
valid_data_loader,
optimizer,
lr_scheduler,
self.epochs,
"neuromf",
)
del self._fit_items_np
# pylint: disable=arguments-differ
@staticmethod
def _loss(y_pred, y_true):
return F.binary_cross_entropy(y_pred, y_true).mean()
def _batch_pass(self, batch, model):
user_batch, pos_item_batch = batch
neg_item_batch = self._get_neg_batch(user_batch)
pos_relevance = model(
user_batch.to(self.device), pos_item_batch.to(self.device)
)
neg_relevance = model(
user_batch.repeat([self.count_negative_sample]).to(self.device),
neg_item_batch.to(self.device),
)
y_pred = torch.cat((pos_relevance, neg_relevance), 0)
y_true_pos = torch.ones_like(pos_item_batch).to(self.device)
y_true_neg = torch.zeros_like(neg_item_batch).to(self.device)
y_true = torch.cat((y_true_pos, y_true_neg), 0).float()
return {"y_pred": y_pred, "y_true": y_true}
@staticmethod
def _predict_pairs_inner(
model: nn.Module,
user_idx: int,
items_np: np.ndarray,
cnt: Optional[int] = None,
) -> DataFrame:
model.eval()
with torch.no_grad():
user_batch = LongTensor([user_idx] * len(items_np))
item_batch = LongTensor(items_np)
user_recs = torch.reshape(
model(user_batch, item_batch).detach(),
[
-1,
],
)
if cnt is not None:
best_item_idx = (
torch.argsort(user_recs, descending=True)[:cnt]
).numpy()
user_recs = user_recs[best_item_idx]
items_np = items_np[best_item_idx]
return pd.DataFrame(
{
"user_idx": user_recs.shape[0] * [user_idx],
"item_idx": items_np,
"relevance": user_recs,
}
)
@staticmethod
def _predict_by_user(
pandas_df: pd.DataFrame,
model: nn.Module,
items_np: np.ndarray,
k: int,
item_count: int,
) -> pd.DataFrame:
return NeuroMF._predict_pairs_inner(
model=model,
user_idx=pandas_df["user_idx"][0],
items_np=items_np,
cnt=min(len(pandas_df) + k, len(items_np)),
)
@staticmethod
def _predict_by_user_pairs(
pandas_df: pd.DataFrame, model: nn.Module, item_count: int
) -> pd.DataFrame:
return NeuroMF._predict_pairs_inner(
model=model,
user_idx=pandas_df["user_idx"][0],
items_np=np.array(pandas_df["item_idx_to_pred"][0]),
cnt=None,
)
def _load_model(self, path: str):
self.model = NMF(
user_count=self._user_dim,
item_count=self._item_dim,
embedding_gmf_dim=self.embedding_gmf_dim,
embedding_mlp_dim=self.embedding_mlp_dim,
hidden_mlp_dims=self.hidden_mlp_dims,
).to(self.device)
self.model.load_state_dict(torch.load(path))
self.model.eval() | /replay_rec-0.11.0-py3-none-any.whl/replay/models/neuromf.py | 0.963049 | 0.455017 | neuromf.py | pypi |
import logging
from abc import ABC, abstractmethod
from copy import deepcopy
from os.path import join
from typing import (
Any,
Dict,
Iterable,
List,
Optional,
Union,
Sequence,
Set,
Tuple,
)
import numpy as np
import pandas as pd
from numpy.random import default_rng
from optuna import create_study
from optuna.samplers import TPESampler
from pyspark.sql import DataFrame, Window
from pyspark.sql import functions as sf
from pyspark.sql.column import Column
from replay.constants import REC_SCHEMA
from replay.metrics import Metric, NDCG
from replay.optuna_objective import SplitData, MainObjective
from replay.session_handler import State
from replay.utils import (
cache_temp_view,
convert2spark,
cosine_similarity,
drop_temp_view,
filter_cold,
get_unique_entities,
get_top_k,
get_top_k_recs,
return_recs,
vector_euclidean_distance_similarity,
vector_dot, save_picklable_to_parquet, load_pickled_from_parquet,
)
# pylint: disable=too-few-public-methods
class IsSavable(ABC):
"""
Common methods and attributes for saving and loading RePlay models
"""
@property
@abstractmethod
def _init_args(self):
"""
Dictionary of the model attributes passed during model initialization.
Used for model saving and loading
"""
@property
def _dataframes(self):
"""
Dictionary of the model dataframes required for inference.
Used for model saving and loading
"""
return {}
def _save_model(self, path: str):
pass
def _load_model(self, path: str):
pass
class RecommenderCommons:
"""
Common methods and attributes of RePlay models for caching, setting parameters and logging
"""
_logger: Optional[logging.Logger] = None
cached_dfs: Optional[Set] = None
def set_params(self, **params: Dict[str, Any]) -> None:
"""
Set model parameters
:param params: dictionary param name - param value
:return:
"""
for param, value in params.items():
setattr(self, param, value)
self._clear_cache()
def _clear_cache(self):
"""
Clear spark cache
"""
def __str__(self):
return type(self).__name__
@property
def logger(self) -> logging.Logger:
"""
:returns: get library logger
"""
if self._logger is None:
self._logger = logging.getLogger("replay")
return self._logger
def _cache_model_temp_view(self, df: DataFrame, df_name: str) -> None:
"""
Create Spark SQL temporary view for df, cache it and add temp view name to self.cached_dfs.
Temp view name is : "id_<python object id>_model_<RePlay model name>_<df_name>"
"""
full_name = f"id_{id(self)}_model_{str(self)}_{df_name}"
cache_temp_view(df, full_name)
if self.cached_dfs is None:
self.cached_dfs = set()
self.cached_dfs.add(full_name)
def _clear_model_temp_view(self, df_name: str) -> None:
"""
Uncache and drop Spark SQL temporary view and remove from self.cached_dfs
Temp view to replace will be constructed as
"id_<python object id>_model_<RePlay model name>_<df_name>"
"""
full_name = f"id_{id(self)}_model_{str(self)}_{df_name}"
drop_temp_view(full_name)
if self.cached_dfs is not None:
self.cached_dfs.discard(full_name)
# pylint: disable=too-many-instance-attributes
class BaseRecommender(RecommenderCommons, IsSavable, ABC):
"""Base recommender"""
model: Any
can_predict_cold_users: bool = False
can_predict_cold_items: bool = False
_search_space: Optional[
Dict[str, Union[str, Sequence[Union[str, int, float]]]]
] = None
_objective = MainObjective
study = None
fit_users: DataFrame
fit_items: DataFrame
_num_users: int
_num_items: int
_user_dim_size: int
_item_dim_size: int
# pylint: disable=too-many-arguments, too-many-locals, no-member
def optimize(
self,
train: DataFrame,
test: DataFrame,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
param_borders: Optional[Dict[str, List[Any]]] = None,
criterion: Metric = NDCG(),
k: int = 10,
budget: int = 10,
new_study: bool = True,
) -> Optional[Dict[str, Any]]:
"""
Searches the best parameters with optuna.
:param train: train data
:param test: test data
:param user_features: user features
:param item_features: item features
:param param_borders: a dictionary with search borders, where
key is the parameter name and value is the range of possible values
``{param: [low, high]}``. In case of categorical parameters it is
all possible values: ``{cat_param: [cat_1, cat_2, cat_3]}``.
:param criterion: metric to use for optimization
:param k: recommendation list length
:param budget: number of points to try
:param new_study: keep searching with previous study or start a new study
:return: dictionary with best parameters
"""
if self._search_space is None:
self.logger.warning(
"%s has no hyper parameters to optimize", str(self)
)
return None
if self.study is None or new_study:
self.study = create_study(
direction="maximize", sampler=TPESampler()
)
search_space = self._prepare_param_borders(param_borders)
if (
self._init_params_in_search_space(search_space)
and not self._params_tried()
):
self.study.enqueue_trial(self._init_args)
split_data = self._prepare_split_data(
train, test, user_features, item_features
)
objective = self._objective(
search_space=search_space,
split_data=split_data,
recommender=self,
criterion=criterion,
k=k,
)
self.study.optimize(objective, budget)
best_params = self.study.best_params
self.set_params(**best_params)
return best_params
def _init_params_in_search_space(self, search_space):
"""Check if model params are inside search space"""
params = self._init_args # pylint: disable=no-member
outside_search_space = {}
for param, value in params.items():
if param not in search_space:
continue
borders = search_space[param]["args"]
param_type = search_space[param]["type"]
extra_category = (
param_type == "categorical" and value not in borders
)
param_out_of_bounds = param_type != "categorical" and (
value < borders[0] or value > borders[1]
)
if extra_category or param_out_of_bounds:
outside_search_space[param] = {
"borders": borders,
"value": value,
}
if outside_search_space:
self.logger.debug(
"Model is initialized with parameters outside the search space: %s."
"Initial parameters will not be evaluated during optimization."
"Change search spare with 'param_borders' argument if necessary",
outside_search_space,
)
return False
else:
return True
def _prepare_param_borders(
self, param_borders: Optional[Dict[str, List[Any]]] = None
) -> Dict[str, Dict[str, List[Any]]]:
"""
Checks if param borders are valid and convert them to a search_space format
:param param_borders: a dictionary with search grid, where
key is the parameter name and value is the range of possible values
``{param: [low, high]}``.
:return:
"""
search_space = deepcopy(self._search_space)
if param_borders is None:
return search_space
for param, borders in param_borders.items():
self._check_borders(param, borders)
search_space[param]["args"] = borders
# Optuna trials should contain all searchable parameters
# to be able to correctly return best params
# If the user didn't specify some params to be tested, optuna still needs to suggest them.
# This part makes sure that suggestion stays constant.
args = self._init_args
missing_borders = {
param: args[param]
for param in search_space
if param not in param_borders
}
for param, value in missing_borders.items():
if search_space[param]["type"] == "categorical":
search_space[param]["args"] = [value]
else:
search_space[param]["args"] = [value, value]
return search_space
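# The search space handled above is a dict of the form
# {"param": {"type": "uniform" | "loguniform" | "int" | "categorical",
#            "args": [low, high] or list of categories}},
# matching the `_search_space` attributes defined by concrete models.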
def _check_borders(self, param, borders):
"""Raise value error if param borders are not valid"""
if param not in self._search_space:
raise ValueError(
f"Hyper parameter {param} is not defined for {str(self)}"
)
if not isinstance(borders, list):
raise ValueError(f"Parameter {param} borders are not a list")
if (
self._search_space[param]["type"] != "categorical"
and len(borders) != 2
):
raise ValueError(
f"""
Hyper parameter {param} is numerical
but bounds are not in ([lower, upper]) format
"""
)
def _prepare_split_data(
self,
train: DataFrame,
test: DataFrame,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
) -> SplitData:
"""
This method converts data to spark and packs it into a named tuple to pass into optuna.
:param train: train data
:param test: test data
:param user_features: user features
:param item_features: item features
:return: packed PySpark DataFrames
"""
user_features_train, user_features_test = self._train_test_features(
train, test, user_features, "user_idx"
)
item_features_train, item_features_test = self._train_test_features(
train, test, item_features, "item_idx"
)
users = test.select("user_idx").distinct()
items = test.select("item_idx").distinct()
split_data = SplitData(
train,
test,
users,
items,
user_features_train,
user_features_test,
item_features_train,
item_features_test,
)
return split_data
@staticmethod
def _train_test_features(
train: DataFrame,
test: DataFrame,
features: Optional[DataFrame],
column: Union[str, Column],
) -> Tuple[Optional[DataFrame], Optional[DataFrame]]:
"""
split dataframe with features into two dataframes representing
features for train and tests subset entities, defined by `column`
:param train: spark dataframe with the train subset
:param test: spark dataframe with the test subset
:param features: spark dataframe with users'/items' features
:param column: column name to use as a key for join (e.g., user_idx or item_idx)
:return: features for train and test subsets
"""
if features is not None:
features_train = features.join(
train.select(column).distinct(), on=column
)
features_test = features.join(
test.select(column).distinct(), on=column
)
else:
features_train = None
features_test = None
return features_train, features_test
def _fit_wrap(
self,
log: DataFrame,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
) -> None:
"""
Wrapper for fit to allow for fewer arguments in a model.
:param log: historical log of interactions
``[user_idx, item_idx, timestamp, relevance]``
:param user_features: user features
``[user_idx, timestamp]`` + feature columns
:param item_features: item features
``[item_idx, timestamp]`` + feature columns
:return:
"""
self.logger.debug("Starting fit %s", type(self).__name__)
if user_features is None:
users = log.select("user_idx").distinct()
else:
users = (
log.select("user_idx")
.union(user_features.select("user_idx"))
.distinct()
)
if item_features is None:
items = log.select("item_idx").distinct()
else:
items = (
log.select("item_idx")
.union(item_features.select("item_idx"))
.distinct()
)
self.fit_users = sf.broadcast(users)
self.fit_items = sf.broadcast(items)
self._num_users = self.fit_users.count()
self._num_items = self.fit_items.count()
self._user_dim_size = (
self.fit_users.agg({"user_idx": "max"}).collect()[0][0] + 1
)
self._item_dim_size = (
self.fit_items.agg({"item_idx": "max"}).collect()[0][0] + 1
)
self._fit(log, user_features, item_features)
@abstractmethod
def _fit(
self,
log: DataFrame,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
) -> None:
"""
Inner method where model actually fits.
:param log: historical log of interactions
``[user_idx, item_idx, timestamp, relevance]``
:param user_features: user features
``[user_idx, timestamp]`` + feature columns
:param item_features: item features
``[item_idx, timestamp]`` + feature columns
:return:
"""
def _filter_seen(
self, recs: DataFrame, log: DataFrame, k: int, users: DataFrame
):
"""
Filter seen items (presented in log) out of the users' recommendations.
For each user return from `k` to `k + number of seen by user` recommendations.
"""
users_log = log.join(users, on="user_idx")
self._cache_model_temp_view(users_log, "filter_seen_users_log")
num_seen = users_log.groupBy("user_idx").agg(
sf.count("item_idx").alias("seen_count")
)
self._cache_model_temp_view(num_seen, "filter_seen_num_seen")
# count maximal number of items seen by users
max_seen = 0
if num_seen.count() > 0:
max_seen = num_seen.select(sf.max("seen_count")).collect()[0][0]
# crop recommendations to first k + max_seen items for each user
recs = recs.withColumn(
"temp_rank",
sf.row_number().over(
Window.partitionBy("user_idx").orderBy(
sf.col("relevance").desc()
)
),
).filter(sf.col("temp_rank") <= sf.lit(max_seen + k))
# leave k + number of items seen by user recommendations in recs
recs = (
recs.join(num_seen, on="user_idx", how="left")
.fillna(0)
.filter(sf.col("temp_rank") <= sf.col("seen_count") + sf.lit(k))
.drop("temp_rank", "seen_count")
)
# filter recommendations presented in interactions log
recs = recs.join(
users_log.withColumnRenamed("item_idx", "item")
.withColumnRenamed("user_idx", "user")
.select("user", "item"),
on=(sf.col("user_idx") == sf.col("user"))
& (sf.col("item_idx") == sf.col("item")),
how="anti",
).drop("user", "item")
return recs
# pylint: disable=too-many-arguments
def _predict_wrap(
self,
log: Optional[DataFrame],
k: int,
users: Optional[Union[DataFrame, Iterable]] = None,
items: Optional[Union[DataFrame, Iterable]] = None,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
filter_seen_items: bool = True,
recs_file_path: Optional[str] = None,
) -> Optional[DataFrame]:
"""
Predict wrapper to allow for fewer parameters in models
:param log: historical log of interactions
``[user_idx, item_idx, timestamp, relevance]``
:param k: number of recommendations for each user
:param users: users to create recommendations for
dataframe containing ``[user_idx]`` or ``array-like``;
if ``None``, recommend to all users from ``log``
:param items: candidate items for recommendations
dataframe containing ``[item_idx]`` or ``array-like``;
if ``None``, take all items from ``log``.
If it contains new items, ``relevance`` for them will be ``0``.
:param user_features: user features
``[user_idx , timestamp]`` + feature columns
:param item_features: item features
``[item_idx , timestamp]`` + feature columns
:param filter_seen_items: flag to remove seen items from recommendations based on ``log``.
:param recs_file_path: save recommendations at the given absolute path as parquet file.
If None, cached and materialized recommendations dataframe will be returned
:return: cached recommendation dataframe with columns ``[user_idx, item_idx, relevance]``
or None if `file_path` is provided
"""
self.logger.debug("Starting predict %s", type(self).__name__)
user_data = users or log or user_features or self.fit_users
users = get_unique_entities(user_data, "user_idx")
users, log = self._filter_cold_for_predict(users, log, "user")
item_data = items or self.fit_items
items = get_unique_entities(item_data, "item_idx")
items, log = self._filter_cold_for_predict(items, log, "item")
num_items = items.count()
if num_items < k:
message = f"k = {k} > number of items = {num_items}"
self.logger.debug(message)
recs = self._predict(
log,
k,
users,
items,
user_features,
item_features,
filter_seen_items,
)
if filter_seen_items and log:
recs = self._filter_seen(recs=recs, log=log, users=users, k=k)
recs = get_top_k_recs(recs, k=k).select(
"user_idx", "item_idx", "relevance"
)
output = return_recs(recs, recs_file_path)
self._clear_model_temp_view("filter_seen_users_log")
self._clear_model_temp_view("filter_seen_num_seen")
return output
def _filter_cold_for_predict(
self,
main_df: DataFrame,
log_df: Optional[DataFrame],
entity: str,
suffix: str = "idx",
):
"""
Filter out cold entities (users/items) from the `main_df` and `log_df`
if the model does not predict cold.
Warn if cold entities are present in the `main_df`.
"""
if getattr(self, f"can_predict_cold_{entity}s"):
return main_df, log_df
fit_entities = getattr(self, f"fit_{entity}s")
num_new, main_df = filter_cold(
main_df, fit_entities, col_name=f"{entity}_{suffix}"
)
if num_new > 0:
self.logger.info(
"%s model can't predict cold %ss, they will be ignored",
self,
entity,
)
_, log_df = filter_cold(
log_df, fit_entities, col_name=f"{entity}_{suffix}"
)
return main_df, log_df
# pylint: disable=too-many-arguments
@abstractmethod
def _predict(
self,
log: DataFrame,
k: int,
users: DataFrame,
items: DataFrame,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
filter_seen_items: bool = True,
) -> DataFrame:
"""
Inner method where model actually predicts.
:param log: historical log of interactions
``[user_idx, item_idx, timestamp, relevance]``
:param k: number of recommendations for each user
:param users: users to create recommendations for
dataframe containing ``[user_idx]`` or ``array-like``;
if ``None``, recommend to all users from ``log``
:param items: candidate items for recommendations
dataframe containing ``[item_idx]`` or ``array-like``;
if ``None``, take all items from ``log``.
If it contains new items, ``relevance`` for them will be ``0``.
:param user_features: user features
``[user_idx , timestamp]`` + feature columns
:param item_features: item features
``[item_idx , timestamp]`` + feature columns
:param filter_seen_items: flag to remove seen items from recommendations based on ``log``.
:return: recommendation dataframe
``[user_idx, item_idx, relevance]``
"""
def _get_fit_counts(self, entity: str) -> int:
if not hasattr(self, f"_num_{entity}s"):
setattr(
self,
f"_num_{entity}s",
getattr(self, f"fit_{entity}s").count(),
)
return getattr(self, f"_num_{entity}s")
@property
def users_count(self) -> int:
"""
:returns: number of users the model was trained on
"""
return self._get_fit_counts("user")
@property
def items_count(self) -> int:
"""
:returns: number of items the model was trained on
"""
return self._get_fit_counts("item")
def _get_fit_dims(self, entity: str) -> int:
if not hasattr(self, f"_{entity}_dim_size"):
setattr(
self,
f"_{entity}_dim_size",
getattr(self, f"fit_{entity}s")
.agg({f"{entity}_idx": "max"})
.collect()[0][0]
+ 1,
)
return getattr(self, f"_{entity}_dim_size")
@property
def _user_dim(self) -> int:
"""
:returns: dimension of users matrix (maximal user idx + 1)
"""
return self._get_fit_dims("user")
@property
def _item_dim(self) -> int:
"""
:returns: dimension of items matrix (maximal item idx + 1)
"""
return self._get_fit_dims("item")
def _fit_predict(
self,
log: DataFrame,
k: int,
users: Optional[Union[DataFrame, Iterable]] = None,
items: Optional[Union[DataFrame, Iterable]] = None,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
filter_seen_items: bool = True,
recs_file_path: Optional[str] = None,
) -> Optional[DataFrame]:
self._fit_wrap(log, user_features, item_features)
return self._predict_wrap(
log,
k,
users,
items,
user_features,
item_features,
filter_seen_items,
recs_file_path=recs_file_path,
)
def _predict_pairs_wrap(
self,
pairs: DataFrame,
log: Optional[DataFrame] = None,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
recs_file_path: Optional[str] = None,
k: Optional[int] = None,
) -> Optional[DataFrame]:
"""
This method
1) converts data to spark
2) removes cold users and items if model does not predict them
3) calls inner _predict_pairs method of a model
:param pairs: user-item pairs to get relevance for,
dataframe containing``[user_idx, item_idx]``.
:param log: train data
``[user_idx, item_idx, timestamp, relevance]``.
:param recs_file_path: save recommendations at the given absolute path as parquet file.
If None, cached and materialized recommendations dataframe will be returned
:return: cached dataframe with columns ``[user_idx, item_idx, relevance]``
or None if `file_path` is provided
"""
log, user_features, item_features, pairs = [
convert2spark(df)
for df in [log, user_features, item_features, pairs]
]
if sorted(pairs.columns) != ["item_idx", "user_idx"]:
raise ValueError(
"pairs must be a dataframe with columns strictly [user_idx, item_idx]"
)
pairs, log = self._filter_cold_for_predict(pairs, log, "user")
pairs, log = self._filter_cold_for_predict(pairs, log, "item")
pred = self._predict_pairs(
pairs=pairs,
log=log,
user_features=user_features,
item_features=item_features,
)
if k:
pred = get_top_k(
dataframe=pred,
partition_by_col=sf.col("user_idx"),
order_by_col=[
sf.col("relevance").desc(),
],
k=k,
)
if recs_file_path is not None:
pred.write.parquet(path=recs_file_path, mode="overwrite")
return None
pred.cache().count()
return pred
def _predict_pairs(
self,
pairs: DataFrame,
log: Optional[DataFrame] = None,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
) -> DataFrame:
"""
Fallback method to use in case ``_predict_pairs`` is not implemented.
Simply joins ``predict`` with given ``pairs``.
:param pairs: user-item pairs to get relevance for,
dataframe containing``[user_idx, item_idx]``.
:param log: train data
``[user_idx, item_idx, timestamp, relevance]``.
"""
message = (
"native predict_pairs is not implemented for this model. "
"Falling back to usual predict method and filtering the results."
)
self.logger.warning(message)
users = pairs.select("user_idx").distinct()
items = pairs.select("item_idx").distinct()
k = items.count()
pred = self._predict(
log=log,
k=k,
users=users,
items=items,
user_features=user_features,
item_features=item_features,
filter_seen_items=False,
)
pred = pred.join(
pairs.select("user_idx", "item_idx"),
on=["user_idx", "item_idx"],
how="inner",
)
return pred
def _get_features_wrap(
self, ids: DataFrame, features: Optional[DataFrame]
) -> Optional[Tuple[DataFrame, int]]:
if "user_idx" not in ids.columns and "item_idx" not in ids.columns:
raise ValueError("user_idx or item_idx missing")
vectors, rank = self._get_features(ids, features)
return vectors, rank
# pylint: disable=unused-argument
def _get_features(
self, ids: DataFrame, features: Optional[DataFrame]
) -> Tuple[Optional[DataFrame], Optional[int]]:
"""
Get embeddings from model
:param ids: id ids to get embeddings for Spark DataFrame containing user_idx or item_idx
:param features: user or item features
:return: DataFrame with biases and embeddings, and vector size
"""
self.logger.info(
"get_features method is not defined for the model %s. Features will not be returned.",
str(self),
)
return None, None
def _get_nearest_items_wrap(
self,
items: Union[DataFrame, Iterable],
k: int,
metric: Optional[str] = "cosine_similarity",
candidates: Optional[Union[DataFrame, Iterable]] = None,
) -> Optional[DataFrame]:
"""
Convert indexes and leave top-k nearest items for each item in `items`.
"""
items = get_unique_entities(items, "item_idx")
if candidates is not None:
candidates = get_unique_entities(candidates, "item_idx")
nearest_items_to_filter = self._get_nearest_items(
items=items,
metric=metric,
candidates=candidates,
)
rel_col_name = metric if metric is not None else "similarity"
nearest_items = get_top_k(
dataframe=nearest_items_to_filter,
partition_by_col=sf.col("item_idx_one"),
order_by_col=[
sf.col(rel_col_name).desc(),
sf.col("item_idx_two").desc(),
],
k=k,
)
nearest_items = nearest_items.withColumnRenamed(
"item_idx_two", "neighbour_item_idx"
)
nearest_items = nearest_items.withColumnRenamed(
"item_idx_one", "item_idx"
)
return nearest_items
def _get_nearest_items(
self,
items: DataFrame,
metric: Optional[str] = None,
candidates: Optional[DataFrame] = None,
) -> Optional[DataFrame]:
raise NotImplementedError(
f"item-to-item prediction is not implemented for {self}"
)
def _params_tried(self):
"""check if current parameters were already evaluated"""
if self.study is None:
return False
params = {
name: value
for name, value in self._init_args.items()
if name in self._search_space
}
for trial in self.study.trials:
if params == trial.params:
return True
return False
class ItemVectorModel(BaseRecommender):
"""Parent for models generating items' vector representations"""
can_predict_item_to_item: bool = True
item_to_item_metrics: List[str] = [
"euclidean_distance_sim",
"cosine_similarity",
"dot_product",
]
@abstractmethod
def _get_item_vectors(self) -> DataFrame:
"""
Return dataframe with items' vectors as a
spark dataframe with columns ``[item_idx, item_vector]``
"""
def get_nearest_items(
self,
items: Union[DataFrame, Iterable],
k: int,
metric: Optional[str] = "cosine_similarity",
candidates: Optional[Union[DataFrame, Iterable]] = None,
) -> Optional[DataFrame]:
"""
Get the k most similar items by the `metric` for each of the `items`.
:param items: spark dataframe or list of item ids to find neighbors
:param k: number of neighbors
:param metric: 'euclidean_distance_sim', 'cosine_similarity', 'dot_product'
:param candidates: spark dataframe or list of items
to consider as similar, e.g. popular/new items. If None,
all items presented during model training are used.
:return: dataframe with the most similar items,
where bigger value means greater similarity.
spark-dataframe with columns ``[item_idx, neighbour_item_idx, similarity]``
"""
if metric not in self.item_to_item_metrics:
raise ValueError(
f"Select one of the valid distance metrics: "
f"{self.item_to_item_metrics}"
)
return self._get_nearest_items_wrap(
items=items,
k=k,
metric=metric,
candidates=candidates,
)
def _get_nearest_items(
self,
items: DataFrame,
metric: str = "cosine_similarity",
candidates: Optional[DataFrame] = None,
) -> DataFrame:
"""
Return distance metric value for all available close items filtered by `candidates`.
:param items: ids to find neighbours, spark dataframe with column ``item_idx``
:param metric: 'euclidean_distance_sim' calculated as 1/(1 + euclidean_distance),
'cosine_similarity', 'dot_product'
:param candidates: items among which we are looking for similar,
e.g. popular/new items. If None, all items presented during model training are used.
:return: dataframe with neighbours,
spark-dataframe with columns ``[item_idx_one, item_idx_two, similarity]``
"""
dist_function = cosine_similarity
if metric == "euclidean_distance_sim":
dist_function = vector_euclidean_distance_similarity
elif metric == "dot_product":
dist_function = vector_dot
items_vectors = self._get_item_vectors()
left_part = (
items_vectors.withColumnRenamed("item_idx", "item_idx_one")
.withColumnRenamed("item_vector", "item_vector_one")
.join(
items.select(sf.col("item_idx").alias("item_idx_one")),
on="item_idx_one",
)
)
right_part = items_vectors.withColumnRenamed(
"item_idx", "item_idx_two"
).withColumnRenamed("item_vector", "item_vector_two")
if candidates is not None:
right_part = right_part.join(
candidates.withColumnRenamed("item_idx", "item_idx_two"),
on="item_idx_two",
)
joined_factors = left_part.join(
right_part, on=sf.col("item_idx_one") != sf.col("item_idx_two")
)
joined_factors = joined_factors.withColumn(
metric,
dist_function(
sf.col("item_vector_one"), sf.col("item_vector_two")
),
)
similarity_matrix = joined_factors.select(
"item_idx_one", "item_idx_two", metric
)
return similarity_matrix
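# Illustrative sketch (not part of the library): the three item-to-item
# metrics used above, written with plain numpy for two dense vectors. The
# actual implementation relies on the Spark helpers imported from
# replay.utils (cosine_similarity, vector_euclidean_distance_similarity,
# vector_dot), which operate on vector columns.
def _similarity_metrics_sketch(vec_one, vec_two):
    vec_one, vec_two = np.asarray(vec_one, float), np.asarray(vec_two, float)
    dot_product = float(np.dot(vec_one, vec_two))
    cosine = dot_product / (np.linalg.norm(vec_one) * np.linalg.norm(vec_two))
    # "euclidean_distance_sim" maps distance to similarity as 1 / (1 + distance)
    euclidean_sim = 1.0 / (1.0 + float(np.linalg.norm(vec_one - vec_two)))
    return {
        "dot_product": dot_product,
        "cosine_similarity": cosine,
        "euclidean_distance_sim": euclidean_sim,
    }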
# pylint: disable=abstract-method
class HybridRecommender(BaseRecommender, ABC):
"""Base class for models that can use extra features"""
def fit(
self,
log: DataFrame,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
) -> None:
"""
Fit a recommendation model
:param log: historical log of interactions
``[user_idx, item_idx, timestamp, relevance]``
:param user_features: user features
``[user_idx, timestamp]`` + feature columns
:param item_features: item features
``[item_idx, timestamp]`` + feature columns
:return:
"""
self._fit_wrap(
log=log,
user_features=user_features,
item_features=item_features,
)
# pylint: disable=too-many-arguments
def predict(
self,
log: DataFrame,
k: int,
users: Optional[Union[DataFrame, Iterable]] = None,
items: Optional[Union[DataFrame, Iterable]] = None,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
filter_seen_items: bool = True,
recs_file_path: Optional[str] = None,
) -> Optional[DataFrame]:
"""
Get recommendations
:param log: historical log of interactions
``[user_idx, item_idx, timestamp, relevance]``
:param k: number of recommendations for each user
:param users: users to create recommendations for
dataframe containing ``[user_idx]`` or ``array-like``;
if ``None``, recommend to all users from ``log``
:param items: candidate items for recommendations
dataframe containing ``[item_idx]`` or ``array-like``;
if ``None``, take all items from ``log``.
If it contains new items, ``relevance`` for them will be ``0``.
:param user_features: user features
``[user_idx , timestamp]`` + feature columns
:param item_features: item features
``[item_idx , timestamp]`` + feature columns
:param filter_seen_items: flag to remove seen items from recommendations based on ``log``.
:param recs_file_path: save recommendations at the given absolute path as parquet file.
If None, cached and materialized recommendations dataframe will be returned
:return: cached recommendation dataframe with columns ``[user_idx, item_idx, relevance]``
or None if `file_path` is provided
"""
return self._predict_wrap(
log=log,
k=k,
users=users,
items=items,
user_features=user_features,
item_features=item_features,
filter_seen_items=filter_seen_items,
recs_file_path=recs_file_path,
)
def fit_predict(
self,
log: DataFrame,
k: int,
users: Optional[Union[DataFrame, Iterable]] = None,
items: Optional[Union[DataFrame, Iterable]] = None,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
filter_seen_items: bool = True,
recs_file_path: Optional[str] = None,
) -> Optional[DataFrame]:
"""
Fit model and get recommendations
:param log: historical log of interactions
``[user_idx, item_idx, timestamp, relevance]``
:param k: number of recommendations for each user
:param users: users to create recommendations for
dataframe containing ``[user_idx]`` or ``array-like``;
if ``None``, recommend to all users from ``log``
:param items: candidate items for recommendations
dataframe containing ``[item_idx]`` or ``array-like``;
if ``None``, take all items from ``log``.
If it contains new items, ``relevance`` for them will be ``0``.
:param user_features: user features
``[user_idx , timestamp]`` + feature columns
:param item_features: item features
``[item_idx , timestamp]`` + feature columns
:param filter_seen_items: flag to remove seen items from recommendations based on ``log``.
:param recs_file_path: save recommendations at the given absolute path as parquet file.
If None, cached and materialized recommendations dataframe will be returned
:return: cached recommendation dataframe with columns ``[user_idx, item_idx, relevance]``
or None if `file_path` is provided
"""
return self._fit_predict(
log=log,
k=k,
users=users,
items=items,
user_features=user_features,
item_features=item_features,
filter_seen_items=filter_seen_items,
recs_file_path=recs_file_path,
)
def predict_pairs(
self,
pairs: DataFrame,
log: Optional[DataFrame] = None,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
recs_file_path: Optional[str] = None,
k: Optional[int] = None,
) -> Optional[DataFrame]:
"""
Get recommendations for specific user-item ``pairs``.
If a model can't produce a recommendation
for a specific pair, that pair is removed from the resulting dataframe.
:param pairs: dataframe with pairs to calculate relevance for, ``[user_idx, item_idx]``.
:param log: historical log of interactions
``[user_idx, item_idx, timestamp, relevance]``
:param user_features: user features
``[user_idx , timestamp]`` + feature columns
:param item_features: item features
``[item_idx , timestamp]`` + feature columns
:param recs_file_path: save recommendations at the given absolute path as parquet file.
If None, cached and materialized recommendations dataframe will be returned
:param k: top-k items for each user from pairs.
:return: cached recommendation dataframe with columns ``[user_idx, item_idx, relevance]``
or None if `recs_file_path` is provided
"""
return self._predict_pairs_wrap(
pairs=pairs,
log=log,
user_features=user_features,
item_features=item_features,
recs_file_path=recs_file_path,
k=k,
)
def get_features(
self, ids: DataFrame, features: Optional[DataFrame]
) -> Optional[Tuple[DataFrame, int]]:
"""
Returns user or item feature vectors as a Column with type ArrayType
:param ids: Spark DataFrame with unique ids
:param features: Spark DataFrame with features for provided ids
:return: feature vectors
If a model does not have a vector for some ids, they are not present in the final result.
"""
return self._get_features_wrap(ids, features)
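# Illustrative usage sketch (not part of the original module): how the public wrappers
# above are typically called on a concrete subclass such as PopRec from this package.
# `log` and `pairs` are hypothetical dataframes following the
# ``[user_idx, item_idx, timestamp, relevance]`` and ``[user_idx, item_idx]`` conventions.
#
#     model = PopRec()
#     model.fit(log)
#     top_k = model.predict(log, k=10)          # top-10 unseen items per user
#     scores = model.predict_pairs(pairs, log)  # relevance for the given pairs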
# pylint: disable=abstract-method
class Recommender(BaseRecommender, ABC):
"""Usual recommender class for models without features."""
def fit(self, log: DataFrame) -> None:
"""
Fit a recommendation model
:param log: historical log of interactions
``[user_idx, item_idx, timestamp, relevance]``
:return:
"""
self._fit_wrap(
log=log,
user_features=None,
item_features=None,
)
# pylint: disable=too-many-arguments
def predict(
self,
log: DataFrame,
k: int,
users: Optional[Union[DataFrame, Iterable]] = None,
items: Optional[Union[DataFrame, Iterable]] = None,
filter_seen_items: bool = True,
recs_file_path: Optional[str] = None,
) -> Optional[DataFrame]:
"""
Get recommendations
:param log: historical log of interactions
``[user_idx, item_idx, timestamp, relevance]``
:param k: number of recommendations for each user
:param users: users to create recommendations for
dataframe containing ``[user_idx]`` or ``array-like``;
if ``None``, recommend to all users from ``log``
:param items: candidate items for recommendations
dataframe containing ``[item_idx]`` or ``array-like``;
if ``None``, take all items from ``log``.
If it contains new items, ``relevance`` for them will be ``0``.
:param filter_seen_items: flag to remove seen items from recommendations based on ``log``.
:param recs_file_path: save recommendations at the given absolute path as parquet file.
If None, cached and materialized recommendations dataframe will be returned
:return: cached recommendation dataframe with columns ``[user_idx, item_idx, relevance]``
or None if `recs_file_path` is provided
"""
return self._predict_wrap(
log=log,
k=k,
users=users,
items=items,
user_features=None,
item_features=None,
filter_seen_items=filter_seen_items,
recs_file_path=recs_file_path,
)
def predict_pairs(
self,
pairs: DataFrame,
log: Optional[DataFrame] = None,
recs_file_path: Optional[str] = None,
k: Optional[int] = None,
) -> Optional[DataFrame]:
"""
Get recommendations for specific user-item ``pairs``.
If a model can't produce a recommendation
for a specific pair, the pair is removed from the resulting dataframe.
:param pairs: dataframe with pairs to calculate relevance for, ``[user_idx, item_idx]``.
:param log: historical log of interactions
``[user_idx, item_idx, timestamp, relevance]``
:param recs_file_path: save recommendations at the given absolute path as parquet file.
If None, cached and materialized recommendations dataframe will be returned
:param k: top-k items for each user from pairs.
:return: cached recommendation dataframe with columns ``[user_idx, item_idx, relevance]``
or None if `recs_file_path` is provided
"""
return self._predict_pairs_wrap(
pairs=pairs,
log=log,
recs_file_path=recs_file_path,
k=k,
)
# pylint: disable=too-many-arguments
def fit_predict(
self,
log: DataFrame,
k: int,
users: Optional[Union[DataFrame, Iterable]] = None,
items: Optional[Union[DataFrame, Iterable]] = None,
filter_seen_items: bool = True,
recs_file_path: Optional[str] = None,
) -> Optional[DataFrame]:
"""
Fit model and get recommendations
:param log: historical log of interactions
``[user_idx, item_idx, timestamp, relevance]``
:param k: number of recommendations for each user
:param users: users to create recommendations for
dataframe containing ``[user_idx]`` or ``array-like``;
if ``None``, recommend to all users from ``log``
:param items: candidate items for recommendations
dataframe containing ``[item_idx]`` or ``array-like``;
if ``None``, take all items from ``log``.
If it contains new items, ``relevance`` for them will be ``0``.
:param filter_seen_items: flag to remove seen items from recommendations based on ``log``.
:param recs_file_path: save recommendations at the given absolute path as parquet file.
If None, cached and materialized recommendations dataframe will be returned
:return: cached recommendation dataframe with columns ``[user_idx, item_idx, relevance]``
or None if `recs_file_path` is provided
"""
return self._fit_predict(
log=log,
k=k,
users=users,
items=items,
user_features=None,
item_features=None,
filter_seen_items=filter_seen_items,
recs_file_path=recs_file_path,
)
def get_features(self, ids: DataFrame) -> Optional[Tuple[DataFrame, int]]:
"""
Returns user or item feature vectors as a Column with type ArrayType
:param ids: Spark DataFrame with unique ids
:return: feature vectors.
If a model does not have a vector for some ids, they are not present in the final result.
"""
return self._get_features_wrap(ids, None)
class UserRecommender(BaseRecommender, ABC):
"""Base class for models that use user features
but not item features. ``log`` is not required for this class."""
def fit(
self,
log: DataFrame,
user_features: DataFrame,
) -> None:
"""
Fit a recommendation model using the interaction log and user features.
:param log: historical log of interactions
``[user_idx, item_idx, timestamp, relevance]``
:param user_features: user features
``[user_idx, timestamp]`` + feature columns
:return:
"""
self._fit_wrap(log=log, user_features=user_features)
# pylint: disable=too-many-arguments
def predict(
self,
user_features: DataFrame,
k: int,
log: Optional[DataFrame] = None,
users: Optional[Union[DataFrame, Iterable]] = None,
items: Optional[Union[DataFrame, Iterable]] = None,
filter_seen_items: bool = True,
recs_file_path: Optional[str] = None,
) -> Optional[DataFrame]:
"""
Get recommendations
:param log: historical log of interactions
``[user_idx, item_idx, timestamp, relevance]``
:param k: number of recommendations for each user
:param users: users to create recommendations for
dataframe containing ``[user_idx]`` or ``array-like``;
if ``None``, recommend to all users from ``log``
:param items: candidate items for recommendations
dataframe containing ``[item_idx]`` or ``array-like``;
if ``None``, take all items from ``log``.
If it contains new items, ``relevance`` for them will be ``0``.
:param user_features: user features
``[user_idx , timestamp]`` + feature columns
:param filter_seen_items: flag to remove seen items from recommendations based on ``log``.
:param recs_file_path: save recommendations at the given absolute path as parquet file.
If None, cached and materialized recommendations dataframe will be returned
:return: cached recommendation dataframe with columns ``[user_idx, item_idx, relevance]``
or None if `recs_file_path` is provided
"""
return self._predict_wrap(
log=log,
user_features=user_features,
k=k,
filter_seen_items=filter_seen_items,
users=users,
items=items,
recs_file_path=recs_file_path,
)
def predict_pairs(
self,
pairs: DataFrame,
user_features: DataFrame,
log: Optional[DataFrame] = None,
recs_file_path: Optional[str] = None,
k: Optional[int] = None,
) -> Optional[DataFrame]:
"""
Get recommendations for specific user-item ``pairs``.
If a model can't produce a recommendation
for a specific pair, the pair is removed from the resulting dataframe.
:param pairs: dataframe with pairs to calculate relevance for, ``[user_idx, item_idx]``.
:param user_features: user features
``[user_idx , timestamp]`` + feature columns
:param log: historical log of interactions
``[user_idx, item_idx, timestamp, relevance]``
:param recs_file_path: save recommendations at the given absolute path as parquet file.
If None, cached and materialized recommendations dataframe will be returned
:param k: top-k items for each user from pairs.
:return: cached recommendation dataframe with columns ``[user_idx, item_idx, relevance]``
or None if `recs_file_path` is provided
"""
return self._predict_pairs_wrap(
pairs=pairs,
log=log,
user_features=user_features,
recs_file_path=recs_file_path,
k=k,
)
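# Illustrative sketch (hypothetical dataframes, not part of the original module): a
# UserRecommender subclass such as ClusterRec (see cluster.py in this package) is fit on
# a log plus user features and can then score users that have features but no history:
#
#     model = ClusterRec(num_clusters=10)
#     model.fit(log, user_features=user_features)
#     recs = model.predict(user_features, k=5, log=log, users=cold_users)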
class NeighbourRec(Recommender, ABC):
"""Base class that requires log at prediction time"""
similarity: Optional[DataFrame]
can_predict_item_to_item: bool = True
can_predict_cold_users: bool = True
can_change_metric: bool = False
item_to_item_metrics = ["similarity"]
_similarity_metric = "similarity"
@property
def _dataframes(self):
return {"similarity": self.similarity}
def _clear_cache(self):
if hasattr(self, "similarity"):
self.similarity.unpersist()
# pylint: disable=missing-function-docstring
@property
def similarity_metric(self):
return self._similarity_metric
@similarity_metric.setter
def similarity_metric(self, value):
if not self.can_change_metric:
raise ValueError(
"This class does not support changing similarity metrics"
)
if value not in self.item_to_item_metrics:
raise ValueError(
f"Select one of the valid metrics for predict: "
f"{self.item_to_item_metrics}"
)
self._similarity_metric = value
def _predict_pairs_inner(
self,
log: DataFrame,
filter_df: DataFrame,
condition: Column,
users: DataFrame,
) -> DataFrame:
"""
Get recommendations for all provided users
and filter results with ``filter_df`` by ``condition``.
This allows implementing both ``predict_pairs`` and the usual top-k ``predict``.
:param log: historical interactions, DataFrame
``[user_idx, item_idx, timestamp, relevance]``.
:param filter_df: DataFrame used to filter items:
``[item_idx_filter]`` or ``[user_idx_filter, item_idx_filter]``.
:param condition: condition used for inner join with ``filter_df``
:param users: users to calculate recommendations for
:return: DataFrame ``[user_idx, item_idx, relevance]``
"""
if log is None:
raise ValueError(
"log is not provided, but it is required for prediction"
)
recs = (
log.join(users, how="inner", on="user_idx")
.join(
self.similarity,
how="inner",
on=sf.col("item_idx") == sf.col("item_idx_one"),
)
.join(
filter_df,
how="inner",
on=condition,
)
.groupby("user_idx", "item_idx_two")
.agg(sf.sum(self.similarity_metric).alias("relevance"))
.withColumnRenamed("item_idx_two", "item_idx")
)
return recs
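# Worked micro-example of the aggregation above (hypothetical numbers): if a user's log
# contains items {1, 2} and the similarity rows are (item_idx_one=1, item_idx_two=5, 0.3)
# and (item_idx_one=2, item_idx_two=5, 0.2), then candidate item 5 receives
# relevance 0.3 + 0.2 = 0.5 after the groupby-sum.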
# pylint: disable=too-many-arguments
def _predict(
self,
log: DataFrame,
k: int,
users: DataFrame,
items: DataFrame,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
filter_seen_items: bool = True,
) -> DataFrame:
return self._predict_pairs_inner(
log=log,
filter_df=items.withColumnRenamed("item_idx", "item_idx_filter"),
condition=sf.col("item_idx_two") == sf.col("item_idx_filter"),
users=users,
)
def _predict_pairs(
self,
pairs: DataFrame,
log: Optional[DataFrame] = None,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
) -> DataFrame:
if log is None:
raise ValueError(
"log is not provided, but it is required for prediction"
)
return self._predict_pairs_inner(
log=log,
filter_df=(
pairs.withColumnRenamed(
"user_idx", "user_idx_filter"
).withColumnRenamed("item_idx", "item_idx_filter")
),
condition=(sf.col("user_idx") == sf.col("user_idx_filter"))
& (sf.col("item_idx_two") == sf.col("item_idx_filter")),
users=pairs.select("user_idx").distinct(),
)
def get_nearest_items(
self,
items: Union[DataFrame, Iterable],
k: int,
metric: Optional[str] = None,
candidates: Optional[Union[DataFrame, Iterable]] = None,
) -> DataFrame:
"""
Get the k most similar items by the `metric` for each of the `items`.
:param items: spark dataframe or list of item ids to find neighbors
:param k: number of neighbors
:param metric: metric is not used to find neighbours in NeighbourRec,
the parameter is ignored
:param candidates: spark dataframe or list of items
to consider as similar, e.g. popular/new items. If None,
all items presented during model training are used.
:return: Spark dataframe with the most similar items and the distance,
where a bigger value means greater similarity;
columns are ``[item_idx, neighbour_item_idx, similarity]``
"""
if metric is not None:
self.logger.debug(
"Metric is not used to determine nearest items in %s model",
str(self),
)
return self._get_nearest_items_wrap(
items=items,
k=k,
metric=metric,
candidates=candidates,
)
def _get_nearest_items(
self,
items: DataFrame,
metric: Optional[str] = None,
candidates: Optional[DataFrame] = None,
) -> DataFrame:
similarity_filtered = self.similarity.join(
items.withColumnRenamed("item_idx", "item_idx_one"),
on="item_idx_one",
)
if candidates is not None:
similarity_filtered = similarity_filtered.join(
candidates.withColumnRenamed("item_idx", "item_idx_two"),
on="item_idx_two",
)
return similarity_filtered.select(
"item_idx_one",
"item_idx_two",
"similarity" if metric is None else metric,
)
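# Illustrative call (hypothetical fitted model): for a fitted NeighbourRec subclass,
# `model.get_nearest_items(items=[1, 2], k=5)` returns, for each of items 1 and 2,
# up to 5 most similar items together with their similarity scores.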
class NonPersonalizedRecommender(Recommender, ABC):
"""Base class for non-personalized recommenders with popularity statistics."""
can_predict_cold_users = True
can_predict_cold_items = True
item_popularity: DataFrame
add_cold_items: bool
cold_weight: float
sample: bool
fill: float
seed: Optional[int] = None
def __init__(self, add_cold_items: bool, cold_weight: float):
self.add_cold_items = add_cold_items
if 0 < cold_weight <= 1:
self.cold_weight = cold_weight
else:
raise ValueError(
"`cold_weight` value should be in interval (0, 1]"
)
@property
def _dataframes(self):
return {"item_popularity": self.item_popularity}
def _save_model(self, path: str):
save_picklable_to_parquet(self.fill, join(path, "params.dump"))
def _load_model(self, path: str):
self.fill = load_pickled_from_parquet(join(path, "params.dump"))
def _clear_cache(self):
if hasattr(self, "item_popularity"):
self.item_popularity.unpersist()
@staticmethod
def _calc_fill(item_popularity: DataFrame, weight: float) -> float:
"""
Calculate the fill value as the minimal relevance
seen during model training multiplied by ``weight``.
"""
return (
item_popularity.select(sf.min("relevance")).collect()[0][0]
* weight
)
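# Example with hypothetical numbers: if the minimal trained relevance is 0.2 and
# `weight` (the `cold_weight` parameter) is 0.5, cold items are filled with 0.1.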
@staticmethod
def _check_relevance(log: DataFrame):
vals = log.select("relevance").where(
(sf.col("relevance") != 1) & (sf.col("relevance") != 0)
)
if vals.count() > 0:
raise ValueError("Relevance values in log must be 0 or 1")
def _get_selected_item_popularity(self, items: DataFrame) -> DataFrame:
"""
Select only the required items from the `item_popularity` dataframe
for further recommendation generation.
"""
return self.item_popularity.join(
items,
on="item_idx",
how="right" if self.add_cold_items else "inner",
).fillna(value=self.fill, subset=["relevance"])
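# Note on the join type above: a "right" join keeps requested items missing from
# `item_popularity` (cold items) with null relevance, which `fillna` then replaces
# with `self.fill`; an "inner" join drops such cold items instead.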
@staticmethod
def _calc_max_hist_len(log: DataFrame, users: DataFrame) -> int:
max_hist_len = (
(
log.join(users, on="user_idx")
.groupBy("user_idx")
.agg(sf.countDistinct("item_idx").alias("items_count"))
)
.select(sf.max("items_count"))
.collect()[0][0]
)
# all users have empty history
if max_hist_len is None:
max_hist_len = 0
return max_hist_len
# pylint: disable=too-many-arguments
def _predict_without_sampling(
self,
log: DataFrame,
k: int,
users: DataFrame,
items: DataFrame,
filter_seen_items: bool = True,
) -> DataFrame:
"""
Regular prediction for popularity-based models,
top-k most relevant items from `items` are chosen for each user
"""
selected_item_popularity = self._get_selected_item_popularity(items)
selected_item_popularity = selected_item_popularity.withColumn(
"rank",
sf.row_number().over(
Window.orderBy(
sf.col("relevance").desc(), sf.col("item_idx").desc()
)
),
)
if filter_seen_items and log is not None:
user_to_num_items = (
log.join(users, on="user_idx")
.groupBy("user_idx")
.agg(sf.countDistinct("item_idx").alias("num_items"))
)
users = users.join(user_to_num_items, on="user_idx", how="left")
users = users.fillna(0, "num_items")
# 'selected_item_popularity' truncation by k + max_seen
max_seen = users.select(sf.coalesce(sf.max("num_items"), sf.lit(0))).collect()[0][0]
selected_item_popularity = selected_item_popularity\
.filter(sf.col("rank") <= k + max_seen)
return users.join(
selected_item_popularity, on=(sf.col("rank") <= k + sf.col("num_items")), how="left"
).drop("rank")
return users.crossJoin(
selected_item_popularity.filter(sf.col("rank") <= k)
).drop("rank")
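# Why `k + num_items` above: after the user's seen items are filtered out downstream,
# at least k candidates should remain, so each user keeps the items ranked up to
# (k + number of items already seen); the earlier truncation at k + max_seen only
# bounds the size of the join.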
def _predict_with_sampling(
self,
log: DataFrame,
k: int,
users: DataFrame,
items: DataFrame,
filter_seen_items: bool = True,
) -> DataFrame:
"""
Randomized prediction for popularity-based models:
top-k items from `items` are sampled for each user with
probability proportional to the items' popularity
"""
selected_item_popularity = self._get_selected_item_popularity(items)
selected_item_popularity = selected_item_popularity.withColumn(
"relevance",
sf.when(sf.col("relevance") == sf.lit(0.0), 0.1**6).otherwise(
sf.col("relevance")
),
)
items_pd = selected_item_popularity.withColumn(
"probability",
sf.col("relevance")
/ selected_item_popularity.select(sf.sum("relevance")).first()[0],
).toPandas()
if items_pd.shape[0] == 0:
return State().session.createDataFrame([], REC_SCHEMA)
seed = self.seed
class_name = self.__class__.__name__
def grouped_map(pandas_df: pd.DataFrame) -> pd.DataFrame:
user_idx = pandas_df["user_idx"][0]
cnt = pandas_df["cnt"][0]
if seed is not None:
local_rng = default_rng(seed + user_idx)
else:
local_rng = default_rng()
items_positions = local_rng.choice(
np.arange(items_pd.shape[0]),
size=cnt,
p=items_pd["probability"].values,
replace=False,
)
# workaround to unify RandomRec and UCB
if class_name == "RandomRec":
relevance = 1 / np.arange(1, cnt + 1)
else:
relevance = items_pd["probability"].values[items_positions]
return pd.DataFrame(
{
"user_idx": cnt * [user_idx],
"item_idx": items_pd["item_idx"].values[items_positions],
"relevance": relevance,
}
)
if log is not None and filter_seen_items:
recs = (
log.select("user_idx", "item_idx")
.distinct()
.join(users, how="right", on="user_idx")
.groupby("user_idx")
.agg(sf.countDistinct("item_idx").alias("cnt"))
.selectExpr(
"user_idx",
f"LEAST(cnt + {k}, {items_pd.shape[0]}) AS cnt",
)
)
else:
recs = users.withColumn("cnt", sf.lit(min(k, items_pd.shape[0])))
return recs.groupby("user_idx").applyInPandas(grouped_map, REC_SCHEMA)
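# The `cnt` column above follows the same idea: sample (number of seen items + k)
# candidates, capped by the catalogue size, so that at least k unseen items are likely
# to survive the seen-items filtering applied later.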
# pylint: disable=too-many-arguments
def _predict(
self,
log: DataFrame,
k: int,
users: DataFrame,
items: DataFrame,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
filter_seen_items: bool = True,
) -> DataFrame:
if self.sample:
return self._predict_with_sampling(
log=log,
k=k,
users=users,
items=items,
filter_seen_items=filter_seen_items,
)
else:
return self._predict_without_sampling(
log, k, users, items, filter_seen_items
)
def _predict_pairs(
self,
pairs: DataFrame,
log: Optional[DataFrame] = None,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
) -> DataFrame:
return (
pairs.join(
self.item_popularity,
on="item_idx",
how="left" if self.add_cold_items else "inner",
)
.fillna(value=self.fill, subset=["relevance"])
.select("user_idx", "item_idx", "relevance")
) | /replay_rec-0.11.0-py3-none-any.whl/replay/models/base_rec.py | 0.853516 | 0.266387 | base_rec.py | pypi |
from typing import Optional
from pyspark.ml.feature import Word2Vec
from pyspark.sql import DataFrame
from pyspark.sql import functions as sf
from pyspark.sql import types as st
from pyspark.ml.stat import Summarizer
from replay.models.base_rec import Recommender, ItemVectorModel
from replay.utils import vector_dot, multiply_scala_udf, join_with_col_renaming
# pylint: disable=too-many-instance-attributes
class Word2VecRec(Recommender, ItemVectorModel):
"""
Trains a word2vec model where items are treated as words and users as sentences.
"""
idf: DataFrame
vectors: DataFrame
can_predict_cold_users = True
_search_space = {
"rank": {"type": "int", "args": [50, 300]},
"window_size": {"type": "int", "args": [1, 100]},
"use_idf": {"type": "categorical", "args": [True, False]},
}
# pylint: disable=too-many-arguments
def __init__(
self,
rank: int = 100,
min_count: int = 5,
step_size: float = 0.025,
max_iter: int = 1,
window_size: int = 1,
use_idf: bool = False,
seed: Optional[int] = None,
num_partitions: Optional[int] = None,
):
"""
:param rank: embedding size
:param min_count: the minimum number of times a token must
appear to be included in the word2vec model's vocabulary
:param step_size: step size to be used for each iteration of optimization
:param max_iter: max number of iterations
:param window_size: window size
:param use_idf: flag to use inverse document frequency
:param seed: random seed
"""
self.rank = rank
self.window_size = window_size
self.use_idf = use_idf
self.min_count = min_count
self.step_size = step_size
self.max_iter = max_iter
self._seed = seed
self._num_partitions = num_partitions
@property
def _init_args(self):
return {
"rank": self.rank,
"window_size": self.window_size,
"use_idf": self.use_idf,
"min_count": self.min_count,
"step_size": self.step_size,
"max_iter": self.max_iter,
"seed": self._seed,
}
def _fit(
self,
log: DataFrame,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
) -> None:
self.idf = (
log.groupBy("item_idx")
.agg(sf.countDistinct("user_idx").alias("count"))
.withColumn(
"idf",
sf.log(sf.lit(self.users_count) / sf.col("count"))
if self.use_idf
else sf.lit(1.0),
)
.select("item_idx", "idf")
)
self.idf.cache().count()
log_by_users = (
log.groupBy("user_idx")
.agg(
sf.collect_list(sf.struct("timestamp", "item_idx")).alias(
"ts_item_idx"
)
)
.withColumn("ts_item_idx", sf.array_sort("ts_item_idx"))
.withColumn(
"items",
sf.col("ts_item_idx.item_idx").cast(
st.ArrayType(st.StringType())
),
)
.drop("ts_item_idx")
)
self.logger.debug("Model training")
if self._num_partitions is None:
self._num_partitions = log_by_users.rdd.getNumPartitions()
word_2_vec = Word2Vec(
vectorSize=self.rank,
minCount=self.min_count,
numPartitions=self._num_partitions,
stepSize=self.step_size,
maxIter=self.max_iter,
inputCol="items",
outputCol="w2v_vector",
windowSize=self.window_size,
seed=self._seed,
)
self.vectors = (
word_2_vec.fit(log_by_users)
.getVectors()
.select(sf.col("word").cast("int").alias("item"), "vector")
)
self.vectors.cache().count()
def _clear_cache(self):
if hasattr(self, "idf") and hasattr(self, "vectors"):
self.idf.unpersist()
self.vectors.unpersist()
@property
def _dataframes(self):
return {"idf": self.idf, "vectors": self.vectors}
def _get_user_vectors(
self,
users: DataFrame,
log: DataFrame,
) -> DataFrame:
"""
:param users: user ids, dataframe ``[user_idx]``
:param log: interaction dataframe
``[user_idx, item_idx, timestamp, relevance]``
:return: user embeddings dataframe
``[user_idx, user_vector]``
"""
res = join_with_col_renaming(
log, users, on_col_name="user_idx", how="inner"
)
res = join_with_col_renaming(
res, self.idf, on_col_name="item_idx", how="inner"
)
res = res.join(
self.vectors.hint("broadcast"),
how="inner",
on=sf.col("item_idx") == sf.col("item"),
).drop("item")
return (
res.groupby("user_idx")
.agg(
Summarizer.mean(
multiply_scala_udf(sf.col("idf"), sf.col("vector"))
).alias("user_vector")
)
.select("user_idx", "user_vector")
)
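# What the aggregation above computes, sketched with plain numpy on hypothetical data:
#
#     import numpy as np
#     item_vecs = np.array([[1.0, 0.0], [0.0, 1.0]])  # vectors of the user's items
#     idf = np.array([2.0, 1.0])                      # idf weight of each item
#     user_vector = (idf[:, None] * item_vecs).mean(axis=0)  # -> array([1.0, 0.5])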
def _predict_pairs_inner(
self,
pairs: DataFrame,
log: DataFrame,
) -> DataFrame:
if log is None:
raise ValueError(
f"log is not provided, {self} predict requires log."
)
user_vectors = self._get_user_vectors(
pairs.select("user_idx").distinct(), log
)
pairs_with_vectors = join_with_col_renaming(
pairs, user_vectors, on_col_name="user_idx", how="inner"
)
pairs_with_vectors = pairs_with_vectors.join(
self.vectors, on=sf.col("item_idx") == sf.col("item"), how="inner"
).drop("item")
return pairs_with_vectors.select(
"user_idx",
sf.col("item_idx"),
(
vector_dot(sf.col("vector"), sf.col("user_vector"))
+ sf.lit(self.rank)
).alias("relevance"),
)
# pylint: disable=too-many-arguments
def _predict(
self,
log: DataFrame,
k: int,
users: DataFrame,
items: DataFrame,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
filter_seen_items: bool = True,
) -> DataFrame:
return self._predict_pairs_inner(users.crossJoin(items), log)
def _predict_pairs(
self,
pairs: DataFrame,
log: Optional[DataFrame] = None,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
) -> DataFrame:
return self._predict_pairs_inner(pairs, log)
def _get_item_vectors(self):
return self.vectors.withColumnRenamed(
"vector", "item_vector"
).withColumnRenamed("item", "item_idx") | /replay_rec-0.11.0-py3-none-any.whl/replay/models/word2vec.py | 0.932821 | 0.445469 | word2vec.py | pypi |
from typing import Optional, Tuple
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from pyspark.sql import DataFrame
from scipy.sparse import csr_matrix
from sklearn.model_selection import GroupShuffleSplit
from torch import nn
from torch.optim import Adam
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader, TensorDataset
from replay.models.base_torch_rec import TorchRecommender
class VAE(nn.Module):
"""Base variational autoencoder"""
# pylint: disable=too-many-arguments,too-many-instance-attributes
def __init__(
self,
item_count: int,
latent_dim: int,
hidden_dim: int = 600,
dropout: float = 0.3,
):
"""
:param item_count: number of items
:param latent_dim: latent dimension size
:param hidden_dim: hidden dimension size for encoder and decoder
:param dropout: dropout coefficient
"""
super().__init__()
self.latent_dim = latent_dim
self.encoder_dims = [item_count, hidden_dim, latent_dim * 2]
self.decoder_dims = [latent_dim, hidden_dim, item_count]
self.encoder = nn.ModuleList(
[
nn.Linear(d_in, d_out)
for d_in, d_out in zip(
self.encoder_dims[:-1], self.encoder_dims[1:]
)
]
)
self.decoder = nn.ModuleList(
[
nn.Linear(d_in, d_out)
for d_in, d_out in zip(
self.decoder_dims[:-1], self.decoder_dims[1:]
)
]
)
self.dropout = nn.Dropout(dropout)
self.activation = torch.nn.ReLU()
for layer in self.encoder:
self.weight_init(layer)
for layer in self.decoder:
self.weight_init(layer)
def encode(self, batch: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""Encode"""
hidden = F.normalize(batch, p=2, dim=1)
hidden = self.dropout(hidden)
for layer in self.encoder[:-1]:
hidden = layer(hidden)
hidden = self.activation(hidden)
hidden = self.encoder[-1](hidden)
mu_latent = hidden[:, : self.latent_dim]
logvar_latent = hidden[:, self.latent_dim :]
return mu_latent, logvar_latent
def reparameterize(
self, mu_latent: torch.Tensor, logvar_latent: torch.Tensor
) -> torch.Tensor:
"""Reparametrization trick"""
if self.training:
std = torch.exp(0.5 * logvar_latent)
eps = torch.randn_like(std)
return eps * std + mu_latent
return mu_latent
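# Reparameterization in plain terms: during training a latent sample is drawn as
# z = mu + eps * exp(0.5 * logvar) with eps ~ N(0, I), which keeps sampling
# differentiable w.r.t. mu and logvar; at inference the mean mu is used directly.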
def decode(self, z_latent: torch.Tensor) -> torch.Tensor:
"""Decode"""
hidden = z_latent
for layer in self.decoder[:-1]:
hidden = layer(hidden)
hidden = self.activation(hidden)
return self.decoder[-1](hidden) # type: ignore
# pylint: disable=arguments-differ
def forward( # type: ignore
self, batch: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
:param batch: user batch
:return: output, expectation and logarithm of variation
"""
mu_latent, logvar_latent = self.encode(batch)
z_latent = self.reparameterize(mu_latent, logvar_latent)
return self.decode(z_latent), mu_latent, logvar_latent
@staticmethod
def weight_init(layer: nn.Module):
"""
Xavier initialization
:param layer: layer of a model
"""
if isinstance(layer, nn.Linear):
nn.init.xavier_normal_(layer.weight.data)
layer.bias.data.normal_(0.0, 0.001)
# pylint: disable=too-many-instance-attributes
class MultVAE(TorchRecommender):
"""`Variational Autoencoders for Collaborative Filtering
<https://arxiv.org/pdf/1802.05814.pdf>`_"""
num_workers: int = 0
batch_size_users: int = 5000
patience: int = 10
n_saved: int = 2
valid_split_size: float = 0.1
seed: int = 42
can_predict_cold_users = True
train_user_batch: csr_matrix
valid_user_batch: csr_matrix
_search_space = {
"learning_rate": {"type": "loguniform", "args": [0.0001, 0.5]},
"epochs": {"type": "int", "args": [100, 100]},
"latent_dim": {"type": "int", "args": [200, 200]},
"hidden_dim": {"type": "int", "args": [600, 600]},
"dropout": {"type": "uniform", "args": [0, 0.5]},
"anneal": {"type": "uniform", "args": [0.2, 1]},
"l2_reg": {"type": "loguniform", "args": [1e-9, 5]},
"factor": {"type": "uniform", "args": [0.2, 0.2]},
"patience": {"type": "int", "args": [3, 3]},
}
# pylint: disable=too-many-arguments
def __init__(
self,
learning_rate: float = 0.01,
epochs: int = 100,
latent_dim: int = 200,
hidden_dim: int = 600,
dropout: float = 0.3,
anneal: float = 0.1,
l2_reg: float = 0,
factor: float = 0.2,
patience: int = 3,
):
"""
:param learning_rate: learning rate
:param epochs: number of epochs to train model
:param latent_dim: latent dimension size for user vectors
:param hidden_dim: hidden dimension size for encoder and decoder
:param dropout: dropout coefficient
:param anneal: anneal coefficient [0,1]
:param l2_reg: l2 regularization term
:param factor: ReduceLROnPlateau reducing factor. new_lr = lr * factor
:param patience: number of non-improved epochs before reducing lr
"""
super().__init__()
self.learning_rate = learning_rate
self.epochs = epochs
self.latent_dim = latent_dim
self.hidden_dim = hidden_dim
self.dropout = dropout
self.anneal = anneal
self.l2_reg = l2_reg
self.factor = factor
self.patience = patience
@property
def _init_args(self):
return {
"learning_rate": self.learning_rate,
"epochs": self.epochs,
"latent_dim": self.latent_dim,
"hidden_dim": self.hidden_dim,
"dropout": self.dropout,
"anneal": self.anneal,
"l2_reg": self.l2_reg,
"factor": self.factor,
"patience": self.patience,
}
def _get_data_loader(
self, data: pd.DataFrame, shuffle: bool = True
) -> Tuple[csr_matrix, DataLoader, np.ndarray]:
"""get data loader and matrix with data"""
users_count = data["user_idx"].value_counts().count()
user_idx = data["user_idx"].astype("category").cat # type: ignore
user_batch = csr_matrix(
(
np.ones(len(data["user_idx"])),
([user_idx.codes.values, data["item_idx"].values]),
),
shape=(users_count, self._item_dim),
)
data_loader = DataLoader(
TensorDataset(torch.arange(users_count).long()),
batch_size=self.batch_size_users,
shuffle=shuffle,
num_workers=self.num_workers,
)
return user_batch, data_loader, user_idx.categories.values
# pylint: disable=too-many-locals
def _fit(
self,
log: DataFrame,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
) -> None:
self.logger.debug("Creating batch")
data = log.select("user_idx", "item_idx").toPandas()
splitter = GroupShuffleSplit(
n_splits=1, test_size=self.valid_split_size, random_state=self.seed
)
train_idx, valid_idx = next(
splitter.split(data, groups=data["user_idx"])
)
train_data, valid_data = data.iloc[train_idx], data.iloc[valid_idx]
self.train_user_batch, train_data_loader, _ = self._get_data_loader(
train_data
)
self.valid_user_batch, valid_data_loader, _ = self._get_data_loader(
valid_data, False
)
self.logger.debug("Training VAE")
self.model = VAE(
item_count=self._item_dim,
latent_dim=self.latent_dim,
hidden_dim=self.hidden_dim,
dropout=self.dropout,
).to(self.device)
optimizer = Adam(
self.model.parameters(),
lr=self.learning_rate,
weight_decay=self.l2_reg / self.batch_size_users,
)
lr_scheduler = ReduceLROnPlateau(
optimizer, factor=self.factor, patience=self.patience
)
self.train(
train_data_loader,
valid_data_loader,
optimizer,
lr_scheduler,
self.epochs,
"multvae",
)
# pylint: disable=arguments-differ
def _loss(self, y_pred, y_true, mu_latent, logvar_latent):
log_softmax_var = F.log_softmax(y_pred, dim=1)
bce = -(log_softmax_var * y_true).sum(dim=1).mean()
kld = (
-0.5
* torch.sum(
1 + logvar_latent - mu_latent.pow(2) - logvar_latent.exp(),
dim=1,
).mean()
)
return bce + self.anneal * kld
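# The objective above is the Mult-VAE loss: a multinomial log-likelihood term
# (cross-entropy between the softmax of the reconstruction and the binary user vector)
# plus the KL divergence between the approximate posterior N(mu, sigma^2) and the
# standard normal prior, scaled by the annealing coefficient `anneal`.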
def _batch_pass(self, batch, model):
if model.training:
full_batch = self.train_user_batch
else:
full_batch = self.valid_user_batch
user_batch = torch.FloatTensor(full_batch[batch[0]].toarray()).to(
self.device
)
pred_user_batch, latent_mu, latent_logvar = self.model.forward(
user_batch
)
return {
"y_pred": pred_user_batch,
"y_true": user_batch,
"mu_latent": latent_mu,
"logvar_latent": latent_logvar,
}
@staticmethod
def _predict_pairs_inner(
model: nn.Module,
user_idx: int,
items_np_history: np.ndarray,
items_np_to_pred: np.ndarray,
item_count: int,
cnt: Optional[int] = None,
) -> DataFrame:
model.eval()
with torch.no_grad():
user_batch = torch.zeros((1, item_count))
user_batch[0, items_np_history] = 1
user_recs = F.softmax(model(user_batch)[0][0].detach(), dim=0)
if cnt is not None:
best_item_idx = (
torch.argsort(
user_recs[items_np_to_pred], descending=True
)[:cnt]
).numpy()
items_np_to_pred = items_np_to_pred[best_item_idx]
return pd.DataFrame(
{
"user_idx": np.array(
items_np_to_pred.shape[0] * [user_idx]
),
"item_idx": items_np_to_pred,
"relevance": user_recs[items_np_to_pred],
}
)
@staticmethod
def _predict_by_user(
pandas_df: pd.DataFrame,
model: nn.Module,
items_np: np.ndarray,
k: int,
item_count: int,
) -> pd.DataFrame:
return MultVAE._predict_pairs_inner(
model=model,
user_idx=pandas_df["user_idx"][0],
items_np_history=pandas_df["item_idx"].values,
items_np_to_pred=items_np,
item_count=item_count,
cnt=min(len(pandas_df) + k, len(items_np)),
)
@staticmethod
def _predict_by_user_pairs(
pandas_df: pd.DataFrame,
model: nn.Module,
item_count: int,
) -> pd.DataFrame:
return MultVAE._predict_pairs_inner(
model=model,
user_idx=pandas_df["user_idx"][0],
items_np_history=np.array(pandas_df["item_idx_history"][0]),
items_np_to_pred=np.array(pandas_df["item_idx_to_pred"][0]),
item_count=item_count,
cnt=None,
)
def _load_model(self, path: str):
self.model = VAE(
item_count=self._item_dim,
latent_dim=self.latent_dim,
hidden_dim=self.hidden_dim,
dropout=self.dropout,
).to(self.device)
self.model.load_state_dict(torch.load(path))
self.model.eval() | /replay_rec-0.11.0-py3-none-any.whl/replay/models/mult_vae.py | 0.967241 | 0.618924 | mult_vae.py | pypi |
from typing import Optional
from pandas import DataFrame
from pyspark.ml.clustering import KMeans, KMeansModel
from pyspark.ml.feature import VectorAssembler
from pyspark.sql import functions as sf
from replay.models.base_rec import UserRecommender
class ClusterRec(UserRecommender):
"""
Generate recommendations for cold users using k-means clusters
"""
can_predict_cold_users = True
_search_space = {
"num_clusters": {"type": "int", "args": [2, 20]},
}
item_rel_in_cluster: DataFrame
def __init__(self, num_clusters: int = 10):
"""
:param num_clusters: number of clusters
"""
self.num_clusters = num_clusters
@property
def _init_args(self):
return {"num_clusters": self.num_clusters}
def _save_model(self, path: str):
self.model.write().overwrite().save(path)
def _load_model(self, path: str):
self.model = KMeansModel.load(path)
def _fit(
self,
log: DataFrame,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
) -> None:
kmeans = KMeans().setK(self.num_clusters).setFeaturesCol("features")
user_features_vector = self._transform_features(user_features)
self.model = kmeans.fit(user_features_vector)
users_clusters = (
self.model.transform(user_features_vector)
.select("user_idx", "prediction")
.withColumnRenamed("prediction", "cluster")
)
log = log.join(users_clusters, on="user_idx", how="left")
self.item_rel_in_cluster = log.groupBy(["cluster", "item_idx"]).agg(
sf.count("item_idx").alias("item_count")
)
max_count_per_cluster = self.item_rel_in_cluster.groupby(
"cluster"
).agg(sf.max("item_count").alias("max_count_in_cluster"))
self.item_rel_in_cluster = self.item_rel_in_cluster.join(
max_count_per_cluster, on="cluster"
)
self.item_rel_in_cluster = self.item_rel_in_cluster.withColumn(
"relevance", sf.col("item_count") / sf.col("max_count_in_cluster")
).drop("item_count", "max_count_in_cluster")
self.item_rel_in_cluster.cache().count()
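# Worked example of the cluster relevance above (hypothetical counts): if cluster 0
# contains item counts {item 1: 20, item 2: 5}, item 1 gets relevance 20/20 = 1.0 and
# item 2 gets 5/20 = 0.25, i.e. counts are normalized by the most popular item of the
# same cluster.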
def _clear_cache(self):
if hasattr(self, "item_rel_in_cluster"):
self.item_rel_in_cluster.unpersist()
@property
def _dataframes(self):
return {"item_rel_in_cluster": self.item_rel_in_cluster}
@staticmethod
def _transform_features(user_features):
feature_columns = user_features.drop("user_idx").columns
vec = VectorAssembler(inputCols=feature_columns, outputCol="features")
return vec.transform(user_features).select("user_idx", "features")
def _make_user_clusters(self, users, user_features):
usr_cnt_in_fv = (user_features
.select("user_idx")
.distinct()
.join(users.distinct(), on="user_idx").count())
user_cnt = users.distinct().count()
if usr_cnt_in_fv < user_cnt:
self.logger.info("% user(s) don't "
"have a feature vector. "
"The results will not be calculated for them.",
user_cnt - usr_cnt_in_fv)
user_features_vector = self._transform_features(
user_features.join(users, on="user_idx")
)
return (
self.model.transform(user_features_vector)
.select("user_idx", "prediction")
.withColumnRenamed("prediction", "cluster")
)
# pylint: disable=too-many-arguments
def _predict(
self,
log: DataFrame,
k: int,
users: DataFrame,
items: DataFrame,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
filter_seen_items: bool = True,
) -> DataFrame:
user_clusters = self._make_user_clusters(users, user_features)
filtered_items = self.item_rel_in_cluster.join(items, on="item_idx")
pred = user_clusters.join(filtered_items, on="cluster").drop("cluster")
return pred
def _predict_pairs(
self,
pairs: DataFrame,
log: Optional[DataFrame] = None,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
) -> DataFrame:
if user_features is None:
raise ValueError("User features are missing for predict")
user_clusters = self._make_user_clusters(pairs.select("user_idx").distinct(), user_features)
pairs_with_clusters = pairs.join(user_clusters, on="user_idx")
filtered_items = (self.item_rel_in_cluster
.join(pairs.select("item_idx").distinct(), on="item_idx"))
pred = (pairs_with_clusters
.join(filtered_items, on=["cluster", "item_idx"])
.select("user_idx","item_idx","relevance"))
return pred | /replay_rec-0.11.0-py3-none-any.whl/replay/models/cluster.py | 0.886513 | 0.470858 | cluster.py | pypi |
from typing import Optional, Tuple
import pyspark.sql.functions as sf
from pyspark.sql import DataFrame
from pyspark.sql.types import DoubleType
from replay.models.base_rec import Recommender, ItemVectorModel
from replay.spark_custom_models.recommendation import ALS, ALSModel
from replay.utils import list_to_vector_udf
class ALSWrap(Recommender, ItemVectorModel):
"""Wrapper for `Spark ALS
<https://spark.apache.org/docs/latest/api/python/pyspark.mllib.html#pyspark.mllib.recommendation.ALS>`_.
"""
_seed: Optional[int] = None
_search_space = {
"rank": {"type": "loguniform_int", "args": [8, 256]},
}
# pylint: disable=too-many-arguments
def __init__(
self,
rank: int = 10,
implicit_prefs: bool = True,
seed: Optional[int] = None,
num_item_blocks: Optional[int] = None,
num_user_blocks: Optional[int] = None,
):
"""
:param rank: hidden dimension for the approximate matrix
:param implicit_prefs: flag to use implicit feedback
:param seed: random seed
:param num_item_blocks: number of blocks the items will be partitioned into in order
to parallelize computation.
If None, it is initialized with the number of partitions of ``log``.
:param num_user_blocks: number of blocks the users will be partitioned into in order
to parallelize computation.
If None, it is initialized with the number of partitions of ``log``.
"""
self.rank = rank
self.implicit_prefs = implicit_prefs
self._seed = seed
self._num_item_blocks = num_item_blocks
self._num_user_blocks = num_user_blocks
@property
def _init_args(self):
return {
"rank": self.rank,
"implicit_prefs": self.implicit_prefs,
"seed": self._seed,
}
def _save_model(self, path: str):
self.model.write().overwrite().save(path)
def _load_model(self, path: str):
self.model = ALSModel.load(path)
self.model.itemFactors.cache()
self.model.userFactors.cache()
def _fit(
self,
log: DataFrame,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
) -> None:
if self._num_item_blocks is None:
self._num_item_blocks = log.rdd.getNumPartitions()
if self._num_user_blocks is None:
self._num_user_blocks = log.rdd.getNumPartitions()
self.model = ALS(
rank=self.rank,
numItemBlocks=self._num_item_blocks,
numUserBlocks=self._num_user_blocks,
userCol="user_idx",
itemCol="item_idx",
ratingCol="relevance",
implicitPrefs=self.implicit_prefs,
seed=self._seed,
coldStartStrategy="drop",
).fit(log)
self.model.itemFactors.cache()
self.model.userFactors.cache()
self.model.itemFactors.count()
self.model.userFactors.count()
def _clear_cache(self):
if hasattr(self, "model"):
self.model.itemFactors.unpersist()
self.model.userFactors.unpersist()
# pylint: disable=too-many-arguments
def _predict(
self,
log: Optional[DataFrame],
k: int,
users: DataFrame,
items: DataFrame,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
filter_seen_items: bool = True,
) -> DataFrame:
max_seen = 0
if filter_seen_items and log is not None:
max_seen_in_log = (
log.join(users, on="user_idx")
.groupBy("user_idx")
.agg(sf.count("user_idx").alias("num_seen"))
.select(sf.max("num_seen"))
.collect()[0][0]
)
max_seen = (
max_seen_in_log if max_seen_in_log is not None else 0
)
recs_als = self.model.recommendItemsForUserItemSubset(
users, items, k + max_seen
)
return (
recs_als.withColumn(
"recommendations", sf.explode("recommendations")
)
.withColumn("item_idx", sf.col("recommendations.item_idx"))
.withColumn(
"relevance",
sf.col("recommendations.rating").cast(DoubleType()),
)
.select("user_idx", "item_idx", "relevance")
)
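# `k + max_seen` recommendations are requested from ALS above so that, after seen items
# are filtered out downstream, at least k candidates remain for every user.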
def _predict_pairs(
self,
pairs: DataFrame,
log: Optional[DataFrame] = None,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
) -> DataFrame:
return (
self.model.transform(pairs)
.withColumn("relevance", sf.col("prediction").cast(DoubleType()))
.drop("prediction")
)
def _get_features(
self, ids: DataFrame, features: Optional[DataFrame]
) -> Tuple[Optional[DataFrame], Optional[int]]:
entity = "user" if "user_idx" in ids.columns else "item"
als_factors = getattr(self.model, f"{entity}Factors")
als_factors = als_factors.withColumnRenamed(
"id", f"{entity}_idx"
).withColumnRenamed("features", f"{entity}_factors")
return (
als_factors.join(ids, how="right", on=f"{entity}_idx"),
self.model.rank,
)
def _get_item_vectors(self):
return self.model.itemFactors.select(
sf.col("id").alias("item_idx"),
list_to_vector_udf(sf.col("features")).alias("item_vector"),
) | /replay_rec-0.11.0-py3-none-any.whl/replay/models/als.py | 0.914377 | 0.416441 | als.py | pypi |
from typing import Optional
from pyspark.sql import DataFrame
from pyspark.sql import functions as sf
from replay.models.base_rec import NonPersonalizedRecommender
class RandomRec(NonPersonalizedRecommender):
"""
Recommend random items, either weighted by item popularity or uniform.
.. math::
P\\left(i\\right)\\propto N_i + \\alpha
:math:`N_i` --- number of users who rated item :math:`i`
:math:`\\alpha` --- bigger :math:`\\alpha` values increase the amount of rare items in recommendations.
Must be bigger than -1. Default value is :math:`\\alpha = 0`.
A model without a seed provides non-deterministic recommendations,
a model with a fixed seed provides reproducible recommendations.
As the recommendations from `predict` are cached, save them to disk, or create a checkpoint
and unpersist them to get different recommendations after another `predict` call.
>>> from replay.session_handler import get_spark_session, State
>>> spark = get_spark_session(1, 1)
>>> state = State(spark)
>>> import pandas as pd
>>> from replay.utils import convert2spark
>>>
>>> log = convert2spark(pd.DataFrame({
... "user_idx": [1, 1, 2, 2, 3, 4],
... "item_idx": [1, 2, 2, 3, 3, 3]
... }))
>>> log.show()
+--------+--------+
|user_idx|item_idx|
+--------+--------+
| 1| 1|
| 1| 2|
| 2| 2|
| 2| 3|
| 3| 3|
| 4| 3|
+--------+--------+
<BLANKLINE>
>>> random_pop = RandomRec(distribution="popular_based", alpha=-1)
Traceback (most recent call last):
...
ValueError: alpha must be bigger than -1
>>> random_pop = RandomRec(distribution="abracadabra")
Traceback (most recent call last):
...
ValueError: distribution can be one of [popular_based, relevance, uniform]
>>> random_pop = RandomRec(distribution="popular_based", alpha=1.0, seed=777)
>>> random_pop.fit(log)
>>> random_pop.item_popularity.show()
+--------+------------------+
|item_idx| relevance|
+--------+------------------+
| 1|0.2222222222222222|
| 2|0.3333333333333333|
| 3|0.4444444444444444|
+--------+------------------+
<BLANKLINE>
>>> recs = random_pop.predict(log, 2)
>>> recs.show()
+--------+--------+------------------+
|user_idx|item_idx| relevance|
+--------+--------+------------------+
| 1| 3|0.3333333333333333|
| 2| 1| 0.5|
| 3| 2| 1.0|
| 3| 1|0.3333333333333333|
| 4| 2| 1.0|
| 4| 1| 0.5|
+--------+--------+------------------+
<BLANKLINE>
>>> recs = random_pop.predict(log, 2, users=[1], items=[7, 8])
>>> recs.show()
+--------+--------+---------+
|user_idx|item_idx|relevance|
+--------+--------+---------+
| 1| 7| 1.0|
| 1| 8| 0.5|
+--------+--------+---------+
<BLANKLINE>
>>> random_pop = RandomRec(seed=555)
>>> random_pop.fit(log)
>>> random_pop.item_popularity.show()
+--------+------------------+
|item_idx| relevance|
+--------+------------------+
| 1|0.3333333333333333|
| 2|0.3333333333333333|
| 3|0.3333333333333333|
+--------+------------------+
<BLANKLINE>
"""
_search_space = {
"distribution": {
"type": "categorical",
"args": ["popular_based", "relevance", "uniform"],
},
"alpha": {"type": "uniform", "args": [-0.5, 100]},
}
sample: bool = True
# pylint: disable=too-many-arguments
def __init__(
self,
distribution: str = "uniform",
alpha: float = 0.0,
seed: Optional[int] = None,
add_cold_items: bool = True,
cold_weight: float = 0.5,
):
"""
:param distribution: recommendation strategy:
"uniform" - all items are sampled uniformly
"popular_based" - recommend popular items more
:param alpha: bigger values adjust model towards less popular items
:param seed: random seed
:param add_cold_items: flag to consider cold items in recommendations building
if present in `items` parameter of `predict` method
or `pairs` parameter of `predict_pairs` methods.
If true, cold items are assigned a relevance equal to the least relevant item's relevance
multiplied by `cold_weight` and may appear among top-K recommendations.
Otherwise cold items are filtered out.
Could be changed after model training by setting the `add_cold_items` attribute.
:param cold_weight: if `add_cold_items` is True,
cold items are added with reduced relevance.
The relevance for cold items is equal to the relevance
of a least relevant item multiplied by a `cold_weight` value.
`Cold_weight` value should be in interval (0, 1].
"""
if distribution not in ("popular_based", "relevance", "uniform"):
raise ValueError(
"distribution can be one of [popular_based, relevance, uniform]"
)
if alpha <= -1.0 and distribution == "popular_based":
raise ValueError("alpha must be bigger than -1")
self.distribution = distribution
self.alpha = alpha
self.seed = seed
super().__init__(
add_cold_items=add_cold_items, cold_weight=cold_weight
)
@property
def _init_args(self):
return {
"distribution": self.distribution,
"alpha": self.alpha,
"seed": self.seed,
"add_cold_items": self.add_cold_items,
"cold_weight": self.cold_weight,
}
def _fit(
self,
log: DataFrame,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
) -> None:
if self.distribution == "popular_based":
self.item_popularity = (
log.groupBy("item_idx")
.agg(sf.countDistinct("user_idx").alias("user_count"))
.select(
sf.col("item_idx"),
(
sf.col("user_count").astype("float")
+ sf.lit(self.alpha)
).alias("relevance"),
)
)
elif self.distribution == "relevance":
self.item_popularity = (
log.groupBy("item_idx")
.agg(sf.sum("relevance").alias("relevance"))
.select("item_idx", "relevance")
)
else:
self.item_popularity = (
log.select("item_idx")
.distinct()
.withColumn("relevance", sf.lit(1.0))
)
self.item_popularity = self.item_popularity.withColumn(
"relevance",
sf.col("relevance")
/ self.item_popularity.agg(sf.sum("relevance")).first()[0],
)
self.item_popularity.cache().count()
self.fill = self._calc_fill(self.item_popularity, self.cold_weight) | /replay_rec-0.11.0-py3-none-any.whl/replay/models/random_rec.py | 0.932676 | 0.593904 | random_rec.py | pypi |
from typing import Optional
from pyspark.sql import DataFrame
from pyspark.sql import functions as sf
from scipy.stats import norm
from replay.models.pop_rec import PopRec
class Wilson(PopRec):
"""
Calculates lower confidence bound for the confidence interval
of true fraction of positive ratings.
``relevance`` must be converted to binary 0-1 form.
>>> import pandas as pd
>>> data_frame = pd.DataFrame({"user_idx": [1, 2], "item_idx": [1, 2], "relevance": [1, 1]})
>>> from replay.utils import convert2spark
>>> data_frame = convert2spark(data_frame)
>>> model = Wilson()
>>> model.fit_predict(data_frame,k=1).toPandas()
user_idx item_idx relevance
0 1 2 0.206549
1 2 1 0.206549
"""
# pylint: disable=too-many-arguments
def __init__(
self,
alpha=0.05,
add_cold_items: bool = True,
cold_weight: float = 0.5,
sample: bool = False,
seed: Optional[int] = None,
):
"""
:param alpha: significance level, default 0.05
:param add_cold_items: flag to consider cold items in recommendations building
if present in `items` parameter of `predict` method
or `pairs` parameter of `predict_pairs` methods.
If true, cold items are assigned a relevance equal to the least relevant item's relevance
multiplied by cold_weight and may appear among top-K recommendations.
Otherwise cold items are filtered out.
Could be changed after model training by setting the `add_cold_items` attribute.
:param cold_weight: if `add_cold_items` is True,
cold items are added with reduced relevance.
The relevance for cold items is equal to the relevance
of a least relevant item multiplied by a `cold_weight` value.
`Cold_weight` value should be in interval (0, 1].
:param sample: flag to choose recommendation strategy.
If True, items are sampled with a probability proportional
to the calculated predicted relevance.
Could be changed after model training by setting the `sample` attribute.
:param seed: random seed. Provides reproducibility if fixed
"""
self.alpha = alpha
self.sample = sample
self.seed = seed
super().__init__(
add_cold_items=add_cold_items, cold_weight=cold_weight
)
@property
def _init_args(self):
return {
"alpha": self.alpha,
"add_cold_items": self.add_cold_items,
"cold_weight": self.cold_weight,
"sample": self.sample,
"seed": self.seed,
}
def _fit(
self,
log: DataFrame,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
) -> None:
self._check_relevance(log)
items_counts = log.groupby("item_idx").agg(
sf.sum("relevance").alias("pos"),
sf.count("relevance").alias("total"),
)
# https://en.wikipedia.org/w/index.php?title=Binomial_proportion_confidence_interval
crit = norm.isf(self.alpha / 2.0)
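# The expression below is the lower Wilson bound rewritten with counts:
# lower = (pos + z^2/2 - z * sqrt(pos * (total - pos) / total + z^2/4)) / (total + z^2),
# where z = norm.isf(alpha / 2) and pos / total is the observed fraction of positives.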
items_counts = items_counts.withColumn(
"relevance",
(sf.col("pos") + sf.lit(0.5 * crit**2))
/ (sf.col("total") + sf.lit(crit**2))
- sf.lit(crit)
/ (sf.col("total") + sf.lit(crit**2))
* sf.sqrt(
(sf.col("total") - sf.col("pos"))
* sf.col("pos")
/ sf.col("total")
+ crit**2 / 4
),
)
self.item_popularity = items_counts.drop("pos", "total")
self.item_popularity.cache().count()
self.fill = self._calc_fill(self.item_popularity, self.cold_weight) | /replay_rec-0.11.0-py3-none-any.whl/replay/models/wilson.py | 0.95275 | 0.588653 | wilson.py | pypi |
from os.path import join
from typing import Optional
import pandas as pd
from pyspark.sql import DataFrame
from replay.models.base_rec import Recommender
from replay.utils import to_csr, save_picklable_to_parquet, load_pickled_from_parquet
from replay.constants import REC_SCHEMA
class ImplicitWrap(Recommender):
"""Wrapper for `implicit
<https://github.com/benfred/implicit>`_
Example:
>>> import implicit
>>> model = implicit.als.AlternatingLeastSquares(factors=5)
>>> als = ImplicitWrap(model)
This way you can use implicit models as any other in replay
with conversions made under the hood.
>>> import pandas as pd
>>> from replay.utils import convert2spark
>>> df = pd.DataFrame({"user_idx": [1, 1, 2, 2], "item_idx": [1, 2, 2, 3], "relevance": [1, 1, 1, 1]})
>>> df = convert2spark(df)
>>> als.fit_predict(df, 1, users=[1])[["user_idx", "item_idx"]].toPandas()
user_idx item_idx
0 1 3
"""
def __init__(self, model):
"""Provide initialized ``implicit`` model."""
self.model = model
self.logger.info(
"The model is a wrapper of a non-distributed model which may affect performance"
)
@property
def _init_args(self):
return {"model": None}
def _save_model(self, path: str):
save_picklable_to_parquet(self.model, join(path, "model"))
def _load_model(self, path: str):
self.model = load_pickled_from_parquet(join(path, "model"))
def _fit(
self,
log: DataFrame,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
) -> None:
matrix = to_csr(log)
self.model.fit(matrix)
@staticmethod
def _pd_func(model, items_to_use=None, user_item_data=None, filter_seen_items=False):
def predict_by_user_item(pandas_df):
user = int(pandas_df["user_idx"].iloc[0])
items = items_to_use if items_to_use else pandas_df.item_idx.to_list()
items_res, rel = model.recommend(
userid=user,
user_items=user_item_data[user] if filter_seen_items else None,
N=len(items),
filter_already_liked_items=filter_seen_items,
items=items,
)
return pd.DataFrame(
{
"user_idx": [user] * len(items_res),
"item_idx": items_res,
"relevance": rel,
}
)
return predict_by_user_item
# pylint: disable=too-many-arguments
def _predict(
self,
log: DataFrame,
k: int,
users: DataFrame,
items: DataFrame,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
filter_seen_items: bool = True,
) -> DataFrame:
items_to_use = items.distinct().toPandas().item_idx.tolist()
user_item_data = to_csr(log)
model = self.model
return (
users.select("user_idx")
.groupby("user_idx")
.applyInPandas(self._pd_func(
model=model,
items_to_use=items_to_use,
user_item_data=user_item_data,
filter_seen_items=filter_seen_items), REC_SCHEMA)
)
def _predict_pairs(
self,
pairs: DataFrame,
log: Optional[DataFrame] = None,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
) -> DataFrame:
model = self.model
return pairs.groupby("user_idx").applyInPandas(
self._pd_func(model=model, filter_seen_items=False),
REC_SCHEMA) | /replay_rec-0.11.0-py3-none-any.whl/replay/models/implicit_wrap.py | 0.887473 | 0.372962 | implicit_wrap.py | pypi |
from typing import Optional
from pyspark.sql import DataFrame
from pyspark.sql import functions as sf
from replay.models.base_rec import Recommender
class UserPopRec(Recommender):
"""
Recommends old objects from each user's personal top.
Input is the number of interactions between users and items.
Popularity for item :math:`i` and user :math:`u` is defined as the
fraction of actions with item :math:`i` among all interactions of user :math:`u`:
.. math::
Popularity(i_u) = \\dfrac{N_iu}{N_u}
:math:`N_iu` - number of interactions of user :math:`u` with item :math:`i`.
:math:`N_u` - total number of interactions of user :math:`u`.
>>> import pandas as pd
>>> data_frame = pd.DataFrame({"user_idx": [1, 1, 3], "item_idx": [1, 2, 3], "relevance": [2, 1, 1]})
>>> data_frame
user_idx item_idx relevance
0 1 1 2
1 1 2 1
2 3 3 1
>>> from replay.utils import convert2spark
>>> data_frame = convert2spark(data_frame)
>>> model = UserPopRec()
>>> res = model.fit_predict(data_frame, 1, filter_seen_items=False)
>>> model.user_item_popularity.count()
3
>>> res.toPandas().sort_values("user_idx", ignore_index=True)
user_idx item_idx relevance
0 1 1 0.666667
1 3 3 1.000000
"""
user_item_popularity: DataFrame
@property
def _init_args(self):
return {}
@property
def _dataframes(self):
return {"user_item_popularity": self.user_item_popularity}
def _fit(
self,
log: DataFrame,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
) -> None:
user_relevance_sum = (
log.groupBy("user_idx")
.agg(sf.sum("relevance").alias("user_rel_sum"))
.withColumnRenamed("user_idx", "user")
.select("user", "user_rel_sum")
)
self.user_item_popularity = (
log.groupBy("user_idx", "item_idx")
.agg(sf.sum("relevance").alias("user_item_rel_sum"))
.join(
user_relevance_sum,
how="inner",
on=sf.col("user_idx") == sf.col("user"),
)
.select(
"user_idx",
"item_idx",
(sf.col("user_item_rel_sum") / sf.col("user_rel_sum")).alias(
"relevance"
),
)
)
self.user_item_popularity.cache().count()
def _clear_cache(self):
if hasattr(self, "user_item_popularity"):
self.user_item_popularity.unpersist()
# pylint: disable=too-many-arguments
def _predict(
self,
log: DataFrame,
k: int,
users: DataFrame,
items: DataFrame,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
filter_seen_items: bool = True,
) -> DataFrame:
if filter_seen_items:
self.logger.warning(
"UserPopRec can't predict new items, recommendations will not be filtered"
)
return self.user_item_popularity.join(users, on="user_idx").join(
items, on="item_idx"
) | /replay_rec-0.11.0-py3-none-any.whl/replay/models/user_pop_rec.py | 0.932997 | 0.548008 | user_pop_rec.py | pypi |
from typing import Optional
from pyspark.sql import DataFrame
from pyspark.sql import functions as sf
from replay.models.base_rec import NonPersonalizedRecommender
class PopRec(NonPersonalizedRecommender):
"""
Recommend objects using their popularity.
Popularity of an item is the probability that a random user rated this item.
.. math::
Popularity(i) = \\dfrac{N_i}{N}
:math:`N_i` - number of users who rated item :math:`i`
:math:`N` - total number of users
>>> import pandas as pd
>>> data_frame = pd.DataFrame({"user_idx": [1, 1, 2, 2, 3, 4], "item_idx": [1, 2, 2, 3, 3, 3], "relevance": [0.5, 1, 0.1, 0.8, 0.7, 1]})
>>> data_frame
user_idx item_idx relevance
0 1 1 0.5
1 1 2 1.0
2 2 2 0.1
3 2 3 0.8
4 3 3 0.7
5 4 3 1.0
>>> from replay.utils import convert2spark
>>> data_frame = convert2spark(data_frame)
>>> res = PopRec().fit_predict(data_frame, 1)
>>> res.toPandas().sort_values("user_idx", ignore_index=True)
user_idx item_idx relevance
0 1 3 0.75
1 2 1 0.25
2 3 2 0.50
3 4 2 0.50
>>> res = PopRec().fit_predict(data_frame, 1, filter_seen_items=False)
>>> res.toPandas().sort_values("user_idx", ignore_index=True)
user_idx item_idx relevance
0 1 3 0.75
1 2 3 0.75
2 3 3 0.75
3 4 3 0.75
>>> res = PopRec(use_relevance=True).fit_predict(data_frame, 1)
>>> res.toPandas().sort_values("user_idx", ignore_index=True)
user_idx item_idx relevance
0 1 3 0.625
1 2 1 0.125
2 3 2 0.275
3 4 2 0.275
"""
sample: bool = False
def __init__(
self,
use_relevance: bool = False,
add_cold_items: bool = True,
cold_weight: float = 0.5,
):
"""
:param use_relevance: flag to use relevance values as is or to treat them as 1
:param add_cold_items: flag to consider cold items in recommendations building
if present in `items` parameter of `predict` method
or `pairs` parameter of `predict_pairs` methods.
If True, cold items are assigned relevance equal to the relevance of the least relevant item
multiplied by `cold_weight` and may appear among top-K recommendations.
Otherwise cold items are filtered out.
Can be changed after model training by setting the `add_cold_items` attribute.
:param cold_weight: if `add_cold_items` is True,
cold items are added with reduced relevance.
The relevance for cold items is equal to the relevance
of the least relevant item multiplied by the `cold_weight` value.
The `cold_weight` value should be in the interval (0, 1].
"""
self.use_relevance = use_relevance
super().__init__(
add_cold_items=add_cold_items, cold_weight=cold_weight
)
@property
def _init_args(self):
return {
"use_relevance": self.use_relevance,
"add_cold_items": self.add_cold_items,
"cold_weight": self.cold_weight,
}
def _fit(
self,
log: DataFrame,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
) -> None:
agg_func = sf.countDistinct("user_idx").alias("relevance")
if self.use_relevance:
agg_func = sf.sum("relevance").alias("relevance")
self.item_popularity = (
log.groupBy("item_idx")
.agg(agg_func)
.withColumn(
"relevance", sf.col("relevance") / sf.lit(self.users_count)
)
)
self.item_popularity.cache().count()
self.fill = self._calc_fill(self.item_popularity, self.cold_weight) | /replay_rec-0.11.0-py3-none-any.whl/replay/models/pop_rec.py | 0.938181 | 0.587352 | pop_rec.py | pypi |
from typing import Optional
from pyspark.sql import DataFrame
from pyspark.sql import functions as sf
from pyspark.sql.window import Window
from replay.models.base_rec import NeighbourRec
from replay.optuna_objective import ItemKNNObjective
class ItemKNN(NeighbourRec):
"""Item-based ItemKNN with modified cosine similarity measure."""
all_items: Optional[DataFrame]
dot_products: Optional[DataFrame]
item_norms: Optional[DataFrame]
bm25_k1 = 1.2
bm25_b = 0.75
_objective = ItemKNNObjective
_search_space = {
"num_neighbours": {"type": "int", "args": [1, 100]},
"shrink": {"type": "int", "args": [0, 100]},
"weighting": {"type": "categorical", "args": [None, "tf_idf", "bm25"]}
}
def __init__(
self,
num_neighbours: int = 10,
use_relevance: bool = False,
shrink: float = 0.0,
weighting: Optional[str] = None,
):
"""
:param num_neighbours: number of neighbours
:param use_relevance: flag to use relevance values as is or to treat them as 1
:param shrink: term added to the denominator when calculating similarity
:param weighting: item reweighting type, one of [None, 'tf_idf', 'bm25']
"""
self.shrink = shrink
self.use_relevance = use_relevance
self.num_neighbours = num_neighbours
valid_weightings = self._search_space["weighting"]["args"]
if weighting not in valid_weightings:
raise ValueError(f"weighting must be one of {valid_weightings}")
self.weighting = weighting
@property
def _init_args(self):
return {
"shrink": self.shrink,
"use_relevance": self.use_relevance,
"num_neighbours": self.num_neighbours,
"weighting": self.weighting,
}
@staticmethod
def _shrink(dot_products: DataFrame, shrink: float) -> DataFrame:
return dot_products.withColumn(
"similarity",
sf.col("dot_product")
/ (sf.col("norm1") * sf.col("norm2") + shrink),
).select("item_idx_one", "item_idx_two", "similarity")
def _get_similarity(self, log: DataFrame) -> DataFrame:
"""
Calculate item similarities
:param log: DataFrame with interactions, `[user_idx, item_idx, relevance]`
:return: similarity matrix `[item_idx_one, item_idx_two, similarity]`
"""
dot_products = self._get_products(log)
similarity = self._shrink(dot_products, self.shrink)
return similarity
def _reweight_log(self, log: DataFrame):
"""
Reweight relevance according to TF-IDF or BM25 weighting.
:param log: DataFrame with interactions, `[user_idx, item_idx, relevance]`
:return: log `[user_idx, item_idx, relevance]`
"""
if self.weighting == "bm25":
log = self._get_tf_bm25(log)
idf = self._get_idf(log)
log = log.join(idf, how="inner", on="user_idx").withColumn(
"relevance",
sf.col("relevance") * sf.col("idf"),
)
return log
def _get_tf_bm25(self, log: DataFrame):
"""
Adjust relevance by BM25 term frequency.
:param log: DataFrame with interactions, `[user_idx, item_idx, relevance]`
:return: log `[user_idx, item_idx, relevance]`
"""
item_stats = log.groupBy("item_idx").agg(
sf.count("user_idx").alias("n_users_per_item")
)
avgdl = item_stats.select(sf.mean("n_users_per_item")).take(1)[0][0]
log = log.join(item_stats, how="inner", on="item_idx")
log = (
log.withColumn(
"relevance",
sf.col("relevance") * (self.bm25_k1 + 1) / (
sf.col("relevance") + self.bm25_k1 * (
1 - self.bm25_b + self.bm25_b * (
sf.col("n_users_per_item") / avgdl
)
)
)
)
.drop("n_users_per_item")
)
return log
def _get_idf(self, log: DataFrame):
"""
Return the inverse document frequency (IDF) score for log reweighting.
:param log: DataFrame with interactions, `[user_idx, item_idx, relevance]`
:return: idf `[idf]`
:raises: ValueError if self.weighting not in ["tf_idf", "bm25"]
"""
df = log.groupBy("user_idx").agg(sf.count("item_idx").alias("DF"))
n_items = log.select("item_idx").distinct().count()
if self.weighting == "tf_idf":
idf = (
df.withColumn("idf", sf.log1p(sf.lit(n_items) / sf.col("DF")))
.drop("DF")
)
elif self.weighting == "bm25":
idf = (
df.withColumn(
"idf",
sf.log1p(
(sf.lit(n_items) - sf.col("DF") + 0.5)
/ (sf.col("DF") + 0.5)
),
)
.drop("DF")
)
else:
raise ValueError("weighting must be one of ['tf_idf', 'bm25']")
return idf
def _get_products(self, log: DataFrame) -> DataFrame:
"""
Calculate item dot products
:param log: DataFrame with interactions, `[user_idx, item_idx, relevance]`
:return: similarity matrix `[item_idx_one, item_idx_two, norm1, norm2]`
"""
if self.weighting:
log = self._reweight_log(log)
left = log.withColumnRenamed(
"item_idx", "item_idx_one"
).withColumnRenamed("relevance", "rel_one")
right = log.withColumnRenamed(
"item_idx", "item_idx_two"
).withColumnRenamed("relevance", "rel_two")
dot_products = (
left.join(right, how="inner", on="user_idx")
.filter(sf.col("item_idx_one") != sf.col("item_idx_two"))
.withColumn("relevance", sf.col("rel_one") * sf.col("rel_two"))
.groupBy("item_idx_one", "item_idx_two")
.agg(sf.sum("relevance").alias("dot_product"))
)
item_norms = (
log.withColumn("relevance", sf.col("relevance") ** 2)
.groupBy("item_idx")
.agg(sf.sum("relevance").alias("square_norm"))
.select(sf.col("item_idx"), sf.sqrt("square_norm").alias("norm"))
)
norm1 = item_norms.withColumnRenamed(
"item_idx", "item_id1"
).withColumnRenamed("norm", "norm1")
norm2 = item_norms.withColumnRenamed(
"item_idx", "item_id2"
).withColumnRenamed("norm", "norm2")
dot_products = dot_products.join(
norm1, how="inner", on=sf.col("item_id1") == sf.col("item_idx_one")
)
dot_products = dot_products.join(
norm2, how="inner", on=sf.col("item_id2") == sf.col("item_idx_two")
)
return dot_products
def _get_k_most_similar(self, similarity_matrix: DataFrame) -> DataFrame:
"""
Leaves only top-k neighbours for each item
:param similarity_matrix: dataframe `[item_idx_one, item_idx_two, similarity]`
:return: cropped similarity matrix
"""
return (
similarity_matrix.withColumn(
"similarity_order",
sf.row_number().over(
Window.partitionBy("item_idx_one").orderBy(
sf.col("similarity").desc(),
sf.col("item_idx_two").desc(),
)
),
)
.filter(sf.col("similarity_order") <= self.num_neighbours)
.drop("similarity_order")
)
def _fit(
self,
log: DataFrame,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
) -> None:
df = log.select("user_idx", "item_idx", "relevance")
if not self.use_relevance:
df = df.withColumn("relevance", sf.lit(1))
similarity_matrix = self._get_similarity(df)
self.similarity = self._get_k_most_similar(similarity_matrix)
self.similarity.cache().count() | /replay_rec-0.11.0-py3-none-any.whl/replay/models/knn.py | 0.939906 | 0.48932 | knn.py | pypi |
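A short usage sketch for ItemKNN above (not part of the library source; it follows the pandas-to-Spark and fit_predict pattern used in the doctests elsewhere in this package):

import pandas as pd

from replay.models.knn import ItemKNN
from replay.utils import convert2spark

log = convert2spark(
    pd.DataFrame(
        {
            "user_idx": [0, 0, 1, 1, 2],
            "item_idx": [0, 1, 1, 2, 2],
            "relevance": [1.0, 1.0, 1.0, 1.0, 1.0],
        }
    )
)
# BM25 reweighting is applied to the log before the cosine similarities are computed;
# num_neighbours keeps only the top-k most similar items per item
model = ItemKNN(num_neighbours=2, weighting="bm25")
recs = model.fit_predict(log, 1)
recs.show()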
import os
from os.path import join
from typing import Optional, Tuple
import numpy as np
import pandas as pd
import pyspark.sql.functions as sf
from lightfm import LightFM
from pyspark.sql import DataFrame
from scipy.sparse import csr_matrix, hstack, diags
from sklearn.preprocessing import MinMaxScaler
from replay.constants import REC_SCHEMA
from replay.models.base_rec import HybridRecommender
from replay.utils import to_csr, check_numeric, save_picklable_to_parquet, load_pickled_from_parquet
from replay.session_handler import State
# pylint: disable=too-many-locals, too-many-instance-attributes
class LightFMWrap(HybridRecommender):
"""Wrapper for LightFM."""
epochs: int = 10
_search_space = {
"loss": {
"type": "categorical",
"args": ["logistic", "bpr", "warp", "warp-kos"],
},
"no_components": {"type": "loguniform_int", "args": [8, 512]},
}
user_feat_scaler: Optional[MinMaxScaler] = None
item_feat_scaler: Optional[MinMaxScaler] = None
def __init__(
self,
no_components: int = 128,
loss: str = "warp",
random_state: Optional[int] = None,
): # pylint: disable=too-many-arguments
np.random.seed(42)
self.no_components = no_components
self.loss = loss
self.random_state = random_state
cpu_count = os.cpu_count()
self.num_threads = cpu_count if cpu_count is not None else 1
@property
def _init_args(self):
return {
"no_components": self.no_components,
"loss": self.loss,
"random_state": self.random_state,
}
def _save_model(self, path: str):
save_picklable_to_parquet(self.model, join(path, "model"))
save_picklable_to_parquet(self.user_feat_scaler, join(path, "user_feat_scaler"))
save_picklable_to_parquet(self.item_feat_scaler, join(path, "item_feat_scaler"))
def _load_model(self, path: str):
self.model = load_pickled_from_parquet(join(path, "model"))
self.user_feat_scaler = load_pickled_from_parquet(join(path, "user_feat_scaler"))
self.item_feat_scaler = load_pickled_from_parquet(join(path, "item_feat_scaler"))
def _feature_table_to_csr(
self,
log_ids_list: DataFrame,
feature_table: Optional[DataFrame] = None,
) -> Optional[csr_matrix]:
"""
Transform features to sparse matrix
The matrix consists of two parts:
1) The left part is a one-hot encoding of user or item ids.
Its size is: number of users or items * number of users or items seen in fit.
Cold users and items are represented with empty rows.
2) The right part contains the numerical features passed with feature_table.
MinMaxScaler is applied per column, and then each value is divided by the row sum.
:param feature_table: dataframe with ``user_idx`` or ``item_idx``,
other columns are features.
:param log_ids_list: dataframe with ``user_idx`` or ``item_idx``,
containing unique ids from log.
:returns: feature matrix
"""
if feature_table is None:
return None
check_numeric(feature_table)
log_ids_list = log_ids_list.distinct()
entity = "item" if "item_idx" in feature_table.columns else "user"
idx_col_name = f"{entity}_idx"
# filter features by log
feature_table = feature_table.join(
log_ids_list, on=idx_col_name, how="inner"
)
fit_dim = getattr(self, f"_{entity}_dim")
matrix_height = max(
fit_dim,
log_ids_list.select(sf.max(idx_col_name)).collect()[0][0] + 1,
)
if not feature_table.rdd.isEmpty():
matrix_height = max(
matrix_height,
feature_table.select(sf.max(idx_col_name)).collect()[0][0] + 1,
)
features_np = (
feature_table.select(
idx_col_name,
# first column contains the id, the remaining columns contain features
*(
sorted(
list(
set(feature_table.columns).difference(
{idx_col_name}
)
)
)
),
)
.toPandas()
.to_numpy()
)
entities_ids = features_np[:, 0]
features_np = features_np[:, 1:]
number_of_features = features_np.shape[1]
all_ids_list = log_ids_list.toPandas().to_numpy().ravel()
entities_seen_in_fit = all_ids_list[all_ids_list < fit_dim]
entity_id_features = csr_matrix(
(
[1.0] * entities_seen_in_fit.shape[0],
(entities_seen_in_fit, entities_seen_in_fit),
),
shape=(matrix_height, fit_dim),
)
scaler_name = f"{entity}_feat_scaler"
if getattr(self, scaler_name) is None:
if not features_np.size:
raise ValueError(f"features for {entity}s from log are absent")
setattr(self, scaler_name, MinMaxScaler().fit(features_np))
if features_np.size:
features_np = getattr(self, scaler_name).transform(features_np)
sparse_features = csr_matrix(
(
features_np.ravel(),
(
np.repeat(entities_ids, number_of_features),
np.tile(
np.arange(number_of_features),
entities_ids.shape[0],
),
),
),
shape=(matrix_height, number_of_features),
)
else:
sparse_features = csr_matrix((matrix_height, number_of_features))
concat_features = hstack([entity_id_features, sparse_features])
concat_features_sum = concat_features.sum(axis=1).A.ravel()
mask = concat_features_sum != 0.0
concat_features_sum[mask] = 1.0 / concat_features_sum[mask]
return diags(concat_features_sum, format="csr") @ concat_features
def _fit(
self,
log: DataFrame,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
) -> None:
self.user_feat_scaler = None
self.item_feat_scaler = None
interactions_matrix = to_csr(log, self._user_dim, self._item_dim)
csr_item_features = self._feature_table_to_csr(
log.select("item_idx").distinct(), item_features
)
csr_user_features = self._feature_table_to_csr(
log.select("user_idx").distinct(), user_features
)
if user_features is not None:
self.can_predict_cold_users = True
if item_features is not None:
self.can_predict_cold_items = True
self.model = LightFM(
loss=self.loss,
no_components=self.no_components,
random_state=self.random_state,
).fit(
interactions=interactions_matrix,
epochs=self.epochs,
num_threads=self.num_threads,
item_features=csr_item_features,
user_features=csr_user_features,
)
def _predict_selected_pairs(
self,
pairs: DataFrame,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
):
def predict_by_user(pandas_df: pd.DataFrame) -> pd.DataFrame:
pandas_df["relevance"] = model.predict(
user_ids=pandas_df["user_idx"].to_numpy(),
item_ids=pandas_df["item_idx"].to_numpy(),
item_features=csr_item_features,
user_features=csr_user_features,
)
return pandas_df
model = self.model
if self.can_predict_cold_users and user_features is None:
raise ValueError("User features are missing for predict")
if self.can_predict_cold_items and item_features is None:
raise ValueError("Item features are missing for predict")
csr_item_features = self._feature_table_to_csr(
pairs.select("item_idx").distinct(), item_features
)
csr_user_features = self._feature_table_to_csr(
pairs.select("user_idx").distinct(), user_features
)
return pairs.groupby("user_idx").applyInPandas(
predict_by_user, REC_SCHEMA
)
# pylint: disable=too-many-arguments
def _predict(
self,
log: DataFrame,
k: int,
users: DataFrame,
items: DataFrame,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
filter_seen_items: bool = True,
) -> DataFrame:
return self._predict_selected_pairs(
users.crossJoin(items), user_features, item_features
)
def _predict_pairs(
self,
pairs: DataFrame,
log: Optional[DataFrame] = None,
user_features: Optional[DataFrame] = None,
item_features: Optional[DataFrame] = None,
) -> DataFrame:
return self._predict_selected_pairs(
pairs, user_features, item_features
)
def _get_features(
self, ids: DataFrame, features: Optional[DataFrame]
) -> Tuple[Optional[DataFrame], Optional[int]]:
"""
Get features from LightFM.
LightFM has methods get_item_representations/get_user_representations,
which accept a feature matrix and return biases and latent vectors.
:param ids: id item_idx/user_idx to get features for
:param features: features for item_idx/user_idx
:return: spark-dataframe with biases and vectors for users/items and vector size
"""
entity = "item" if "item_idx" in ids.columns else "user"
ids_list = ids.toPandas()[f"{entity}_idx"]
# models without features use sparse matrix
if features is None:
matrix_width = getattr(self, f"fit_{entity}s").count()
warm_ids = ids_list[ids_list < matrix_width]
sparse_features = csr_matrix(
(
[1] * warm_ids.shape[0],
(warm_ids, warm_ids),
),
shape=(ids_list.max() + 1, matrix_width),
)
else:
sparse_features = self._feature_table_to_csr(ids, features)
biases, vectors = getattr(self.model, f"get_{entity}_representations")(
sparse_features
)
embed_list = list(
zip(
ids_list,
biases[ids_list].tolist(),
vectors[ids_list].tolist(),
)
)
lightfm_factors = State().session.createDataFrame(
embed_list,
schema=[
f"{entity}_idx",
f"{entity}_bias",
f"{entity}_factors",
],
)
return lightfm_factors, self.model.no_components | /replay_rec-0.11.0-py3-none-any.whl/replay/models/lightfm_wrap.py | 0.864382 | 0.276758 | lightfm_wrap.py | pypi |
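A minimal NumPy/SciPy sketch of the feature-matrix layout built by `_feature_table_to_csr` above: a one-hot identity block for ids seen in fit, horizontally stacked with MinMax-scaled numeric features, then normalized so each non-empty row sums to one. It mirrors the code but is not part of the library; the toy ids and feature values are made up for illustration.

import numpy as np
from scipy.sparse import csr_matrix, diags, hstack
from sklearn.preprocessing import MinMaxScaler

n_fit = 3                     # ids 0..2 were seen during fit; id 3 is "cold"
ids = np.array([0, 1, 2, 3])
features = np.array([[1.0], [2.0], [4.0], [8.0]])  # one numeric feature per id

warm = ids[ids < n_fit]
id_block = csr_matrix((np.ones(len(warm)), (warm, warm)), shape=(len(ids), n_fit))

scaled = MinMaxScaler().fit_transform(features)
feat_block = csr_matrix(scaled)

concat = hstack([id_block, feat_block]).tocsr()
row_sum = concat.sum(axis=1).A.ravel()
row_sum[row_sum != 0] = 1.0 / row_sum[row_sum != 0]
normalized = diags(row_sum, format="csr") @ concat  # each non-empty row now sums to 1
print(normalized.toarray())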
from abc import ABC, abstractmethod
from typing import Optional, Tuple
from pyspark.sql import DataFrame
from pyspark.sql import functions as sf
from replay.constants import AnyDataFrame
from replay.utils import convert2spark
SplitterReturnType = Tuple[DataFrame, DataFrame]
# pylint: disable=too-few-public-methods
class Splitter(ABC):
"""Base class"""
_init_arg_names = [
"drop_cold_users",
"drop_cold_items",
"drop_zero_rel_in_test",
"user_col",
"item_col",
"date_col",
]
# pylint: disable=too-many-arguments
def __init__(
self,
drop_cold_items: bool,
drop_cold_users: bool,
drop_zero_rel_in_test: bool,
user_col: str = "user_idx",
item_col: Optional[str] = "item_idx",
date_col: Optional[str] = "timestamp",
):
"""
:param drop_cold_items: flag to remove items that are not in train data
:param drop_cold_users: flag to remove users that are not in train data
:param drop_zero_rel_in_test: flag to remove entries with relevance <= 0
from the test part of the dataset
:param user_col: user id column name
:param item_col: item id column name
:param date_col: timestamp column name
"""
self.drop_cold_users = drop_cold_users
self.drop_cold_items = drop_cold_items
self.drop_zero_rel_in_test = drop_zero_rel_in_test
self.user_col = user_col
self.item_col = item_col
self.date_col = date_col
@property
def _init_args(self):
return {name: getattr(self, name) for name in self._init_arg_names}
def __str__(self):
return type(self).__name__
def _filter_zero_relevance(self, dataframe: DataFrame) -> DataFrame:
"""
Removes records with zero relevance if required by
`drop_zero_rel_in_test` initialization parameter
:param dataframe: input DataFrame
:returns: filtered DataFrame
"""
if self.drop_zero_rel_in_test:
return dataframe.filter("relevance > 0.0")
return dataframe
# pylint: disable=too-many-arguments
@staticmethod
def _drop_cold_items_and_users(
train: DataFrame,
test: DataFrame,
drop_cold_items: bool,
drop_cold_users: bool,
user_col: str = "user_idx",
item_col: Optional[str] = "item_idx"
) -> DataFrame:
"""
Removes cold users and items from the test data
:param train: train DataFrame `[timestamp, user_id, item_id, relevance]`
:param test: DataFrame like train
:param drop_cold_items: flag to remove cold items
:param drop_cold_users: flag to remove cold users
:param user_col: user id column name
:param item_col: item id column name
:return: filtered DataFrame
"""
if drop_cold_items:
train_tmp = train.select(
sf.col(item_col).alias("_item_id_inner")
).distinct()
test = test.join(train_tmp, sf.col(item_col) == sf.col("_item_id_inner")).drop(
"_item_id_inner"
)
if drop_cold_users:
train_tmp = train.select(
sf.col(user_col).alias("_user_id_inner")
).distinct()
test = test.join(train_tmp, sf.col(user_col) == sf.col("_user_id_inner")).drop(
"_user_id_inner"
)
return test
@abstractmethod
def _core_split(self, log: DataFrame) -> SplitterReturnType:
"""
This method implements split strategy
:param log: input DataFrame `[timestamp, user_id, item_id, relevance]`
:returns: `train` and `test` DataFrames
"""
def split(self, log: AnyDataFrame) -> SplitterReturnType:
"""
Splits input DataFrame into train and test
:param log: input DataFrame ``[timestamp, user_id, item_id, relevance]``
:returns: `train` and `test` DataFrames
"""
train, test = self._core_split(convert2spark(log)) # type: ignore
train.cache()
train.count()
test = self._drop_cold_items_and_users(
train, test, self.drop_cold_items, self.drop_cold_users, self.user_col, self.item_col
)
test = self._filter_zero_relevance(test).cache()
test.count()
return train, test | /replay_rec-0.11.0-py3-none-any.whl/replay/splitters/base_splitter.py | 0.898494 | 0.483709 | base_splitter.py | pypi |
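A minimal sketch of a custom splitter built on the abstract base class above. The threshold-based strategy is purely illustrative and is not something the library ships:

from pyspark.sql import DataFrame
from pyspark.sql import functions as sf

from replay.splitters.base_splitter import Splitter, SplitterReturnType


class RelevanceThresholdSplitter(Splitter):
    """Toy strategy: interactions with relevance above a threshold go to test."""

    def __init__(self, threshold: float = 3.0):
        super().__init__(
            drop_cold_items=False,
            drop_cold_users=False,
            drop_zero_rel_in_test=True,
        )
        self.threshold = threshold

    def _core_split(self, log: DataFrame) -> SplitterReturnType:
        train = log.filter(sf.col("relevance") <= self.threshold)
        test = log.filter(sf.col("relevance") > self.threshold)
        return train, test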
from datetime import datetime
from typing import Optional, Union
import pyspark.sql.functions as sf
from pyspark.sql import DataFrame, Window
from replay.splitters.base_splitter import (
Splitter,
SplitterReturnType,
)
# pylint: disable=too-few-public-methods
class DateSplitter(Splitter):
"""
Split into train and test by date.
"""
_init_arg_names = [
"test_start",
"drop_cold_users",
"drop_cold_items",
"drop_zero_rel_in_test",
"user_col",
"item_col",
"date_col",
]
# pylint: disable=too-many-arguments
def __init__(
self,
test_start: Union[datetime, float, str, int],
drop_cold_items: bool = False,
drop_cold_users: bool = False,
drop_zero_rel_in_test: bool = True,
user_col: str = "user_idx",
item_col: Optional[str] = "item_idx",
date_col: Optional[str] = "timestamp",
):
"""
:param test_start: string ``yyyy-mm-dd``, int unix timestamp, datetime, or a
float fraction of the data to put in test (the split date is then determined automatically)
:param drop_cold_items: flag to drop cold items from test
:param drop_cold_users: flag to drop cold users from test
:param drop_zero_rel_in_test: flag to remove entries with relevance <= 0
from the test part of the dataset
:param user_col: user id column name
:param item_col: item id column name
:param date_col: timestamp column name
"""
super().__init__(
drop_cold_items=drop_cold_items,
drop_cold_users=drop_cold_users,
drop_zero_rel_in_test=drop_zero_rel_in_test,
user_col=user_col,
item_col=item_col,
date_col=date_col,
)
self.test_start = test_start
def _core_split(self, log: DataFrame) -> SplitterReturnType:
if isinstance(self.test_start, float):
dates = log.select(self.date_col).withColumn(
"_row_number_by_ts", sf.row_number().over(Window.orderBy(self.date_col))
)
test_start = int(dates.count() * (1 - self.test_start)) + 1
test_start = (
dates.filter(sf.col("_row_number_by_ts") == test_start)
.select(self.date_col)
.collect()[0][0]
)
else:
dtype = dict(log.dtypes)[self.date_col]
test_start = sf.lit(self.test_start).cast(self.date_col).cast(dtype)
train = log.filter(sf.col(self.date_col) < test_start)
test = log.filter(sf.col(self.date_col) >= test_start)
return train, test
# pylint: disable=too-few-public-methods
class RandomSplitter(Splitter):
"""Assign records into train and test at random."""
_init_arg_names = [
"test_size",
"drop_cold_items",
"drop_cold_users",
"drop_zero_rel_in_test",
"seed",
"user_col",
"item_col",
"date_col",
]
# pylint: disable=too-many-arguments
def __init__(
self,
test_size: float,
drop_cold_items: bool = False,
drop_cold_users: bool = False,
drop_zero_rel_in_test: bool = True,
seed: Optional[int] = None,
user_col: str = "user_idx",
item_col: Optional[str] = "item_idx",
date_col: Optional[str] = "timestamp",
):
"""
:param test_size: test size 0 to 1
:param drop_cold_items: flag to drop cold items from test
:param drop_cold_users: flag to drop cold users from test
:param drop_zero_rel_in_test: flag to remove entries with relevance <= 0
from the test part of the dataset
:param seed: random seed
:param user_col: user id column name
:param item_col: item id column name
:param date_col: timestamp column name
"""
super().__init__(
drop_cold_items=drop_cold_items,
drop_cold_users=drop_cold_users,
drop_zero_rel_in_test=drop_zero_rel_in_test,
user_col=user_col,
item_col=item_col,
date_col=date_col,
)
self.seed = seed
self.test_size = test_size
if test_size < 0 or test_size > 1:
raise ValueError("test_size must be 0 to 1")
def _core_split(self, log: DataFrame) -> SplitterReturnType:
train, test = log.randomSplit(
[1 - self.test_size, self.test_size], self.seed
)
return train, test
# pylint: disable=too-few-public-methods
class NewUsersSplitter(Splitter):
"""
Only new users will be assigned to the test set.
Splits log by timestamp so that test has `test_size` fraction of most recent users.
>>> from replay.splitters import NewUsersSplitter
>>> import pandas as pd
>>> data_frame = pd.DataFrame({"user_idx": [1,1,2,2,3,4],
... "item_idx": [1,2,3,1,2,3],
... "relevance": [1,2,3,4,5,6],
... "timestamp": [20,40,20,30,10,40]})
>>> data_frame
user_idx item_idx relevance timestamp
0 1 1 1 20
1 1 2 2 40
2 2 3 3 20
3 2 1 4 30
4 3 2 5 10
5 4 3 6 40
>>> train, test = NewUsersSplitter(test_size=0.1).split(data_frame)
>>> train.show()
+--------+--------+---------+---------+
|user_idx|item_idx|relevance|timestamp|
+--------+--------+---------+---------+
| 1| 1| 1| 20|
| 2| 3| 3| 20|
| 2| 1| 4| 30|
| 3| 2| 5| 10|
+--------+--------+---------+---------+
<BLANKLINE>
>>> test.show()
+--------+--------+---------+---------+
|user_idx|item_idx|relevance|timestamp|
+--------+--------+---------+---------+
| 4| 3| 6| 40|
+--------+--------+---------+---------+
<BLANKLINE>
Train DataFrame can be drastically reduced even with moderate
`test_size` if the amount of new users is small.
>>> train, test = NewUsersSplitter(test_size=0.3).split(data_frame)
>>> train.show()
+--------+--------+---------+---------+
|user_idx|item_idx|relevance|timestamp|
+--------+--------+---------+---------+
| 3| 2| 5| 10|
+--------+--------+---------+---------+
<BLANKLINE>
"""
_init_arg_names = ["test_size",
"drop_cold_items",
"drop_zero_rel_in_test",
"user_col",
"item_col",
"date_col",]
# pylint: disable=too-many-arguments
def __init__(
self,
test_size: float,
drop_cold_items: bool = False,
drop_zero_rel_in_test: bool = True,
user_col: str = "user_idx",
item_col: Optional[str] = "item_idx",
date_col: Optional[str] = "timestamp",
):
"""
:param test_size: test size 0 to 1
:param drop_cold_items: flag to drop cold items from test
:param drop_zero_rel_in_test: flag to remove entries with relevance <= 0
from the test part of the dataset
:param user_col: user id column name
:param item_col: item id column name
:param date_col: timestamp column name
"""
super().__init__(
drop_cold_items=drop_cold_items,
drop_cold_users=False,
drop_zero_rel_in_test=drop_zero_rel_in_test,
user_col=user_col,
item_col=item_col,
date_col=date_col,
)
self.test_size = test_size
if test_size < 0 or test_size > 1:
raise ValueError("test_size must be 0 to 1")
def _core_split(self, log: DataFrame) -> SplitterReturnType:
start_date_by_user = log.groupby(self.user_col).agg(
sf.min(self.date_col).alias("_start_dt_by_user")
)
test_start_date = (
start_date_by_user.groupby("_start_dt_by_user")
.agg(sf.count(self.user_col).alias("_num_users_by_start_date"))
.select(
"_start_dt_by_user",
sf.sum("_num_users_by_start_date")
.over(Window.orderBy(sf.desc("_start_dt_by_user")))
.alias("_cum_num_users_to_dt"),
sf.sum("_num_users_by_start_date").over(Window.orderBy(sf.lit(1))).alias("total"),
)
.filter(sf.col("_cum_num_users_to_dt") >= sf.col("total") * self.test_size)
.agg(sf.max("_start_dt_by_user"))
.head()[0]
)
train = log.filter(sf.col(self.date_col) < test_start_date)
test = log.join(
start_date_by_user.filter(sf.col("_start_dt_by_user") >= test_start_date),
how="inner",
on=self.user_col,
).drop("_start_dt_by_user")
return train, test
# pylint: disable=too-few-public-methods
class ColdUserRandomSplitter(Splitter):
"""
Test set consists of all actions of randomly chosen users.
"""
# for use in tests
_init_arg_names = [
"test_size",
"drop_cold_items",
"drop_cold_users",
"drop_zero_rel_in_test",
"seed",
"user_col",
"item_col",
"date_col",
]
# pylint: disable=too-many-arguments
def __init__(
self,
test_size: float,
drop_cold_items: bool = False,
drop_cold_users: bool = False,
drop_zero_rel_in_test: bool = True,
seed: Optional[int] = None,
user_col: str = "user_idx",
item_col: Optional[str] = "item_idx",
date_col: Optional[str] = "timestamp",
):
"""
:param test_size: fraction of users to be in test
:param drop_cold_items: flag to drop cold items from test
:param drop_cold_users: flag to drop cold users from test
:param drop_zero_rel_in_test: flag to remove entries with relevance <= 0
from the test part of the dataset
:param seed: random seed
:param user_col: user id column name
:param item_col: item id column name
:param date_col: timestamp column name
"""
super().__init__(
drop_cold_items=drop_cold_items,
drop_cold_users=drop_cold_users,
drop_zero_rel_in_test=drop_zero_rel_in_test,
user_col=user_col,
item_col=item_col,
date_col=date_col,
)
self.test_size = test_size
self.seed = seed
def _core_split(self, log: DataFrame) -> SplitterReturnType:
users = log.select(self.user_col).distinct()
train_users, test_users = users.randomSplit(
[1 - self.test_size, self.test_size],
seed=self.seed,
)
train = log.join(train_users, on=self.user_col, how="inner")
test = log.join(test_users, on=self.user_col, how="inner")
return train, test | /replay_rec-0.11.0-py3-none-any.whl/replay/splitters/log_splitter.py | 0.885866 | 0.342627 | log_splitter.py | pypi |
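A short usage sketch for the splitters above (not part of the library source), following the pandas-to-Spark pattern used in the NewUsersSplitter doctest:

import pandas as pd

from replay.splitters.log_splitter import DateSplitter, RandomSplitter

log = pd.DataFrame(
    {
        "user_idx": [1, 1, 2, 2, 3, 4],
        "item_idx": [1, 2, 3, 1, 2, 3],
        "relevance": [1, 2, 3, 4, 5, 6],
        "timestamp": [20, 40, 20, 30, 10, 40],
    }
)

# a float test_start is interpreted as the test fraction; the split date is found automatically
train, test = DateSplitter(test_start=0.3).split(log)

# fully random split with a fixed seed; ColdUserRandomSplitter would instead move whole users
train, test = RandomSplitter(test_size=0.5, seed=42).split(log)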
from typing import Optional, Union
import pyspark.sql.functions as sf
from pyspark.sql import DataFrame, Window
from replay.constants import AnyDataFrame
from replay.splitters.base_splitter import Splitter, SplitterReturnType
from replay.utils import convert2spark
# pylint: disable=too-few-public-methods
class UserSplitter(Splitter):
"""
Split data inside each user's history separately.
Example:
>>> from replay.session_handler import get_spark_session, State
>>> spark = get_spark_session(1, 1)
>>> state = State(spark)
>>> from replay.splitters import UserSplitter
>>> import pandas as pd
>>> data_frame = pd.DataFrame({"user_idx": [1,1,1,2,2,2],
... "item_idx": [1,2,3,1,2,3],
... "relevance": [1,2,3,4,5,6],
... "timestamp": [1,2,3,3,2,1]})
>>> data_frame
user_idx item_idx relevance timestamp
0 1 1 1 1
1 1 2 2 2
2 1 3 3 3
3 2 1 4 3
4 2 2 5 2
5 2 3 6 1
>>> from replay.utils import convert2spark
>>> data_frame = convert2spark(data_frame)
By default, the test set contains the last item of each user
>>> UserSplitter(seed=80083).split(data_frame)[-1].toPandas()
user_idx item_idx relevance timestamp
0 1 3 3 3
1 2 1 4 3
Random records can be retrieved with ``shuffle``:
>>> UserSplitter(shuffle=True, seed=80083).split(data_frame)[-1].toPandas()
user_idx item_idx relevance timestamp
0 1 2 2 2
1 2 3 6 1
You can specify the number of items for each user:
>>> UserSplitter(item_test_size=3, shuffle=True, seed=80083).split(data_frame)[-1].toPandas()
user_idx item_idx relevance timestamp
0 1 2 2 2
1 1 3 3 3
2 1 1 1 1
3 2 3 6 1
4 2 2 5 2
5 2 1 4 3
Or a fraction:
>>> UserSplitter(item_test_size=0.67, shuffle=True, seed=80083).split(data_frame)[-1].toPandas()
user_idx item_idx relevance timestamp
0 1 2 2 2
1 1 3 3 3
2 2 3 6 1
3 2 2 5 2
`user_test_size` allows putting an exact number of users into the test set
>>> UserSplitter(user_test_size=1, item_test_size=2, seed=42).split(data_frame)[-1].toPandas().user_idx.nunique()
1
>>> UserSplitter(user_test_size=0.5, item_test_size=2, seed=42).split(data_frame)[-1].toPandas().user_idx.nunique()
1
"""
_init_arg_names = [
"item_test_size",
"user_test_size",
"shuffle",
"drop_cold_items",
"drop_cold_users",
"drop_zero_rel_in_test",
"seed",
"user_col",
"item_col",
"date_col",
]
# pylint: disable=too-many-arguments
def __init__(
self,
item_test_size: Union[float, int] = 1,
user_test_size: Optional[Union[float, int]] = None,
shuffle=False,
drop_cold_items: bool = False,
drop_cold_users: bool = False,
drop_zero_rel_in_test: bool = True,
seed: Optional[int] = None,
user_col: str = "user_idx",
item_col: Optional[str] = "item_idx",
date_col: Optional[str] = "timestamp",
):
"""
:param item_test_size: fraction or a number of items per user
:param user_test_size: similar to ``item_test_size``,
but corresponds to the number of users.
``None`` means all available users.
:param shuffle: take random items instead of the most recent ones based on ``timestamp``.
:param drop_cold_items: flag to drop cold items from test
:param drop_cold_users: flag to drop cold users from test
:param drop_zero_rel_in_test: flag to remove entries with relevance <= 0
from the test part of the dataset
:param seed: random seed
:param user_col: user id column name
:param item_col: item id column name
:param date_col: timestamp column name
"""
super().__init__(
drop_cold_items=drop_cold_items,
drop_cold_users=drop_cold_users,
drop_zero_rel_in_test=drop_zero_rel_in_test,
user_col=user_col,
item_col=item_col,
date_col=date_col,
)
self.item_test_size = item_test_size
self.user_test_size = user_test_size
self.shuffle = shuffle
self.seed = seed
def _get_test_users(
self,
log: DataFrame,
) -> DataFrame:
"""
:param log: input DataFrame
:return: Spark DataFrame with a single user id column
"""
all_users = log.select(self.user_col).distinct()
user_count = all_users.count()
if self.user_test_size is not None:
value_error = False
if isinstance(self.user_test_size, int):
if 1 <= self.user_test_size < user_count:
test_user_count = self.user_test_size
else:
value_error = True
else:
if 1 > self.user_test_size > 0:
test_user_count = user_count * self.user_test_size
else:
value_error = True
if value_error:
raise ValueError(
f"""
Invalid value for user_test_size: {self.user_test_size}
"""
)
test_users = (
all_users.withColumn("_rand", sf.rand(self.seed))
.withColumn(
"_row_num", sf.row_number().over(Window.orderBy("_rand"))
)
.filter(f"_row_num <= {test_user_count}")
.drop("_rand", "_row_num")
)
else:
test_users = all_users
return test_users
def _split_proportion(self, log: DataFrame) -> SplitterReturnType:
"""
Proportionate split
:param log: input DataFrame `[self.user_col, self.item_col, self.date_col, relevance]`
:return: train and test DataFrames
"""
counts = log.groupBy(self.user_col).count()
test_users = self._get_test_users(log).withColumn(
"test_user", sf.lit(1)
)
if self.shuffle:
res = self._add_random_partition(
log.join(test_users, how="left", on=self.user_col)
)
else:
res = self._add_time_partition(
log.join(test_users, how="left", on=self.user_col)
)
res = res.join(counts, on=self.user_col, how="left")
res = res.withColumn("_frac", sf.col("_row_num") / sf.col("count"))
train = res.filter(
f"""
_frac > {self.item_test_size} OR
test_user IS NULL
"""
).drop("_rand", "_row_num", "count", "_frac", "test_user")
test = res.filter(
f"""
_frac <= {self.item_test_size} AND
test_user IS NOT NULL
"""
).drop("_rand", "_row_num", "count", "_frac", "test_user")
return train, test
def _split_quantity(self, log: DataFrame) -> SplitterReturnType:
"""
Split by quantity
:param log: input DataFrame `[self.user_col, self.item_col, self.date_col, relevance]`
:return: train and test DataFrames
"""
test_users = self._get_test_users(log).withColumn(
"test_user", sf.lit(1)
)
if self.shuffle:
res = self._add_random_partition(
log.join(test_users, how="left", on=self.user_col)
)
else:
res = self._add_time_partition(
log.join(test_users, how="left", on=self.user_col)
)
train = res.filter(
f"""
_row_num > {self.item_test_size} OR
test_user IS NULL
"""
).drop("_rand", "_row_num", "test_user")
test = res.filter(
f"""
_row_num <= {self.item_test_size} AND
test_user IS NOT NULL
"""
).drop("_rand", "_row_num", "test_user")
return train, test
def _core_split(self, log: DataFrame) -> SplitterReturnType:
if 0 <= self.item_test_size < 1.0:
train, test = self._split_proportion(log)
elif self.item_test_size >= 1 and isinstance(self.item_test_size, int):
train, test = self._split_quantity(log)
else:
raise ValueError(
"`test_size` value must be [0, 1) or "
"a positive integer; "
f"test_size={self.item_test_size}"
)
return train, test
def _add_random_partition(self, dataframe: DataFrame) -> DataFrame:
"""
Adds `_rand` column and a user index column `_row_num` based on `_rand`.
:param dataframe: input DataFrame with `user_id` column
:returns: processed DataFrame
"""
dataframe = dataframe.withColumn("_rand", sf.rand(self.seed))
dataframe = dataframe.withColumn(
"_row_num",
sf.row_number().over(
Window.partitionBy(self.user_col).orderBy("_rand")
),
)
return dataframe
@staticmethod
def _add_time_partition(
dataframe: DataFrame,
user_col: str = "user_idx",
date_col: str = "timestamp",
) -> DataFrame:
"""
Adds user index `_row_num` based on `timestamp`.
:param dataframe: input DataFrame with `[timestamp, user_id]`
:param user_col: user id column name
:param date_col: timestamp column name
:returns: processed DataFrame
"""
res = dataframe.withColumn(
"_row_num",
sf.row_number().over(
Window.partitionBy(user_col).orderBy(
sf.col(date_col).desc()
)
),
)
return res
def k_folds(
log: AnyDataFrame,
n_folds: Optional[int] = 5,
seed: Optional[int] = None,
splitter: Optional[str] = "user",
user_col: str = "user_idx",
) -> SplitterReturnType:
"""
Splits log inside each user into folds at random
:param log: input DataFrame
:param n_folds: number of folds
:param seed: random seed
:param splitter: splitting strategy. Only the ``user`` variant is available at the moment.
:param user_col: user id column name
:return: yields train and test DataFrames by folds
"""
if splitter not in {"user"}:
raise ValueError(f"Wrong splitter parameter: {splitter}")
if splitter == "user":
dataframe = convert2spark(log).withColumn("_rand", sf.rand(seed))
dataframe = dataframe.withColumn(
"fold",
sf.row_number().over(
Window.partitionBy(user_col).orderBy("_rand")
)
% n_folds,
).drop("_rand")
for i in range(n_folds):
train = dataframe.filter(f"fold != {i}").drop("fold")
test = dataframe.filter(f"fold == {i}").drop("fold")
yield train, test | /replay_rec-0.11.0-py3-none-any.whl/replay/splitters/user_log_splitter.py | 0.937283 | 0.538194 | user_log_splitter.py | pypi |
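A short sketch of the k_folds generator above (not part of the library source):

import pandas as pd

from replay.splitters.user_log_splitter import k_folds

log = pd.DataFrame(
    {"user_idx": [1, 1, 1, 2, 2, 2], "item_idx": [1, 2, 3, 1, 2, 3], "relevance": [1] * 6}
)

for fold, (train, test) in enumerate(k_folds(log, n_folds=3, seed=1337)):
    # each user's interactions are spread across the folds at random
    print(fold, train.count(), test.count())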
from functools import partial
from typing import Optional
import numpy as np
from pyspark.sql import DataFrame
from pyspark.sql import functions as sf
from pyspark.sql import types as st
from replay.constants import AnyDataFrame
from replay.metrics.base_metric import (
fill_na_with_empty_array,
RecOnlyMetric,
sorter,
)
from replay.utils import convert2spark, get_top_k_recs
# pylint: disable=too-few-public-methods
class Surprisal(RecOnlyMetric):
"""
Measures how many surprising rare items are present in recommendations.
.. math::
\\textit{Self-Information}(j)= -\log_2 \\frac {u_j}{N}
:math:`u_j` -- number of users that interacted with item :math:`j`.
Cold items are treated as if they were rated by 1 user.
That is, if they appear in recommendations it will be completely unexpected.
Metric is normalized.
Surprisal for item :math:`j` is
.. math::
Surprisal(j)= \\frac {\\textit{Self-Information}(j)}{log_2 N}
Recommendation list surprisal is the average surprisal of items in it.
.. math::
Surprisal@K(i) = \\frac {\sum_{j=1}^{K}Surprisal(j)} {K}
Final metric is averaged by users.
.. math::
Surprisal@K = \\frac {\sum_{i=1}^{N}Surprisal@K(i)}{N}
"""
_scala_udf_name = "getSurprisalMetricValue"
def __init__(
self, log: AnyDataFrame,
use_scala_udf: bool = False
): # pylint: disable=super-init-not-called
"""
Here we calculate self-information for each item
:param log: historical data
"""
self._use_scala_udf = use_scala_udf
self.log = convert2spark(log)
n_users = self.log.select("user_idx").distinct().count() # type: ignore
self.item_weights = self.log.groupby("item_idx").agg(
(
sf.log2(n_users / sf.countDistinct("user_idx")) # type: ignore
/ np.log2(n_users)
).alias("rec_weight")
)
@staticmethod
def _get_metric_value_by_user(k, *args):
weights = args[0]
return sum(weights[:k]) / k
def _get_enriched_recommendations(
self,
recommendations: DataFrame,
ground_truth: DataFrame,
max_k: int,
ground_truth_users: Optional[AnyDataFrame] = None,
) -> DataFrame:
recommendations = convert2spark(recommendations)
ground_truth_users = convert2spark(ground_truth_users)
recommendations = get_top_k_recs(recommendations, max_k)
sort_udf = sf.udf(
partial(sorter, extra_position=2),
returnType=st.StructType(
[
st.StructField(
"pred",
st.ArrayType(
self.item_weights.schema["item_idx"].dataType
),
),
st.StructField(
"rec_weight",
st.ArrayType(
self.item_weights.schema["rec_weight"].dataType
),
),
],
),
)
recommendations = (
recommendations.join(self.item_weights, on="item_idx", how="left")
.fillna(1.0)
.groupby("user_idx")
.agg(
sf.collect_list(
sf.struct("relevance", "item_idx", "rec_weight")
).alias("rel_id_weight")
)
.withColumn("pred_rec_weight", sort_udf(sf.col("rel_id_weight")))
.select(
"user_idx",
sf.col("pred_rec_weight.rec_weight").alias("rec_weight"),
)
)
if ground_truth_users is not None:
recommendations = fill_na_with_empty_array(
recommendations.join(
ground_truth_users, on="user_idx", how="right"
),
"rec_weight",
self.item_weights.schema["rec_weight"].dataType,
)
return recommendations | /replay_rec-0.11.0-py3-none-any.whl/replay/metrics/surprisal.py | 0.930443 | 0.390214 | surprisal.py | pypi |
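A small self-contained numeric check of the normalized self-information weights computed in the constructor above (pure pandas/NumPy; it restates the formula rather than calling the metric, and the toy log is made up for illustration):

import numpy as np
import pandas as pd

log = pd.DataFrame({"user_idx": [1, 1, 2, 3], "item_idx": [10, 20, 20, 20]})
n_users = log["user_idx"].nunique()  # 3

weights = (
    log.groupby("item_idx")["user_idx"]
    .nunique()
    .apply(lambda u: np.log2(n_users / u) / np.log2(n_users))
)
# item 10 was seen by 1 of 3 users -> weight 1.0 (maximally surprising)
# item 20 was seen by all 3 users  -> weight 0.0 (not surprising at all)
print(weights)

# Surprisal@2 for a user recommended [10, 20] is the mean of the two weights: 0.5
print(weights.loc[[10, 20]].mean())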
import math
from replay.metrics.base_metric import Metric
# pylint: disable=too-few-public-methods
class NDCG(Metric):
"""
Normalized Discounted Cumulative Gain is a metric
that takes into account positions of relevant items.
This is the binary version: it only takes into account
whether the item was consumed or not; the relevance value is ignored.
.. math::
DCG@K(i) = \sum_{j=1}^{K}\\frac{\mathbb{1}_{r_{ij}}}{\log_2 (j+1)}
:math:`\\mathbb{1}_{r_{ij}}` -- indicator function showing that user :math:`i` interacted with item :math:`j`
To get from :math:`DCG` to :math:`nDCG` we calculate the biggest possible value of `DCG`
for user :math:`i` and recommendation length :math:`K`.
.. math::
IDCG@K(i) = max(DCG@K(i)) = \sum_{j=1}^{K}\\frac{\mathbb{1}_{j\le|Rel_i|}}{\log_2 (j+1)}
.. math::
nDCG@K(i) = \\frac {DCG@K(i)}{IDCG@K(i)}
:math:`|Rel_i|` -- number of relevant items for user :math:`i`
Metric is averaged by users.
.. math::
nDCG@K = \\frac {\sum_{i=1}^{N}nDCG@K(i)}{N}
>>> import pandas as pd
>>> pred=pd.DataFrame({"user_idx": [1, 1, 2, 2],
... "item_idx": [4, 5, 6, 7],
... "relevance": [1, 1, 1, 1]})
>>> true=pd.DataFrame({"user_idx": [1, 1, 1, 1, 1, 2],
... "item_idx": [1, 2, 3, 4, 5, 8],
... "relevance": [0.5, 0.1, 0.25, 0.6, 0.2, 0.3]})
>>> ndcg = NDCG()
>>> ndcg(pred, true, 2)
0.5
"""
_scala_udf_name = "getNDCGMetricValue"
@staticmethod
def _get_metric_value_by_user(k, pred, ground_truth) -> float:
if len(pred) == 0 or len(ground_truth) == 0:
return 0.0
pred_len = min(k, len(pred))
ground_truth_len = min(k, len(ground_truth))
denom = [1 / math.log2(i + 2) for i in range(k)]
dcg = sum(denom[i] for i in range(pred_len) if pred[i] in ground_truth)
idcg = sum(denom[:ground_truth_len])
return dcg / idcg | /replay_rec-0.11.0-py3-none-any.whl/replay/metrics/ndcg.py | 0.783658 | 0.660275 | ndcg.py | pypi |
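A minimal pure-Python restatement of the per-user computation above, reproducing the values behind the doctest (user 1 scores 1.0, user 2 scores 0.0, and the averaged metric is 0.5):

import math


def ndcg_at_k(k, pred, ground_truth):
    # mirrors NDCG._get_metric_value_by_user above
    if not pred or not ground_truth:
        return 0.0
    denom = [1 / math.log2(i + 2) for i in range(k)]
    dcg = sum(denom[i] for i in range(min(k, len(pred))) if pred[i] in ground_truth)
    idcg = sum(denom[: min(k, len(ground_truth))])
    return dcg / idcg


print(ndcg_at_k(2, [4, 5], [1, 2, 3, 4, 5]))  # 1.0
print(ndcg_at_k(2, [6, 7], [8]))              # 0.0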
from replay.metrics.base_metric import Metric
# pylint: disable=too-few-public-methods
class RocAuc(Metric):
"""
Receiver Operating Characteristic/Area Under the Curve is an aggregated performance measure
that depends only on the order of recommended items.
It can be interpreted as the fraction of object pairs (object of class 1, object of class 0)
that were correctly ordered by the model.
The bigger the value of AUC, the better the classification model.
.. math::
ROCAUC@K(i) = \\frac {\sum_{s=1}^{K}\sum_{t=1}^{K}
\mathbb{1}_{r_{si}<r_{ti}}
\mathbb{1}_{gt_{si}<gt_{ti}}}
{\sum_{s=1}^{K}\sum_{t=1}^{K} \mathbb{1}_{gt_{si}<gt_{ti}}}
:math:`\\mathbb{1}_{r_{si}<r_{ti}}` -- indicator function showing that recommendation score for
user :math:`i` for item :math:`s` is bigger than for item :math:`t`
:math:`\mathbb{1}_{gt_{si}<gt_{ti}}` -- indicator function showing that
user :math:`i` values item :math:`s` more than item :math:`t`.
Metric is averaged by all users.
.. math::
ROCAUC@K = \\frac {\sum_{i=1}^{N}ROCAUC@K(i)}{N}
>>> import pandas as pd
>>> true=pd.DataFrame({"user_idx": 1,
... "item_idx": [4, 5, 6],
... "relevance": [1, 1, 1]})
>>> pred=pd.DataFrame({"user_idx": 1,
... "item_idx": [1, 2, 3, 4, 5, 6, 7],
... "relevance": [0.5, 0.1, 0.25, 0.6, 0.2, 0.3, 0]})
>>> roc = RocAuc()
>>> roc(pred, true, 7)
0.75
"""
_scala_udf_name = "getRocAucMetricValue"
@staticmethod
def _get_metric_value_by_user(k, pred, ground_truth) -> float:
length = min(k, len(pred))
if len(ground_truth) == 0 or len(pred) == 0:
return 0
fp_cur = 0
fp_cum = 0
for item in pred[:length]:
if item in ground_truth:
fp_cum += fp_cur
else:
fp_cur += 1
if fp_cur == length:
return 0
if fp_cum == 0:
return 1
return 1 - fp_cum / (fp_cur * (length - fp_cur)) | /replay_rec-0.11.0-py3-none-any.whl/replay/metrics/rocauc.py | 0.880155 | 0.607663 | rocauc.py | pypi |
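A minimal pure-Python restatement of the pairwise counting above, reproducing the 0.75 from the doctest (items are listed already sorted by predicted relevance, as the metric receives them):

def roc_auc_at_k(k, pred, ground_truth):
    # mirrors RocAuc._get_metric_value_by_user above
    length = min(k, len(pred))
    if not pred or not ground_truth:
        return 0.0
    fp_cur = fp_cum = 0
    for item in pred[:length]:
        if item in ground_truth:
            fp_cum += fp_cur  # this relevant item is ranked below fp_cur irrelevant ones
        else:
            fp_cur += 1
    if fp_cur == length:
        return 0.0
    if fp_cum == 0:
        return 1.0
    return 1 - fp_cum / (fp_cur * (length - fp_cur))


# items sorted by predicted relevance from the doctest above; relevant items are {4, 5, 6}
print(roc_auc_at_k(7, [4, 1, 6, 3, 5, 2, 7], {4, 5, 6}))  # 0.75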
from __future__ import annotations
from copy import deepcopy
from logging import getLogger
from typing import Optional, Union
import joblib
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import sklearn
import xarray as xr
from sklearn.base import BaseEstimator
from replay_trajectory_classification.continuous_state_transitions import (
EmpiricalMovement,
RandomWalk,
RandomWalkDirection1,
RandomWalkDirection2,
Uniform,
)
from replay_trajectory_classification.core import (
_acausal_classify,
_acausal_classify_gpu,
_causal_classify,
_causal_classify_gpu,
atleast_2d,
check_converged,
get_centers,
mask,
scaled_likelihood,
)
from replay_trajectory_classification.discrete_state_transitions import (
DiagonalDiscrete,
RandomDiscrete,
UniformDiscrete,
UserDefinedDiscrete,
estimate_discrete_state_transition,
)
from replay_trajectory_classification.environments import Environment
from replay_trajectory_classification.initial_conditions import (
UniformInitialConditions,
UniformOneEnvironmentInitialConditions,
)
from replay_trajectory_classification.likelihoods import (
_SORTED_SPIKES_ALGORITHMS,
_ClUSTERLESS_ALGORITHMS,
)
from replay_trajectory_classification.observation_model import ObservationModel
logger = getLogger(__name__)
sklearn.set_config(print_changed_only=False)
_DEFAULT_CLUSTERLESS_MODEL_KWARGS = {
"mark_std": 24.0,
"position_std": 6.0,
}
_DEFAULT_SORTED_SPIKES_MODEL_KWARGS = {
"position_std": 6.0,
"use_diffusion": False,
"block_size": None,
}
_DEFAULT_CONTINUOUS_TRANSITIONS = [[RandomWalk(), Uniform()], [Uniform(), Uniform()]]
_DEFAULT_ENVIRONMENT = Environment(environment_name="")
class _ClassifierBase(BaseEstimator):
"""Base class for classifier objects."""
def __init__(
self,
environments: list[Environment] = _DEFAULT_ENVIRONMENT,
observation_models: Optional[ObservationModel] = None,
continuous_transition_types: list[
list[
Union[
EmpiricalMovement,
RandomWalk,
RandomWalkDirection1,
RandomWalkDirection2,
Uniform,
]
]
] = _DEFAULT_CONTINUOUS_TRANSITIONS,
discrete_transition_type: Union[
DiagonalDiscrete,
RandomDiscrete,
UniformDiscrete,
UserDefinedDiscrete,
] = DiagonalDiscrete(0.968),
initial_conditions_type: Union[
UniformInitialConditions, UniformOneEnvironmentInitialConditions
] = UniformInitialConditions(),
infer_track_interior: bool = True,
):
if isinstance(environments, Environment):
environments = (environments,)
if observation_models is None:
n_states = len(continuous_transition_types)
env_name = environments[0].environment_name
observation_models = (ObservationModel(env_name),) * n_states
self.environments = environments
self.observation_models = observation_models
self.continuous_transition_types = continuous_transition_types
self.discrete_transition_type = discrete_transition_type
self.initial_conditions_type = initial_conditions_type
self.infer_track_interior = infer_track_interior
def fit_environments(
self, position: np.ndarray, environment_labels: Optional[np.ndarray] = None
) -> None:
"""Fits the Environment class on the position data to get information about the spatial environment.
Parameters
----------
position : np.ndarray, shape (n_time, n_position_dims)
environment_labels : np.ndarray, optional, shape (n_time,)
Labels for each time points about which environment it corresponds to, by default None
"""
for environment in self.environments:
if environment_labels is None:
is_environment = np.ones((position.shape[0],), dtype=bool)
else:
is_environment = environment_labels == environment.environment_name
environment.fit_place_grid(
position[is_environment], infer_track_interior=self.infer_track_interior
)
self.max_pos_bins_ = np.max(
[env.place_bin_centers_.shape[0] for env in self.environments]
)
def fit_initial_conditions(self):
"""Constructs the initial probability for the state and each spatial bin."""
logger.info("Fitting initial conditions...")
environment_names_to_state = [
obs.environment_name for obs in self.observation_models
]
n_states = len(self.observation_models)
initial_conditions = self.initial_conditions_type.make_initial_conditions(
self.environments, environment_names_to_state
)
self.initial_conditions_ = np.zeros(
(n_states, self.max_pos_bins_, 1), dtype=np.float64
)
for state_ind, ic in enumerate(initial_conditions):
self.initial_conditions_[state_ind, : ic.shape[0]] = ic[..., np.newaxis]
def fit_continuous_state_transition(
self,
continuous_transition_types: list[
list[
Union[
EmpiricalMovement,
RandomWalk,
RandomWalkDirection1,
RandomWalkDirection2,
Uniform,
]
]
] = _DEFAULT_CONTINUOUS_TRANSITIONS,
position: Optional[np.ndarray] = None,
is_training: Optional[np.ndarray] = None,
encoding_group_labels: Optional[np.ndarray] = None,
environment_labels: Optional[np.ndarray] = None,
) -> None:
"""Constructs the transition matrices for the continuous states.
Parameters
----------
continuous_transition_types : list of list of transition matrix instances, optional
Types of transition models, by default _DEFAULT_CONTINUOUS_TRANSITIONS
position : np.ndarray, optional
Position of the animal in the environment, by default None
is_training : np.ndarray, optional
Boolean array that determines what data to train the place fields on, by default None
encoding_group_labels : np.ndarray, shape (n_time,), optional
If place fields should correspond to each state, label each time point with the group name
For example, Some points could correspond to inbound trajectories and some outbound, by default None
environment_labels : np.ndarray, shape (n_time,), optional
If there are multiple environments, label each time point with the environment name, by default None
"""
logger.info("Fitting continuous state transition...")
if is_training is None:
n_time = position.shape[0]
is_training = np.ones((n_time,), dtype=bool)
if encoding_group_labels is None:
n_time = position.shape[0]
encoding_group_labels = np.zeros((n_time,), dtype=np.int32)
is_training = np.asarray(is_training).squeeze()
self.continuous_transition_types = continuous_transition_types
continuous_state_transition = []
for row in self.continuous_transition_types:
continuous_state_transition.append([])
for transition in row:
if isinstance(transition, EmpiricalMovement):
continuous_state_transition[-1].append(
transition.make_state_transition(
self.environments,
position,
is_training,
encoding_group_labels,
environment_labels,
)
)
else:
continuous_state_transition[-1].append(
transition.make_state_transition(self.environments)
)
n_states = len(self.continuous_transition_types)
self.continuous_state_transition_ = np.zeros(
(n_states, n_states, self.max_pos_bins_, self.max_pos_bins_)
)
for row_ind, row in enumerate(continuous_state_transition):
for column_ind, st in enumerate(row):
self.continuous_state_transition_[
row_ind, column_ind, : st.shape[0], : st.shape[1]
] = st
def fit_discrete_state_transition(self):
"""Constructs the transition matrix for the discrete states."""
logger.info("Fitting discrete state transition")
n_states = len(self.continuous_transition_types)
self.discrete_state_transition_ = (
self.discrete_transition_type.make_state_transition(n_states)
)
def plot_discrete_state_transition(
self,
state_names: Optional[list[str]] = None,
cmap: str = "Oranges",
ax: Optional[matplotlib.axes.Axes] = None,
convert_to_seconds: bool = False,
sampling_frequency: int = 1,
) -> None:
"""Plot heatmap of discrete transition matrix.
Parameters
----------
state_names : list[str], optional
Names corresponding to each discrete state, by default None
cmap : str, optional
matplotlib colormap, by default "Oranges"
ax : matplotlib.axes.Axes, optional
Plotting axis, by default plots to current axis
convert_to_seconds : bool, optional
Convert the probabilities of state to expected duration of state, by default False
sampling_frequency : int, optional
Number of samples per second, by default 1
"""
if ax is None:
ax = plt.gca()
if state_names is None:
state_names = [
f"state {ind + 1}"
for ind in range(self.discrete_state_transition_.shape[0])
]
if convert_to_seconds:
discrete_state_transition = (
1 / (1 - self.discrete_state_transition_)
) / sampling_frequency
vmin, vmax, fmt = 0.0, None, "0.03f"
label = "Seconds"
else:
discrete_state_transition = self.discrete_state_transition_
vmin, vmax, fmt = 0.0, 1.0, "0.03f"
label = "Probability"
sns.heatmap(
data=discrete_state_transition,
vmin=vmin,
vmax=vmax,
annot=True,
fmt=fmt,
cmap=cmap,
xticklabels=state_names,
yticklabels=state_names,
ax=ax,
cbar_kws={"label": label},
)
ax.set_ylabel("Previous State", fontsize=12)
ax.set_xlabel("Current State", fontsize=12)
ax.set_title("Discrete State Transition", fontsize=16)
def estimate_parameters(
self,
fit_args: dict,
predict_args: dict,
tolerance: float = 1e-4,
max_iter: int = 10,
verbose: bool = True,
store_likelihood: bool = True,
estimate_initial_conditions: bool = True,
estimate_discrete_transition: bool = True,
) -> tuple[xr.Dataset, list[float]]:
"""Estimate the intial conditions and/or discrete transition matrix of the model.
Parameters
----------
fit_args : dict
Arguments that would be passed to the `fit` method.
predict_args : dict
Arguments that would be passed to the `predict` method.
tolerance : float, optional
Smallest change in data log likelihood below which the estimate is considered converged, by default 1e-4
max_iter : int, optional
Maximum number of iterations, by default 10
verbose : bool, optional
Log results of each iteration, by default True
store_likelihood : bool, optional
If True, store the likelihood so it is not recomputed on every iteration, by default True
estimate_initial_conditions : bool, optional
If True, estimate the initial conditions, by default True
estimate_discrete_transition : bool, optional
If True, estimate the discrete state transition, by default True
Returns
-------
results : xr.Dataset
data_log_likelihoods : list, len (n_iter,)
The data log likelihood of each iteration
"""
if "store_likelihood" in predict_args:
store_likelihood = predict_args["store_likelihood"]
else:
predict_args["store_likelihood"] = store_likelihood
self.fit(**fit_args)
results = self.predict(**predict_args)
data_log_likelihoods = [results.data_log_likelihood]
log_likelihood_change = np.inf
converged = False
increasing = True
n_iter = 0
n_time = len(results.time)
logger.info(f"iteration {n_iter}, likelihood: {data_log_likelihoods[-1]}")
get_results_args = {
key: value
for key, value in predict_args.items()
if key in ["time", "state_names", "use_gpu", "is_compute_acausal"]
}
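        # EM-style loop: update the initial conditions and/or the discrete
        # transition from the acausal posterior, recompute the posterior, and
        # stop when the change in data log likelihood falls below `tolerance`.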
while not converged and (n_iter < max_iter):
if estimate_initial_conditions:
self.initial_conditions_ = results.isel(
time=0
).acausal_posterior.values[..., np.newaxis]
if estimate_discrete_transition:
self.discrete_state_transition_ = estimate_discrete_state_transition(
self, results
)
if store_likelihood:
results = self._get_results(
self.likelihood_, n_time, **get_results_args
)
else:
results = self.predict(**predict_args)
data_log_likelihoods.append(results.data_log_likelihood)
log_likelihood_change = data_log_likelihoods[-1] - data_log_likelihoods[-2]
n_iter += 1
converged, increasing = check_converged(
data_log_likelihoods[-1], data_log_likelihoods[-2], tolerance
)
if verbose:
logger.info(
f"iteration {n_iter}, "
f"likelihood: {data_log_likelihoods[-1]}, "
f"change: {log_likelihood_change}"
)
if not converged and (n_iter == max_iter):
logger.warning("Max iterations reached...")
return results, data_log_likelihoods
@staticmethod
def convert_2D_to_1D_results(
results2D: xr.Dataset, environment2D: Environment, environment1D: Environment
) -> xr.Dataset:
"""Projects a 2D position decoding result to a 1D decoding result.
Parameters
----------
        results2D : xarray.core.dataset.Dataset
environment2D : replay_trajectory_classification.environments.Environment
environment1D : replay_trajectory_classification.environments.Environment
Returns
-------
results1D : xarray.core.dataset.Dataset
Examples
--------
results = classifier.predict(spikes)
environment1D = (
Environment(track_graph=track_graph,
place_bin_size=2.0,
edge_order=edge_order,
edge_spacing=edge_spacing)
.fit_place_grid())
        results1D = classifier.convert_2D_to_1D_results(
            results, classifier.environments[0], environment1D)
"""
projected_1D_position = np.asarray(
environment1D.place_bin_centers_nodes_df_[["x_position", "y_position"]]
)
bin_centers_2D = environment2D.place_bin_centers_
closest_1D_bin_ind = np.asarray(
[
np.argmin(np.linalg.norm(projected_1D_position - bin_center, axis=1))
for bin_center in bin_centers_2D
]
)
non_position_dims = [
n_elements
for dim, n_elements in results2D.dims.items()
if dim not in ["x_position", "y_position"]
]
results1D_shape = (
*non_position_dims,
environment1D.place_bin_centers_.shape[0],
)
results1D = {
variable: np.zeros(results1D_shape) for variable in results2D.data_vars
}
is_track_interior = environment2D.is_track_interior_.ravel(order="F")
interior_bin_ind = np.unravel_index(
np.nonzero(is_track_interior)[0],
(len(results2D.x_position), len(results2D.y_position)),
order="F",
)
for linear_bin_ind, x_ind, y_ind in zip(
closest_1D_bin_ind[is_track_interior], *interior_bin_ind
):
for variable in results2D.data_vars:
results1D[variable][:, linear_bin_ind] += results2D.isel(
x_position=x_ind, y_position=y_ind
)[variable].values
dims = [
dim for dim in results2D.dims if dim not in ["x_position", "y_position"]
]
coords = {dim: results2D.coords[dim].values for dim in dims}
dims.append("position")
coords["position"] = environment1D.place_bin_centers_.squeeze()
return xr.Dataset(
{key: (dims, value) for key, value in results1D.items()},
coords=coords,
attrs=results2D.attrs,
)
def project_1D_position_to_2D(
self, results: xr.Dataset, posterior_type="acausal_posterior"
) -> np.ndarray:
"""Project the 1D most probable position into the 2D track graph space.
Only works for single environment.
Parameters
----------
results : xr.Dataset
        posterior_type : str, optional
            One of "causal_posterior", "acausal_posterior", or "likelihood", by default "acausal_posterior"
Returns
-------
map_position2D : np.ndarray
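        Examples
        --------
        A minimal sketch, assuming the classifier was fit on a single
        environment with a track graph and ``results`` came from ``predict``:
            map_position2D = classifier.project_1D_position_to_2D(results)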
"""
if len(self.environments) > 1:
print("Canont project back with multiple environments.")
return
map_position_ind = (
results[posterior_type].sum("state").argmax("position").to_numpy().squeeze()
)
return (
self.environments[0]
.place_bin_centers_nodes_df_.iloc[map_position_ind][
["x_position", "y_position"]
]
.to_numpy()
)
def _get_results(
self,
likelihood: np.ndarray,
n_time: int,
time: Optional[np.ndarray] = None,
is_compute_acausal: bool = True,
use_gpu: bool = False,
state_names: Optional[list[str]] = None,
) -> xr.Dataset:
"""Computes the causal and acausal posterior after the likelihood has been computed.
Parameters
----------
likelihood : np.ndarray
n_time : int
time : np.ndarray, optional
is_compute_acausal : bool, optional
use_gpu : bool, optional
state_names : list[str], optional
Returns
-------
results : xr.Dataset
"""
n_states = self.discrete_state_transition_.shape[0]
results = {}
dtype = np.float32 if use_gpu else np.float64
results["likelihood"] = np.full(
(n_time, n_states, self.max_pos_bins_, 1), np.nan, dtype=dtype
)
compute_causal = _causal_classify_gpu if use_gpu else _causal_classify
compute_acausal = _acausal_classify_gpu if use_gpu else _acausal_classify
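        # Likelihoods are computed per (environment, encoding group); pad each
        # one into a common array whose position axis is the largest
        # environment (self.max_pos_bins_).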
for state_ind, obs in enumerate(self.observation_models):
likelihood_name = (obs.environment_name, obs.encoding_group)
n_bins = likelihood[likelihood_name].shape[1]
results["likelihood"][:, state_ind, :n_bins] = likelihood[likelihood_name][
..., np.newaxis
]
results["likelihood"] = scaled_likelihood(results["likelihood"], axis=(1, 2))
results["likelihood"][np.isnan(results["likelihood"])] = 0.0
n_environments = len(self.environments)
if time is None:
time = np.arange(n_time)
if n_environments == 1:
logger.info("Estimating causal posterior...")
is_track_interior = self.environments[0].is_track_interior_.ravel(order="F")
n_position_bins = len(is_track_interior)
is_states = np.ones((n_states,), dtype=bool)
st_interior_ind = np.ix_(
is_states, is_states, is_track_interior, is_track_interior
)
results["causal_posterior"] = np.full(
(n_time, n_states, n_position_bins, 1), np.nan, dtype=dtype
)
(
results["causal_posterior"][:, :, is_track_interior],
data_log_likelihood,
) = compute_causal(
self.initial_conditions_[:, is_track_interior].astype(dtype),
self.continuous_state_transition_[st_interior_ind].astype(dtype),
self.discrete_state_transition_.astype(dtype),
results["likelihood"][:, :, is_track_interior].astype(dtype),
)
if is_compute_acausal:
logger.info("Estimating acausal posterior...")
results["acausal_posterior"] = np.full(
(n_time, n_states, n_position_bins, 1), np.nan, dtype=dtype
)
results["acausal_posterior"][:, :, is_track_interior] = compute_acausal(
results["causal_posterior"][:, :, is_track_interior].astype(dtype),
self.continuous_state_transition_[st_interior_ind].astype(dtype),
self.discrete_state_transition_.astype(dtype),
)
return self._convert_results_to_xarray(
results, time, state_names, data_log_likelihood
)
else:
logger.info("Estimating causal posterior...")
(results["causal_posterior"], data_log_likelihood) = compute_causal(
self.initial_conditions_.astype(dtype),
self.continuous_state_transition_.astype(dtype),
self.discrete_state_transition_.astype(dtype),
results["likelihood"].astype(dtype),
)
if is_compute_acausal:
logger.info("Estimating acausal posterior...")
results["acausal_posterior"] = compute_acausal(
results["causal_posterior"].astype(dtype),
self.continuous_state_transition_.astype(dtype),
self.discrete_state_transition_.astype(dtype),
)
return self._convert_results_to_xarray_mutienvironment(
results, time, state_names, data_log_likelihood
)
def _convert_results_to_xarray(
self,
results: dict,
time: np.ndarray,
state_names: list,
data_log_likelihood: float,
) -> xr.Dataset:
"""Converts the results dict into a collection of labeled arrays.
Parameters
----------
results : dict
time : np.ndarray
state_names : list
data_log_likelihood : float
Returns
-------
results : xr.Dataset
"""
attrs = {"data_log_likelihood": data_log_likelihood}
n_position_dims = self.environments[0].place_bin_centers_.shape[1]
diag_transition_names = np.diag(np.asarray(self.continuous_transition_types))
if state_names is None:
if len(np.unique(self.observation_models)) == 1:
state_names = diag_transition_names
else:
state_names = [
f"{obs.encoding_group}-{transition}"
for obs, transition in zip(
self.observation_models, diag_transition_names
)
]
n_time = time.shape[0]
n_states = len(state_names)
is_track_interior = self.environments[0].is_track_interior_.ravel(order="F")
edges = self.environments[0].edges_
if n_position_dims > 1:
centers_shape = self.environments[0].centers_shape_
new_shape = (n_time, n_states, *centers_shape)
dims = ["time", "state", "x_position", "y_position"]
coords = dict(
time=time,
x_position=get_centers(edges[0]),
y_position=get_centers(edges[1]),
state=state_names,
)
results = xr.Dataset(
{
key: (
dims,
(
mask(value, is_track_interior)
.squeeze(axis=-1)
.reshape(new_shape, order="F")
),
)
for key, value in results.items()
},
coords=coords,
attrs=attrs,
)
else:
dims = ["time", "state", "position"]
coords = dict(
time=time,
position=get_centers(edges[0]),
state=state_names,
)
results = xr.Dataset(
{
key: (dims, (mask(value, is_track_interior).squeeze(axis=-1)))
for key, value in results.items()
},
coords=coords,
attrs=attrs,
)
return results
def _convert_results_to_xarray_mutienvironment(
self,
results: dict,
time: np.ndarray,
state_names: list,
data_log_likelihood: float,
) -> xr.Dataset:
"""Converts the results dict into a collection of labeled arrays when there are multiple environments.
Parameters
----------
results : dict
time : np.ndarray
state_names : list
data_log_likelihood : float
Returns
-------
results : xr.Dataset
"""
if state_names is None:
state_names = [
f"{obs.environment_name}-{obs.encoding_group}"
for obs in self.observation_models
]
attrs = {"data_log_likelihood": data_log_likelihood}
n_position_dims = self.environments[0].place_bin_centers_.shape[1]
if n_position_dims > 1:
dims = ["time", "state", "position"]
coords = dict(
time=time,
state=state_names,
)
for env in self.environments:
coords[env.environment_name + "_x_position"] = get_centers(
env.edges_[0]
)
coords[env.environment_name + "_y_position"] = get_centers(
env.edges_[1]
)
results = xr.Dataset(
{key: (dims, value.squeeze(axis=-1)) for key, value in results.items()},
coords=coords,
attrs=attrs,
)
else:
dims = ["time", "state", "position"]
coords = dict(
time=time,
state=state_names,
)
for env in self.environments:
coords[env.environment_name + "_position"] = get_centers(env.edges_[0])
results = xr.Dataset(
{key: (dims, value.squeeze(axis=-1)) for key, value in results.items()},
coords=coords,
attrs=attrs,
)
return results
def fit(self):
"""To be implemented by inheriting class"""
raise NotImplementedError
def predict(self):
"""To be implemented by inheriting class"""
raise NotImplementedError
def save_model(self, filename: str = "model.pkl") -> None:
"""Save the classifier to a pickled file.
Parameters
----------
filename : str, optional
"""
joblib.dump(self, filename)
@staticmethod
def load_model(filename: str = "model.pkl"):
"""Load the classifier from a file.
Parameters
----------
filename : str, optional
Returns
-------
classifier instance
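        Examples
        --------
        A minimal sketch, assuming ``classifier`` is a fitted classifier
        instance (the file name is arbitrary):
            classifier.save_model("model.pkl")
            classifier = classifier.load_model("model.pkl")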
"""
return joblib.load(filename)
@staticmethod
def predict_proba(results: xr.Dataset) -> xr.Dataset:
"""Predicts the probability of each state.
Parameters
----------
results : xr.Dataset
Returns
-------
results : xr.Dataset
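        Examples
        --------
        A minimal sketch, assuming ``results`` came from ``predict`` with the
        acausal posterior computed:
            state_probability = classifier.predict_proba(results).acausal_posterior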
"""
try:
return results.sum(["x_position", "y_position"])
except ValueError:
return results.sum(["position"])
def copy(self):
"""Makes a copy of the classifier"""
return deepcopy(self)
class SortedSpikesClassifier(_ClassifierBase):
"""Classifies neural population representation of position and trajectory from clustered cells.
Parameters
----------
environments : list of Environment instances, optional
The spatial environment(s) to fit
observation_models : ObservationModel instance, optional
Links environments and encoding group
continuous_transition_types : list of list of transition matrix instances, optional
Types of transition models, by default _DEFAULT_CONTINUOUS_TRANSITIONS
        Length corresponds to the number of discrete states.
discrete_transition_type : discrete transition instance, optional
initial_conditions_type : initial conditions instance, optional
The initial conditions class instance
infer_track_interior : bool, optional
Whether to infer the spatial geometry of track from position
sorted_spikes_algorithm : str, optional
The type of algorithm. See _SORTED_SPIKES_ALGORITHMS for keys
sorted_spikes_algorithm_params : dict, optional
Parameters for the algorithm.
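    Examples
    --------
    A minimal end-to-end sketch, assuming ``position`` (n_time, n_position_dims)
    and ``spikes`` (n_time, n_neurons) are hypothetical arrays:
        classifier = SortedSpikesClassifier()
        classifier.fit(position, spikes)
        results = classifier.predict(spikes)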
"""
def __init__(
self,
environments: list[Environment] = _DEFAULT_ENVIRONMENT,
observation_models: Optional[ObservationModel] = None,
continuous_transition_types: list[
list[
Union[
EmpiricalMovement,
RandomWalk,
RandomWalkDirection1,
RandomWalkDirection2,
Uniform,
]
]
] = _DEFAULT_CONTINUOUS_TRANSITIONS,
discrete_transition_type: Union[
DiagonalDiscrete,
RandomDiscrete,
UniformDiscrete,
UserDefinedDiscrete,
] = DiagonalDiscrete(0.98),
initial_conditions_type: Union[
UniformInitialConditions, UniformOneEnvironmentInitialConditions
] = UniformInitialConditions(),
infer_track_interior: bool = True,
sorted_spikes_algorithm: str = "spiking_likelihood_kde",
sorted_spikes_algorithm_params: dict = _DEFAULT_SORTED_SPIKES_MODEL_KWARGS,
):
super().__init__(
environments,
observation_models,
continuous_transition_types,
discrete_transition_type,
initial_conditions_type,
infer_track_interior,
)
self.sorted_spikes_algorithm = sorted_spikes_algorithm
self.sorted_spikes_algorithm_params = sorted_spikes_algorithm_params
def fit_place_fields(
self,
position: np.ndarray,
spikes: np.ndarray,
is_training: Optional[np.ndarray] = None,
encoding_group_labels: Optional[np.ndarray] = None,
environment_labels: Optional[np.ndarray] = None,
) -> None:
"""Fits the place intensity function for each encoding group and environment.
Parameters
----------
position : np.ndarray, shape (n_time, n_position_dims)
Position of the animal.
spikes : np.ndarray, (n_time, n_neurons)
Binary indicator of whether there was a spike in a given time bin for a given neuron.
is_training : np.ndarray, shape (n_time,), optional
Boolean array to indicate which data should be included in fitting of place fields, by default None
encoding_group_labels : np.ndarray, shape (n_time,), optional
Label for the corresponding encoding group for each time point
environment_labels : np.ndarray, shape (n_time,), optional
Label for the corresponding environment for each time point
"""
logger.info("Fitting place fields...")
n_time = position.shape[0]
if is_training is None:
is_training = np.ones((n_time,), dtype=bool)
if encoding_group_labels is None:
encoding_group_labels = np.zeros((n_time,), dtype=np.int32)
if environment_labels is None:
environment_labels = np.asarray(
[self.environments[0].environment_name] * n_time
)
is_training = np.asarray(is_training).squeeze()
kwargs = self.sorted_spikes_algorithm_params
if kwargs is None:
kwargs = {}
self.place_fields_ = {}
for obs in np.unique(self.observation_models):
environment = self.environments[
self.environments.index(obs.environment_name)
]
is_encoding = np.isin(encoding_group_labels, obs.encoding_group)
is_environment = environment_labels == obs.environment_name
likelihood_name = (obs.environment_name, obs.encoding_group)
self.place_fields_[likelihood_name] = _SORTED_SPIKES_ALGORITHMS[
self.sorted_spikes_algorithm
][0](
position=position[is_training & is_encoding & is_environment],
spikes=spikes[is_training & is_encoding & is_environment],
place_bin_centers=environment.place_bin_centers_,
place_bin_edges=environment.place_bin_edges_,
edges=environment.edges_,
is_track_interior=environment.is_track_interior_,
is_track_boundary=environment.is_track_boundary_,
**kwargs,
)
def plot_place_fields(
self, sampling_frequency: int = 1, figsize: tuple[float, float] = (10.0, 7.0)
):
"""Plots place fields for each neuron.
Parameters
----------
sampling_frequency : int, optional
samples per second, by default 1
figsize : tuple, optional
figure dimensions, by default (10, 7)
"""
try:
for env, enc in self.place_fields_:
is_track_interior = self.environments[
self.environments.index(env)
].is_track_interior_[np.newaxis]
(
(self.place_fields_[(env, enc)] * sampling_frequency)
.unstack("position")
.where(is_track_interior)
.plot(
x="x_position",
y="y_position",
col="neuron",
col_wrap=8,
vmin=0.0,
vmax=3.0,
)
)
except ValueError:
n_enc_env = len(self.place_fields_)
fig, axes = plt.subplots(
n_enc_env, 1, constrained_layout=True, figsize=figsize
)
if n_enc_env == 1:
axes = np.asarray([axes])
for ax, ((env_name, enc_group), place_fields) in zip(
axes.flat, self.place_fields_.items()
):
is_track_interior = self.environments[
self.environments.index(env_name)
].is_track_interior_[:, np.newaxis]
(
(place_fields * sampling_frequency)
.where(is_track_interior)
.plot(x="position", hue="neuron", add_legend=False, ax=ax)
)
ax.set_title(f"Environment = {env_name}, Encoding Group = {enc_group}")
ax.set_ylabel("Firing Rate\n[spikes/s]")
def fit(
self,
position: np.ndarray,
spikes: np.ndarray,
is_training: Optional[np.ndarray] = None,
encoding_group_labels: Optional[np.ndarray] = None,
environment_labels: Optional[np.ndarray] = None,
):
"""Fit the spatial grid, initial conditions, place field model, and
transition matrices.
Parameters
----------
position : np.ndarray, shape (n_time, n_position_dims)
Position of the animal.
spikes : np.ndarray, shape (n_time, n_neurons)
Binary indicator of whether there was a spike in a given time bin for a given neuron.
is_training : None or np.ndarray, shape (n_time), optional
Boolean array to indicate which data should be included in fitting of place fields, by default None
encoding_group_labels : None or np.ndarray, shape (n_time,)
Label for the corresponding encoding group for each time point
environment_labels : None or np.ndarray, shape (n_time,)
Label for the corresponding environment for each time point
Returns
-------
self
"""
position = atleast_2d(np.asarray(position))
spikes = np.asarray(spikes)
self.fit_environments(position, environment_labels)
self.fit_initial_conditions()
self.fit_continuous_state_transition(
self.continuous_transition_types,
position,
is_training,
encoding_group_labels,
environment_labels,
)
self.fit_discrete_state_transition()
self.fit_place_fields(
position, spikes, is_training, encoding_group_labels, environment_labels
)
return self
def predict(
self,
spikes: np.ndarray,
time: Optional[np.ndarray] = None,
is_compute_acausal: bool = True,
use_gpu: bool = False,
state_names: Optional[list[str]] = None,
store_likelihood: bool = False,
) -> xr.Dataset:
"""Predict the probability of spatial position and category from the spikes.
Parameters
----------
spikes : np.ndarray, shape (n_time, n_neurons)
Binary indicator of whether there was a spike in a given time bin for a given neuron.
time : np.ndarray or None, shape (n_time,), optional
Label the time axis with these values.
is_compute_acausal : bool, optional
If True, compute the acausal posterior.
use_gpu : bool, optional
Use GPU for the state space part of the model, not the likelihood.
state_names : None or array_like, shape (n_states,)
Label the discrete states.
store_likelihood : bool, optional
Store the likelihood to reuse in next computation.
Returns
-------
results : xarray.Dataset
"""
spikes = np.asarray(spikes)
n_time = spikes.shape[0]
# likelihood
logger.info("Estimating likelihood...")
likelihood = {}
for (env_name, enc_group), place_fields in self.place_fields_.items():
env_ind = self.environments.index(env_name)
is_track_interior = self.environments[env_ind].is_track_interior_.ravel(
order="F"
)
likelihood[(env_name, enc_group)] = _SORTED_SPIKES_ALGORITHMS[
self.sorted_spikes_algorithm
][1](spikes, place_fields.values, is_track_interior)
if store_likelihood:
self.likelihood_ = likelihood
return self._get_results(
likelihood, n_time, time, is_compute_acausal, use_gpu, state_names
)
class ClusterlessClassifier(_ClassifierBase):
"""Classifies neural population representation of position and trajectory from multiunit spikes and waveforms.
Parameters
----------
environments : list of Environment instances, optional
The spatial environment(s) to fit
observation_models : ObservationModel instance, optional
Links environments and encoding group
continuous_transition_types : list of list of transition matrix instances, optional
Types of transition models, by default _DEFAULT_CONTINUOUS_TRANSITIONS
        Length corresponds to the number of discrete states.
discrete_transition_type : discrete transition instance, optional
initial_conditions_type : initial conditions instance, optional
The initial conditions class instance
infer_track_interior : bool, optional
Whether to infer the spatial geometry of track from position
clusterless_algorithm : str
The type of clusterless algorithm. See _ClUSTERLESS_ALGORITHMS for keys
clusterless_algorithm_params : dict
Parameters for the clusterless algorithms.
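    Examples
    --------
    A minimal end-to-end sketch, assuming ``position`` (n_time, n_position_dims)
    and ``multiunits`` (n_time, n_marks, n_electrodes) are hypothetical arrays:
        classifier = ClusterlessClassifier()
        classifier.fit(position, multiunits)
        results = classifier.predict(multiunits)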
"""
def __init__(
self,
environments: list[Environment] = _DEFAULT_ENVIRONMENT,
observation_models=None,
continuous_transition_types: list[
list[
Union[
EmpiricalMovement,
RandomWalk,
RandomWalkDirection1,
RandomWalkDirection2,
Uniform,
]
]
] = _DEFAULT_CONTINUOUS_TRANSITIONS,
discrete_transition_type: Union[
DiagonalDiscrete,
RandomDiscrete,
UniformDiscrete,
UserDefinedDiscrete,
] = DiagonalDiscrete(0.98),
initial_conditions_type: Union[
UniformInitialConditions, UniformOneEnvironmentInitialConditions
] = UniformInitialConditions(),
infer_track_interior: bool = True,
clusterless_algorithm: str = "multiunit_likelihood",
clusterless_algorithm_params: dict = _DEFAULT_CLUSTERLESS_MODEL_KWARGS,
):
super().__init__(
environments,
observation_models,
continuous_transition_types,
discrete_transition_type,
initial_conditions_type,
infer_track_interior,
)
self.clusterless_algorithm = clusterless_algorithm
self.clusterless_algorithm_params = clusterless_algorithm_params
def fit_multiunits(
self,
position: np.ndarray,
multiunits: np.ndarray,
is_training: Optional[np.ndarray] = None,
encoding_group_labels: Optional[np.ndarray] = None,
environment_labels: Optional[np.ndarray] = None,
):
"""Fit the clusterless place field model.
Parameters
----------
position : np.ndarray, shape (n_time, n_position_dims)
Position of the animal.
multiunits : array_like, shape (n_time, n_marks, n_electrodes)
            Array where spikes are indicated by non-NaN values that correspond to the waveform features
for each electrode.
is_training : None or np.ndarray, shape (n_time), optional
Boolean array to indicate which data should be included in fitting of place fields, by default None
encoding_group_labels : None or np.ndarray, shape (n_time,)
Label for the corresponding encoding group for each time point
environment_labels : None or np.ndarray, shape (n_time,)
Label for the corresponding environment for each time point
"""
logger.info("Fitting multiunits...")
n_time = position.shape[0]
if is_training is None:
is_training = np.ones((n_time,), dtype=bool)
if encoding_group_labels is None:
encoding_group_labels = np.zeros((n_time,), dtype=np.int32)
if environment_labels is None:
environment_labels = np.asarray(
[self.environments[0].environment_name] * n_time
)
is_training = np.asarray(is_training).squeeze()
kwargs = self.clusterless_algorithm_params
if kwargs is None:
kwargs = {}
self.encoding_model_ = {}
for obs in np.unique(self.observation_models):
environment = self.environments[
self.environments.index(obs.environment_name)
]
is_encoding = np.isin(encoding_group_labels, obs.encoding_group)
is_environment = environment_labels == obs.environment_name
is_group = is_training & is_encoding & is_environment
likelihood_name = (obs.environment_name, obs.encoding_group)
self.encoding_model_[likelihood_name] = _ClUSTERLESS_ALGORITHMS[
self.clusterless_algorithm
][0](
position=position[is_group],
multiunits=multiunits[is_group],
place_bin_centers=environment.place_bin_centers_,
is_track_interior=environment.is_track_interior_,
is_track_boundary=environment.is_track_boundary_,
edges=environment.edges_,
**kwargs,
)
def fit(
self,
position: np.ndarray,
multiunits: np.ndarray,
is_training: Optional[np.ndarray] = None,
encoding_group_labels: Optional[np.ndarray] = None,
environment_labels: Optional[np.ndarray] = None,
):
"""Fit the spatial grid, initial conditions, place field model, and
transition matrices.
Parameters
----------
position : np.ndarray, shape (n_time, n_position_dims)
Position of the animal.
multiunits : array_like, shape (n_time, n_marks, n_electrodes)
            Array where spikes are indicated by non-NaN values that correspond to the waveform features
for each electrode.
is_training : None or np.ndarray, shape (n_time), optional
Boolean array to indicate which data should be included in fitting of place fields, by default None
encoding_group_labels : None or np.ndarray, shape (n_time,)
Label for the corresponding encoding group for each time point
environment_labels : None or np.ndarray, shape (n_time,)
Label for the corresponding environment for each time point
Returns
-------
self
"""
position = atleast_2d(np.asarray(position))
multiunits = np.asarray(multiunits)
self.fit_environments(position, environment_labels)
self.fit_initial_conditions()
self.fit_continuous_state_transition(
self.continuous_transition_types,
position,
is_training,
encoding_group_labels,
environment_labels,
)
self.fit_discrete_state_transition()
self.fit_multiunits(
position, multiunits, is_training, encoding_group_labels, environment_labels
)
return self
def predict(
self,
multiunits: np.ndarray,
time: Optional[np.ndarray] = None,
is_compute_acausal: bool = True,
use_gpu: bool = False,
state_names: Optional[list[str]] = None,
store_likelihood: bool = False,
) -> xr.Dataset:
"""Predict the probability of spatial position and category from the multiunit spikes and waveforms.
Parameters
----------
multiunits : array_like, shape (n_time, n_marks, n_electrodes)
            Array where spikes are indicated by non-NaN values that correspond to the waveform features
for each electrode.
time : np.ndarray or None, shape (n_time,), optional
Label the time axis with these values.
is_compute_acausal : bool, optional
If True, compute the acausal posterior.
use_gpu : bool, optional
Use GPU for the state space part of the model, not the likelihood.
state_names : None or array_like, shape (n_states,)
Label the discrete states.
store_likelihood : bool, optional
Store the likelihood to reuse in next computation.
Returns
-------
results : xarray.Dataset
"""
multiunits = np.asarray(multiunits)
n_time = multiunits.shape[0]
logger.info("Estimating likelihood...")
likelihood = {}
for (env_name, enc_group), encoding_params in self.encoding_model_.items():
env_ind = self.environments.index(env_name)
is_track_interior = self.environments[env_ind].is_track_interior_.ravel(
order="F"
)
place_bin_centers = self.environments[env_ind].place_bin_centers_
likelihood[(env_name, enc_group)] = _ClUSTERLESS_ALGORITHMS[
self.clusterless_algorithm
][1](
multiunits=multiunits,
place_bin_centers=place_bin_centers,
is_track_interior=is_track_interior,
**encoding_params,
)
if store_likelihood:
self.likelihood_ = likelihood
return self._get_results(
likelihood, n_time, time, is_compute_acausal, use_gpu, state_names
) | /replay_trajectory_classification-1.3.15-py3-none-any.whl/replay_trajectory_classification/classifier.py | 0.949 | 0.308998 | classifier.py | pypi |
from __future__ import annotations
from dataclasses import dataclass
import numpy as np
from scipy.stats import multivariate_normal
from replay_trajectory_classification.core import atleast_2d
from replay_trajectory_classification.environments import Environment, diffuse_each_bin
def _normalize_row_probability(x: np.ndarray) -> np.ndarray:
"""Ensure the state transition matrix rows sum to 1.
Parameters
----------
x : np.ndarray, shape (n_rows, n_cols)
Returns
-------
normalized_x : np.ndarray, shape (n_rows, n_cols)
"""
x /= x.sum(axis=1, keepdims=True)
x[np.isnan(x)] = 0
return x
def estimate_movement_var(
position: np.ndarray, sampling_frequency: int = 1
) -> np.ndarray:
"""Estimates the movement variance based on position.
Parameters
----------
position : np.ndarray, shape (n_time, n_position_dim)
Position of the animal
sampling_frequency : int, optional
Number of samples per second.
Returns
-------
movement_var : np.ndarray, shape (n_position_dim,)
Variance of the movement.
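    Examples
    --------
    A minimal sketch, assuming ``position`` is a hypothetical
    (n_time, n_position_dims) array sampled at 500 Hz:
        movement_var = estimate_movement_var(position, sampling_frequency=500)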
"""
position = atleast_2d(position)
is_nan = np.any(np.isnan(position), axis=1)
position = position[~is_nan]
movement_var = np.cov(np.diff(position, axis=0), rowvar=False)
return movement_var * sampling_frequency
def _random_walk_on_track_graph(
place_bin_centers: np.ndarray,
movement_mean: float,
movement_var: float,
place_bin_center_ind_to_node: np.ndarray,
distance_between_nodes: dict[dict],
) -> np.ndarray:
"""Estimates the random walk probabilities based on the spatial
topology given by the track graph.
Parameters
----------
place_bin_centers : np.ndarray, shape (n_position_bins,)
movement_mean : float
movement_var : float
place_bin_center_ind_to_node : np.ndarray
Mapping of place bin center to track graph node
distance_between_nodes : dict[dict]
Distance between each pair of track graph nodes with an edge.
Returns
-------
random_walk : np.ndarray, shape (n_position_bins, n_position_bins)
"""
state_transition = np.zeros((place_bin_centers.size, place_bin_centers.size))
gaussian = multivariate_normal(mean=movement_mean, cov=movement_var)
for bin_ind1, node1 in enumerate(place_bin_center_ind_to_node):
for bin_ind2, node2 in enumerate(place_bin_center_ind_to_node):
try:
state_transition[bin_ind1, bin_ind2] = gaussian.pdf(
distance_between_nodes[node1][node2]
)
except KeyError:
# bins not on track interior will be -1 and not in distance
# between nodes
continue
return state_transition
@dataclass
class RandomWalk:
"""A transition where the movement stays locally close in space
Attributes
----------
environment_name : str, optional
Name of environment to fit
movement_var : float, optional
        How far the animal can move in one time bin during normal movement
        (used as the variance of the Gaussian movement kernel).
movement_mean : float, optional
use_diffusion : bool, optional
Use diffusion to respect the geometry of the environment
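    Examples
    --------
    A minimal sketch, assuming ``environments`` is a tuple of fitted Environment
    instances, one of which is named "env":
        random_walk = RandomWalk(environment_name="env", movement_var=6.0)
        transition = random_walk.make_state_transition(environments)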
"""
environment_name: str = ""
movement_var: float = 6.0
movement_mean: float = 0.0
use_diffusion: bool = False
def make_state_transition(self, environments: tuple[Environment]) -> np.ndarray:
"""Creates a transition matrix for a given environment.
Parameters
----------
environments : tuple[Environment]
The existing environments in the model
Returns
-------
state_transition_matrix : np.ndarray, shape (n_position_bins, n_position_bins)
"""
self.environment = environments[environments.index(self.environment_name)]
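        # Three cases: (1) Gaussian kernel over Euclidean distances between bin
        # centers, (2) diffusion that respects the track boundary (2D only), or
        # (3) Gaussian over distances between nodes of the track graph.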
if self.environment.track_graph is None:
n_position_dims = self.environment.place_bin_centers_.shape[1]
if (n_position_dims == 1) or not self.use_diffusion:
transition_matrix = np.stack(
[
multivariate_normal(
mean=center + self.movement_mean, cov=self.movement_var
).pdf(self.environment.place_bin_centers_)
for center in self.environment.place_bin_centers_
],
axis=1,
)
else:
dx = self.environment.edges_[0][1] - self.environment.edges_[0][0]
dy = self.environment.edges_[1][1] - self.environment.edges_[1][0]
n_total_bins = np.prod(self.environment.is_track_interior_.shape)
transition_matrix = diffuse_each_bin(
self.environment.is_track_interior_,
self.environment.is_track_boundary_,
dx,
dy,
std=np.sqrt(self.movement_var),
).reshape((n_total_bins, -1), order="F")
else:
place_bin_center_ind_to_node = np.asarray(
self.environment.place_bin_centers_nodes_df_.node_id
)
transition_matrix = _random_walk_on_track_graph(
self.environment.place_bin_centers_,
self.movement_mean,
self.movement_var,
place_bin_center_ind_to_node,
self.environment.distance_between_nodes_,
)
is_track_interior = self.environment.is_track_interior_.ravel(order="F")
transition_matrix[~is_track_interior] = 0.0
transition_matrix[:, ~is_track_interior] = 0.0
return _normalize_row_probability(transition_matrix)
@dataclass
class Uniform:
"""
Attributes
----------
environment_name : str, optional
Name of first environment to fit
    environment2_name : str, optional
        Name of the second environment to fit if going from one environment to
        another.
"""
environment_name: str = ""
environment2_name: str = None
def make_state_transition(self, environments: tuple[Environment]):
"""Creates a transition matrix for a given environment.
Parameters
----------
environments : tuple[Environment]
The existing environments in the model
Returns
-------
state_transition_matrix : np.ndarray, shape (n_position_bins, n_position_bins)
"""
self.environment1 = environments[environments.index(self.environment_name)]
n_bins1 = self.environment1.place_bin_centers_.shape[0]
is_track_interior1 = self.environment1.is_track_interior_.ravel(order="F")
if self.environment2_name is None:
n_bins2 = n_bins1
is_track_interior2 = is_track_interior1.copy()
else:
self.environment2 = environments[environments.index(self.environment2_name)]
n_bins2 = self.environment2.place_bin_centers_.shape[0]
is_track_interior2 = self.environment2.is_track_interior_.ravel(order="F")
transition_matrix = np.ones((n_bins1, n_bins2))
transition_matrix[~is_track_interior1] = 0.0
transition_matrix[:, ~is_track_interior2] = 0.0
return _normalize_row_probability(transition_matrix)
@dataclass
class Identity:
"""A transition where the movement stays within a place bin
Attributes
----------
environment_name : str, optional
Name of environment to fit
"""
environment_name: str = ""
def make_state_transition(self, environments: tuple[Environment]):
"""Creates a transition matrix for a given environment.
Parameters
----------
environments : tuple[Environment]
The existing environments in the model
Returns
-------
state_transition_matrix : np.ndarray, shape (n_position_bins, n_position_bins)
"""
self.environment = environments[environments.index(self.environment_name)]
n_bins = self.environment.place_bin_centers_.shape[0]
transition_matrix = np.identity(n_bins)
is_track_interior = self.environment.is_track_interior_.ravel(order="F")
transition_matrix[~is_track_interior] = 0.0
transition_matrix[:, ~is_track_interior] = 0.0
return _normalize_row_probability(transition_matrix)
@dataclass
class EmpiricalMovement:
"""A transition matrix trained on the animal's actual movement
Attributes
----------
environment_name : str, optional
Name of environment to fit
encoding_group : str, optional
Name of encoding group to fit
speedup : int, optional
        Used to make the empirical transition matrix "faster": the same
        transitions made by the animal are allowed, but sped up by `speedup`
        times (the transition matrix is raised to the `speedup` power).
        For example, `speedup=20` means 20x faster than the animal's movement.
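    Examples
    --------
    A minimal sketch, assuming ``environments`` holds a fitted Environment named
    "env" and ``position`` is the animal's position array:
        empirical = EmpiricalMovement(environment_name="env", speedup=20)
        transition = empirical.make_state_transition(environments, position)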
"""
environment_name: str = ""
encoding_group: str = None
speedup: int = 1
def make_state_transition(
self,
environments: tuple[Environment],
position: np.ndarray,
is_training: np.ndarray = None,
encoding_group_labels: np.ndarray = None,
environment_labels: np.ndarray = None,
):
"""Creates a transition matrix for a given environment.
Parameters
----------
environments : tuple[Environment]
The existing environments in the model
position : np.ndarray
Position of the animal
is_training : np.ndarray, optional
Boolean array that determines what data to train the place fields on, by default None
encoding_group_labels : np.ndarray, shape (n_time,), optional
If place fields should correspond to each state, label each time point with the group name
For example, Some points could correspond to inbound trajectories and some outbound, by default None
environment_labels : np.ndarray, shape (n_time,), optional
If there are multiple environments, label each time point with the environment name, by default None
Returns
-------
state_transition_matrix : np.ndarray, shape (n_position_bins, n_position_bins)
"""
self.environment = environments[environments.index(self.environment_name)]
n_time = position.shape[0]
if is_training is None:
is_training = np.ones((n_time,), dtype=bool)
if encoding_group_labels is None:
is_encoding = np.ones((n_time,), dtype=bool)
else:
is_encoding = encoding_group_labels == self.encoding_group
if environment_labels is None:
is_environment = np.ones((n_time,), dtype=bool)
else:
is_environment = environment_labels == self.environment_name
position = atleast_2d(position)[is_training & is_encoding & is_environment]
state_transition, _ = np.histogramdd(
np.concatenate((position[1:], position[:-1]), axis=1),
bins=self.environment.edges_ * 2,
range=self.environment.position_range,
)
original_shape = state_transition.shape
n_position_dims = position.shape[1]
        shape_2d = np.prod(original_shape[:n_position_dims])
state_transition = _normalize_row_probability(
state_transition.reshape((shape_2d, shape_2d), order="F")
)
return np.linalg.matrix_power(state_transition, self.speedup)
@dataclass
class RandomWalkDirection1:
"""A Gaussian random walk in that can only go one direction
Attributes
----------
environment_name : str, optional
Name of environment to fit
movement_var : float, optional
        How far the animal can move in one time bin during normal
movement.
"""
environment_name: str = ""
movement_var: float = 6.0
def make_state_transition(self, environments: tuple[Environment]):
"""Creates a transition matrix for a given environment.
Parameters
----------
environments : tuple[Environment]
The existing environments in the model
Returns
-------
state_transition_matrix : np.ndarray, shape (n_position_bins, n_position_bins)
"""
self.environment = environments[environments.index(self.environment_name)]
random = RandomWalk(
self.environment_name, self.movement_var
).make_state_transition(environments)
return _normalize_row_probability(np.triu(random))
@dataclass
class RandomWalkDirection2:
"""A Gaussian random walk in that can only go one direction
Attributes
----------
environment_name : str, optional
Name of environment to fit
movement_var : float, optional
        How far the animal can move in one time bin during normal
movement.
"""
environment_name: str = ""
movement_var: float = 6.0
def make_state_transition(self, environments: tuple[Environment]):
"""Creates a transition matrix for a given environment.
Parameters
----------
environments : tuple[Environment]
The existing environments in the model
Returns
-------
state_transition_matrix : np.ndarray, shape (n_position_bins, n_position_bins)
"""
self.environment = environments[environments.index(self.environment_name)]
random = RandomWalk(
self.environment_name, self.movement_var
).make_state_transition(environments)
return _normalize_row_probability(np.tril(random)) | /replay_trajectory_classification-1.3.15-py3-none-any.whl/replay_trajectory_classification/continuous_state_transitions.py | 0.954287 | 0.698606 | continuous_state_transitions.py | pypi |
from __future__ import annotations
import numpy as np
from replay_trajectory_classification.simulate import (
get_trajectory_direction,
simulate_position,
simulate_neuron_with_place_field,
simulate_place_field_firing_rate,
simulate_time,
)
SAMPLING_FREQUENCY = 1000
TRACK_HEIGHT = 180
RUNNING_SPEED = 15
PLACE_FIELD_VARIANCE = 6.0**2
PLACE_FIELD_MEANS = np.arange(0, TRACK_HEIGHT + 10, 10)
N_RUNS = 15
REPLAY_SPEEDUP = 120.0
# Figure Parameters
MM_TO_INCHES = 1.0 / 25.4
ONE_COLUMN = 89.0 * MM_TO_INCHES
ONE_AND_HALF_COLUMN = 140.0 * MM_TO_INCHES
TWO_COLUMN = 178.0 * MM_TO_INCHES
PAGE_HEIGHT = 247.0 * MM_TO_INCHES
GOLDEN_RATIO = (np.sqrt(5) - 1.0) / 2.0
def make_simulated_run_data(
sampling_frequency: int = SAMPLING_FREQUENCY,
track_height: float = TRACK_HEIGHT,
running_speed: float = RUNNING_SPEED,
n_runs: int = N_RUNS,
place_field_variance: float = PLACE_FIELD_VARIANCE,
place_field_means: np.ndarray = PLACE_FIELD_MEANS,
make_inbound_outbound_neurons: bool = False,
) -> tuple[np.ndarray, np.ndarray, float, np.ndarray, np.ndarray]:
"""Make simulated data of a rat running back and forth
on a linear maze with sorted spikes.
Parameters
----------
sampling_frequency : float, optional
Samples per second
track_height : float, optional
Height of the simulated track
running_speed : float, optional
Speed of the simulated animal
n_runs : int, optional
Number of runs across the track the simulated animal will perform
place_field_variance : float, optional
Spatial variance of the place field
place_field_means : np.ndarray, shape (n_neurons,), optional
Location of the center of the Gaussian place fields.
make_inbound_outbound_neurons : bool
Makes neurons direction selective.
Returns
-------
time : np.ndarray, shape (n_time,)
position : np.ndarray, shape (n_time,)
        Position of the simulated animal
sampling_frequency : float
Samples per second
spikes : np.ndarray, shape (n_time, n_neurons)
        Binned spike indicator. 1 means spike occurred. 0 means no spike occurred.
place_fields : np.ndarray, shape (n_time, n_neurons)
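    Examples
    --------
    A minimal sketch of generating the simulated run data:
        (time, position, sampling_frequency,
         spikes, place_fields) = make_simulated_run_data()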
"""
n_samples = int(n_runs * sampling_frequency * 2 * track_height / running_speed)
time = simulate_time(n_samples, sampling_frequency)
position = simulate_position(time, track_height, running_speed)
if not make_inbound_outbound_neurons:
place_fields = np.stack(
[
simulate_place_field_firing_rate(
place_field_mean, position, variance=place_field_variance
)
for place_field_mean in place_field_means
],
axis=1,
)
spikes = np.stack(
[
simulate_neuron_with_place_field(
place_field_mean,
position,
max_rate=15,
variance=place_field_variance,
sampling_frequency=sampling_frequency,
)
for place_field_mean in place_field_means.T
],
axis=1,
)
else:
trajectory_direction = get_trajectory_direction(position)
place_fields = []
spikes = []
for direction in np.unique(trajectory_direction):
is_condition = trajectory_direction == direction
for place_field_mean in place_field_means:
place_fields.append(
simulate_place_field_firing_rate(
place_field_mean,
position,
variance=place_field_variance,
is_condition=is_condition,
)
)
spikes.append(
simulate_neuron_with_place_field(
place_field_mean,
position,
max_rate=15,
variance=place_field_variance,
sampling_frequency=sampling_frequency,
is_condition=is_condition,
)
)
place_fields = np.stack(place_fields, axis=1)
spikes = np.stack(spikes, axis=1)
return time, position, sampling_frequency, spikes, place_fields
def make_continuous_replay(
sampling_frequency: int = SAMPLING_FREQUENCY,
track_height: float = TRACK_HEIGHT,
running_speed: float = RUNNING_SPEED,
place_field_means: np.ndarray = PLACE_FIELD_MEANS,
replay_speedup: int = REPLAY_SPEEDUP,
is_outbound: bool = True,
) -> tuple[np.ndarray, np.ndarray]:
"""Make a simulated continuous replay.
Parameters
----------
sampling_frequency : int, optional
Samples per second
track_height : float, optional
Height of the simulated track
running_speed : float, optional
Speed of the simulated animal
place_field_means : np.ndarray, optional
Location of the center of the Gaussian place fields.
replay_speedup : int, optional
        Multiple of the running speed at which the replay traverses the track, by default REPLAY_SPEEDUP
is_outbound : bool, optional
        If True, the replay traverses the track in the outbound (increasing position) direction; otherwise inbound, by default True
Returns
-------
replay_time : np.ndarray, shape (n_time,)
Time in seconds.
test_spikes : np.ndarray, shape (n_time, n_neurons)
        Binned spike indicator. 1 means spike occurred. 0 means no spike occurred.
"""
replay_speed = running_speed * replay_speedup
n_samples = int(np.ceil(2 * sampling_frequency * track_height / replay_speed))
replay_time = simulate_time(n_samples, sampling_frequency)
true_replay_position = simulate_position(replay_time, track_height, replay_speed)
# Make inbound or outbound
replay_time = replay_time[: n_samples // 2]
if is_outbound:
true_replay_position = true_replay_position[: n_samples // 2]
else:
true_replay_position = true_replay_position[n_samples // 2 :]
min_times_ind = np.argmin(
np.abs(true_replay_position[:, np.newaxis] - place_field_means), axis=0
)
n_neurons = place_field_means.shape[0]
test_spikes = np.zeros((replay_time.size, n_neurons))
test_spikes[
(
min_times_ind,
np.arange(n_neurons),
)
] = 1.0
return replay_time, test_spikes
def make_hover_replay(
hover_neuron_ind: int = None,
place_field_means: np.ndarray = PLACE_FIELD_MEANS,
sampling_frequency: int = SAMPLING_FREQUENCY,
) -> tuple[np.ndarray, np.ndarray]:
"""Make a simulated stationary replay.
Parameters
----------
hover_neuron_ind : int, optional
        Index of the neuron whose place field the replay stays at; by default the middle neuron
place_field_means : np.ndarray, optional
        Location of the center of the Gaussian place fields, by default PLACE_FIELD_MEANS
sampling_frequency : int, optional
Samples per second
Returns
-------
replay_time : np.ndarray, shape (n_time,)
Time in seconds.
test_spikes : np.ndarray, shape (n_time, n_neurons)
        Binned spike indicator. 1 means spike occurred. 0 means no spike occurred.
"""
n_neurons = place_field_means.shape[0]
if hover_neuron_ind is None:
hover_neuron_ind = n_neurons // 2
N_TIME = 50
replay_time = np.arange(N_TIME) / sampling_frequency
spike_time_ind = np.arange(0, N_TIME, 2)
test_spikes = np.zeros((N_TIME, n_neurons))
neuron_ind = np.ones((N_TIME // 2,), dtype=int) * hover_neuron_ind
test_spikes[(spike_time_ind, neuron_ind)] = 1.0
return replay_time, test_spikes
def make_fragmented_replay(
place_field_means: np.ndarray = PLACE_FIELD_MEANS,
sampling_frequency: int = SAMPLING_FREQUENCY,
) -> tuple[np.ndarray, np.ndarray]:
"""Make a simulated fragmented replay.
Parameters
----------
place_field_means : np.ndarray, optional
        Location of the center of the Gaussian place fields, by default PLACE_FIELD_MEANS
sampling_frequency : int, optional
Samples per second
Returns
-------
replay_time : np.ndarray, shape (n_time,)
Time in seconds.
test_spikes : np.ndarray, shape (n_time, n_neurons)
        Binned spike indicator. 1 means spike occurred. 0 means no spike occurred.
"""
N_TIME = 10
replay_time = np.arange(N_TIME) / sampling_frequency
ind = ([1, 3, 5, 7, 9], [1, -1, 10, -5, 8])
n_neurons = place_field_means.shape[0]
test_spikes = np.zeros((N_TIME, n_neurons))
test_spikes[ind] = 1.0
return replay_time, test_spikes
def make_hover_continuous_hover_replay(
sampling_frequency: int = SAMPLING_FREQUENCY,
) -> tuple[np.ndarray, np.ndarray]:
"""Make a replay that starts stationary, then is continuous, then is stationary again.
Parameters
----------
sampling_frequency : int, optional
Samples per second
Returns
-------
replay_time : np.ndarray, shape (n_time,)
Time in seconds.
test_spikes : np.ndarray, shape (n_time, n_neurons)
        Binned spike indicator. 1 means spike occurred. 0 means no spike occurred.
"""
_, test_spikes1 = make_hover_replay(hover_neuron_ind=0)
_, test_spikes2 = make_continuous_replay()
_, test_spikes3 = make_hover_replay(hover_neuron_ind=-1)
test_spikes = np.concatenate((test_spikes1, test_spikes2, test_spikes3))
replay_time = np.arange(test_spikes.shape[0]) / sampling_frequency
return replay_time, test_spikes
def make_fragmented_hover_fragmented_replay(
sampling_frequency: int = SAMPLING_FREQUENCY,
) -> tuple[np.ndarray, np.ndarray]:
"""Make a replay that starts fragmented, then is stationary, and then is fragmented again.
Parameters
----------
sampling_frequency : int, optional
Samples per second
Returns
-------
replay_time : np.ndarray, shape (n_time,)
Time in seconds.
test_spikes : np.ndarray, shape (n_time, n_neurons)
        Binned spike indicator. 1 means spike occurred. 0 means no spike occurred.
"""
_, test_spikes1 = make_fragmented_replay()
_, test_spikes2 = make_hover_replay(hover_neuron_ind=6)
_, test_spikes3 = make_fragmented_replay()
test_spikes = np.concatenate((test_spikes1, test_spikes2, test_spikes3))
replay_time = np.arange(test_spikes.shape[0]) / sampling_frequency
return replay_time, test_spikes
def make_fragmented_continuous_fragmented_replay(
sampling_frequency: int = SAMPLING_FREQUENCY,
) -> tuple[np.ndarray, np.ndarray]:
"""Make a replay that is fragmented, then is continuous, then is fragmented again.
Parameters
----------
sampling_frequency : int, optional
Samples per second
Returns
-------
replay_time : np.ndarray, shape (n_time,)
Time in seconds.
test_spikes : np.ndarray, shape (n_time, n_neurons)
        Binned spike indicator. 1 means spike occurred. 0 means no spike occurred.
"""
_, test_spikes1 = make_fragmented_replay()
_, test_spikes2 = make_continuous_replay()
_, test_spikes3 = make_fragmented_replay()
test_spikes = np.concatenate((test_spikes1, test_spikes2, test_spikes3))
replay_time = np.arange(test_spikes.shape[0]) / sampling_frequency
return replay_time, test_spikes
def make_theta_sweep(
sampling_frequency: int = SAMPLING_FREQUENCY, n_sweeps: int = 5
) -> tuple[np.ndarray, np.ndarray]:
"""Simulate theta sweeping
Parameters
----------
sampling_frequency : int, optional
Samples per second
n_sweeps : int, optional
Number of sweeps to simulate, by default 5
Returns
-------
replay_time : np.ndarray, shape (n_time,)
Time in seconds.
test_spikes : np.ndarray, shape (n_time, n_neurons)
        Binned spike indicator. 1 means spike occurred. 0 means no spike occurred.
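    Examples
    --------
    A minimal sketch:
        replay_time, test_spikes = make_theta_sweep(n_sweeps=5)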
"""
_, test_spikes1 = make_continuous_replay(is_outbound=False, replay_speedup=145)
_, test_spikes2 = make_continuous_replay(is_outbound=True, replay_speedup=145)
test_spikes = np.concatenate(
[test_spikes1[test_spikes1.shape[0] // 2 :], test_spikes2] * n_sweeps
)
replay_time = np.arange(test_spikes.shape[0]) / sampling_frequency
return replay_time, test_spikes | /replay_trajectory_classification-1.3.15-py3-none-any.whl/replay_trajectory_classification/sorted_spikes_simulation.py | 0.943705 | 0.434701 | sorted_spikes_simulation.py | pypi |
from __future__ import annotations
import numpy as np
from numba import njit
from typing import Tuple
def atleast_2d(x: np.ndarray) -> np.ndarray:
"""Appends a dimension at the end if `x` is one dimensional
Parameters
----------
x : np.ndarray
Returns
-------
x : np.ndarray
"""
return np.atleast_2d(x).T if x.ndim < 2 else x
def get_centers(bin_edges: np.ndarray) -> np.ndarray:
"""Given a set of bin edges, return the center of the bin.
Parameters
----------
bin_edges : np.ndarray, shape (n_edges,)
Returns
-------
bin_centers : np.ndarray, shape (n_edges - 1,)
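    Examples
    --------
    A minimal sketch:
        get_centers(np.array([0.0, 1.0, 2.0]))  # array([0.5, 1.5])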
"""
return bin_edges[:-1] + np.diff(bin_edges) / 2
@njit(parallel=True, error_model="numpy")
def normalize_to_probability(distribution: np.ndarray) -> np.ndarray:
"""Ensure the distribution integrates to 1 so that it is a probability
distribution.
Parameters
----------
distribution : np.ndarray
Returns
-------
normalized_distribution : np.ndarray
"""
return distribution / np.nansum(distribution)
@njit(nogil=True, error_model="numpy", cache=False)
def _causal_decode(
initial_conditions: np.ndarray, state_transition: np.ndarray, likelihood: np.ndarray
) -> Tuple[np.ndarray, float]:
"""Adaptive filter to iteratively calculate the posterior probability
of a state variable using past information.
Parameters
----------
initial_conditions : np.ndarray, shape (n_bins,)
state_transition : np.ndarray, shape (n_bins, n_bins)
likelihood : np.ndarray, shape (n_time, n_bins)
Returns
-------
posterior : np.ndarray, shape (n_time, n_bins)
log_data_likelihood : float
"""
n_time = likelihood.shape[0]
posterior = np.zeros_like(likelihood)
posterior[0] = initial_conditions.copy() * likelihood[0]
norm = np.nansum(posterior[0])
log_data_likelihood = np.log(norm)
posterior[0] /= norm
for k in np.arange(1, n_time):
posterior[k] = state_transition.T @ posterior[k - 1] * likelihood[k]
norm = np.nansum(posterior[k])
log_data_likelihood += np.log(norm)
posterior[k] /= norm
return posterior, log_data_likelihood
@njit(nogil=True, error_model="numpy", cache=False)
def _acausal_decode(
causal_posterior: np.ndarray, state_transition: np.ndarray
) -> np.ndarray:
"""Uses past and future information to estimate the state.
Parameters
----------
causal_posterior : np.ndarray, shape (n_time, n_bins, 1)
state_transition : np.ndarray, shape (n_bins, n_bins)
    Returns
    -------
acausal_posterior : np.ndarray, shape (n_time, n_bins, 1)
"""
acausal_posterior = np.zeros_like(causal_posterior)
acausal_posterior[-1] = causal_posterior[-1].copy()
n_time, n_bins = causal_posterior.shape[0], causal_posterior.shape[-2]
weights = np.zeros((n_bins, 1))
eps = np.spacing(1)
for time_ind in np.arange(n_time - 2, -1, -1):
acausal_prior = state_transition.T @ causal_posterior[time_ind]
log_ratio = np.log(acausal_posterior[time_ind + 1, ..., 0] + eps) - np.log(
acausal_prior[..., 0] + eps
)
weights[..., 0] = np.exp(log_ratio) @ state_transition
acausal_posterior[time_ind] = normalize_to_probability(
weights * causal_posterior[time_ind]
)
return acausal_posterior
@njit(nogil=True, error_model="numpy", cache=False)
def _causal_classify(
initial_conditions: np.ndarray,
continuous_state_transition: np.ndarray,
discrete_state_transition: np.ndarray,
likelihood: np.ndarray,
) -> Tuple[np.ndarray, float]:
"""Adaptive filter to iteratively calculate the posterior probability
of a state variable using past information.
Parameters
----------
initial_conditions : np.ndarray, shape (n_states, n_bins, 1)
continuous_state_transition : np.ndarray, shape (n_states, n_states, n_bins, n_bins)
discrete_state_transition : np.ndarray, shape (n_states, n_states)
likelihood : np.ndarray, shape (n_time, n_states, n_bins, 1)
Returns
-------
causal_posterior : np.ndarray, shape (n_time, n_states, n_bins, 1)
log_data_likelihood : float
"""
n_time, n_states, n_bins, _ = likelihood.shape
posterior = np.zeros_like(likelihood)
posterior[0] = initial_conditions.copy() * likelihood[0]
norm = np.nansum(posterior[0])
log_data_likelihood = np.log(norm)
posterior[0] /= norm
for k in np.arange(1, n_time):
prior = np.zeros((n_states, n_bins, 1))
for state_k in np.arange(n_states):
for state_k_1 in np.arange(n_states):
prior[state_k, :] += (
discrete_state_transition[state_k_1, state_k]
* continuous_state_transition[state_k_1, state_k].T
@ posterior[k - 1, state_k_1]
)
posterior[k] = prior * likelihood[k]
norm = np.nansum(posterior[k])
log_data_likelihood += np.log(norm)
posterior[k] /= norm
return posterior, log_data_likelihood
@njit(nogil=True, error_model="numpy", cache=False)
def _acausal_classify(
causal_posterior: np.ndarray,
continuous_state_transition: np.ndarray,
discrete_state_transition: np.ndarray,
) -> np.ndarray:
"""Uses past and future information to estimate the state.
Parameters
----------
causal_posterior : np.ndarray, shape (n_time, n_states, n_bins, 1)
continuous_state_transition : np.ndarray, shape (n_states, n_states, n_bins, n_bins)
discrete_state_transition : np.ndarray, shape (n_states, n_states)
    Returns
    -------
acausal_posterior : np.ndarray, shape (n_time, n_states, n_bins, 1)
"""
acausal_posterior = np.zeros_like(causal_posterior)
acausal_posterior[-1] = causal_posterior[-1].copy()
n_time, n_states, n_bins, _ = causal_posterior.shape
eps = np.spacing(1)
for k in np.arange(n_time - 2, -1, -1):
# Prediction Step -- p(x_{k+1}, I_{k+1} | y_{1:k})
prior = np.zeros((n_states, n_bins, 1))
for state_k_1 in np.arange(n_states):
for state_k in np.arange(n_states):
prior[state_k_1, :] += (
discrete_state_transition[state_k, state_k_1]
* continuous_state_transition[state_k, state_k_1].T
@ causal_posterior[k, state_k]
)
# Backwards Update
weights = np.zeros((n_states, n_bins, 1))
ratio = np.exp(np.log(acausal_posterior[k + 1] + eps) - np.log(prior + eps))
for state_k in np.arange(n_states):
for state_k_1 in np.arange(n_states):
weights[state_k] += (
discrete_state_transition[state_k, state_k_1]
* continuous_state_transition[state_k, state_k_1]
@ ratio[state_k_1]
)
acausal_posterior[k] = normalize_to_probability(weights * causal_posterior[k])
return acausal_posterior
def scaled_likelihood(log_likelihood: np.ndarray, axis: int = 1) -> np.ndarray:
"""Scale the likelihood so the maximum value is 1.
Parameters
----------
log_likelihood : np.ndarray, shape (n_time, n_bins)
axis : int
Returns
-------
    scaled_likelihood : np.ndarray, shape (n_time, n_bins)
"""
max_log_likelihood = np.nanmax(log_likelihood, axis=axis, keepdims=True)
# If maximum is infinity, set to zero
if max_log_likelihood.ndim > 0:
max_log_likelihood[~np.isfinite(max_log_likelihood)] = 0.0
elif not np.isfinite(max_log_likelihood):
max_log_likelihood = 0.0
# Maximum likelihood is always 1
likelihood = np.exp(log_likelihood - max_log_likelihood)
# avoid zero likelihood
likelihood += np.spacing(1, dtype=likelihood.dtype)
return likelihood
def mask(value: np.ndarray, is_track_interior: np.ndarray) -> np.ndarray:
"""Set bins that are not part of the track to NaN.
Parameters
----------
value : np.ndarray, shape (..., n_bins)
is_track_interior : np.ndarray, shape (n_bins,)
Returns
-------
masked_value : np.ndarray
"""
try:
value[..., ~is_track_interior] = np.nan
except IndexError:
value[..., ~is_track_interior, :] = np.nan
return value
def check_converged(
log_likelihood: np.ndarray,
previous_log_likelihood: np.ndarray,
tolerance: float = 1e-4,
) -> Tuple[bool, bool]:
"""We have converged if the slope of the log-likelihood function falls below 'tolerance',
i.e., |f(t) - f(t-1)| / avg < tolerance,
where avg = (|f(t)| + |f(t-1)|)/2 and f(t) is log lik at iteration t.
Parameters
----------
log_likelihood : np.ndarray
Current log likelihood
previous_log_likelihood : np.ndarray
Previous log likelihood
tolerance : float, optional
threshold for similarity, by default 1e-4
Returns
-------
is_converged : bool
is_increasing : bool
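    Examples
    --------
    A minimal sketch:
        is_converged, is_increasing = check_converged(-1000.0, -1000.5)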
"""
delta_log_likelihood = abs(log_likelihood - previous_log_likelihood)
avg_log_likelihood = (
abs(log_likelihood) + abs(previous_log_likelihood) + np.spacing(1)
) / 2
is_increasing = log_likelihood - previous_log_likelihood >= -1e-3
is_converged = (delta_log_likelihood / avg_log_likelihood) < tolerance
return is_converged, is_increasing
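# Usage sketch for an EM-style loop (`run_em_step` is a hypothetical function
# returning the data log-likelihood of the current iteration):
#     log_likelihoods = []
#     for iteration in range(max_iter):
#         log_likelihoods.append(run_em_step())
#         if iteration > 0:
#             is_converged, _ = check_converged(
#                 log_likelihoods[-1], log_likelihoods[-2])
#             if is_converged:
#                 break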
try:
import cupy as cp
@cp.fuse()
def _causal_decode_gpu(
initial_conditions: np.ndarray,
state_transition: np.ndarray,
likelihood: np.ndarray,
) -> Tuple[np.ndarray, float]:
"""Adaptive filter to iteratively calculate the posterior probability
of a state variable using past information.
Parameters
----------
initial_conditions : np.ndarray, shape (n_bins,)
state_transition : np.ndarray, shape (n_bins, n_bins)
likelihood : np.ndarray, shape (n_time, n_bins)
Returns
-------
posterior : np.ndarray, shape (n_time, n_bins)
log_data_likelihood : float
"""
initial_conditions = cp.asarray(initial_conditions, dtype=cp.float32)
state_transition = cp.asarray(state_transition, dtype=cp.float32)
likelihood = cp.asarray(likelihood, dtype=cp.float32)
n_time = likelihood.shape[0]
posterior = cp.zeros_like(likelihood)
posterior[0] = initial_conditions * likelihood[0]
norm = cp.nansum(posterior[0])
log_data_likelihood = cp.log(norm)
posterior[0] /= norm
for k in np.arange(1, n_time):
posterior[k] = state_transition.T @ posterior[k - 1] * likelihood[k]
norm = np.nansum(posterior[k])
log_data_likelihood += cp.log(norm)
posterior[k] /= norm
return cp.asnumpy(posterior), cp.asnumpy(log_data_likelihood)
@cp.fuse()
def _acausal_decode_gpu(
causal_posterior: np.ndarray, state_transition: np.ndarray
) -> np.ndarray:
"""Uses past and future information to estimate the state.
Parameters
----------
causal_posterior : np.ndarray, shape (n_time, n_bins, 1)
state_transition : np.ndarray, shape (n_bins, n_bins)
Returns
-------
acausal_posterior : np.ndarray, shape (n_time, n_bins, 1)
"""
causal_posterior = cp.asarray(causal_posterior, dtype=cp.float32)
state_transition = cp.asarray(state_transition, dtype=cp.float32)
acausal_posterior = cp.zeros_like(causal_posterior)
acausal_posterior[-1] = causal_posterior[-1]
n_time, n_bins = causal_posterior.shape[0], causal_posterior.shape[-2]
weights = cp.zeros((n_bins, 1))
eps = np.spacing(1, dtype=np.float32)
for time_ind in np.arange(n_time - 2, -1, -1):
acausal_prior = state_transition.T @ causal_posterior[time_ind]
log_ratio = cp.log(acausal_posterior[time_ind + 1, ..., 0] + eps) - cp.log(
acausal_prior[..., 0] + eps
)
weights[..., 0] = cp.exp(log_ratio) @ state_transition
acausal_posterior[time_ind] = weights * causal_posterior[time_ind]
acausal_posterior[time_ind] /= np.nansum(acausal_posterior[time_ind])
return cp.asnumpy(acausal_posterior)
@cp.fuse()
def _causal_classify_gpu(
initial_conditions: np.ndarray,
continuous_state_transition: np.ndarray,
discrete_state_transition: np.ndarray,
likelihood: np.ndarray,
) -> Tuple[np.ndarray, float]:
"""Adaptive filter to iteratively calculate the posterior probability
of a state variable using past information.
Parameters
----------
initial_conditions : np.ndarray, shape (n_states, n_bins, 1)
continuous_state_transition : np.ndarray, shape (n_states, n_states, n_bins, n_bins)
discrete_state_transition : np.ndarray, shape (n_states, n_states)
likelihood : np.ndarray, shape (n_time, n_states, n_bins, 1)
Returns
-------
causal_posterior : np.ndarray, shape (n_time, n_states, n_bins, 1)
log_data_likelihood : float
"""
initial_conditions = cp.asarray(initial_conditions, dtype=cp.float32)
continuous_state_transition = cp.asarray(
continuous_state_transition, dtype=cp.float32
)
discrete_state_transition = cp.asarray(
discrete_state_transition, dtype=cp.float32
)
likelihood = cp.asarray(likelihood, dtype=cp.float32)
n_time, n_states, n_bins, _ = likelihood.shape
posterior = cp.zeros_like(likelihood)
posterior[0] = initial_conditions * likelihood[0]
norm = cp.nansum(posterior[0])
log_data_likelihood = cp.log(norm)
posterior[0] /= norm
for k in np.arange(1, n_time):
for state_k in np.arange(n_states):
for state_k_1 in np.arange(n_states):
posterior[k, state_k] += (
discrete_state_transition[state_k_1, state_k]
* continuous_state_transition[state_k_1, state_k].T
@ posterior[k - 1, state_k_1]
)
posterior[k] *= likelihood[k]
norm = cp.nansum(posterior[k])
log_data_likelihood += cp.log(norm)
posterior[k] /= norm
return cp.asnumpy(posterior), cp.asnumpy(log_data_likelihood)
@cp.fuse()
def _acausal_classify_gpu(
causal_posterior: np.ndarray,
continuous_state_transition: np.ndarray,
discrete_state_transition: np.ndarray,
) -> np.ndarray:
"""Uses past and future information to estimate the state.
Parameters
----------
causal_posterior : np.ndarray, shape (n_time, n_states, n_bins, 1)
continuous_state_transition : np.ndarray, shape (n_states, n_states, n_bins, n_bins)
discrete_state_transition : np.ndarray, shape (n_states, n_states)
Returns
-------
acausal_posterior : np.ndarray, shape (n_time, n_states, n_bins, 1)
"""
causal_posterior = cp.asarray(causal_posterior, dtype=cp.float32)
continuous_state_transition = cp.asarray(
continuous_state_transition, dtype=cp.float32
)
discrete_state_transition = cp.asarray(
discrete_state_transition, dtype=cp.float32
)
acausal_posterior = cp.zeros_like(causal_posterior)
acausal_posterior[-1] = causal_posterior[-1]
n_time, n_states, n_bins, _ = causal_posterior.shape
eps = np.spacing(1, dtype=np.float32)
for k in np.arange(n_time - 2, -1, -1):
# Prediction Step -- p(x_{k+1}, I_{k+1} | y_{1:k})
prior = cp.zeros((n_states, n_bins, 1))
for state_k_1 in np.arange(n_states):
for state_k in np.arange(n_states):
prior[state_k_1, :] += (
discrete_state_transition[state_k, state_k_1]
* continuous_state_transition[state_k, state_k_1].T
@ causal_posterior[k, state_k]
)
# Backwards Update
weights = cp.zeros((n_states, n_bins, 1))
ratio = cp.exp(cp.log(acausal_posterior[k + 1] + eps) - cp.log(prior + eps))
for state_k in np.arange(n_states):
for state_k_1 in np.arange(n_states):
weights[state_k] += (
discrete_state_transition[state_k, state_k_1]
* continuous_state_transition[state_k, state_k_1]
@ ratio[state_k_1]
)
acausal_posterior[k] = weights * causal_posterior[k]
acausal_posterior[k] /= cp.nansum(acausal_posterior[k])
return cp.asnumpy(acausal_posterior)
except ImportError:
from logging import getLogger
logger = getLogger(__name__)
logger.warning(
"Cupy is not installed or GPU is not detected."
" Ignore this message if not using GPU"
)
def _causal_decode_gpu(*args, **kwargs):
logger.error("No GPU detected...")
def _acausal_decode_gpu(*args, **kwargs):
logger.error("No GPU detected...")
def _causal_classify_gpu(*args, **kwargs):
logger.error("No GPU detected...")
def _acausal_classify_gpu(*args, **kwargs):
logger.error("No GPU detected...") | /replay_trajectory_classification-1.3.15-py3-none-any.whl/replay_trajectory_classification/core.py | 0.974018 | 0.825765 | core.py | pypi |
from __future__ import annotations
import numpy as np
from replay_trajectory_classification.core import scaled_likelihood
from replay_trajectory_classification.likelihoods.multiunit_likelihood import (
estimate_intensity,
)
from scipy.signal import convolve
from scipy.special import cotdg
from scipy.stats import rv_histogram
from skimage.transform import radon
from sklearn.isotonic import IsotonicRegression
from sklearn.linear_model import LinearRegression
def poisson_mark_log_likelihood(
joint_mark_intensity, ground_process_intensity, time_bin_size=1
):
"""Probability of parameters given spiking indicator at a particular
time and associated marks.
Parameters
----------
joint_mark_intensity : ndarray, shape (n_time, n_bins)
ground_process_intensity : ndarray, shape (1, n_bins)
time_bin_size : int, optional
Returns
-------
poisson_mark_log_likelihood : ndarray, shape (n_time, n_bins)
"""
return np.log(joint_mark_intensity + np.spacing(1)) - (
(ground_process_intensity + np.spacing(1)) * time_bin_size
)
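# Usage sketch (shapes and values are made up; 0.020 s matches the default
# time bin used by predict_mark_likelihood below):
#     jmi = np.random.rand(100, 50)   # (n_time, n_bins) joint mark intensity
#     gpi = np.random.rand(1, 50)     # (1, n_bins) ground process intensity
#     log_like = poisson_mark_log_likelihood(jmi, gpi, time_bin_size=0.020)
#     # log_like has shape (100, 50); the (1, n_bins) term broadcasts over time.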
def predict_mark_likelihood(
start_time,
end_time,
place_bin_centers,
occupancy,
joint_pdf_models,
multiunit_dfs,
ground_process_intensities,
mean_rates,
is_track_interior,
dt=0.020,
):
n_time_bins = np.ceil((end_time - start_time) / dt).astype(int)
time_bin_edges = start_time + np.arange(n_time_bins + 1) * dt
n_place_bins = len(place_bin_centers)
log_likelihood = np.zeros((n_time_bins, n_place_bins))
interior_bin_inds = np.nonzero(is_track_interior)[0]
for joint_model, multiunit_df, gpi, mean_rate in zip(
joint_pdf_models, multiunit_dfs, ground_process_intensities, mean_rates
):
time_index = np.searchsorted(time_bin_edges, multiunit_df.index.total_seconds())
in_time_bins = np.nonzero(~np.isin(time_index, [0, len(time_bin_edges)]))[0]
time_index = time_index[in_time_bins] - 1
multiunit_df = multiunit_df.iloc[in_time_bins, :4]
multiunit_df["time_bin_ind"] = time_index
n_spikes = multiunit_df.shape[0]
joint_mark_intensity = np.ones((n_spikes, n_place_bins))
if n_spikes > 0:
zipped = zip(
interior_bin_inds,
place_bin_centers[interior_bin_inds],
occupancy[interior_bin_inds],
)
for bin_ind, bin, bin_occupancy in zipped:
marks_pos = np.asarray(multiunit_df.iloc[:, :4])
marks_pos = np.concatenate(
(marks_pos, bin * np.ones((n_spikes, 1))), axis=1
)
joint_mark_intensity[:, bin_ind] = estimate_intensity(
np.exp(joint_model.score_samples(marks_pos)),
bin_occupancy,
mean_rate,
)
tetrode_likelihood = poisson_mark_log_likelihood(
joint_mark_intensity, np.atleast_2d(gpi)
)
for time_bin_ind in np.unique(time_index):
log_likelihood[time_bin_ind] += np.sum(
tetrode_likelihood[time_index == time_bin_ind], axis=0
)
mask = np.ones_like(is_track_interior, dtype=float)
mask[~is_track_interior] = np.nan
log_likelihood = log_likelihood * mask
time = np.arange(n_time_bins) * dt
return scaled_likelihood(log_likelihood), time
def predict_poisson_likelihood(
start_time, end_time, spike_times, place_fields, is_track_interior, dt=0.020
):
place_fields = np.asarray(place_fields)
n_time_bins = np.ceil((end_time - start_time) / dt).astype(int)
time_bin_edges = start_time + np.arange(n_time_bins + 1) * dt
time_bin_centers = time_bin_edges[:-1] + np.diff(time_bin_edges) / 2
spike_time_ind, neuron_ind = [], []
for ind, times in enumerate(spike_times):
is_valid_time = (times >= start_time) & (times <= end_time)
inds = np.digitize(times[is_valid_time], time_bin_edges[1:-1])
spike_time_ind.append(inds)
neuron_ind.append(np.ones_like(inds) * ind)
neuron_ind = np.concatenate(neuron_ind)
spike_time_ind = np.concatenate(spike_time_ind)
log_likelihood = np.stack(
[
np.sum(
np.log(
place_fields[:, neuron_ind[spike_time_ind == time_bin]]
+ np.spacing(1)
),
axis=1,
)
for time_bin in np.arange(n_time_bins)
]
)
log_likelihood -= dt * np.sum(place_fields, axis=1)
mask = np.ones_like(is_track_interior, dtype=float)
mask[~is_track_interior] = np.nan
return scaled_likelihood(log_likelihood) * mask, time_bin_centers
def normalize_to_posterior(likelihood, prior=None):
if prior is None:
n_position_bins = likelihood.shape[1]
prior = np.ones_like(likelihood) / n_position_bins
posterior = likelihood * prior
return posterior / np.nansum(posterior, axis=1, keepdims=True)
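# Usage sketch: with the default flat prior, the posterior is the likelihood
# renormalized so that each time bin sums to one across position bins.
#     likelihood = np.random.rand(200, 60)   # (n_time, n_position_bins), made up
#     posterior = normalize_to_posterior(likelihood)
#     assert np.allclose(np.nansum(posterior, axis=1), 1.0)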
def convert_polar_to_slope_intercept(
n_pixels_from_center, projection_angle, center_pixel
):
slope = -cotdg(-projection_angle)
intercept = (
n_pixels_from_center / np.sin(-np.deg2rad(projection_angle))
- slope * center_pixel[0]
+ center_pixel[1]
)
return intercept, slope
def detect_line_with_radon(
posterior,
dt, # s
dp, # cm
projection_angles=np.arange(-90, 90, 0.5), # degrees
filter_invalid_positions=True,
incorporate_nearby_positions=True,
nearby_positions_max=15, # cm
):
if incorporate_nearby_positions:
n_nearby_bins = int(nearby_positions_max / 2 // dp)
filt = np.ones(2 * n_nearby_bins + 1)
posterior = np.apply_along_axis(
lambda time_bin: convolve(time_bin, filt, mode="same"),
axis=1,
arr=posterior,
)
else:
n_nearby_bins = 1
# Sinogram is shape (pixels_from_center, projection_angles)
sinogram = radon(
posterior.T, theta=projection_angles, circle=False, preserve_range=False
)
n_time, n_position_bins = posterior.shape
center_pixel = np.asarray((n_time // 2, n_position_bins // 2))
pixels_from_center = np.arange(
-sinogram.shape[0] // 2 + 1, sinogram.shape[0] // 2 + 1
)
if filter_invalid_positions:
start_positions, velocities = convert_polar_to_slope_intercept(
pixels_from_center[:, np.newaxis],
projection_angles[np.newaxis, :],
center_pixel,
)
end_positions = start_positions + velocities * (n_time - 1)
sinogram[(start_positions < 0) | (start_positions > n_position_bins - 1)] = 0.0
sinogram[(end_positions < 0) | (end_positions > n_position_bins - 1)] = 0.0
sinogram[:, np.isinf(velocities.squeeze())] = 0.0
# Find the maximum of the sinogram
n_pixels_from_center_ind, projection_angle_ind = np.unravel_index(
indices=np.argmax(sinogram), shape=sinogram.shape
)
projection_angle = projection_angles[projection_angle_ind]
n_pixels_from_center = pixels_from_center[n_pixels_from_center_ind]
# Normalized score based on the integrated projection
score = np.max(sinogram) / (n_time * n_nearby_bins)
# Convert from polar form to slope-intercept form
start_position, velocity = convert_polar_to_slope_intercept(
n_pixels_from_center, projection_angle, center_pixel
)
# Convert from pixels to position units
start_position *= dp
velocity *= dp / dt
# Estimate position for the posterior
time = np.arange(n_time) * dt
radon_position = start_position + velocity * time
return start_position, velocity, radon_position, score
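# Usage sketch (`posterior` is a decoded (n_time, n_position_bins) array and
# dt/dp are the time and position bin sizes used to build it):
#     start_position, velocity, radon_position, score = detect_line_with_radon(
#         posterior, dt=0.020, dp=2.0)
#     # `radon_position` is the best-fit line evaluated at each time bin and
#     # `score` is the normalized posterior mass integrated along that line.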
def map_estimate(posterior, place_bin_centers):
posterior[np.isnan(posterior)] = 0.0
return place_bin_centers[posterior.argmax(axis=1)].squeeze()
def _m(x, w):
"""Weighted Mean"""
return np.sum(x * w) / np.sum(w)
def _cov(x, y, w):
"""Weighted Covariance"""
return np.sum(w * (x - _m(x, w)) * (y - _m(y, w))) / np.sum(w)
def _corr(x, y, w):
"""Weighted Correlation"""
return _cov(x, y, w) / np.sqrt(_cov(x, x, w) * _cov(y, y, w))
def weighted_correlation(posterior, time, place_bin_centers):
place_bin_centers = place_bin_centers.squeeze()
posterior[np.isnan(posterior)] = 0.0
return _corr(time[:, np.newaxis], place_bin_centers[np.newaxis, :], posterior)
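# Usage sketch (`posterior` and `place_bin_centers` as in the decoder outputs):
# the posterior acts as the weight on each (time, position) pair, so a
# posterior that moves monotonically gives a correlation near +/-1.
#     time = np.arange(posterior.shape[0]) * 0.020
#     r = weighted_correlation(posterior, time, place_bin_centers)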
def isotonic_regression(posterior, time, place_bin_centers):
place_bin_centers = place_bin_centers.squeeze()
posterior[np.isnan(posterior)] = 0.0
map_position = map_estimate(posterior, place_bin_centers)
map_probabilities = np.max(posterior, axis=1)
regression = IsotonicRegression(increasing="auto").fit(
X=time,
y=map_position,
sample_weight=map_probabilities,
)
score = regression.score(
X=time,
y=map_position,
sample_weight=map_probabilities,
)
prediction = regression.predict(time)
return prediction, score
def _sample_posterior(posterior, place_bin_edges, n_samples=1000):
"""Samples the posterior positions.
Parameters
----------
posterior : np.array, shape (n_time, n_position_bins)
Returns
-------
posterior_samples : numpy.ndarray, shape (n_time, n_samples)
"""
place_bin_edges = place_bin_edges.squeeze()
n_time = posterior.shape[0]
posterior_samples = [
rv_histogram((posterior[time_ind], place_bin_edges)).rvs(size=n_samples)
for time_ind in range(n_time)
]
return np.asarray(posterior_samples)
def linear_regression(posterior, place_bin_edges, time, n_samples=1000):
posterior[np.isnan(posterior)] = 0.0
samples = _sample_posterior(posterior, place_bin_edges, n_samples=n_samples)
design_matrix = np.tile(time, n_samples)[:, np.newaxis]
response = samples.ravel(order="F")
regression = LinearRegression().fit(X=design_matrix, y=response)
r2 = regression.score(X=design_matrix, y=response)
slope = regression.coef_[0]
intercept = regression.intercept_
prediction = regression.predict(time[:, np.newaxis])
return intercept, slope, r2, prediction | /replay_trajectory_classification-1.3.15-py3-none-any.whl/replay_trajectory_classification/standard_decoder.py | 0.927087 | 0.551634 | standard_decoder.py | pypi |
from __future__ import annotations
import numpy as np
from replay_trajectory_classification.core import atleast_2d
from scipy.stats import multivariate_normal
from typing import Optional
def simulate_time(n_samples: int, sampling_frequency: float) -> np.ndarray:
"""Simulate a time in seconds.
Parameters
----------
n_samples : int
The number of samples to generate.
sampling_frequency : float
Samples per second.
Returns
-------
time : ndarray, shape (n_samples,)
Time in seconds
"""
return np.arange(n_samples) / sampling_frequency
def simulate_position(
time: np.ndarray, track_height: float, running_speed: float = 15
) -> np.ndarray:
"""Simulate animal moving through linear space.
Parameters
----------
time : ndarray, shape (n_time,)
Time in seconds.
track_height : float
The height of the simulated track.
running_speed : float, optional
The running speed of the simulated animal (default is 15).
Returns
-------
position : ndarray, shape (n_time,)
The simulated position of the animal.
"""
half_height = track_height / 2
freq = 1 / (2 * track_height / running_speed)
return half_height * np.sin(freq * 2 * np.pi * time - np.pi / 2) + half_height
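# Usage sketch: ~1 minute of back-and-forth running on a 180 cm track sampled
# at 500 Hz.
#     sampling_frequency = 500
#     time = simulate_time(30_000, sampling_frequency)
#     position = simulate_position(time, track_height=180, running_speed=15)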
def simulate_position_with_pauses(
time: np.ndarray,
track_height: float,
running_speed: float = 15,
pause: float = 0.5,
sampling_frequency: float = 1,
) -> np.ndarray:
"""Simulate an animal moving with pauses.
Parameters
----------
time : ndarray, shape (n_time,)
The time vector.
track_height : float
The height of the track.
running_speed : float, optional
The running speed (default is 15).
pause : float, optional
The pause duration (default is 0.5).
sampling_frequency : float, optional
The sampling frequency (default is 1).
Returns
-------
position : ndarray, shape (n_time,)
The simulated position of the animal with pauses.
"""
position = simulate_position(time, track_height, running_speed)
peaks = np.nonzero(np.isclose(position, track_height))[0]
n_pause_samples = int(pause * sampling_frequency)
pause_position = np.zeros((time.size + n_pause_samples * peaks.size,))
pause_ind = peaks[:, np.newaxis] + np.arange(n_pause_samples)
pause_ind += np.arange(peaks.size)[:, np.newaxis] * n_pause_samples
pause_position[pause_ind.ravel()] = track_height
pause_position[pause_position == 0] = position
return pause_position[: time.size]
def simulate_poisson_spikes(rate: np.ndarray, sampling_frequency: int) -> np.ndarray:
"""Given a rate, returns a time series of spikes.
Parameters
----------
rate : np.ndarray, shape (n_time,)
sampling_frequency : int
Returns
-------
spikes : np.ndarray, shape (n_time,)
"""
return 1.0 * (np.random.poisson(rate / sampling_frequency) > 0)
def simulate_place_field_firing_rate(
means: np.ndarray,
position: np.ndarray,
max_rate: float = 15.0,
variance: float = 10.0,
is_condition: Optional[np.ndarray] = None,
) -> np.ndarray:
"""Simulates the firing rate of a neuron with a place field at `means`.
Parameters
----------
means : np.ndarray, shape (n_position_dims,)
position : np.ndarray, shape (n_time, n_position_dims)
max_rate : float, optional
variance : float, optional
is_condition : None or np.ndarray, (n_time,)
Returns
-------
firing_rate : np.ndarray, shape (n_time,)
"""
if is_condition is None:
is_condition = np.ones(position.shape[0], dtype=bool)
position = atleast_2d(position)
firing_rate = multivariate_normal(means, variance).pdf(position)
firing_rate /= firing_rate.max()
firing_rate *= max_rate
firing_rate[~is_condition] = 0.0
return firing_rate
def simulate_neuron_with_place_field(
means: np.ndarray,
position: np.ndarray,
max_rate: float = 15.0,
variance: float = 36.0,
sampling_frequency: int = 500,
is_condition: Optional[np.ndarray] = None,
) -> np.ndarray:
"""Simulates the spiking of a neuron with a place field at `means`.
Parameters
----------
means : np.ndarray, shape (n_position_dims,)
position : np.ndarray, shape (n_time, n_position_dims)
max_rate : float, optional
variance : float, optional
sampling_frequency : float, optional
is_condition : None or np.ndarray, (n_time,)
Returns
-------
spikes : np.ndarray, shape (n_time,)
"""
firing_rate = simulate_place_field_firing_rate(
means, position, max_rate, variance, is_condition
)
return simulate_poisson_spikes(firing_rate, sampling_frequency)
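# Usage sketch: spikes from a small population of place cells with fields
# spaced every 20 cm along the simulated 1D track (`position` as above).
#     place_field_means = np.arange(0, 180, 20)[:, np.newaxis]
#     spikes = np.stack(
#         [simulate_neuron_with_place_field(
#              mean, position[:, np.newaxis], sampling_frequency=500)
#          for mean in place_field_means],
#         axis=1,
#     )   # shape (n_time, n_neurons)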
def simulate_multiunit_with_place_fields(
place_means: np.ndarray,
position: np.ndarray,
mark_spacing: int = 5,
n_mark_dims: int = 4,
place_variance: float = 36.0,
mark_variance: float = 1.0,
max_rate: float = 100.0,
sampling_frequency: int = 1000,
is_condition: Optional[np.ndarray] = None,
) -> np.ndarray:
"""Simulates a multiunit with neurons at `place_means`
Parameters
----------
place_means : np.ndarray, shape (n_neurons, n_position_dims)
position : np.ndarray, shape (n_time, n_position_dims)
mark_spacing : int, optional
n_mark_dims : int, optional
place_variance : float
mark_variance : float
max_rate : float
sampling_frequency : int
is_condition : np.ndarray or None
Returns
-------
multiunit : np.ndarray, shape (n_time, n_mark_dims)
"""
n_neurons = place_means.shape[0]
mark_centers = np.arange(0, n_neurons * mark_spacing, mark_spacing)
n_time = position.shape[0]
marks = np.full((n_time, n_mark_dims), np.nan)
for mean, mark_center in zip(place_means, mark_centers):
is_spike = (
simulate_neuron_with_place_field(
mean,
position,
max_rate=max_rate,
variance=place_variance,
sampling_frequency=sampling_frequency,
is_condition=is_condition,
)
> 0
)
n_spikes = int(is_spike.sum())
marks[is_spike] = multivariate_normal(
mean=[mark_center] * n_mark_dims, cov=mark_variance
).rvs(size=n_spikes)
return marks
def get_trajectory_direction(position: np.ndarray) -> np.ndarray:
"""Find if the trajectory is inbound or outbound.
Parameters
----------
position : np.ndarray, shape (n_time,)
Returns
-------
is_inbound : np.ndarray, shape (n_time,)
"""
is_inbound = np.insert(np.diff(position) < 0, 0, False)
return np.where(is_inbound, "Inbound", "Outbound") | /replay_trajectory_classification-1.3.15-py3-none-any.whl/replay_trajectory_classification/simulate.py | 0.977746 | 0.737371 | simulate.py | pypi |
from __future__ import annotations
from dataclasses import dataclass
from typing import Optional, Union
import joblib
import matplotlib
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
from numba import njit
from scipy import ndimage
from scipy.interpolate import interp1d
from sklearn.neighbors import NearestNeighbors
from track_linearization import plot_graph_as_1D
from replay_trajectory_classification.core import atleast_2d, get_centers
@dataclass
class Environment:
"""Represent the spatial environment with a discrete grid.
Parameters
----------
environment_name : str, optional
place_bin_size : float, optional
Approximate size of the position bins.
track_graph : networkx.Graph, optional
Graph representing the 1D spatial topology
edge_order : tuple of 2-tuples, optional
The order of the edges in 1D space
edge_spacing : None or int or tuples of len n_edges-1, optional
Any gaps between the edges in 1D space
is_track_interior : np.ndarray or None, optional
If given, this will be used to define the valid areas of the track.
Must be of type boolean.
position_range : sequence, optional
A sequence of `n_position_dims`, each an optional (lower, upper)
tuple giving the outer bin edges for position.
An entry of None in the sequence results in the minimum and maximum
values being used for the corresponding dimension.
The default, None, is equivalent to passing a tuple of
`n_position_dims` None values.
infer_track_interior : bool, optional
If True, then use the given positions to figure out the valid track
areas.
fill_holes : bool, optional
Fill holes when inferring the track
dilate : bool, optional
Inflate the available track area with binary dilation
bin_count_threshold : int, optional
A bin must contain more than this number of samples to be
considered part of the track.
"""
environment_name: str = ""
place_bin_size: float = 2.0
track_graph: nx.Graph = None
edge_order: tuple = None
edge_spacing: tuple = None
is_track_interior: np.ndarray = None
position_range: np.ndarray = None
infer_track_interior: bool = True
fill_holes: bool = False
dilate: bool = False
bin_count_threshold: int = 0
def __eq__(self, other: str) -> bool:
return self.environment_name == other
def fit_place_grid(
self, position: Optional[np.ndarray] = None, infer_track_interior: bool = True
):
"""Fits a discrete grid of the spatial environment.
Parameters
----------
position : np.ndarray, optional
Position of the animal.
infer_track_interior : bool, optional
Whether to infer the spatial geometry of track from position
Returns
-------
self
"""
if self.track_graph is None:
(
self.edges_,
self.place_bin_edges_,
self.place_bin_centers_,
self.centers_shape_,
) = get_grid(
position,
self.place_bin_size,
self.position_range,
)
self.infer_track_interior = infer_track_interior
if self.is_track_interior is None and self.infer_track_interior:
self.is_track_interior_ = get_track_interior(
position,
self.edges_,
self.fill_holes,
self.dilate,
self.bin_count_threshold,
)
elif self.is_track_interior is None and not self.infer_track_interior:
self.is_track_interior_ = np.ones(self.centers_shape_, dtype=bool)
if len(self.edges_) > 1:
self.is_track_boundary_ = get_track_boundary(
self.is_track_interior_, connectivity=1
)
else:
self.is_track_boundary_ = None
else:
(
self.place_bin_centers_,
self.place_bin_edges_,
self.is_track_interior_,
self.distance_between_nodes_,
self.centers_shape_,
self.edges_,
self.track_graph_with_bin_centers_edges_,
self.original_nodes_df_,
self.place_bin_edges_nodes_df_,
self.place_bin_centers_nodes_df_,
self.nodes_df_,
) = get_track_grid(
self.track_graph,
self.edge_order,
self.edge_spacing,
self.place_bin_size,
)
self.is_track_boundary_ = None
return self
def plot_grid(self, ax: matplotlib.axes.Axes = None):
"""Plot the fitted spatial grid of the environment.
Parameters
----------
ax : plt.axes, optional
Plot on this axis if given, by default None
"""
if self.track_graph is not None:
if ax is None:
_, ax = plt.subplots(figsize=(15, 2))
plot_graph_as_1D(
self.track_graph, self.edge_order, self.edge_spacing, ax=ax
)
for edge in self.edges_[0]:
ax.axvline(edge.squeeze(), linewidth=0.5, color="black")
ax.set_ylim((0, 0.1))
else:
if ax is None:
_, ax = plt.subplots(figsize=(6, 7))
ax.pcolormesh(
self.edges_[0], self.edges_[1], self.is_track_interior_.T, cmap="bone_r"
)
ax.set_xticks(self.edges_[0], minor=True)
ax.set_yticks(self.edges_[1], minor=True)
ax.grid(visible=True, which="minor")
ax.grid(visible=False, which="major")
def save_environment(self, filename: str = "environment.pkl"):
"""Saves the environment as a pickled file.
Parameters
----------
filename : str, optional
File name to pickle the environment to, by default "environment.pkl"
"""
joblib.dump(self, filename)
@staticmethod
def load_environment(filename: str = "environment.pkl"):
"""Load the environment from a file.
Parameters
----------
filename : str, optional
Returns
-------
environment instance
"""
return joblib.load(filename)
def get_n_bins(
position: np.ndarray,
bin_size: float = 2.5,
position_range: Optional[list[np.ndarray]] = None,
) -> int:
"""Get number of bins need to span a range given a bin size.
Parameters
----------
position : np.ndarray, shape (n_time,)
bin_size : float, optional
position_range : None or list of np.ndarray
Use this to define the extent instead of position
Returns
-------
n_bins : int
"""
if position_range is not None:
extent = np.diff(position_range, axis=1).squeeze()
else:
extent = np.ptp(position, axis=0)
return np.ceil(extent / bin_size).astype(np.int32)
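# Usage sketch: positions spanning roughly 100 x 80 cm with 2.5 cm bins give
# about 40 x 32 bins (values are made up).
#     position = np.random.rand(1000, 2) * [100.0, 80.0]
#     n_bins = get_n_bins(position, bin_size=2.5)   # approximately [40, 32]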
def get_grid(
position: np.ndarray,
bin_size: float = 2.5,
position_range: Optional[list[np.ndarray]] = None,
) -> tuple[np.ndarray, np.ndarray, np.ndarray, tuple]:
"""Gets the spatial grid of bins.
Parameters
----------
position : np.ndarray, shape (n_time, n_position_dims)
bin_size : float, optional
Maximum size of each position bin.
position_range : None or list of np.ndarray
Use this to define the extent instead of position
Returns
-------
edges : tuple of bin edges, len n_position_dims
place_bin_edges : np.ndarray, shape (n_bins + 1, n_position_dims)
Edges of each position bin
place_bin_centers : np.ndarray, shape (n_bins, n_position_dims)
Center of each position bin
centers_shape : tuple
Position grid shape
"""
position = atleast_2d(position)
is_nan = np.any(np.isnan(position), axis=1)
position = position[~is_nan]
n_bins = get_n_bins(position, bin_size, position_range)
_, edges = np.histogramdd(position, bins=n_bins, range=position_range)
if len(edges) > 1:
edges = [
np.insert(
edge,
(0, len(edge)),
(edge[0] - np.diff(edge)[0], edge[-1] + np.diff(edge)[0]),
)
for edge in edges
]
mesh_edges = np.meshgrid(*edges)
place_bin_edges = np.stack([edge.ravel() for edge in mesh_edges], axis=1)
mesh_centers = np.meshgrid(*[get_centers(edge) for edge in edges])
place_bin_centers = np.stack([center.ravel() for center in mesh_centers], axis=1)
centers_shape = mesh_centers[0].T.shape
return edges, place_bin_edges, place_bin_centers, centers_shape
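# Usage sketch: build the grid, then bin positions with np.histogramdd using
# the same edges so occupancy counts line up with `place_bin_centers`
# (`position` is assumed to be NaN-free here).
#     edges, place_bin_edges, place_bin_centers, centers_shape = get_grid(
#         position, bin_size=2.5)
#     occupancy, _ = np.histogramdd(position, bins=edges)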
def get_track_interior(
position: np.ndarray,
bins: int,
fill_holes: bool = False,
dilate: bool = False,
bin_count_threshold: int = 0,
) -> np.ndarray:
"""Infers the interior bins of the track given positions.
Parameters
----------
position : np.ndarray, shape (n_time, n_position_dims)
bins : sequence or int, optional
The bin specification:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... =bins)
* The number of bins for all dimensions (nx=ny=...=bins).
fill_holes : bool, optional
Fill any holes in the extracted track interior bins
dilate : bool, optional
Inflate the extracted track interior bins
bin_count_threshold : int, optional
A bin must contain more than this number of samples to be
considered part of the track.
Returns
-------
is_track_interior : np.ndarray
The interior bins of the track as inferred from position.
"""
bin_counts, _ = np.histogramdd(position, bins=bins)
is_track_interior = (bin_counts > bin_count_threshold).astype(int)
n_position_dims = position.shape[1]
if n_position_dims > 1:
structure = ndimage.generate_binary_structure(n_position_dims, 1)
is_track_interior = ndimage.binary_closing(
is_track_interior, structure=structure
)
if fill_holes:
is_track_interior = ndimage.binary_fill_holes(is_track_interior)
if dilate:
is_track_interior = ndimage.binary_dilation(is_track_interior)
# adjust for boundary edges in 2D
is_track_interior[-1] = False
is_track_interior[:, -1] = False
return is_track_interior.astype(bool)
def get_track_segments_from_graph(track_graph: nx.Graph) -> np.ndarray:
"""Returns a 2D array of node positions corresponding to each edge.
Parameters
----------
track_graph : networkx Graph
Returns
-------
track_segments : np.ndarray, shape (n_segments, n_nodes, n_space)
"""
node_positions = nx.get_node_attributes(track_graph, "pos")
return np.asarray(
[
(node_positions[node1], node_positions[node2])
for node1, node2 in track_graph.edges()
]
)
def project_points_to_segment(
track_segments: np.ndarray, position: np.ndarray
) -> np.ndarray:
"""Finds the closet point on a track segment in terms of Euclidean distance
Parameters
----------
track_segments : np.ndarray, shape (n_segments, n_nodes, 2)
position : np.ndarray, shape (n_time, 2)
Returns
-------
projected_positions : np.ndarray, shape (n_time, n_segments, n_space)
"""
segment_diff = np.diff(track_segments, axis=1).squeeze(axis=1)
sum_squares = np.sum(segment_diff**2, axis=1)
node1 = track_segments[:, 0, :]
projection_fraction = (
np.sum(segment_diff * (position[:, np.newaxis, :] - node1), axis=2)
/ sum_squares
)
projection_fraction[np.where(projection_fraction < 0)] = 0.0
projection_fraction[np.where(projection_fraction > 1)] = 1.0
return node1[np.newaxis, ...] + (
projection_fraction[:, :, np.newaxis] * segment_diff[np.newaxis, ...]
)
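# Usage sketch: project 2D positions onto every segment of a track graph and
# pick the closest segment per time point.
#     track_segments = get_track_segments_from_graph(track_graph)
#     projected = project_points_to_segment(track_segments, position)
#     distance = np.linalg.norm(projected - position[:, np.newaxis], axis=-1)
#     closest_segment_ind = distance.argmin(axis=1)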
def _calculate_linear_position(
track_graph: nx.Graph,
position: np.ndarray,
track_segment_id: np.ndarray,
edge_order: list[tuple],
edge_spacing: Union[float, list],
) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Determines the linear position given a 2D position and a track graph.
Parameters
----------
track_graph : nx.Graph
position : np.ndarray, shape (n_time, n_position_dims)
track_segment_id : np.ndarray, shape (n_time,)
edge_order : list of 2-tuples
edge_spacing : float or list, len n_edges - 1
Returns
-------
linear_position : np.ndarray, shape (n_time,)
projected_track_positions_x : np.ndarray, shape (n_time,)
projected_track_positions_y : np.ndarray, shape (n_time,)
"""
is_nan = np.isnan(track_segment_id)
track_segment_id[is_nan] = 0 # need to check
track_segment_id = track_segment_id.astype(int)
track_segments = get_track_segments_from_graph(track_graph)
projected_track_positions = project_points_to_segment(track_segments, position)
n_time = projected_track_positions.shape[0]
projected_track_positions = projected_track_positions[
(np.arange(n_time), track_segment_id)
]
n_edges = len(edge_order)
if isinstance(edge_spacing, (int, float)):
edge_spacing = [
edge_spacing,
] * (n_edges - 1)
counter = 0.0
start_node_linear_position = []
for ind, edge in enumerate(edge_order):
start_node_linear_position.append(counter)
try:
counter += track_graph.edges[edge]["distance"] + edge_spacing[ind]
except IndexError:
pass
start_node_linear_position = np.asarray(start_node_linear_position)
track_segment_id_to_start_node_linear_position = {
track_graph.edges[e]["edge_id"]: snlp
for e, snlp in zip(edge_order, start_node_linear_position)
}
start_node_linear_position = np.asarray(
[
track_segment_id_to_start_node_linear_position[edge_id]
for edge_id in track_segment_id
]
)
track_segment_id_to_edge = {track_graph.edges[e]["edge_id"]: e for e in edge_order}
start_node_id = np.asarray(
[track_segment_id_to_edge[edge_id][0] for edge_id in track_segment_id]
)
start_node_2D_position = np.asarray(
[track_graph.nodes[node]["pos"] for node in start_node_id]
)
linear_position = start_node_linear_position + (
np.linalg.norm(start_node_2D_position - projected_track_positions, axis=1)
)
linear_position[is_nan] = np.nan
return (
linear_position,
projected_track_positions[:, 0],
projected_track_positions[:, 1],
)
def make_track_graph_with_bin_centers_edges(
track_graph: nx.Graph, place_bin_size: float
) -> nx.Graph:
"""Insert the bin center and bin edge positions as nodes in the track graph.
Parameters
----------
track_graph : nx.Graph
place_bin_size : float
Returns
-------
track_graph_with_bin_centers_edges : nx.Graph
"""
track_graph_with_bin_centers_edges = track_graph.copy()
n_nodes = len(track_graph.nodes)
for edge_ind, (node1, node2) in enumerate(track_graph.edges):
node1_x_pos, node1_y_pos = track_graph.nodes[node1]["pos"]
node2_x_pos, node2_y_pos = track_graph.nodes[node2]["pos"]
edge_size = np.linalg.norm(
[(node2_x_pos - node1_x_pos), (node2_y_pos - node1_y_pos)]
)
n_bins = 2 * np.ceil(edge_size / place_bin_size).astype(np.int32) + 1
if ~np.isclose(node1_x_pos, node2_x_pos):
f = interp1d((node1_x_pos, node2_x_pos), (node1_y_pos, node2_y_pos))
xnew = np.linspace(node1_x_pos, node2_x_pos, num=n_bins, endpoint=True)
xy = np.stack((xnew, f(xnew)), axis=1)
else:
ynew = np.linspace(node1_y_pos, node2_y_pos, num=n_bins, endpoint=True)
xnew = np.ones_like(ynew) * node1_x_pos
xy = np.stack((xnew, ynew), axis=1)
dist_between_nodes = np.linalg.norm(np.diff(xy, axis=0), axis=1)
new_node_ids = n_nodes + np.arange(len(dist_between_nodes) + 1)
nx.add_path(
track_graph_with_bin_centers_edges,
[*new_node_ids],
distance=dist_between_nodes[0],
)
nx.add_path(
track_graph_with_bin_centers_edges, [node1, new_node_ids[0]], distance=0
)
nx.add_path(
track_graph_with_bin_centers_edges, [node2, new_node_ids[-1]], distance=0
)
track_graph_with_bin_centers_edges.remove_edge(node1, node2)
for ind, (node_id, pos) in enumerate(zip(new_node_ids, xy)):
track_graph_with_bin_centers_edges.nodes[node_id]["pos"] = pos
track_graph_with_bin_centers_edges.nodes[node_id]["edge_id"] = edge_ind
if ind % 2:
track_graph_with_bin_centers_edges.nodes[node_id]["is_bin_edge"] = False
else:
track_graph_with_bin_centers_edges.nodes[node_id]["is_bin_edge"] = True
track_graph_with_bin_centers_edges.nodes[node1]["edge_id"] = edge_ind
track_graph_with_bin_centers_edges.nodes[node2]["edge_id"] = edge_ind
track_graph_with_bin_centers_edges.nodes[node1]["is_bin_edge"] = True
track_graph_with_bin_centers_edges.nodes[node2]["is_bin_edge"] = True
n_nodes = len(track_graph_with_bin_centers_edges.nodes)
return track_graph_with_bin_centers_edges
def extract_bin_info_from_track_graph(
track_graph: nx.Graph,
track_graph_with_bin_centers_edges: nx.Graph,
edge_order: list[tuple],
edge_spacing: Union[float, list],
) -> pd.DataFrame:
"""For each node, find edge_id, is_bin_edge, x_position, y_position, and
linear_position.
Parameters
----------
track_graph : nx.Graph
track_graph_with_bin_centers_edges : nx.Graph
edge_order : list of 2-tuples
edge_spacing : list, len n_edges - 1
Returns
-------
nodes_df : pd.DataFrame
Collect information about each bin
"""
nodes_df = (
pd.DataFrame.from_dict(
dict(track_graph_with_bin_centers_edges.nodes(data=True)), orient="index"
)
.assign(x_position=lambda df: np.asarray(list(df.pos))[:, 0])
.assign(y_position=lambda df: np.asarray(list(df.pos))[:, 1])
.drop(columns="pos")
)
node_linear_position, _, _ = _calculate_linear_position(
track_graph,
np.asarray(nodes_df.loc[:, ["x_position", "y_position"]]),
np.asarray(nodes_df.edge_id),
edge_order,
edge_spacing,
)
nodes_df["linear_position"] = node_linear_position
nodes_df = nodes_df.rename_axis(index="node_id")
edge_avg_linear_position = (
nodes_df.groupby("edge_id")
.linear_position.mean()
.rename("edge_avg_linear_position")
)
nodes_df = (
pd.merge(nodes_df.reset_index(), edge_avg_linear_position, on="edge_id")
.sort_values(by=["edge_avg_linear_position", "linear_position"], axis="rows")
.set_index("node_id")
.drop(columns="edge_avg_linear_position")
)
return nodes_df
def get_track_grid(
track_graph: nx.Graph,
edge_order: list[tuple],
edge_spacing: Union[float, list],
place_bin_size: float,
) -> tuple[
np.ndarray,
np.ndarray,
np.ndarray,
dict,
tuple,
tuple,
nx.Graph,
pd.DataFrame,
pd.DataFrame,
pd.DataFrame,
pd.DataFrame,
]:
"""Figures out 1D spatial bins given a track graph.
Parameters
----------
track_graph : nx.Graph
edge_order : list of 2-tuples
edge_spacing : list, len n_edges - 1
place_bin_size : float
Returns
-------
place_bin_centers : np.ndarray, shape (n_bins, n_position_dims)
place_bin_edges : np.ndarray, shape (n_bins + n_position_dims, n_position_dims)
is_track_interior : np.ndarray, shape (n_bins, n_position_dim)
distance_between_nodes : dict
centers_shape : tuple
edges : tuple of np.ndarray
track_graph_with_bin_centers_edges : nx.Graph
original_nodes_df : pd.DataFrame
Table of information about the original nodes in the track graph
place_bin_edges_nodes_df : pd.DataFrame
Table of information with bin edges and centers
place_bin_centers_nodes_df : pd.DataFrame
Table of information about bin centers
nodes_df : pd.DataFrame
Table of information with information about the original nodes,
bin edges, and bin centers
"""
track_graph_with_bin_centers_edges = make_track_graph_with_bin_centers_edges(
track_graph, place_bin_size
)
nodes_df = extract_bin_info_from_track_graph(
track_graph, track_graph_with_bin_centers_edges, edge_order, edge_spacing
)
# Dataframe with nodes from track graph only
original_nodes = list(track_graph.nodes)
original_nodes_df = nodes_df.loc[original_nodes].reset_index()
# Dataframe with only added edge nodes
place_bin_edges_nodes_df = nodes_df.loc[
~nodes_df.index.isin(original_nodes) & nodes_df.is_bin_edge
].reset_index()
# Dataframe with only added center nodes
place_bin_centers_nodes_df = nodes_df.loc[~nodes_df.is_bin_edge].reset_index()
# Determine place bin edges and centers.
# Make sure to remove duplicate nodes from bins with no gaps
is_duplicate_edge = np.isclose(
np.diff(np.asarray(place_bin_edges_nodes_df.linear_position)), 0.0
)
is_duplicate_edge = np.append(is_duplicate_edge, False)
no_duplicate_place_bin_edges_nodes_df = place_bin_edges_nodes_df.iloc[
~is_duplicate_edge
]
place_bin_edges = np.asarray(no_duplicate_place_bin_edges_nodes_df.linear_position)
place_bin_centers = get_centers(place_bin_edges)
# Compute distance between nodes
distance_between_nodes = dict(
nx.all_pairs_dijkstra_path_length(
track_graph_with_bin_centers_edges, weight="distance"
)
)
# Figure out which points are on the track and not just gaps
change_edge_ind = np.nonzero(
np.diff(no_duplicate_place_bin_edges_nodes_df.edge_id)
)[0]
if isinstance(edge_spacing, (int, float)):
n_edges = len(edge_order)
edge_spacing = [
edge_spacing,
] * (n_edges - 1)
is_track_interior = np.ones_like(place_bin_centers, dtype=bool)
not_track = change_edge_ind[np.asarray(edge_spacing) > 0]
is_track_interior[not_track] = False
# Add information about bin centers not on track
place_bin_centers_nodes_df = (
pd.concat(
(
place_bin_centers_nodes_df,
pd.DataFrame(
{
"linear_position": place_bin_centers[~is_track_interior],
"node_id": -1,
"edge_id": -1,
"is_bin_edge": False,
}
),
)
).sort_values(by=["linear_position"], axis="rows")
).reset_index(drop=True)
# Other needed information
edges = [place_bin_edges]
centers_shape = (place_bin_centers.size,)
return (
place_bin_centers[:, np.newaxis],
place_bin_edges[:, np.newaxis],
is_track_interior,
distance_between_nodes,
centers_shape,
edges,
track_graph_with_bin_centers_edges,
original_nodes_df,
place_bin_edges_nodes_df,
place_bin_centers_nodes_df,
nodes_df.reset_index(),
)
def get_track_boundary(
is_track_interior: np.ndarray, n_position_dims: int = 2, connectivity: int = 1
) -> np.ndarray:
"""Determines the boundary of the valid interior track bins. The boundary
are not bins on the track but surround it.
Parameters
----------
is_track_interior : np.ndarray, shape (n_bins_x, n_bins_y)
n_position_dims : int
connectivity : int
`connectivity` determines which elements of the output array belong
to the structure, i.e., are considered as neighbors of the central
element. Elements up to a squared distance of `connectivity` from
the center are considered neighbors. `connectivity` may range from 1
(no diagonal elements are neighbors) to `rank` (all elements are
neighbors).
Returns
-------
is_track_boundary : np.ndarray, shape (n_bins,)
"""
structure = ndimage.generate_binary_structure(
rank=n_position_dims, connectivity=connectivity
)
return (
ndimage.binary_dilation(is_track_interior, structure=structure)
^ is_track_interior
)
def order_boundary(boundary: np.ndarray) -> np.ndarray:
"""Given boundary bin centers, orders them in a way to make a continuous line.
https://stackoverflow.com/questions/37742358/sorting-points-to-form-a-continuous-line
Parameters
----------
boundary : np.ndarray, shape (n_boundary_points, n_position_dims)
Returns
-------
ordered_boundary : np.ndarray, shape (n_boundary_points, n_position_dims)
"""
n_points = boundary.shape[0]
clf = NearestNeighbors(n_neighbors=2).fit(boundary)
G = clf.kneighbors_graph()
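# NOTE: networkx 3.0+ removed `from_scipy_sparse_matrix` in favor of
# `from_scipy_sparse_array`; swap the call below if using a newer networkx.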
T = nx.from_scipy_sparse_matrix(G)
paths = [list(nx.dfs_preorder_nodes(T, i)) for i in range(n_points)]
min_idx, min_dist = 0, np.inf
for idx, path in enumerate(paths):
ordered = boundary[path] # ordered nodes
cost = np.sum(np.diff(ordered) ** 2)
if cost < min_dist:
min_idx, min_dist = idx, cost
opt_order = paths[min_idx]
return boundary[opt_order][:-1]
def get_track_boundary_points(
is_track_interior: np.ndarray, edges: list[np.ndarray], connectivity: int = 1
):
"""
Parameters
----------
is_track_interior : np.ndarray, shape (n_x_bins, n_y_bins)
edges : list of ndarray
Returns
-------
boundary_points : np.ndarray, shape (n_boundary_points, n_position_dims)
"""
n_position_dims = len(edges)
boundary = get_track_boundary(
is_track_interior, n_position_dims=n_position_dims, connectivity=connectivity
)
inds = np.nonzero(boundary)
centers = [get_centers(x) for x in edges]
boundary = np.stack([center[ind] for center, ind in zip(centers, inds)], axis=1)
return order_boundary(boundary)
@njit
def diffuse(
position_grid: np.ndarray,
Fx: float,
Fy: float,
is_track_interior: np.ndarray,
is_track_boundary: np.ndarray,
) -> np.ndarray:
"""Calculates diffusion for a single time step given a track.
Parameters
----------
position_grid : np.ndarray, shape (n_bins_x, n_bins_y)
The grid of values to diffuse
Fx : float
Diffusion coefficient x
Fy : float
Diffusion coefficient y
is_track_interior : np.ndarray, shape (n_bins_x, n_bins_y)
is_track_boundary : np.ndarray, shape (n_bins_x, n_bins_y)
Returns
-------
diffused_grid : np.ndarray, shape (n_bins_x, n_bins_y)
"""
# interior points
for x_ind, y_ind in zip(*np.nonzero(is_track_interior)):
# check if current point is on the boundary
if not is_track_boundary[x_ind, y_ind]:
# no flux boundary condition
if is_track_boundary[x_ind - 1, y_ind]:
position_grid[x_ind - 1, y_ind] = position_grid[x_ind, y_ind]
if is_track_boundary[x_ind + 1, y_ind]:
position_grid[x_ind + 1, y_ind] = position_grid[x_ind, y_ind]
if is_track_boundary[x_ind, y_ind - 1]:
position_grid[x_ind, y_ind - 1] = position_grid[x_ind, y_ind]
if is_track_boundary[x_ind, y_ind + 1]:
position_grid[x_ind, y_ind + 1] = position_grid[x_ind, y_ind]
position_grid[x_ind, y_ind] += Fx * (
position_grid[x_ind - 1, y_ind]
- 2.0 * position_grid[x_ind, y_ind]
+ position_grid[x_ind + 1, y_ind]
) + Fy * (
position_grid[x_ind, y_ind - 1]
- 2.0 * position_grid[x_ind, y_ind]
+ position_grid[x_ind, y_ind + 1]
)
return position_grid
@njit
def run_diffusion(
position_grid: np.ndarray,
is_track_interior: np.ndarray,
is_track_boundary: np.ndarray,
dx: float,
dy: float,
std: float = 6.0,
alpha: float = 0.5,
dt: float = 0.250,
) -> np.ndarray:
"""Calculates diffusion of a single point over time up until it matches a
Gaussian with standard deviation `std`.
Parameters
----------
position_grid : np.ndarray, shape (n_bins_x, n_bins_y)
The grid of values to diffuse
is_track_interior : np.ndarray, shape (n_bins_x, n_bins_y)
Boolean that denotes which bins that are on the track
is_track_boundary : np.ndarray, shape (n_bins_x, n_bins_y)
Boolean that denotes which bins that are just outside the track
dx : float
Size of grid bins in x-direction
dy : float
Size of grid bins in y-direction
std : float
Standard deviation of the diffusion if it were Gaussian
alpha : float
Diffusion constant. Should be 0.5 if Gaussian diffusion.
dt : float
Time step size
Returns
-------
diffused_grid : np.ndarray, shape (n_bins_x, n_bins_y)
"""
Fx = alpha * (dt / dx**2)
Fy = alpha * (dt / dy**2)
T = std**2 / (2.0 * alpha)
n_time = int((T // dt) + 1)
for _ in range(n_time):
position_grid = diffuse(
position_grid, Fx, Fy, is_track_interior, is_track_boundary
)
return position_grid
@njit
def diffuse_each_bin(
is_track_interior: np.ndarray,
is_track_boundary: np.ndarray,
dx: float,
dy: float,
std: float = 6.0,
alpha: float = 0.5,
) -> np.ndarray:
"""
For each position bin in the grid, diffuse by `std`.
Parameters
----------
is_track_interior : np.ndarray, shape (n_bins_x, n_bins_y)
Boolean that denotes which bins that are on the track
is_track_boundary : np.ndarray, shape (n_bins_x, n_bins_y)
Boolean that denotes which bins that are just outside the track
dx : float
Size of grid bins in x-direction
dy : float
Size of grid bins in y-direction
std : float
Standard deviation of the diffusion if it were Gaussian
alpha : float
Diffusion constant. Should be 0.5 if Gaussian diffusion.
Returns
-------
diffused_grid : np.ndarray, shape (n_bins_x * n_bins_y, n_bins_x * n_bins_y)
For each bin in the grid, the diffusion of that bin
"""
x_inds, y_inds = np.nonzero(is_track_interior)
n_interior_bins = len(x_inds)
n_bins = is_track_interior.size
bins_shape = is_track_interior.shape
diffused_grid = np.zeros((n_bins, *bins_shape))
dt = 0.25 / (alpha / dx**2 + alpha / dy**2)
for ind in range(n_interior_bins):
# initial conditions
position_grid = np.zeros(bins_shape)
position_grid[x_inds[ind], y_inds[ind]] = 1.0
diffused_grid[x_inds[ind] + y_inds[ind] * bins_shape[0]] = run_diffusion(
position_grid,
is_track_interior,
is_track_boundary,
dx,
dy,
std=std,
alpha=alpha,
dt=dt,
)
return diffused_grid
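# Usage sketch (`env` is assumed to be a fitted 2D Environment instance):
#     dx = np.diff(env.edges_[0])[0]
#     dy = np.diff(env.edges_[1])[0]
#     diffused = diffuse_each_bin(
#         env.is_track_interior_, env.is_track_boundary_, dx, dy, std=6.0)
#     # diffused has shape (n_bins_x * n_bins_y, n_bins_x, n_bins_y): one
#     # diffused map per starting bin, with no flux across the track boundary.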
def get_bin_ind(sample: np.ndarray, edges: list[np.ndarray]) -> np.ndarray:
"""Figure out which bin a given sample falls into.
Extracted from np.histogramdd.
Parameters
----------
sample : np.ndarray
edges : list of np.ndarray
Returns
-------
Ncount : np.ndarray
"""
try:
# Sample is an ND-array.
N, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = np.atleast_2d(sample).T
N, D = sample.shape
# Compute the bin number each sample falls into.
Ncount = tuple(
# avoid np.digitize to work around gh-11022
np.searchsorted(edges[i], sample[:, i], side="right")
for i in range(D)
)
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right edge to be
# counted in the last bin, and not as an outlier.
for i in range(D):
# Find which points are on the rightmost edge.
on_edge = sample[:, i] == edges[i][-1]
# Shift these points one bin to the left.
Ncount[i][on_edge] -= 1
return Ncount | /replay_trajectory_classification-1.3.15-py3-none-any.whl/replay_trajectory_classification/environments.py | 0.967656 | 0.531149 | environments.py | pypi |
from __future__ import annotations
from copy import deepcopy
from logging import getLogger
from typing import Optional, Union
import joblib
import numpy as np
import sklearn
import xarray as xr
from sklearn.base import BaseEstimator
from replay_trajectory_classification.continuous_state_transitions import (
EmpiricalMovement,
RandomWalk,
RandomWalkDirection1,
RandomWalkDirection2,
Uniform,
)
from replay_trajectory_classification.core import (
_acausal_decode,
_acausal_decode_gpu,
_causal_decode,
_causal_decode_gpu,
atleast_2d,
get_centers,
mask,
scaled_likelihood,
)
from replay_trajectory_classification.environments import Environment
from replay_trajectory_classification.initial_conditions import UniformInitialConditions
from replay_trajectory_classification.likelihoods import (
_SORTED_SPIKES_ALGORITHMS,
_ClUSTERLESS_ALGORITHMS,
)
logger = getLogger(__name__)
sklearn.set_config(print_changed_only=False)
_DEFAULT_CLUSTERLESS_MODEL_KWARGS = {
"mark_std": 24.0,
"position_std": 6.0,
}
_DEFAULT_SORTED_SPIKES_MODEL_KWARGS = {
"position_std": 6.0,
"use_diffusion": False,
"block_size": None,
}
class _DecoderBase(BaseEstimator):
"""Base class for decoder objects.
Parameters
----------
environment : Environment, optional
The spatial environment to fit
transition_type : EmpiricalMovement | RandomWalk | RandomWalkDirection1 | RandomWalkDirection2 | Uniform
The continuous state transition matrix
initial_conditions_type : UniformInitialConditions, optional
The initial conditions class instance
infer_track_interior : bool, optional
Whether to infer the spatial geometry of track from position
"""
def __init__(
self,
environment: Environment = Environment(environment_name=""),
transition_type: Union[
EmpiricalMovement,
RandomWalk,
RandomWalkDirection1,
RandomWalkDirection2,
Uniform,
] = RandomWalk(),
initial_conditions_type: UniformInitialConditions = UniformInitialConditions(),
infer_track_interior: bool = True,
):
self.environment = environment
self.transition_type = transition_type
self.initial_conditions_type = initial_conditions_type
self.infer_track_interior = infer_track_interior
def fit_environment(self, position: np.ndarray) -> None:
"""Discretize the spatial environment into bins. Determine valid track positions.
Parameters
----------
position : np.ndarray, shape (n_time, n_position_dims)
Position of the animal in the environment.
"""
self.environment.fit_place_grid(
position, infer_track_interior=self.infer_track_interior
)
def fit_initial_conditions(self):
"""Set the initial probability of position."""
logger.info("Fitting initial conditions...")
self.initial_conditions_ = self.initial_conditions_type.make_initial_conditions(
[self.environment], [self.environment.environment_name]
)[0]
def fit_state_transition(
self,
position: np.ndarray,
is_training: Optional[np.ndarray] = None,
transition_type: Optional[
Union[
EmpiricalMovement,
RandomWalk,
RandomWalkDirection1,
RandomWalkDirection2,
Uniform,
]
] = None,
):
logger.info("Fitting state transition...")
if transition_type is not None:
self.transition_type = transition_type
if isinstance(self.transition_type, EmpiricalMovement):
if is_training is None:
is_training = np.ones((position.shape[0],), dtype=bool)
is_training = np.asarray(is_training).squeeze()
n_time = position.shape[0]
encoding_group_labels = np.zeros((n_time,), dtype=np.int32)
self.state_transition_ = self.transition_type.make_state_transition(
(self.environment,),
position,
is_training,
encoding_group_labels,
environment_labels=None,
)
else:
self.state_transition_ = self.transition_type.make_state_transition(
(self.environment,)
)
def fit(self):
"""To be implemented by inheriting class"""
raise NotImplementedError
def predict(self):
"""To be implemented by inheriting class"""
raise NotImplementedError
def save_model(self, filename="model.pkl"):
"""Save the classifier to a pickled file.
Parameters
----------
filename : str, optional
"""
joblib.dump(self, filename)
@staticmethod
def load_model(filename="model.pkl"):
"""Load the classifier from a file.
Parameters
----------
filename : str, optional
Returns
-------
classifier instance
"""
return joblib.load(filename)
def copy(self):
"""Makes a copy of the classifier"""
return deepcopy(self)
def project_1D_position_to_2D(
self, results: xr.Dataset, posterior_type="acausal_posterior"
) -> np.ndarray:
"""Project the 1D most probable position into the 2D track graph space.
Only works for single environment.
Parameters
----------
results : xr.Dataset
posterior_type : causal_posterior | acausal_posterior | likelihood
Returns
-------
map_position2D : np.ndarray
"""
map_position_ind = (
results[posterior_type].sum("state").argmax("position").to_numpy().squeeze()
)
return self.environment.place_bin_centers_nodes_df_.iloc[map_position_ind][
["x_position", "y_position"]
].to_numpy()
def _get_results(
self,
results: dict,
n_time: int,
time: Optional[np.ndarray] = None,
is_compute_acausal: bool = True,
use_gpu: bool = False,
):
"""Converts the results dict into a collection of labeled arrays.
Parameters
----------
results : dict
n_time : int
time : np.ndarray
is_compute_acausal : bool
use_gpu : bool
Returns
-------
results : xr.Dataset
"""
is_track_interior = self.environment.is_track_interior_.ravel(order="F")
n_position_bins = is_track_interior.shape[0]
st_interior_ind = np.ix_(is_track_interior, is_track_interior)
logger.info("Estimating causal posterior...")
if not use_gpu:
results["causal_posterior"] = np.full(
(n_time, n_position_bins), np.nan, dtype=float
)
(
results["causal_posterior"][:, is_track_interior],
data_log_likelihood,
) = _causal_decode(
self.initial_conditions_[is_track_interior].astype(float),
self.state_transition_[st_interior_ind].astype(float),
results["likelihood"][:, is_track_interior].astype(float),
)
else:
results["causal_posterior"] = np.full(
(n_time, n_position_bins), np.nan, dtype=np.float32
)
(
results["causal_posterior"][:, is_track_interior],
data_log_likelihood,
) = _causal_decode_gpu(
self.initial_conditions_[is_track_interior],
self.state_transition_[st_interior_ind],
results["likelihood"][:, is_track_interior],
)
if is_compute_acausal:
logger.info("Estimating acausal posterior...")
if not use_gpu:
results["acausal_posterior"] = np.full(
(n_time, n_position_bins, 1), np.nan, dtype=float
)
results["acausal_posterior"][:, is_track_interior] = _acausal_decode(
results["causal_posterior"][
:, is_track_interior, np.newaxis
].astype(float),
self.state_transition_[st_interior_ind].astype(float),
)
else:
results["acausal_posterior"] = np.full(
(n_time, n_position_bins, 1), np.nan, dtype=np.float32
)
results["acausal_posterior"][
:, is_track_interior
] = _acausal_decode_gpu(
results["causal_posterior"][:, is_track_interior, np.newaxis],
self.state_transition_[st_interior_ind],
)
if time is None:
time = np.arange(n_time)
return self.convert_results_to_xarray(results, time, data_log_likelihood)
def convert_results_to_xarray(
self, results: dict, time: np.ndarray, data_log_likelihood: float
) -> xr.Dataset:
"""Converts the results dict into a collection of labeled arrays.
Parameters
----------
results : dict
time : np.ndarray
data_log_likelihood : float
Returns
-------
results : xr.Dataset
"""
n_position_dims = self.environment.place_bin_centers_.shape[1]
n_time = time.shape[0]
attrs = {"data_log_likelihood": data_log_likelihood}
if n_position_dims > 1:
dims = ["time", "x_position", "y_position"]
coords = dict(
time=time,
x_position=get_centers(self.environment.edges_[0]),
y_position=get_centers(self.environment.edges_[1]),
)
else:
dims = ["time", "position"]
coords = dict(
time=time,
position=get_centers(self.environment.edges_[0]),
)
new_shape = (n_time, *self.environment.centers_shape_)
is_track_interior = self.environment.is_track_interior_.ravel(order="F")
try:
results = xr.Dataset(
{
key: (
dims,
mask(value, is_track_interior)
.reshape(new_shape)
.swapaxes(-1, -2),
)
for key, value in results.items()
},
coords=coords,
attrs=attrs,
)
except ValueError:
results = xr.Dataset(
{
key: (dims, mask(value, is_track_interior).reshape(new_shape))
for key, value in results.items()
},
coords=coords,
attrs=attrs,
)
return results
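# The helpers `_causal_decode`, `_acausal_decode`, and their GPU variants used in
# `_get_results` are defined elsewhere in this package and are not shown in this
# excerpt. The function below is a minimal, illustrative sketch (an assumption,
# not the library's implementation) of the forward/filter recursion such a
# helper computes: propagate through the state transition, multiply by the
# likelihood, and renormalize.
def _example_forward_filter_sketch(
    initial_conditions: np.ndarray,
    state_transition: np.ndarray,
    likelihood: np.ndarray,
) -> np.ndarray:
    """Hypothetical forward pass returning a posterior of shape (n_time, n_bins)."""
    n_time, n_bins = likelihood.shape
    posterior = np.zeros((n_time, n_bins))
    # Initial update: prior times the first likelihood, normalized.
    posterior[0] = initial_conditions * likelihood[0]
    posterior[0] /= posterior[0].sum() + np.spacing(1)
    for t in range(1, n_time):
        # One-step prediction through the transition matrix, then Bayes update.
        prior = posterior[t - 1] @ state_transition
        posterior[t] = prior * likelihood[t]
        posterior[t] /= posterior[t].sum() + np.spacing(1)
    return posterior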
class SortedSpikesDecoder(_DecoderBase):
def __init__(
self,
environment: Environment = Environment(environment_name=""),
transition_type: Union[
EmpiricalMovement,
RandomWalk,
RandomWalkDirection1,
RandomWalkDirection2,
Uniform,
] = RandomWalk(),
initial_conditions_type: UniformInitialConditions = UniformInitialConditions(),
infer_track_interior: bool = True,
sorted_spikes_algorithm: str = "spiking_likelihood_kde",
sorted_spikes_algorithm_params: dict = _DEFAULT_SORTED_SPIKES_MODEL_KWARGS,
):
"""Decodes neural population representation of position from clustered cells.
Parameters
----------
environment : Environment instance, optional
The spatial environment to fit
transition_type : transition matrix instance, optional
Movement model for the continuous state
initial_conditions_type : initial conditions instance, optional
The initial conditions class instance
infer_track_interior : bool, optional
Whether to infer the spatial geometry of track from position
sorted_spikes_algorithm : str, optional
The type of algorithm. See _SORTED_SPIKES_ALGORITHMS for keys
sorted_spikes_algorithm_params : dict, optional
Parameters for the algorithm.
"""
super().__init__(
environment, transition_type, initial_conditions_type, infer_track_interior
)
self.sorted_spikes_algorithm = sorted_spikes_algorithm
self.sorted_spikes_algorithm_params = sorted_spikes_algorithm_params
def fit_place_fields(
self,
position: np.ndarray,
spikes: np.ndarray,
is_training: Optional[np.ndarray] = None,
):
"""Fits the place intensity function.
Parameters
----------
position : np.ndarray, shape (n_time, n_position_dims)
Position of the animal.
spikes : np.ndarray, (n_time, n_neurons)
Binary indicator of whether there was a spike in a given time bin for a given neuron.
is_training : np.ndarray, shape (n_time,), optional
Boolean array to indicate which data should be included in fitting of place fields, by default None
"""
logger.info("Fitting place fields...")
if is_training is None:
is_training = np.ones((position.shape[0],), dtype=bool)
is_training = np.asarray(is_training).squeeze()
kwargs = self.sorted_spikes_algorithm_params
if kwargs is None:
kwargs = {}
self.place_fields_ = _SORTED_SPIKES_ALGORITHMS[self.sorted_spikes_algorithm][0](
position[is_training],
spikes[is_training],
place_bin_centers=self.environment.place_bin_centers_,
place_bin_edges=self.environment.place_bin_edges_,
edges=self.environment.edges_,
is_track_interior=self.environment.is_track_interior_,
is_track_boundary=self.environment.is_track_boundary_,
**kwargs,
)
def plot_place_fields(
self, sampling_frequency: int = 1, col_wrap: int = 5
) -> xr.plot.FacetGrid:
"""Plots the fitted place fields for each neuron.
Parameters
----------
sampling_frequency : int, optional
Number of samples per second
col_wrap : int, optional
Number of columns in the subplot.
Returns
-------
g : xr.plot.FacetGrid instance
"""
try:
g = (
self.place_fields_.unstack("position").where(
self.environment.is_track_interior_
)
* sampling_frequency
).plot(x="x_position", y="y_position", col="neuron", col_wrap=col_wrap)
except ValueError:
g = (self.place_fields_ * sampling_frequency).plot(
x="position", col="neuron", col_wrap=col_wrap
)
return g
def fit(
self,
position: np.ndarray,
spikes: np.ndarray,
is_training: Optional[np.ndarray] = None,
):
"""Fit the spatial grid, initial conditions, place field model, and
transition matrix.
Parameters
----------
position : np.ndarray, shape (n_time, n_position_dims)
Position of the animal.
spikes : np.ndarray, shape (n_time, n_neurons)
Binary indicator of whether there was a spike in a given time bin for a given neuron.
is_training : None or np.ndarray, shape (n_time), optional
Boolean array to indicate which data should be included in fitting of place fields, by default None
Returns
-------
self
"""
position = atleast_2d(np.asarray(position))
spikes = np.asarray(spikes)
self.fit_environment(position)
self.fit_initial_conditions()
self.fit_state_transition(
position, is_training, transition_type=self.transition_type
)
self.fit_place_fields(position, spikes, is_training)
return self
def predict(
self,
spikes: np.ndarray,
time: Optional[np.ndarray] = None,
is_compute_acausal: bool = True,
use_gpu: bool = False,
) -> xr.Dataset:
"""Predict the probability of spatial position from the spikes.
Parameters
----------
spikes : np.ndarray, shape (n_time, n_neurons)
Binary indicator of whether there was a spike in a given time bin for a given neuron.
time : np.ndarray or None, shape (n_time,), optional
Label the time axis with these values.
is_compute_acausal : bool, optional
If True, compute the acausal posterior.
use_gpu : bool, optional
Use GPU for the state space part of the model, not the likelihood.
Returns
-------
results : xarray.Dataset
"""
spikes = np.asarray(spikes)
n_time = spikes.shape[0]
logger.info("Estimating likelihood...")
results = {}
results["likelihood"] = scaled_likelihood(
_SORTED_SPIKES_ALGORITHMS[self.sorted_spikes_algorithm][1](
spikes, np.asarray(self.place_fields_)
)
)
return self._get_results(results, n_time, time, is_compute_acausal, use_gpu)
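# Minimal usage sketch (not part of the library; array shapes are assumptions for
# illustration). `position` is (n_time, n_position_dims) and `spikes` is a binary
# (n_time, n_neurons) array. Nothing here runs at import time.
def _example_sorted_spikes_usage(position: np.ndarray, spikes: np.ndarray) -> xr.Dataset:
    decoder = SortedSpikesDecoder()
    decoder.fit(position, spikes)
    # The returned Dataset holds the likelihood and the causal/acausal posteriors.
    return decoder.predict(spikes)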
class ClusterlessDecoder(_DecoderBase):
"""Classifies neural population representation of position from multiunit spikes and waveforms.
Parameters
----------
environment : Environment, optional
The spatial environment to fit
transition_type : EmpiricalMovement | RandomWalk | RandomWalkDirection1 | RandomWalkDirection2 | Uniform
The continuous state transition matrix
initial_conditions_type : UniformInitialConditions, optional
The initial conditions class instance
infer_track_interior : bool, optional
Whether to infer the spatial geometry of track from position
clusterless_algorithm : str
The type of clusterless algorithm. See _ClUSTERLESS_ALGORITHMS for keys
clusterless_algorithm_params : dict
Parameters for the clusterless algorithms.
"""
def __init__(
self,
environment: Environment = Environment(environment_name=""),
transition_type: Union[
EmpiricalMovement,
RandomWalk,
RandomWalkDirection1,
RandomWalkDirection2,
Uniform,
] = RandomWalk(),
initial_conditions_type: UniformInitialConditions = UniformInitialConditions(),
infer_track_interior: bool = True,
clusterless_algorithm: str = "multiunit_likelihood",
clusterless_algorithm_params: dict = _DEFAULT_CLUSTERLESS_MODEL_KWARGS,
):
super().__init__(
environment, transition_type, initial_conditions_type, infer_track_interior
)
self.clusterless_algorithm = clusterless_algorithm
self.clusterless_algorithm_params = clusterless_algorithm_params
def fit_multiunits(
self,
position: np.ndarray,
multiunits: np.ndarray,
is_training: Optional[np.ndarray] = None,
):
"""
Parameters
----------
position : np.ndarray, shape (n_time, n_position_dims)
multiunits : np.ndarray, shape (n_time, n_marks, n_electrodes)
is_training : None or array_like, shape (n_time,)
"""
logger.info("Fitting multiunits...")
if is_training is None:
is_training = np.ones((position.shape[0],), dtype=bool)
is_training = np.asarray(is_training).squeeze()
kwargs = self.clusterless_algorithm_params
if kwargs is None:
kwargs = {}
self.encoding_model_ = _ClUSTERLESS_ALGORITHMS[self.clusterless_algorithm][0](
position=position[is_training],
multiunits=multiunits[is_training],
place_bin_centers=self.environment.place_bin_centers_,
is_track_interior=self.environment.is_track_interior_.ravel(order="F"),
**kwargs,
)
def fit(
self,
position: np.ndarray,
multiunits: np.ndarray,
is_training: Optional[np.ndarray] = None,
):
"""Fit the spatial grid, initial conditions, place field model, and
transition matrices.
Parameters
----------
position : np.ndarray, shape (n_time, n_position_dims)
Position of the animal.
multiunits : np.ndarray, shape (n_time, n_marks, n_electrodes)
            Array where spikes are indicated by non-NaN values that correspond to the waveform features
for each electrode.
is_training : None or np.ndarray, shape (n_time), optional
Boolean array to indicate which data should be included in fitting of place fields, by default None
Returns
-------
self
"""
position = atleast_2d(np.asarray(position))
multiunits = np.asarray(multiunits)
self.fit_environment(position)
self.fit_initial_conditions()
self.fit_state_transition(
position, is_training, transition_type=self.transition_type
)
self.fit_multiunits(position, multiunits, is_training)
return self
def predict(
self,
multiunits: np.ndarray,
time: Optional[np.ndarray] = None,
is_compute_acausal: bool = True,
use_gpu: bool = False,
) -> xr.Dataset:
"""Predict the probability of spatial position and category from the multiunit spikes and waveforms.
Parameters
----------
multiunits : np.ndarray, shape (n_time, n_marks, n_electrodes)
            Array where spikes are indicated by non-NaN values that correspond to the waveform features
for each electrode.
time : np.ndarray or None, shape (n_time,), optional
Label the time axis with these values.
is_compute_acausal : bool, optional
If True, compute the acausal posterior.
use_gpu : bool, optional
Use GPU for the state space part of the model, not the likelihood.
Returns
-------
results : xarray.Dataset
"""
multiunits = np.asarray(multiunits)
is_track_interior = self.environment.is_track_interior_.ravel(order="F")
n_time = multiunits.shape[0]
logger.info("Estimating likelihood...")
results = {}
results["likelihood"] = scaled_likelihood(
_ClUSTERLESS_ALGORITHMS[self.clusterless_algorithm][1](
multiunits=multiunits,
place_bin_centers=self.environment.place_bin_centers_,
is_track_interior=is_track_interior,
**self.encoding_model_,
)
)
return self._get_results(results, n_time, time, is_compute_acausal, use_gpu) | /replay_trajectory_classification-1.3.15-py3-none-any.whl/replay_trajectory_classification/decoder.py | 0.960305 | 0.321833 | decoder.py | pypi |
from __future__ import annotations
from dataclasses import dataclass
import numpy as np
import xarray as xr
@dataclass
class DiagonalDiscrete:
"""Transition matrix with `diagonal_value` on the value for n_states
Off-diagonals are probability: (1 - `diagonal_value`) / (`n_states` - 1)
Attributes
----------
diagonal_value : float, optional
"""
diagonal_value: float = 0.98
def make_state_transition(self, n_states: int) -> np.ndarray:
"""Makes discrete state transition matrix.
Parameters
----------
n_states : int
Returns
-------
discrete_state_transition : np.ndarray, shape (n_states, n_states)
"""
strong_diagonal = np.identity(n_states) * self.diagonal_value
is_off_diag = ~np.identity(n_states, dtype=bool)
strong_diagonal[is_off_diag] = (1 - self.diagonal_value) / (n_states - 1)
self.state_transition_ = strong_diagonal
return self.state_transition_
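# Worked example (illustrative): with n_states=3 and the default
# diagonal_value=0.98, `DiagonalDiscrete().make_state_transition(3)` returns
#     [[0.98, 0.01, 0.01],
#      [0.01, 0.98, 0.01],
#      [0.01, 0.01, 0.98]]
# since (1 - 0.98) / (3 - 1) = 0.01, and every row sums to one.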
@dataclass
class UniformDiscrete:
"""All transitions to states (including self transitions) are the same
probability.
"""
def make_state_transition(self, n_states: int) -> np.ndarray:
"""Makes discrete state transition matrix.
Parameters
----------
n_states : int
Returns
-------
discrete_state_transition : np.ndarray, shape (n_states, n_states)
"""
self.state_transition_ = np.ones((n_states, n_states)) / n_states
return self.state_transition_
@dataclass
class RandomDiscrete:
"""All state transitions are random"""
def make_state_transition(self, n_states: int) -> np.ndarray:
"""Makes discrete state transition matrix.
Parameters
----------
n_states : int
Returns
-------
discrete_state_transition : np.ndarray, shape (n_states, n_states)
"""
        state_transition = np.random.random_sample((n_states, n_states))
state_transition /= state_transition.sum(axis=1, keepdims=True)
self.state_transition_ = state_transition
return self.state_transition_
@dataclass
class UserDefinedDiscrete:
"""State transitions are provided by user.
Attributes
----------
state_transition : np.ndarray, shape (n_states, n_states)
"""
state_transition_: np.ndarray
def make_state_transition(self, n_states: int) -> np.ndarray:
"""Makes discrete state transition matrix.
Parameters
----------
n_states : int
Returns
-------
discrete_state_transition : np.ndarray, shape (n_states, n_states)
"""
return self.state_transition_
def expected_duration(
discrete_state_transition: np.ndarray, sampling_frequency: int = 1
):
"""The average duration of each discrete state if it follows
a geometric distribution.
Use `sampling_frequency` to convert duration to seconds. Time is in
number of samples by default.
Parameters
----------
discrete_state_transition : np.ndarray, shape (n_states, n_states)
sampling_frequency : int, optional
Returns
-------
duration : np.ndarray, shape (n_states)
"""
self_transitions = np.diag(discrete_state_transition)
return (1 / (1 - self_transitions)) / sampling_frequency
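# Worked example: a self-transition probability of 0.98 at a 500 Hz sampling
# frequency gives an expected dwell time of (1 / (1 - 0.98)) / 500 = 0.1 seconds
# (50 samples).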
def estimate_discrete_state_transition(
classifier,
results: xr.Dataset,
) -> np.ndarray:
"""Estimate a new discrete transition matrix given the old one and updated smoother results.
Parameters
----------
classifier : ClusterlessClassifier or SortedSpikesClassifier instance
results : xr.Dataset
Returns
-------
new_transition_matrix : np.ndarray, shape (n_states, n_states)
"""
likelihood = results.likelihood.sum("position").values
causal_prob = results.causal_posterior.sum("position").values
acausal_prob = results.acausal_posterior.sum("position").values
transition_matrix = classifier.discrete_state_transition_
n_time, n_states = causal_prob.shape
    # xi[t, i, j]: joint probability of being in state i at time t and state j at time t + 1
xi = np.zeros((n_time - 1, n_states, n_states))
for from_state in range(n_states):
for to_state in range(n_states):
xi[:, from_state, to_state] = (
causal_prob[:-1, from_state]
* likelihood[1:, to_state]
* acausal_prob[1:, to_state]
* transition_matrix[from_state, to_state]
/ (causal_prob[1:, to_state] + np.spacing(1))
)
xi = xi / xi.sum(axis=(1, 2), keepdims=True)
summed_xi = xi.sum(axis=0)
new_transition_matrix = summed_xi / summed_xi.sum(axis=1, keepdims=True)
return new_transition_matrix | /replay_trajectory_classification-1.3.15-py3-none-any.whl/replay_trajectory_classification/discrete_state_transitions.py | 0.972204 | 0.810141 | discrete_state_transitions.py | pypi |
from __future__ import annotations
import numpy as np
from replay_trajectory_classification.simulate import (
get_trajectory_direction,
simulate_position,
simulate_multiunit_with_place_fields,
simulate_time,
)
SAMPLING_FREQUENCY = 1000
TRACK_HEIGHT = 175
RUNNING_SPEED = 15
PLACE_FIELD_VARIANCE = 6.0**2
PLACE_FIELD_MEANS = np.arange(0, 200, 10)
N_RUNS = 15
REPLAY_SPEEDUP = 120.0
N_TETRODES = 5
N_FEATURES = 4
MARK_SPACING = 5
def make_simulated_run_data(
sampling_frequency: int = SAMPLING_FREQUENCY,
track_height: float = TRACK_HEIGHT,
running_speed: float = RUNNING_SPEED,
n_runs: int = N_RUNS,
place_field_variance: float = PLACE_FIELD_VARIANCE,
place_field_means: np.ndarray = PLACE_FIELD_MEANS,
n_tetrodes: int = N_TETRODES,
make_inbound_outbound_neurons: bool = False,
):
"""Make simulated data of a rat running back and forth
on a linear maze with unclustered spikes.
Parameters
----------
sampling_frequency : int, optional
track_height : float, optional
Height of the simulated track
running_speed : float, optional
Speed of the simulated animal
n_runs : int, optional
Number of runs across the track the simulated animal will perform
place_field_variance : float, optional
Spatial extent of place fields
place_field_means : np.ndarray, shape (n_neurons,), optional
Location of the center of the Gaussian place fields.
n_tetrodes : int, optional
Total number of tetrodes to simulate
make_inbound_outbound_neurons : bool, optional
Create neurons with directional place fields
Returns
-------
time : np.ndarray, shape (n_time,)
position : np.ndarray, shape (n_time,)
sampling_frequency : float
multiunits : np.ndarray, shape (n_time, n_features, n_electrodes)
multiunits_spikes : np.ndarray (n_time, n_electrodes)
"""
n_samples = int(n_runs * sampling_frequency * 2 * track_height / running_speed)
time = simulate_time(n_samples, sampling_frequency)
position = simulate_position(time, track_height, running_speed)
multiunits = []
if not make_inbound_outbound_neurons:
for place_means in place_field_means.reshape(((n_tetrodes, -1))):
multiunits.append(
simulate_multiunit_with_place_fields(
place_means,
position,
mark_spacing=10,
n_mark_dims=4,
sampling_frequency=sampling_frequency,
)
)
else:
trajectory_direction = get_trajectory_direction(position)
for direction in np.unique(trajectory_direction):
is_condition = trajectory_direction == direction
for place_means in place_field_means.reshape(((n_tetrodes, -1))):
multiunits.append(
simulate_multiunit_with_place_fields(
place_means,
position,
mark_spacing=10,
n_mark_dims=4,
sampling_frequency=sampling_frequency,
is_condition=is_condition,
)
)
multiunits = np.stack(multiunits, axis=-1)
multiunits_spikes = np.any(~np.isnan(multiunits), axis=1)
return (time, position, sampling_frequency, multiunits, multiunits_spikes)
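# Usage sketch (assumptions flagged): the simulated run data above can be fed to
# the clusterless decoder defined in this package. The import below assumes the
# top-level package re-exports ClusterlessDecoder.
#
#     from replay_trajectory_classification import ClusterlessDecoder
#
#     time, position, sampling_frequency, multiunits, _ = make_simulated_run_data()
#     decoder = ClusterlessDecoder().fit(position, multiunits)
#     results = decoder.predict(multiunits, time=time)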
def make_continuous_replay(
sampling_frequency: int = SAMPLING_FREQUENCY,
track_height: float = TRACK_HEIGHT,
running_speed: float = RUNNING_SPEED,
place_field_means: np.ndarray = PLACE_FIELD_MEANS,
replay_speedup: int = REPLAY_SPEEDUP,
n_tetrodes: int = N_TETRODES,
n_features: int = N_FEATURES,
mark_spacing: float = MARK_SPACING,
) -> tuple[np.ndarray, np.ndarray]:
"""Creates a simulated continuous replay.
Parameters
----------
sampling_frequency : int, optional
Samples per second
track_height : float, optional
Height of the simulated track
running_speed : float, optional
        Simulated speed of the animal
place_field_means : np.ndarray, optional
Location of the center of the Gaussian place fields.
replay_speedup : int, optional
        Number of times faster the replay event is than the running speed
n_tetrodes : int, optional
Number of simulated tetrodes
n_features : int, optional
Number of simulated features
mark_spacing : float, optional
Spacing between Gaussian mark features
Returns
-------
replay_time : np.ndarray, shape (n_time,)
Time in seconds.
test_multiunits : np.ndarray, shape (n_time, n_features, n_tetrodes)
        Binned clusterless spike times and features. NaN indicates no spike; non-NaN values give the spike's waveform features.
"""
replay_speed = running_speed * replay_speedup
n_samples = int(0.5 * sampling_frequency * 2 * track_height / replay_speed)
replay_time = simulate_time(n_samples, sampling_frequency)
true_replay_position = simulate_position(replay_time, track_height, replay_speed)
place_field_means = place_field_means.reshape(((n_tetrodes, -1)))
min_times_ind = np.argmin(
np.abs(true_replay_position[:, np.newaxis] - place_field_means.ravel()), axis=0
)
tetrode_ind = (
        np.ones_like(place_field_means) * np.arange(n_tetrodes)[:, np.newaxis]
).ravel()
test_multiunits = np.full((replay_time.size, n_features, n_tetrodes), np.nan)
n_neurons = place_field_means.shape[1]
mark_centers = np.arange(0, n_neurons * mark_spacing, mark_spacing)
    mark_ind = (np.ones_like(place_field_means) * np.arange(n_neurons)).ravel()
for i in range(n_features):
test_multiunits[(min_times_ind, i, tetrode_ind)] = mark_centers[mark_ind]
return replay_time, test_multiunits
def make_hover_replay(
    hover_neuron_ind: int | None = None,
place_field_means: np.ndarray = PLACE_FIELD_MEANS,
sampling_frequency: int = SAMPLING_FREQUENCY,
n_tetrodes: int = N_TETRODES,
n_features: int = N_FEATURES,
mark_spacing: float = MARK_SPACING,
) -> tuple[np.ndarray, np.ndarray]:
"""Creates a simulated stationary replay.
Parameters
----------
hover_neuron_ind : int, optional
Index of which neuron is the stationary neuron.
place_field_means : np.ndarray, optional
Location of the center of the Gaussian place fields.
sampling_frequency : int, optional
Samples per second
n_tetrodes : int, optional
Number of simulated tetrodes
n_features : int, optional
Number of simulated features
mark_spacing : float, optional
Spacing between Gaussian mark features
Returns
-------
replay_time : np.ndarray, shape (n_time,)
Time in seconds.
test_multiunits : np.ndarray, shape (n_time, n_features, n_tetrodes)
        Binned clusterless spike times and features. NaN indicates no spike; non-NaN values give the spike's waveform features.
"""
place_field_means = place_field_means.reshape(((n_tetrodes, -1)))
if hover_neuron_ind is None:
hover_neuron_ind = place_field_means.size // 2
tetrode_ind, neuron_ind = np.unravel_index(
hover_neuron_ind, place_field_means.shape
)
N_TIME = 50
replay_time = np.arange(N_TIME) / sampling_frequency
spike_time_ind = np.arange(0, N_TIME, 2)
test_multiunits = np.full((replay_time.size, n_features, n_tetrodes), np.nan)
n_neurons = place_field_means.shape[1]
mark_centers = np.arange(0, n_neurons * mark_spacing, mark_spacing)
test_multiunits[spike_time_ind, :, tetrode_ind] = mark_centers[neuron_ind]
return replay_time, test_multiunits
def make_fragmented_replay(
place_field_means: np.ndarray = PLACE_FIELD_MEANS,
sampling_frequency: int = SAMPLING_FREQUENCY,
n_tetrodes: int = N_TETRODES,
n_features: int = N_FEATURES,
mark_spacing: float = MARK_SPACING,
) -> tuple[np.ndarray, np.ndarray]:
"""Creates a simulated fragmented replay.
Parameters
----------
place_field_means : np.ndarray, optional
Location of the center of the Gaussian place fields.
sampling_frequency : int, optional
Samples per second
n_tetrodes : int, optional
Number of simulated tetrodes
n_features : int, optional
Number of simulated features
mark_spacing : float, optional
Spacing between Gaussian mark features
Returns
-------
replay_time : np.ndarray, shape (n_time,)
Time in seconds.
test_multiunits : np.ndarray, shape (n_time, n_features, n_tetrodes)
        Binned clusterless spike times and features. NaN indicates no spike; non-NaN values give the spike's waveform features.
"""
N_TIME = 10
place_field_means = place_field_means.reshape(((n_tetrodes, -1)))
replay_time = np.arange(N_TIME) / sampling_frequency
n_total_neurons = place_field_means.size
neuron_inds = [1, n_total_neurons - 1, 10, n_total_neurons - 5, 8]
neuron_inds = np.unravel_index(neuron_inds, place_field_means.shape)
spike_time_ind = [1, 3, 5, 7, 9]
test_multiunits = np.full((replay_time.size, n_features, n_tetrodes), np.nan)
n_neurons = place_field_means.shape[1]
mark_centers = np.arange(0, n_neurons * mark_spacing, mark_spacing)
for t_ind, tetrode_ind, neuron_ind in zip(spike_time_ind, *neuron_inds):
test_multiunits[t_ind, :, tetrode_ind] = mark_centers[neuron_ind]
return replay_time, test_multiunits
def make_hover_continuous_hover_replay(
sampling_frequency: int = SAMPLING_FREQUENCY,
place_field_means: np.ndarray = PLACE_FIELD_MEANS,
) -> tuple[np.ndarray, np.ndarray]:
"""Make a simulated replay that first is stationary, then is continuous, then is stationary again.
Parameters
----------
sampling_frequency : int, optional
Samples per second
place_field_means : np.ndarray, optional
Location of the center of the Gaussian place fields.
Returns
-------
replay_time : np.ndarray, shape (n_time,)
Time in seconds.
test_multiunits : np.ndarray, shape (n_time, n_features, n_tetrodes)
        Binned clusterless spike times and features. NaN indicates no spike; non-NaN values give the spike's waveform features.
"""
_, test_multiunits1 = make_hover_replay(hover_neuron_ind=0)
_, test_multiunits2 = make_continuous_replay()
n_total_neurons = place_field_means.size
_, test_multiunits3 = make_hover_replay(hover_neuron_ind=n_total_neurons - 1)
test_multiunits = np.concatenate(
(test_multiunits1, test_multiunits2, test_multiunits3)
)
replay_time = np.arange(test_multiunits.shape[0]) / sampling_frequency
return replay_time, test_multiunits
def make_fragmented_hover_fragmented_replay(
sampling_frequency: int = SAMPLING_FREQUENCY,
) -> tuple[np.ndarray, np.ndarray]:
"""Makes a simulated replay that first is fragmented, then is stationary, then is fragmented again.
Parameters
----------
sampling_frequency : int, optional
Samples per second
Returns
-------
replay_time : np.ndarray, shape (n_time,)
Time in seconds.
test_multiunits : np.ndarray, shape (n_time, n_features, n_tetrodes)
        Binned clusterless spike times and features. NaN indicates no spike; non-NaN values give the spike's waveform features.
"""
_, test_multiunits1 = make_fragmented_replay()
_, test_multiunits2 = make_hover_replay(hover_neuron_ind=6)
_, test_multiunits3 = make_fragmented_replay()
test_multiunits = np.concatenate(
(test_multiunits1, test_multiunits2, test_multiunits3)
)
replay_time = np.arange(test_multiunits.shape[0]) / sampling_frequency
return replay_time, test_multiunits
def make_fragmented_continuous_fragmented_replay(
sampling_frequency: int = SAMPLING_FREQUENCY,
) -> tuple[np.ndarray, np.ndarray]:
"""Makes a simulated replay that first is fragmented, then is continuous, then is fragmented again.
Parameters
----------
sampling_frequency : int, optional
Samples per second
Returns
-------
replay_time : np.ndarray, shape (n_time,)
Time in seconds.
test_multiunits : np.ndarray, shape (n_time, n_features, n_tetrodes)
        Binned clusterless spike times and features. NaN indicates no spike; non-NaN values give the spike's waveform features.
"""
_, test_multiunits1 = make_fragmented_replay()
_, test_multiunits2 = make_continuous_replay()
_, test_multiunits3 = make_fragmented_replay()
test_multiunits = np.concatenate(
(test_multiunits1, test_multiunits2, test_multiunits3)
)
replay_time = np.arange(test_multiunits.shape[0]) / sampling_frequency
return replay_time, test_multiunits | /replay_trajectory_classification-1.3.15-py3-none-any.whl/replay_trajectory_classification/clusterless_simulation.py | 0.932752 | 0.58883 | clusterless_simulation.py | pypi |
from __future__ import annotations
from dataclasses import dataclass
from replay_trajectory_classification.environments import Environment
import numpy as np
@dataclass
class UniformInitialConditions:
"""Initial conditions where all discrete states and position bins are
equally likely."""
def make_initial_conditions(
self, environments: tuple[Environment], environment_names_to_state: tuple[str]
) -> list[np.ndarray]:
"""Creates initial conditions array
Parameters
----------
environments : tuple[Environment]
Spatial environments in the model
environment_names_to_state : tuple[str]
Mapping of environment names to state
Returns
-------
initial_conditions : list of arrays
"""
n_total_place_bins = 0
initial_conditions = []
for environment_name in environment_names_to_state:
is_track_interior = environments[
environments.index(environment_name)
].is_track_interior_.ravel(order="F")
n_total_place_bins += is_track_interior.sum()
initial_conditions.append(is_track_interior)
return [ic / n_total_place_bins for ic in initial_conditions]
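# Note: the normalization above is joint over all listed states/environments, so
# the concatenation of the returned arrays sums to one. For example, two
# environments with 100 and 50 interior bins both receive a uniform value of
# 1 / 150 in each interior bin, rather than 1 / 100 and 1 / 50 separately.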
@dataclass
class UniformOneEnvironmentInitialConditions:
"""Initial conditions where all position bins are
equally likely for one environment and zero for other environments."""
environment_name: str = ""
def make_initial_conditions(
self, environments: tuple[Environment], environment_names_to_state: tuple[str]
) -> list[np.ndarray]:
"""Creates initial conditions array
Parameters
----------
environments : tuple[Environment]
Spatial environments in the model
environment_names_to_state : tuple[str]
Mapping of environment names to state
Returns
-------
initial_conditions : list of arrays
"""
n_total_place_bins = 0
initial_conditions = []
for environment_name in environment_names_to_state:
is_track_interior = environments[
environments.index(environment_name)
].is_track_interior_.ravel(order="F")
if self.environment_name == environment_name:
initial_conditions.append(is_track_interior)
n_total_place_bins += is_track_interior.sum()
else:
initial_conditions.append(np.zeros_like(is_track_interior))
return [ic / n_total_place_bins for ic in initial_conditions] | /replay_trajectory_classification-1.3.15-py3-none-any.whl/replay_trajectory_classification/initial_conditions.py | 0.934634 | 0.516595 | initial_conditions.py | pypi |
from __future__ import annotations
import numpy as np
from replay_trajectory_classification.environments import diffuse_each_bin, get_bin_ind
from typing import Optional
def estimate_diffusion_position_distance(
positions: np.ndarray,
edges: np.ndarray,
is_track_interior: Optional[np.ndarray] = None,
is_track_boundary: Optional[np.ndarray] = None,
position_std: float = 3.0,
bin_distances: Optional[np.ndarray] = None,
) -> np.ndarray:
"""Estimates a distance between a given position and all position bins
using a diffusion.
Parameters
----------
    positions : np.ndarray, shape (n_time, n_position_dims)
    edges : np.ndarray
        Bin edges for each position dimension.
    is_track_interior : np.ndarray, optional
    is_track_boundary : np.ndarray, optional
    position_std : float
        Amount of smoothing. Standard deviation of the diffusion kernel.
    bin_distances : np.ndarray, optional
        Precomputed diffusion distances for each bin. If given, the track
        geometry arguments are not used.
Returns
-------
position_distance : np.ndarray, shape (n_time, n_position_bins)
"""
    if bin_distances is None:
        n_total_bins = np.prod(is_track_interior.shape)
        dx = edges[0][1] - edges[0][0]
        dy = edges[1][1] - edges[1][0]
        bin_distances = diffuse_each_bin(
            is_track_interior,
            is_track_boundary,
            dx,
            dy,
            std=position_std,
        ).reshape((n_total_bins, -1), order="F")
bin_ind = get_bin_ind(positions, [edge[1:-1] for edge in edges])
linear_ind = np.ravel_multi_index(
bin_ind, [len(edge) - 1 for edge in edges], order="F"
)
return bin_distances[linear_ind]
def estimate_diffusion_position_density(
positions: np.ndarray,
edges: np.ndarray,
is_track_interior: Optional[np.ndarray] = None,
is_track_boundary: Optional[np.ndarray] = None,
position_std: float = 3.0,
bin_distances: Optional[np.ndarray] = None,
block_size: Optional[int] = 100,
) -> np.ndarray:
"""Kernel density estimate over all position bins using diffusion.
Parameters
----------
    positions : np.ndarray, shape (n_time, n_position_dims)
    edges : np.ndarray
        Bin edges for each position dimension.
    is_track_interior : np.ndarray, optional
    is_track_boundary : np.ndarray, optional
    position_std : float
        Amount of smoothing. Standard deviation of the diffusion kernel.
    bin_distances : np.ndarray, optional
        Precomputed diffusion distances for each bin.
    block_size : int, optional
        Number of time points to process at once.
Returns
-------
position_density : np.ndarray, shape (n_position_bins,)
"""
n_time = positions.shape[0]
if block_size is None:
block_size = n_time
return np.mean(
estimate_diffusion_position_distance(
positions,
edges,
is_track_interior=is_track_interior,
is_track_boundary=is_track_boundary,
position_std=position_std,
bin_distances=bin_distances,
),
axis=0,
) | /replay_trajectory_classification-1.3.15-py3-none-any.whl/replay_trajectory_classification/likelihoods/diffusion.py | 0.968216 | 0.518363 | diffusion.py | pypi |
from __future__ import annotations
from typing import Optional, Union
import numpy as np
from tqdm.autonotebook import tqdm
from replay_trajectory_classification.core import atleast_2d
from replay_trajectory_classification.likelihoods.diffusion import (
diffuse_each_bin,
estimate_diffusion_position_density,
estimate_diffusion_position_distance,
)
try:
import cupy as cp
@cp.fuse
def gaussian_pdf(x: cp.ndarray, mean: cp.ndarray, sigma: cp.ndarray) -> cp.ndarray:
"""Compute the value of a Gaussian probability density function at x with
given mean and sigma."""
return cp.exp(-0.5 * ((x - mean) / sigma) ** 2) / (sigma * cp.sqrt(2.0 * cp.pi))
def estimate_position_distance(
place_bin_centers: cp.ndarray, positions: cp.ndarray, position_std: cp.ndarray
) -> cp.ndarray:
"""Estimates the Euclidean distance between positions and position bins.
Parameters
----------
place_bin_centers : cp.ndarray, shape (n_position_bins, n_position_dims)
positions : cp.ndarray, shape (n_time, n_position_dims)
position_std : cp.ndarray, shape (n_position_dims,)
Returns
-------
position_distance : cp.ndarray, shape (n_time, n_position_bins)
"""
n_time, n_position_dims = positions.shape
n_position_bins = place_bin_centers.shape[0]
if isinstance(position_std, (int, float)):
position_std = [position_std] * n_position_dims
position_distance = cp.ones((n_time, n_position_bins), dtype=cp.float32)
for position_ind, std in enumerate(position_std):
position_distance *= gaussian_pdf(
cp.expand_dims(place_bin_centers[:, position_ind], axis=0),
cp.expand_dims(positions[:, position_ind], axis=1),
std,
)
return position_distance
def estimate_position_density(
place_bin_centers: cp.ndarray,
positions: cp.ndarray,
position_std: Union[cp.ndarray, float],
block_size: Optional[int] = 100,
) -> cp.ndarray:
"""Estimates a kernel density estimate over position bins using
Euclidean distances.
Parameters
----------
place_bin_centers : cp.ndarray, shape (n_position_bins, n_position_dims)
positions : cp.ndarray, shape (n_time, n_position_dims)
position_std : float or array_like, shape (n_position_dims,)
Returns
-------
position_density : cp.ndarray, shape (n_position_bins,)
"""
n_time = positions.shape[0]
n_position_bins = place_bin_centers.shape[0]
if block_size is None:
block_size = n_time
position_density = cp.empty((n_position_bins,))
for start_ind in range(0, n_position_bins, block_size):
block_inds = slice(start_ind, start_ind + block_size)
position_density[block_inds] = cp.mean(
estimate_position_distance(
place_bin_centers[block_inds], positions, position_std
),
axis=0,
)
return position_density
def estimate_log_intensity(
density: cp.ndarray, occupancy: cp.ndarray, mean_rate: float
):
"""Calculates intensity in log space."""
return cp.log(mean_rate) + cp.log(density) - cp.log(occupancy)
def estimate_intensity(
density: cp.ndarray, occupancy: cp.ndarray, mean_rate: float
) -> cp.ndarray:
"""Calculates intensity.
Parameters
----------
density : np.ndarray, shape (n_bins,)
occupancy : np.ndarray, shape (n_bins,)
mean_rate : float
Returns
-------
intensity : np.ndarray, shape (n_bins,)
"""
return cp.exp(estimate_log_intensity(density, occupancy, mean_rate))
def normal_pdf_integer_lookup(
x: int, mean: int, std: float = 20.0, max_value: int = 6000
):
"""Fast density evaluation for integers by precomputing a hash table of
values.
Parameters
----------
x : int
mean : int
std : float
max_value : int
Returns
-------
        probability_density : cp.ndarray
"""
normal_density = gaussian_pdf(cp.arange(-max_value, max_value), 0, std).astype(
cp.float32
)
return normal_density[(x - mean) + max_value]
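    # Implementation note: the lookup table above trades memory for speed. The
    # Gaussian density is evaluated once on the integer grid
    # [-max_value, max_value) and then indexed with the integer mark
    # differences, so each pairwise density evaluation becomes a single array
    # lookup. Mark differences are assumed to lie within that range; values
    # outside it would index the table incorrectly.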
def estimate_log_joint_mark_intensity(
decoding_marks: cp.ndarray,
encoding_marks: cp.ndarray,
mark_std: float,
occupancy: cp.ndarray,
mean_rate: float,
place_bin_centers: Optional[cp.ndarray] = None,
encoding_positions: Optional[cp.ndarray] = None,
position_std: Union[float, cp.ndarray, None] = None,
max_mark_diff: int = 6000,
set_diag_zero: bool = False,
position_distance: Optional[cp.ndarray] = None,
) -> np.ndarray:
"""Finds the joint intensity of the marks and positions in log space.
Parameters
----------
decoding_marks : np.ndarray, shape (n_decoding_spikes, n_features)
encoding_marks : np.ndarray, shape (n_encoding_spikes, n_features)
mark_std : float
occupancy : np.ndarray, shape (n_position_bins,)
mean_rate : float
place_bin_centers : None or np.ndarray, shape (n_position_bins, n_position_dims)
If None, position distance must be not None
encoding_positions : None or np.ndarray, shape (n_decoding_spikes, n_position_dims)
If None, position distance must be not None
position_std : None or float or array_like, shape (n_position_dims,)
If None, position distance must be not None
max_mark_diff : int
Maximum distance between integer marks.
set_diag_zero : bool
position_distance : np.ndarray, shape (n_encoding_spikes, n_position_bins)
Precalculated distance between position and position bins.
Returns
-------
log_joint_mark_intensity : np.ndarray, shape (n_decoding_spikes, n_position_bins)
"""
n_encoding_spikes, n_marks = encoding_marks.shape
n_decoding_spikes = decoding_marks.shape[0]
mark_distance = cp.ones(
(n_decoding_spikes, n_encoding_spikes), dtype=cp.float32
)
for mark_ind in range(n_marks):
mark_distance *= normal_pdf_integer_lookup(
cp.expand_dims(decoding_marks[:, mark_ind], axis=1),
cp.expand_dims(encoding_marks[:, mark_ind], axis=0),
std=mark_std,
max_value=max_mark_diff,
)
if set_diag_zero:
diag_ind = (cp.arange(n_decoding_spikes), cp.arange(n_decoding_spikes))
mark_distance[diag_ind] = 0.0
if position_distance is None:
position_distance = estimate_position_distance(
place_bin_centers, encoding_positions, position_std
).astype(cp.float32)
return cp.asnumpy(
estimate_log_intensity(
mark_distance @ position_distance / n_encoding_spikes,
occupancy,
mean_rate,
)
)
def fit_multiunit_likelihood_integer_gpu(
position: np.ndarray,
multiunits: np.ndarray,
place_bin_centers: np.ndarray,
mark_std: float,
position_std: Union[float, np.ndarray],
is_track_boundary: Optional[np.ndarray] = None,
is_track_interior: Optional[np.ndarray] = None,
edges: Optional[list[np.ndarray]] = None,
block_size: int = 100,
use_diffusion: bool = False,
**kwargs,
):
"""Fits the clusterless place field model.
Parameters
----------
position : np.ndarray, shape (n_time, n_position_dims)
multiunits : np.ndarray, shape (n_time, n_marks, n_electrodes)
place_bin_centers : np.ndarray, shape (n_bins, n_position_dims)
mark_std : float
Amount of smoothing for the mark features. Standard deviation of kernel.
position_std : float or array_like, shape (n_position_dims,)
Amount of smoothing for position. Standard deviation of kernel.
is_track_boundary : None or np.ndarray, shape (n_bins,)
is_track_interior : None or np.ndarray, shape (n_bins,)
edges : None or list of np.ndarray
block_size : int
Size of data to process in chunks
use_diffusion : bool
Use diffusion to respect the track geometry.
Returns
-------
encoding_model : dict
"""
if is_track_interior is None:
is_track_interior = np.ones((place_bin_centers.shape[0],), dtype=bool)
position = atleast_2d(position)
place_bin_centers = atleast_2d(place_bin_centers)
interior_place_bin_centers = cp.asarray(
place_bin_centers[is_track_interior.ravel(order="F")], dtype=cp.float32
)
gpu_is_track_interior = cp.asarray(is_track_interior.ravel(order="F"))
not_nan_position = np.all(~np.isnan(position), axis=1)
if use_diffusion & (position.shape[1] > 1):
n_total_bins = np.prod(is_track_interior.shape)
bin_diffusion_distances = diffuse_each_bin(
is_track_interior,
is_track_boundary,
dx=edges[0][1] - edges[0][0],
dy=edges[1][1] - edges[1][0],
std=position_std,
).reshape((n_total_bins, -1), order="F")
else:
bin_diffusion_distances = None
if use_diffusion & (position.shape[1] > 1):
occupancy = cp.asarray(
estimate_diffusion_position_density(
position[not_nan_position],
edges,
bin_distances=bin_diffusion_distances,
),
dtype=cp.float32,
)
else:
occupancy = cp.zeros((place_bin_centers.shape[0],), dtype=cp.float32)
occupancy[gpu_is_track_interior] = estimate_position_density(
interior_place_bin_centers,
cp.asarray(position[not_nan_position], dtype=cp.float32),
position_std,
block_size=block_size,
)
mean_rates = []
summed_ground_process_intensity = cp.zeros(
(place_bin_centers.shape[0],), dtype=cp.float32
)
encoding_marks = []
encoding_positions = []
for multiunit in np.moveaxis(multiunits, -1, 0):
# ground process intensity
is_spike = np.any(~np.isnan(multiunit), axis=1)
mean_rates.append(is_spike.mean())
if is_spike.sum() > 0:
if use_diffusion & (position.shape[1] > 1):
marginal_density = cp.asarray(
estimate_diffusion_position_density(
position[is_spike & not_nan_position],
edges,
bin_distances=bin_diffusion_distances,
),
dtype=cp.float32,
)
else:
marginal_density = cp.zeros(
(place_bin_centers.shape[0],), dtype=cp.float32
)
marginal_density[gpu_is_track_interior] = estimate_position_density(
interior_place_bin_centers,
cp.asarray(
position[is_spike & not_nan_position], dtype=cp.float32
),
position_std,
block_size=block_size,
)
summed_ground_process_intensity += estimate_intensity(
marginal_density, occupancy, mean_rates[-1]
)
is_mark_features = np.any(~np.isnan(multiunit), axis=0)
encoding_marks.append(
cp.asarray(
multiunit[np.ix_(is_spike & not_nan_position, is_mark_features)],
dtype=cp.int16,
)
)
encoding_positions.append(position[is_spike & not_nan_position])
summed_ground_process_intensity = cp.asnumpy(
summed_ground_process_intensity
) + np.spacing(1)
return {
"encoding_marks": encoding_marks,
"encoding_positions": encoding_positions,
"summed_ground_process_intensity": summed_ground_process_intensity,
"occupancy": occupancy,
"mean_rates": mean_rates,
"mark_std": mark_std,
"position_std": position_std,
"block_size": block_size,
"bin_diffusion_distances": bin_diffusion_distances,
"use_diffusion": use_diffusion,
"edges": edges,
**kwargs,
}
def estimate_multiunit_likelihood_integer_gpu(
multiunits: np.ndarray,
encoding_marks: cp.ndarray,
mark_std: float,
place_bin_centers: cp.ndarray,
encoding_positions: cp.ndarray,
position_std: Union[float, np.ndarray],
occupancy: cp.ndarray,
mean_rates: list,
summed_ground_process_intensity: np.ndarray,
bin_diffusion_distances: np.ndarray,
edges: list[np.ndarray],
max_mark_diff: int = 6000,
set_diag_zero: bool = False,
is_track_interior: Optional[np.ndarray] = None,
time_bin_size: int = 1,
block_size: int = 100,
ignore_no_spike: bool = False,
disable_progress_bar: bool = False,
use_diffusion: bool = False,
):
"""Estimates the likelihood of position bins given multiunit marks.
Parameters
----------
multiunits : np.ndarray, shape (n_decoding_time, n_marks, n_electrodes)
encoding_marks : cp.ndarray, shape (n_encoding_spikes, n_marks, n_electrodes)
mark_std : float
Amount of smoothing for mark features. Standard deviation of kernel.
place_bin_centers : cp.ndarray, shape (n_bins, n_position_dims)
encoding_positions : cp.ndarray, shape (n_encoding_spikes, n_position_dims)
position_std : float or np.ndarray, shape (n_position_dims,)
Amount of smoothing for position. Standard deviation of kernel.
occupancy : cp.ndarray, (n_bins,)
mean_rates : list, len (n_electrodes,)
summed_ground_process_intensity : np.ndarray, shape (n_bins,)
bin_diffusion_distances : np.ndarray, shape (n_bins, n_bins)
edges : list of np.ndarray
max_mark_diff : int
Maximum difference between mark features
set_diag_zero : bool
Remove influence of the same mark in encoding and decoding.
is_track_interior : None or np.ndarray, shape (n_bins_x, n_bins_y)
time_bin_size : float
Size of time steps
block_size : int
Size of data to process in chunks
ignore_no_spike : bool
Set contribution of no spikes to zero
disable_progress_bar : bool
If False, a progress bar will be displayed.
use_diffusion : bool
Respect track geometry by using diffusion distances
Returns
-------
log_likelihood : (n_time, n_bins)
"""
if is_track_interior is None:
is_track_interior = np.ones((place_bin_centers.shape[0],), dtype=bool)
else:
is_track_interior = is_track_interior.ravel(order="F")
n_time = multiunits.shape[0]
if ignore_no_spike:
log_likelihood = (
-time_bin_size
* summed_ground_process_intensity
* np.zeros((n_time, 1), dtype=np.float32)
)
else:
log_likelihood = (
-time_bin_size
* summed_ground_process_intensity
* np.ones((n_time, 1), dtype=np.float32)
)
multiunits = np.moveaxis(multiunits, -1, 0)
n_position_bins = is_track_interior.sum()
interior_place_bin_centers = cp.asarray(
place_bin_centers[is_track_interior], dtype=cp.float32
)
gpu_is_track_interior = cp.asarray(is_track_interior)
interior_occupancy = occupancy[gpu_is_track_interior]
for multiunit, enc_marks, enc_pos, mean_rate in zip(
tqdm(multiunits, desc="n_electrodes", disable=disable_progress_bar),
encoding_marks,
encoding_positions,
mean_rates,
):
is_spike = np.any(~np.isnan(multiunit), axis=1)
is_mark_features = np.any(~np.isnan(multiunit), axis=0)
decoding_marks = cp.asarray(
multiunit[np.ix_(is_spike, is_mark_features)], dtype=cp.int16
)
n_decoding_marks = decoding_marks.shape[0]
log_joint_mark_intensity = np.zeros(
(n_decoding_marks, n_position_bins), dtype=np.float32
)
if block_size is None:
block_size = n_decoding_marks
if use_diffusion & (place_bin_centers.shape[1] > 1):
position_distance = cp.asarray(
estimate_diffusion_position_distance(
enc_pos,
edges,
bin_distances=bin_diffusion_distances,
)[:, is_track_interior],
dtype=cp.float32,
)
else:
position_distance = estimate_position_distance(
interior_place_bin_centers,
cp.asarray(enc_pos, dtype=cp.float32),
position_std,
).astype(cp.float32)
for start_ind in range(0, n_decoding_marks, block_size):
block_inds = slice(start_ind, start_ind + block_size)
log_joint_mark_intensity[
block_inds
] = estimate_log_joint_mark_intensity(
decoding_marks[block_inds],
enc_marks,
mark_std,
interior_occupancy,
mean_rate,
max_mark_diff=max_mark_diff,
set_diag_zero=set_diag_zero,
position_distance=position_distance,
)
log_likelihood[np.ix_(is_spike, is_track_interior)] += np.nan_to_num(
log_joint_mark_intensity
)
mempool = cp.get_default_memory_pool()
mempool.free_all_blocks()
log_likelihood[:, ~is_track_interior] = np.nan
return log_likelihood
except ImportError:
def estimate_multiunit_likelihood_integer_gpu(*args, **kwargs):
print("Cupy is not installed or no GPU detected...")
def fit_multiunit_likelihood_integer_gpu(*args, **kwargs):
print("Cupy is not installed or no GPU detected...") | /replay_trajectory_classification-1.3.15-py3-none-any.whl/replay_trajectory_classification/likelihoods/multiunit_likelihood_integer_gpu.py | 0.976614 | 0.520009 | multiunit_likelihood_integer_gpu.py | pypi |
from __future__ import annotations
from typing import Optional, Union
import numpy as np
from tqdm.autonotebook import tqdm
from replay_trajectory_classification.core import atleast_2d
from replay_trajectory_classification.likelihoods.diffusion import (
diffuse_each_bin,
estimate_diffusion_position_density,
estimate_diffusion_position_distance,
)
def gaussian_pdf(x: np.ndarray, mean: np.ndarray, sigma: np.ndarray) -> np.ndarray:
"""Compute the value of a Gaussian probability density function at x with
given mean and sigma."""
return np.exp(-0.5 * ((x - mean) / sigma) ** 2) / (sigma * np.sqrt(2.0 * np.pi))
def estimate_position_distance(
place_bin_centers: np.ndarray, positions: np.ndarray, position_std: np.ndarray
) -> np.ndarray:
"""Estimates the Euclidean distance between positions and position bins.
Parameters
----------
place_bin_centers : np.ndarray, shape (n_position_bins, n_position_dims)
positions : np.ndarray, shape (n_time, n_position_dims)
position_std : array_like, shape (n_position_dims,)
Returns
-------
position_distance : np.ndarray, shape (n_time, n_position_bins)
"""
n_time, n_position_dims = positions.shape
n_position_bins = place_bin_centers.shape[0]
if isinstance(position_std, (int, float)):
position_std = [position_std] * n_position_dims
position_distance = np.ones((n_time, n_position_bins), dtype=np.float32)
for position_ind, std in enumerate(position_std):
position_distance *= gaussian_pdf(
np.expand_dims(place_bin_centers[:, position_ind], axis=0),
np.expand_dims(positions[:, position_ind], axis=1),
std,
)
return position_distance
def estimate_position_density(
place_bin_centers: np.ndarray,
positions: np.ndarray,
position_std: np.ndarray,
block_size: int = 100,
) -> np.ndarray:
"""Estimates a kernel density estimate over position bins using
Euclidean distances.
Parameters
----------
place_bin_centers : np.ndarray, shape (n_position_bins, n_position_dims)
positions : np.ndarray, shape (n_time, n_position_dims)
position_std : float or array_like, shape (n_position_dims,)
Returns
-------
position_density : np.ndarray, shape (n_position_bins,)
"""
n_time = positions.shape[0]
n_position_bins = place_bin_centers.shape[0]
if block_size is None:
block_size = n_time
position_density = np.empty((n_position_bins,))
for start_ind in range(0, n_position_bins, block_size):
block_inds = slice(start_ind, start_ind + block_size)
position_density[block_inds] = np.mean(
estimate_position_distance(
place_bin_centers[block_inds], positions, position_std
),
axis=0,
)
return position_density
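# Minimal sketch of the kernel density estimate above (illustrative values; the
# commented code is not executed). With three 1D positions and two bin centers,
# the density at each bin is the mean Gaussian kernel weight over the positions:
#
#     positions = np.array([[0.0], [1.0], [2.0]], dtype=np.float32)
#     bin_centers = np.array([[0.0], [2.0]], dtype=np.float32)
#     density = estimate_position_density(bin_centers, positions, position_std=1.0)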
def estimate_log_intensity(
density: np.ndarray, occupancy: np.ndarray, mean_rate: float
) -> np.ndarray:
"""Calculates intensity in log space."""
return np.log(mean_rate) + np.log(density) - np.log(occupancy)
def estimate_intensity(
density: np.ndarray, occupancy: np.ndarray, mean_rate: float
) -> np.ndarray:
"""Calculates intensity.
Parameters
----------
density : np.ndarray, shape (n_bins,)
occupancy : np.ndarray, shape (n_bins,)
mean_rate : float
Returns
-------
intensity : np.ndarray, shape (n_bins,)
"""
return np.exp(estimate_log_intensity(density, occupancy, mean_rate))
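# The intensity above is the occupancy-normalized rate
#     lambda(x) = mean_rate * p(x | spike) / p(x),
# computed in log space for numerical stability. Worked example: a mean rate of
# 0.01 spikes per time bin, a spike-triggered density of 0.2 at a bin, and an
# occupancy of 0.05 at that bin give an intensity of 0.01 * 0.2 / 0.05 = 0.04.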
def estimate_log_joint_mark_intensity(
decoding_marks: np.ndarray,
encoding_marks: np.ndarray,
mark_std: np.ndarray,
occupancy: np.ndarray,
mean_rate: float,
place_bin_centers: Optional[np.ndarray] = None,
encoding_positions: Optional[np.ndarray] = None,
position_std: Union[np.ndarray, float, None] = None,
max_mark_diff: int = 6000,
set_diag_zero: bool = False,
position_distance: Optional[np.ndarray] = None,
):
"""Finds the joint intensity of the marks and positions in log space.
Parameters
----------
decoding_marks : np.ndarray, shape (n_decoding_spikes, n_features)
encoding_marks : np.ndarray, shape (n_encoding_spikes, n_features)
mark_std : float or np.ndarray, shape (n_features,)
occupancy : np.ndarray, shape (n_position_bins,)
mean_rate : float
place_bin_centers : None or np.ndarray, shape (n_position_bins, n_position_dims)
If None, position distance must be not None
encoding_positions : None or np.ndarray, shape (n_decoding_spikes, n_position_dims)
If None, position distance must be not None
position_std : None or float or array_like, shape (n_position_dims,)
If None, position distance must be not None
max_mark_diff : int
Maximum distance between integer marks.
set_diag_zero : bool
position_distance : np.ndarray, shape (n_encoding_spikes, n_position_bins)
Precalculated distance between position and position bins.
Returns
-------
log_joint_mark_intensity : np.ndarray, shape (n_decoding_spikes, n_position_bins)
"""
n_encoding_spikes, n_marks = encoding_marks.shape
n_decoding_spikes = decoding_marks.shape[0]
mark_distance = np.ones((n_decoding_spikes, n_encoding_spikes), dtype=np.float32)
for mark_ind in range(n_marks):
mark_distance *= gaussian_pdf(
np.expand_dims(decoding_marks[:, mark_ind], axis=1),
np.expand_dims(encoding_marks[:, mark_ind], axis=0),
mark_std[mark_ind],
)
if set_diag_zero:
diag_ind = (np.arange(n_decoding_spikes), np.arange(n_decoding_spikes))
mark_distance[diag_ind] = 0.0
if position_distance is None:
position_distance = estimate_position_distance(
place_bin_centers, encoding_positions, position_std
).astype(np.float32)
return estimate_log_intensity(
mark_distance @ position_distance / n_encoding_spikes, occupancy, mean_rate
)
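# Shape bookkeeping for the computation above: `mark_distance` is
# (n_decoding_spikes, n_encoding_spikes) and `position_distance` is
# (n_encoding_spikes, n_position_bins), so the matrix product divided by
# n_encoding_spikes is a (n_decoding_spikes, n_position_bins) joint density,
# which is then turned into a log intensity using the occupancy and mean rate.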
def fit_multiunit_likelihood(
position: np.ndarray,
multiunits: np.ndarray,
place_bin_centers: np.ndarray,
mark_std: Union[np.ndarray, float],
position_std: Union[np.ndarray, float],
is_track_boundary: Optional[np.ndarray] = None,
is_track_interior: Optional[np.ndarray] = None,
edges: Optional[list[np.ndarray]] = None,
block_size: int = 100,
use_diffusion: bool = False,
**kwargs,
) -> dict:
"""Fits the clusterless place field model.
Parameters
----------
position : np.ndarray, shape (n_time, n_position_dims)
multiunits : np.ndarray, shape (n_time, n_marks, n_electrodes)
place_bin_centers : np.ndarray, shape (n_bins, n_position_dims)
mark_std : float or array_like, shape (n_marks,)
Amount of smoothing for the mark features. Standard deviation of kernel.
position_std : float or array_like, shape (n_position_dims,)
Amount of smoothing for position. Standard deviation of kernel.
is_track_boundary : None or np.ndarray, shape (n_bins,)
is_track_interior : None or np.ndarray, shape (n_bins,)
edges : None or list of np.ndarray
block_size : int
Size of data to process in chunks
use_diffusion : bool
Use diffusion to respect the track geometry.
Returns
-------
encoding_model : dict
"""
if is_track_interior is None:
is_track_interior = np.ones((place_bin_centers.shape[0],), dtype=bool)
position = atleast_2d(position)
place_bin_centers = atleast_2d(place_bin_centers)
interior_place_bin_centers = np.asarray(
place_bin_centers[is_track_interior.ravel(order="F")], dtype=np.float32
)
gpu_is_track_interior = np.asarray(is_track_interior.ravel(order="F"))
not_nan_position = np.all(~np.isnan(position), axis=1)
if use_diffusion & (position.shape[1] > 1):
n_total_bins = np.prod(is_track_interior.shape)
bin_diffusion_distances = diffuse_each_bin(
is_track_interior,
is_track_boundary,
dx=edges[0][1] - edges[0][0],
dy=edges[1][1] - edges[1][0],
std=position_std,
).reshape((n_total_bins, -1), order="F")
else:
bin_diffusion_distances = None
if use_diffusion & (position.shape[1] > 1):
occupancy = np.asarray(
estimate_diffusion_position_density(
position[not_nan_position],
edges,
bin_distances=bin_diffusion_distances,
),
dtype=np.float32,
)
else:
occupancy = np.zeros((place_bin_centers.shape[0],), dtype=np.float32)
occupancy[gpu_is_track_interior] = estimate_position_density(
interior_place_bin_centers,
np.asarray(position[not_nan_position], dtype=np.float32),
position_std,
block_size=block_size,
)
mean_rates = []
summed_ground_process_intensity = np.zeros(
(place_bin_centers.shape[0],), dtype=np.float32
)
encoding_marks = []
encoding_positions = []
n_marks = multiunits.shape[1]
if isinstance(mark_std, (int, float)):
mark_std = np.asarray([mark_std] * n_marks)
else:
mark_std = np.asarray(mark_std)
for multiunit in np.moveaxis(multiunits, -1, 0):
# ground process intensity
is_spike = np.any(~np.isnan(multiunit), axis=1)
mean_rates.append(is_spike.mean())
if is_spike.sum() > 0:
if use_diffusion & (position.shape[1] > 1):
marginal_density = np.asarray(
estimate_diffusion_position_density(
position[is_spike & not_nan_position],
edges,
bin_distances=bin_diffusion_distances,
),
dtype=np.float32,
)
else:
marginal_density = np.zeros(
(place_bin_centers.shape[0],), dtype=np.float32
)
marginal_density[gpu_is_track_interior] = estimate_position_density(
interior_place_bin_centers,
np.asarray(position[is_spike & not_nan_position], dtype=np.float32),
position_std,
block_size=block_size,
)
summed_ground_process_intensity += estimate_intensity(
marginal_density, occupancy, mean_rates[-1]
)
is_mark_features = np.any(~np.isnan(multiunit), axis=0)
encoding_marks.append(
np.asarray(
multiunit[np.ix_(is_spike & not_nan_position, is_mark_features)],
dtype=np.float32,
)
)
encoding_positions.append(position[is_spike & not_nan_position])
summed_ground_process_intensity = summed_ground_process_intensity + np.spacing(1)
return {
"encoding_marks": encoding_marks,
"encoding_positions": encoding_positions,
"summed_ground_process_intensity": summed_ground_process_intensity,
"occupancy": occupancy,
"mean_rates": mean_rates,
"mark_std": mark_std,
"position_std": position_std,
"block_size": block_size,
"bin_diffusion_distances": bin_diffusion_distances,
"use_diffusion": use_diffusion,
"edges": edges,
**kwargs,
}
def estimate_multiunit_likelihood(
multiunits: np.ndarray,
encoding_marks: np.ndarray,
mark_std: np.ndarray,
place_bin_centers: np.ndarray,
encoding_positions: np.ndarray,
position_std: Union[float, np.ndarray],
occupancy: np.ndarray,
mean_rates: np.ndarray,
summed_ground_process_intensity: np.ndarray,
bin_diffusion_distances: np.ndarray,
edges: list[np.ndarray],
max_mark_diff: int = 6000,
set_diag_zero: bool = False,
is_track_interior: Optional[np.ndarray] = None,
time_bin_size: int = 1,
block_size: int = 100,
ignore_no_spike: bool = False,
disable_progress_bar: bool = False,
use_diffusion: bool = False,
) -> np.ndarray:
"""Estimates the likelihood of position bins given multiunit marks.
Parameters
----------
multiunits : np.ndarray, shape (n_decoding_time, n_marks, n_electrodes)
encoding_marks : np.ndarray, shape (n_encoding_spikes, n_marks, n_electrodes)
mark_std : list, shape (n_marks,)
Amount of smoothing for mark features
place_bin_centers : np.ndarray, shape (n_bins, n_position_dims)
encoding_positions : np.ndarray, shape (n_encoding_spikes, n_position_dims)
position_std : float or array_like, shape (n_position_dims,)
Amount of smoothing for position
occupancy : np.ndarray, (n_bins,)
mean_rates : list, len (n_electrodes,)
summed_ground_process_intensity : np.ndarray, shape (n_bins,)
bin_diffusion_distances : np.ndarray, shape (n_bins, n_bins)
edges : list of np.ndarray
max_mark_diff : int
Maximum difference between mark features
set_diag_zero : bool
Remove influence of the same mark in encoding and decoding.
is_track_interior : None or np.ndarray, shape (n_bins_x, n_bins_y)
time_bin_size : float
Size of time steps
block_size : int
Size of data to process in chunks
ignore_no_spike : bool
Set contribution of no spikes to zero
disable_progress_bar : bool
If False, a progress bar will be displayed.
use_diffusion : bool
Respect track geometry by using diffusion distances
Returns
-------
log_likelihood : (n_time, n_bins)
"""
if is_track_interior is None:
is_track_interior = np.ones((place_bin_centers.shape[0],), dtype=bool)
else:
is_track_interior = is_track_interior.ravel(order="F")
n_time = multiunits.shape[0]
if ignore_no_spike:
log_likelihood = (
-time_bin_size
* summed_ground_process_intensity
* np.zeros((n_time, 1), dtype=np.float32)
)
else:
log_likelihood = (
-time_bin_size
* summed_ground_process_intensity
* np.ones((n_time, 1), dtype=np.float32)
)
multiunits = np.moveaxis(multiunits, -1, 0)
n_position_bins = is_track_interior.sum()
interior_place_bin_centers = np.asarray(
place_bin_centers[is_track_interior], dtype=np.float32
)
gpu_is_track_interior = np.asarray(is_track_interior)
interior_occupancy = occupancy[gpu_is_track_interior]
for multiunit, enc_marks, enc_pos, mean_rate in zip(
tqdm(multiunits, desc="n_electrodes", disable=disable_progress_bar),
encoding_marks,
encoding_positions,
mean_rates,
):
is_spike = np.any(~np.isnan(multiunit), axis=1)
is_mark_features = np.any(~np.isnan(multiunit), axis=0)
decoding_marks = np.asarray(
multiunit[np.ix_(is_spike, is_mark_features)], dtype=np.float32
)
n_decoding_marks = decoding_marks.shape[0]
log_joint_mark_intensity = np.zeros(
(n_decoding_marks, n_position_bins), dtype=np.float32
)
if block_size is None:
block_size = n_decoding_marks
if use_diffusion & (place_bin_centers.shape[1] > 1):
position_distance = np.asarray(
estimate_diffusion_position_distance(
enc_pos,
edges,
bin_distances=bin_diffusion_distances,
)[:, is_track_interior],
dtype=np.float32,
)
else:
position_distance = estimate_position_distance(
interior_place_bin_centers,
np.asarray(enc_pos, dtype=np.float32),
position_std,
).astype(np.float32)
for start_ind in range(0, n_decoding_marks, block_size):
block_inds = slice(start_ind, start_ind + block_size)
log_joint_mark_intensity[block_inds] = estimate_log_joint_mark_intensity(
decoding_marks[block_inds],
enc_marks,
mark_std[is_mark_features],
interior_occupancy,
mean_rate,
max_mark_diff=max_mark_diff,
set_diag_zero=set_diag_zero,
position_distance=position_distance,
)
log_likelihood[np.ix_(is_spike, is_track_interior)] += np.nan_to_num(
log_joint_mark_intensity
)
log_likelihood[:, ~is_track_interior] = np.nan
return log_likelihood | /replay_trajectory_classification-1.3.15-py3-none-any.whl/replay_trajectory_classification/likelihoods/multiunit_likelihood.py | 0.987658 | 0.656975 | multiunit_likelihood.py | pypi |
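The fitting and decoding functions in multiunit_likelihood.py are designed to be chained: the dictionary returned by the encoding step uses the same key names as the keyword arguments of estimate_multiunit_likelihood. A minimal sketch on synthetic clusterless data follows; the keyword names of fit_multiunit_likelihood are assumed here to mirror fit_multiunit_likelihood_integer (defined later in this collection), and all array sizes are made up for illustration.

import numpy as np
from replay_trajectory_classification.likelihoods.multiunit_likelihood import (
    estimate_multiunit_likelihood,
    fit_multiunit_likelihood,
)

rng = np.random.default_rng(0)
n_time, n_marks, n_electrodes = 500, 4, 3
position = np.cumsum(rng.normal(0.0, 1.0, size=(n_time, 1)), axis=0)  # toy 1D trajectory
multiunits = np.full((n_time, n_marks, n_electrodes), np.nan)  # NaN rows mean "no spike"
is_spike = rng.random((n_time, n_electrodes)) < 0.2
for electrode in range(n_electrodes):
    n_spikes = is_spike[:, electrode].sum()
    multiunits[is_spike[:, electrode], :, electrode] = rng.normal(
        100.0, 20.0, size=(n_spikes, n_marks)
    )
place_bin_centers = np.linspace(position.min(), position.max(), 30)[:, np.newaxis]

encoding_model = fit_multiunit_likelihood(
    position=position,
    multiunits=multiunits,
    place_bin_centers=place_bin_centers,
    mark_std=np.full(n_marks, 24.0),  # one smoothing bandwidth per mark feature
    position_std=6.0,
)
# The returned keys line up with the decoding function's keyword arguments.
log_likelihood = estimate_multiunit_likelihood(
    multiunits, place_bin_centers=place_bin_centers, **encoding_model
)
print(log_likelihood.shape)  # (n_time, n_bins)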
"""Estimates place fields and the likelihood of sorted spikes with a Poisson GLM
with a spline basis"""
from __future__ import annotations
import logging
from typing import Optional
import dask
import numpy as np
import pandas as pd
import scipy.stats
import xarray as xr
from dask.distributed import Client, get_client
from patsy import DesignInfo, DesignMatrix, build_design_matrices, dmatrix
from regularized_glm import penalized_IRLS
from statsmodels.api import families
from replay_trajectory_classification.environments import get_n_bins
def make_spline_design_matrix(
position: np.ndarray, place_bin_edges: np.ndarray, knot_spacing: float = 10.0
) -> DesignMatrix:
"""Creates a design matrix for regression with a position spline basis.
Parameters
----------
position : np.ndarray, shape (n_time, n_position_dims)
place_bin_edges : np.ndarray, shape (n_bins, n_position_dims)
Returns
-------
design_matrix : patsy.DesignMatrix
"""
inner_knots = []
for pos, edges in zip(position.T, place_bin_edges.T):
n_points = get_n_bins(edges, bin_size=knot_spacing)
knots = np.linspace(edges.min(), edges.max(), n_points)[1:-1]
knots = knots[(knots > pos.min()) & (knots < pos.max())]
inner_knots.append(knots)
inner_knots = np.meshgrid(*inner_knots)
n_position_dims = position.shape[1]
data = {}
formula = "1 + te("
for ind in range(n_position_dims):
formula += f"cr(x{ind}, knots=inner_knots[{ind}])"
formula += ", "
data[f"x{ind}"] = position[:, ind]
formula += 'constraints="center")'
return dmatrix(formula, data)
def make_spline_predict_matrix(
design_info: DesignInfo, place_bin_centers: np.ndarray
) -> DesignMatrix:
"""Make a design matrix for position bins"""
predict_data = {}
for ind in range(place_bin_centers.shape[1]):
predict_data[f"x{ind}"] = place_bin_centers[:, ind]
return build_design_matrices([design_info], predict_data)[0]
def get_firing_rate(
design_matrix: DesignMatrix, results: tuple, sampling_frequency: int = 1
):
    """Predicts the firing rate given fitted model coefficients."""
if np.any(np.isnan(results.coefficients)):
n_time = design_matrix.shape[0]
rate = np.zeros((n_time,))
else:
rate = np.exp(design_matrix @ results.coefficients) * sampling_frequency
return rate
@dask.delayed
def fit_glm(
response: np.ndarray,
design_matrix: np.ndarray,
penalty: Optional[float] = None,
tolerance: float = 1e-5,
) -> tuple:
"""Fits a L2-penalized GLM.
Parameters
----------
response : np.ndarray, shape (n_time,)
Calcium activity trace
design_matrix : np.ndarray, shape (n_time, n_coefficients)
penalty : None or float
L2 penalty on regression. If None, penalty is smallest possible.
tolerance : float
Smallest difference between iterations to consider model fitting
converged.
Returns
-------
results : tuple
"""
if penalty is not None:
penalty = np.ones((design_matrix.shape[1],)) * penalty
penalty[0] = 0.0 # don't penalize the intercept
else:
penalty = np.finfo(float).eps
return penalized_IRLS(
design_matrix,
response.squeeze(),
family=families.Poisson(),
penalty=penalty,
tolerance=tolerance,
)
def poisson_log_likelihood(
spikes: np.ndarray, conditional_intensity: np.ndarray
) -> np.ndarray:
"""Probability of parameters given spiking at a particular time.
Parameters
----------
spikes : np.ndarray, shape (n_time,)
Indicator of spike or no spike at current time.
conditional_intensity : np.ndarray, shape (n_place_bins,)
Instantaneous probability of observing a spike
Returns
-------
poisson_log_likelihood : np.ndarray, shape (n_time, n_place_bins)
"""
# Logarithm of the absolute value of the gamma function is always 0 when
# spikes are 0 or 1
return scipy.stats.poisson.logpmf(
spikes[:, np.newaxis], conditional_intensity[np.newaxis, :] + np.spacing(1)
)
def combined_likelihood(
spikes: np.ndarray, conditional_intensity: np.ndarray
) -> np.ndarray:
"""Combines the likelihoods of all the cells.
Parameters
----------
spikes : np.ndarray, shape (n_time, n_neurons)
conditional_intensity : np.ndarray, shape (n_bins, n_neurons)
"""
n_time = spikes.shape[0]
n_bins = conditional_intensity.shape[0]
log_likelihood = np.zeros((n_time, n_bins))
for is_spike, ci in zip(spikes.T, conditional_intensity.T):
log_likelihood += poisson_log_likelihood(is_spike, ci)
return log_likelihood
def estimate_spiking_likelihood(
spikes: np.ndarray,
conditional_intensity: np.ndarray,
is_track_interior: Optional[np.ndarray] = None,
) -> np.ndarray:
"""
Parameters
----------
spikes : np.ndarray, shape (n_time, n_neurons)
conditional_intensity : np.ndarray, shape (n_bins, n_neurons)
is_track_interior : None or np.ndarray, optional, shape (n_x_position_bins,
n_y_position_bins)
Returns
-------
likelihood : np.ndarray, shape (n_time, n_bins)
"""
if is_track_interior is not None:
is_track_interior = is_track_interior.ravel(order="F")
else:
n_bins = conditional_intensity.shape[0]
is_track_interior = np.ones((n_bins,), dtype=bool)
log_likelihood = combined_likelihood(spikes, conditional_intensity)
mask = np.ones_like(is_track_interior, dtype=float)
mask[~is_track_interior] = np.nan
return log_likelihood * mask
def estimate_place_fields(
position: np.ndarray,
spikes: np.ndarray,
place_bin_centers: np.ndarray,
place_bin_edges: np.ndarray,
edges: Optional[np.ndarray] = None,
is_track_boundary: Optional[np.ndarray] = None,
is_track_interior: Optional[np.ndarray] = None,
penalty: float = 1e-1,
knot_spacing: int = 10,
) -> xr.DataArray:
"""Gives the conditional intensity of the neurons' spiking with respect to
position.
Parameters
----------
position : np.ndarray, shape (n_time, n_position_dims)
spikes : np.ndarray, shape (n_time, n_neurons)
place_bin_centers : np.ndarray, shape (n_bins, n_position_dims)
place_bin_edges : np.ndarray, shape (n_bins + 1, n_position_dims)
is_track_boundary : None or np.ndarray
is_track_interior : None or np.ndarray
penalty : None or float, optional
L2 penalty on regression. If None, penalty is smallest possible.
knot_spacing : int, optional
Spacing of position knots. Controls how smooth the firing rate is.
Returns
-------
conditional_intensity : xr.DataArray, shape (n_bins, n_neurons)
"""
if np.any(np.ptp(place_bin_edges, axis=0) <= knot_spacing):
logging.warning("Range of position is smaller than knot spacing.")
design_matrix = make_spline_design_matrix(position, place_bin_edges, knot_spacing)
design_info = design_matrix.design_info
try:
client = get_client()
except ValueError:
client = Client()
design_matrix = client.scatter(np.asarray(design_matrix), broadcast=True)
results = [fit_glm(is_spike, design_matrix, penalty) for is_spike in spikes.T]
results = dask.compute(*results)
predict_matrix = make_spline_predict_matrix(design_info, place_bin_centers)
place_fields = np.stack(
[get_firing_rate(predict_matrix, result) for result in results], axis=1
)
DIMS = ["position", "neuron"]
if position.shape[1] == 1:
names = ["position"]
coords = {"position": place_bin_centers.squeeze()}
elif position.shape[1] == 2:
names = ["x_position", "y_position"]
coords = {
"position": pd.MultiIndex.from_arrays(
place_bin_centers.T.tolist(), names=names
)
}
return xr.DataArray(data=place_fields, coords=coords, dims=DIMS) | /replay_trajectory_classification-1.3.15-py3-none-any.whl/replay_trajectory_classification/likelihoods/spiking_likelihood_glm.py | 0.969671 | 0.61757 | spiking_likelihood_glm.py | pypi |
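The comment above poisson_log_likelihood notes that the log(k!) term of the Poisson log-PMF is zero whenever the spike indicator is 0 or 1, which is why the KDE-based modules in this package can use the cheaper xlogy(k, rate) - rate form instead of scipy.stats.poisson.logpmf. A quick self-contained check of that identity on toy rates:

import numpy as np
from scipy.special import xlogy
from scipy.stats import poisson

rates = np.array([0.1, 0.5, 2.0])  # example conditional intensities (spikes per time bin)
for n_spikes in (0, 1):  # indicator data: at most one spike per time bin
    full = poisson.logpmf(n_spikes, rates)  # k*log(rate) - rate - log(k!)
    shortcut = xlogy(n_spikes, rates) - rates  # drops log(k!), which is 0 for k in {0, 1}
    assert np.allclose(full, shortcut)
print("log(k!) vanishes for 0/1 spike indicators")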
"""Estimates place fields and the likelihood of sorted spikes with kernel density estimation
using GPUs"""
from __future__ import annotations
from typing import Optional, Union
import numpy as np
import pandas as pd
import xarray as xr
from tqdm.auto import tqdm
from replay_trajectory_classification.core import atleast_2d
from replay_trajectory_classification.likelihoods.diffusion import (
diffuse_each_bin,
estimate_diffusion_position_density,
)
try:
import cupy as cp
from cupyx.scipy.special import xlogy
def gaussian_pdf(x: np.ndarray, mean: np.ndarray, sigma: np.ndarray) -> np.ndarray:
"""Compute the value of a Gaussian probability density function at x with
given mean and sigma."""
return np.exp(-0.5 * ((x - mean) / sigma) ** 2) / (sigma * np.sqrt(2.0 * np.pi))
def estimate_position_distance(
place_bin_centers: np.ndarray, positions: np.ndarray, position_std: np.ndarray
) -> np.ndarray:
"""Estimates the Euclidean distance between positions and position bins.
Parameters
----------
place_bin_centers : np.ndarray, shape (n_position_bins, n_position_dims)
positions : np.ndarray, shape (n_time, n_position_dims)
position_std : np.ndarray, shape (n_position_dims,)
Returns
-------
position_distance : np.ndarray, shape (n_time, n_position_bins)
"""
n_time, n_position_dims = positions.shape
n_position_bins = place_bin_centers.shape[0]
position_distance = np.ones((n_time, n_position_bins), dtype=np.float32)
if isinstance(position_std, (int, float)):
position_std = [position_std] * n_position_dims
for position_ind, std in enumerate(position_std):
position_distance *= gaussian_pdf(
np.expand_dims(place_bin_centers[:, position_ind], axis=0),
np.expand_dims(positions[:, position_ind], axis=1),
std,
)
return position_distance
def estimate_position_density(
place_bin_centers: np.ndarray,
positions: np.ndarray,
position_std: Union[float, np.ndarray],
block_size: int = 100,
) -> np.ndarray:
"""Estimates a kernel density estimate over position bins using
Euclidean distances.
Parameters
----------
place_bin_centers : np.ndarray, shape (n_position_bins, n_position_dims)
positions : np.ndarray, shape (n_time, n_position_dims)
position_std : float or array_like, shape (n_position_dims,)
Returns
-------
position_density : np.ndarray, shape (n_position_bins,)
"""
n_time = positions.shape[0]
n_position_bins = place_bin_centers.shape[0]
if block_size is None:
block_size = n_time
position_density = np.empty((n_position_bins,))
for start_ind in range(0, n_position_bins, block_size):
block_inds = slice(start_ind, start_ind + block_size)
position_density[block_inds] = np.mean(
estimate_position_distance(
place_bin_centers[block_inds], positions, position_std
),
axis=0,
)
return position_density
def get_firing_rate(
is_spike: np.ndarray,
position: np.ndarray,
place_bin_centers: np.ndarray,
is_track_interior: np.ndarray,
not_nan_position: np.ndarray,
occupancy: np.ndarray,
position_std: np.ndarray,
block_size: Optional[int] = None,
) -> np.ndarray:
if is_spike.sum() > 0:
mean_rate = is_spike.mean()
marginal_density = np.zeros((place_bin_centers.shape[0],), dtype=np.float32)
marginal_density[is_track_interior] = estimate_position_density(
place_bin_centers[is_track_interior],
np.asarray(position[is_spike & not_nan_position], dtype=np.float32),
position_std,
block_size=block_size,
)
return np.exp(
np.log(mean_rate) + np.log(marginal_density) - np.log(occupancy)
)
else:
return np.zeros_like(occupancy)
def get_diffusion_firing_rate(
is_spike: np.ndarray,
position: np.ndarray,
edges: list[np.ndarray],
bin_diffusion_distances: np.ndarray,
occupancy: np.ndarray,
not_nan_position: np.ndarray,
) -> np.ndarray:
if is_spike.sum() > 0:
mean_rate = is_spike.mean()
marginal_density = estimate_diffusion_position_density(
position[is_spike & not_nan_position],
edges,
bin_distances=bin_diffusion_distances,
).astype(np.float32)
return np.exp(
np.log(mean_rate) + np.log(marginal_density) - np.log(occupancy)
)
else:
return np.zeros_like(occupancy)
def estimate_place_fields_kde_gpu(
position: np.ndarray,
spikes: np.ndarray,
place_bin_centers: np.ndarray,
position_std: Union[float, np.ndarray],
is_track_boundary: Optional[np.ndarray] = None,
is_track_interior: Optional[np.ndarray] = None,
edges: Optional[list[np.ndarray]] = None,
place_bin_edges: Optional[np.ndarray] = None,
use_diffusion: bool = False,
block_size: Optional[int] = None,
) -> xr.DataArray:
"""Gives the conditional intensity of the neurons' spiking with respect to
position.
Parameters
----------
position : np.ndarray, shape (n_time, n_position_dims)
spikes : np.ndarray, shape (n_time,)
Indicator of spike or no spike at current time.
place_bin_centers : np.ndarray, shape (n_bins, n_position_dims)
position_std : float or array_like, shape (n_position_dims,)
Amount of smoothing for position. Standard deviation of kernel.
is_track_boundary : None or np.ndarray, shape (n_bins,)
is_track_interior : None or np.ndarray, shape (n_bins,)
edges : None or list of np.ndarray
place_bin_edges : np.ndarray, shape (n_bins + 1, n_position_dims)
use_diffusion : bool
Respect track geometry by using diffusion distances
block_size : int
Size of data to process in chunks
Returns
-------
conditional_intensity : xr.DataArray, shape (n_bins, n_neurons)
"""
position = atleast_2d(position).astype(np.float32)
place_bin_centers = atleast_2d(place_bin_centers).astype(np.float32)
not_nan_position = np.all(~np.isnan(position), axis=1)
spikes = np.asarray(spikes, dtype=bool)
if use_diffusion & (position.shape[1] > 1):
n_total_bins = np.prod(is_track_interior.shape)
bin_diffusion_distances = diffuse_each_bin(
is_track_interior,
is_track_boundary,
dx=edges[0][1] - edges[0][0],
dy=edges[1][1] - edges[1][0],
std=position_std,
).reshape((n_total_bins, -1), order="F")
occupancy = estimate_diffusion_position_density(
position[not_nan_position],
edges,
bin_distances=bin_diffusion_distances,
).astype(np.float32)
place_fields = np.stack(
[
get_diffusion_firing_rate(
is_spike,
position,
edges,
bin_diffusion_distances,
occupancy,
not_nan_position,
)
for is_spike in tqdm(spikes.T)
],
axis=1,
)
else:
occupancy = np.zeros((place_bin_centers.shape[0],), dtype=np.float32)
occupancy[is_track_interior.ravel(order="F")] = estimate_position_density(
place_bin_centers[is_track_interior.ravel(order="F")],
position[not_nan_position],
position_std,
block_size=block_size,
)
place_fields = np.stack(
[
get_firing_rate(
is_spike,
position,
place_bin_centers,
is_track_interior.ravel(order="F"),
not_nan_position,
occupancy,
position_std,
)
for is_spike in tqdm(spikes.T)
],
axis=1,
)
DIMS = ["position", "neuron"]
if position.shape[1] == 1:
names = ["position"]
coords = {"position": place_bin_centers.squeeze()}
elif position.shape[1] == 2:
names = ["x_position", "y_position"]
coords = {
"position": pd.MultiIndex.from_arrays(
place_bin_centers.T.tolist(), names=names
)
}
return xr.DataArray(data=place_fields, coords=coords, dims=DIMS)
def poisson_log_likelihood(
spikes: cp.ndarray, conditional_intensity: cp.ndarray
) -> cp.ndarray:
"""Probability of parameters given spiking at a particular time.
Parameters
----------
spikes : cp.ndarray, shape (n_time,)
Indicator of spike or no spike at current time.
conditional_intensity : cp.ndarray, shape (n_place_bins,)
Instantaneous probability of observing a spike
Returns
-------
poisson_log_likelihood : cp.ndarray, shape (n_time, n_place_bins)
"""
# Logarithm of the absolute value of the gamma function is always 0 when
# spikes are 0 or 1
return (
xlogy(spikes[:, np.newaxis], conditional_intensity[cp.newaxis, :])
- conditional_intensity[cp.newaxis, :]
)
def combined_likelihood(
spikes: np.ndarray, conditional_intensity: np.ndarray
) -> np.ndarray:
"""
Parameters
----------
spikes : np.ndarray, shape (n_time, n_neurons)
conditional_intensity : np.ndarray, shape (n_bins, n_neurons)
"""
n_time = spikes.shape[0]
n_bins = conditional_intensity.shape[0]
log_likelihood = cp.zeros((n_time, n_bins), dtype=cp.float32)
conditional_intensity = np.clip(conditional_intensity, a_min=1e-15, a_max=None)
mempool = cp.get_default_memory_pool()
for is_spike, ci in zip(
tqdm(cp.asarray(spikes, dtype=cp.float32).T),
cp.asarray(conditional_intensity, dtype=cp.float32).T,
):
log_likelihood += poisson_log_likelihood(is_spike, ci)
mempool.free_all_blocks()
return cp.asnumpy(log_likelihood)
def estimate_spiking_likelihood_kde_gpu(
spikes: np.ndarray,
conditional_intensity: np.ndarray,
is_track_interior: Optional[np.ndarray] = None,
) -> np.ndarray:
"""
Parameters
----------
spikes : np.ndarray, shape (n_time, n_neurons)
conditional_intensity : np.ndarray, shape (n_bins, n_neurons)
is_track_interior : None or np.ndarray, optional, shape (n_x_position_bins,
n_y_position_bins)
Returns
-------
likelihood : np.ndarray, shape (n_time, n_bins)
"""
spikes = np.asarray(spikes, dtype=np.float32)
if is_track_interior is not None:
is_track_interior = is_track_interior.ravel(order="F")
else:
n_bins = conditional_intensity.shape[0]
is_track_interior = np.ones((n_bins,), dtype=bool)
log_likelihood = combined_likelihood(spikes, conditional_intensity)
mask = np.ones_like(is_track_interior, dtype=np.float32)
mask[~is_track_interior] = np.nan
return log_likelihood * mask
except ImportError:
def estimate_place_fields_kde_gpu(*args, **kwargs):
pass
def estimate_spiking_likelihood_kde_gpu(*args, **kwargs):
pass | /replay_trajectory_classification-1.3.15-py3-none-any.whl/replay_trajectory_classification/likelihoods/spiking_likelihood_kde_gpu.py | 0.959639 | 0.516778 | spiking_likelihood_kde_gpu.py | pypi |
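spiking_likelihood_kde_gpu.py guards its GPU code behind a single try/except ImportError and exports no-op stand-ins when cupy is unavailable. A lighter-weight variant of the same idea, sketched below purely for illustration (it is not how this package is organized), is to bind an array backend once and write the numerics against it, since numpy and cupy expose a largely call-compatible API:

import numpy as np

try:
    import cupy as xp  # prefer the GPU backend when cupy and a device are available
    xp.zeros(1)  # fail early if cupy imports but no usable GPU is present
except Exception:
    xp = np  # fall back to numpy with the same array API

def gaussian_pdf(x, mean, sigma):
    # The identical expression runs on either backend.
    return xp.exp(-0.5 * ((x - mean) / sigma) ** 2) / (sigma * xp.sqrt(2.0 * xp.pi))

print(gaussian_pdf(xp.asarray([0.0, 1.0]), 0.0, 1.0))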
from __future__ import annotations
import copy
import math
import dask.bag as db
import networkx as nx
import numba
import numpy as np
SQRT_2PI = np.sqrt(2.0 * np.pi)
@numba.vectorize(["float64(float64, float64)"], nopython=True, cache=True)
def gaussian_kernel(distance, sigma):
"""Compute the value of a Gaussian probability density function at x with
given mean and sigma."""
return math.exp(-0.5 * (distance / sigma) ** 2) / (sigma * SQRT_2PI)
def _distance_to_bin_centers(
left_node,
right_node,
distance_left_node,
distance_right_node,
time_ind,
copy_graph,
place_bin_center_node_ids,
):
node_name = f"actual_position_{time_ind}"
copy_graph.add_node(node_name)
copy_graph.add_edge(left_node, node_name, distance=distance_left_node)
copy_graph.add_edge(node_name, right_node, distance=distance_right_node)
distance_to_bin_centers = [
nx.shortest_path_length(
copy_graph, source=bin_center, target=node_name, weight="distance"
)
for bin_center in place_bin_center_node_ids
]
copy_graph.remove_node(node_name)
return distance_to_bin_centers
def _find_adjacent_nodes(nodes_df, linear_position):
# Find the index of the nodes to insert between
right_bin_ind = np.searchsorted(
nodes_df.linear_position.values, linear_position, side="right"
)
left_bin_ind = right_bin_ind - 1
# Fix indices that are exactly the maximum bin edge
n_bins = nodes_df.linear_position.shape[0]
right_bin_ind[right_bin_ind >= n_bins] -= 1
# Fix for ones that fall into invalid track positions
# Note: Need to test on different binnings
# Another solution is to find the edge this falls on directly.
not_same_edge = (
nodes_df.edge_id.values[left_bin_ind] != nodes_df.edge_id.values[right_bin_ind]
)
right_bin_ind[not_same_edge] = right_bin_ind[not_same_edge] - 1
left_bin_ind[not_same_edge] = left_bin_ind[not_same_edge] - 1
# Get adjacent node names and distance
left_node = nodes_df.reset_index().node_id.values[left_bin_ind]
right_node = nodes_df.reset_index().node_id.values[right_bin_ind]
distance_left_node = np.abs(
nodes_df.loc[left_node].linear_position.values - linear_position
)
distance_right_node = np.abs(
nodes_df.loc[right_node].linear_position.values - linear_position
)
return left_node, right_node, distance_left_node, distance_right_node
def get_distance_to_bin_centers(linear_position, decoder, npartitions=100):
copy_graph = copy.deepcopy(decoder.track_graph_with_bin_centers_edges_)
linear_position = linear_position.squeeze()
nodes_df = decoder.nodes_df_.set_index("node_id")
place_bin_center_node_ids = (
nodes_df.loc[~nodes_df.is_bin_edge].reset_index().node_id.values
)
(
left_node,
right_node,
distance_left_node,
distance_right_node,
) = _find_adjacent_nodes(nodes_df, linear_position)
left_node = db.from_sequence(left_node, npartitions=npartitions)
right_node = db.from_sequence(right_node, npartitions=npartitions)
distance_left_node = db.from_sequence(distance_left_node, npartitions=npartitions)
distance_right_node = db.from_sequence(distance_right_node, npartitions=npartitions)
time_ind = db.from_sequence(
range(linear_position.shape[0]), npartitions=npartitions
)
return np.asarray(
left_node.map(
_distance_to_bin_centers,
right_node,
distance_left_node,
distance_right_node,
time_ind,
copy_graph,
place_bin_center_node_ids,
).compute()
) | /replay_trajectory_classification-1.3.15-py3-none-any.whl/replay_trajectory_classification/likelihoods/multiunit_likelihood_track_graph.py | 0.894513 | 0.500122 | multiunit_likelihood_track_graph.py | pypi |
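What _distance_to_bin_centers does can be reproduced on a toy track graph: temporarily insert a node for the animal's current position between its two flanking nodes, take the shortest weighted path from every bin-center node to it, then remove the temporary node. The graph below (three bin centers 10 cm apart, with the animal 4 cm past the middle one) is made up for illustration:

import networkx as nx

graph = nx.Graph()
graph.add_edge("bin_0", "bin_1", distance=10.0)
graph.add_edge("bin_1", "bin_2", distance=10.0)

# Insert the animal's position 4 cm past bin_1 (and therefore 6 cm before bin_2).
graph.add_node("actual_position_0")
graph.add_edge("bin_1", "actual_position_0", distance=4.0)
graph.add_edge("actual_position_0", "bin_2", distance=6.0)

distances = [
    nx.shortest_path_length(
        graph, source=node, target="actual_position_0", weight="distance"
    )
    for node in ("bin_0", "bin_1", "bin_2")
]
graph.remove_node("actual_position_0")
print(distances)  # [14.0, 4.0, 6.0]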
from __future__ import annotations
from typing import Optional, Union
import numpy as np
from tqdm.autonotebook import tqdm
from replay_trajectory_classification.core import atleast_2d
from replay_trajectory_classification.likelihoods.diffusion import (
diffuse_each_bin,
estimate_diffusion_position_density,
estimate_diffusion_position_distance,
)
def gaussian_pdf(x: np.ndarray, mean: np.ndarray, sigma: np.ndarray) -> np.ndarray:
"""Compute the value of a Gaussian probability density function at x with
given mean and sigma."""
return np.exp(-0.5 * ((x - mean) / sigma) ** 2) / (sigma * np.sqrt(2.0 * np.pi))
def estimate_position_distance(
place_bin_centers: np.ndarray, positions: np.ndarray, position_std: np.ndarray
) -> np.ndarray:
"""Estimates the Euclidean distance between positions and position bins.
Parameters
----------
place_bin_centers : np.ndarray, shape (n_position_bins, n_position_dims)
positions : np.ndarray, shape (n_time, n_position_dims)
position_std : array_like, shape (n_position_dims,)
Returns
-------
position_distance : np.ndarray, shape (n_time, n_position_bins)
"""
n_time, n_position_dims = positions.shape
n_position_bins = place_bin_centers.shape[0]
if isinstance(position_std, (int, float)):
position_std = [position_std] * n_position_dims
position_distance = np.ones((n_time, n_position_bins), dtype=np.float32)
for position_ind, std in enumerate(position_std):
position_distance *= gaussian_pdf(
np.expand_dims(place_bin_centers[:, position_ind], axis=0),
np.expand_dims(positions[:, position_ind], axis=1),
std,
)
return position_distance
def estimate_position_density(
place_bin_centers: np.ndarray,
positions: np.ndarray,
position_std: Union[np.ndarray, float],
block_size: int = 100,
) -> np.ndarray:
"""Estimates a kernel density estimate over position bins using
Euclidean distances.
Parameters
----------
place_bin_centers : np.ndarray, shape (n_position_bins, n_position_dims)
positions : np.ndarray, shape (n_time, n_position_dims)
position_std : float or array_like, shape (n_position_dims,)
Returns
-------
position_density : np.ndarray, shape (n_position_bins,)
"""
n_time = positions.shape[0]
n_position_bins = place_bin_centers.shape[0]
if block_size is None:
block_size = n_time
position_density = np.empty((n_position_bins,))
for start_ind in range(0, n_position_bins, block_size):
block_inds = slice(start_ind, start_ind + block_size)
position_density[block_inds] = np.mean(
estimate_position_distance(
place_bin_centers[block_inds], positions, position_std
),
axis=0,
)
return position_density
def estimate_log_intensity(
density: np.ndarray, occupancy: np.ndarray, mean_rate: float
) -> np.ndarray:
"""Calculates intensity in log space."""
return np.log(mean_rate) + np.log(density) - np.log(occupancy)
def estimate_intensity(density, occupancy, mean_rate):
"""Calculates intensity.
Parameters
----------
density : np.ndarray, shape (n_bins,)
occupancy : np.ndarray, shape (n_bins,)
mean_rate : float
Returns
-------
intensity : np.ndarray, shape (n_bins,)
"""
return np.exp(estimate_log_intensity(density, occupancy, mean_rate))
def normal_pdf_integer_lookup(
x: int, mean: int, std: float = 20.0, max_value: int = 6000
) -> np.ndarray:
    """Fast density evaluation for integers by precomputing a lookup table of
    values.
Parameters
----------
x : int
mean : int
std : float
max_value : int
Returns
-------
    probability_density : np.ndarray
"""
normal_density = gaussian_pdf(np.arange(-max_value, max_value), 0, std).astype(
np.float32
)
return normal_density[(x - mean) + max_value]
def estimate_log_joint_mark_intensity(
decoding_marks: np.ndarray,
encoding_marks: np.ndarray,
mark_std: Union[np.ndarray, float],
occupancy: np.ndarray,
mean_rate: float,
place_bin_centers: Optional[np.ndarray] = None,
encoding_positions: Optional[np.ndarray] = None,
position_std: Optional[np.ndarray] = None,
max_mark_diff: int = 6000,
set_diag_zero: bool = False,
position_distance: Optional[np.ndarray] = None,
) -> np.ndarray:
"""Finds the joint intensity of the marks and positions in log space.
Parameters
----------
decoding_marks : np.ndarray, shape (n_decoding_spikes, n_features)
encoding_marks : np.ndarray, shape (n_encoding_spikes, n_features)
mark_std : float or np.ndarray, shape (n_features,)
occupancy : np.ndarray, shape (n_position_bins,)
mean_rate : float
place_bin_centers : None or np.ndarray, shape (n_position_bins, n_position_dims)
        If None, position_distance must not be None
encoding_positions : None or np.ndarray, shape (n_decoding_spikes, n_position_dims)
        If None, position_distance must not be None
position_std : None or float or array_like, shape (n_position_dims,)
        If None, position_distance must not be None
max_mark_diff : int
Maximum distance between integer marks.
set_diag_zero : bool
position_distance : np.ndarray, shape (n_encoding_spikes, n_position_bins)
Precalculated distance between position and position bins.
Returns
-------
log_joint_mark_intensity : np.ndarray, shape (n_decoding_spikes, n_position_bins)
"""
n_encoding_spikes, n_marks = encoding_marks.shape
n_decoding_spikes = decoding_marks.shape[0]
mark_distance = np.ones((n_decoding_spikes, n_encoding_spikes), dtype=np.float32)
for mark_ind in range(n_marks):
mark_distance *= normal_pdf_integer_lookup(
np.expand_dims(decoding_marks[:, mark_ind], axis=1),
np.expand_dims(encoding_marks[:, mark_ind], axis=0),
std=mark_std,
max_value=max_mark_diff,
)
if set_diag_zero:
diag_ind = (np.arange(n_decoding_spikes), np.arange(n_decoding_spikes))
mark_distance[diag_ind] = 0.0
if position_distance is None:
position_distance = estimate_position_distance(
place_bin_centers, encoding_positions, position_std
).astype(np.float32)
return estimate_log_intensity(
mark_distance @ position_distance / n_encoding_spikes, occupancy, mean_rate
)
def fit_multiunit_likelihood_integer(
position: np.ndarray,
multiunits: np.ndarray,
place_bin_centers: np.ndarray,
mark_std: np.ndarray,
position_std: Union[np.ndarray, float],
is_track_boundary: Optional[np.ndarray] = None,
is_track_interior: Optional[np.ndarray] = None,
edges: Optional[list[np.ndarray]] = None,
block_size: int = 100,
use_diffusion: bool = False,
**kwargs,
) -> dict:
"""Fits the clusterless place field model.
Parameters
----------
position : np.ndarray, shape (n_time, n_position_dims)
multiunits : np.ndarray, shape (n_time, n_marks, n_electrodes)
place_bin_centers : np.ndarray, shape (n_bins, n_position_dims)
mark_std : float
Amount of smoothing for the mark features. Standard deviation of kernel.
position_std : float or array_like, shape (n_position_dims,)
Amount of smoothing for position. Standard deviation of kernel.
is_track_boundary : None or np.ndarray, shape (n_bins,)
is_track_interior : None or np.ndarray, shape (n_bins,)
edges : None or list of np.ndarray
block_size : int
Size of data to process in chunks
use_diffusion : bool
Use diffusion to respect the track geometry.
Returns
-------
encoding_model : dict
"""
if is_track_interior is None:
is_track_interior = np.ones((place_bin_centers.shape[0],), dtype=bool)
position = atleast_2d(position)
place_bin_centers = atleast_2d(place_bin_centers)
interior_place_bin_centers = np.asarray(
place_bin_centers[is_track_interior.ravel(order="F")], dtype=np.float32
)
gpu_is_track_interior = np.asarray(is_track_interior.ravel(order="F"))
not_nan_position = np.all(~np.isnan(position), axis=1)
if use_diffusion & (position.shape[1] > 1):
n_total_bins = np.prod(is_track_interior.shape)
bin_diffusion_distances = diffuse_each_bin(
is_track_interior,
is_track_boundary,
dx=edges[0][1] - edges[0][0],
dy=edges[1][1] - edges[1][0],
std=position_std,
).reshape((n_total_bins, -1), order="F")
else:
bin_diffusion_distances = None
if use_diffusion & (position.shape[1] > 1):
occupancy = np.asarray(
estimate_diffusion_position_density(
position[not_nan_position],
edges,
bin_distances=bin_diffusion_distances,
),
dtype=np.float32,
)
else:
occupancy = np.zeros((place_bin_centers.shape[0],), dtype=np.float32)
occupancy[gpu_is_track_interior] = estimate_position_density(
interior_place_bin_centers,
np.asarray(position[not_nan_position], dtype=np.float32),
position_std,
block_size=block_size,
)
mean_rates = []
summed_ground_process_intensity = np.zeros(
(place_bin_centers.shape[0],), dtype=np.float32
)
encoding_marks = []
encoding_positions = []
for multiunit in np.moveaxis(multiunits, -1, 0):
# ground process intensity
is_spike = np.any(~np.isnan(multiunit), axis=1)
mean_rates.append(is_spike.mean())
if is_spike.sum() > 0:
if use_diffusion & (position.shape[1] > 1):
marginal_density = np.asarray(
estimate_diffusion_position_density(
position[is_spike & not_nan_position],
edges,
bin_distances=bin_diffusion_distances,
),
dtype=np.float32,
)
else:
marginal_density = np.zeros(
(place_bin_centers.shape[0],), dtype=np.float32
)
marginal_density[gpu_is_track_interior] = estimate_position_density(
interior_place_bin_centers,
np.asarray(position[is_spike & not_nan_position], dtype=np.float32),
position_std,
block_size=block_size,
)
summed_ground_process_intensity += estimate_intensity(
marginal_density, occupancy, mean_rates[-1]
)
is_mark_features = np.any(~np.isnan(multiunit), axis=0)
encoding_marks.append(
np.asarray(
multiunit[np.ix_(is_spike & not_nan_position, is_mark_features)],
dtype=np.int16,
)
)
encoding_positions.append(position[is_spike & not_nan_position])
summed_ground_process_intensity = summed_ground_process_intensity + np.spacing(1)
return {
"encoding_marks": encoding_marks,
"encoding_positions": encoding_positions,
"summed_ground_process_intensity": summed_ground_process_intensity,
"occupancy": occupancy,
"mean_rates": mean_rates,
"mark_std": mark_std,
"position_std": position_std,
"block_size": block_size,
"bin_diffusion_distances": bin_diffusion_distances,
"use_diffusion": use_diffusion,
"edges": edges,
**kwargs,
}
def estimate_multiunit_likelihood_integer(
multiunits: np.ndarray,
encoding_marks: np.ndarray,
mark_std: np.ndarray,
place_bin_centers: np.ndarray,
encoding_positions: np.ndarray,
position_std: np.ndarray,
occupancy: np.ndarray,
mean_rates: np.ndarray,
summed_ground_process_intensity: np.ndarray,
bin_diffusion_distances: np.ndarray,
edges: list[np.ndarray],
max_mark_diff: int = 6000,
set_diag_zero: bool = False,
is_track_interior: Optional[np.ndarray] = None,
time_bin_size: int = 1,
block_size: int = 100,
ignore_no_spike: bool = False,
disable_progress_bar: bool = False,
use_diffusion: bool = False,
) -> np.ndarray:
"""Estimates the likelihood of position bins given multiunit marks.
Parameters
----------
multiunits : np.ndarray, shape (n_decoding_time, n_marks, n_electrodes)
encoding_marks : np.ndarray, shape (n_encoding_spikes, n_marks, n_electrodes)
mark_std : float
Amount of smoothing for mark features. Standard deviation of kernel.
place_bin_centers : np.ndarray, shape (n_bins, n_position_dims)
encoding_positions : np.ndarray, shape (n_encoding_spikes, n_position_dims)
position_std : float or array_like, shape (n_position_dims,)
Amount of smoothing for position. Standard deviation of kernel.
occupancy : np.ndarray, (n_bins,)
mean_rates : list, len (n_electrodes,)
summed_ground_process_intensity : np.ndarray, shape (n_bins,)
bin_diffusion_distances : np.ndarray, shape (n_bins, n_bins)
edges : list of np.ndarray
max_mark_diff : int
Maximum difference between mark features
set_diag_zero : bool
Remove influence of the same mark in encoding and decoding.
is_track_interior : None or np.ndarray, shape (n_bins_x, n_bins_y)
time_bin_size : float
Size of time steps
block_size : int
Size of data to process in chunks
ignore_no_spike : bool
Set contribution of no spikes to zero
disable_progress_bar : bool
If False, a progress bar will be displayed.
use_diffusion : bool
Respect track geometry by using diffusion distances
Returns
-------
log_likelihood : np.ndarray, shape (n_time, n_bins)
"""
if is_track_interior is None:
is_track_interior = np.ones((place_bin_centers.shape[0],), dtype=bool)
else:
is_track_interior = is_track_interior.ravel(order="F")
n_time = multiunits.shape[0]
if ignore_no_spike:
log_likelihood = (
-time_bin_size
* summed_ground_process_intensity
* np.zeros((n_time, 1), dtype=np.float32)
)
else:
log_likelihood = (
-time_bin_size
* summed_ground_process_intensity
* np.ones((n_time, 1), dtype=np.float32)
)
multiunits = np.moveaxis(multiunits, -1, 0)
n_position_bins = is_track_interior.sum()
interior_place_bin_centers = np.asarray(
place_bin_centers[is_track_interior], dtype=np.float32
)
gpu_is_track_interior = np.asarray(is_track_interior)
interior_occupancy = occupancy[gpu_is_track_interior]
for multiunit, enc_marks, enc_pos, mean_rate in zip(
tqdm(multiunits, desc="n_electrodes", disable=disable_progress_bar),
encoding_marks,
encoding_positions,
mean_rates,
):
is_spike = np.any(~np.isnan(multiunit), axis=1)
is_mark_features = np.any(~np.isnan(multiunit), axis=0)
decoding_marks = np.asarray(
multiunit[np.ix_(is_spike, is_mark_features)], dtype=np.int16
)
n_decoding_marks = decoding_marks.shape[0]
log_joint_mark_intensity = np.zeros(
(n_decoding_marks, n_position_bins), dtype=np.float32
)
if block_size is None:
block_size = n_decoding_marks
if use_diffusion & (place_bin_centers.shape[1] > 1):
position_distance = np.asarray(
estimate_diffusion_position_distance(
enc_pos,
edges,
bin_distances=bin_diffusion_distances,
)[:, is_track_interior],
dtype=np.float32,
)
else:
position_distance = estimate_position_distance(
interior_place_bin_centers,
np.asarray(enc_pos, dtype=np.float32),
position_std,
).astype(np.float32)
for start_ind in range(0, n_decoding_marks, block_size):
block_inds = slice(start_ind, start_ind + block_size)
log_joint_mark_intensity[block_inds] = estimate_log_joint_mark_intensity(
decoding_marks[block_inds],
enc_marks,
mark_std,
interior_occupancy,
mean_rate,
max_mark_diff=max_mark_diff,
set_diag_zero=set_diag_zero,
position_distance=position_distance,
)
log_likelihood[np.ix_(is_spike, is_track_interior)] += np.nan_to_num(
log_joint_mark_intensity
)
log_likelihood[:, ~is_track_interior] = np.nan
return log_likelihood | /replay_trajectory_classification-1.3.15-py3-none-any.whl/replay_trajectory_classification/likelihoods/multiunit_likelihood_integer.py | 0.979146 | 0.670622 | multiunit_likelihood_integer.py | pypi |
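normal_pdf_integer_lookup above trades a Gaussian evaluation per mark difference for a single table lookup, which only works because the mark features in this module are integers (int16). A small check, assuming the package is importable, that the lookup agrees with evaluating the kernel directly on the same integer differences; the mark values are toy numbers:

import numpy as np
from replay_trajectory_classification.likelihoods.multiunit_likelihood_integer import (
    gaussian_pdf,
    normal_pdf_integer_lookup,
)

decoding_marks = np.array([[100], [250]], dtype=np.int16)  # 2 decoding spikes, 1 feature
encoding_marks = np.array([[90, 240, 260]], dtype=np.int16)  # 3 encoding spikes

lookup = normal_pdf_integer_lookup(decoding_marks, encoding_marks, std=20.0, max_value=6000)
direct = gaussian_pdf((decoding_marks - encoding_marks).astype(float), 0.0, 20.0)
print(np.allclose(lookup, direct, atol=1e-6))  # True (lookup is float32, direct is float64)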
"""Estimates a marked point process likelihood where the marks are
features of the spike waveform using GPUs. Features are float32."""
from __future__ import annotations
from typing import Optional, Union
import numpy as np
from tqdm.autonotebook import tqdm
from replay_trajectory_classification.core import atleast_2d
from replay_trajectory_classification.likelihoods.diffusion import (
diffuse_each_bin,
estimate_diffusion_position_density,
estimate_diffusion_position_distance,
)
try:
import cupy as cp
@cp.fuse
def gaussian_pdf(x: cp.ndarray, mean: cp.ndarray, sigma: cp.ndarray) -> cp.ndarray:
"""Compute the value of a Gaussian probability density function at x with
given mean and sigma."""
return cp.exp(-0.5 * ((x - mean) / sigma) ** 2) / (sigma * cp.sqrt(2.0 * cp.pi))
def estimate_position_distance(
place_bin_centers: cp.ndarray,
positions: cp.ndarray,
position_std: Union[float, cp.ndarray],
) -> cp.ndarray:
"""Estimates the Euclidean distance between positions and position bins.
Parameters
----------
place_bin_centers : cp.ndarray, shape (n_position_bins, n_position_dims)
positions : cp.ndarray, shape (n_time, n_position_dims)
position_std : cp.ndarray, shape (n_position_dims,)
Returns
-------
position_distance : cp.ndarray, shape (n_time, n_position_bins)
"""
n_time, n_position_dims = positions.shape
n_position_bins = place_bin_centers.shape[0]
if isinstance(position_std, (int, float)):
position_std = [position_std] * n_position_dims
position_distance = cp.ones((n_time, n_position_bins), dtype=cp.float32)
for position_ind, std in enumerate(position_std):
position_distance *= gaussian_pdf(
cp.expand_dims(place_bin_centers[:, position_ind], axis=0),
cp.expand_dims(positions[:, position_ind], axis=1),
std,
)
return position_distance
def estimate_position_density(
place_bin_centers: cp.ndarray,
positions: cp.ndarray,
position_std: Union[float, cp.ndarray],
block_size: int = 100,
) -> cp.ndarray:
"""Estimates a kernel density estimate over position bins using
Euclidean distances.
Parameters
----------
place_bin_centers : cp.ndarray, shape (n_position_bins, n_position_dims)
positions : cp.ndarray, shape (n_time, n_position_dims)
position_std : float or cp.ndarray, shape (n_position_dims,)
Returns
-------
position_density : cp.ndarray, shape (n_position_bins,)
"""
n_time = positions.shape[0]
n_position_bins = place_bin_centers.shape[0]
if block_size is None:
block_size = n_time
position_density = cp.empty((n_position_bins,))
for start_ind in range(0, n_position_bins, block_size):
block_inds = slice(start_ind, start_ind + block_size)
position_density[block_inds] = cp.mean(
estimate_position_distance(
place_bin_centers[block_inds], positions, position_std
),
axis=0,
)
return position_density
def estimate_log_intensity(
density: cp.ndarray, occupancy: cp.ndarray, mean_rate: float
) -> cp.ndarray:
"""Calculates intensity in log space."""
return cp.log(mean_rate) + cp.log(density) - cp.log(occupancy)
def estimate_intensity(
density: cp.ndarray, occupancy: cp.ndarray, mean_rate: float
) -> cp.ndarray:
"""Calculates intensity.
Parameters
----------
density : cp.ndarray, shape (n_bins,)
occupancy : cp.ndarray, shape (n_bins,)
mean_rate : float
Returns
-------
intensity : cp.ndarray, shape (n_bins,)
"""
return cp.exp(estimate_log_intensity(density, occupancy, mean_rate))
def estimate_log_joint_mark_intensity(
decoding_marks: cp.ndarray,
encoding_marks: cp.ndarray,
mark_std: Union[float, cp.ndarray],
occupancy: cp.ndarray,
mean_rate: float,
place_bin_centers: Optional[cp.ndarray] = None,
encoding_positions: Optional[cp.ndarray] = None,
position_std: Union[float, cp.ndarray, None] = None,
max_mark_diff: int = 6000,
set_diag_zero: bool = False,
position_distance: Optional[cp.ndarray] = None,
) -> np.ndarray:
"""Finds the joint intensity of the marks and positions in log space.
Parameters
----------
decoding_marks : np.ndarray, shape (n_decoding_spikes, n_features)
encoding_marks : np.ndarray, shape (n_encoding_spikes, n_features)
mark_std : float or np.ndarray, shape (n_features,)
occupancy : np.ndarray, shape (n_position_bins,)
mean_rate : float
place_bin_centers : None or np.ndarray, shape (n_position_bins, n_position_dims)
            If None, position_distance must not be None
encoding_positions : None or np.ndarray, shape (n_decoding_spikes, n_position_dims)
            If None, position_distance must not be None
position_std : None or float or array_like, shape (n_position_dims,)
            If None, position_distance must not be None
max_mark_diff : int
Maximum distance between integer marks.
set_diag_zero : bool
position_distance : np.ndarray, shape (n_encoding_spikes, n_position_bins)
Precalculated distance between position and position bins.
Returns
-------
log_joint_mark_intensity : np.ndarray, shape (n_decoding_spikes, n_position_bins)
"""
n_encoding_spikes, n_marks = encoding_marks.shape
n_decoding_spikes = decoding_marks.shape[0]
mark_distance = cp.ones(
(n_decoding_spikes, n_encoding_spikes), dtype=cp.float32
)
for mark_ind in range(n_marks):
mark_distance *= gaussian_pdf(
cp.expand_dims(decoding_marks[:, mark_ind], axis=1),
cp.expand_dims(encoding_marks[:, mark_ind], axis=0),
mark_std[mark_ind],
)
if set_diag_zero:
diag_ind = (cp.arange(n_decoding_spikes), cp.arange(n_decoding_spikes))
mark_distance[diag_ind] = 0.0
if position_distance is None:
position_distance = estimate_position_distance(
place_bin_centers, encoding_positions, position_std
).astype(cp.float32)
return cp.asnumpy(
estimate_log_intensity(
mark_distance @ position_distance / n_encoding_spikes,
occupancy,
mean_rate,
)
)
def fit_multiunit_likelihood_gpu(
position: np.ndarray,
multiunits: np.ndarray,
place_bin_centers: np.ndarray,
mark_std: Union[float, np.ndarray],
position_std: Union[float, np.ndarray],
is_track_boundary: Optional[np.ndarray] = None,
is_track_interior: Optional[np.ndarray] = None,
edges: Optional[list[np.ndarray]] = None,
block_size: int = 100,
use_diffusion: bool = False,
**kwargs,
    ) -> dict:
"""Fits the clusterless place field model.
Parameters
----------
position : np.ndarray, shape (n_time, n_position_dims)
multiunits : np.ndarray, shape (n_time, n_marks, n_electrodes)
place_bin_centers : np.ndarray, shape (n_bins, n_position_dims)
mark_std : float or np.ndarray, shape (n_marks,)
Amount of smoothing for the mark features. Standard deviation of kernel.
position_std : float or np.ndarray, shape (n_position_dims,)
Amount of smoothing for position. Standard deviation of kernel.
is_track_boundary : None or np.ndarray, shape (n_bins,)
is_track_interior : None or np.ndarray, shape (n_bins,)
edges : None or list of np.ndarray
block_size : int
Size of data to process in chunks
use_diffusion : bool
Use diffusion to respect the track geometry.
Returns
-------
encoding_model : dict
"""
if is_track_interior is None:
is_track_interior = np.ones((place_bin_centers.shape[0],), dtype=bool)
position = atleast_2d(position)
place_bin_centers = atleast_2d(place_bin_centers)
interior_place_bin_centers = cp.asarray(
place_bin_centers[is_track_interior.ravel(order="F")], dtype=cp.float32
)
gpu_is_track_interior = cp.asarray(is_track_interior.ravel(order="F"))
not_nan_position = np.all(~np.isnan(position), axis=1)
if use_diffusion & (position.shape[1] > 1):
n_total_bins = np.prod(is_track_interior.shape)
bin_diffusion_distances = diffuse_each_bin(
is_track_interior,
is_track_boundary,
dx=edges[0][1] - edges[0][0],
dy=edges[1][1] - edges[1][0],
std=position_std,
).reshape((n_total_bins, -1), order="F")
else:
bin_diffusion_distances = None
if use_diffusion & (position.shape[1] > 1):
occupancy = cp.asarray(
estimate_diffusion_position_density(
position[not_nan_position],
edges,
bin_distances=bin_diffusion_distances,
),
dtype=cp.float32,
)
else:
occupancy = cp.zeros((place_bin_centers.shape[0],), dtype=cp.float32)
occupancy[gpu_is_track_interior] = estimate_position_density(
interior_place_bin_centers,
cp.asarray(position[not_nan_position], dtype=cp.float32),
position_std,
block_size=block_size,
)
mean_rates = []
summed_ground_process_intensity = cp.zeros(
(place_bin_centers.shape[0],), dtype=cp.float32
)
encoding_marks = []
encoding_positions = []
n_marks = multiunits.shape[1]
if isinstance(mark_std, (int, float)):
mark_std = np.asarray([mark_std] * n_marks)
else:
mark_std = np.asarray(mark_std)
for multiunit in np.moveaxis(multiunits, -1, 0):
# ground process intensity
is_spike = np.any(~np.isnan(multiunit), axis=1)
mean_rates.append(is_spike.mean())
if is_spike.sum() > 0:
if use_diffusion & (position.shape[1] > 1):
marginal_density = cp.asarray(
estimate_diffusion_position_density(
position[is_spike & not_nan_position],
edges,
bin_distances=bin_diffusion_distances,
),
dtype=cp.float32,
)
else:
marginal_density = cp.zeros(
(place_bin_centers.shape[0],), dtype=cp.float32
)
marginal_density[gpu_is_track_interior] = estimate_position_density(
interior_place_bin_centers,
cp.asarray(
position[is_spike & not_nan_position], dtype=cp.float32
),
position_std,
block_size=block_size,
)
summed_ground_process_intensity += estimate_intensity(
marginal_density, occupancy, mean_rates[-1]
)
is_mark_features = np.any(~np.isnan(multiunit), axis=0)
encoding_marks.append(
cp.asarray(
multiunit[np.ix_(is_spike & not_nan_position, is_mark_features)],
dtype=cp.float32,
)
)
encoding_positions.append(position[is_spike & not_nan_position])
summed_ground_process_intensity = cp.asnumpy(
summed_ground_process_intensity
) + np.spacing(1)
return {
"encoding_marks": encoding_marks,
"encoding_positions": encoding_positions,
"summed_ground_process_intensity": summed_ground_process_intensity,
"occupancy": occupancy,
"mean_rates": mean_rates,
"mark_std": mark_std,
"position_std": position_std,
"block_size": block_size,
"bin_diffusion_distances": bin_diffusion_distances,
"use_diffusion": use_diffusion,
"edges": edges,
**kwargs,
}
def estimate_multiunit_likelihood_gpu(
multiunits: np.ndarray,
encoding_marks: cp.ndarray,
mark_std: np.ndarray,
place_bin_centers: np.ndarray,
encoding_positions: cp.ndarray,
position_std: np.ndarray,
occupancy: cp.ndarray,
mean_rates: cp.ndarray,
summed_ground_process_intensity: np.ndarray,
bin_diffusion_distances: np.ndarray,
edges: list[np.ndarray],
max_mark_diff: int = 6000,
set_diag_zero: bool = False,
is_track_interior: Optional[np.ndarray] = None,
time_bin_size: int = 1,
block_size: int = 100,
ignore_no_spike: bool = False,
disable_progress_bar: bool = False,
use_diffusion: bool = False,
) -> np.ndarray:
"""Estimates the likelihood of position bins given multiunit marks.
Parameters
----------
multiunits : np.ndarray, shape (n_decoding_time, n_marks, n_electrodes)
encoding_marks : cp.ndarray, shape (n_encoding_spikes, n_marks, n_electrodes)
        mark_std : np.ndarray, shape (n_marks,)
Amount of smoothing for mark features
place_bin_centers : cp.ndarray, shape (n_bins, n_position_dims)
encoding_positions : cp.ndarray, shape (n_encoding_spikes, n_position_dims)
position_std : float or array_like, shape (n_position_dims,)
Amount of smoothing for position
occupancy : cp.ndarray, (n_bins,)
mean_rates : list, len (n_electrodes,)
summed_ground_process_intensity : np.ndarray, shape (n_bins,)
bin_diffusion_distances : np.ndarray, shape (n_bins, n_bins)
edges : list of np.ndarray
max_mark_diff : int
Maximum difference between mark features
set_diag_zero : bool
Remove influence of the same mark in encoding and decoding.
is_track_interior : None or np.ndarray, shape (n_bins_x, n_bins_y)
time_bin_size : float
Size of time steps
block_size : int
Size of data to process in chunks
ignore_no_spike : bool
Set contribution of no spikes to zero
disable_progress_bar : bool
If False, a progress bar will be displayed.
use_diffusion : bool
Respect track geometry by using diffusion distances
Returns
-------
        log_likelihood : np.ndarray, shape (n_time, n_bins)
"""
if is_track_interior is None:
is_track_interior = np.ones((place_bin_centers.shape[0],), dtype=bool)
else:
is_track_interior = is_track_interior.ravel(order="F")
n_time = multiunits.shape[0]
if ignore_no_spike:
log_likelihood = (
-time_bin_size
* summed_ground_process_intensity
* np.zeros((n_time, 1), dtype=np.float32)
)
else:
log_likelihood = (
-time_bin_size
* summed_ground_process_intensity
* np.ones((n_time, 1), dtype=np.float32)
)
multiunits = np.moveaxis(multiunits, -1, 0)
n_position_bins = is_track_interior.sum()
interior_place_bin_centers = cp.asarray(
place_bin_centers[is_track_interior], dtype=cp.float32
)
gpu_is_track_interior = cp.asarray(is_track_interior)
interior_occupancy = occupancy[gpu_is_track_interior]
for multiunit, enc_marks, enc_pos, mean_rate in zip(
tqdm(multiunits, desc="n_electrodes", disable=disable_progress_bar),
encoding_marks,
encoding_positions,
mean_rates,
):
is_spike = np.any(~np.isnan(multiunit), axis=1)
is_mark_features = np.any(~np.isnan(multiunit), axis=0)
decoding_marks = cp.asarray(
multiunit[np.ix_(is_spike, is_mark_features)], dtype=cp.float32
)
n_decoding_marks = decoding_marks.shape[0]
log_joint_mark_intensity = np.zeros(
(n_decoding_marks, n_position_bins), dtype=np.float32
)
if block_size is None:
block_size = n_decoding_marks
if use_diffusion & (place_bin_centers.shape[1] > 1):
position_distance = cp.asarray(
estimate_diffusion_position_distance(
enc_pos,
edges,
bin_distances=bin_diffusion_distances,
)[:, is_track_interior],
dtype=cp.float32,
)
else:
position_distance = estimate_position_distance(
interior_place_bin_centers,
cp.asarray(enc_pos, dtype=cp.float32),
position_std,
).astype(cp.float32)
for start_ind in range(0, n_decoding_marks, block_size):
block_inds = slice(start_ind, start_ind + block_size)
log_joint_mark_intensity[
block_inds
] = estimate_log_joint_mark_intensity(
decoding_marks[block_inds],
enc_marks,
mark_std[is_mark_features],
interior_occupancy,
mean_rate,
max_mark_diff=max_mark_diff,
set_diag_zero=set_diag_zero,
position_distance=position_distance,
)
log_likelihood[np.ix_(is_spike, is_track_interior)] += np.nan_to_num(
log_joint_mark_intensity
)
mempool = cp.get_default_memory_pool()
mempool.free_all_blocks()
log_likelihood[:, ~is_track_interior] = np.nan
return log_likelihood
except ImportError:
def estimate_multiunit_likelihood_gpu(*args, **kwargs):
print("Cupy is not installed or no GPU detected...")
def fit_multiunit_likelihood_gpu(*args, **kwargs):
print("Cupy is not installed or no GPU detected...") | /replay_trajectory_classification-1.3.15-py3-none-any.whl/replay_trajectory_classification/likelihoods/multiunit_likelihood_gpu.py | 0.982143 | 0.707998 | multiunit_likelihood_gpu.py | pypi |
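Every estimate_* function in these modules processes decoding spikes in chunks of block_size rows so that only a (block_size, n_encoding_spikes) kernel matrix is ever resident, which is what keeps (GPU) memory bounded. A generic numpy sketch of that pattern with made-up shapes, showing that block-wise evaluation reproduces the full computation:

import numpy as np

rng = np.random.default_rng(1)
decoding = rng.normal(size=(1000, 4)).astype(np.float32)  # decoding-spike mark features
encoding = rng.normal(size=(800, 4)).astype(np.float32)  # encoding-spike mark features
position_weight = rng.random((800, 50)).astype(np.float32)  # encoding spike -> position bin weights

def mark_kernel(a, b, std=1.0):
    # Product of per-feature Gaussian kernels between two sets of marks.
    difference = a[:, np.newaxis, :] - b[np.newaxis, :, :]
    return np.exp(-0.5 * (difference / std) ** 2).prod(axis=-1)

full = mark_kernel(decoding, encoding) @ position_weight

block_size = 100
blocked = np.empty_like(full)
for start_ind in range(0, decoding.shape[0], block_size):
    block_inds = slice(start_ind, start_ind + block_size)
    # Only a (block_size, n_encoding_spikes) kernel matrix is materialized here.
    blocked[block_inds] = mark_kernel(decoding[block_inds], encoding) @ position_weight

print(np.allclose(full, blocked, rtol=1e-4))  # True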
from __future__ import annotations
from typing import Optional, Union
import numpy as np
import pandas as pd
import xarray as xr
from scipy.special import xlogy
from tqdm.auto import tqdm
from replay_trajectory_classification.core import atleast_2d
from replay_trajectory_classification.likelihoods.diffusion import (
diffuse_each_bin,
estimate_diffusion_position_density,
)
def gaussian_pdf(x: np.ndarray, mean: np.ndarray, sigma: np.ndarray) -> np.ndarray:
"""Compute the value of a Gaussian probability density function at x with
given mean and sigma."""
return np.exp(-0.5 * ((x - mean) / sigma) ** 2) / (sigma * np.sqrt(2.0 * np.pi))
def estimate_position_distance(
place_bin_centers: np.ndarray, positions: np.ndarray, position_std: np.ndarray
) -> np.ndarray:
"""Estimates the Euclidean distance between positions and position bins.
Parameters
----------
place_bin_centers : np.ndarray, shape (n_position_bins, n_position_dims)
positions : np.ndarray, shape (n_time, n_position_dims)
position_std : array_like, shape (n_position_dims,)
Returns
-------
position_distance : np.ndarray, shape (n_time, n_position_bins)
"""
n_time, n_position_dims = positions.shape
n_position_bins = place_bin_centers.shape[0]
position_distance = np.ones((n_time, n_position_bins), dtype=np.float32)
if isinstance(position_std, (int, float)):
position_std = [position_std] * n_position_dims
for position_ind, std in enumerate(position_std):
position_distance *= gaussian_pdf(
np.expand_dims(place_bin_centers[:, position_ind], axis=0),
np.expand_dims(positions[:, position_ind], axis=1),
std,
)
return position_distance
def estimate_position_density(
place_bin_centers: np.ndarray,
positions: np.ndarray,
position_std: Union[float, np.ndarray],
block_size: int = 100,
) -> np.ndarray:
"""Estimates a kernel density estimate over position bins using
Euclidean distances.
Parameters
----------
place_bin_centers : np.ndarray, shape (n_position_bins, n_position_dims)
positions : np.ndarray, shape (n_time, n_position_dims)
position_std : float or array_like, shape (n_position_dims,)
block_size : int
Returns
-------
position_density : np.ndarray, shape (n_position_bins,)
"""
n_time = positions.shape[0]
n_position_bins = place_bin_centers.shape[0]
if block_size is None:
block_size = n_time
position_density = np.empty((n_position_bins,))
for start_ind in range(0, n_position_bins, block_size):
block_inds = slice(start_ind, start_ind + block_size)
position_density[block_inds] = np.mean(
estimate_position_distance(
place_bin_centers[block_inds], positions, position_std
),
axis=0,
)
return position_density
def get_firing_rate(
is_spike: np.ndarray,
position: np.ndarray,
place_bin_centers: np.ndarray,
is_track_interior: np.ndarray,
not_nan_position: np.ndarray,
occupancy: np.ndarray,
position_std: np.ndarray,
block_size: Optional[int] = None,
) -> np.ndarray:
if is_spike.sum() > 0:
mean_rate = is_spike.mean()
marginal_density = np.zeros((place_bin_centers.shape[0],), dtype=np.float32)
marginal_density[is_track_interior] = estimate_position_density(
place_bin_centers[is_track_interior],
np.asarray(position[is_spike & not_nan_position], dtype=np.float32),
position_std,
block_size=block_size,
)
return np.exp(np.log(mean_rate) + np.log(marginal_density) - np.log(occupancy))
else:
return np.zeros_like(occupancy)
def get_diffusion_firing_rate(
is_spike: np.ndarray,
position: np.ndarray,
edges: list[np.ndarray],
bin_diffusion_distances: np.ndarray,
occupancy: np.ndarray,
not_nan_position: np.ndarray,
) -> np.ndarray:
if is_spike.sum() > 0:
mean_rate = is_spike.mean()
marginal_density = estimate_diffusion_position_density(
position[is_spike & not_nan_position],
edges,
bin_distances=bin_diffusion_distances,
).astype(np.float32)
return np.exp(np.log(mean_rate) + np.log(marginal_density) - np.log(occupancy))
else:
return np.zeros_like(occupancy)
def estimate_place_fields_kde(
position: np.ndarray,
spikes: np.ndarray,
place_bin_centers: np.ndarray,
position_std: np.ndarray,
is_track_boundary: Optional[np.ndarray] = None,
is_track_interior: Optional[np.ndarray] = None,
edges: Optional[list[np.ndarray]] = None,
place_bin_edges: Optional[np.ndarray] = None,
use_diffusion: bool = False,
block_size: Optional[int] = None,
) -> xr.DataArray:
"""Gives the conditional intensity of the neurons' spiking with respect to
position.
Parameters
----------
position : np.ndarray, shape (n_time, n_position_dims)
spikes : np.ndarray, shape (n_time,)
Indicator of spike or no spike at current time.
place_bin_centers : np.ndarray, shape (n_bins, n_position_dims)
position_std : float or array_like, shape (n_position_dims,)
Amount of smoothing for position. Standard deviation of kernel.
is_track_boundary : None or np.ndarray, shape (n_bins,)
is_track_interior : None or np.ndarray, shape (n_bins,)
edges : None or list of np.ndarray
place_bin_edges : np.ndarray, shape (n_bins + 1, n_position_dims)
use_diffusion : bool
Respect track geometry by using diffusion distances
block_size : int
Size of data to process in chunks
Returns
-------
conditional_intensity : np.ndarray, shape (n_bins, n_neurons)
"""
position = atleast_2d(position).astype(np.float32)
place_bin_centers = atleast_2d(place_bin_centers).astype(np.float32)
not_nan_position = np.all(~np.isnan(position), axis=1)
if use_diffusion & (position.shape[1] > 1):
n_total_bins = np.prod(is_track_interior.shape)
bin_diffusion_distances = diffuse_each_bin(
is_track_interior,
is_track_boundary,
dx=edges[0][1] - edges[0][0],
dy=edges[1][1] - edges[1][0],
std=position_std,
).reshape((n_total_bins, -1), order="F")
occupancy = estimate_diffusion_position_density(
position[not_nan_position],
edges,
bin_distances=bin_diffusion_distances,
).astype(np.float32)
place_fields = np.stack(
[
get_diffusion_firing_rate(
is_spike,
position,
edges,
bin_diffusion_distances,
occupancy,
not_nan_position,
)
for is_spike in np.asarray(spikes, dtype=bool).T
],
axis=1,
)
else:
occupancy = np.zeros((place_bin_centers.shape[0],), dtype=np.float32)
occupancy[is_track_interior.ravel(order="F")] = estimate_position_density(
place_bin_centers[is_track_interior.ravel(order="F")],
position[not_nan_position],
position_std,
block_size=block_size,
)
place_fields = np.stack(
[
get_firing_rate(
is_spike,
position,
place_bin_centers,
is_track_interior.ravel(order="F"),
not_nan_position,
occupancy,
position_std,
)
for is_spike in np.asarray(spikes, dtype=bool).T
],
axis=1,
)
DIMS = ["position", "neuron"]
if position.shape[1] == 1:
names = ["position"]
coords = {"position": place_bin_centers.squeeze()}
elif position.shape[1] == 2:
names = ["x_position", "y_position"]
coords = {
"position": pd.MultiIndex.from_arrays(
place_bin_centers.T.tolist(), names=names
)
}
return xr.DataArray(data=place_fields, coords=coords, dims=DIMS)
def poisson_log_likelihood(
spikes: np.ndarray, conditional_intensity: np.ndarray
) -> np.ndarray:
"""Probability of parameters given spiking at a particular time.
Parameters
----------
spikes : np.ndarray, shape (n_time,)
Indicator of spike or no spike at current time.
conditional_intensity : np.ndarray, shape (n_place_bins,)
Instantaneous probability of observing a spike
Returns
-------
poisson_log_likelihood : array_like, shape (n_time, n_place_bins)
"""
return (
xlogy(spikes[:, np.newaxis], conditional_intensity[np.newaxis, :])
- conditional_intensity[np.newaxis, :]
)
def combined_likelihood(
spikes: np.ndarray, conditional_intensity: np.ndarray
) -> np.ndarray:
"""
Parameters
----------
spikes : np.ndarray, shape (n_time, n_neurons)
conditional_intensity : np.ndarray, shape (n_bins, n_neurons)
"""
n_time = spikes.shape[0]
n_bins = conditional_intensity.shape[0]
log_likelihood = np.zeros((n_time, n_bins))
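    # Clip to avoid -inf from log(0) when a spike occurs in a bin with zero estimated intensity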
conditional_intensity = np.clip(conditional_intensity, a_min=1e-15, a_max=None)
for is_spike, ci in zip(tqdm(spikes.T), conditional_intensity.T):
log_likelihood += poisson_log_likelihood(is_spike, ci)
return log_likelihood
def estimate_spiking_likelihood_kde(
spikes: np.ndarray,
conditional_intensity: np.ndarray,
is_track_interior: Optional[np.ndarray] = None,
) -> np.ndarray:
"""
Parameters
----------
spikes : np.ndarray, shape (n_time, n_neurons)
conditional_intensity : np.ndarray, shape (n_bins, n_neurons)
is_track_interior : None or np.ndarray, optional, shape (n_x_position_bins,
n_y_position_bins)
Returns
-------
likelihood : np.ndarray, shape (n_time, n_bins)
"""
spikes = np.asarray(spikes, dtype=np.float32)
if is_track_interior is not None:
is_track_interior = is_track_interior.ravel(order="F")
else:
n_bins = conditional_intensity.shape[0]
is_track_interior = np.ones((n_bins,), dtype=bool)
log_likelihood = combined_likelihood(spikes, conditional_intensity)
mask = np.ones_like(is_track_interior, dtype=float)
mask[~is_track_interior] = np.nan
return log_likelihood * mask | /replay_trajectory_classification-1.3.15-py3-none-any.whl/replay_trajectory_classification/likelihoods/spiking_likelihood_kde.py | 0.970826 | 0.556159 | spiking_likelihood_kde.py | pypi |
# flake8: noqa
from replay_trajectory_classification.likelihoods.calcium_likelihood import (
estimate_calcium_likelihood,
estimate_calcium_place_fields,
)
from replay_trajectory_classification.likelihoods.multiunit_likelihood import (
estimate_multiunit_likelihood,
fit_multiunit_likelihood,
)
from replay_trajectory_classification.likelihoods.multiunit_likelihood_gpu import (
estimate_multiunit_likelihood_gpu,
fit_multiunit_likelihood_gpu,
)
from replay_trajectory_classification.likelihoods.multiunit_likelihood_integer import (
estimate_multiunit_likelihood_integer,
fit_multiunit_likelihood_integer,
)
from replay_trajectory_classification.likelihoods.multiunit_likelihood_integer_gpu import (
estimate_multiunit_likelihood_integer_gpu,
fit_multiunit_likelihood_integer_gpu,
)
from replay_trajectory_classification.likelihoods.spiking_likelihood_glm import (
estimate_place_fields,
estimate_spiking_likelihood,
)
from replay_trajectory_classification.likelihoods.spiking_likelihood_kde import (
estimate_place_fields_kde,
estimate_spiking_likelihood_kde,
)
from replay_trajectory_classification.likelihoods.spiking_likelihood_kde_gpu import (
estimate_place_fields_kde_gpu,
estimate_spiking_likelihood_kde_gpu,
)
from .multiunit_likelihood_integer_gpu_log import (
estimate_multiunit_likelihood_integer_gpu_log,
fit_multiunit_likelihood_integer_gpu_log,
)
_ClUSTERLESS_ALGORITHMS = {
"multiunit_likelihood": (fit_multiunit_likelihood, estimate_multiunit_likelihood),
"multiunit_likelihood_gpu": (
fit_multiunit_likelihood_gpu,
estimate_multiunit_likelihood_gpu,
),
"multiunit_likelihood_integer": (
fit_multiunit_likelihood_integer,
estimate_multiunit_likelihood_integer,
),
"multiunit_likelihood_integer_gpu": (
fit_multiunit_likelihood_integer_gpu,
estimate_multiunit_likelihood_integer_gpu,
),
"multiunit_likelihood_integer_gpu_log": (
fit_multiunit_likelihood_integer_gpu_log,
estimate_multiunit_likelihood_integer_gpu_log,
),
}
_SORTED_SPIKES_ALGORITHMS = {
"spiking_likelihood_glm": (estimate_place_fields, estimate_spiking_likelihood),
"spiking_likelihood_kde": (
estimate_place_fields_kde,
estimate_spiking_likelihood_kde,
),
"spiking_likelihood_kde_gpu": (
estimate_place_fields_kde_gpu,
estimate_spiking_likelihood_kde_gpu,
),
}
_CALCIUM_ALGORITHMS = {
"calcium_likelihood": (estimate_calcium_place_fields, estimate_calcium_likelihood)
} | /replay_trajectory_classification-1.3.15-py3-none-any.whl/replay_trajectory_classification/likelihoods/__init__.py | 0.766731 | 0.351728 | __init__.py | pypi |
from __future__ import annotations
import math
import numpy as np
from numba import cuda
from replay_trajectory_classification.core import atleast_2d
from replay_trajectory_classification.likelihoods.diffusion import (
diffuse_each_bin,
estimate_diffusion_position_density,
estimate_diffusion_position_distance,
)
from tqdm.autonotebook import tqdm
try:
import cupy as cp
def logsumexp(a, axis):
a_max = cp.amax(a, axis=axis, keepdims=True)
if a_max.ndim > 0:
a_max[~cp.isfinite(a_max)] = 0
elif not np.isfinite(a_max):
a_max = 0
return cp.log(cp.sum(cp.exp(a - a_max), axis=axis, keepdims=True)) + a_max
def log_mean(x, axis):
return cp.squeeze(logsumexp(x, axis=axis) - cp.log(x.shape[axis]))
@cp.fuse()
def log_gaussian_pdf(x, sigma):
return -cp.log(sigma) - 0.5 * cp.log(2 * cp.pi) - 0.5 * (x / sigma) ** 2
@cp.fuse()
def estimate_log_intensity(log_density, log_occupancy, log_mean_rate):
return log_mean_rate + log_density - log_occupancy
@cp.fuse()
def estimate_intensity(log_density, log_occupancy, log_mean_rate):
return cp.exp(estimate_log_intensity(log_density, log_occupancy, log_mean_rate))
def estimate_log_position_distance(place_bin_centers, positions, position_std):
"""Estimates the Euclidean distance between positions and position bins.
Parameters
----------
place_bin_centers : cp.ndarray, shape (n_position_bins, n_position_dims)
positions : cp.ndarray, shape (n_time, n_position_dims)
position_std : array_like, shape (n_position_dims,)
Returns
-------
log_position_distance : np.ndarray, shape (n_time, n_position_bins)
"""
n_time, n_position_dims = positions.shape
n_position_bins = place_bin_centers.shape[0]
if isinstance(position_std, (int, float)):
position_std = [position_std] * n_position_dims
log_position_distance = cp.zeros((n_time, n_position_bins), dtype=cp.float32)
for position_ind, std in enumerate(position_std):
log_position_distance += log_gaussian_pdf(
cp.expand_dims(place_bin_centers[:, position_ind], axis=0)
- cp.expand_dims(positions[:, position_ind], axis=1),
std,
)
return log_position_distance
def estimate_log_position_density(
place_bin_centers, positions, position_std, block_size=100
):
"""Estimates a kernel density estimate over position bins using
Euclidean distances.
Parameters
----------
place_bin_centers : cp.ndarray, shape (n_position_bins, n_position_dims)
positions : cp.ndarray, shape (n_time, n_position_dims)
position_std : float or array_like, shape (n_position_dims,)
block_size : int
Returns
-------
log_position_density : cp.ndarray, shape (n_position_bins,)
"""
n_time = positions.shape[0]
n_position_bins = place_bin_centers.shape[0]
if block_size is None:
block_size = n_time
log_position_density = cp.empty((n_position_bins,))
for start_ind in range(0, n_position_bins, block_size):
block_inds = slice(start_ind, start_ind + block_size)
log_position_density[block_inds] = log_mean(
estimate_log_position_distance(
place_bin_centers[block_inds], positions, position_std
),
axis=0,
)
return log_position_density
@cuda.jit()
def log_mean_over_bins(log_mark_distances, log_position_distances, output):
"""
Parameters
----------
log_mark_distances : cp.ndarray, shape (n_decoding_spikes, n_encoding_spikes)
log_position_distances : cp.ndarray, shape (n_encoding_spikes, n_position_bins)
Returns
-------
output : cp.ndarray, shape (n_decoding_spikes, n_position_bins)
"""
thread_id = cuda.grid(1)
n_decoding_spikes, n_position_bins = output.shape
n_encoding_spikes = log_position_distances.shape[0]
decoding_ind = thread_id // n_position_bins
pos_bin_ind = thread_id % n_position_bins
if (decoding_ind < n_decoding_spikes) and (pos_bin_ind < n_position_bins):
# find maximum
max_exp = (
log_mark_distances[decoding_ind, 0]
+ log_position_distances[0, pos_bin_ind]
)
for encoding_ind in range(1, n_encoding_spikes):
candidate_max = (
log_mark_distances[decoding_ind, encoding_ind]
+ log_position_distances[encoding_ind, pos_bin_ind]
)
if candidate_max > max_exp:
max_exp = candidate_max
# logsumexp
tmp = 0.0
for encoding_ind in range(n_encoding_spikes):
tmp += math.exp(
log_mark_distances[decoding_ind, encoding_ind]
+ log_position_distances[encoding_ind, pos_bin_ind]
- max_exp
)
output[decoding_ind, pos_bin_ind] = math.log(tmp) + max_exp
# divide by n_spikes to get the mean
output[decoding_ind, pos_bin_ind] -= math.log(n_encoding_spikes)
def estimate_log_joint_mark_intensity(
decoding_marks,
encoding_marks,
mark_std,
log_position_distances,
log_occupancy,
log_mean_rate,
max_mark_diff=6000,
set_diag_zero=False,
):
"""Finds the joint intensity of the marks and positions in log space.
Parameters
----------
decoding_marks : cp.ndarray, shape (n_decoding_spikes, n_features)
encoding_marks : cp.ndarray, shape (n_encoding_spikes, n_features)
mark_std : float
        log_position_distances : cp.ndarray, shape (n_encoding_spikes, n_position_bins)
Precalculated distance between position and position bins.
log_occupancy : cp.ndarray, shape (n_position_bins,)
log_mean_rate : float
max_mark_diff : int
Maximum distance between integer marks.
set_diag_zero : bool
Returns
-------
log_joint_mark_intensity : np.ndarray, shape (n_decoding_spikes, n_position_bins)
"""
n_encoding_spikes, n_marks = encoding_marks.shape
n_decoding_spikes = decoding_marks.shape[0]
log_mark_distances = cp.zeros(
(n_decoding_spikes, n_encoding_spikes), dtype=cp.float32
)
log_normal_pdf_lookup = cp.asarray(
log_gaussian_pdf(cp.arange(-max_mark_diff, max_mark_diff), mark_std),
dtype=cp.float32,
)
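        # Integer mark differences index into this precomputed log-Gaussian lookup table
        # (offset by max_mark_diff so indices stay non-negative), avoiding per-pair pdf evaluations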
for mark_ind in range(n_marks):
log_mark_distances += log_normal_pdf_lookup[
(
cp.expand_dims(decoding_marks[:, mark_ind], axis=1)
- cp.expand_dims(encoding_marks[:, mark_ind], axis=0)
)
+ max_mark_diff
]
if set_diag_zero:
diag_ind = (cp.arange(n_decoding_spikes), cp.arange(n_decoding_spikes))
log_mark_distances[diag_ind] = cp.nan_to_num(cp.log(0).astype(cp.float32))
n_position_bins = log_position_distances.shape[1]
pdf = cp.empty((n_decoding_spikes, n_position_bins), dtype=cp.float32)
log_mean_over_bins.forall(pdf.size)(
log_mark_distances, log_position_distances, pdf
)
return cp.asnumpy(estimate_log_intensity(pdf, log_occupancy, log_mean_rate))
def fit_multiunit_likelihood_integer_gpu_log(
position,
multiunits,
place_bin_centers,
mark_std,
position_std,
is_track_boundary=None,
is_track_interior=None,
edges=None,
block_size=100,
use_diffusion=False,
**kwargs,
):
"""Fits the clusterless place field model.
Parameters
----------
position : np.ndarray, shape (n_time, n_position_dims)
multiunits : np.ndarray, shape (n_time, n_marks, n_electrodes)
place_bin_centers : np.ndarray, shape (n_bins, n_position_dims)
mark_std : float
Amount of smoothing for the mark features. Standard deviation of kernel.
position_std : float or array_like, shape (n_position_dims,)
Amount of smoothing for position. Standard deviation of kernel.
is_track_boundary : None or np.ndarray, shape (n_bins,)
is_track_interior : None or np.ndarray, shape (n_bins,)
edges : None or list of np.ndarray
block_size : int
Size of data to process in chunks
use_diffusion : bool
Use diffusion to respect the track geometry.
Returns
-------
encoding_model : dict
"""
if is_track_interior is None:
is_track_interior = np.ones((place_bin_centers.shape[0],), dtype=bool)
position = atleast_2d(position)
place_bin_centers = atleast_2d(place_bin_centers)
interior_place_bin_centers = cp.asarray(
place_bin_centers[is_track_interior.ravel(order="F")], dtype=cp.float32
)
gpu_is_track_interior = cp.asarray(is_track_interior.ravel(order="F"))
not_nan_position = np.all(~np.isnan(position), axis=1)
if use_diffusion & (position.shape[1] > 1):
n_total_bins = np.prod(is_track_interior.shape)
bin_diffusion_distances = diffuse_each_bin(
is_track_interior,
is_track_boundary,
dx=edges[0][1] - edges[0][0],
dy=edges[1][1] - edges[1][0],
std=position_std,
).reshape((n_total_bins, -1), order="F")
else:
bin_diffusion_distances = None
if use_diffusion & (position.shape[1] > 1):
log_occupancy = cp.log(
cp.asarray(
estimate_diffusion_position_density(
position[not_nan_position],
edges,
bin_distances=bin_diffusion_distances,
),
dtype=cp.float32,
)
)
else:
log_occupancy = cp.zeros((place_bin_centers.shape[0],), dtype=cp.float32)
log_occupancy[gpu_is_track_interior] = estimate_log_position_density(
interior_place_bin_centers,
cp.asarray(position[not_nan_position], dtype=cp.float32),
position_std,
block_size=block_size,
)
log_mean_rates = []
encoding_marks = []
encoding_positions = []
summed_ground_process_intensity = cp.zeros(
(place_bin_centers.shape[0],), dtype=cp.float32
)
for multiunit in np.moveaxis(multiunits, -1, 0):
# ground process intensity
is_spike = np.any(~np.isnan(multiunit), axis=1)
log_mean_rates.append(np.log(is_spike.mean()))
if is_spike.sum() > 0:
if use_diffusion & (position.shape[1] > 1):
log_marginal_density = cp.log(
cp.asarray(
estimate_diffusion_position_density(
position[is_spike & not_nan_position],
edges,
bin_distances=bin_diffusion_distances,
),
dtype=cp.float32,
)
)
else:
log_marginal_density = cp.zeros(
(place_bin_centers.shape[0],), dtype=cp.float32
)
log_marginal_density[
gpu_is_track_interior
] = estimate_log_position_density(
interior_place_bin_centers,
cp.asarray(
position[is_spike & not_nan_position], dtype=cp.float32
),
position_std,
block_size=block_size,
)
summed_ground_process_intensity += estimate_intensity(
log_marginal_density, log_occupancy, log_mean_rates[-1]
)
is_mark_features = np.any(~np.isnan(multiunit), axis=0)
encoding_marks.append(
cp.asarray(
multiunit[np.ix_(is_spike & not_nan_position, is_mark_features)],
dtype=cp.int16,
)
)
encoding_positions.append(position[is_spike & not_nan_position])
summed_ground_process_intensity = cp.asnumpy(
summed_ground_process_intensity
) + np.spacing(1)
return {
"encoding_marks": encoding_marks,
"encoding_positions": encoding_positions,
"summed_ground_process_intensity": summed_ground_process_intensity,
"log_occupancy": log_occupancy,
"log_mean_rates": log_mean_rates,
"mark_std": mark_std,
"position_std": position_std,
"block_size": block_size,
"bin_diffusion_distances": bin_diffusion_distances,
"use_diffusion": use_diffusion,
"edges": edges,
**kwargs,
}
def estimate_multiunit_likelihood_integer_gpu_log(
multiunits,
encoding_marks,
mark_std,
place_bin_centers,
encoding_positions,
position_std,
log_occupancy,
log_mean_rates,
summed_ground_process_intensity,
bin_diffusion_distances,
edges,
max_mark_diff=6000,
set_diag_zero=False,
is_track_interior=None,
time_bin_size=1,
block_size=100,
ignore_no_spike=False,
disable_progress_bar=False,
use_diffusion=False,
):
"""Estimates the likelihood of position bins given multiunit marks.
Parameters
----------
multiunits : np.ndarray, shape (n_decoding_time, n_marks, n_electrodes)
encoding_marks : cp.ndarray, shape (n_encoding_spikes, n_marks, n_electrodes)
mark_std : float
Amount of smoothing for mark features
place_bin_centers : cp.ndarray, shape (n_bins, n_position_dims)
encoding_positions : cp.ndarray, shape (n_encoding_spikes, n_position_dims)
position_std : float or array_like, shape (n_position_dims,)
Amount of smoothing for position
log_occupancy : cp.ndarray, (n_bins,)
log_mean_rates : list, len (n_electrodes,)
summed_ground_process_intensity : np.ndarray, shape (n_bins,)
bin_diffusion_distances : np.ndarray, shape (n_bins, n_bins)
edges : list of np.ndarray
max_mark_diff : int
Maximum difference between mark features
set_diag_zero : bool
Remove influence of the same mark in encoding and decoding.
is_track_interior : None or np.ndarray, shape (n_bins_x, n_bins_y)
time_bin_size : float
Size of time steps
block_size : int
Size of data to process in chunks
ignore_no_spike : bool
Set contribution of no spikes to zero
disable_progress_bar : bool
If False, a progress bar will be displayed.
use_diffusion : bool
Respect track geometry by using diffusion distances
Returns
-------
log_likelihood : (n_time, n_bins)
"""
if is_track_interior is None:
is_track_interior = np.ones((place_bin_centers.shape[0],), dtype=bool)
else:
is_track_interior = is_track_interior.ravel(order="F")
n_time = multiunits.shape[0]
if ignore_no_spike:
log_likelihood = (
-time_bin_size
* summed_ground_process_intensity
* np.zeros((n_time, 1), dtype=np.float32)
)
else:
log_likelihood = (
-time_bin_size
* summed_ground_process_intensity
* np.ones((n_time, 1), dtype=np.float32)
)
multiunits = np.moveaxis(multiunits, -1, 0)
n_position_bins = is_track_interior.sum()
interior_place_bin_centers = cp.asarray(
place_bin_centers[is_track_interior], dtype=cp.float32
)
gpu_is_track_interior = cp.asarray(is_track_interior)
interior_log_occupancy = log_occupancy[gpu_is_track_interior]
for multiunit, enc_marks, enc_pos, log_mean_rate in zip(
tqdm(multiunits, desc="n_electrodes", disable=disable_progress_bar),
encoding_marks,
encoding_positions,
log_mean_rates,
):
is_spike = np.any(~np.isnan(multiunit), axis=1)
is_mark_features = np.any(~np.isnan(multiunit), axis=0)
decoding_marks = cp.asarray(
multiunit[np.ix_(is_spike, is_mark_features)], dtype=cp.int16
)
n_decoding_marks = decoding_marks.shape[0]
log_joint_mark_intensity = np.zeros(
(n_decoding_marks, n_position_bins), dtype=np.float32
)
if block_size is None:
block_size = n_decoding_marks
if use_diffusion & (place_bin_centers.shape[1] > 1):
log_position_distances = cp.log(
cp.asarray(
estimate_diffusion_position_distance(
enc_pos,
edges,
bin_distances=bin_diffusion_distances,
)[:, is_track_interior],
dtype=cp.float32,
)
)
else:
log_position_distances = estimate_log_position_distance(
interior_place_bin_centers,
cp.asarray(enc_pos, dtype=cp.float32),
position_std,
).astype(cp.float32)
for start_ind in range(0, n_decoding_marks, block_size):
block_inds = slice(start_ind, start_ind + block_size)
log_joint_mark_intensity[
block_inds
] = estimate_log_joint_mark_intensity(
decoding_marks[block_inds],
enc_marks,
mark_std,
log_position_distances,
interior_log_occupancy,
log_mean_rate,
max_mark_diff=max_mark_diff,
set_diag_zero=set_diag_zero,
)
log_likelihood[np.ix_(is_spike, is_track_interior)] += np.nan_to_num(
log_joint_mark_intensity
)
mempool = cp.get_default_memory_pool()
mempool.free_all_blocks()
log_likelihood[:, ~is_track_interior] = np.nan
return log_likelihood
except ImportError:
def estimate_multiunit_likelihood_integer_gpu_log(*args, **kwargs):
print("Cupy is not installed or no GPU detected...")
def fit_multiunit_likelihood_integer_gpu_log(*args, **kwargs):
print("Cupy is not installed or no GPU detected...") | /replay_trajectory_classification-1.3.15-py3-none-any.whl/replay_trajectory_classification/likelihoods/multiunit_likelihood_integer_gpu_log.py | 0.924851 | 0.460228 | multiunit_likelihood_integer_gpu_log.py | pypi |
# Project CARS Replay Enhancer [](https://travis-ci.org/SenorPez/project-cars-replay-enhancer) [](https://codecov.io/gh/SenorPez/project-cars-replay-enhancer)
Combines telemetry data with replay video to improve Project CARS replays.
Current release: 0.6
Current edge state: Close to Smooth
Current mood: Excited
The Project CARS Replay Enhancer (I'd call it PCRE, [but that's taken](http://www.pcre.org/ "PCRE")) is intended to augment Project CARS replays by combining captured telemetry data with replay video.
The scripts are currently not fast enough for live broadcasting.
## Requirements
* [Python 3.3](https://www.python.org/download/releases/3.3.0/ "Python 3.3.0") or greater
* [moviepy](http://zulko.github.io/moviepy/ "moviepy")
* [natsort](https://pypi.python.org/pypi/natsort "natsort")
* [Pillow](https://pypi.python.org/pypi/Pillow "Pillow")
* [tqdm](https://pypi.python.org/pypi/tqdm "tqdm")
## Installation
### Short Version:
The Project CARS Replay Enhancer can be installed with `pip`: `pip install replayenhancer`.
### Longer Version:
Depending on your environment:
* You may need to use `sudo` or `sudo -H` to install the packages. For example: `sudo -H pip install replayenhancer`.
* You may need to explicitly call pip using your python installation. For example: `python -m pip install replayenhancer` or `python3 -m pip install replayenhancer`.
* You may need to manually install dependencies for some of the packages required by the replayenhancer. The following packages are required:
* GCC (`gcc`)
* Python development libraries (`python35-devel` or similar)
* ZLIB development libraries (`zlib-devel` or similar)
* JPEG development libraries (`libjpeg-turbo-devel` or similar)
For a complete list of commands to get the Project CARS Replay Enhancer running from a newly-created [Amazon EC2 Instance](https://aws.amazon.com/ec2/), see [Commands From Scratch](https://github.com/SenorPez/project-cars-replay-enhancer/wiki/Commands-From-Scratch).
## Usage
### Telemetry Capture:
Telemetry packet capture is performed by running the `packetcapture` command on a machine on the same network as the game. This captures the UDP packets broadcast by Project CARS (make sure you've enabled UDP broadcast) and stores them in a subdirectory for later processing.
> **NOTE:** As most internet video runs at 30 frames per second, you want to set your UDP broadcast rate to at least 30 packets per second, otherwise there may be noticeable "phasing" between video and data displays.
#### Telemetry Capture Best Practices:
There are a few things to do to optimize the telemetry data used by the Project CARS Replay Enhancer.
* Telemetry capture should be started before entering the race, ideally on the menu screen before clicking the **Drive** button.
* At the end of the race, do not click **Continue** or stop telemetry capture until all the **Race Time** numbers post. (Make sure you scroll down to see the later cars!) Even though you are on the results screen, the remaining cars are still finishing their laps, and this is captured by the telemetry.
* Each collection of telemetry packet captures should only contain a single race. Restarting the race, where the session allows it, is fine. The parser automatically detects the latest complete (start->finish) race in each collection; if multiple complete races are included in the telemetry, only the last one will be used.
### Video Capture:
There is no video capture functionality included in the scripts. How you get the video to your local machine is left as an exercise for the reader. For my PS4, I stream the replay to YouTube, which archives it; the archive can then be downloaded using [youtube-dl](https://rg3.github.io/youtube-dl/ "youtube-dl"). Video capture devices such as an Elgato work just fine as well (probably better, actually).
### Configuration Files:
Project CARS Replay Enhancer configuration files are JSON files, and can be created by using the [Project CARS Replay Enhancer UI](https://github.com/SenorPez/project-cars-replay-enhancer-ui) or by creating them by hand. See [Configuration File Format](https://github.com/SenorPez/project-cars-replay-enhancer/wiki/Configuration-File-Format) for details on recognized fields.
### Telemetry Synchronization:
For best results, the telemetry data feed must be synchronized to the video feed; this cannot be done automatically. To aid with this synchronization, run the Project CARS Replay Enhancer with the `-s` option and the desired configuration file. For example, `replayenhancer -s config.json`.
A low-quality video that encompasses only the first lap of the race is created, along with a timer overlay. To determine the telemetry synchronization, compare the time on this overlay with the lap time of a car as it crosses the start-finish line. These two times should be identical; if they are not, the synchronization needs to be adjusted, as in the worked example after the list below.
* If the value on the timer is greater than the lap time (this is the typical scenario), add the difference between the timer and the lap time to the telemetry synchronization value.
* If the value on the timer is less than the lap time, subtract the difference between the timer and the lap time from the telemetry synchronization value.
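For example (illustrative numbers only): if the overlay reads 1:31.6 as a car crosses the line but its posted lap time is 1:29.2, increase the synchronization value by 2.4 seconds; if the overlay instead reads 1:27.0 against that same 1:29.2 lap, decrease it by 2.2 seconds.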
After adjusting the telemetry synchronization, another low-quality, shortened video is created to confirm the synchronization.
For an illustrated tutorial of telemetry synchronization, see [Determining Telemetry Synchronization Value](https://github.com/SenorPez/project-cars-replay-enhancer/wiki/Determining-Telemetry-Synchronization-Value).
### Creating a Replay
To create the full, enhanced replay, provide the Project CARS Replay Enhancer with a valid configuration file. For example, `replayenhancer config.json`.
## Enhancing the Enhancer?
You're more than welcome to do so! Write new modules, speed up new modules, feel free. If you have any issues or questions please communicate them here! I'm always looking for help.
| /replayenhancer-0.6.1rc1-py3-none-any.whl/replayenhancer-0.6.1rc1.dist-info/DESCRIPTION.rst | 0.877109 | 0.810741 | DESCRIPTION.rst | pypi |
<p align='center'>
<img src='https://i.imgur.com/1vtNQHs.png' />
</p>
<p align='center'>
<em><strong>[ ˈrɛplɪkət ]</strong></em>
</p>
# Replicat
Configurable and lightweight backup utility with deduplication and encryption.
## Compatibility
Python 3.9 (or newer) running on Linux, MacOS, or Windows.
## Supported backup destinations
- local path
- Backblaze B2
- Amazon S3
- any S3-compatible service
You can implement and use your own adapter for pretty much any backup destination without
changing the source code of Replicat (more on that later).
## Installation
[It's available on PyPI](https://pypi.org/project/replicat), so
```bash
pip install replicat
```
## Reasoning
For various reasons, I wasn't 100% happy with any of the similar projects that I've tried.
It's likely that I will never be 100% happy with Replicat either, but at least it will be
easier for me to fix problems or add new features.
Highlights/goals:
- efficient, concise, easily auditable implementation
- high customizability
- few external dependencies
- well-documented behaviour
- unified repository layout
- API that exists
This project borrows a few ideas from those other projects, but not enough to be considered
a copycat.
# Introduction
You can use Replicat to backup files from your machine to a *repository*, located on a *backend*
such as a local directory or cloud storage (like Backblaze B2). Your files are transferred and stored
in an optionally encrypted and chunked form, and references to *chunks* (i.e. their hash digests)
are stored in *snapshots* along with file name and metadata.
Replicat supports two types of repositories: encrypted (the default) and unencrypted.
Chunks, snapshots, and all other pieces of data inside unencrypted repositories are stored
unencrypted. The storage names for chunks and snapshots are simply the hash digests of their
contents.
Currently, the only supported type of encryption is symmetric encryption. To use symmetric encryption,
you will need a key and the password associated with that key. A key contains parameters for the
KDF and an encrypted (private) section, which can only be decrypted by the owner of the key using
the matching password. That section contains secrets for the cryptographic primitives that control
how the data is split into chunks, visibility of chunks of data, and more.
You can create multiple keys with different passwords and settings. When adding a new key to a
repository with symmetric encryption, you'll have to unlock it with one of the existing keys.
You have a choice to either share secrets with the other key OR generate new secrets. Owners of
keys with shared secrets ("shared" keys) can use deduplication features *together*, i.e., chunks
of data that were uploaded by the owner of one such key can be accessed and decrypted by the owner
of the other key. Assume that they will also be able to check whether you have a specific piece
of data. To avoid such risk, you can create a key with new secrets (an "independent" key).
That way, Replicat will isolate your data and make it inaccessible to the owners of other keys.
Of course, if you use your key to create a yet another (new) key, you will also have the ability
to share your secrets with others, even if they were originally copied from some other key.
This creates a web of trust of sorts.
In contrast with unencrypted repositories, the storage name for the chunk is derived from
the hash digest of its contents **and** one of the aforementioned secrets, in order to reduce
the chance of successful "confirmation of file" attacks. The chunk itself is encrypted with
the combination of the hash digest of its contents **and** another one of those secrets, since
the usual convergent encryption is vulnerable to that same "confirmation of file" attack. The table
of chunk references inside a snapshot is encrypted similarly, but the list of files that reference
those chunks is encrypted using the key and the password that were used to unlock the repository,
and therefore can only be decrypted by the owner of that key (even in the case of shared secrets).
A snapshot created using an independent key will not be visible. A snapshot created using a
shared key will be visible, but there will be no available information about it beyond its storage
name and the table of chunk references.
## Deeper dive
You're about to see diagrams illustrating how replicat processes data, along with example contents
of the configuration file, keys, and snapshots. Here's the terminology:
- **`Encrypt(data, key)`/`Decrypt(data, key)`** -- encrypts/decrypts `data` with the encryption key
`key` using an authenticated encryption algorithm. It's normally used to encrypt/decrypt private
sections in keys, as well as chunks and snapshots.
- **`Hash(data)`** -- computes the hash digest of `data` using a hashing algorithm.
It's used to check integrity of data and to derive encryption keys for chunks and snapshots.
- **`Mac(data, key)`** -- computes the message authentication code for `data` using suitable `key`
and a MAC algorithm. It's mainly used to verify ownership of chunks.
- **`SlowKdf(ikm, salt[, context])`/`FastKdf(ikm, salt[, context])`** -- calls a "slow"/"fast" key derivation
function to obtain an encryption key from `ikm` using `salt` and an optional `context`. As a general rule,
replicat uses "slow" KDF for low-entropy inputs and "fast" KDF for high-entropy inputs. The output length
will match the encryption key length of the chosen encryption algorithm.
- **`UserKey`** -- encryption key derived as `SlowKdf(Password, UserKdfParams)`, where `Password`
is the user's password and `UserKdfParams` is the salt. `UserKey` is used to encrypt sensitive
personal data: private sections in keys and file metadata in snapshots.
- **`SharedKey`**, **`SharedKdfParams`**, **`SharedMacKey`**, **`SharedChunkerKey`** -- secrets stored in
the private sections of keys. `SharedKey` and `SharedKdfParams` are used to derive encryption keys using
"fast" KDF (they will encrypt shared data, like chunks and chunk references). `SharedMacKey` is the MAC key.
`SharedChunkerKey` personalises content-defined chunking (CDC) to prevent watermarking attacks.
- **`GetChunkLocation(name, authentication_tag)`/`GetSnapshotLocation(name, authentication_tag)`** -- obtains the
location for a chunk/snapshot using its name and the corresponding authentication tag.
- **`Upload(data, location)`** -- uploads `data` to the backend to the given `location`.
- **`Download(location)`** -- downloads data from the backend at the given `location`.




# Command line interface
The installer will create the `replicat` command (same as `python -m replicat`).
There are several available subcommands:
- `init` -- initializes the repository using the provided settings
- `snapshot` -- creates a new snapshot in the repository
- `list-snapshots`/`ls` -- lists snapshots
- `list-files`/`lf` -- lists files across snapshots
- `restore` -- restores files from snapshots
- `add-key` -- creates a new key for the encrypted repository
- `delete` -- deletes snapshots by their names
- `clean` -- performs garbage collection
- `upload` -- uploads files to the backend (no chunking, no encryption, keeping original names)
> ⚠️ **WARNING**: actions that read from or upload to the repository can safely be run
> concurrently; however, there are presently no guards in place that would make it safe
> for you to run destructive actions (`delete`, `clean`) concurrently with those actions
> *unless* you use independent keys (see the explanation above). I do plan to implement them
> soon-ish, but in the meantime **DO NOT** use shared keys (or, naturally, the same key)
> to `snapshot` and `clean` at the same time, for example.
>
> As far as the upcoming implementation of such guards, it'll be based on locks. I'm familiar
> with the lock-free deduplication strategy (like in Duplicacy), but I don't like it much.
There are several command line arguments that are common to all subcommands:
- `-r`/`--repository` -- used to specify the type and location of the repository backend
(backup destination). The format is `<backend>:<connection string>`, where `<backend>` is
the short name of a Replicat-compatible backend and `<connection string>` is open to
interpretation by the adapter for the selected backend. Examples:
`b2:bucket-name` for the B2 backend or `local:some/local/path` for the local backend
(or just `some/local/path`, since the `<backend>:` part can be omitted for local
destinations). If the backend requires additional arguments, they will appear in the
`--help` output. Refer to the section on backends for more detailed information.
- `-q`/`--hide-progress` -- suppresses progress indication for commands that support it
- `-c`/`--concurrent` -- the number of concurrent connections to the backend
- `--cache-directory` -- specifies the directory to use for cache. `--no-cache` disables
cache completely.
- `-v`/`--verbose` -- specifies the logging verbosity. The default verbosity is `WARNING`,
`-v` means `INFO`, `-vv` means `DEBUG`.
Encrypted repositories require a key and a matching password for every operation:
- `-K`/`--key-file` -- the path to the key file
- `-p`/`--password` -- the password in plaintext. **However**, it's more secure to provide the
password in a file via the `-P`/`--password-file` argument, or as an environment variable
`REPLICAT_PASSWORD`.
## `init` examples
```bash
# Unencrypted repository in some/directory. The --encryption none flag disables encryption
$ replicat init -r some/directory --encryption none
# Encrypted repository with initial password taken from string.
# The new key will be printed to stdout
$ replicat init -r some/directory -p 'password string'
# Encrypted repository with initial password taken from a file.
# The new key will be written to path/to/key/file
$ replicat init -r some/directory -P path/to/password/file -o path/to/key/file
# Specifies the cipher
$ replicat init -r some/directory -p '...' --encryption.cipher.name chacha20_poly1305
# Specifies the cipher name and parameters
$ replicat init -r some/directory \
-p '...' \
--encryption.cipher.name aes_gcm \
--encryption.cipher.key_bits 128
# Specifies the KDF name and parameters (for the key)
$ replicat init -r some/directory \
-p '...' \
--encryption.kdf.name scrypt \
--encryption.kdf.n 1048576
# Specifies the chunking parameters
$ replicat init -r some/directory \
-p '...' \
--chunking.min-length 128_000 \
--chunking.max-length 2_048_000
# Equivalent (dashes in argument names are converted to underscores)
$ replicat init -r some/directory \
-p '...' \
--chunking.min_length 128_000 \
--chunking.max_length 2_048_000
```
## `snapshot` examples
```bash
# Unlocks the repository, uploads provided files in encrypted chunks,
# using no more than 10 concurrent connections, creating a snapshot
$ replicat snapshot -r some/directory \
-P path/to/password/file \
-K path/to/key/file \
-c 10 \
-n 'A note (optional)'
image.jpg some-directory another-directory and/more.text
```
## `list-snapshots`/`ls` examples
```bash
# Unlocks the repository and lists all of the snapshots
$ replicat list-snapshots -r some/directory -P path/to/password/file -K path/to/key/file
# Equivalent
$ replicat ls -r some/directory -P path/to/password/file -K path/to/key/file
```
## `list-files`/`lf` examples
```bash
# Unlocks the repository and lists all versions of all the files
$ replicat list-files -r some/directory -P path/to/password/file -K path/to/key/file
# Equivalent
$ replicat lf -r some/directory -P path/to/password/file -K path/to/key/file
# Only lists files with paths matching the -F regex
$ replicat lf -r some/directory \
-P path/to/password/file \
-K path/to/key/file \
-F '\.(jpg|text)$'
```
## `restore` examples
```bash
# Unlocks the repository and restores the latest versions of all files to target-directory
$ replicat restore -r some/directory \
-P path/to/password/file \
-K path/to/key/file \
target-directory
# Unlocks the repository and restores the latest versions of files with paths matching the
# -F regex in snapshots matching the -S regex to 'target-directory'
$ replicat restore -r some/directory \
-P path/to/password/file \
-K path/to/key/file \
-F '\.(jpg|text)$' \
-S 'abcdef' \
target-directory
```
## `add-key` examples
```bash
# Unlocks the repository and creates an independent key, which will be printed to stdout
$ replicat add-key -r some/directory -P path/to/password/file -K path/to/key/file
# Unlocks the repository and creates a shared key (i.e. with shared secrets)
$ replicat add-key -r some/directory -P path/to/password/file -K path/to/key/file --shared
# Unlocks the repository and creates an independent key, which will be written
# to path/to/new/key/file
$ replicat add-key -r some/directory \
-P path/to/password/file \
-K path/to/key/file \
-o path/to/new/key/file
# Unlocks the repository and creates an independent key with some custom settings
# (cipher params as well as chunking and hashing settings are repository-wide)
$ replicat add-key -r some/directory \
-P path/to/password/file \
-K path/to/key/file \
--encryption.kdf.name scrypt \
--encryption.kdf.n 1048576
```
## `delete` examples
```bash
# Unlocks the repository and deletes snapshots by name (as returned by ls/list-snapshots).
# Chunks that aren't referenced by any other snapshot will be deleted automatically
$ replicat delete -r some/directory \
-P path/to/password/file \
-K path/to/key/file \
NAME1 NAME2 NAME3 ...
```
## `clean` examples
```bash
# Unlocks the repository and deletes all chunks that are not referenced by any snapshot
$ replicat clean -r some/directory -P path/to/password/file -K path/to/key/file
```
## `upload` examples
```bash
# Uploads files directly to the backend without any additional processing.
# File path -> resulting name:
# /working/directory/some/file -> some/file
# /working/directory/another/file -> another/file
# /working/directory/another/directory/another-file -> another/directory/another-file
# /absolute/directory/path/with-file -> absolute/directory/path/with-file
# /absolute/file -> absolute/file
/working/directory$ replicat upload -r some:repository \
some/file \
/working/directory/another/directory \
/absolute/directory/path \
/absolute/file
# Uploads files that do not yet exist in the repository (only checks the file names)
$ replicat upload -r some:repository --skip-existing some/file some/directory
```
## Check version
```bash
replicat --version
```
# Backends
Run `replicat` commands with `-r <backend>:<connection string>` and additional arguments
that are specific to the selected backend. Those arguments may have defaults and may also
be provided via environment variables. Use
```bash
replicat <command> -r <backend>:<connection string> --help
```
to see them.
## Local
The format is `-r local:some/local/path` or simply `-r some/local/path`.
## B2
The format is `-r b2:bucket-id` or `-r b2:bucket-name`. This backend uses B2 native API.
The required arguments are `--key-id` (keyId) and `--application-key` (applicationKey).
Sign into your Backblaze B2 account to generate them. You can use master application key
or a normal application key (which can also be restricted to a single bucket).
## S3
The format is `-r s3:bucket-name`. Requires arguments `--key-id`, `--access-key`, and
`--region`.
## S3-compatible
The format is `-r s3c:bucket-name`. Requires arguments `--key-id`, `--access-key`,
`--region`, and `--host`. `--host` must *not* include the scheme. The default scheme is
`https`, but can be changed via the `--scheme` argument.
# Custom backends
`replicat.backends` is a namespace package, making it possible to add custom backends
without changing `replicat` source code.
Suppose your backend of choice is a hypothetical low low cost cloud storage
Proud Cloud (`pc` for short). The most barebones implementation of the
Replicat-compatible adapter for the `pc` backend will require a directory with
the following structure:
```bash
$ tree proud-cloud/
proud-cloud/
└── replicat
└── backends
└── pc.py
```
The `-r` argument of `replicat` commands will take the form of `-r pc:<connection string>`.
Replicat will use it to locate the `pc` module inside the `replicat.backends` package,
load the `replicat.backends.pc.Client` class, and pass the `<connection string>`
to its constructor to create the backend instance. In case there are some additional
parameters that are required to connect to Proud Cloud (account id, secret token, etc.),
you should add them to the `replicat.backends.pc.Client` constructor as keyword-only arguments.
If present, Replicat will generate the corresponding command line arguments with defaults *and*
you'll even be able to use environment variables to provide them.
`replicat.backends.pc.Client` must subclass `replicat.backends.base.Backend` and implement all
of the methods marked as abstract. You could use implementations of existing
`replicat.backends.base.Backend` subclasses as examples. To make your implementation visible
to Replicat, you'll need to add it to the module search path before invoking `replicat`
(you could use the
[`PYTHONPATH`](https://docs.python.org/3/using/cmdline.html#envvar-PYTHONPATH) environment
variable for that).
Here's an example:
```python
# ./proud-cloud/replicat/backends/pc.py
from .base import Backend
class ProudCloud(Backend):
def __init__(self, connection_string, *, account_id, secret, port=9_876, legacy=False):
print(f'PC args: {connection_string=}, {account_id=}, {secret=}, {port=}, {legacy=}')
...
...
Client = ProudCloud
```
```bash
$ PYTHONPATH=proud-cloud replicat init -r pc:... --help
usage: replicat init [-h] [-r REPO] [-q] [-c CONCURRENT] [-v] [-K KEYFILE]
[-p PASSWORD | -P PASSWORD_FILE_PATH] [--account-id ACCOUNT_ID]
[--secret SECRET] [--port PORT] [--legacy LEGACY] [-o KEY_OUTPUT_FILE]
optional arguments:
...
arguments specific to the ProudCloud backend:
--account-id ACCOUNT_ID
or the PROUDCLOUD_ACCOUNT_ID environment variable
--secret SECRET or the PROUDCLOUD_SECRET environment variable
--port PORT or the PROUDCLOUD_PORT environment variable, or the constructor default 9876
--legacy LEGACY or the PROUDCLOUD_LEGACY environment variable, or the constructor default False
```
```bash
$ PYTHONPATH=proud-cloud PROUDCLOUD_LEGACY=true PROUDCLOUD_SECRET='pr0ud' \
replicat init -r pc:... \
--account-id 12345 \
--port 9877
PC args: connection_string='...', account_id=12345, secret='pr0ud', port=9877, legacy=True
...
```
If you've created a Replicat-compatible adapter for a backend that Replicat doesn't already
support *and* your implementation doesn't depend on additional third-party libraries
(or at least they are not too heavy and can be moved to extras), consider submitting a PR
to include it in this repository.
# Security
If you believe you've found a security issue with Replicat, please report it to
[flwaultah@gmail.com](mailto:flwaultah@gmail.com) (or DM me on Twitter or Telegram).
| /replicat-1.2.0.tar.gz/replicat-1.2.0/README.md | 0.617974 | 0.93276 | README.md | pypi |
import io
import os
from typing import Dict, List, Union
import replicate
from jko_api_utils.utils.save_data import save_data
from tqdm import tqdm
def call_whisper_api(version, filepath, **params):
# Transcribe the audio
try:
result = version.predict(
audio=open(f"{filepath}", "rb"), **params
)["transcription"]
except Exception as e:
print("Error: ", e)
result = None
# Return the result
return result
def transcribe(target: Union[str, List[str]], dest: str = None, duplicate: str = "skip", return_data=True, create_dirs=False, version=None, **params: Dict) -> List[str]:
"""
Transcribes one or more audio files or directories of audio files using the openai/whisper model.
:param target: A file path or a list of file paths to the audio files to transcribe.
:param dest: The folder where the transcribed files should be saved.
:param duplicate: What to do if a transcribed file already exists in the `dest` folder. Can be "skip", "overwrite",
or "rename".
:param return_data: Whether to return the data as a string. If False, an error is raised if dest is None.
:param create_dirs: Whether to create the directories in dest if they don't exist.
:param version: The version of the openai/whisper model to use. If None, the default version is used.
:param params: Additional parameters to pass to the transcription model.
:return: A list of transcribed text strings.
"""
if version is None:
version = replicate.models.get("openai/whisper").versions.get(
"30414ee7c4fffc37e260fcab7842b5be470b9b840f2b608f5baa9bbef9a259ed")
# Determine the input type of the `target` parameter and call the corresponding function
if isinstance(target, str) and os.path.isfile(target):
results = transcribe_files(
version, [target], dest, duplicate, return_data, create_dirs, **params)
elif isinstance(target, str) and os.path.isdir(target):
results = transcribe_dir(
version, target, dest, duplicate, return_data, create_dirs, **params)
elif isinstance(target, list) and all([os.path.isfile(item) for item in target]):
results = transcribe_files(
version, target, dest, duplicate, return_data, create_dirs, **params)
elif isinstance(target, list) and all([os.path.isdir(item) for item in target]):
results = transcribe_dirs(
version, target, dest, duplicate, return_data, create_dirs, **params)
else:
raise ValueError("Invalid input type for `target`")
return results
def transcribe_file(version, filepath, dest=None, duplicate="skip", return_data=True, create_dirs=False, **params):
# Transcribe the file
# File name with text extension
result = call_whisper_api(version, filepath, **params)
if dest is not None:
new_file_name = os.path.basename(filepath).replace(".wav", ".txt")
new_file_path = os.path.join(dest, new_file_name)
else:
new_file_path = None
return save_data(result, dest_file=new_file_path, return_data=return_data, create_dirs=create_dirs)
def transcribe_files(version, filepaths, dest=None, duplicate="skip", return_data=True, create_dirs=False, **params):
results = []
for filepath in tqdm(filepaths, total=len(filepaths), desc="Transcribing files"):
# Transcribe the file
result = transcribe_file(
version, filepath, dest, duplicate, return_data, create_dirs, **params)
        # transcribe_file already saves to dest (when given); just collect the result
        results.append(result)
return list(filter(None, results))
def transcribe_dir(version, folder, dest=None, duplicate="skip", return_data=True, create_dirs=False, **params):
# Get the list of filepaths in the folder
filepaths = [os.path.join(folder, filename)
for filename in os.listdir(folder)]
return transcribe_files(version, filepaths, dest, duplicate, return_data, create_dirs, **params)
def transcribe_dirs(version, folders, dest=None, duplicate="skip", return_data=True, create_dirs=False, **params):
results = []
for folder in tqdm(folders, total=len(folders), desc="Transcribing folders"):
result = transcribe_dir(version, folder, dest, duplicate, return_data, create_dirs, **params)
results.extend(result)
return results | /replicate_api_utils-0.1.6.tar.gz/replicate_api_utils-0.1.6/replicate_api_utils/whisper/main.py | 0.723114 | 0.313183 | main.py | pypi |
# Replicate Python client
This is a Python client for [Replicate](https://replicate.com). It lets you run models from your Python code or Jupyter notebook, and do various other things on Replicate.
> **👋** Check out an interactive version of this tutorial on [Google Colab](https://colab.research.google.com/drive/1K91q4p-OhL96FHBAVLsv9FlwFdu6Pn3c).
>
> [](https://colab.research.google.com/drive/1K91q4p-OhL96FHBAVLsv9FlwFdu6Pn3c)
## Install
```sh
pip install replicate
```
## Authenticate
Before running any Python scripts that use the API, you need to set your Replicate API token in your environment.
Grab your token from [replicate.com/account](https://replicate.com/account) and set it as an environment variable:
```
export REPLICATE_API_TOKEN=<your token>
```
We recommend not adding the token directly to your source code, because you don't want to put your credentials in source control. If anyone used your API key, their usage would be charged to your account.
## Run a model
Create a new Python file and add the following code:
```python
>>> import replicate
>>> replicate.run(
"stability-ai/stable-diffusion:27b93a2413e7f36cd83da926f3656280b2931564ff050bf9575f1fdf9bcd7478",
input={"prompt": "a 19th century portrait of a wombat gentleman"}
)
['https://replicate.com/api/models/stability-ai/stable-diffusion/files/50fcac81-865d-499e-81ac-49de0cb79264/out-0.png']
```
Some models, like [methexis-inc/img2prompt](https://replicate.com/methexis-inc/img2prompt), receive images as inputs. To pass a file as an input, use a file handle or URL:
```python
>>> output = replicate.run(
"salesforce/blip:2e1dddc8621f72155f24cf2e0adbde548458d3cab9f00c0139eea840d0ac4746",
input={"image": open("path/to/mystery.jpg", "rb")},
)
"an astronaut riding a horse"
```
## Run a model in the background
You can start a model and run it in the background:
```python
>>> model = replicate.models.get("kvfrans/clipdraw")
>>> version = model.versions.get("5797a99edc939ea0e9242d5e8c9cb3bc7d125b1eac21bda852e5cb79ede2cd9b")
>>> prediction = replicate.predictions.create(
version=version,
input={"prompt":"Watercolor painting of an underwater submarine"})
>>> prediction
Prediction(...)
>>> prediction.status
'starting'
>>> dict(prediction)
{"id": "...", "status": "starting", ...}
>>> prediction.reload()
>>> prediction.status
'processing'
>>> print(prediction.logs)
iteration: 0, render:loss: -0.6171875
iteration: 10, render:loss: -0.92236328125
iteration: 20, render:loss: -1.197265625
iteration: 30, render:loss: -1.3994140625
>>> prediction.wait()
>>> prediction.status
'succeeded'
>>> prediction.output
'https://.../output.png'
```
## Run a model in the background and get a webhook
You can run a model and get a webhook when it completes, instead of waiting for it to finish:
```python
model = replicate.models.get("kvfrans/clipdraw")
version = model.versions.get("5797a99edc939ea0e9242d5e8c9cb3bc7d125b1eac21bda852e5cb79ede2cd9b")
prediction = replicate.predictions.create(
version=version,
input={"prompt":"Watercolor painting of an underwater submarine"},
webhook="https://example.com/your-webhook",
webhook_events_filter=["completed"]
)
```
## Compose models into a pipeline
You can run a model and feed the output into another model:
```python
laionide = replicate.models.get("afiaka87/laionide-v4").versions.get("b21cbe271e65c1718f2999b038c18b45e21e4fba961181fbfae9342fc53b9e05")
swinir = replicate.models.get("jingyunliang/swinir").versions.get("660d922d33153019e8c263a3bba265de882e7f4f70396546b6c9c8f9d47a021a")
image = laionide.predict(prompt="avocado armchair")
upscaled_image = swinir.predict(image=image)
```
## Get output from a running model
Run a model and get its output while it's running:
```python
iterator = replicate.run(
"pixray/text2image:5c347a4bfa1d4523a58ae614c2194e15f2ae682b57e3797a5bb468920aa70ebf",
input={"prompts": "san francisco sunset"}
)
for image in iterator:
display(image)
```
## Cancel a prediction
You can cancel a running prediction:
```python
>>> model = replicate.models.get("kvfrans/clipdraw")
>>> version = model.versions.get("5797a99edc939ea0e9242d5e8c9cb3bc7d125b1eac21bda852e5cb79ede2cd9b")
>>> prediction = replicate.predictions.create(
version=version,
input={"prompt":"Watercolor painting of an underwater submarine"}
)
>>> prediction.status
'starting'
>>> prediction.cancel()
>>> prediction.reload()
>>> prediction.status
'canceled'
```
## List predictions
You can list all the predictions you've run:
```python
replicate.predictions.list()
# [<Prediction: 8b0ba5ab4d85>, <Prediction: 494900564e8c>]
```
## Load output files
Output files are returned as HTTPS URLs. You can load an output file as a buffer:
```python
import replicate
from urllib.request import urlretrieve
model = replicate.models.get("stability-ai/stable-diffusion")
version = model.versions.get("27b93a2413e7f36cd83da926f3656280b2931564ff050bf9575f1fdf9bcd7478")
out = version.predict(prompt="wavy colorful abstract patterns, cgsociety")
urlretrieve(out[0], "/tmp/out.png")
background = Image.open("/tmp/out.png")
```
## Development
See [CONTRIBUTING.md](CONTRIBUTING.md)
| /replicate-0.9.0.tar.gz/replicate-0.9.0/README.md | 0.719778 | 0.927363 | README.md | pypi |
from gi.repository import Gtk
from src.model_mapper import ModelMapper
class DeleteDatabasesDialog:
def __init__(self, builder):
self._win = builder.get_object('dialog_delete_databases', target=self, include_children=True)
self._databases = None
self._selected_databases = None
def run(self, databases):
self._databases = databases
self._selected_databases = None
result = self._win.run()
self._win.hide()
self._databases = None
if result == Gtk.ResponseType.OK:
self._selected_databases = self._get_selected_databases()
return result
def _get_selected_databases(self):
selected_databases = []
model = self.treeview_delete_databases.get_model()
itr = model.get_iter_first()
while itr is not None:
if model.get_value(itr, 1):
path = model.get_path(itr)
row = model[path]
db = ModelMapper.get_item_instance(row)
selected_databases.append(db)
itr = model.iter_next(itr)
return selected_databases
def set_button_ok_active_state(self):
sensitive = len(self._get_selected_databases()) > 0
self.button_delete_databases_dialog_ok.set_sensitive(sensitive)
# region Properties
@property
def selected_databases(self):
return self._selected_databases
# endregion
# region Events
def on_dialog_delete_databases_show(self, dialog):
model = Gtk.ListStore(str, bool, object)
for db in self._databases:
mapper = ModelMapper(db, [lambda i: i.db_name, lambda i: True])
model.append(mapper)
self.treeview_delete_databases.set_model(model)
model.connect('row-changed', self.on_row_changed)
self.set_button_ok_active_state()
def on_button_delete_databases_dialog_ok(self, button):
self._win.response(Gtk.ResponseType.OK)
def on_button_delete_databases_dialog_cancel(self, button):
self._win.response(Gtk.ResponseType.CANCEL)
def on_cellrenderertoggle_delete_toggled(self, widget, path):
model = self.treeview_delete_databases.get_model()
itr = model.get_iter(path)
val = model.get_value(itr, 1)
model.set(itr, 1, not val)
def on_row_changed(self, model, path, iter):
self.set_button_ok_active_state()
# endregion | /replication-monitor-0.1.9.tar.gz/replication-monitor-0.1.9/ui/dialogs/delete_databases_dialog.py | 0.674158 | 0.230389 | delete_databases_dialog.py | pypi |
from gi.repository import GObject
from gi.repository import Gtk
class ListViewModel(GObject.Object, Gtk.TreeModel):
class ColDefinition:
def __init__(self, name, col_type):
self._name = name
self._type = col_type
@property
def name(self):
return self._name
@property
def type(self):
return self._type
@property
def compare(self):
def compare_strings(a, b):
return -1 if a < b else 1 if a > b else 0
return compare_strings if self.type == str else None
class Sorted(Gtk.TreeModelSort, Gtk.TreeDragDest, Gtk.TreeDragSource):
def __init__(self, child_model):
super().__init__(child_model)
self._attach_sort_functions(child_model)
def __getitem__(self, item):
return self.get_model()[item]
def __setitem__(self, key, value):
self.get_model()[key] = value
def append(self, row):
return self.get_model().append(row)
def remove(self, it):
return self.get_model().remove(it)
def clear(self):
return self.get_model().clear()
def _attach_sort_functions(self, child_model):
for (i, item) in enumerate(child_model.cols):
if item.compare is not None:
def compare_cols(compare, name):
def callback(m, it_x, it_y, _):
x = child_model[it_x]
y = child_model[it_y]
return compare(getattr(x, name), getattr(y, name))
return callback
super().set_sort_func(i, compare_cols(item.compare, item.name))
# region Event overrides
def do_drag_data_delete(self, path):
do_drag_data_delete = getattr(super().get_model(), 'do_drag_data_delete', None)
return do_drag_data_delete(path) if callable(do_drag_data_delete) else False
def do_drag_data_get(self, path, selection):
do_drag_data_get = getattr(super().get_model(), 'do_drag_data_get', None)
if callable(do_drag_data_get):
do_drag_data_get(path, selection)
def do_row_draggable(self, path):
do_row_draggable = getattr(super().get_model(), 'do_row_draggable', None)
return do_row_draggable(path) if callable(do_row_draggable) else False
def get_iter_first(self):
return self.get_model().get_iter_first()
def get_iter(self, path):
return self.get_model().get_iter(path)
def iter_next(self, it):
return self.get_model().iter_next(it)
def get_path(self, it):
return self.get_model().get_path(it)
# endregion
def __init__(self, cols):
super().__init__()
self._cols = cols
self._data = []
def __getitem__(self, item):
index = self._get_index(item)
return self._data[index]
def __setitem__(self, key, value):
index = self._get_index(key)
self._data[index] = value
it = self._get_iter(index)
super().emit('row-changed', self.do_get_path(it), it)
def append(self, row):
self._data.append(row)
it = self._get_iter(len(self._data) - 1)
super().row_inserted(self.do_get_path(it), it)
def remove(self, it):
super().row_deleted(self.do_get_path(it))
index = self._get_index(it)
self._data.pop(index)
def clear(self):
for index in range(len(self._data) - 1, -1, -1):
path = self._get_path(index)
super().row_deleted(path)
self._data.clear()
@property
def cols(self):
return self._cols
@property
def rows(self):
return self._data
# region Model overrides
def do_get_flags(self):
return Gtk.TreeModelFlags.LIST_ONLY
def do_get_n_columns(self):
return len(self._cols)
def do_get_column_type(self, n):
return self._cols[n].type
def get_iter_first(self):
it = None
if len(self._data) > 0:
it = self._get_iter(0)
return it
def do_get_iter(self, path):
# Return False and an empty iter when out of range
index = path.get_indices()[0]
if index < 0 or index >= len(self._data):
return False, None
it = self._get_iter(index)
return True, it
def do_get_path(self, it):
return self._get_path(it)
def do_get_value(self, it, column):
index = self._get_index(it)
row = self._data[index]
name = self._cols[column].name
func = name if callable(name) else None
if func:
value = func(row)
else:
value = getattr(row, name, None)
return value
def do_iter_next(self, it):
# Return False if there is not a next item
next = self._get_index(it) + 1
if next >= len(self._data):
return False
# Set the iters data and return True
it.user_data = next
return True
def do_iter_previous(self, it):
prev = self._get_index(it) - 1
if prev < 0:
return False
it.user_data = prev
return True
def do_iter_children(self, parent):
# If parent is None return the first item
if parent is None:
it = self._get_iter(0)
return True, it
return False, None
def do_iter_has_child(self, it):
return it is None
def do_iter_n_children(self, it):
# If iter is None, return the number of top level nodes
if it is None:
return len(self._data)
return 0
def do_iter_nth_child(self, parent, n):
if parent is not None or n >= len(self._data):
return False, None
elif parent is None:
# If parent is None, return the nth iter
it = self._get_iter(n)
return True, it
def do_iter_parent(self, child):
return False, None
def do_drag_data_delete(self):
return False
def do_drag_data_get(self, path, selection):
pass
def do_row_draggable(self, path):
return True
# endregion
# region Static methods
@staticmethod
def _get_index(value):
index = value
if isinstance(value, str):
index = int(value)
elif isinstance(value, Gtk.TreePath):
index = value.get_indices()[0]
elif isinstance(value, Gtk.TreeIter):
index = value.user_data
return index
@staticmethod
def _get_iter(value):
it = Gtk.TreeIter()
if isinstance(value, str):
it.user_data = int(value)
elif isinstance(value, Gtk.TreePath):
it.user_data = value.get_indices()[0]
elif isinstance(value, int):
it.user_data = value
return it
@staticmethod
def _get_path(value):
path = None
if isinstance(value, str):
path = Gtk.TreePath((int(value),))
elif isinstance(value, Gtk.TreeIter):
path = Gtk.TreePath((ListViewModel._get_index(value),))
elif isinstance(value, int):
path = Gtk.TreePath((value,))
return path
# endregion | /replication-monitor-0.1.9.tar.gz/replication-monitor-0.1.9/src/listview_model.py | 0.769643 | 0.174445 | listview_model.py | pypi |
import threading
from gi.repository import Gtk, GObject
class GtkHelper:
"""
A class which makes living with GTK and multiple threads slightly easier
"""
@staticmethod
def is_gtk_thread():
"""
Determines if the current thread is the main GTK thread
:return: True if the current thread is the main GTK thread, False otherwise
"""
return threading.current_thread().name == 'MainThread'
@staticmethod
def invoke(func, run_async=True):
"""
Invokes a callable func on the main GTK thread
:param func: The callable to invoke
:param run_async: When True the callable will execute asynchronously
:return: the result of func if executed on the main thread or synchronously, otherwise None
"""
result = None
if GtkHelper.is_gtk_thread():
result = func()
else:
event = threading.Event() if run_async is not True else None
def task():
nonlocal func, result
result = func()
if event is not None:
event.set()
GObject.idle_add(task)
if event is not None:
event.wait()
return result
@staticmethod
def idle(task):
"""
Adds a task to the Gtk queue for processing
:param task: the task (function/lambda) to run
:return: nothing
"""
GObject.idle_add(task)
@staticmethod
def invoke_func(func):
"""
A decorator for functions which should be run on the main Gtk thread. The function is
executed asynchronously
:param func: The callable to run on the UI thread
:return: nothing
"""
def inner(*args, **kwargs):
GtkHelper.invoke(lambda: func(*args, **kwargs))
return inner
@staticmethod
def invoke_func_sync(func):
"""
A decorator for functions which should be run on the main Gtk thread. If run from a non-UI
thread the caller will block until the function completes
:param func: The callable to run on the UI thread
:return: The value returned by the callable
"""
def inner(*args, **kwargs):
return GtkHelper.invoke(lambda: func(*args, **kwargs), False)
return inner
@staticmethod
def run_dialog(win, message_type, buttons_type, msg):
dialog = Gtk.MessageDialog(win, 0, message_type, buttons_type, msg)
response = dialog.run()
dialog.destroy()
return response | /replication-monitor-0.1.9.tar.gz/replication-monitor-0.1.9/src/gtk_helper.py | 0.705481 | 0.33674 | gtk_helper.py | pypi |
from gi.repository import Gtk
import xml.etree.ElementTree as ET
import codecs
class Builder:
"""
A class which behaves a bit like the GTK builder - but is more useful.
It will load a glade file and create member variables for child objects
and wire events to member functions.
"""
_builder = Gtk.Builder()
def __init__(self, filename):
"""
Construct a builder object based on a glade file
:param filename: The path to the glade file
:return: Nothing
"""
self._builder.add_from_file(filename)
with codecs.open(filename, 'r', 'utf-8') as f:
ui = f.read()
self.ui_root = ET.fromstring(ui)
def get_object(self, ui_id, target=None, include_children=False):
"""
Finds the object in the glade DOM and optionally wires up signals and creates member fields for
GTK+ child objects.
:param ui_id: The id of the object in the glade DOM
:param target: The target python object to wire GTK+ events to
:param include_children: When True an instance variable will be added to target for each child GTK+ object
:return: The GTK+ window instance
"""
win = self._builder.get_object(ui_id)
if win and target:
path = ".//object[@id='" + ui_id + "']"
ui_root_object = self.ui_root.find(path)
ui_objects = ui_root_object.findall('.//object/signal/..')
ui_objects.append(ui_root_object)
for ui_object in ui_objects:
ui_child_id = ui_object.attrib['id']
child_win = self._builder.get_object(ui_child_id)
ui_signals = ui_object.findall('./signal')
for ui_signal in ui_signals:
event_name = ui_signal.attrib['name']
handler_name = ui_signal.attrib['handler']
handler = getattr(target, handler_name)
child_win.connect(event_name, handler)
if include_children:
self._get_children(ui_id, target)
return win
def _get_children(self, ui_id, target):
path = ".//object[@id='" + ui_id + "']//object"
ui_objects = self.ui_root.findall(path)
for ui_object in ui_objects:
child_id = ui_object.attrib['id']
child_win = self._builder.get_object(child_id)
setattr(target, child_id, child_win) | /replication-monitor-0.1.9.tar.gz/replication-monitor-0.1.9/src/builder.py | 0.789477 | 0.244284 | builder.py | pypi |
.. _specifying_an_archetype:
Specifying an Archetype
~~~~~~~~~~~~~~~~~~~~~~~
In this section, we explain how to customize a data set archetype to
obtain synthetic data that suits your needs.
.. _basic_parameters:
Basic Parameters
^^^^^^^^^^^^^^^^
Basic parameters of each :py:class:`Archetype <repliclust.base.Archetype>`
include the desired number of clusters `n_clusters`,
the number of dimensions `dim` of the data, the desired total number of
data points `n_samples` in each synthetic
dataset and the name of the archetype (`name`).
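For example, an archetype using only these basic settings might be created
as follows (the archetype name shown is an arbitrary choice; all other
settings keep their defaults):

.. code:: ipython3

    from repliclust import Archetype, DataGenerator

    # five clusters in ten dimensions, 1000 data points in total
    basic_archetype = Archetype(n_clusters=5, dim=10, n_samples=1000,
                                name="basic_example")
    X, y, archetype_name = DataGenerator(basic_archetype).synthesize(quiet=True)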
Overlaps Between Clusters
^^^^^^^^^^^^^^^^^^^^^^^^^
We quantify the overlap between any pair of clusters as a percentage.
Roughly, an overlap of 0.05 indicates that the outer 5% of the clusters’
probability densities overlap.
In a data set with *k* clusters, there are *k(k-1)/2* pairs of clusters.
To quantify the desired overlap for the whole data set, you can use the
parameters :py:obj:`min_overlap <repliclust.maxmin.archetype.MaxMinArchetype>`
and :py:obj:`max_overlap <repliclust.maxmin.archetype.MaxMinArchetype>`.
The latter parameter imposes an upper limit on the overlap between any
pair of clusters. Hence, decrease `max_overlap` if you want to ensure
that clusters are farther apart. On the other hand, `min_overlap` sets a
lower limit on the overlap between a cluster and its closest neighbor.
In other words,
increase `min_overlap` if you want to avoid isolated clusters. Choose
similar values for `min_overlap` and `max_overlap` if you would like
to impose a consistent overlap across all synthetic data sets. However,
keep in mind that `max_overlap` must always exceed `min_overlap`; in
addition, when the gap between `min_overlap` and `max_overlap` is
too small, data generation may take unacceptably long.
The simulation below generates synthetic data sets for various choices
of `min_overlap` and `max_overlap`. We discuss the results below.
.. code:: ipython3
from repliclust import set_seed, Archetype, DataGenerator
import matplotlib.pyplot as plt
set_seed(2)
eps = 1e-4
overlap_settings = [{'min_overlap': 1e-3, 'max_overlap': 1e-3+eps},
{'min_overlap': 1e-3, 'max_overlap': 0.5},
{'min_overlap': 0.5, 'max_overlap': 0.5+eps}]
for i, overlaps in enumerate(overlap_settings):
fig, ax = plt.subplots(figsize=(10,2),dpi=300,nrows=1, ncols=4)
description = (
r"$\bf{Cluster~Overlaps~around~0.1\%}$" if i==0
else (r"$\bf{Cluster~Overlaps~between~0.1\%~and~50\%}$" if (i==1)
else r"$\bf{Cluster~Overlaps~around~50\%}$")
)
fig.suptitle(description + '\n'
+ "min_overlap" + r"$ \approx $"
+ str(overlaps['min_overlap'])
+ ", max_overlap" + r"$ \approx $"
+ str(round(overlaps['max_overlap'],3)),
y=1.15)
for j in range(4):
archetype = Archetype(
min_overlap=overlaps['min_overlap'],
max_overlap=overlaps['max_overlap']
)
X, y, archetype_name = (DataGenerator(archetype)
.synthesize(quiet=True))
ax[j].scatter(X[:,0], X[:,1], c=y, alpha=0.25)
ax[j].set_xticks([]); ax[j].set_yticks([])
fig.subplots_adjust(hspace=0.5)
.. image:: ./user_guide_img/3_0.svg
.. image:: ./user_guide_img/3_1.svg
.. image:: ./user_guide_img/3_2.svg
The plots above demonstrate the impact of varying `min_overlap` and
`max_overlap`. The middle series of plots shows that
the difference between `max_overlap` and `min_overlap` plays an
important role as well.
In the top row of plots, ``min_overlap=0.001`` and
``max_overlap=0.0011``. The small difference between ``max_overlap``
and ``min_overlap`` means that we are controlling cluster overlap
rather tightly around 0.1%. Not only must no pair of clusters overlap
more than 0.1%, but also each cluster
must overlap at least 0.1% with its closest neighbor. The bottom row
paints a similar picture, except with more overlap between clusters
(50% vs 0.1%).
The middle row shows a different scenario because we leave a
substantial gap between ``min_overlap=0.001`` and ``max_overlap=0.5``.
In this case, all clusters must overlap less than 50%, but we permit
much smaller overlaps. This choice increases the variability of
synthetic data sets because within the range of 0.001 to 0.5 we leave
the actual overlaps to chance. For example, the clusters in the
second data set from the left overlap more than those in the third data
set. Such variation may or may not be helpful for your application.
Cluster Aspect Ratios
^^^^^^^^^^^^^^^^^^^^^
Each cluster has an ellipsoidal shape that may be round like a ball, or
long and slender like a rod. The *aspect ratio* of a cluster is the
ratio of the length of its longest axis to the length of its shortest
axis. In other words, a high aspect ratio indicates a long and slender
cluster, whereas a low aspect ratio indicates a round cluster. Possible
values for the aspect ratio range from 1 (a perfect sphere) to
infinitely large.
When generating synthetic data using ``repliclust``, you can influence
the cluster aspect ratios by changing the parameters
:py:obj:`aspect_ref <repliclust.maxmin.archetype.MaxMinArchetype>`
and :py:obj:`aspect_maxmin <repliclust.maxmin.archetype.MaxMinArchetype>`.
The reference aspect ratio, `aspect_ref`, determines the typical aspect
ratio for all clusters in a synthetic data set. For example, if
``aspect_ref=3``, the typical cluster is oblong with an aspect ratio of
three. On the other hand, the max-min ratio `aspect_maxmin` determines
the variability of cluster aspect ratios within the same data set.
More precisely, `aspect_maxmin` is the ratio of the highest aspect ratio
to the lowest aspect ratio in each data set. For example, if
``aspect_maxmin=3``, then the "longest" cluster has an aspect ratio three
times that of the most "round" cluster.
The simulation below demonstrates the effect of changing
`aspect_ref` and `aspect_maxmin`.
.. code:: ipython3
import matplotlib.pyplot as plt
import repliclust
repliclust.set_seed(1)
fig, ax = plt.subplots(figsize=(8,8), dpi=300, nrows=2, ncols=2)
for i, aspect_ref in enumerate([1, 3]):
for j, aspect_maxmin in enumerate([1, 3]):
archetype = repliclust.Archetype(n_clusters=5, n_samples=750,
aspect_ref=aspect_ref,
aspect_maxmin=aspect_maxmin,
radius_maxmin=1.0,
min_overlap=0.04,
max_overlap=0.05,
distributions=['normal'])
X, y, _ = repliclust.DataGenerator(archetype).synthesize(quiet=True)
ax[i,j].scatter(X[:,0], X[:,1],c=y, alpha=0.25)
aspect_ref_description = (r"$\bf{Round~Shape}$" if (i==0)
else r"$\bf{Long~Shape}$")
aspect_maxmin_description = (r"$\bf{-~no~Variability}$" if (j==0)
else r"$\bf{-~3x~Variability}$")
ax[i,j].set_title(aspect_ref_description + " "
+ aspect_maxmin_description + "\n"
+r"$ aspect\_ref $=" + str(aspect_ref) + ", "
+r"$ aspect\_maxmin $=" + str(aspect_maxmin),
fontsize=10, y=1.05)
ax[i,j].set_aspect('equal')
ax[i,j].set_xticks([]); ax[i,j].set_yticks([])
plt.subplots_adjust(hspace=0.3, wspace=0.15)
.. image:: ./user_guide_img/4.svg
Cluster Volumes
^^^^^^^^^^^^^^^
The volume of a cluster is the volume spanned by the inner 75% of its
probability mass. Since cluster volume grows rapidly in high dimensions,
we quantify the spatial extent of a cluster in terms of its radius
instead. The radius of an ellipsoidal cluster is the spherical radius
of a ball with the same volume.
When generating synthetic data with ``repliclust``, you can influence
the variability in cluster volumes by changing the
:py:obj:`radius_maxmin <repliclust.maxmin.archetype.MaxMinArchetype>`
parameter. This parameter sets the ratio between the
largest and smallest cluster radii within a data set. For example, if
`radius_maxmin` is 10 and the smallest cluster has unit radius, then the
biggest cluster has a radius of 10. Note that volumes scale
differently from radii. In *dim* dimensions, ``radius_maxmin=10``
implies that the biggest cluster volume is `10**dim` times
greater than the smallest.
The simulation below demonstrates the effect of varying
``radius_maxmin``.
.. code:: ipython3
import repliclust
import matplotlib.pyplot as plt
repliclust.set_seed(1)
fig, ax = plt.subplots(figsize=(10,3.3), dpi=300, nrows=1, ncols=3)
for i, radius_maxmin in enumerate([1,3,10]):
archetype = repliclust.Archetype(radius_maxmin=radius_maxmin,
max_overlap=0.05,min_overlap=0.04)
X, y, _ = repliclust.DataGenerator(archetype).synthesize(quiet=True)
description = (
r"$\bf{Equal~Cluster~Volumes}$"
if i==0
else (r"$\bf{3x~Variability}$"
if (i==1)
else r"$\bf{10x~Variability}$")
)
ax[i].scatter(X[:,0], X[:,1], c=y, alpha=0.25)
ax[i].set_xticks([]); ax[i].set_yticks([])
ax[i].set_title(description + '\n'
+ r'$ radius\_maxmin $'+ " = " + str(radius_maxmin))
.. image:: ./user_guide_img/5.svg
Cluster Probability Distributions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Each cluster consists of data points spread around a central point
according to a probability distribution. While a cluster’s overall
ellipsoidal shape depends on its covariance matrix, the choice of
probability distribution determines how quickly the density of data
points drops with increasing
distance from the central point. For example, the `normal`
distribution spreads all data points rather tightly around the central
point. By contrast, the `exponential`
distribution spreads the probability mass further out in space, leaving
a larger share of data points away from the cluster center.
Going even further, heavy-tailed distributions such as the
`standard t` distribution
with ``df=1`` degrees of freedom give rise to *outliers*, data points
very far from the cluster center.
When generating synthetic data using ``repliclust``, you can use the
``distributions`` parameter to customize the probability distributions
appearing in your synthetic data sets. As an example, the scatter plots
below visualize the differences between the normal,
exponential, and standard t distributions.
.. image:: ./user_guide_img/6.svg
Note the vastly different scales of the
`X1` and `X2` axes. On the left, the normal distribution keeps all
data points within about two units of distance from the cluster center.
On the right, the heavy-tailed standard t distribution leads to outliers
as far as 200 units away. The exponential distribution in the middle
strikes a compromise, with distances of up to about five units from the
center.
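A rough sketch of how such a comparison could be generated is shown below.
It assumes that a one-cluster archetype is acceptable and that the default
overlap settings are harmless when only a single cluster is present; the
figure above may have been produced differently.

.. code:: ipython3

    import repliclust
    import matplotlib.pyplot as plt

    repliclust.set_seed(1)
    fig, ax = plt.subplots(figsize=(10,3.3), dpi=300, nrows=1, ncols=3)
    # one single-cluster archetype per distribution (assumed parameters)
    distributions = ['normal', 'exponential', ('standard_t', {'df': 1})]
    for i, distr in enumerate(distributions):
        archetype = repliclust.Archetype(n_clusters=1, n_samples=500,
                                         distributions=[distr])
        X, y, _ = repliclust.DataGenerator(archetype).synthesize(quiet=True)
        ax[i].scatter(X[:,0], X[:,1], alpha=0.25)
        ax[i].set_title(str(distr))
        ax[i].set_xlabel("X1"); ax[i].set_ylabel("X2")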
Besides choosing a single probability distribution, you can use multiple
distributions. This choice leads to synthetic
data sets in which different clusters have different probability
distributions. In general, the parameter ``distributions`` is a list
containing the names of all probability distributions, as well as their
parameters. Not all distributions have parameters. To obtain a list of
the probability distributions currently supported in ``repliclust``, as
well as their parameters, call ``get_supported_distributions()``.
.. code:: ipython3
from repliclust import get_supported_distributions
get_supported_distributions()
.. parsed-literal::
{'normal': {},
'standard_t': {'df': 1},
'exponential': {},
'beta': {'a': 0.3, 'b': 0.5},
'chisquare': {'df': 1},
'gumbel': {'scale': 1.0},
'weibull': {'a': 2},
'gamma': {'shape': 0.5, 'scale': 1.0},
'pareto': {'a': 1},
'f': {'dfnum': 1, 'dfden': 1},
'lognormal': {'sigma': 1.0}}
It is important to
spell the names of distributions exactly as shown above. All names are
adapted from the ``numpy.random.Generator`` module. To understand the
meaning of the distributional parameters, see the ``numpy``
documentation. For example, click `here <https://numpy.org/doc/stable/reference/random/generated/numpy.random.Generator.gamma.html>`_
to see documentation for the gamma distribution.
When specifying a probability distribution with parameters, the
corresponding entry in ``distributions`` should be a tuple
*(name, parameters)*, where *name* is the name of the distribution and
*parameters* is a dictionary of distributional parameters. For example,
the gamma distribution has parameters `shape` and `scale`. Below
we generate synthetic data based on an archetype with gamma-distributed
clusters. Note that in ``repliclust`` you can only change the parameters
listed when calling
:py:func:`get_supported_distributions() <repliclust.base.get_supported_distributions>`,
even though the corresponding ``numpy`` class might have additional
parameters. For example, the normal and exponential distributions have
no parameters in ``repliclust``.
The simulation below generates a synthetic data set with
gamma-distributed clusters.
.. code:: ipython3
import repliclust
import matplotlib.pyplot as plt
repliclust.set_seed(1)
my_archetype = repliclust.Archetype(
min_overlap=0.01, max_overlap=0.05,
distributions=[('gamma', {'shape': 1, 'scale': 2.0})])
X, y, _ = repliclust.DataGenerator(my_archetype).synthesize(quiet=True)
plt.scatter(X[:,0],X[:,1],c=y,alpha=0.35)
plt.gcf().set_dpi(300)
plt.gca().set_xticks([]); plt.gca().set_yticks([])
plt.title(r"$\bf{Gamma{-}Distributed~Clusters}$" + '\n'
+ r"$distributions=[('gamma', \{'shape': 1, 'scale': 2.0\})]$");
.. image:: ./user_guide_img/7.svg
When using multiple distributions, ``repliclust``
randomly assigns a distribution to each cluster. For example, the
choice ``distributions=['normal', 'exponential']`` makes half of the
clusters normally distributed, and the other half exponentially
distributed. To customize these proportions, use the parameter
``distribution_proportions``. For example, to raise the share of
exponentially distributed clusters to 75%, set
``distribution_proportions=[0.25,0.75]``. The simulation below
demonstrates such possibilities in a more complex example.
.. code:: ipython3
import repliclust
import matplotlib.pyplot as plt
repliclust.set_seed(2)
distr_list = ['normal','exponential',('gamma', {'shape': 1, 'scale': 2.0})]
distr_proportions = [0.25,0.5,0.25]
my_archetype = repliclust.Archetype(
n_clusters=8, min_overlap=0.005, max_overlap=0.006,
distributions=distr_list,
distribution_proportions=distr_proportions
)
X, y, _ = repliclust.DataGenerator(my_archetype).synthesize(quiet=True)
plt.scatter(X[:,0],X[:,1],c=y,alpha=0.35)
plt.gcf().set_dpi(300)
plt.gca().set_xticks([]); plt.gca().set_yticks([])
plt.title(r"$\bf{Using~Multiple~Probability~Distributions}$"
+ '\n' + r"$ distributions=['normal', 'exponential',"
+ r"('gamma', \{'shape': 1, 'scale': 2.0\})] $,"
+ '\n' + r"$ distribution\_proportions=[0.25,0.5,0.25] $",
fontsize=10);
.. image:: ./user_guide_img/8.svg
Can you spot which of the clusters above have normal, exponential, or
gamma distributions?
Group Sizes
^^^^^^^^^^^
The *group size* of a cluster is the number of data points in it. When
group sizes vary significantly between clusters in the same data set, we
speak of *class imbalance*. When generating synthetic data using
``repliclust``, you can vary the class imbalance by specifying the
``imbalance_ratio``. This parameter sets the ratio of the greatest to
the smallest number of data points among all clusters in the same data
set. For example, if ``imbalance_ratio=10`` then the cluster with the
most data points has ten times more data points than the cluster with the
least number of data points. By contrast, the total number of
data points in the whole data set depends on the parameter ``n_samples``
introduced in the :ref:`Basic Parameters <basic_parameters>` section.
The simulation below demonstrates the effect of changing the
``imbalance_ratio``.
.. code:: ipython3
import matplotlib.pyplot as plt
import repliclust
repliclust.set_seed(1)
fig, ax = plt.subplots(figsize=(10,5), dpi=300, nrows=1, ncols=2)
for i, imbalance_ratio in enumerate([1, 10]):
archetype = repliclust.Archetype(
n_clusters=2, n_samples=120,
distributions=['normal'],
imbalance_ratio=imbalance_ratio)
X, y, _ = repliclust.DataGenerator(archetype).synthesize(quiet=True)
ax[i].scatter(X[:,0], X[:,1],c=y, alpha=0.5)
plot_description = (r"$\bf{Perfect~Balance}$" if (i==0)
else r"$\bf{10x~Imbalance}$")
ax[i].set_title(plot_description + "\n" +r"$ imbalance\_ratio $="
+ str(imbalance_ratio))
ax[i].set_xticks([]); ax[i].set_yticks([])
.. image:: ./user_guide_img/9.svg
In the scatter plots above, both datasets have ``n_samples=120``
data points. On the left, both clusters have the same number of data
points (class balance). On the right, the bigger cluster has ten
times more data points than the smaller cluster (class imbalance). | /repliclust-0.0.3.tar.gz/repliclust-0.0.3/docs/source/specifying_an_archetype.rst | 0.935176 | 0.772015 | specifying_an_archetype.rst | pypi |
```
import repliclust
import numpy as np
import repliclust.overlap._gradients as _gradients
# define new version of _optimize_centers that stores the loss after each epoch
def _optimize_centers(self, centers, cov_inv,
max_epoch=1000, learning_rate=0.1, tol=1e-10,
verbose=False, quiet=False):
"""
See description in original (overridden) function.
"""
if not quiet: print("\n[=== optimizing cluster overlaps ===]\n")
pad_epoch = int(np.maximum(2, np.floor(1+np.log10(max_epoch))))
self.loss_log = []
epoch_count = 0
keep_optimizing = (epoch_count < max_epoch)
while keep_optimizing:
epoch_order = repliclust.config._rng.permutation(centers.shape[0])
for i in epoch_order:
_gradients.update_centers(
i, centers, cov_inv,
learning_rate=learning_rate,
overlap_bounds=self.overlap_bounds
)
epoch_count += 1
keep_optimizing = (epoch_count < max_epoch)
loss = _gradients.total_loss(centers, cov_inv,
self.overlap_bounds)
self.loss_log.append((epoch_count,loss))
if verbose:
self._print_optimization_progress(
epoch_count, max_epoch, pad_epoch, loss)
if np.allclose(loss, 0, atol=1e-14, rtol=1e-14):
if not verbose and not quiet:
print(" "*17 + "...")
if not quiet:
self._print_optimization_result(epoch_count,
pad_epoch)
return centers
return centers
repliclust.overlap.centers.ConstrainedOverlapCenters._optimize_centers = _optimize_centers
import matplotlib.pyplot as plt
from statistics import median
epoch_max = 500
n_std = 10
fig, ax = plt.subplots(figsize=(10,5),dpi=300, ncols=2)
ax[0].set_ylabel('Loss')
ax[0].set_title('Loss vs Epoch (Varying Number of Clusters)')
ax[1].set_title('Loss vs Epoch (Varying Dimensionality)')
linestyles = ['solid','dotted','dashed']
markers = ['o', '^', 's']
p_vals = [10,50,100]
k_vals = [2,100,200]
for subplot_idx in [0,1]:
loss_firstmom = np.zeros(shape=(epoch_max,))
ax[subplot_idx].set_xlabel('Epoch')
ax[subplot_idx].set_yscale('log')
ax[subplot_idx].set_xlim([1,50])
ax[subplot_idx].set_ylim([1e-10,10])
vals = k_vals if subplot_idx==0 else p_vals
for j, val in enumerate(vals):
k = val if subplot_idx==0 else median(k_vals)
p = val if subplot_idx==1 else median(p_vals)
for i in range(n_std):
print(k)
print(p)
archie = repliclust.Archetype(n_clusters=k, dim=int(p))
dug = repliclust.DataGenerator(archie)
X, y, _ = dug.synthesize(quiet=True)
_, loss = list(zip(*archie.center_sampler.loss_log))
loss = np.array(loss, dtype='float')
loss_padded = np.concatenate([loss, np.zeros(shape=(epoch_max-len(loss),))])
loss_firstmom += loss_padded/n_std
loss_mean = loss_firstmom
ax[subplot_idx].plot(np.arange(1,epoch_max+1), loss_mean, zorder=1, color='black', linestyle=linestyles[j])
#ax.scatter(np.arange(1,epoch_max+1), loss_mean, color='black', facecolor='white', marker=markers[j], zorder=2)
plt.savefig('convergence.png',bbox_inches='tight')
```
| /repliclust-0.0.3.tar.gz/repliclust-0.0.3/.ipynb_checkpoints/numerical_convergence-checkpoint.ipynb | 0.560253 | 0.409014 | numerical_convergence-checkpoint.ipynb | pypi |
```
import matplotlib.pyplot as plt
import seaborn as sn
import numpy as np
from repliclust.distributions import N_EMPIRICAL_QUANTILE, QUANTILE_LEVEL
from repliclust import Archetype, DataGenerator, set_seed, get_supported_distributions
from scipy.stats import norm, expon, genpareto, t, beta, gamma
nrows=1
ncols=2
fig, ax = plt.subplots(figsize=(10,6.25),dpi=300,nrows=nrows,ncols=ncols)
rng = np.random.default_rng()
set_seed(2)
distributions = [
'normal',
'exponential',
('pareto', {'a': 3}),
('standard_t', {'df': 3}),
('beta', {'a': 2, 'b': 3}),
('gamma', {'shape': 10})
]
distributions_density = [
(norm.ppf, norm.pdf, {}, 'normal', {}),
(expon.ppf, expon.pdf, {}, 'exponential', {}),
(genpareto.ppf, genpareto.pdf, {'c':3}, 'pareto', {'a':3}),
(t.ppf, t.pdf, {'df': 3}, 'standard_t', {'df': 3}),
(beta.ppf, beta.pdf, {'a': 2, 'b': 3}, 'beta', {'a': 2, 'b': 3}),
(gamma.ppf, gamma.pdf, {'a':10}, 'gamma', {'shape':10}),
]
n_clusters=6
group_sizes = np.array([100,100,100,100,100,100])
archetype = Archetype(n_clusters=n_clusters, n_samples=500, distributions=distributions,
aspect_ref=2, aspect_maxmin=3, radius_maxmin=3, max_overlap=0.05)
mixture_model = archetype.sample_mixture_model()
X, y = mixture_model.sample_data(group_sizes=group_sizes)
j=0
for cluster_idx in range(n_clusters):
quantile, density, params_scipy, name, params_numpy = distributions_density[cluster_idx]
numpy_sample_generator = getattr(rng, name)
ax_max = 5
sample = np.abs(numpy_sample_generator(size=N_EMPIRICAL_QUANTILE, **params_numpy))
scaling = np.quantile(np.abs(sample), q=QUANTILE_LEVEL)
r_max=10
vmin = density(quantile(0.99, **params_scipy)/scaling, **params_scipy)
vmax = np.max(density(np.linspace(0,r_max,2000), **params_scipy))
n_contours=100
U, V = np.meshgrid(np.linspace(-r_max,r_max,1000), np.linspace(-r_max,r_max,1000))
Z = density(np.sqrt((U*scaling)**2 + (V*scaling)**2), **params_scipy)
x_c, y_c = mixture_model.centers[cluster_idx]
U_vec = mixture_model.axes_list[0][0,:]; U_length = mixture_model.axis_lengths_list[0][0]
V_vec = mixture_model.axes_list[0][1,:]; V_length = mixture_model.axis_lengths_list[0][1]
X_transformed = x_c + (U * U_length * U_vec[0] + V * V_length * V_vec[0])
Y_transformed = y_c + (U * U_length * U_vec[1] + V * V_length * V_vec[1])
print(U_vec)
print(V_vec)
ax[j].contourf(X_transformed,Y_transformed,Z,levels=n_contours, cmap='BuPu', vmin=vmin, vmax=1.5*vmax);
ax[j].scatter(X[y==cluster_idx,0],X[y==cluster_idx,1],c='black', alpha=0.375, s=1)
ax[j].arrow(0, 0, U_length*U_vec[0], U_length*U_vec[1], head_width=0.15, head_length=0.25, width=1e-4, fc='k', ec='k')
ax[j].arrow(0, 0, V_length*V_vec[0], V_length*V_vec[1], head_width=0.15, head_length=0.25, width=1e-4, fc='k', ec='k')
ax[j].scatter(0,0,c='black',alpha=1,s=3)
ax[j].set_aspect('equal')
plt.subplots_adjust(hspace=0.15, wspace=0.02)
# plt.figure()
# res = sn.kdeplot(x=X[:,0],y=X[:,1],bw_adjust=100,fill=True)
# plt.show()
```
| /repliclust-0.0.3.tar.gz/repliclust-0.0.3/.ipynb_checkpoints/mixture_model_figure-checkpoint.ipynb | 0.588889 | 0.618579 | mixture_model_figure-checkpoint.ipynb | pypi |
import json
from requests import Session
from typing import Dict, Any, List
from .queries import q
from .colors import green, end, purple, red, bold_green, bold_blue, blue
from time import sleep
import logging
import random
import os
logger: logging.Logger = logging.getLogger(__name__)
backup: str = "https://graphql-playground.pikachub2005.repl.co/"
endpoint: str = "https://replit.com/graphql"
headers: Dict[str, str] = {
"X-Requested-With": "replit",
"Origin": "https://replit.com",
"Accept": "application/json",
"Referrer": "https://replit.com",
"Content-Type": "application/json",
"Connection": "keep-alive",
"Host": "replit.com",
"x-requested-with": "XMLHttpRequest",
"User-Agent": "Mozilla/5.0",
}
number_convert: List[str] = ["1st", "2nd", "3rd"]
__reset_status_codes: List[int] = [429, 403, 520, 503, 502, 500]
s = Session()
s.headers.update(headers)
def post(
connection: str,
query: str,
vars: Dict[str, Any] = {},
raw: bool = False,
retry_for_internal_errors: bool = True,
__different_endpoint: str = None,
):
"""post query with vars to replit graph query language"""
s.headers.update({"Cookie": f"connect.sid={connection}"})
class InitialRequest:
def __init__(self):
self.status_code = 429
self.text = ""
req = InitialRequest()
number_of_attempts = 0
max_attempts = 7
if __different_endpoint is None:
__different_endpoint = endpoint
while (
req.status_code in __reset_status_codes or str(req.status_code).startswith("5")
) and number_of_attempts < max_attempts: # only try 7 times
current_endpoint = f"{__different_endpoint}?e={int(random.random() * 100)}"
req = s.post(
current_endpoint,
json={"query": (query if raw else q[query]), "variables": vars},
)
if req.status_code in __reset_status_codes or str(req.status_code).startswith(
"5"
):
N_TH = (
number_convert[number_of_attempts]
if number_of_attempts < 3
else str(number_of_attempts + 1) + "th"
)
logger.warning(
f"{green}[FILE] POST_QL.py{end}\n{red}[WARNING]{end}\n{red}[STATUS CODE] {req.status_code}\n\t{red}[INFO]{end} You have been ratelimited\n\t{bold_blue}[SUMMARY]{end} Retrying query for the {N_TH} time (max retries is 5)"
)
number_of_attempts += 1
sleep(
5 * (number_of_attempts)
) # as not to overload the server, the sleep time increases per num attempts
continue
vars_max = 200
query_max = 100
text_max = 200
_query = query
_vars = (
f" {vars}"
if (len(json.dumps(vars, indent=8)) + 3 >= vars_max or len(vars) <= 1)
else f"\n\t\t\t{json.dumps(vars, indent=16)[:-1]}\t\t\t" + "}"
)
_text = req.text.strip("\n")
if len(_vars) >= vars_max:
_vars = _vars[: vars_max - 3] + "..."
if len(_query) >= query_max:
_query = _query[: query_max - 3] + "..."
if len(_text) >= text_max:
_text = _text[: text_max - 3] + "..."
if req.status_code == 200:
logger.info(
f"{green}[FILE] POST_QL.py{end}\n{green}[INFO]{end} {bold_green}Successful graphql!{end}\n\t{blue}[SUMMARY]{end} queried replit's graphql with these query and vars.\n\t{purple}[EXTRA]{end}\n\t\t{bold_blue}[QUERY]{end} {query}\n\t\t{bold_blue}[VARS]{end}{_vars}\n\t\t{bold_blue}[RESPONSE]{end} {_text}\n\t\t{bold_blue}[IS RAW QUERY]{end} {raw}\n\t\t{bold_blue}[URL END POINT]{end} {current_endpoint}"
)
else:
return logger.error(
f"{red}[FILE] POST_QL.py{end}\n{red}[STATUS CODE] {req.status_code}\n\t{purple}[EXTRA]{end} {_text}\n\t\t{bold_blue}[QUERY]{end} {query}\n\t\t{bold_blue}[VARS]{end}{_vars}\n\t\t{bold_blue}[IS RAW QUERY]{end} {raw}\n\t\t{bold_blue}[URL END POINT]{end} {current_endpoint}\n\t\t{bold_blue}[RETRY]{end} {retry_for_internal_errors}"
)
res = json.loads(req.text)
try:
_ = list(map(lambda x: x["data"], list(res["data"])))
return _
except:
if "data" in res["data"]:
return res["data"]["data"]
else:
if "data" in res:
return res["data"]
else:
return res
gql = post | /replit-bot-4.2.19.tar.gz/replit-bot-4.2.19/replit_bot/post_ql.py | 0.650134 | 0.174006 | post_ql.py | pypi |
# Python LSP Server
This is a fork of https://github.com/python-lsp/python-lsp-server.
See [RELEASE.md](RELEASE.md) for how to make a new release.
A Python 3.7+ implementation of the [Language Server Protocol](https://github.com/Microsoft/language-server-protocol).
(Note: versions <1.4 should still work with Python 3.6)
## Installation
The base language server requires [Jedi](https://github.com/davidhalter/jedi) to provide Completions, Definitions, Hover, References, Signature Help, and Symbols:
```
pip install python-lsp-server
```
This will expose the command `pylsp` on your PATH. Confirm that installation succeeded by running `pylsp --help`.
If the respective dependencies are found, the following optional providers will be enabled:
- [Rope](https://github.com/python-rope/rope) for Completions and renaming
- [Pyflakes](https://github.com/PyCQA/pyflakes) linter to detect various errors
- [McCabe](https://github.com/PyCQA/mccabe) linter for complexity checking
- [pycodestyle](https://github.com/PyCQA/pycodestyle) linter for style checking
- [pydocstyle](https://github.com/PyCQA/pydocstyle) linter for docstring style checking (disabled by default)
- [autopep8](https://github.com/hhatto/autopep8) for code formatting
- [YAPF](https://github.com/google/yapf) for code formatting (preferred over autopep8)
- [flake8](https://github.com/pycqa/flake8) for error checking (disabled by default)
- [pylint](https://github.com/PyCQA/pylint) for code linting (disabled by default)
Optional providers can be installed using the `extras` syntax. To install [YAPF](https://github.com/google/yapf) formatting for example:
```
pip install "python-lsp-server[yapf]"
```
All optional providers can be installed using:
```
pip install "python-lsp-server[all]"
```
If you get an error similar to `'install_requires' must be a string or list of strings` then please upgrade setuptools before trying again.
```
pip install -U setuptools
```
### Windows and Linux installation
If you use Anaconda/Miniconda, you can install `python-lsp-server` using this conda command
```
conda install -c conda-forge python-lsp-server
```
Python-lsp-server is available in the repos of every major Linux distribution, and it is usually called `python-lsp-server`.
For example, here is how to install it in Debian and Debian-based distributions (E.g. Ubuntu, Pop!_OS, Linux Mint)
```
sudo apt-get install python-lsp-server
```
or Fedora Linux
```
sudo dnf install python-lsp-server
```
Only on Alpine Linux is the package named differently. You can install it there by typing this command in your terminal:
```
apk add py3-lsp-server
```
### 3rd Party Plugins
Installing these plugins will add extra functionality to the language server:
- [pylsp-mypy](https://github.com/Richardk2n/pylsp-mypy): [MyPy](http://mypy-lang.org/) type checking for Python >=3.7.
- [pyls-isort](https://github.com/paradoxxxzero/pyls-isort): code formatting using [isort](https://github.com/PyCQA/isort) (automatic import sorting).
- [python-lsp-black](https://github.com/python-lsp/python-lsp-black): code formatting using [Black](https://github.com/psf/black).
- [pyls-memestra](https://github.com/QuantStack/pyls-memestra): detecting the use of deprecated APIs.
- [pylsp-rope](https://github.com/python-rope/pylsp-rope): Extended refactoring capabilities using [Rope](https://github.com/python-rope/rope).
Please see the above repositories for examples on how to write plugins for the Python LSP Server.
[cookiecutter-pylsp-plugin](https://github.com/python-lsp/cookiecutter-pylsp-plugin) is a [cookiecutter](https://cookiecutter.readthedocs.io/) template for setting up a basic plugin project for python-lsp-server. It documents all the essentials you need to know to kick start your own plugin project.
Please file an issue if you require assistance writing a plugin.
## Configuration
Like all language servers, configuration can be passed from the client that talks to this server (i.e. your editor/IDE or other tool that has the same purpose). The details of how this is done depend on the editor or plugin that you are using to communicate with `python-lsp-server`. The configuration options available at that level are documented in [`CONFIGURATION.md`](https://github.com/python-lsp/python-lsp-server/blob/develop/CONFIGURATION.md).
`python-lsp-server` depends on other tools, like flake8 and pycodestyle. These tools can be configured via settings passed from the client (as above), or alternatively from other configuration sources. The following sources are available:
- `pycodestyle`: discovered in `~/.config/pycodestyle`, `setup.cfg`, `tox.ini` and `pycodestyle.cfg`.
- `flake8`: discovered in `~/.config/flake8`, `.flake8`, `setup.cfg` and `tox.ini`
The default configuration source is `pycodestyle`. If you would like to use `flake8` instead, you will need to take the following steps (a minimal settings sketch follows the list):
1. Disable `pycodestyle`, `mccabe`, and `pyflakes`, by setting their corresponding `enabled` configurations, e.g. `pylsp.plugins.pycodestyle.enabled`, to `false`. This will prevent duplicate linting messages as flake8 includes these tools.
1. Set `pylsp.plugins.flake8.enabled` to `true`.
1. Change the `pylsp.configurationSources` setting (in the value passed in from your client) to `['flake8']` in order to use the flake8 configuration instead.
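As an illustrative sketch, the settings a client sends for this switch might look like the following, written here as a Python dict mirroring the JSON payload; the exact shape and delivery mechanism depend on your editor, and only keys documented in `CONFIGURATION.md` are used:
```
settings = {
    "pylsp": {
        "configurationSources": ["flake8"],
        "plugins": {
            "pycodestyle": {"enabled": False},
            "mccabe": {"enabled": False},
            "pyflakes": {"enabled": False},
            "flake8": {"enabled": True},
        },
    }
}
```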
The configuration options available in these config files (`setup.cfg` etc) are documented in the relevant tools:
- [flake8 configuration](https://flake8.pycqa.org/en/latest/user/configuration.html)
- [pycodestyle configuration](https://pycodestyle.pycqa.org/en/latest/intro.html#configuration)
Overall configuration is computed first from user configuration (in home directory), overridden by configuration passed in by the language client, and then overridden by configuration discovered in the workspace.
As an example, to change the list of errors that pycodestyle will ignore, assuming you are using the `pycodestyle` configuration source (the default), you can:
1. Add the following to your ~/.config/pycodestyle:
```
[pycodestyle]
ignore = E226,E302,E41
```
2. Set the `pylsp.plugins.pycodestyle.ignore` config value from your editor
3. Same as 1, but add to `setup.cfg` file in the root of the project.
Python LSP Server can communicate over WebSockets when configured as follows:
```
pylsp --ws --port [port]
```
The following libraries are required for Web Sockets support:
- [websockets](https://websockets.readthedocs.io/en/stable/), which provides the WebSocket transport. Refer to the [websockets installation guide](https://websockets.readthedocs.io/en/stable/intro/index.html#installation) for more details.
You can install this dependency with the command below:
```
pip install 'python-lsp-server[websockets]'
```
## LSP Server Features
* Auto Completion
* Code Linting
* Signature Help
* Go to definition
* Hover
* Find References
* Document Symbols
* Document Formatting
* Code folding
* Multiple workspaces
## Development
To run the test suite:
```sh
pip install ".[test]" && pytest
```
After adding configuration options to `schema.json`, refresh the `CONFIGURATION.md` file with
```
python scripts/jsonschema2md.py pylsp/config/schema.json CONFIGURATION.md
```
## License
This project is made available under the MIT License.
| /replit-python-lsp-server-1.15.9.tar.gz/replit-python-lsp-server-1.15.9/README.md | 0.605682 | 0.938067 | README.md | pypi |
# Python Language Server Configuration
This server can be configured using `workspace/didChangeConfiguration` method. Each configuration option is described below:
| **Configuration Key** | **Type** | **Description** | **Default**
|----|----|----|----|
| `pylsp.configurationSources` | `array` of unique `string` (one of: `pycodestyle`, `pyflakes`) items | List of configuration sources to use. | `["pycodestyle"]` |
| `pylsp.plugins.autopep8.enabled` | `boolean` | Enable or disable the plugin (disabling required to use `yapf`). | `true` |
| `pylsp.plugins.flake8.config` | `string` | Path to the config file that will be the authoritative config source. | `null` |
| `pylsp.plugins.flake8.enabled` | `boolean` | Enable or disable the plugin. | `false` |
| `pylsp.plugins.flake8.exclude` | `array` of `string` items | List of files or directories to exclude. | `[]` |
| `pylsp.plugins.flake8.executable` | `string` | Path to the flake8 executable. | `"flake8"` |
| `pylsp.plugins.flake8.filename` | `string` | Only check for filenames matching the patterns in this list. | `null` |
| `pylsp.plugins.flake8.hangClosing` | `boolean` | Hang closing bracket instead of matching indentation of opening bracket's line. | `null` |
| `pylsp.plugins.flake8.ignore` | `array` of `string` items | List of errors and warnings to ignore (or skip). | `[]` |
| `pylsp.plugins.flake8.maxLineLength` | `integer` | Maximum allowed line length for the entirety of this run. | `null` |
| `pylsp.plugins.flake8.indentSize` | `integer` | Set indentation spaces. | `null` |
| `pylsp.plugins.flake8.perFileIgnores` | `array` of `string` items | A pairing of filenames and violation codes that defines which violations to ignore in a particular file, for example: `["file_path.py:W305,W304"]`). | `[]` |
| `pylsp.plugins.flake8.select` | `array` of unique `string` items | List of errors and warnings to enable. | `null` |
| `pylsp.plugins.jedi.extra_paths` | `array` of `string` items | Define extra paths for jedi.Script. | `[]` |
| `pylsp.plugins.jedi.env_vars` | `object` | Define environment variables for jedi.Script and Jedi.names. | `null` |
| `pylsp.plugins.jedi.environment` | `string` | Define environment for jedi.Script and Jedi.names. | `null` |
| `pylsp.plugins.jedi_completion.enabled` | `boolean` | Enable or disable the plugin. | `true` |
| `pylsp.plugins.jedi_completion.include_params` | `boolean` | Auto-completes methods and classes with tabstops for each parameter. | `true` |
| `pylsp.plugins.jedi_completion.include_class_objects` | `boolean` | Adds class objects as a separate completion item. | `true` |
| `pylsp.plugins.jedi_completion.include_function_objects` | `boolean` | Adds function objects as a separate completion item. | `true` |
| `pylsp.plugins.jedi_completion.fuzzy` | `boolean` | Enable fuzzy when requesting autocomplete. | `false` |
| `pylsp.plugins.jedi_completion.eager` | `boolean` | Resolve documentation and detail eagerly. | `false` |
| `pylsp.plugins.jedi_completion.resolve_at_most` | `number` | How many labels and snippets (at most) should be resolved? | `25` |
| `pylsp.plugins.jedi_completion.cache_for` | `array` of `string` items | Modules for which labels and snippets should be cached. | `["pandas", "numpy", "tensorflow", "matplotlib"]` |
| `pylsp.plugins.jedi_definition.enabled` | `boolean` | Enable or disable the plugin. | `true` |
| `pylsp.plugins.jedi_definition.follow_imports` | `boolean` | The goto call will follow imports. | `true` |
| `pylsp.plugins.jedi_definition.follow_builtin_imports` | `boolean` | If follow_imports is True, this decides whether builtin imports are also followed. | `true` |
| `pylsp.plugins.jedi_hover.enabled` | `boolean` | Enable or disable the plugin. | `true` |
| `pylsp.plugins.jedi_references.enabled` | `boolean` | Enable or disable the plugin. | `true` |
| `pylsp.plugins.jedi_signature_help.enabled` | `boolean` | Enable or disable the plugin. | `true` |
| `pylsp.plugins.jedi_symbols.enabled` | `boolean` | Enable or disable the plugin. | `true` |
| `pylsp.plugins.jedi_symbols.all_scopes` | `boolean` | If True, lists the names of all scopes instead of only the module namespace. | `true` |
| `pylsp.plugins.jedi_symbols.include_import_symbols` | `boolean` | If True, includes symbols imported from other libraries. | `true` |
| `pylsp.plugins.mccabe.enabled` | `boolean` | Enable or disable the plugin. | `true` |
| `pylsp.plugins.mccabe.threshold` | `number` | The minimum threshold that triggers warnings about cyclomatic complexity. | `15` |
| `pylsp.plugins.preload.enabled` | `boolean` | Enable or disable the plugin. | `true` |
| `pylsp.plugins.preload.modules` | `array` of unique `string` items | List of modules to import on startup | `[]` |
| `pylsp.plugins.pycodestyle.enabled` | `boolean` | Enable or disable the plugin. | `true` |
| `pylsp.plugins.pycodestyle.exclude` | `array` of unique `string` items | Exclude files or directories which match these patterns. | `[]` |
| `pylsp.plugins.pycodestyle.filename` | `array` of unique `string` items | When parsing directories, only check filenames matching these patterns. | `[]` |
| `pylsp.plugins.pycodestyle.select` | `array` of unique `string` items | Select errors and warnings | `[]` |
| `pylsp.plugins.pycodestyle.ignore` | `array` of unique `string` items | Ignore errors and warnings | `[]` |
| `pylsp.plugins.pycodestyle.hangClosing` | `boolean` | Hang closing bracket instead of matching indentation of opening bracket's line. | `null` |
| `pylsp.plugins.pycodestyle.maxLineLength` | `number` | Set maximum allowed line length. | `null` |
| `pylsp.plugins.pycodestyle.indentSize` | `integer` | Set indentation spaces. | `null` |
| `pylsp.plugins.pydocstyle.enabled` | `boolean` | Enable or disable the plugin. | `false` |
| `pylsp.plugins.pydocstyle.convention` | `string` (one of: `pep257`, `numpy`, `None`) | Choose the basic list of checked errors by specifying an existing convention. | `null` |
| `pylsp.plugins.pydocstyle.addIgnore` | `array` of unique `string` items | Ignore errors and warnings in addition to the specified convention. | `[]` |
| `pylsp.plugins.pydocstyle.addSelect` | `array` of unique `string` items | Select errors and warnings in addition to the specified convention. | `[]` |
| `pylsp.plugins.pydocstyle.ignore` | `array` of unique `string` items | Ignore errors and warnings | `[]` |
| `pylsp.plugins.pydocstyle.select` | `array` of unique `string` items | Select errors and warnings | `[]` |
| `pylsp.plugins.pydocstyle.match` | `string` | Check only files that exactly match the given regular expression; default is to match files that don't start with 'test_' but end with '.py'. | `"(?!test_).*\\.py"` |
| `pylsp.plugins.pydocstyle.matchDir` | `string` | Search only dirs that exactly match the given regular expression; default is to match dirs which do not begin with a dot. | `"[^\\.].*"` |
| `pylsp.plugins.pyflakes.enabled` | `boolean` | Enable or disable the plugin. | `true` |
| `pylsp.plugins.pylint.enabled` | `boolean` | Enable or disable the plugin. | `false` |
| `pylsp.plugins.pylint.args` | `array` of non-unique `string` items | Arguments to pass to pylint. | `[]` |
| `pylsp.plugins.pylint.executable` | `string` | Executable to run pylint with. Enabling this will run pylint on unsaved files via stdin. Can slow down workflow. Only works with python3. | `null` |
| `pylsp.plugins.rope_completion.enabled` | `boolean` | Enable or disable the plugin. | `false` |
| `pylsp.plugins.rope_completion.eager` | `boolean` | Resolve documentation and detail eagerly. | `false` |
| `pylsp.plugins.yapf.enabled` | `boolean` | Enable or disable the plugin. | `true` |
| `pylsp.rope.extensionModules` | `string` | Builtin and c-extension modules that are allowed to be imported and inspected by rope. | `null` |
| `pylsp.rope.ropeFolder` | `array` of unique `string` items | The name of the folder in which rope stores project configurations and data. Pass `null` for not using such a folder at all. | `null` |
This documentation was generated from `pylsp/config/schema.json`. Please do not edit this file directly.
| /replit-python-lsp-server-1.15.9.tar.gz/replit-python-lsp-server-1.15.9/CONFIGURATION.md | 0.90322 | 0.814496 | CONFIGURATION.md | pypi |
import json
import sys
from argparse import ArgumentParser, FileType
def describe_array(prop: dict) -> str:
extra = ""
if "items" in prop:
unique_qualifier = ""
if "uniqueItems" in prop:
unique_qualifier = "unique" if prop["uniqueItems"] else "non-unique"
item_type = describe_type(prop["items"])
extra = " ".join(filter(bool, ["of", unique_qualifier, item_type, "items"]))
return extra
def describe_number(prop: dict) -> str:
extra = []
if "minimum" in prop:
extra.append(f">= {prop['minimum']}")
if "maximum" in prop:
extra.append(f"<= {prop['maximum']}")
return ",".join(extra)
EXTRA_DESCRIPTORS = {
"array": describe_array,
"number": describe_number,
}
def describe_type(prop: dict) -> str:
prop_type = prop["type"]
types = prop_type if isinstance(prop_type, list) else [prop_type]
if "null" in types:
types.remove("null")
if len(types) == 1:
prop_type = types[0]
parts = [f"`{prop_type}`"]
for option in types:
if option in EXTRA_DESCRIPTORS:
parts.append(EXTRA_DESCRIPTORS[option](prop))
if "enum" in prop:
allowed_values = [f"`{value}`" for value in prop["enum"]]
parts.append("(one of: " + ", ".join(allowed_values) + ")")
return " ".join(parts)
def convert_schema(schema: dict, source: str = None) -> str:
lines = [
f"# {schema['title']}",
schema["description"],
"",
"| **Configuration Key** | **Type** | **Description** | **Default** ",
"|----|----|----|----|",
]
for key, prop in schema["properties"].items():
description = prop.get("description", "")
default = json.dumps(prop.get("default", ""))
lines.append(
f"| `{key}` | {describe_type(prop)} | {description} | `{default}` |"
)
if source:
lines.append(
f"\nThis documentation was generated from `{source}`."
" Please do not edit this file directly."
)
# ensure empty line at the end
lines.append("")
return "\n".join(lines)
def main(argv):
parser = ArgumentParser()
parser.add_argument("schema", type=FileType())
parser.add_argument("markdown", type=FileType("w+"), default=sys.stdout)
arguments = parser.parse_args(argv[1:])
schema = json.loads(arguments.schema.read())
markdown = convert_schema(schema, source=arguments.schema.name)
arguments.markdown.write(markdown)
if __name__ == "__main__":
main(sys.argv) | /replit-python-lsp-server-1.15.9.tar.gz/replit-python-lsp-server-1.15.9/scripts/jsonschema2md.py | 0.478773 | 0.246443 | jsonschema2md.py | pypi |
def get_well_formatted_range(lsp_range):
start = lsp_range['start']
end = lsp_range['end']
if start['line'] > end['line'] or (start['line'] == end['line'] and start['character'] > end['character']):
return {'start': end, 'end': start}
return lsp_range
def get_well_formatted_edit(text_edit):
lsp_range = get_well_formatted_range(text_edit['range'])
if lsp_range != text_edit['range']:
return {'newText': text_edit['newText'], 'range': lsp_range}
return text_edit
def compare_text_edits(a, b):
diff = a['range']['start']['line'] - b['range']['start']['line']
if diff == 0:
return a['range']['start']['character'] - b['range']['start']['character']
return diff
def merge_sort_text_edits(text_edits):
if len(text_edits) <= 1:
return text_edits
p = len(text_edits) // 2
left = text_edits[:p]
right = text_edits[p:]
merge_sort_text_edits(left)
merge_sort_text_edits(right)
left_idx = 0
right_idx = 0
i = 0
while left_idx < len(left) and right_idx < len(right):
ret = compare_text_edits(left[left_idx], right[right_idx])
if ret <= 0:
# smaller_equal -> take left to preserve order
text_edits[i] = left[left_idx]
i += 1
left_idx += 1
else:
# greater -> take right
text_edits[i] = right[right_idx]
i += 1
right_idx += 1
while left_idx < len(left):
text_edits[i] = left[left_idx]
i += 1
left_idx += 1
while right_idx < len(right):
text_edits[i] = right[right_idx]
i += 1
right_idx += 1
return text_edits
class OverLappingTextEditException(Exception):
"""
Text edits are expected to be sorted
and compressed instead of overlapping.
This error is raised when two edits
are overlapping.
"""
def apply_text_edits(doc, text_edits):
text = doc.source
sorted_edits = merge_sort_text_edits(list(map(get_well_formatted_edit, text_edits)))
last_modified_offset = 0
spans = []
for e in sorted_edits:
start_offset = doc.offset_at_position(e['range']['start'])
if start_offset < last_modified_offset:
raise OverLappingTextEditException('overlapping edit')
if start_offset > last_modified_offset:
spans.append(text[last_modified_offset:start_offset])
if len(e['newText']):
spans.append(e['newText'])
last_modified_offset = doc.offset_at_position(e['range']['end'])
spans.append(text[last_modified_offset:])
return ''.join(spans) | /replit-python-lsp-server-1.15.9.tar.gz/replit-python-lsp-server-1.15.9/pylsp/text_edit.py | 0.498047 | 0.336345 | text_edit.py | pypi |
import functools
import inspect
import logging
import os
import pathlib
import re
import threading
import jedi
JEDI_VERSION = jedi.__version__
# Eol chars accepted by the LSP protocol
# the ordering affects performance
EOL_CHARS = ['\r\n', '\r', '\n']
EOL_REGEX = re.compile(f'({"|".join(EOL_CHARS)})')
log = logging.getLogger(__name__)
def debounce(interval_s, keyed_by=None):
"""Debounce calls to this function until interval_s seconds have passed."""
def wrapper(func):
timers = {}
lock = threading.Lock()
@functools.wraps(func)
def debounced(*args, **kwargs):
sig = inspect.signature(func)
call_args = sig.bind(*args, **kwargs)
key = call_args.arguments[keyed_by] if keyed_by else None
def run():
with lock:
del timers[key]
return func(*args, **kwargs)
with lock:
old_timer = timers.get(key)
if old_timer:
old_timer.cancel()
timer = threading.Timer(interval_s, run)
timers[key] = timer
timer.start()
return debounced
return wrapper
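# Illustrative usage sketch (not part of the original module): with `keyed_by`
# set, rapid calls sharing the same `doc_uri` collapse into one delayed run.
#   @debounce(0.5, keyed_by='doc_uri')
#   def lint(doc_uri, is_saved):
#       ...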
def find_parents(root, path, names):
"""Find files matching the given names relative to the given path.
Args:
path (str): The file path to start searching up from.
names (List[str]): The file/directory names to look for.
root (str): The directory at which to stop recursing upwards.
Note:
The path MUST be within the root.
"""
if not root:
return []
if not os.path.commonprefix((root, path)):
log.warning("Path %s not in %s", path, root)
return []
# Split the relative by directory, generate all the parent directories, then check each of them.
# This avoids running a loop that has different base-cases for unix/windows
# e.g. /a/b and /a/b/c/d/e.py -> ['/a/b', 'c', 'd']
dirs = [root] + os.path.relpath(os.path.dirname(path), root).split(os.path.sep)
# Search each of /a/b/c, /a/b, /a
while dirs:
search_dir = os.path.join(*dirs)
existing = list(filter(os.path.exists, [os.path.join(search_dir, n) for n in names]))
if existing:
return existing
dirs.pop()
# Otherwise nothing
return []
def path_to_dot_name(path):
"""Given a path to a module, derive its dot-separated full name."""
directory = os.path.dirname(path)
module_name, _ = os.path.splitext(os.path.basename(path))
full_name = [module_name]
while os.path.exists(os.path.join(directory, '__init__.py')):
this_directory = os.path.basename(directory)
directory = os.path.dirname(directory)
full_name = [this_directory] + full_name
return '.'.join(full_name)
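# Illustrative example (assuming pkg/ and pkg/sub/ each contain an __init__.py):
#   path_to_dot_name('/proj/pkg/sub/mod.py') -> 'pkg.sub.mod'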
def match_uri_to_workspace(uri, workspaces):
if uri is None:
return None
max_len, chosen_workspace = -1, None
path = pathlib.Path(uri).parts
for workspace in workspaces:
workspace_parts = pathlib.Path(workspace).parts
if len(workspace_parts) > len(path):
continue
match_len = 0
for workspace_part, path_part in zip(workspace_parts, path):
if workspace_part == path_part:
match_len += 1
if match_len > 0:
if match_len > max_len:
max_len = match_len
chosen_workspace = workspace
return chosen_workspace
def list_to_string(value):
return ",".join(value) if isinstance(value, list) else value
def merge_dicts(dict_a, dict_b):
"""Recursively merge dictionary b into dictionary a.
If override_nones is True, then
"""
def _merge_dicts_(a, b):
for key in set(a.keys()).union(b.keys()):
if key in a and key in b:
if isinstance(a[key], dict) and isinstance(b[key], dict):
yield (key, dict(_merge_dicts_(a[key], b[key])))
elif isinstance(a[key], list) and isinstance(b[key], list):
yield (key, list(set(a[key] + b[key])))
elif b[key] is not None:
yield (key, b[key])
else:
yield (key, a[key])
elif key in a:
yield (key, a[key])
elif b[key] is not None:
yield (key, b[key])
return dict(_merge_dicts_(dict_a, dict_b))
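# Illustrative behaviour (not part of the original module):
#   merge_dicts({'a': 1, 'l': [1]}, {'a': None, 'l': [2], 'b': 3})
#   -> {'a': 1, 'b': 3, 'l': [1, 2]}
# (None in b does not override; lists are merged as a set union, so order is not guaranteed)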
def format_docstring(contents):
"""Python doc strings come in a number of formats, but LSP wants markdown.
Until we can find a fast enough way of discovering and parsing each format,
we can do a little better by at least preserving indentation.
"""
contents = contents.replace('\t', '\u00A0' * 4)
contents = contents.replace(' ', '\u00A0' * 2)
return contents
def clip_column(column, lines, line_number):
"""
Normalise the position as per the LSP that accepts character positions > line length
https://microsoft.github.io/language-server-protocol/specification#position
"""
max_column = len(lines[line_number].rstrip('\r\n')) if len(lines) > line_number else 0
return min(column, max_column)
def position_to_jedi_linecolumn(document, position):
"""
Convert the LSP format 'line', 'character' to Jedi's 'line', 'column'
https://microsoft.github.io/language-server-protocol/specification#position
"""
code_position = {}
if position:
code_position = {'line': position['line'] + 1,
'column': clip_column(position['character'],
document.lines,
position['line'])}
return code_position
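# Illustrative example (not part of the original module): LSP positions are
# 0-based while Jedi lines are 1-based, so {'line': 0, 'character': 4} becomes
# {'line': 1, 'column': 4}, with the column clipped to the line length.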
if os.name == 'nt':
import ctypes
kernel32 = ctypes.windll.kernel32
    PROCESS_QUERY_INFORMATION = 0x1000
def is_process_alive(pid):
"""Check whether the process with the given pid is still alive.
Running `os.kill()` on Windows always exits the process, so it can't be used to check for an alive process.
see: https://docs.python.org/3/library/os.html?highlight=os%20kill#os.kill
Hence ctypes is used to check for the process directly via windows API avoiding any other 3rd-party dependency.
Args:
pid (int): process ID
Returns:
            bool: False if the process is not alive or we don't have permission to check it, True otherwise.
"""
        process = kernel32.OpenProcess(PROCESS_QUERY_INFORMATION, 0, pid)
if process != 0:
kernel32.CloseHandle(process)
return True
return False
else:
import errno
def is_process_alive(pid):
"""Check whether the process with the given pid is still alive.
Args:
pid (int): process ID
Returns:
            bool: False if the process is not alive or we don't have permission to check it, True otherwise.
"""
if pid < 0:
return False
try:
os.kill(pid, 0)
except OSError as e:
return e.errno == errno.EPERM
else:
return True
def get_eol_chars(text):
"""Get EOL chars used in text."""
match = EOL_REGEX.search(text)
if match:
return match.group(0)
return None | /replit-python-lsp-server-1.15.9.tar.gz/replit-python-lsp-server-1.15.9/pylsp/_utils.py | 0.710528 | 0.186595 | _utils.py | pypi |
# pylint: disable=redefined-builtin, unused-argument
from pylsp import hookspec
@hookspec
def pylsp_code_actions(config, workspace, document, range, context):
pass
@hookspec
def pylsp_code_lens(config, workspace, document):
pass
@hookspec
def pylsp_commands(config, workspace):
"""The list of command strings supported by the server.
Returns:
List[str]: The supported commands.
"""
@hookspec
def pylsp_completions(config, workspace, document, position):
pass
@hookspec(firstresult=True)
def pylsp_completion_item_resolve(config, workspace, document, completion_item):
pass
@hookspec
def pylsp_definitions(config, workspace, document, position):
pass
@hookspec
def pylsp_dispatchers(config, workspace):
pass
@hookspec
def pylsp_document_did_open(config, workspace, document):
pass
@hookspec
def pylsp_document_did_save(config, workspace, document):
pass
@hookspec
def pylsp_document_highlight(config, workspace, document, position):
pass
@hookspec
def pylsp_document_symbols(config, workspace, document):
pass
@hookspec(firstresult=True)
def pylsp_execute_command(config, workspace, command, arguments):
pass
@hookspec
def pylsp_experimental_capabilities(config, workspace):
pass
@hookspec
def pylsp_folding_range(config, workspace, document):
pass
@hookspec(firstresult=True)
def pylsp_format_document(config, workspace, document, options):
pass
@hookspec(firstresult=True)
def pylsp_format_range(config, workspace, document, range, options):
pass
@hookspec(firstresult=True)
def pylsp_hover(config, workspace, document, position):
pass
@hookspec
def pylsp_initialize(config, workspace):
pass
@hookspec
def pylsp_initialized():
pass
@hookspec
def pylsp_lint(config, workspace, document, is_saved):
pass
@hookspec
def pylsp_references(config, workspace, document, position, exclude_declaration):
pass
@hookspec(firstresult=True)
def pylsp_rename(config, workspace, document, position, new_name):
pass
@hookspec
def pylsp_settings(config):
pass
@hookspec(firstresult=True)
def pylsp_signature_help(config, workspace, document, position):
pass | /replit-python-lsp-server-1.15.9.tar.gz/replit-python-lsp-server-1.15.9/pylsp/hookspecs.py | 0.748168 | 0.203826 | hookspecs.py | pypi |
import re
import parso
import parso.python.tree as tree_nodes
from pylsp import hookimpl
SKIP_NODES = (tree_nodes.Module, tree_nodes.IfStmt, tree_nodes.TryStmt)
IDENTATION_REGEX = re.compile(r'(\s+).+')
@hookimpl
def pylsp_folding_range(document):
program = document.source + '\n'
lines = program.splitlines()
tree = parso.parse(program)
ranges = __compute_folding_ranges(tree, lines)
results = []
for (start_line, end_line) in ranges:
start_line -= 1
end_line -= 1
# If start/end character is not defined, then it defaults to the
# corresponding line last character
results.append({
'startLine': start_line,
'endLine': end_line,
})
return results
def __merge_folding_ranges(left, right):
for start in list(left.keys()):
right_start = right.pop(start, None)
if right_start is not None:
left[start] = max(right_start, start)
left.update(right)
return left
def __empty_identation_stack(identation_stack, level_limits,
current_line, folding_ranges):
while identation_stack != []:
upper_level = identation_stack.pop(0)
level_start = level_limits.pop(upper_level)
folding_ranges.append((level_start, current_line))
return folding_ranges
def __match_identation_stack(identation_stack, level, level_limits,
folding_ranges, current_line):
upper_level = identation_stack.pop(0)
while upper_level >= level:
level_start = level_limits.pop(upper_level)
folding_ranges.append((level_start, current_line))
upper_level = identation_stack.pop(0)
identation_stack.insert(0, upper_level)
return identation_stack, folding_ranges
def __compute_folding_ranges_identation(text):
lines = text.splitlines()
folding_ranges = []
identation_stack = []
level_limits = {}
current_level = 0
current_line = 0
while lines[current_line] == '':
current_line += 1
for i, line in enumerate(lines):
if i < current_line:
continue
i += 1
identation_match = IDENTATION_REGEX.match(line)
if identation_match is not None:
whitespace = identation_match.group(1)
level = len(whitespace)
if level > current_level:
level_limits[current_level] = current_line
identation_stack.insert(0, current_level)
current_level = level
elif level < current_level:
identation_stack, folding_ranges = __match_identation_stack(
identation_stack, level, level_limits, folding_ranges,
current_line)
current_level = level
else:
folding_ranges = __empty_identation_stack(
identation_stack, level_limits, current_line, folding_ranges)
current_level = 0
if line.strip() != '':
current_line = i
folding_ranges = __empty_identation_stack(
identation_stack, level_limits, current_line, folding_ranges)
return dict(folding_ranges)
def __check_if_node_is_valid(node):
valid = True
if isinstance(node, tree_nodes.PythonNode):
kind = node.type
valid = kind not in {'decorated', 'parameters', 'dictorsetmaker',
'testlist_comp'}
if kind == 'suite':
if isinstance(node.parent, tree_nodes.Function):
valid = False
return valid
def __handle_skip(stack, skip):
body = stack[skip]
children = [body]
if hasattr(body, 'children'):
children = body.children
stack = stack[:skip] + children + stack[skip + 1:]
node = body
end_line, _ = body.end_pos
return node, end_line
def __handle_flow_nodes(node, end_line, stack):
from_keyword = False
if isinstance(node, tree_nodes.Keyword):
from_keyword = True
if node.value in {'if', 'elif', 'with', 'while'}:
node, end_line = __handle_skip(stack, 2)
elif node.value in {'except'}:
first_node = stack[0]
if isinstance(first_node, tree_nodes.Operator):
node, end_line = __handle_skip(stack, 1)
else:
node, end_line = __handle_skip(stack, 2)
elif node.value in {'for'}:
node, end_line = __handle_skip(stack, 4)
elif node.value in {'else'}:
node, end_line = __handle_skip(stack, 1)
return end_line, from_keyword, node, stack
def __compute_start_end_lines(node, stack):
start_line, _ = node.start_pos
end_line, _ = node.end_pos
modified = False
end_line, from_keyword, node, stack = __handle_flow_nodes(
node, end_line, stack)
last_leaf = node.get_last_leaf()
last_newline = isinstance(last_leaf, tree_nodes.Newline)
last_operator = isinstance(last_leaf, tree_nodes.Operator)
node_is_operator = isinstance(node, tree_nodes.Operator)
last_operator = last_operator or not node_is_operator
end_line -= 1
if isinstance(node.parent, tree_nodes.PythonNode) and not from_keyword:
kind = node.type
if kind in {'suite', 'atom', 'atom_expr', 'arglist'}:
if len(stack) > 0:
next_node = stack[0]
next_line, _ = next_node.start_pos
if next_line > end_line:
end_line += 1
modified = True
if not last_newline and not modified and not last_operator:
end_line += 1
return start_line, end_line, stack
def __compute_folding_ranges(tree, lines):
folding_ranges = {}
stack = [tree]
while len(stack) > 0:
node = stack.pop(0)
if isinstance(node, tree_nodes.Newline):
# Skip newline nodes
continue
if isinstance(node, tree_nodes.PythonErrorNode):
# Fallback to indentation-based (best-effort) folding
start_line, _ = node.start_pos
start_line -= 1
padding = [''] * start_line
text = '\n'.join(padding + lines[start_line:]) + '\n'
identation_ranges = __compute_folding_ranges_identation(text)
folding_ranges = __merge_folding_ranges(
folding_ranges, identation_ranges)
break
if not isinstance(node, SKIP_NODES):
valid = __check_if_node_is_valid(node)
if valid:
start_line, end_line, stack = __compute_start_end_lines(
node, stack)
if end_line > start_line:
current_end = folding_ranges.get(start_line, -1)
folding_ranges[start_line] = max(current_end, end_line)
if hasattr(node, 'children'):
stack = node.children + stack
folding_ranges = sorted(folding_ranges.items())
return folding_ranges | /replit-python-lsp-server-1.15.9.tar.gz/replit-python-lsp-server-1.15.9/pylsp/plugins/folding.py | 0.49707 | 0.281393 | folding.py | pypi |