id
int64
0
190k
prompt
stringlengths
21
13.4M
docstring
stringlengths
1
12k
19,676
import logging
from logging import config as logging_config
from typing import Any, Dict, Text


def set_log_with_config(log_config: Dict[Text, Any]):
    """Configure the ``logging`` module from a config dictionary.

    Thin wrapper around ``logging.config.dictConfig``.

    :param log_config: a dict following the ``logging.config.dictConfig`` schema
        (must contain a ``version`` key).
    :return: None
    """
    logging_config.dictConfig(log_config)
set log with config :param log_config: :return:
19,677
import logging
from contextlib import contextmanager


def set_global_logger_level(level: int, return_orig_handler_level: bool = False):
    """set qlib.xxx logger handlers level

    Parameters
    ----------
    level: int
        logger level

    return_orig_handler_level: bool
        return origin handler level map

    Examples
    --------
    .. code-block:: python

        import qlib
        import logging
        from qlib.log import get_module_logger, set_global_logger_level
        qlib.init()
        tmp_logger_01 = get_module_logger("tmp_logger_01", level=logging.INFO)
        tmp_logger_01.info("1. tmp_logger_01 info show")
        global_level = logging.WARNING + 1
        set_global_logger_level(global_level)
        tmp_logger_02 = get_module_logger("tmp_logger_02", level=logging.INFO)
        tmp_logger_02.log(msg="2. tmp_logger_02 log show", level=global_level)
        tmp_logger_01.info("3. tmp_logger_01 info do not show")
    """
    _handler_level_map = {}
    # Look the "qlib" logger up without creating it if it does not exist yet.
    # NOTE(review): a `logging.PlaceHolder` entry would lack `.handlers`;
    # presumably qlib always creates the real logger first — verify.
    qlib_logger = logging.root.manager.loggerDict.get("qlib", None)  # pylint: disable=E1101
    if qlib_logger is not None:
        for _handler in qlib_logger.handlers:
            _handler_level_map[_handler] = _handler.level
            _handler.level = level
    return _handler_level_map if return_orig_handler_level else None


# Fixed: the `@contextmanager` decorator was missing. Without it this generator
# function cannot be used in a `with` statement, which is exactly how its own
# docstring example uses it.
@contextmanager
def set_global_logger_level_cm(level: int):
    """set qlib.xxx logger handlers level to use contextmanager

    Parameters
    ----------
    level: int
        logger level

    Examples
    --------
    .. code-block:: python

        import qlib
        import logging
        from qlib.log import get_module_logger, set_global_logger_level_cm
        qlib.init()
        tmp_logger_01 = get_module_logger("tmp_logger_01", level=logging.INFO)
        tmp_logger_01.info("1. tmp_logger_01 info show")
        global_level = logging.WARNING + 1
        with set_global_logger_level_cm(global_level):
            tmp_logger_02 = get_module_logger("tmp_logger_02", level=logging.INFO)
            tmp_logger_02.log(msg="2. tmp_logger_02 log show", level=global_level)
            tmp_logger_01.info("3. tmp_logger_01 info do not show")
        tmp_logger_01.info("4. tmp_logger_01 info show")
    """
    _handler_level_map = set_global_logger_level(level, return_orig_handler_level=True)
    try:
        yield
    finally:
        # Restore each handler to the level it had on entry.
        for _handler, _level in _handler_level_map.items():
            _handler.level = _level
set qlib.xxx logger handlers level to use contextmanager Parameters ---------- level: int logger level Examples --------- .. code-block:: python import qlib import logging from qlib.log import get_module_logger, set_global_logger_level_cm qlib.init() tmp_logger_01 = get_module_logger("tmp_logger_01", level=logging.INFO) tmp_logger_01.info("1. tmp_logger_01 info show") global_level = logging.WARNING + 1 with set_global_logger_level_cm(global_level): tmp_logger_02 = get_module_logger("tmp_logger_02", level=logging.INFO) tmp_logger_02.log(msg="2. tmp_logger_02 log show", level=global_level) tmp_logger_01.info("3. tmp_logger_01 info do not show") tmp_logger_01.info("4. tmp_logger_01 info show")
19,678
import socket
from typing import Callable, List, Optional

from tqdm.auto import tqdm

from qlib.config import C
from qlib.data.dataset import Dataset
from qlib.data.dataset.weight import Reweighter
from qlib.log import get_module_logger
from qlib.model.base import Model
from qlib.utils import (
    auto_filter_kwargs,
    fill_placeholder,
    flatten_dict,
    init_instance_by_config,
)
from qlib.utils.paral import call_in_subproc
from qlib.workflow import R
from qlib.workflow.recorder import Recorder
from qlib.workflow.task.manage import TaskManager, run_task


def _log_task_info(task_config: dict):
    """Record a task's config on the active recorder: params, a `task` artifact and a hostname tag."""
    R.log_params(**flatten_dict(task_config))
    R.save_objects(**{"task": task_config})  # keep the original format and datatype
    R.set_tags(**{"hostname": socket.gethostname()})


# NOTE(review): `QlibRecorderWrapper` / `RecorderWrapper` are not defined in this
# snippet — presumably qlib's global recorder wrapper; verify against qlib.workflow.
# This assignment also shadows the `R` imported above (extraction artifact).
R: QlibRecorderWrapper = RecorderWrapper()


class Recorder:
    """
    This is the `Recorder` class for logging the experiments.
    The API is designed similar to mlflow.
    (The link: https://mlflow.org/docs/latest/python_api/mlflow.html)

    The status of the recorder can be SCHEDULED, RUNNING, FINISHED, FAILED.
    """

    # status type
    STATUS_S = "SCHEDULED"
    STATUS_R = "RUNNING"
    STATUS_FI = "FINISHED"
    STATUS_FA = "FAILED"

    def __init__(self, experiment_id, name):
        self.id = None
        self.name = name
        self.experiment_id = experiment_id
        self.start_time = None
        self.end_time = None
        self.status = Recorder.STATUS_S

    def __repr__(self):
        return "{name}(info={info})".format(name=self.__class__.__name__, info=self.info)

    def __str__(self):
        return str(self.info)

    def __hash__(self) -> int:
        return hash(self.info["id"])

    @property
    def info(self):
        # Fixed: `info` must be a property — `__hash__` and `__repr__` above use
        # `self.info` as a dict, which only works when attribute access (not a
        # call) yields the dict.
        output = dict()
        output["class"] = "Recorder"
        output["id"] = self.id
        output["name"] = self.name
        output["experiment_id"] = self.experiment_id
        output["start_time"] = self.start_time
        output["end_time"] = self.end_time
        output["status"] = self.status
        return output

    def set_recorder_name(self, rname):
        # Store a display name for the recorder.
        self.recorder_name = rname

    def save_objects(self, local_path=None, artifact_path=None, **kwargs):
        """
        Save objects such as prediction file or model checkpoints to the artifact URI. User
        can save object through keywords arguments (name:value).

        Please refer to the docs of qlib.workflow:R.save_objects

        Parameters
        ----------
        local_path : str
            if provided, them save the file or directory to the artifact URI.
        artifact_path=None : str
            the relative path for the artifact to be stored in the URI.
        """
        raise NotImplementedError("Please implement the `save_objects` method.")

    def load_object(self, name):
        """
        Load objects such as prediction file or model checkpoints.

        Parameters
        ----------
        name : str
            name of the file to be loaded.

        Returns
        -------
        The saved object.
        """
        raise NotImplementedError("Please implement the `load_object` method.")

    def start_run(self):
        """
        Start running or resuming the Recorder. The return value can be used as a context manager
        within a `with` block; otherwise, you must call end_run() to terminate the current run.
        (See `ActiveRun` class in mlflow)

        Returns
        -------
        An active running object (e.g. mlflow.ActiveRun object).
        """
        raise NotImplementedError("Please implement the `start_run` method.")

    def end_run(self):
        """
        End an active Recorder.
        """
        raise NotImplementedError("Please implement the `end_run` method.")

    def log_params(self, **kwargs):
        """
        Log a batch of params for the current run.

        Parameters
        ----------
        keyword arguments
            key, value pair to be logged as parameters.
        """
        raise NotImplementedError("Please implement the `log_params` method.")

    def log_metrics(self, step=None, **kwargs):
        """
        Log multiple metrics for the current run.

        Parameters
        ----------
        keyword arguments
            key, value pair to be logged as metrics.
        """
        raise NotImplementedError("Please implement the `log_metrics` method.")

    def log_artifact(self, local_path: str, artifact_path: Optional[str] = None):
        """
        Log a local file or directory as an artifact of the currently active run.

        Parameters
        ----------
        local_path : str
            Path to the file to write.
        artifact_path : Optional[str]
            If provided, the directory in ``artifact_uri`` to write to.
        """
        # Fixed: the original message wrongly referred to `log_metrics`.
        raise NotImplementedError("Please implement the `log_artifact` method.")

    def set_tags(self, **kwargs):
        """
        Log a batch of tags for the current run.

        Parameters
        ----------
        keyword arguments
            key, value pair to be logged as tags.
        """
        raise NotImplementedError("Please implement the `set_tags` method.")

    def delete_tags(self, *keys):
        """
        Delete some tags from a run.

        Parameters
        ----------
        keys : series of strs of the keys
            all the name of the tag to be deleted.
        """
        raise NotImplementedError("Please implement the `delete_tags` method.")

    def list_artifacts(self, artifact_path: str = None):
        """
        List all the artifacts of a recorder.

        Parameters
        ----------
        artifact_path : str
            the relative path for the artifact to be stored in the URI.

        Returns
        -------
        A list of artifacts information (name, path, etc.) that being stored.
        """
        raise NotImplementedError("Please implement the `list_artifacts` method.")

    def download_artifact(self, path: str, dst_path: Optional[str] = None) -> str:
        """
        Download an artifact file or directory from a run to a local directory if applicable,
        and return a local path for it.

        Parameters
        ----------
        path : str
            Relative source path to the desired artifact.
        dst_path : Optional[str]
            Absolute path of the local filesystem destination directory to which to
            download the specified artifacts. This directory must already exist.
            If unspecified, the artifacts will either be downloaded to a new
            uniquely-named directory on the local filesystem.

        Returns
        -------
        str
            Local path of desired artifact.
        """
        # Fixed: the original message wrongly referred to `list_artifacts`.
        raise NotImplementedError("Please implement the `download_artifact` method.")

    def list_metrics(self):
        """
        List all the metrics of a recorder.

        Returns
        -------
        A dictionary of metrics that being stored.
        """
        raise NotImplementedError("Please implement the `list_metrics` method.")

    def list_params(self):
        """
        List all the params of a recorder.

        Returns
        -------
        A dictionary of params that being stored.
        """
        raise NotImplementedError("Please implement the `list_params` method.")

    def list_tags(self):
        """
        List all the tags of a recorder.

        Returns
        -------
        A dictionary of tags that being stored.
        """
        raise NotImplementedError("Please implement the `list_tags` method.")


def begin_task_train(task_config: dict, experiment_name: str, recorder_name: str = None) -> Recorder:
    """
    Begin task training to start a recorder and save the task config.

    Args:
        task_config (dict): the config of a task
        experiment_name (str): the name of experiment
        recorder_name (str): the given name will be the recorder name. None for using rid.

    Returns:
        Recorder: the model recorder
    """
    with R.start(experiment_name=experiment_name, recorder_name=recorder_name):
        _log_task_info(task_config)
        return R.get_recorder()
Begin task training to start a recorder and save the task config. Args: task_config (dict): the config of a task experiment_name (str): the name of experiment recorder_name (str): the given name will be the recorder name. None for using rid. Returns: Recorder: the model recorder
19,679
import socket
from typing import Callable, List, Optional

from tqdm.auto import tqdm

from qlib.config import C
from qlib.data.dataset import Dataset
from qlib.data.dataset.weight import Reweighter
from qlib.log import get_module_logger
from qlib.model.base import Model
from qlib.utils import (
    auto_filter_kwargs,
    fill_placeholder,
    flatten_dict,
    init_instance_by_config,
)
from qlib.utils.paral import call_in_subproc
from qlib.workflow import R
from qlib.workflow.recorder import Recorder
from qlib.workflow.task.manage import TaskManager, run_task


def _exe_task(task_config: dict):
    """Run one task end-to-end on the active recorder: fit the model, save artifacts and generate records."""
    rec = R.get_recorder()
    # model & dataset initiation
    model: Model = init_instance_by_config(task_config["model"], accept_types=Model)
    dataset: Dataset = init_instance_by_config(task_config["dataset"], accept_types=Dataset)
    reweighter: Reweighter = task_config.get("reweighter", None)
    # model training
    auto_filter_kwargs(model.fit)(dataset, reweighter=reweighter)
    R.save_objects(**{"params.pkl": model})
    # this dataset is saved for online inference. So the concrete data should not be dumped
    dataset.config(dump_all=False, recursive=True)
    R.save_objects(**{"dataset": dataset})
    # fill placeholders such as "<MODEL>" in the task config with the live objects
    placeholder_value = {"<MODEL>": model, "<DATASET>": dataset}
    task_config = fill_placeholder(task_config, placeholder_value)
    # generate records: prediction, backtest, and analysis
    records = task_config.get("record", [])
    if isinstance(records, dict):  # prevent only one dict
        records = [records]
    for record in records:
        # Some recorder require the parameter `model` and `dataset`.
        # try to automatically pass in them to the initialization function
        # to make defining the tasking easier
        r = init_instance_by_config(
            record,
            recorder=rec,
            default_module="qlib.workflow.record_temp",
            try_kwargs={"model": model, "dataset": dataset},
        )
        r.generate()


# NOTE(review): `QlibRecorderWrapper` / `RecorderWrapper` are not defined in this
# snippet — presumably qlib's global recorder wrapper; verify against qlib.workflow.
# This assignment also shadows the `R` imported above (extraction artifact).
R: QlibRecorderWrapper = RecorderWrapper()


class Recorder:
    """
    This is the `Recorder` class for logging the experiments.
    The API is designed similar to mlflow.
    (The link: https://mlflow.org/docs/latest/python_api/mlflow.html)

    The status of the recorder can be SCHEDULED, RUNNING, FINISHED, FAILED.
    """

    # status type
    STATUS_S = "SCHEDULED"
    STATUS_R = "RUNNING"
    STATUS_FI = "FINISHED"
    STATUS_FA = "FAILED"

    def __init__(self, experiment_id, name):
        self.id = None
        self.name = name
        self.experiment_id = experiment_id
        self.start_time = None
        self.end_time = None
        self.status = Recorder.STATUS_S

    def __repr__(self):
        return "{name}(info={info})".format(name=self.__class__.__name__, info=self.info)

    def __str__(self):
        return str(self.info)

    def __hash__(self) -> int:
        return hash(self.info["id"])

    @property
    def info(self):
        # Fixed: `info` must be a property — `__hash__` and `__repr__` above use
        # `self.info` as a dict, which only works when attribute access (not a
        # call) yields the dict.
        output = dict()
        output["class"] = "Recorder"
        output["id"] = self.id
        output["name"] = self.name
        output["experiment_id"] = self.experiment_id
        output["start_time"] = self.start_time
        output["end_time"] = self.end_time
        output["status"] = self.status
        return output

    def set_recorder_name(self, rname):
        # Store a display name for the recorder.
        self.recorder_name = rname

    def save_objects(self, local_path=None, artifact_path=None, **kwargs):
        """
        Save objects such as prediction file or model checkpoints to the artifact URI. User
        can save object through keywords arguments (name:value).

        Please refer to the docs of qlib.workflow:R.save_objects

        Parameters
        ----------
        local_path : str
            if provided, them save the file or directory to the artifact URI.
        artifact_path=None : str
            the relative path for the artifact to be stored in the URI.
        """
        raise NotImplementedError("Please implement the `save_objects` method.")

    def load_object(self, name):
        """
        Load objects such as prediction file or model checkpoints.

        Parameters
        ----------
        name : str
            name of the file to be loaded.

        Returns
        -------
        The saved object.
        """
        raise NotImplementedError("Please implement the `load_object` method.")

    def start_run(self):
        """
        Start running or resuming the Recorder. The return value can be used as a context manager
        within a `with` block; otherwise, you must call end_run() to terminate the current run.
        (See `ActiveRun` class in mlflow)

        Returns
        -------
        An active running object (e.g. mlflow.ActiveRun object).
        """
        raise NotImplementedError("Please implement the `start_run` method.")

    def end_run(self):
        """
        End an active Recorder.
        """
        raise NotImplementedError("Please implement the `end_run` method.")

    def log_params(self, **kwargs):
        """
        Log a batch of params for the current run.

        Parameters
        ----------
        keyword arguments
            key, value pair to be logged as parameters.
        """
        raise NotImplementedError("Please implement the `log_params` method.")

    def log_metrics(self, step=None, **kwargs):
        """
        Log multiple metrics for the current run.

        Parameters
        ----------
        keyword arguments
            key, value pair to be logged as metrics.
        """
        raise NotImplementedError("Please implement the `log_metrics` method.")

    def log_artifact(self, local_path: str, artifact_path: Optional[str] = None):
        """
        Log a local file or directory as an artifact of the currently active run.

        Parameters
        ----------
        local_path : str
            Path to the file to write.
        artifact_path : Optional[str]
            If provided, the directory in ``artifact_uri`` to write to.
        """
        # Fixed: the original message wrongly referred to `log_metrics`.
        raise NotImplementedError("Please implement the `log_artifact` method.")

    def set_tags(self, **kwargs):
        """
        Log a batch of tags for the current run.

        Parameters
        ----------
        keyword arguments
            key, value pair to be logged as tags.
        """
        raise NotImplementedError("Please implement the `set_tags` method.")

    def delete_tags(self, *keys):
        """
        Delete some tags from a run.

        Parameters
        ----------
        keys : series of strs of the keys
            all the name of the tag to be deleted.
        """
        raise NotImplementedError("Please implement the `delete_tags` method.")

    def list_artifacts(self, artifact_path: str = None):
        """
        List all the artifacts of a recorder.

        Parameters
        ----------
        artifact_path : str
            the relative path for the artifact to be stored in the URI.

        Returns
        -------
        A list of artifacts information (name, path, etc.) that being stored.
        """
        raise NotImplementedError("Please implement the `list_artifacts` method.")

    def download_artifact(self, path: str, dst_path: Optional[str] = None) -> str:
        """
        Download an artifact file or directory from a run to a local directory if applicable,
        and return a local path for it.

        Parameters
        ----------
        path : str
            Relative source path to the desired artifact.
        dst_path : Optional[str]
            Absolute path of the local filesystem destination directory to which to
            download the specified artifacts. This directory must already exist.
            If unspecified, the artifacts will either be downloaded to a new
            uniquely-named directory on the local filesystem.

        Returns
        -------
        str
            Local path of desired artifact.
        """
        # Fixed: the original message wrongly referred to `list_artifacts`.
        raise NotImplementedError("Please implement the `download_artifact` method.")

    def list_metrics(self):
        """
        List all the metrics of a recorder.

        Returns
        -------
        A dictionary of metrics that being stored.
        """
        raise NotImplementedError("Please implement the `list_metrics` method.")

    def list_params(self):
        """
        List all the params of a recorder.

        Returns
        -------
        A dictionary of params that being stored.
        """
        raise NotImplementedError("Please implement the `list_params` method.")

    def list_tags(self):
        """
        List all the tags of a recorder.

        Returns
        -------
        A dictionary of tags that being stored.
        """
        raise NotImplementedError("Please implement the `list_tags` method.")


def end_task_train(rec: Recorder, experiment_name: str) -> Recorder:
    """
    Finish task training with real model fitting and saving.

    Args:
        rec (Recorder): the recorder will be resumed
        experiment_name (str): the name of experiment

    Returns:
        Recorder: the model recorder
    """
    with R.start(experiment_name=experiment_name, recorder_id=rec.info["id"], resume=True):
        task_config = R.load_object("task")
        _exe_task(task_config)
    return rec
Finish task training with real model fitting and saving. Args: rec (Recorder): the recorder will be resumed experiment_name (str): the name of experiment Returns: Recorder: the model recorder
19,680
# Region constants (values as given in this snippet; they mirror qlib.constant).
REG_CN = "cn"
REG_US = "us"
REG_TW = "tw"


def is_single_value(start_time, end_time, freq, region: str = REG_CN):
    """Is there only one piece of data for stock market.

    Parameters
    ----------
    start_time : Union[pd.Timestamp, str]
        closed start time for data.
    end_time : Union[pd.Timestamp, str]
        closed end time for data.
    freq :
        the bar frequency; compared directly against ``end_time - start_time``,
        so a ``pd.Timedelta``-like value is expected.
    region: str
        Region, for example, "cn", "us"

    Returns
    -------
    bool
        True means one piece of data to obtain.

    Raises
    ------
    NotImplementedError
        if `region` is not one of "cn", "tw", "us".
    """
    if region not in (REG_CN, REG_TW, REG_US):
        raise NotImplementedError(f"please implement the is_single_value func for {region}")
    # A window shorter than one bar always yields a single value.
    if end_time - start_time < freq:
        return True
    # Otherwise only the last minute bar before a session close is single-valued
    # (CN sessions end at 11:30 / 15:00, TW at 13:30, US at 16:00).
    if region == REG_CN:
        return (start_time.hour == 11 and start_time.minute == 29 and start_time.second == 0) or (
            start_time.hour == 14 and start_time.minute == 59 and start_time.second == 0
        )
    if region == REG_TW:
        return start_time.hour == 13 and start_time.minute >= 25 and start_time.second == 0
    return start_time.hour == 15 and start_time.minute == 59 and start_time.second == 0
Is there only one piece of data for stock market. Parameters ---------- start_time : Union[pd.Timestamp, str] closed start time for data. end_time : Union[pd.Timestamp, str] closed end time for data. freq : region: str Region, for example, "cn", "us" Returns ------- bool True means one piece of data to obtain.
19,681
from datetime import datetime
from typing import Union

# Session open/close times per region (values as given in this snippet).
CN_TIME = [
    datetime.strptime("9:30", "%H:%M"),
    datetime.strptime("11:30", "%H:%M"),
    datetime.strptime("13:00", "%H:%M"),
    datetime.strptime("15:00", "%H:%M"),
]
US_TIME = [datetime.strptime("9:30", "%H:%M"), datetime.strptime("16:00", "%H:%M")]
TW_TIME = [
    datetime.strptime("9:00", "%H:%M"),
    datetime.strptime("13:30", "%H:%M"),
]
REG_CN = "cn"
REG_US = "us"
REG_TW = "tw"


def time_to_day_index(time_obj: Union[str, datetime], region: str = REG_CN):
    """Map a wall-clock time to its minute index within the region's trading day.

    Parameters
    ----------
    time_obj : Union[str, datetime]
        a "%H:%M" string or a datetime whose time-of-day is used.
    region : str
        Region, for example, "cn", "us".

    Returns
    -------
    int
        zero-based minute offset from the session open; the CN afternoon
        session continues at index 120.

    Raises
    ------
    ValueError
        if the time lies outside trading hours or the region is unknown.
    """
    if isinstance(time_obj, str):
        time_obj = datetime.strptime(time_obj, "%H:%M")
    # (open, close, index offset) of each trading session, checked in order.
    if region == REG_CN:
        sessions = [(CN_TIME[0], CN_TIME[1], 0), (CN_TIME[2], CN_TIME[3], 120)]
    elif region == REG_US:
        sessions = [(US_TIME[0], US_TIME[1], 0)]
    elif region == REG_TW:
        sessions = [(TW_TIME[0], TW_TIME[1], 0)]
    else:
        raise ValueError(f"{region} is not supported")
    for session_open, session_close, offset in sessions:
        if session_open <= time_obj < session_close:
            return int((time_obj - session_open).total_seconds() / 60) + offset
    raise ValueError(f"{time_obj} is not the opening time of the {region} stock market")
null
19,682
import bisect
import re
from datetime import time, timedelta
from typing import List, Optional, Tuple, Union

import pandas as pd

# Region constants (values match `qlib.constant`, shown elsewhere in this file);
# inlined because this snippet's import context was lost in flattening.
REG_CN = "cn"
REG_US = "us"
REG_TW = "tw"


def get_min_cal(shift: int = 0, region: str = REG_CN) -> List[time]:
    """
    get the minute level calendar in day period

    Parameters
    ----------
    shift : int
        the shift direction would be like pandas shift.
        series.shift(1) will replace the value at `i`-th with the one at `i-1`-th
    region: str
        Region, for example, "cn", "us"

    Returns
    -------
    List[time]:
    """
    # NOTE(review): CN_TIME / TW_TIME / US_TIME (session open/close datetimes)
    # are defined at module level elsewhere in this file — confirm they are in scope.
    if region == REG_CN:
        sessions = [(CN_TIME[0], CN_TIME[1]), (CN_TIME[2], CN_TIME[3])]
    elif region == REG_TW:
        sessions = [(TW_TIME[0], TW_TIME[1])]
    elif region == REG_US:
        sessions = [(US_TIME[0], US_TIME[1])]
    else:
        raise ValueError(f"{region} is not supported")
    cal = []
    for session_open, session_close in sessions:
        # one entry per minute bar, shifted like pandas' series.shift(shift)
        for ts in pd.date_range(session_open, session_close - timedelta(minutes=1), freq="1min") - pd.Timedelta(
            minutes=shift
        ):
            cal.append(ts.time())
    return cal


class Freq:
    """Normalized data frequency: a (count, base-unit) pair such as (30, "min")."""

    NORM_FREQ_MONTH = "month"
    NORM_FREQ_WEEK = "week"
    NORM_FREQ_DAY = "day"
    NORM_FREQ_MINUTE = "min"  # using min instead of minute for align with Qlib's data filename
    SUPPORT_CAL_LIST = [NORM_FREQ_MINUTE, NORM_FREQ_DAY]  # FIXME: this list should from data

    def __init__(self, freq: Union[str, "Freq"]) -> None:
        if isinstance(freq, str):
            self.count, self.base = self.parse(freq)
        elif isinstance(freq, Freq):
            self.count, self.base = freq.count, freq.base
        else:
            raise NotImplementedError("This type of input is not supported")

    def __eq__(self, freq):
        freq = Freq(freq)
        return freq.count == self.count and freq.base == self.base

    def __str__(self):
        # trying to align to the filename of Qlib: day, 30min, 5min, 1min...
        return f"{self.count if self.count != 1 or self.base != 'day' else ''}{self.base}"

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({str(self)})"

    # Fixed: the following helpers take no `self`/`cls` and are invoked as
    # `self.parse(...)` / `Freq.get_min_delta(...)`; their `@staticmethod`
    # decorators were lost when the source was flattened.
    @staticmethod
    def parse(freq: str) -> Tuple[int, str]:
        """
        Parse freq into a unified format

        Parameters
        ----------
        freq : str
            Raw freq, supported freq should match the re '^([0-9]*)(month|mon|week|w|day|d|minute|min)$'

        Returns
        -------
        freq: Tuple[int, str]
            Unified freq, including freq count and unified freq unit. The freq unit should be '[month|week|day|minute]'.
            Example:

            .. code-block::

                print(Freq.parse("day"))
                (1, "day" )
                print(Freq.parse("2mon"))
                (2, "month")
                print(Freq.parse("10w"))
                (10, "week")
        """
        freq = freq.lower()
        match_obj = re.match("^([0-9]*)(month|mon|week|w|day|d|minute|min)$", freq)
        if match_obj is None:
            raise ValueError(
                "freq format is not supported, the freq should be like (n)month/mon, (n)week/w, (n)day/d, (n)minute/min"
            )
        _count = int(match_obj.group(1)) if match_obj.group(1) else 1
        _freq = match_obj.group(2)
        _freq_format_dict = {
            "month": Freq.NORM_FREQ_MONTH,
            "mon": Freq.NORM_FREQ_MONTH,
            "week": Freq.NORM_FREQ_WEEK,
            "w": Freq.NORM_FREQ_WEEK,
            "day": Freq.NORM_FREQ_DAY,
            "d": Freq.NORM_FREQ_DAY,
            "minute": Freq.NORM_FREQ_MINUTE,
            "min": Freq.NORM_FREQ_MINUTE,
        }
        return _count, _freq_format_dict[_freq]

    @staticmethod
    def get_timedelta(n: int, freq: str) -> pd.Timedelta:
        """
        get pd.Timedeta object

        Parameters
        ----------
        n : int
        freq : str
            Typically, they are the return value of Freq.parse

        Returns
        -------
        pd.Timedelta:
        """
        return pd.Timedelta(f"{n}{freq}")

    @staticmethod
    def get_min_delta(left_frq: str, right_freq: str):
        """Calculate freq delta

        Parameters
        ----------
        left_frq: str
        right_freq: str

        Returns
        -------
        the difference of the two frequencies expressed in minutes.
        """
        minutes_map = {
            Freq.NORM_FREQ_MINUTE: 1,
            Freq.NORM_FREQ_DAY: 60 * 24,
            Freq.NORM_FREQ_WEEK: 7 * 60 * 24,
            Freq.NORM_FREQ_MONTH: 30 * 7 * 60 * 24,
        }
        left_freq = Freq(left_frq)
        left_minutes = left_freq.count * minutes_map[left_freq.base]
        right_freq = Freq(right_freq)
        right_minutes = right_freq.count * minutes_map[right_freq.base]
        return left_minutes - right_minutes

    @staticmethod
    def get_recent_freq(base_freq: Union[str, "Freq"], freq_list: List[Union[str, "Freq"]]) -> Optional["Freq"]:
        """Get the closest freq to base_freq from freq_list

        Parameters
        ----------
        base_freq
        freq_list

        Returns
        -------
            if the recent frequency is found
                Freq
            else:
                None
        """
        base_freq = Freq(base_freq)
        # use the nearest freq greater than 0
        min_freq = None
        for _freq in freq_list:
            _min_delta = Freq.get_min_delta(base_freq, _freq)
            if _min_delta < 0:
                continue
            if min_freq is None:
                min_freq = (_min_delta, str(_freq))
                continue
            min_freq = min_freq if min_freq[0] <= _min_delta else (_min_delta, _freq)
        return min_freq[1] if min_freq else None


def get_day_min_idx_range(start: str, end: str, freq: str, region: str) -> Tuple[int, int]:
    """
    get the min-bar index in a day for a time range (both left and right is closed)
    given a fixed frequency

    Parameters
    ----------
    start : str
        e.g. "9:30"
    end : str
        e.g. "14:30"
    freq : str
        "1min"

    Returns
    -------
    Tuple[int, int]:
        The index of start and end in the calendar. Both left and right are **closed**
    """
    start = pd.Timestamp(start).time()
    end = pd.Timestamp(end).time()
    freq = Freq(freq)
    in_day_cal = get_min_cal(region=region)[:: freq.count]
    left_idx = bisect.bisect_left(in_day_cal, start)
    right_idx = bisect.bisect_right(in_day_cal, end) - 1
    return left_idx, right_idx
get the min-bar index in a day for a time range (both left and right is closed) given a fixed frequency Parameters ---------- start : str e.g. "9:30" end : str e.g. "14:30" freq : str "1min" Returns ------- Tuple[int, int]: The index of start and end in the calendar. Both left and right are **closed**
19,683
import pandas as pd


def epsilon_change(date_time: pd.Timestamp, direction: str = "backward") -> pd.Timestamp:
    """
    change the time by infinitely small quantity.

    Parameters
    ----------
    date_time : pd.Timestamp
        the original time
    direction : str
        the direction the time are going to
        - "backward" for going to history
        - "forward" for going to the future

    Returns
    -------
    pd.Timestamp:
        the shifted time

    Raises
    ------
    ValueError
        if `direction` is neither "backward" nor "forward".
    """
    # One second plays the role of "epsilon" here.
    step = {"backward": -1, "forward": 1}
    if direction not in step:
        raise ValueError("Wrong input")
    return date_time + step[direction] * pd.Timedelta(seconds=1)
change the time by infinitely small quantity. Parameters ---------- date_time : pd.Timestamp the original time direction : str the direction the time are going to - "backward" for going to history - "forward" for going to the future Returns ------- pd.Timestamp: the shifted time
19,684
from __future__ import annotations
from typing import Dict, Tuple, Union, Callable, List
import bisect

import numpy as np
import pandas as pd


class SingleData(IndexData):
    # NOTE(review): `IndexData` and `Index` are defined in another chunk of
    # this file; np.NaN was removed in numpy 2.0 (use np.nan when upgrading).
    def __init__(
        self, data: Union[int, float, np.number, list, dict, pd.Series] = [], index: Union[List, pd.Index, Index] = []
    ):
        """A data structure of index and numpy data.
        It's used to replace pd.Series due to high-speed.

        Parameters
        ----------
        data : Union[int, float, np.number, list, dict, pd.Series]
            the input data
        index : Union[list, pd.Index]
            the index of data.
            empty list indicates that auto filling the index to the length of data
        """
        # NOTE(review): the mutable default arguments ([]) are shared across
        # calls; benign here because they are only rebound, never mutated.
        # for special data type
        if isinstance(data, dict):
            assert len(index) == 0
            if len(data) > 0:
                index, data = zip(*data.items())
            else:
                index, data = [], []
        elif isinstance(data, pd.Series):
            assert len(index) == 0
            index, data = data.index, data.values
        elif isinstance(data, (int, float, np.number)):
            data = [data]
        super().__init__(data, index)
        assert self.ndim == 1

    def _align_indices(self, other):
        # used by the arithmetic operators: other must carry the same index,
        # possibly in a different order
        if self.index == other.index:
            return other
        elif set(self.index) == set(other.index):
            return other.reindex(self.index)
        else:
            raise ValueError(
                f"The indexes of self and other do not meet the requirements of the four arithmetic operations"
            )

    def reindex(self, index: Index, fill_value=np.NaN) -> SingleData:
        """reindex data and fill the missing value with np.NaN.

        Parameters
        ----------
        new_index : list
            new index
        fill_value:
            what value to fill if index is missing

        Returns
        -------
        SingleData
            reindex data
        """
        # TODO: This method can be more general
        if self.index == index:
            return self
        tmp_data = np.full(len(index), fill_value, dtype=np.float64)
        for index_id, index_item in enumerate(index):
            try:
                tmp_data[index_id] = self.loc[index_item]
            except KeyError:
                # missing index entries keep the fill_value
                pass
        return SingleData(tmp_data, index)

    def add(self, other: SingleData, fill_value=0):
        # TODO: add and __add__ are a little confusing.
        # This could be a more general
        common_index = self.index | other.index
        common_index, _ = common_index.sort()
        tmp_data1 = self.reindex(common_index, fill_value)
        tmp_data2 = other.reindex(common_index, fill_value)
        return tmp_data1.fillna(fill_value) + tmp_data2.fillna(fill_value)

    def to_dict(self):
        """convert SingleData to dict.

        Returns
        -------
        dict
            data with the dict format.
        """
        return dict(zip(self.index, self.data.tolist()))

    def to_series(self):
        return pd.Series(self.data, index=self.index)

    def __repr__(self) -> str:
        return str(pd.Series(self.data, index=self.index))


class MultiData(IndexData):
    def __init__(
        self,
        data: Union[int, float, np.number, list] = [],
        index: Union[List, pd.Index, Index] = [],
        columns: Union[List, pd.Index, Index] = [],
    ):
        """A data structure of index and numpy data.
        It's used to replace pd.DataFrame due to high-speed.

        Parameters
        ----------
        data : Union[list, np.ndarray]
            the dim of data must be 2.
        index : Union[List, pd.Index, Index]
            the index of data.
        columns: Union[List, pd.Index, Index]
            the columns of data.
        """
        if isinstance(data, pd.DataFrame):
            index, columns, data = data.index, data.columns, data.values
        super().__init__(data, index, columns)
        assert self.ndim == 2

    def _align_indices(self, other):
        if self.indices == other.indices:
            return other
        else:
            raise ValueError(
                f"The indexes of self and other do not meet the requirements of the four arithmetic operations"
            )

    def __repr__(self) -> str:
        return str(pd.DataFrame(self.data, index=self.index, columns=self.columns))


# The provided code snippet includes necessary dependencies for implementing the
# `concat` function (problem statement duplicated in the docstring below).
def concat(data_list: Union[SingleData], axis=0) -> MultiData:
    """concat all SingleData by index.
    TODO: now just for SingleData.

    Parameters
    ----------
    data_list : List[SingleData]
        the list of all SingleData to concat.

    Returns
    -------
    MultiData
        the MultiData with ndim == 2
    """
    # NOTE(review): the annotation Union[SingleData] collapses to SingleData;
    # the docstring says List[SingleData] — confirm and align when convenient.
    if axis == 0:
        raise NotImplementedError(f"please implement this func when axis == 0")
    elif axis == 1:
        # get all index and row
        all_index = set()
        for index_data in data_list:
            all_index = all_index | set(index_data.index)
        all_index = list(all_index)
        all_index.sort()
        all_index_map = dict(zip(all_index, range(len(all_index))))

        # concat all
        tmp_data = np.full((len(all_index), len(data_list)), np.NaN)
        for data_id, index_data in enumerate(data_list):
            assert isinstance(index_data, SingleData)
            now_data_map = [all_index_map[index] for index in index_data.index]
            tmp_data[now_data_map, data_id] = index_data.data
        return MultiData(tmp_data, all_index)
    else:
        raise ValueError(f"axis must be 0 or 1")
concat all SingleData by index. TODO: now just for SingleData. Parameters ---------- data_list : List[SingleData] the list of all SingleData to concat. Returns ------- MultiData the MultiData with ndim == 2
19,685
from __future__ import annotations
from typing import Dict, Tuple, Union, Callable, List
import bisect

import numpy as np
import pandas as pd


# NOTE(review): this SingleData definition is a duplicate of the one appearing
# earlier in this file (dataset rows repeat their dependency context).
class SingleData(IndexData):
    def __init__(
        self, data: Union[int, float, np.number, list, dict, pd.Series] = [], index: Union[List, pd.Index, Index] = []
    ):
        """A data structure of index and numpy data.
        It's used to replace pd.Series due to high-speed.

        Parameters
        ----------
        data : Union[int, float, np.number, list, dict, pd.Series]
            the input data
        index : Union[list, pd.Index]
            the index of data.
            empty list indicates that auto filling the index to the length of data
        """
        # for special data type
        if isinstance(data, dict):
            assert len(index) == 0
            if len(data) > 0:
                index, data = zip(*data.items())
            else:
                index, data = [], []
        elif isinstance(data, pd.Series):
            assert len(index) == 0
            index, data = data.index, data.values
        elif isinstance(data, (int, float, np.number)):
            data = [data]
        super().__init__(data, index)
        assert self.ndim == 1

    def _align_indices(self, other):
        if self.index == other.index:
            return other
        elif set(self.index) == set(other.index):
            return other.reindex(self.index)
        else:
            raise ValueError(
                f"The indexes of self and other do not meet the requirements of the four arithmetic operations"
            )

    def reindex(self, index: Index, fill_value=np.NaN) -> SingleData:
        """reindex data and fill the missing value with np.NaN.

        Parameters
        ----------
        new_index : list
            new index
        fill_value:
            what value to fill if index is missing

        Returns
        -------
        SingleData
            reindex data
        """
        # TODO: This method can be more general
        if self.index == index:
            return self
        tmp_data = np.full(len(index), fill_value, dtype=np.float64)
        for index_id, index_item in enumerate(index):
            try:
                tmp_data[index_id] = self.loc[index_item]
            except KeyError:
                pass
        return SingleData(tmp_data, index)

    def add(self, other: SingleData, fill_value=0):
        # TODO: add and __add__ are a little confusing.
        # This could be a more general
        common_index = self.index | other.index
        common_index, _ = common_index.sort()
        tmp_data1 = self.reindex(common_index, fill_value)
        tmp_data2 = other.reindex(common_index, fill_value)
        return tmp_data1.fillna(fill_value) + tmp_data2.fillna(fill_value)

    def to_dict(self):
        """convert SingleData to dict.

        Returns
        -------
        dict
            data with the dict format.
        """
        return dict(zip(self.index, self.data.tolist()))

    def to_series(self):
        return pd.Series(self.data, index=self.index)

    def __repr__(self) -> str:
        return str(pd.Series(self.data, index=self.index))


# The provided code snippet includes necessary dependencies for implementing the
# `sum_by_index` function (problem statement duplicated in the docstring below).
def sum_by_index(data_list: Union[SingleData], new_index: list, fill_value=0) -> SingleData:
    """concat all SingleData by new index.

    Parameters
    ----------
    data_list : List[SingleData]
        the list of all SingleData to sum.
    new_index : list
        the new_index of new SingleData.
    fill_value : float
        fill the missing values or replace np.NaN.

    Returns
    -------
    SingleData
        the SingleData with new_index and values after sum.
    """
    data_list = [data.to_dict() for data in data_list]
    data_sum = {}
    for key in new_index:  # renamed from `id` to avoid shadowing the builtin
        item_sum = 0
        for data in data_list:
            if key in data and not np.isnan(data[key]):
                item_sum += data[key]
            else:
                # missing or NaN entries contribute fill_value instead
                item_sum += fill_value
        data_sum[key] = item_sum
    return SingleData(data_sum)
concat all SingleData by new index. Parameters ---------- data_list : List[SingleData] the list of all SingleData to sum. new_index : list the new_index of new SingleData. fill_value : float fill the missing values or replace np.NaN. Returns ------- SingleData the SingleData with new_index and values after sum.
19,686
from __future__ import annotations
from typing import Dict, Tuple, Union, Callable, List
import bisect

import numpy as np
import pandas as pd


class BinaryOps:
    """Descriptor that forwards a binary operator to the wrapped object's
    `.data` payload, rebuilding an instance of the same class from the result."""

    def __init__(self, method_name):
        # name of the dunder to forward, e.g. "__add__"
        self.method_name = method_name

    def __get__(self, obj, *args):
        # bind object
        # NOTE(review): storing the bound object on the shared descriptor
        # instance is not re-entrant/thread-safe — confirm single-threaded use.
        self.obj = obj
        return self

    def __call__(self, other):
        self_data_method = getattr(self.obj.data, self.method_name)

        if isinstance(other, (int, float, np.number)):
            # scalar operand: apply element-wise, keep original indices
            return self.obj.__class__(self_data_method(other), *self.obj.indices)
        elif isinstance(other, self.obj.__class__):
            # same-class operand: align indices first, then operate on raw data
            other_aligned = self.obj._align_indices(other)
            return self.obj.__class__(self_data_method(other_aligned.data), *self.obj.indices)
        else:
            return NotImplemented


# The provided code snippet includes necessary dependencies for implementing the
# `index_data_ops_creator` function.
def index_data_ops_creator(*args, **kwargs):
    """
    meta class for auto generating operations for index data.
    """
    # inject one BinaryOps descriptor per supported operator into the class
    # dict (args = (name, bases, namespace), as for type())
    for method_name in ["__add__", "__sub__", "__rsub__", "__mul__", "__truediv__", "__eq__", "__gt__", "__lt__"]:
        args[2][method_name] = BinaryOps(method_name=method_name)
    return type(*args)
meta class for auto generating operations for index data.
19,687
import contextlib
import importlib
import os
from pathlib import Path
import pickle
import pkgutil
import re
import sys
from types import ModuleType
from typing import Any, Dict, List, Tuple, Union
from urllib.parse import urlparse

from qlib.typehint import InstConf


# decorator restored: the yield-then-restore pattern below is a context-manager
# body; without the decorator calling the function would just return a generator
@contextlib.contextmanager
def class_casting(obj: object, cls: type):
    """
    Python doesn't provide the downcasting mechanism.
    We use the trick here to downcast the class

    Parameters
    ----------
    obj : object
        the object to be cast
    cls : type
        the target class type
    """
    orig_cls = obj.__class__
    obj.__class__ = cls
    yield
    # restore the original class when the with-block exits normally
    obj.__class__ = orig_cls
Python doesn't provide the downcasting mechanism. We use the trick here to downcast the class Parameters ---------- obj : object the object to be cast cls : type the target class type
19,688
import contextlib
import importlib
import os
from pathlib import Path
import pickle
import pkgutil
import re
import sys
from types import ModuleType
from typing import Any, Dict, List, Tuple, Union
from urllib.parse import urlparse

from qlib.typehint import InstConf


# The provided code snippet includes necessary dependencies for implementing the
# `find_all_classes` function.
def find_all_classes(module_path: Union[str, ModuleType], cls: type) -> List[type]:
    """
    Find all the classes recursively that inherit from `cls` in a given module.
    - `cls` itself is also included

        >>> from qlib.data.dataset.handler import DataHandler
        >>> find_all_classes("qlib.contrib.data.handler", DataHandler)
        [<class 'qlib.contrib.data.handler.Alpha158'>, <class 'qlib.contrib.data.handler.Alpha158vwap'>, <class 'qlib.contrib.data.handler.Alpha360'>, <class 'qlib.contrib.data.handler.Alpha360vwap'>, <class 'qlib.data.dataset.handler.DataHandlerLP'>]

    TODO:
    - skip import error
    """
    if isinstance(module_path, ModuleType):
        mod = module_path
    else:
        mod = importlib.import_module(module_path)

    cls_list = []

    def _append_cls(obj):
        # Leverage the closure trick to reuse code
        # FIX(review, please confirm): the original guard was
        # `cls not in cls_list`, which (a) never de-duplicates subclasses and
        # (b) stops collecting anything once the base class itself has been
        # appended; checking `obj` matches the documented "cls itself is also
        # included" semantics and keeps entries unique.
        if isinstance(obj, type) and issubclass(obj, cls) and obj not in cls_list:
            cls_list.append(obj)

    for attr in dir(mod):
        _append_cls(getattr(mod, attr))

    if hasattr(mod, "__path__"):
        # if the model is a package
        for _, modname, _ in pkgutil.iter_modules(mod.__path__):
            sub_mod = importlib.import_module(f"{mod.__package__}.{modname}")
            for m_cls in find_all_classes(sub_mod, cls):
                _append_cls(m_cls)
    return cls_list
Find all the classes recursively that inherit from `cls` in a given module. - `cls` itself is also included >>> from qlib.data.dataset.handler import DataHandler >>> find_all_classes("qlib.contrib.data.handler", DataHandler) [<class 'qlib.contrib.data.handler.Alpha158'>, <class 'qlib.contrib.data.handler.Alpha158vwap'>, <class 'qlib.contrib.data.handler.Alpha360'>, <class 'qlib.contrib.data.handler.Alpha360vwap'>, <class 'qlib.data.dataset.handler.DataHandlerLP'>] TODO: - skip import error
19,689
import os
import shutil
import tempfile
import contextlib
from typing import Optional, Text, IO, Union
from pathlib import Path

from qlib.log import get_module_logger


# The provided code snippet includes necessary dependencies for implementing the
# `get_or_create_path` function (problem statement duplicated in the docstring).
def get_or_create_path(path: Optional[Text] = None, return_dir: bool = False):
    """Create or get a file or directory given the path and return_dir.

    Parameters
    ----------
    path: a string indicates the path or None indicates creating a temporary path.
    return_dir: if True, create and return a directory; otherwise c&r a file.
    """
    if path:
        if return_dir and not os.path.exists(path):
            os.makedirs(path)
        elif not return_dir:
            # return a file, thus we need to create its parent directory
            xpath = os.path.abspath(os.path.join(path, ".."))
            if not os.path.exists(xpath):
                os.makedirs(xpath)
    else:
        temp_dir = os.path.expanduser("~/tmp")
        if not os.path.exists(temp_dir):
            os.makedirs(temp_dir)
        if return_dir:
            # FIX: tempfile.mkdtemp() returns the path string itself; the
            # original `_, path = tempfile.mkdtemp(...)` tried to tuple-unpack
            # it and raised at runtime.
            path = tempfile.mkdtemp(dir=temp_dir)
        else:
            # FIX: tempfile.mkstemp() returns (fd, path); close the fd so the
            # open file descriptor is not leaked (the original discarded it).
            fd, path = tempfile.mkstemp(dir=temp_dir)
            os.close(fd)
    return path
Create or get a file or directory given the path and return_dir. Parameters ---------- path: a string indicates the path or None indicates creating a temporary path. return_dir: if True, create and return a directory; otherwise c&r a file.
19,690
import os
import shutil
import tempfile
import contextlib
from typing import Optional, Text, IO, Union
from pathlib import Path

from qlib.log import get_module_logger


# decorator restored: the docstring's `with save_multiple_parts_file(...) as ...`
# usage requires this generator to be a context manager
@contextlib.contextmanager
def save_multiple_parts_file(filename, format="gztar"):
    """Save multiple parts file

    Implementation process:
        1. get the absolute path to 'filename'
        2. create a 'filename' directory
        3. user does something with file_path('filename/')
        4. remove 'filename' directory
        5. make_archive 'filename' directory, and rename 'archive file' to filename

    :param filename: result model path
    :param format: archive format: one of "zip", "tar", "gztar", "bztar", or "xztar"
    :return: real model path

    Usage::

        >>> # The following code will create an archive file('~/tmp/test_file') containing 'test_doc_i'(i is 0-10) files.
        >>> with save_multiple_parts_file('~/tmp/test_file') as filename_dir:
        ...     for i in range(10):
        ...         temp_path = os.path.join(filename_dir, 'test_doc_{}'.format(str(i)))
        ...         with open(temp_path) as fp:
        ...             fp.write(str(i))
        ...
    """
    if filename.startswith("~"):
        filename = os.path.expanduser(filename)

    file_path = os.path.abspath(filename)

    # Create model dir
    if os.path.exists(file_path):
        raise FileExistsError("ERROR: file exists: {}, cannot be create the directory.".format(file_path))

    os.makedirs(file_path)

    # return model dir
    yield file_path

    # filename dir to filename.tar.gz file
    tar_file = shutil.make_archive(file_path, format=format, root_dir=file_path)

    # Remove filename dir
    if os.path.exists(file_path):
        shutil.rmtree(file_path)

    # filename.tar.gz rename to filename
    os.rename(tar_file, file_path)
Save multiple parts file Implementation process: 1. get the absolute path to 'filename' 2. create a 'filename' directory 3. user does something with file_path('filename/') 4. remove 'filename' directory 5. make_archive 'filename' directory, and rename 'archive file' to filename :param filename: result model path :param format: archive format: one of "zip", "tar", "gztar", "bztar", or "xztar" :return: real model path Usage:: >>> # The following code will create an archive file('~/tmp/test_file') containing 'test_doc_i'(i is 0-10) files. >>> with save_multiple_parts_file('~/tmp/test_file') as filename_dir: ... for i in range(10): ... temp_path = os.path.join(filename_dir, 'test_doc_{}'.format(str(i))) ... with open(temp_path) as fp: ... fp.write(str(i)) ...
19,691
import os
import shutil
import tempfile
import contextlib
from typing import Optional, Text, IO, Union
from pathlib import Path

from qlib.log import get_module_logger

log = get_module_logger("utils.file")


# decorator restored: the docstring's `with unpack_archive_with_buffer(...) as ...`
# usage requires this generator to be a context manager
@contextlib.contextmanager
def unpack_archive_with_buffer(buffer, format="gztar"):
    """Unpack archive with archive buffer
    After the call is finished, the archive file and directory will be deleted.

    Implementation process:
        1. create 'tempfile' in '~/tmp/' and directory
        2. 'buffer' write to 'tempfile'
        3. unpack archive file('tempfile')
        4. user does something with file_path('tempfile/')
        5. remove 'tempfile' and 'tempfile directory'

    :param buffer: bytes
    :param format: archive format: one of "zip", "tar", "gztar", "bztar", or "xztar"
    :return: unpack archive directory path

    Usage::

        >>> # The following code is to print all the file names in 'test_unpack.tar.gz'
        >>> with open('test_unpack.tar.gz') as fp:
        ...     buffer = fp.read()
        ...
        >>> with unpack_archive_with_buffer(buffer) as temp_dir:
        ...     for f_n in os.listdir(temp_dir):
        ...         print(f_n)
        ...
    """
    temp_dir = os.path.expanduser("~/tmp")
    if not os.path.exists(temp_dir):
        os.makedirs(temp_dir)
    with tempfile.NamedTemporaryFile("wb", delete=False, dir=temp_dir) as fp:
        fp.write(buffer)
        file_path = fp.name

    # FIX: compute tar_file before the try block so the `finally` clean-up can
    # never hit a NameError when an early statement raises.
    tar_file = file_path + ".tar.gz"
    try:
        os.rename(file_path, tar_file)

        # Create dir
        os.makedirs(file_path)
        shutil.unpack_archive(tar_file, format=format, extract_dir=file_path)

        # Return temp dir
        yield file_path
    except Exception as e:
        # best-effort by design: errors are logged, not propagated
        log.error(str(e))
    finally:
        # Remove temp tar file
        if os.path.exists(tar_file):
            os.unlink(tar_file)
        # Remove temp model dir
        if os.path.exists(file_path):
            shutil.rmtree(file_path)
Unpack archive with archive buffer After the call is finished, the archive file and directory will be deleted. Implementation process: 1. create 'tempfile' in '~/tmp/' and directory 2. 'buffer' write to 'tempfile' 3. unpack archive file('tempfile') 4. user does something with file_path('tempfile/') 5. remove 'tempfile' and 'tempfile directory' :param buffer: bytes :param format: archive format: one of "zip", "tar", "gztar", "bztar", or "xztar" :return: unpack archive directory path Usage:: >>> # The following code is to print all the file names in 'test_unpack.tar.gz' >>> with open('test_unpack.tar.gz') as fp: ... buffer = fp.read() ... >>> with unpack_archive_with_buffer(buffer) as temp_dir: ... for f_n in os.listdir(temp_dir): ... print(f_n) ...
19,692
import os
import shutil
import tempfile
import contextlib
from typing import Optional, Text, IO, Union
from pathlib import Path

from qlib.log import get_module_logger


# decorator restored: the temp file (delete=True) only exists while the with
# block runs, so callers must use this as a context manager
@contextlib.contextmanager
def get_tmp_file_with_buffer(buffer):
    """Write `buffer` to a named temporary file under ~/tmp and yield its path.

    The file is deleted automatically when the with-block exits.
    """
    temp_dir = os.path.expanduser("~/tmp")
    if not os.path.exists(temp_dir):
        os.makedirs(temp_dir)
    with tempfile.NamedTemporaryFile("wb", delete=True, dir=temp_dir) as fp:
        fp.write(buffer)
        # FIX: flush so readers opening the path inside the with-block see the
        # full content rather than whatever happened to be written through the
        # buffered layer
        fp.flush()
        file_path = fp.name
        yield file_path
null
19,693
import os
import shutil
import tempfile
import contextlib
from typing import Optional, Text, IO, Union
from pathlib import Path

from qlib.log import get_module_logger


# decorator restored: the body yields an open handle that must be closed when
# the with-block exits
@contextlib.contextmanager
def get_io_object(file: Union[IO, str, Path], *args, **kwargs) -> IO:
    """
    providing an easy interface to get an IO object

    Parameters
    ----------
    file : Union[IO, str, Path]
        an object representing the file

    Returns
    -------
    IO:
        an IO-like object

    Raises
    ------
    NotImplementedError:
    """
    # NOTE(review): isinstance against typing.IO only matches objects that
    # literally subclass typing.IO — builtin file objects do not, so an already
    # opened file falls through to the error branch; confirm intended, else
    # test for io.IOBase / a read attribute instead.
    if isinstance(file, IO):
        yield file
    else:
        if isinstance(file, str):
            file = Path(file)
        if not isinstance(file, Path):
            raise NotImplementedError(f"This type[{type(file)}] of input is not supported")
        with file.open(*args, **kwargs) as f:
            yield f
providing an easy interface to get an IO object Parameters ---------- file : Union[IO, str, Path] an object representing the file Returns ------- IO: an IO-like object Raises ------ NotImplementedError:
19,694
from copy import deepcopy
from typing import List, Union

import pandas as pd
import numpy as np


# The provided code snippet includes necessary dependencies for implementing the
# `robust_zscore` function (problem statement duplicated in the docstring below).
def robust_zscore(x: pd.Series, zscore=False):
    """Robust ZScore Normalization

    Use robust statistics for Z-Score normalization:
        mean(x) = median(x)
        std(x) = MAD(x) * 1.4826

    Reference:
        https://en.wikipedia.org/wiki/Median_absolute_deviation.
    """
    x = x - x.median()
    mad = x.abs().median()
    # 1.4826 rescales MAD so it estimates the std of a normal distribution;
    # clip to +/-3 "sigmas" to bound the influence of outliers
    x = np.clip(x / mad / 1.4826, -3, 3)
    if zscore:
        # optionally re-standardize the clipped values to zero mean / unit std
        x -= x.mean()
        x /= x.std()
    return x
Robust ZScore Normalization Use robust statistics for Z-Score normalization: mean(x) = median(x) std(x) = MAD(x) * 1.4826 Reference: https://en.wikipedia.org/wiki/Median_absolute_deviation.
19,695
from copy import deepcopy
from typing import List, Union

import pandas as pd
import numpy as np


def zscore(x: Union[pd.Series, pd.DataFrame]):
    """Classic Z-score normalisation: center on the mean, scale by the std."""
    centered = x - x.mean()
    return centered.div(x.std())
null
19,696
from copy import deepcopy
from typing import List, Union

import pandas as pd
import numpy as np


# The provided code snippet includes necessary dependencies for implementing the
# `deepcopy_basic_type` function (problem statement duplicated in the docstring).
def deepcopy_basic_type(obj: object) -> object:
    """
    deepcopy an object without copy the complicated objects.
        This is useful when you want to generate Qlib tasks and share the handler

    NOTE:
    - This function can't handle recursive objects!!!!!

    Parameters
    ----------
    obj : object
        the object to be copied

    Returns
    -------
    object:
        The copied object
    """
    if isinstance(obj, tuple):
        return tuple(deepcopy_basic_type(i) for i in obj)
    elif isinstance(obj, list):
        # idiom: list comprehension instead of list(generator)
        return [deepcopy_basic_type(i) for i in obj]
    elif isinstance(obj, dict):
        return {k: deepcopy_basic_type(v) for k, v in obj.items()}
    else:
        # anything else (including class instances) is shared, not copied
        return obj
deepcopy an object without copy the complicated objects. This is useful when you want to generate Qlib tasks and share the handler NOTE: - This function can't handle recursive objects!!!!! Parameters ---------- obj : object the object to be copied Returns ------- object: The copied object
19,697
from functools import partial
from threading import Thread
from typing import Callable, Text, Union

from joblib import Parallel, delayed
from joblib._parallel_backends import MultiprocessingBackend

import pandas as pd

from queue import Queue
import concurrent

from qlib.config import C, QlibConfig


class ParallelExt(Parallel):
    """joblib.Parallel that forwards `maxtasksperchild` to the multiprocessing backend."""

    def __init__(self, *args, **kwargs):
        maxtasksperchild = kwargs.pop("maxtasksperchild", None)
        super(ParallelExt, self).__init__(*args, **kwargs)
        if isinstance(self._backend, MultiprocessingBackend):
            # limit how many tasks a worker process handles before being replaced
            self._backend_args["maxtasksperchild"] = maxtasksperchild


# The provided code snippet includes necessary dependencies for implementing the
# `datetime_groupby_apply` function (problem statement duplicated in the docstring).
def datetime_groupby_apply(
    df, apply_func: Union[Callable, Text], axis=0, level="datetime", resample_rule="M", n_jobs=-1
):
    """datetime_groupby_apply
    This function will apply the `apply_func` on the datetime level index.

    Parameters
    ----------
    df :
        DataFrame for processing
    apply_func : Union[Callable, Text]
        apply_func for processing the data
        if a string is given, then it is treated as naive pandas function
    axis :
        which axis is the datetime level located
    level :
        which level is the datetime level
    resample_rule :
        How to resample the data to calculating parallel
    n_jobs :
        n_jobs for joblib

    Returns:
        pd.DataFrame
    """

    def _naive_group_apply(df):
        # NOTE(review): groupby(axis=...) and resample(axis=..., level=...) are
        # deprecated/removed in pandas 2.x — confirm the pinned pandas version
        # before upgrading.
        if isinstance(apply_func, str):
            # string => call the named pandas groupby method, e.g. "mean"
            return getattr(df.groupby(axis=axis, level=level), apply_func)()
        return df.groupby(axis=axis, level=level).apply(apply_func)

    if n_jobs != 1:
        # split by resample_rule chunks and process them in parallel workers
        dfs = ParallelExt(n_jobs=n_jobs)(
            delayed(_naive_group_apply)(sub_df) for idx, sub_df in df.resample(resample_rule, axis=axis, level=level)
        )
        return pd.concat(dfs, axis=axis).sort_index()
    else:
        return _naive_group_apply(df)
datetime_groupby_apply This function will apply the `apply_func` on the datetime level index. Parameters ---------- df : DataFrame for processing apply_func : Union[Callable, Text] apply_func for processing the data if a string is given, then it is treated as naive pandas function axis : which axis is the datetime level located level : which level is the datetime level resample_rule : How to resample the data to calculating parallel n_jobs : n_jobs for joblib Returns: pd.DataFrame
19,698
import numpy as np import pandas as pd from functools import partial from typing import Union, Callable from . import lazy_sort_index from .time import Freq, cal_sam_minute from ..config import C class Freq: NORM_FREQ_MONTH = "month" NORM_FREQ_WEEK = "week" NORM_FREQ_DAY = "day" NORM_FREQ_MINUTE = "min" # using min instead of minute for align with Qlib's data filename SUPPORT_CAL_LIST = [NORM_FREQ_MINUTE, NORM_FREQ_DAY] # FIXME: this list should from data def __init__(self, freq: Union[str, "Freq"]) -> None: if isinstance(freq, str): self.count, self.base = self.parse(freq) elif isinstance(freq, Freq): self.count, self.base = freq.count, freq.base else: raise NotImplementedError(f"This type of input is not supported") def __eq__(self, freq): freq = Freq(freq) return freq.count == self.count and freq.base == self.base def __str__(self): # trying to align to the filename of Qlib: day, 30min, 5min, 1min... return f"{self.count if self.count != 1 or self.base != 'day' else ''}{self.base}" def __repr__(self) -> str: return f"{self.__class__.__name__}({str(self)})" def parse(freq: str) -> Tuple[int, str]: """ Parse freq into a unified format Parameters ---------- freq : str Raw freq, supported freq should match the re '^([0-9]*)(month|mon|week|w|day|d|minute|min)$' Returns ------- freq: Tuple[int, str] Unified freq, including freq count and unified freq unit. The freq unit should be '[month|week|day|minute]'. Example: .. 
code-block:: print(Freq.parse("day")) (1, "day" ) print(Freq.parse("2mon")) (2, "month") print(Freq.parse("10w")) (10, "week") """ freq = freq.lower() match_obj = re.match("^([0-9]*)(month|mon|week|w|day|d|minute|min)$", freq) if match_obj is None: raise ValueError( "freq format is not supported, the freq should be like (n)month/mon, (n)week/w, (n)day/d, (n)minute/min" ) _count = int(match_obj.group(1)) if match_obj.group(1) else 1 _freq = match_obj.group(2) _freq_format_dict = { "month": Freq.NORM_FREQ_MONTH, "mon": Freq.NORM_FREQ_MONTH, "week": Freq.NORM_FREQ_WEEK, "w": Freq.NORM_FREQ_WEEK, "day": Freq.NORM_FREQ_DAY, "d": Freq.NORM_FREQ_DAY, "minute": Freq.NORM_FREQ_MINUTE, "min": Freq.NORM_FREQ_MINUTE, } return _count, _freq_format_dict[_freq] def get_timedelta(n: int, freq: str) -> pd.Timedelta: """ get pd.Timedeta object Parameters ---------- n : int freq : str Typically, they are the return value of Freq.parse Returns ------- pd.Timedelta: """ return pd.Timedelta(f"{n}{freq}") def get_min_delta(left_frq: str, right_freq: str): """Calculate freq delta Parameters ---------- left_frq: str right_freq: str Returns ------- """ minutes_map = { Freq.NORM_FREQ_MINUTE: 1, Freq.NORM_FREQ_DAY: 60 * 24, Freq.NORM_FREQ_WEEK: 7 * 60 * 24, Freq.NORM_FREQ_MONTH: 30 * 7 * 60 * 24, } left_freq = Freq(left_frq) left_minutes = left_freq.count * minutes_map[left_freq.base] right_freq = Freq(right_freq) right_minutes = right_freq.count * minutes_map[right_freq.base] return left_minutes - right_minutes def get_recent_freq(base_freq: Union[str, "Freq"], freq_list: List[Union[str, "Freq"]]) -> Optional["Freq"]: """Get the closest freq to base_freq from freq_list Parameters ---------- base_freq freq_list Returns ------- if the recent frequency is found Freq else: None """ base_freq = Freq(base_freq) # use the nearest freq greater than 0 min_freq = None for _freq in freq_list: _min_delta = Freq.get_min_delta(base_freq, _freq) if _min_delta < 0: continue if min_freq is None: min_freq = 
(_min_delta, str(_freq)) continue min_freq = min_freq if min_freq[0] <= _min_delta else (_min_delta, _freq) return min_freq[1] if min_freq else None def cal_sam_minute(x: pd.Timestamp, sam_minutes: int, region: str = REG_CN) -> pd.Timestamp: """ align the minute-level data to a down sampled calendar e.g. align 10:38 to 10:35 in 5 minute-level(10:30 in 10 minute-level) Parameters ---------- x : pd.Timestamp datetime to be aligned sam_minutes : int align to `sam_minutes` minute-level calendar region: str Region, for example, "cn", "us" Returns ------- pd.Timestamp: the datetime after aligned """ cal = get_min_cal(C.min_data_shift, region)[::sam_minutes] idx = bisect.bisect_right(cal, x.time()) - 1 _date, new_time = x.date(), cal[idx] return concat_date_time(_date, new_time) C = QlibConfig(_default_config) The provided code snippet includes necessary dependencies for implementing the `resam_calendar` function. Write a Python function `def resam_calendar( calendar_raw: np.ndarray, freq_raw: Union[str, Freq], freq_sam: Union[str, Freq], region: str = None ) -> np.ndarray` to solve the following problem: Resample the calendar with frequency freq_raw into the calendar with frequency freq_sam Assumption: - Fix length (240) of the calendar in each day. Parameters ---------- calendar_raw : np.ndarray The calendar with frequency freq_raw freq_raw : str Frequency of the raw calendar freq_sam : str Sample frequency region: str Region, for example, "cn", "us" Returns ------- np.ndarray The calendar with frequency freq_sam Here is the function: def resam_calendar( calendar_raw: np.ndarray, freq_raw: Union[str, Freq], freq_sam: Union[str, Freq], region: str = None ) -> np.ndarray: """ Resample the calendar with frequency freq_raw into the calendar with frequency freq_sam Assumption: - Fix length (240) of the calendar in each day. 
Parameters ---------- calendar_raw : np.ndarray The calendar with frequency freq_raw freq_raw : str Frequency of the raw calendar freq_sam : str Sample frequency region: str Region, for example, "cn", "us" Returns ------- np.ndarray The calendar with frequency freq_sam """ if region is None: region = C["region"] freq_raw = Freq(freq_raw) freq_sam = Freq(freq_sam) if not len(calendar_raw): return calendar_raw # if freq_sam is xminute, divide each trading day into several bars evenly if freq_sam.base == Freq.NORM_FREQ_MINUTE: if freq_raw.base != Freq.NORM_FREQ_MINUTE: raise ValueError("when sampling minute calendar, freq of raw calendar must be minute or min") else: if freq_raw.count > freq_sam.count: raise ValueError("raw freq must be higher than sampling freq") _calendar_minute = np.unique(list(map(lambda x: cal_sam_minute(x, freq_sam.count, region), calendar_raw))) return _calendar_minute # else, convert the raw calendar into day calendar, and divide the whole calendar into several bars evenly else: _calendar_day = np.unique(list(map(lambda x: pd.Timestamp(x.year, x.month, x.day, 0, 0, 0), calendar_raw))) if freq_sam.base == Freq.NORM_FREQ_DAY: return _calendar_day[:: freq_sam.count] elif freq_sam.base == Freq.NORM_FREQ_WEEK: _day_in_week = np.array(list(map(lambda x: x.dayofweek, _calendar_day))) _calendar_week = _calendar_day[np.ediff1d(_day_in_week, to_begin=-1) < 0] return _calendar_week[:: freq_sam.count] elif freq_sam.base == Freq.NORM_FREQ_MONTH: _day_in_month = np.array(list(map(lambda x: x.day, _calendar_day))) _calendar_month = _calendar_day[np.ediff1d(_day_in_month, to_begin=-1) < 0] return _calendar_month[:: freq_sam.count] else: raise ValueError("sampling freq must be xmin, xd, xw, xm")
Resample the calendar with frequency freq_raw into the calendar with frequency freq_sam Assumption: - Fix length (240) of the calendar in each day. Parameters ---------- calendar_raw : np.ndarray The calendar with frequency freq_raw freq_raw : str Frequency of the raw calendar freq_sam : str Sample frequency region: str Region, for example, "cn", "us" Returns ------- np.ndarray The calendar with frequency freq_sam
19,699
import numpy as np import pandas as pd from functools import partial from typing import Union, Callable from . import lazy_sort_index from .time import Freq, cal_sam_minute from ..config import C class Freq: NORM_FREQ_MONTH = "month" NORM_FREQ_WEEK = "week" NORM_FREQ_DAY = "day" NORM_FREQ_MINUTE = "min" # using min instead of minute for align with Qlib's data filename SUPPORT_CAL_LIST = [NORM_FREQ_MINUTE, NORM_FREQ_DAY] # FIXME: this list should from data def __init__(self, freq: Union[str, "Freq"]) -> None: if isinstance(freq, str): self.count, self.base = self.parse(freq) elif isinstance(freq, Freq): self.count, self.base = freq.count, freq.base else: raise NotImplementedError(f"This type of input is not supported") def __eq__(self, freq): freq = Freq(freq) return freq.count == self.count and freq.base == self.base def __str__(self): # trying to align to the filename of Qlib: day, 30min, 5min, 1min... return f"{self.count if self.count != 1 or self.base != 'day' else ''}{self.base}" def __repr__(self) -> str: return f"{self.__class__.__name__}({str(self)})" def parse(freq: str) -> Tuple[int, str]: """ Parse freq into a unified format Parameters ---------- freq : str Raw freq, supported freq should match the re '^([0-9]*)(month|mon|week|w|day|d|minute|min)$' Returns ------- freq: Tuple[int, str] Unified freq, including freq count and unified freq unit. The freq unit should be '[month|week|day|minute]'. Example: .. 
code-block:: print(Freq.parse("day")) (1, "day" ) print(Freq.parse("2mon")) (2, "month") print(Freq.parse("10w")) (10, "week") """ freq = freq.lower() match_obj = re.match("^([0-9]*)(month|mon|week|w|day|d|minute|min)$", freq) if match_obj is None: raise ValueError( "freq format is not supported, the freq should be like (n)month/mon, (n)week/w, (n)day/d, (n)minute/min" ) _count = int(match_obj.group(1)) if match_obj.group(1) else 1 _freq = match_obj.group(2) _freq_format_dict = { "month": Freq.NORM_FREQ_MONTH, "mon": Freq.NORM_FREQ_MONTH, "week": Freq.NORM_FREQ_WEEK, "w": Freq.NORM_FREQ_WEEK, "day": Freq.NORM_FREQ_DAY, "d": Freq.NORM_FREQ_DAY, "minute": Freq.NORM_FREQ_MINUTE, "min": Freq.NORM_FREQ_MINUTE, } return _count, _freq_format_dict[_freq] def get_timedelta(n: int, freq: str) -> pd.Timedelta: """ get pd.Timedeta object Parameters ---------- n : int freq : str Typically, they are the return value of Freq.parse Returns ------- pd.Timedelta: """ return pd.Timedelta(f"{n}{freq}") def get_min_delta(left_frq: str, right_freq: str): """Calculate freq delta Parameters ---------- left_frq: str right_freq: str Returns ------- """ minutes_map = { Freq.NORM_FREQ_MINUTE: 1, Freq.NORM_FREQ_DAY: 60 * 24, Freq.NORM_FREQ_WEEK: 7 * 60 * 24, Freq.NORM_FREQ_MONTH: 30 * 7 * 60 * 24, } left_freq = Freq(left_frq) left_minutes = left_freq.count * minutes_map[left_freq.base] right_freq = Freq(right_freq) right_minutes = right_freq.count * minutes_map[right_freq.base] return left_minutes - right_minutes def get_recent_freq(base_freq: Union[str, "Freq"], freq_list: List[Union[str, "Freq"]]) -> Optional["Freq"]: """Get the closest freq to base_freq from freq_list Parameters ---------- base_freq freq_list Returns ------- if the recent frequency is found Freq else: None """ base_freq = Freq(base_freq) # use the nearest freq greater than 0 min_freq = None for _freq in freq_list: _min_delta = Freq.get_min_delta(base_freq, _freq) if _min_delta < 0: continue if min_freq is None: min_freq = 
(_min_delta, str(_freq)) continue min_freq = min_freq if min_freq[0] <= _min_delta else (_min_delta, _freq) return min_freq[1] if min_freq else None D: BaseProviderWrapper = Wrapper() The provided code snippet includes necessary dependencies for implementing the `get_higher_eq_freq_feature` function. Write a Python function `def get_higher_eq_freq_feature(instruments, fields, start_time=None, end_time=None, freq="day", disk_cache=1)` to solve the following problem: get the feature with higher or equal frequency than `freq`. Returns ------- pd.DataFrame the feature with higher or equal frequency Here is the function: def get_higher_eq_freq_feature(instruments, fields, start_time=None, end_time=None, freq="day", disk_cache=1): """get the feature with higher or equal frequency than `freq`. Returns ------- pd.DataFrame the feature with higher or equal frequency """ from ..data.data import D # pylint: disable=C0415 try: _result = D.features(instruments, fields, start_time, end_time, freq=freq, disk_cache=disk_cache) _freq = freq except (ValueError, KeyError) as value_key_e: _, norm_freq = Freq.parse(freq) if norm_freq in [Freq.NORM_FREQ_MONTH, Freq.NORM_FREQ_WEEK, Freq.NORM_FREQ_DAY]: try: _result = D.features(instruments, fields, start_time, end_time, freq="day", disk_cache=disk_cache) _freq = "day" except (ValueError, KeyError): _result = D.features(instruments, fields, start_time, end_time, freq="1min", disk_cache=disk_cache) _freq = "1min" elif norm_freq == Freq.NORM_FREQ_MINUTE: _result = D.features(instruments, fields, start_time, end_time, freq="1min", disk_cache=disk_cache) _freq = "1min" else: raise ValueError(f"freq {freq} is not supported") from value_key_e return _result, _freq
get the feature with higher or equal frequency than `freq`. Returns ------- pd.DataFrame the feature with higher or equal frequency
19,700
import numpy as np import pandas as pd from functools import partial from typing import Union, Callable from . import lazy_sort_index from .time import Freq, cal_sam_minute from ..config import C def get_level_index(df: pd.DataFrame, level=Union[str, int]) -> int: """ get the level index of `df` given `level` Parameters ---------- df : pd.DataFrame data level : Union[str, int] index level Returns ------- int: The level index in the multiple index """ if isinstance(level, str): try: return df.index.names.index(level) except (AttributeError, ValueError): # NOTE: If level index is not given in the data, the default level index will be ('datetime', 'instrument') return ("datetime", "instrument").index(level) elif isinstance(level, int): return level else: raise NotImplementedError(f"This type of input is not supported") The provided code snippet includes necessary dependencies for implementing the `resam_ts_data` function. Write a Python function `def resam_ts_data( ts_feature: Union[pd.DataFrame, pd.Series], start_time: Union[str, pd.Timestamp] = None, end_time: Union[str, pd.Timestamp] = None, method: Union[str, Callable] = "last", method_kwargs: dict = {}, )` to solve the following problem: Resample value from time-series data - If `feature` has MultiIndex[instrument, datetime], apply the `method` to each instruemnt data with datetime in [start_time, end_time] Example: .. 
code-block:: print(feature) $close $volume instrument datetime SH600000 2010-01-04 86.778313 16162960.0 2010-01-05 87.433578 28117442.0 2010-01-06 85.713585 23632884.0 2010-01-07 83.788803 20813402.0 2010-01-08 84.730675 16044853.0 SH600655 2010-01-04 2699.567383 158193.328125 2010-01-08 2612.359619 77501.406250 2010-01-11 2712.982422 160852.390625 2010-01-12 2788.688232 164587.937500 2010-01-13 2790.604004 145460.453125 print(resam_ts_data(feature, start_time="2010-01-04", end_time="2010-01-05", fields=["$close", "$volume"], method="last")) $close $volume instrument SH600000 87.433578 28117442.0 SH600655 2699.567383 158193.328125 - Else, the `feature` should have Index[datetime], just apply the `method` to `feature` directly Example: .. code-block:: print(feature) $close $volume datetime 2010-01-04 86.778313 16162960.0 2010-01-05 87.433578 28117442.0 2010-01-06 85.713585 23632884.0 2010-01-07 83.788803 20813402.0 2010-01-08 84.730675 16044853.0 print(resam_ts_data(feature, start_time="2010-01-04", end_time="2010-01-05", method="last")) $close 87.433578 $volume 28117442.0 print(resam_ts_data(feature['$close'], start_time="2010-01-04", end_time="2010-01-05", method="last")) 87.433578 Parameters ---------- ts_feature : Union[pd.DataFrame, pd.Series] Raw time-series feature to be resampled start_time : Union[str, pd.Timestamp], optional start sampling time, by default None end_time : Union[str, pd.Timestamp], optional end sampling time, by default None method : Union[str, Callable], optional sample method, apply method function to each stock series data, by default "last" - If type(method) is str or callable function, it should be an attribute of SeriesGroupBy or DataFrameGroupby, and applies groupy.method for the sliced time-series data - If method is None, do nothing for the sliced time-series data. method_kwargs : dict, optional arguments of method, by default {} Returns ------- The resampled DataFrame/Series/value, return None when the resampled data is empty. 
Here is the function: def resam_ts_data( ts_feature: Union[pd.DataFrame, pd.Series], start_time: Union[str, pd.Timestamp] = None, end_time: Union[str, pd.Timestamp] = None, method: Union[str, Callable] = "last", method_kwargs: dict = {}, ): """ Resample value from time-series data - If `feature` has MultiIndex[instrument, datetime], apply the `method` to each instruemnt data with datetime in [start_time, end_time] Example: .. code-block:: print(feature) $close $volume instrument datetime SH600000 2010-01-04 86.778313 16162960.0 2010-01-05 87.433578 28117442.0 2010-01-06 85.713585 23632884.0 2010-01-07 83.788803 20813402.0 2010-01-08 84.730675 16044853.0 SH600655 2010-01-04 2699.567383 158193.328125 2010-01-08 2612.359619 77501.406250 2010-01-11 2712.982422 160852.390625 2010-01-12 2788.688232 164587.937500 2010-01-13 2790.604004 145460.453125 print(resam_ts_data(feature, start_time="2010-01-04", end_time="2010-01-05", fields=["$close", "$volume"], method="last")) $close $volume instrument SH600000 87.433578 28117442.0 SH600655 2699.567383 158193.328125 - Else, the `feature` should have Index[datetime], just apply the `method` to `feature` directly Example: .. 
code-block:: print(feature) $close $volume datetime 2010-01-04 86.778313 16162960.0 2010-01-05 87.433578 28117442.0 2010-01-06 85.713585 23632884.0 2010-01-07 83.788803 20813402.0 2010-01-08 84.730675 16044853.0 print(resam_ts_data(feature, start_time="2010-01-04", end_time="2010-01-05", method="last")) $close 87.433578 $volume 28117442.0 print(resam_ts_data(feature['$close'], start_time="2010-01-04", end_time="2010-01-05", method="last")) 87.433578 Parameters ---------- ts_feature : Union[pd.DataFrame, pd.Series] Raw time-series feature to be resampled start_time : Union[str, pd.Timestamp], optional start sampling time, by default None end_time : Union[str, pd.Timestamp], optional end sampling time, by default None method : Union[str, Callable], optional sample method, apply method function to each stock series data, by default "last" - If type(method) is str or callable function, it should be an attribute of SeriesGroupBy or DataFrameGroupby, and applies groupy.method for the sliced time-series data - If method is None, do nothing for the sliced time-series data. method_kwargs : dict, optional arguments of method, by default {} Returns ------- The resampled DataFrame/Series/value, return None when the resampled data is empty. 
""" selector_datetime = slice(start_time, end_time) from ..data.dataset.utils import get_level_index # pylint: disable=C0415 feature = lazy_sort_index(ts_feature) datetime_level = get_level_index(feature, level="datetime") == 0 if datetime_level: feature = feature.loc[selector_datetime] else: feature = feature.loc(axis=0)[(slice(None), selector_datetime)] if feature.empty: return None if isinstance(feature.index, pd.MultiIndex): if callable(method): method_func = method return feature.groupby(level="instrument").apply(method_func, **method_kwargs) elif isinstance(method, str): return getattr(feature.groupby(level="instrument"), method)(**method_kwargs) else: if callable(method): method_func = method return method_func(feature, **method_kwargs) elif isinstance(method, str): return getattr(feature, method)(**method_kwargs) return feature
Resample value from time-series data - If `feature` has MultiIndex[instrument, datetime], apply the `method` to each instruemnt data with datetime in [start_time, end_time] Example: .. code-block:: print(feature) $close $volume instrument datetime SH600000 2010-01-04 86.778313 16162960.0 2010-01-05 87.433578 28117442.0 2010-01-06 85.713585 23632884.0 2010-01-07 83.788803 20813402.0 2010-01-08 84.730675 16044853.0 SH600655 2010-01-04 2699.567383 158193.328125 2010-01-08 2612.359619 77501.406250 2010-01-11 2712.982422 160852.390625 2010-01-12 2788.688232 164587.937500 2010-01-13 2790.604004 145460.453125 print(resam_ts_data(feature, start_time="2010-01-04", end_time="2010-01-05", fields=["$close", "$volume"], method="last")) $close $volume instrument SH600000 87.433578 28117442.0 SH600655 2699.567383 158193.328125 - Else, the `feature` should have Index[datetime], just apply the `method` to `feature` directly Example: .. code-block:: print(feature) $close $volume datetime 2010-01-04 86.778313 16162960.0 2010-01-05 87.433578 28117442.0 2010-01-06 85.713585 23632884.0 2010-01-07 83.788803 20813402.0 2010-01-08 84.730675 16044853.0 print(resam_ts_data(feature, start_time="2010-01-04", end_time="2010-01-05", method="last")) $close 87.433578 $volume 28117442.0 print(resam_ts_data(feature['$close'], start_time="2010-01-04", end_time="2010-01-05", method="last")) 87.433578 Parameters ---------- ts_feature : Union[pd.DataFrame, pd.Series] Raw time-series feature to be resampled start_time : Union[str, pd.Timestamp], optional start sampling time, by default None end_time : Union[str, pd.Timestamp], optional end sampling time, by default None method : Union[str, Callable], optional sample method, apply method function to each stock series data, by default "last" - If type(method) is str or callable function, it should be an attribute of SeriesGroupBy or DataFrameGroupby, and applies groupy.method for the sliced time-series data - If method is None, do nothing for the sliced 
time-series data. method_kwargs : dict, optional arguments of method, by default {} Returns ------- The resampled DataFrame/Series/value, return None when the resampled data is empty.
19,701
import numpy as np import pandas as pd from functools import partial from typing import Union, Callable from . import lazy_sort_index from .time import Freq, cal_sam_minute from ..config import C def get_valid_value(series, last=True): """get the first/last not nan value of pd.Series with single level index Parameters ---------- series : pd.Series series should not be empty last : bool, optional whether to get the last valid value, by default True - if last is True, get the last valid value - else, get the first valid value Returns ------- Nan | float the first/last valid value """ return series.fillna(method="ffill").iloc[-1] if last else series.fillna(method="bfill").iloc[0] The provided code snippet includes necessary dependencies for implementing the `_ts_data_valid` function. Write a Python function `def _ts_data_valid(ts_feature, last=False)` to solve the following problem: get the first/last not nan value of pd.Series|DataFrame with single level index Here is the function: def _ts_data_valid(ts_feature, last=False): """get the first/last not nan value of pd.Series|DataFrame with single level index""" if isinstance(ts_feature, pd.DataFrame): return ts_feature.apply(lambda column: get_valid_value(column, last=last)) elif isinstance(ts_feature, pd.Series): return get_valid_value(ts_feature, last=last) else: raise TypeError(f"ts_feature should be pd.DataFrame/Series, not {type(ts_feature)}")
get the first/last not nan value of pd.Series|DataFrame with single level index
19,702
import os import numpy as np import pandas as pd from pathlib import Path DATA_PATH = Path(os.path.join("data", "pickle", "backtest")) OUTPUT_PATH = Path(os.path.join("data", "orders")) np.random.seed(1234) np.random.shuffle(stocks) def generate_order(stock: str, start_idx: int, end_idx: int) -> bool: dataset = pd.read_pickle(DATA_PATH / f"{stock}.pkl") df = dataset.handler.fetch(level=None).reset_index() if len(df) == 0 or df.isnull().values.any() or min(df["$volume0"]) < 1e-5: return False df["date"] = df["datetime"].dt.date.astype("datetime64") df = df.set_index(["instrument", "datetime", "date"]) df = df.groupby("date").take(range(start_idx, end_idx)).droplevel(level=0) order_all = pd.DataFrame(df.groupby(level=(2, 0)).mean().dropna()) order_all["amount"] = np.random.lognormal(-3.28, 1.14) * order_all["$volume0"] order_all = order_all[order_all["amount"] > 0.0] order_all["order_type"] = 0 order_all = order_all.drop(columns=["$volume0"]) order_train = order_all[order_all.index.get_level_values(0) <= pd.Timestamp("2021-06-30")] order_test = order_all[order_all.index.get_level_values(0) > pd.Timestamp("2021-06-30")] order_valid = order_test[order_test.index.get_level_values(0) <= pd.Timestamp("2021-09-30")] order_test = order_test[order_test.index.get_level_values(0) > pd.Timestamp("2021-09-30")] for order, tag in zip((order_train, order_valid, order_test, order_all), ("train", "valid", "test", "all")): path = OUTPUT_PATH / tag os.makedirs(path, exist_ok=True) if len(order) > 0: order.to_pickle(path / f"{stock}.pkl.target") return True
null
19,703
import qlib import optuna from qlib.constant import REG_CN from qlib.utils import init_instance_by_config from qlib.tests.data import GetData from qlib.tests.config import get_dataset_config, CSI300_MARKET, DATASET_ALPHA360_CLASS def objective(trial): task = { "model": { "class": "LGBModel", "module_path": "qlib.contrib.model.gbdt", "kwargs": { "loss": "mse", "colsample_bytree": trial.suggest_uniform("colsample_bytree", 0.5, 1), "learning_rate": trial.suggest_uniform("learning_rate", 0, 1), "subsample": trial.suggest_uniform("subsample", 0, 1), "lambda_l1": trial.suggest_loguniform("lambda_l1", 1e-8, 1e4), "lambda_l2": trial.suggest_loguniform("lambda_l2", 1e-8, 1e4), "max_depth": 10, "num_leaves": trial.suggest_int("num_leaves", 1, 1024), "feature_fraction": trial.suggest_uniform("feature_fraction", 0.4, 1.0), "bagging_fraction": trial.suggest_uniform("bagging_fraction", 0.4, 1.0), "bagging_freq": trial.suggest_int("bagging_freq", 1, 7), "min_data_in_leaf": trial.suggest_int("min_data_in_leaf", 1, 50), "min_child_samples": trial.suggest_int("min_child_samples", 5, 100), }, }, } evals_result = dict() model = init_instance_by_config(task["model"]) model.fit(dataset, evals_result=evals_result) return min(evals_result["valid"])
null
19,704
import qlib import optuna from qlib.constant import REG_CN from qlib.utils import init_instance_by_config from qlib.tests.config import CSI300_DATASET_CONFIG from qlib.tests.data import GetData def objective(trial): task = { "model": { "class": "LGBModel", "module_path": "qlib.contrib.model.gbdt", "kwargs": { "loss": "mse", "colsample_bytree": trial.suggest_uniform("colsample_bytree", 0.5, 1), "learning_rate": trial.suggest_uniform("learning_rate", 0, 1), "subsample": trial.suggest_uniform("subsample", 0, 1), "lambda_l1": trial.suggest_loguniform("lambda_l1", 1e-8, 1e4), "lambda_l2": trial.suggest_loguniform("lambda_l2", 1e-8, 1e4), "max_depth": 10, "num_leaves": trial.suggest_int("num_leaves", 1, 1024), "feature_fraction": trial.suggest_uniform("feature_fraction", 0.4, 1.0), "bagging_fraction": trial.suggest_uniform("bagging_fraction", 0.4, 1.0), "bagging_freq": trial.suggest_int("bagging_freq", 1, 7), "min_data_in_leaf": trial.suggest_int("min_data_in_leaf", 1, 50), "min_child_samples": trial.suggest_int("min_child_samples", 5, 100), }, }, } evals_result = dict() model = init_instance_by_config(task["model"]) model.fit(dataset, evals_result=evals_result) return min(evals_result["valid"])
null
19,705
import os import sys import fire import time import glob import yaml import shutil import signal import inspect import tempfile import functools import statistics import subprocess from datetime import datetime from pathlib import Path from operator import xor from pprint import pprint import qlib from qlib.workflow import R from qlib.tests.data import GetData def only_allow_defined_args(function_to_decorate): @functools.wraps(function_to_decorate) def _return_wrapped(*args, **kwargs): """Internal wrapper function.""" argspec = inspect.getfullargspec(function_to_decorate) valid_names = set(argspec.args + argspec.kwonlyargs) if "self" in valid_names: valid_names.remove("self") for arg_name in kwargs: if arg_name not in valid_names: raise ValueError("Unknown argument seen '%s', expected: [%s]" % (arg_name, ", ".join(valid_names))) return function_to_decorate(*args, **kwargs) return _return_wrapped
null
19,706
import os import sys import fire import time import glob import yaml import shutil import signal import inspect import tempfile import functools import statistics import subprocess from datetime import datetime from pathlib import Path from operator import xor from pprint import pprint import qlib from qlib.workflow import R from qlib.tests.data import GetData def handler(signum, frame): os.system("kill -9 %d" % os.getpid())
null
19,707
import os import sys import fire import time import glob import yaml import shutil import signal import inspect import tempfile import functools import statistics import subprocess from datetime import datetime from pathlib import Path from operator import xor from pprint import pprint import qlib from qlib.workflow import R from qlib.tests.data import GetData def cal_mean_std(results) -> dict: mean_std = dict() for fn in results: mean_std[fn] = dict() for metric in results[fn]: mean = statistics.mean(results[fn][metric]) if len(results[fn][metric]) > 1 else results[fn][metric][0] std = statistics.stdev(results[fn][metric]) if len(results[fn][metric]) > 1 else 0 mean_std[fn][metric] = [mean, std] return mean_std
null
19,708
import os import sys import fire import time import glob import yaml import shutil import signal import inspect import tempfile import functools import statistics import subprocess from datetime import datetime from pathlib import Path from operator import xor from pprint import pprint import qlib from qlib.workflow import R from qlib.tests.data import GetData def execute(cmd, wait_when_err=False, raise_err=True): def create_env(): # create env temp_dir = tempfile.mkdtemp() env_path = Path(temp_dir).absolute() sys.stderr.write(f"Creating Virtual Environment with path: {env_path}...\n") execute(f"conda create --prefix {env_path} python=3.7 -y") python_path = env_path / "bin" / "python" # TODO: FIX ME! sys.stderr.write("\n") # get anaconda activate path conda_activate = Path(os.environ["CONDA_PREFIX"]) / "bin" / "activate" # TODO: FIX ME! return temp_dir, env_path, python_path, conda_activate
null
19,709
import os import sys import fire import time import glob import yaml import shutil import signal import inspect import tempfile import functools import statistics import subprocess from datetime import datetime from pathlib import Path from operator import xor from pprint import pprint import qlib from qlib.workflow import R from qlib.tests.data import GetData def get_all_folders(models, exclude) -> dict: folders = dict() if isinstance(models, str): model_list = models.split(",") models = [m.lower().strip("[ ]") for m in model_list] elif isinstance(models, list): models = [m.lower() for m in models] elif models is None: models = [f.name.lower() for f in os.scandir("benchmarks")] else: raise ValueError("Input models type is not supported. Please provide str or list without space.") for f in os.scandir("benchmarks"): add = xor(bool(f.name.lower() in models), bool(exclude)) if add: path = Path("benchmarks") / f.name folders[f.name] = str(path.resolve()) return folders
null
19,710
import os import sys import fire import time import glob import yaml import shutil import signal import inspect import tempfile import functools import statistics import subprocess from datetime import datetime from pathlib import Path from operator import xor from pprint import pprint import qlib from qlib.workflow import R from qlib.tests.data import GetData def get_all_files(folder_path, dataset, universe="") -> (str, str): if universe != "": universe = f"_{universe}" yaml_path = str(Path(f"{folder_path}") / f"*{dataset}{universe}.yaml") req_path = str(Path(f"{folder_path}") / f"*.txt") yaml_file = glob.glob(yaml_path) req_file = glob.glob(req_path) if len(yaml_file) == 0: return None, None else: return yaml_file[0], req_file[0]
null
19,711
import os import sys import fire import time import glob import yaml import shutil import signal import inspect import tempfile import functools import statistics import subprocess from datetime import datetime from pathlib import Path from operator import xor from pprint import pprint import qlib from qlib.workflow import R from qlib.tests.data import GetData R: QlibRecorderWrapper = RecorderWrapper() def get_all_results(folders) -> dict: results = dict() for fn in folders: try: exp = R.get_exp(experiment_name=fn, create=False) except ValueError: # No experiment results continue recorders = exp.list_recorders() result = dict() result["annualized_return_with_cost"] = list() result["information_ratio_with_cost"] = list() result["max_drawdown_with_cost"] = list() result["ic"] = list() result["icir"] = list() result["rank_ic"] = list() result["rank_icir"] = list() for recorder_id in recorders: if recorders[recorder_id].status == "FINISHED": recorder = R.get_recorder(recorder_id=recorder_id, experiment_name=fn) metrics = recorder.list_metrics() if "1day.excess_return_with_cost.annualized_return" not in metrics: print(f"{recorder_id} is skipped due to incomplete result") continue result["annualized_return_with_cost"].append(metrics["1day.excess_return_with_cost.annualized_return"]) result["information_ratio_with_cost"].append(metrics["1day.excess_return_with_cost.information_ratio"]) result["max_drawdown_with_cost"].append(metrics["1day.excess_return_with_cost.max_drawdown"]) result["ic"].append(metrics["IC"]) result["icir"].append(metrics["ICIR"]) result["rank_ic"].append(metrics["Rank IC"]) result["rank_icir"].append(metrics["Rank ICIR"]) results[fn] = result return results
null
19,712
import os import sys import fire import time import glob import yaml import shutil import signal import inspect import tempfile import functools import statistics import subprocess from datetime import datetime from pathlib import Path from operator import xor from pprint import pprint import qlib from qlib.workflow import R from qlib.tests.data import GetData def gen_and_save_md_table(metrics, dataset): table = "| Model Name | Dataset | IC | ICIR | Rank IC | Rank ICIR | Annualized Return | Information Ratio | Max Drawdown |\n" table += "|---|---|---|---|---|---|---|---|---|\n" for fn in metrics: ic = metrics[fn]["ic"] icir = metrics[fn]["icir"] ric = metrics[fn]["rank_ic"] ricir = metrics[fn]["rank_icir"] ar = metrics[fn]["annualized_return_with_cost"] ir = metrics[fn]["information_ratio_with_cost"] md = metrics[fn]["max_drawdown_with_cost"] table += f"| {fn} | {dataset} | {ic[0]:5.4f}±{ic[1]:2.2f} | {icir[0]:5.4f}±{icir[1]:2.2f}| {ric[0]:5.4f}±{ric[1]:2.2f} | {ricir[0]:5.4f}±{ricir[1]:2.2f} | {ar[0]:5.4f}±{ar[1]:2.2f} | {ir[0]:5.4f}±{ir[1]:2.2f}| {md[0]:5.4f}±{md[1]:2.2f} |\n" pprint(table) with open("table.md", "w") as f: f.write(table) return table
null
19,713
import os import sys import fire import time import glob import yaml import shutil import signal import inspect import tempfile import functools import statistics import subprocess from datetime import datetime from pathlib import Path from operator import xor from pprint import pprint import qlib from qlib.workflow import R from qlib.tests.data import GetData def gen_yaml_file_without_seed_kwargs(yaml_path, temp_dir): with open(yaml_path, "r") as fp: config = yaml.safe_load(fp) try: del config["task"]["model"]["kwargs"]["seed"] except KeyError: # If the key does not exists, use original yaml # NOTE: it is very important if the model most run in original path(when sys.rel_path is used) return yaml_path else: # otherwise, generating a new yaml without random seed file_name = yaml_path.split("/")[-1] temp_path = os.path.join(temp_dir, file_name) with open(temp_path, "w") as fp: yaml.dump(config, fp) return temp_path
null
19,714
from pathlib import Path from typing import Union import numpy as np import pandas as pd import tensorflow.compat.v1 as tf import data_formatters.base import expt_settings.configs import libs.hyperparam_opt import libs.tft_model import libs.utils as utils import os import datetime as dte from qlib.model.base import ModelFT from qlib.data.dataset import DatasetH from qlib.data.dataset.handler import DataHandlerLP DATASET_SETTING = { "Alpha158": { "feature_col": [ "RESI5", "WVMA5", "RSQR5", "KLEN", "RSQR10", "CORR5", "CORD5", "CORR10", "ROC60", "RESI10", "VSTD5", "RSQR60", "CORR60", "WVMA60", "STD5", "RSQR20", "CORD60", "CORD10", "CORR20", "KLOW", ], "label_col": "LABEL0", }, "Alpha360": { "feature_col": [ "HIGH0", "LOW0", "OPEN0", "CLOSE1", "HIGH1", "VOLUME1", "LOW1", "VOLUME3", "OPEN1", "VOLUME4", "CLOSE2", "CLOSE4", "VOLUME5", "LOW2", "CLOSE3", "VOLUME2", "HIGH2", "LOW4", "VOLUME8", "VOLUME11", ], "label_col": "LABEL0", }, } def fill_test_na(test_df): test_df_res = test_df.copy() feature_cols = ~test_df_res.columns.str.contains("label", case=False) test_feature_fna = test_df_res.loc[:, feature_cols].groupby("datetime").apply(lambda df: df.fillna(df.mean())) test_df_res.loc[:, feature_cols] = test_feature_fna return test_df_res The provided code snippet includes necessary dependencies for implementing the `process_qlib_data` function. Write a Python function `def process_qlib_data(df, dataset, fillna=False)` to solve the following problem: Prepare data to fit the TFT model. Args: df: Original DataFrame. fillna: Whether to fill the data with the mean values. Returns: Transformed DataFrame. Here is the function: def process_qlib_data(df, dataset, fillna=False): """Prepare data to fit the TFT model. Args: df: Original DataFrame. fillna: Whether to fill the data with the mean values. Returns: Transformed DataFrame. 
""" # Several features selected manually feature_col = DATASET_SETTING[dataset]["feature_col"] label_col = [DATASET_SETTING[dataset]["label_col"]] temp_df = df.loc[:, feature_col + label_col] if fillna: temp_df = fill_test_na(temp_df) temp_df = temp_df.swaplevel() temp_df = temp_df.sort_index() temp_df = temp_df.reset_index(level=0) dates = pd.to_datetime(temp_df.index) temp_df["date"] = dates temp_df["day_of_week"] = dates.dayofweek temp_df["month"] = dates.month temp_df["year"] = dates.year temp_df["const"] = 1.0 return temp_df
Prepare data to fit the TFT model. Args: df: Original DataFrame. fillna: Whether to fill the data with the mean values. Returns: Transformed DataFrame.
19,715
from pathlib import Path from typing import Union import numpy as np import pandas as pd import tensorflow.compat.v1 as tf import data_formatters.base import expt_settings.configs import libs.hyperparam_opt import libs.tft_model import libs.utils as utils import os import datetime as dte from qlib.model.base import ModelFT from qlib.data.dataset import DatasetH from qlib.data.dataset.handler import DataHandlerLP def get_shifted_label(data_df, shifts=5, col_shift="LABEL0"): return data_df[[col_shift]].groupby("instrument").apply(lambda df: df.shift(shifts)) def process_predicted(df, col_name): """Transform the TFT predicted data into Qlib format. Args: df: Original DataFrame. fillna: New column name. Returns: Transformed DataFrame. """ df_res = df.copy() df_res = df_res.rename(columns={"forecast_time": "datetime", "identifier": "instrument", "t+4": col_name}) df_res = df_res.set_index(["datetime", "instrument"]).sort_index() df_res = df_res[[col_name]] return df_res def format_score(forecast_df, col_name="pred", label_shift=5): pred = process_predicted(forecast_df, col_name=col_name) pred = get_shifted_label(pred, shifts=-label_shift, col_shift=col_name) pred = pred.dropna()[col_name] return pred
null
19,716
from pathlib import Path from typing import Union import numpy as np import pandas as pd import tensorflow.compat.v1 as tf import data_formatters.base import expt_settings.configs import libs.hyperparam_opt import libs.tft_model import libs.utils as utils import os import datetime as dte from qlib.model.base import ModelFT from qlib.data.dataset import DatasetH from qlib.data.dataset.handler import DataHandlerLP def transform_df(df, col_name="LABEL0"): df_res = df["feature"] df_res[col_name] = df["label"] return df_res
null
19,717
import os import pathlib import numpy as np import tensorflow as tf from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file The provided code snippet includes necessary dependencies for implementing the `get_single_col_by_input_type` function. Write a Python function `def get_single_col_by_input_type(input_type, column_definition)` to solve the following problem: Returns name of single column. Args: input_type: Input type of column to extract column_definition: Column definition list for experiment Here is the function: def get_single_col_by_input_type(input_type, column_definition): """Returns name of single column. Args: input_type: Input type of column to extract column_definition: Column definition list for experiment """ l = [tup[0] for tup in column_definition if tup[2] == input_type] if len(l) != 1: raise ValueError("Invalid number of columns for {}".format(input_type)) return l[0]
Returns name of single column. Args: input_type: Input type of column to extract column_definition: Column definition list for experiment
19,718
import os import pathlib import numpy as np import tensorflow as tf from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file The provided code snippet includes necessary dependencies for implementing the `extract_cols_from_data_type` function. Write a Python function `def extract_cols_from_data_type(data_type, column_definition, excluded_input_types)` to solve the following problem: Extracts the names of columns that correspond to a define data_type. Args: data_type: DataType of columns to extract. column_definition: Column definition to use. excluded_input_types: Set of input types to exclude Returns: List of names for columns with data type specified. Here is the function: def extract_cols_from_data_type(data_type, column_definition, excluded_input_types): """Extracts the names of columns that correspond to a define data_type. Args: data_type: DataType of columns to extract. column_definition: Column definition to use. excluded_input_types: Set of input types to exclude Returns: List of names for columns with data type specified. """ return [tup[0] for tup in column_definition if tup[1] == data_type and tup[2] not in excluded_input_types]
Extracts the names of columns that correspond to a define data_type. Args: data_type: DataType of columns to extract. column_definition: Column definition to use. excluded_input_types: Set of input types to exclude Returns: List of names for columns with data type specified.
19,719
import os import pathlib import numpy as np import tensorflow as tf from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file The provided code snippet includes necessary dependencies for implementing the `tensorflow_quantile_loss` function. Write a Python function `def tensorflow_quantile_loss(y, y_pred, quantile)` to solve the following problem: Computes quantile loss for tensorflow. Standard quantile loss as defined in the "Training Procedure" section of the main TFT paper Args: y: Targets y_pred: Predictions quantile: Quantile to use for loss calculations (between 0 & 1) Returns: Tensor for quantile loss. Here is the function: def tensorflow_quantile_loss(y, y_pred, quantile): """Computes quantile loss for tensorflow. Standard quantile loss as defined in the "Training Procedure" section of the main TFT paper Args: y: Targets y_pred: Predictions quantile: Quantile to use for loss calculations (between 0 & 1) Returns: Tensor for quantile loss. """ # Checks quantile if quantile < 0 or quantile > 1: raise ValueError("Illegal quantile value={}! Values should be between 0 and 1.".format(quantile)) prediction_underflow = y - y_pred q_loss = quantile * tf.maximum(prediction_underflow, 0.0) + (1.0 - quantile) * tf.maximum( -prediction_underflow, 0.0 ) return tf.reduce_sum(q_loss, axis=-1)
Computes quantile loss for tensorflow. Standard quantile loss as defined in the "Training Procedure" section of the main TFT paper Args: y: Targets y_pred: Predictions quantile: Quantile to use for loss calculations (between 0 & 1) Returns: Tensor for quantile loss.
19,720
import os import pathlib import numpy as np import tensorflow as tf from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file The provided code snippet includes necessary dependencies for implementing the `numpy_normalised_quantile_loss` function. Write a Python function `def numpy_normalised_quantile_loss(y, y_pred, quantile)` to solve the following problem: Computes normalised quantile loss for numpy arrays. Uses the q-Risk metric as defined in the "Training Procedure" section of the main TFT paper. Args: y: Targets y_pred: Predictions quantile: Quantile to use for loss calculations (between 0 & 1) Returns: Float for normalised quantile loss. Here is the function: def numpy_normalised_quantile_loss(y, y_pred, quantile): """Computes normalised quantile loss for numpy arrays. Uses the q-Risk metric as defined in the "Training Procedure" section of the main TFT paper. Args: y: Targets y_pred: Predictions quantile: Quantile to use for loss calculations (between 0 & 1) Returns: Float for normalised quantile loss. """ prediction_underflow = y - y_pred weighted_errors = quantile * np.maximum(prediction_underflow, 0.0) + (1.0 - quantile) * np.maximum( -prediction_underflow, 0.0 ) quantile_loss = weighted_errors.mean() normaliser = y.abs().mean() return 2 * quantile_loss / normaliser
Computes normalised quantile loss for numpy arrays. Uses the q-Risk metric as defined in the "Training Procedure" section of the main TFT paper. Args: y: Targets y_pred: Predictions quantile: Quantile to use for loss calculations (between 0 & 1) Returns: Float for normalised quantile loss.
19,721
import os import pathlib import numpy as np import tensorflow as tf from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file The provided code snippet includes necessary dependencies for implementing the `create_folder_if_not_exist` function. Write a Python function `def create_folder_if_not_exist(directory)` to solve the following problem: Creates folder if it doesn't exist. Args: directory: Folder path to create. Here is the function: def create_folder_if_not_exist(directory): """Creates folder if it doesn't exist. Args: directory: Folder path to create. """ # Also creates directories recursively pathlib.Path(directory).mkdir(parents=True, exist_ok=True)
Creates folder if it doesn't exist. Args: directory: Folder path to create.
19,722
import os import pathlib import numpy as np import tensorflow as tf from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file The provided code snippet includes necessary dependencies for implementing the `get_default_tensorflow_config` function. Write a Python function `def get_default_tensorflow_config(tf_device="gpu", gpu_id=0)` to solve the following problem: Creates tensorflow config for graphs to run on CPU or GPU. Specifies whether to run graph on gpu or cpu and which GPU ID to use for multi GPU machines. Args: tf_device: 'cpu' or 'gpu' gpu_id: GPU ID to use if relevant Returns: Tensorflow config. Here is the function: def get_default_tensorflow_config(tf_device="gpu", gpu_id=0): """Creates tensorflow config for graphs to run on CPU or GPU. Specifies whether to run graph on gpu or cpu and which GPU ID to use for multi GPU machines. Args: tf_device: 'cpu' or 'gpu' gpu_id: GPU ID to use if relevant Returns: Tensorflow config. """ if tf_device == "cpu": os.environ["CUDA_VISIBLE_DEVICES"] = "-1" # for training on cpu tf_config = tf.ConfigProto(log_device_placement=False, device_count={"GPU": 0}) else: os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id) print("Selecting GPU ID={}".format(gpu_id)) tf_config = tf.ConfigProto(log_device_placement=False) tf_config.gpu_options.allow_growth = True return tf_config
Creates tensorflow config for graphs to run on CPU or GPU. Specifies whether to run graph on gpu or cpu and which GPU ID to use for multi GPU machines. Args: tf_device: 'cpu' or 'gpu' gpu_id: GPU ID to use if relevant Returns: Tensorflow config.
19,723
import os import pathlib import numpy as np import tensorflow as tf from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file The provided code snippet includes necessary dependencies for implementing the `save` function. Write a Python function `def save(tf_session, model_folder, cp_name, scope=None)` to solve the following problem: Saves Tensorflow graph to checkpoint. Saves all trainiable variables under a given variable scope to checkpoint. Args: tf_session: Session containing graph model_folder: Folder to save models cp_name: Name of Tensorflow checkpoint scope: Variable scope containing variables to save Here is the function: def save(tf_session, model_folder, cp_name, scope=None): """Saves Tensorflow graph to checkpoint. Saves all trainiable variables under a given variable scope to checkpoint. Args: tf_session: Session containing graph model_folder: Folder to save models cp_name: Name of Tensorflow checkpoint scope: Variable scope containing variables to save """ # Save model if scope is None: saver = tf.train.Saver() else: var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope) saver = tf.train.Saver(var_list=var_list, max_to_keep=100000) save_path = saver.save(tf_session, os.path.join(model_folder, "{0}.ckpt".format(cp_name))) print("Model saved to: {0}".format(save_path))
Saves Tensorflow graph to checkpoint. Saves all trainiable variables under a given variable scope to checkpoint. Args: tf_session: Session containing graph model_folder: Folder to save models cp_name: Name of Tensorflow checkpoint scope: Variable scope containing variables to save
19,724
import os import pathlib import numpy as np import tensorflow as tf from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file def print_weights_in_checkpoint(model_folder, cp_name): """Prints all weights in Tensorflow checkpoint. Args: model_folder: Folder containing checkpoint cp_name: Name of checkpoint Returns: """ load_path = os.path.join(model_folder, "{0}.ckpt".format(cp_name)) print_tensors_in_checkpoint_file(file_name=load_path, tensor_name="", all_tensors=True, all_tensor_names=True) The provided code snippet includes necessary dependencies for implementing the `load` function. Write a Python function `def load(tf_session, model_folder, cp_name, scope=None, verbose=False)` to solve the following problem: Loads Tensorflow graph from checkpoint. Args: tf_session: Session to load graph into model_folder: Folder containing serialised model cp_name: Name of Tensorflow checkpoint scope: Variable scope to use. verbose: Whether to print additional debugging information. Here is the function: def load(tf_session, model_folder, cp_name, scope=None, verbose=False): """Loads Tensorflow graph from checkpoint. Args: tf_session: Session to load graph into model_folder: Folder containing serialised model cp_name: Name of Tensorflow checkpoint scope: Variable scope to use. verbose: Whether to print additional debugging information. 
""" # Load model proper load_path = os.path.join(model_folder, "{0}.ckpt".format(cp_name)) print("Loading model from {0}".format(load_path)) print_weights_in_checkpoint(model_folder, cp_name) initial_vars = set([v.name for v in tf.get_default_graph().as_graph_def().node]) # Saver if scope is None: saver = tf.train.Saver() else: var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope) saver = tf.train.Saver(var_list=var_list, max_to_keep=100000) # Load saver.restore(tf_session, load_path) all_vars = set([v.name for v in tf.get_default_graph().as_graph_def().node]) if verbose: print("Restored {0}".format(",".join(initial_vars.difference(all_vars)))) print("Existing {0}".format(",".join(all_vars.difference(initial_vars)))) print("All {0}".format(",".join(all_vars))) print("Done.")
Loads Tensorflow graph from checkpoint. Args: tf_session: Session to load graph into model_folder: Folder containing serialised model cp_name: Name of Tensorflow checkpoint scope: Variable scope to use. verbose: Whether to print additional debugging information.
19,725
from __future__ import absolute_import from __future__ import division from __future__ import print_function import gc import json import os import shutil import data_formatters.base import libs.utils as utils import numpy as np import pandas as pd import tensorflow as tf Dense = tf.keras.layers.Dense The provided code snippet includes necessary dependencies for implementing the `apply_mlp` function. Write a Python function `def apply_mlp( inputs, hidden_size, output_size, output_activation=None, hidden_activation="tanh", use_time_distributed=False )` to solve the following problem: Applies simple feed-forward network to an input. Args: inputs: MLP inputs hidden_size: Hidden state size output_size: Output size of MLP output_activation: Activation function to apply on output hidden_activation: Activation function to apply on input use_time_distributed: Whether to apply across time Returns: Tensor for MLP outputs. Here is the function: def apply_mlp( inputs, hidden_size, output_size, output_activation=None, hidden_activation="tanh", use_time_distributed=False ): """Applies simple feed-forward network to an input. Args: inputs: MLP inputs hidden_size: Hidden state size output_size: Output size of MLP output_activation: Activation function to apply on output hidden_activation: Activation function to apply on input use_time_distributed: Whether to apply across time Returns: Tensor for MLP outputs. """ if use_time_distributed: hidden = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(hidden_size, activation=hidden_activation))( inputs ) return tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(output_size, activation=output_activation))(hidden) else: hidden = tf.keras.layers.Dense(hidden_size, activation=hidden_activation)(inputs) return tf.keras.layers.Dense(output_size, activation=output_activation)(hidden)
Applies simple feed-forward network to an input. Args: inputs: MLP inputs hidden_size: Hidden state size output_size: Output size of MLP output_activation: Activation function to apply on output hidden_activation: Activation function to apply on input use_time_distributed: Whether to apply across time Returns: Tensor for MLP outputs.
19,726
from __future__ import absolute_import from __future__ import division from __future__ import print_function import gc import json import os import shutil import data_formatters.base import libs.utils as utils import numpy as np import pandas as pd import tensorflow as tf Dense = tf.keras.layers.Dense Activation = tf.keras.layers.Activation def linear_layer(size, activation=None, use_time_distributed=False, use_bias=True): """Returns simple Keras linear layer. Args: size: Output size activation: Activation function to apply if required use_time_distributed: Whether to apply layer across time use_bias: Whether bias should be included in layer """ linear = tf.keras.layers.Dense(size, activation=activation, use_bias=use_bias) if use_time_distributed: linear = tf.keras.layers.TimeDistributed(linear) return linear def apply_gating_layer(x, hidden_layer_size, dropout_rate=None, use_time_distributed=True, activation=None): """Applies a Gated Linear Unit (GLU) to an input. Args: x: Input to gating layer hidden_layer_size: Dimension of GLU dropout_rate: Dropout rate to apply if any use_time_distributed: Whether to apply across time activation: Activation function to apply to the linear feature transform if necessary Returns: Tuple of tensors for: (GLU output, gate) """ if dropout_rate is not None: x = tf.keras.layers.Dropout(dropout_rate)(x) if use_time_distributed: activation_layer = tf.keras.layers.TimeDistributed( tf.keras.layers.Dense(hidden_layer_size, activation=activation) )(x) gated_layer = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(hidden_layer_size, activation="sigmoid"))(x) else: activation_layer = tf.keras.layers.Dense(hidden_layer_size, activation=activation)(x) gated_layer = tf.keras.layers.Dense(hidden_layer_size, activation="sigmoid")(x) return tf.keras.layers.Multiply()([activation_layer, gated_layer]), gated_layer def add_and_norm(x_list): """Applies skip connection followed by layer normalisation. 
Args: x_list: List of inputs to sum for skip connection Returns: Tensor output from layer. """ tmp = Add()(x_list) tmp = LayerNorm()(tmp) return tmp The provided code snippet includes necessary dependencies for implementing the `gated_residual_network` function. Write a Python function `def gated_residual_network( x, hidden_layer_size, output_size=None, dropout_rate=None, use_time_distributed=True, additional_context=None, return_gate=False, )` to solve the following problem: Applies the gated residual network (GRN) as defined in paper. Args: x: Network inputs hidden_layer_size: Internal state size output_size: Size of output layer dropout_rate: Dropout rate if dropout is applied use_time_distributed: Whether to apply network across time dimension additional_context: Additional context vector to use if relevant return_gate: Whether to return GLU gate for diagnostic purposes Returns: Tuple of tensors for: (GRN output, GLU gate) Here is the function: def gated_residual_network( x, hidden_layer_size, output_size=None, dropout_rate=None, use_time_distributed=True, additional_context=None, return_gate=False, ): """Applies the gated residual network (GRN) as defined in paper. 
Args: x: Network inputs hidden_layer_size: Internal state size output_size: Size of output layer dropout_rate: Dropout rate if dropout is applied use_time_distributed: Whether to apply network across time dimension additional_context: Additional context vector to use if relevant return_gate: Whether to return GLU gate for diagnostic purposes Returns: Tuple of tensors for: (GRN output, GLU gate) """ # Setup skip connection if output_size is None: output_size = hidden_layer_size skip = x else: linear = Dense(output_size) if use_time_distributed: linear = tf.keras.layers.TimeDistributed(linear) skip = linear(x) # Apply feedforward network hidden = linear_layer(hidden_layer_size, activation=None, use_time_distributed=use_time_distributed)(x) if additional_context is not None: hidden = hidden + linear_layer( hidden_layer_size, activation=None, use_time_distributed=use_time_distributed, use_bias=False )(additional_context) hidden = tf.keras.layers.Activation("elu")(hidden) hidden = linear_layer(hidden_layer_size, activation=None, use_time_distributed=use_time_distributed)(hidden) gating_layer, gate = apply_gating_layer( hidden, output_size, dropout_rate=dropout_rate, use_time_distributed=use_time_distributed, activation=None ) if return_gate: return add_and_norm([skip, gating_layer]), gate else: return add_and_norm([skip, gating_layer])
Applies the gated residual network (GRN) as defined in paper. Args: x: Network inputs hidden_layer_size: Internal state size output_size: Size of output layer dropout_rate: Dropout rate if dropout is applied use_time_distributed: Whether to apply network across time dimension additional_context: Additional context vector to use if relevant return_gate: Whether to return GLU gate for diagnostic purposes Returns: Tuple of tensors for: (GRN output, GLU gate)
19,727
from __future__ import absolute_import from __future__ import division from __future__ import print_function import gc import json import os import shutil import data_formatters.base import libs.utils as utils import numpy as np import pandas as pd import tensorflow as tf K = tf.keras.backend The provided code snippet includes necessary dependencies for implementing the `get_decoder_mask` function. Write a Python function `def get_decoder_mask(self_attn_inputs)` to solve the following problem: Returns causal mask to apply for self-attention layer. Args: self_attn_inputs: Inputs to self attention layer to determine mask shape Here is the function: def get_decoder_mask(self_attn_inputs): """Returns causal mask to apply for self-attention layer. Args: self_attn_inputs: Inputs to self attention layer to determine mask shape """ len_s = tf.shape(self_attn_inputs)[1] bs = tf.shape(self_attn_inputs)[:1] mask = K.cumsum(tf.eye(len_s, batch_shape=bs), 1) return mask
Returns causal mask to apply for self-attention layer. Args: self_attn_inputs: Inputs to self attention layer to determine mask shape
19,728
import os import copy import math import json import collections import numpy as np import pandas as pd import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F from tqdm import tqdm from qlib.utils import get_or_create_path from qlib.log import get_module_logger from qlib.model.base import Model def evaluate(pred): pred = pred.rank(pct=True) # transform into percentiles score = pred.score label = pred.label diff = score - label MSE = (diff**2).mean() MAE = (diff.abs()).mean() IC = score.corr(label) return {"MSE": MSE, "MAE": MAE, "IC": IC}
null
19,729
import os import copy import math import json import collections import numpy as np import pandas as pd import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F from tqdm import tqdm from qlib.utils import get_or_create_path from qlib.log import get_module_logger from qlib.model.base import Model def average_params(params_list): assert isinstance(params_list, (tuple, list, collections.deque)) n = len(params_list) if n == 1: return params_list[0] new_params = collections.OrderedDict() keys = None for i, params in enumerate(params_list): if keys is None: keys = params.keys() for k, v in params.items(): if k not in keys: raise ValueError("the %d-th model has different params" % i) if k not in new_params: new_params[k] = v / n else: new_params[k] += v / n return new_params
null
19,730
import os import copy import math import json import collections import numpy as np import pandas as pd import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F from tqdm import tqdm from qlib.utils import get_or_create_path from qlib.log import get_module_logger from qlib.model.base import Model def shoot_infs(inp_tensor): """Replaces inf by maximum of tensor""" mask_inf = torch.isinf(inp_tensor) ind_inf = torch.nonzero(mask_inf, as_tuple=False) if len(ind_inf) > 0: for ind in ind_inf: if len(ind) == 2: inp_tensor[ind[0], ind[1]] = 0 elif len(ind) == 1: inp_tensor[ind[0]] = 0 m = torch.max(inp_tensor) for ind in ind_inf: if len(ind) == 2: inp_tensor[ind[0], ind[1]] = m elif len(ind) == 1: inp_tensor[ind[0]] = m return inp_tensor def sinkhorn(Q, n_iters=3, epsilon=0.01): # epsilon should be adjusted according to logits value's scale with torch.no_grad(): Q = shoot_infs(Q) Q = torch.exp(Q / epsilon) for i in range(n_iters): Q /= Q.sum(dim=0, keepdim=True) Q /= Q.sum(dim=1, keepdim=True) return Q
null
19,731
import copy import torch import numpy as np import pandas as pd from qlib.data.dataset import DatasetH device = "cuda" if torch.cuda.is_available() else "cpu" def _to_tensor(x): if not isinstance(x, torch.Tensor): return torch.tensor(x, dtype=torch.float, device=device) return x
null
19,732
import copy import torch import numpy as np import pandas as pd from qlib.data.dataset import DatasetH The provided code snippet includes necessary dependencies for implementing the `_create_ts_slices` function. Write a Python function `def _create_ts_slices(index, seq_len)` to solve the following problem: create time series slices from pandas index Args: index (pd.MultiIndex): pandas multiindex with <instrument, datetime> order seq_len (int): sequence length Here is the function: def _create_ts_slices(index, seq_len): """ create time series slices from pandas index Args: index (pd.MultiIndex): pandas multiindex with <instrument, datetime> order seq_len (int): sequence length """ assert index.is_lexsorted(), "index should be sorted" # number of dates for each code sample_count_by_codes = pd.Series(0, index=index).groupby(level=0).size().values # start_index for each code start_index_of_codes = np.roll(np.cumsum(sample_count_by_codes), 1) start_index_of_codes[0] = 0 # all the [start, stop) indices of features # features btw [start, stop) are used to predict the `stop - 1` label slices = [] for cur_loc, cur_cnt in zip(start_index_of_codes, sample_count_by_codes): for stop in range(1, cur_cnt + 1): end = cur_loc + stop start = max(end - seq_len, 0) slices.append(slice(start, end)) slices = np.array(slices) return slices
create time series slices from pandas index Args: index (pd.MultiIndex): pandas multiindex with <instrument, datetime> order seq_len (int): sequence length
19,733
import copy import torch import numpy as np import pandas as pd from qlib.data.dataset import DatasetH The provided code snippet includes necessary dependencies for implementing the `_get_date_parse_fn` function. Write a Python function `def _get_date_parse_fn(target)` to solve the following problem: get date parse function This method is used to parse date arguments as target type. Example: get_date_parse_fn('20120101')('2017-01-01') => '20170101' get_date_parse_fn(20120101)('2017-01-01') => 20170101 Here is the function: def _get_date_parse_fn(target): """get date parse function This method is used to parse date arguments as target type. Example: get_date_parse_fn('20120101')('2017-01-01') => '20170101' get_date_parse_fn(20120101)('2017-01-01') => 20170101 """ if isinstance(target, pd.Timestamp): _fn = lambda x: pd.Timestamp(x) # Timestamp('2020-01-01') elif isinstance(target, str) and len(target) == 8: _fn = lambda x: str(x).replace("-", "")[:8] # '20200201' elif isinstance(target, int): _fn = lambda x: int(str(x).replace("-", "")[:8]) # 20200201 else: _fn = lambda x: x return _fn
get date parse function This method is used to parse date arguments as target type. Example: get_date_parse_fn('20120101')('2017-01-01') => '20170101' get_date_parse_fn(20120101)('2017-01-01') => 20170101
19,734
import os import numpy as np import pandas as pd from qlib.data import D from qlib.model.riskmodel import StructuredCovEstimator def prepare_data(riskdata_root="./riskdata", T=240, start_time="2016-01-01"): universe = D.features(D.instruments("csi300"), ["$close"], start_time=start_time).swaplevel().sort_index() price_all = ( D.features(D.instruments("all"), ["$close"], start_time=start_time).squeeze().unstack(level="instrument") ) # StructuredCovEstimator is a statistical risk model riskmodel = StructuredCovEstimator() for i in range(T - 1, len(price_all)): date = price_all.index[i] ref_date = price_all.index[i - T + 1] print(date) codes = universe.loc[date].index price = price_all.loc[ref_date:date, codes] # calculate return and remove extreme return ret = price.pct_change() ret.clip(ret.quantile(0.025), ret.quantile(0.975), axis=1, inplace=True) # run risk model F, cov_b, var_u = riskmodel.predict(ret, is_price=False, return_decomposed_components=True) # save risk data root = riskdata_root + "/" + date.strftime("%Y%m%d") os.makedirs(root, exist_ok=True) pd.DataFrame(F, index=codes).to_pickle(root + "/factor_exp.pkl") pd.DataFrame(cov_b).to_pickle(root + "/factor_cov.pkl") # for specific_risk we follow the convention to save volatility pd.Series(np.sqrt(var_u), index=codes).to_pickle(root + "/specific_risk.pkl")
null
19,735
from datetime import date, datetime as dt import os from pathlib import Path import random import shutil import time import traceback from arctic import Arctic, chunkstore import arctic from arctic import Arctic, CHUNK_STORE from arctic.chunkstore.chunkstore import CHUNK_SIZE import fire from joblib import Parallel, delayed, parallel import numpy as np import pandas as pd from pandas import DataFrame from pandas.core.indexes.datetimes import date_range from pymongo.mongo_client import MongoClient def is_stock(exchange_place, code): if exchange_place == "SH" and code[0] != "6": return False if exchange_place == "SZ" and code[0] != "0" and code[:2] != "30": return False return True
null
19,736
from datetime import date, datetime as dt import os from pathlib import Path import random import shutil import time import traceback from arctic import Arctic, chunkstore import arctic from arctic import Arctic, CHUNK_STORE from arctic.chunkstore.chunkstore import CHUNK_SIZE import fire from joblib import Parallel, delayed, parallel import numpy as np import pandas as pd from pandas import DataFrame from pandas.core.indexes.datetimes import date_range from pymongo.mongo_client import MongoClient N_JOBS = -1 LOG_FILE_PATH = DIRNAME / "log_file" DATA_PATH = DIRNAME / "raw_data" DATABASE_PATH = DIRNAME / "orig_data" DATA_INFO_PATH = DIRNAME / "data_info" DATA_FINISH_INFO_PATH = DIRNAME / "./data_finish_info" DOC_TYPE = ["Tick", "Order", "OrderQueue", "Transaction", "Day", "Minute"] def add_one_stock_daily_data_wrapper(filepath, type, exchange_place, index, date): pid = os.getpid() code = os.path.split(filepath)[-1].split(".csv")[0] arc = Arctic(ARCTIC_SRV) try: if index % 100 == 0: print("index = {}, filepath = {}".format(index, filepath)) error_index_list = add_one_stock_daily_data(filepath, type, exchange_place, arc, date) if error_index_list is not None and len(error_index_list) > 0: f = open(os.path.join(LOG_FILE_PATH, "temp_timestamp_error_{0}_{1}_{2}.txt".format(pid, date, type)), "a+") f.write("{}, {}, {}\n".format(filepath, error_index_list, exchange_place + "_" + code)) f.close() except Exception as e: info = traceback.format_exc() print("error:" + str(e)) f = open(os.path.join(LOG_FILE_PATH, "temp_fail_{0}_{1}_{2}.txt".format(pid, date, type)), "a+") f.write("fail:" + str(filepath) + "\n" + str(e) + "\n" + str(info) + "\n") f.close() finally: arc.reset() def add_data(tick_date, doc_type, stock_name_dict): pid = os.getpid() if doc_type not in DOC_TYPE: print("doc_type not in {}".format(DOC_TYPE)) return try: begin_time = time.time() os.system(f"cp {DATABASE_PATH}/{tick_date + '_{}.tar.gz'.format(doc_type)} {DATA_PATH}/") os.system( f"tar -xvzf 
{DATA_PATH}/{tick_date + '_{}.tar.gz'.format(doc_type)} -C {DATA_PATH}/ {tick_date + '_' + doc_type}/SH" ) os.system( f"tar -xvzf {DATA_PATH}/{tick_date + '_{}.tar.gz'.format(doc_type)} -C {DATA_PATH}/ {tick_date + '_' + doc_type}/SZ" ) os.system(f"chmod 777 {DATA_PATH}") os.system(f"chmod 777 {DATA_PATH}/{tick_date + '_' + doc_type}") os.system(f"chmod 777 {DATA_PATH}/{tick_date + '_' + doc_type}/SH") os.system(f"chmod 777 {DATA_PATH}/{tick_date + '_' + doc_type}/SZ") os.system(f"chmod 777 {DATA_PATH}/{tick_date + '_' + doc_type}/SH/{tick_date}") os.system(f"chmod 777 {DATA_PATH}/{tick_date + '_' + doc_type}/SZ/{tick_date}") print("tick_date={}".format(tick_date)) temp_data_path_sh = os.path.join(DATA_PATH, tick_date + "_" + doc_type, "SH", tick_date) temp_data_path_sz = os.path.join(DATA_PATH, tick_date + "_" + doc_type, "SZ", tick_date) is_files_exist = {"sh": os.path.exists(temp_data_path_sh), "sz": os.path.exists(temp_data_path_sz)} sz_files = ( ( set([i.split(".csv")[0] for i in os.listdir(temp_data_path_sz) if i[:2] == "30" or i[0] == "0"]) & set(stock_name_dict["SZ"]) ) if is_files_exist["sz"] else set() ) sz_file_nums = len(sz_files) if is_files_exist["sz"] else 0 sh_files = ( ( set([i.split(".csv")[0] for i in os.listdir(temp_data_path_sh) if i[0] == "6"]) & set(stock_name_dict["SH"]) ) if is_files_exist["sh"] else set() ) sh_file_nums = len(sh_files) if is_files_exist["sh"] else 0 print("sz_file_nums:{}, sh_file_nums:{}".format(sz_file_nums, sh_file_nums)) f = (DATA_INFO_PATH / "data_info_log_{}_{}".format(doc_type, tick_date)).open("w+") f.write("sz:{}, sh:{}, date:{}:".format(sz_file_nums, sh_file_nums, tick_date) + "\n") f.close() if sh_file_nums > 0: # write is not thread-safe, update may be thread-safe Parallel(n_jobs=N_JOBS)( delayed(add_one_stock_daily_data_wrapper)( os.path.join(temp_data_path_sh, name + ".csv"), doc_type, "SH", index, tick_date ) for index, name in enumerate(list(sh_files)) ) if sz_file_nums > 0: # write is not thread-safe, 
update may be thread-safe Parallel(n_jobs=N_JOBS)( delayed(add_one_stock_daily_data_wrapper)( os.path.join(temp_data_path_sz, name + ".csv"), doc_type, "SZ", index, tick_date ) for index, name in enumerate(list(sz_files)) ) os.system(f"rm -f {DATA_PATH}/{tick_date + '_{}.tar.gz'.format(doc_type)}") os.system(f"rm -rf {DATA_PATH}/{tick_date + '_' + doc_type}") total_time = time.time() - begin_time f = (DATA_FINISH_INFO_PATH / "data_info_finish_log_{}_{}".format(doc_type, tick_date)).open("w+") f.write("finish: date:{}, consume_time:{}, end_time: {}".format(tick_date, total_time, time.time()) + "\n") f.close() except Exception as e: info = traceback.format_exc() print("date error:" + str(e)) f = open(os.path.join(LOG_FILE_PATH, "temp_fail_{0}_{1}_{2}.txt".format(pid, tick_date, doc_type)), "a+") f.write("fail:" + str(tick_date) + "\n" + str(e) + "\n" + str(info) + "\n") f.close()
null
19,737
import pickle import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set(color_codes=True) plt.rcParams["font.sans-serif"] = "SimHei" plt.rcParams["axes.unicode_minus"] = False from tqdm.auto import tqdm plt.figure(figsize=(40, 20)) sns.heatmap(data_sim) plt.figure(figsize=(40, 20)) sns.heatmap(data_sim.rolling(20).mean()) from qlib import auto_init from qlib.workflow import R pd.DataFrame(meta_m.tn.twm.linear.weight.detach().numpy()).T[0].plot() pd.DataFrame(meta_m.tn.twm.linear.weight.detach().numpy()).T[0].rolling(5).mean().plot() for t in tasks: test_seg = t["dataset"]["kwargs"]["segments"]["test"] if None not in test_seg: # The last rolling is skipped. task_df[test_seg] = t["reweighter"].time_weight plt.figure(figsize=(40, 20)) sns.heatmap(task_df.T) plt.figure(figsize=(40, 20)) sns.heatmap(task_df.rolling(10).mean().T) def show_linear_weight(exp): coef_df = {} for r in exp.list_recorders("list"): t = r.load_object("task") if None in t["dataset"]["kwargs"]["segments"]["test"]: continue m = r.load_object("params.pkl") coef_df[t["dataset"]["kwargs"]["segments"]["test"]] = pd.Series(m.coef_) coef_df = pd.concat(coef_df) coef_df.index.names = ["test_start", "test_end", "coef_idx"] coef_df = coef_df.droplevel("test_end").unstack("coef_idx").T plt.figure(figsize=(40, 20)) sns.heatmap(coef_df) plt.show()
null
19,738
import abc import importlib from pathlib import Path from typing import Union, Iterable, List import fire import numpy as np import pandas as pd import baostock as bs from loguru import logger The provided code snippet includes necessary dependencies for implementing the `run` function. Write a Python function `def run(qlib_dir: Union[str, Path], region: str = "cn", start_date: str = None, end_date: str = None)` to solve the following problem: Collect future calendar(day) Parameters ---------- qlib_dir: qlib data directory region: cn/CN or us/US start_date start date end_date end date Examples ------- # get cn future calendar $ python future_calendar_collector.py --qlib_data_1d_dir <user data dir> --region cn Here is the function: def run(qlib_dir: Union[str, Path], region: str = "cn", start_date: str = None, end_date: str = None): """Collect future calendar(day) Parameters ---------- qlib_dir: qlib data directory region: cn/CN or us/US start_date start date end_date end date Examples ------- # get cn future calendar $ python future_calendar_collector.py --qlib_data_1d_dir <user data dir> --region cn """ logger.info(f"collector future calendar: region={region}") _cur_module = importlib.import_module("future_calendar_collector") _class = getattr(_cur_module, f"CollectorFutureCalendar{region.upper()}") collector = _class(qlib_dir=qlib_dir, start_date=start_date, end_date=end_date) collector.write_calendar(collector.collector())
Collect future calendar(day) Parameters ---------- qlib_dir: qlib data directory region: cn/CN or us/US start_date start date end_date end date Examples ------- # get cn future calendar $ python future_calendar_collector.py --qlib_data_1d_dir <user data dir> --region cn
19,739
import re import copy import importlib import time import bisect import pickle import random import requests import functools from pathlib import Path from typing import Iterable, Tuple, List import numpy as np import pandas as pd from lxml import etree from loguru import logger from yahooquery import Ticker from tqdm import tqdm from functools import partial from concurrent.futures import ProcessPoolExecutor from bs4 import BeautifulSoup SZSE_CALENDAR_URL = "http://www.szse.cn/api/report/exchange/onepersistenthour/monthList?month={month}&random={random}" CALENDAR_BENCH_URL_MAP = { "CSI300": CALENDAR_URL_BASE.format(market=1, bench_code="000300"), "CSI500": CALENDAR_URL_BASE.format(market=1, bench_code="000905"), "CSI100": CALENDAR_URL_BASE.format(market=1, bench_code="000903"), # NOTE: Use the time series of SH600000 as the sequence of all stocks "ALL": CALENDAR_URL_BASE.format(market=1, bench_code="000905"), # NOTE: Use the time series of ^GSPC(SP500) as the sequence of all stocks "US_ALL": "^GSPC", "IN_ALL": "^NSEI", "BR_ALL": "^BVSP", } _CALENDAR_MAP = {} def deco_retry(retry: int = 5, retry_sleep: int = 3): def deco_func(func): def wrapper(*args, **kwargs): _retry = 5 if callable(retry) else retry _result = None for _i in range(1, _retry + 1): try: _result = func(*args, **kwargs) break except Exception as e: logger.warning(f"{func.__name__}: {_i} :{e}") if _i == _retry: raise time.sleep(retry_sleep) return _result return wrapper return deco_func(retry) if callable(retry) else deco_func The provided code snippet includes necessary dependencies for implementing the `get_calendar_list` function. 
Write a Python function `def get_calendar_list(bench_code="CSI300") -> List[pd.Timestamp]` to solve the following problem: get SH/SZ history calendar list Parameters ---------- bench_code: str value from ["CSI300", "CSI500", "ALL", "US_ALL"] Returns ------- history calendar list Here is the function: def get_calendar_list(bench_code="CSI300") -> List[pd.Timestamp]: """get SH/SZ history calendar list Parameters ---------- bench_code: str value from ["CSI300", "CSI500", "ALL", "US_ALL"] Returns ------- history calendar list """ logger.info(f"get calendar list: {bench_code}......") def _get_calendar(url): _value_list = requests.get(url, timeout=None).json()["data"]["klines"] return sorted(map(lambda x: pd.Timestamp(x.split(",")[0]), _value_list)) calendar = _CALENDAR_MAP.get(bench_code, None) if calendar is None: if bench_code.startswith("US_") or bench_code.startswith("IN_") or bench_code.startswith("BR_"): print(Ticker(CALENDAR_BENCH_URL_MAP[bench_code])) print(Ticker(CALENDAR_BENCH_URL_MAP[bench_code]).history(interval="1d", period="max")) df = Ticker(CALENDAR_BENCH_URL_MAP[bench_code]).history(interval="1d", period="max") calendar = df.index.get_level_values(level="date").map(pd.Timestamp).unique().tolist() else: if bench_code.upper() == "ALL": @deco_retry def _get_calendar(month): _cal = [] try: resp = requests.get( SZSE_CALENDAR_URL.format(month=month, random=random.random), timeout=None ).json() for _r in resp["data"]: if int(_r["jybz"]): _cal.append(pd.Timestamp(_r["jyrq"])) except Exception as e: raise ValueError(f"{month}-->{e}") from e return _cal month_range = pd.date_range(start="2000-01", end=pd.Timestamp.now() + pd.Timedelta(days=31), freq="M") calendar = [] for _m in month_range: cal = _get_calendar(_m.strftime("%Y-%m")) if cal: calendar += cal calendar = list(filter(lambda x: x <= pd.Timestamp.now(), calendar)) else: calendar = _get_calendar(CALENDAR_BENCH_URL_MAP[bench_code]) _CALENDAR_MAP[bench_code] = calendar logger.info(f"end of get calendar 
list: {bench_code}.") return calendar
get SH/SZ history calendar list Parameters ---------- bench_code: str value from ["CSI300", "CSI500", "ALL", "US_ALL"] Returns ------- history calendar list
19,740
import re import copy import importlib import time import bisect import pickle import random import requests import functools from pathlib import Path from typing import Iterable, Tuple, List import numpy as np import pandas as pd from lxml import etree from loguru import logger from yahooquery import Ticker from tqdm import tqdm from functools import partial from concurrent.futures import ProcessPoolExecutor from bs4 import BeautifulSoup def return_date_list(date_field_name: str, file_path: Path): date_list = pd.read_csv(file_path, sep=",", index_col=0)[date_field_name].to_list() return sorted([pd.Timestamp(x) for x in date_list]) The provided code snippet includes necessary dependencies for implementing the `get_calendar_list_by_ratio` function. Write a Python function `def get_calendar_list_by_ratio( source_dir: [str, Path], date_field_name: str = "date", threshold: float = 0.5, minimum_count: int = 10, max_workers: int = 16, ) -> list` to solve the following problem: get calendar list by selecting the date when few funds trade in this day Parameters ---------- source_dir: str or Path The directory where the raw data collected from the Internet is saved date_field_name: str date field name, default is date threshold: float threshold to exclude some days when few funds trade in this day, default 0.5 minimum_count: int minimum count of funds should trade in one day max_workers: int Concurrent number, default is 16 Returns ------- history calendar list Here is the function: def get_calendar_list_by_ratio( source_dir: [str, Path], date_field_name: str = "date", threshold: float = 0.5, minimum_count: int = 10, max_workers: int = 16, ) -> list: """get calendar list by selecting the date when few funds trade in this day Parameters ---------- source_dir: str or Path The directory where the raw data collected from the Internet is saved date_field_name: str date field name, default is date threshold: float threshold to exclude some days when few funds trade in this day, 
default 0.5 minimum_count: int minimum count of funds should trade in one day max_workers: int Concurrent number, default is 16 Returns ------- history calendar list """ logger.info(f"get calendar list from {source_dir} by threshold = {threshold}......") source_dir = Path(source_dir).expanduser() file_list = list(source_dir.glob("*.csv")) _number_all_funds = len(file_list) logger.info(f"count how many funds trade in this day......") _dict_count_trade = dict() # dict{date:count} _fun = partial(return_date_list, date_field_name) all_oldest_list = [] with tqdm(total=_number_all_funds) as p_bar: with ProcessPoolExecutor(max_workers=max_workers) as executor: for date_list in executor.map(_fun, file_list): if date_list: all_oldest_list.append(date_list[0]) for date in date_list: if date not in _dict_count_trade: _dict_count_trade[date] = 0 _dict_count_trade[date] += 1 p_bar.update() logger.info(f"count how many funds have founded in this day......") _dict_count_founding = {date: _number_all_funds for date in _dict_count_trade} # dict{date:count} with tqdm(total=_number_all_funds) as p_bar: for oldest_date in all_oldest_list: for date in _dict_count_founding.keys(): if date < oldest_date: _dict_count_founding[date] -= 1 calendar = [ date for date, count in _dict_count_trade.items() if count >= max(int(count * threshold), minimum_count) ] return calendar
get calendar list by selecting the date when few funds trade in this day Parameters ---------- source_dir: str or Path The directory where the raw data collected from the Internet is saved date_field_name: str date field name, default is date threshold: float threshold to exclude some days when few funds trade in this day, default 0.5 minimum_count: int minimum count of funds should trade in one day max_workers: int Concurrent number, default is 16 Returns ------- history calendar list
19,741
import re import copy import importlib import time import bisect import pickle import random import requests import functools from pathlib import Path from typing import Iterable, Tuple, List import numpy as np import pandas as pd from lxml import etree from loguru import logger from yahooquery import Ticker from tqdm import tqdm from functools import partial from concurrent.futures import ProcessPoolExecutor from bs4 import BeautifulSoup HS_SYMBOLS_URL = "http://app.finance.ifeng.com/hq/list.php?type=stock_a&class={s_type}" _HS_SYMBOLS = None MINIMUM_SYMBOLS_NUM = 3900 The provided code snippet includes necessary dependencies for implementing the `get_hs_stock_symbols` function. Write a Python function `def get_hs_stock_symbols() -> list` to solve the following problem: get SH/SZ stock symbols Returns ------- stock symbols Here is the function: def get_hs_stock_symbols() -> list: """get SH/SZ stock symbols Returns ------- stock symbols """ global _HS_SYMBOLS # pylint: disable=W0603 def _get_symbol(): _res = set() for _k, _v in (("ha", "ss"), ("sa", "sz"), ("gem", "sz")): resp = requests.get(HS_SYMBOLS_URL.format(s_type=_k), timeout=None) _res |= set( map( lambda x: "{}.{}".format(re.findall(r"\d+", x)[0], _v), # pylint: disable=W0640 etree.HTML(resp.text).xpath("//div[@class='result']/ul//li/a/text()"), # pylint: disable=I1101 ) ) time.sleep(3) return _res if _HS_SYMBOLS is None: symbols = set() _retry = 60 # It may take multiple times to get the complete while len(symbols) < MINIMUM_SYMBOLS_NUM: symbols |= _get_symbol() time.sleep(3) symbol_cache_path = Path("~/.cache/hs_symbols_cache.pkl").expanduser().resolve() symbol_cache_path.parent.mkdir(parents=True, exist_ok=True) if symbol_cache_path.exists(): with symbol_cache_path.open("rb") as fp: cache_symbols = pickle.load(fp) symbols |= cache_symbols with symbol_cache_path.open("wb") as fp: pickle.dump(symbols, fp) _HS_SYMBOLS = sorted(list(symbols)) return _HS_SYMBOLS
get SH/SZ stock symbols Returns ------- stock symbols
19,742
import re import copy import importlib import time import bisect import pickle import random import requests import functools from pathlib import Path from typing import Iterable, Tuple, List import numpy as np import pandas as pd from lxml import etree from loguru import logger from yahooquery import Ticker from tqdm import tqdm from functools import partial from concurrent.futures import ProcessPoolExecutor from bs4 import BeautifulSoup _US_SYMBOLS = None def deco_retry(retry: int = 5, retry_sleep: int = 3): def deco_func(func): def wrapper(*args, **kwargs): _retry = 5 if callable(retry) else retry _result = None for _i in range(1, _retry + 1): try: _result = func(*args, **kwargs) break except Exception as e: logger.warning(f"{func.__name__}: {_i} :{e}") if _i == _retry: raise time.sleep(retry_sleep) return _result return wrapper return deco_func(retry) if callable(retry) else deco_func The provided code snippet includes necessary dependencies for implementing the `get_us_stock_symbols` function. 
Write a Python function `def get_us_stock_symbols(qlib_data_path: [str, Path] = None) -> list` to solve the following problem: get US stock symbols Returns ------- stock symbols Here is the function: def get_us_stock_symbols(qlib_data_path: [str, Path] = None) -> list: """get US stock symbols Returns ------- stock symbols """ global _US_SYMBOLS # pylint: disable=W0603 @deco_retry def _get_eastmoney(): url = "http://4.push2.eastmoney.com/api/qt/clist/get?pn=1&pz=10000&fs=m:105,m:106,m:107&fields=f12" resp = requests.get(url, timeout=None) if resp.status_code != 200: raise ValueError("request error") try: _symbols = [_v["f12"].replace("_", "-P") for _v in resp.json()["data"]["diff"].values()] except Exception as e: logger.warning(f"request error: {e}") raise if len(_symbols) < 8000: raise ValueError("request error") return _symbols @deco_retry def _get_nasdaq(): _res_symbols = [] for _name in ["otherlisted", "nasdaqtraded"]: url = f"ftp://ftp.nasdaqtrader.com/SymbolDirectory/{_name}.txt" df = pd.read_csv(url, sep="|") df = df.rename(columns={"ACT Symbol": "Symbol"}) _symbols = df["Symbol"].dropna() _symbols = _symbols.str.replace("$", "-P", regex=False) _symbols = _symbols.str.replace(".W", "-WT", regex=False) _symbols = _symbols.str.replace(".U", "-UN", regex=False) _symbols = _symbols.str.replace(".R", "-RI", regex=False) _symbols = _symbols.str.replace(".", "-", regex=False) _res_symbols += _symbols.unique().tolist() return _res_symbols @deco_retry def _get_nyse(): url = "https://www.nyse.com/api/quotes/filter" _parms = { "instrumentType": "EQUITY", "pageNumber": 1, "sortColumn": "NORMALIZED_TICKER", "sortOrder": "ASC", "maxResultsPerPage": 10000, "filterToken": "", } resp = requests.post(url, json=_parms, timeout=None) if resp.status_code != 200: raise ValueError("request error") try: _symbols = [_v["symbolTicker"].replace("-", "-P") for _v in resp.json()] except Exception as e: logger.warning(f"request error: {e}") _symbols = [] return _symbols if _US_SYMBOLS is 
None: _all_symbols = _get_eastmoney() + _get_nasdaq() + _get_nyse() if qlib_data_path is not None: for _index in ["nasdaq100", "sp500"]: ins_df = pd.read_csv( Path(qlib_data_path).joinpath(f"instruments/{_index}.txt"), sep="\t", names=["symbol", "start_date", "end_date"], ) _all_symbols += ins_df["symbol"].unique().tolist() def _format(s_): s_ = s_.replace(".", "-") s_ = s_.strip("$") s_ = s_.strip("*") return s_ _US_SYMBOLS = sorted(set(map(_format, filter(lambda x: len(x) < 8 and not x.endswith("WS"), _all_symbols)))) return _US_SYMBOLS
get US stock symbols Returns ------- stock symbols
19,743
import re import copy import importlib import time import bisect import pickle import random import requests import functools from pathlib import Path from typing import Iterable, Tuple, List import numpy as np import pandas as pd from lxml import etree from loguru import logger from yahooquery import Ticker from tqdm import tqdm from functools import partial from concurrent.futures import ProcessPoolExecutor from bs4 import BeautifulSoup _IN_SYMBOLS = None def deco_retry(retry: int = 5, retry_sleep: int = 3): def deco_func(func): def wrapper(*args, **kwargs): _retry = 5 if callable(retry) else retry _result = None for _i in range(1, _retry + 1): try: _result = func(*args, **kwargs) break except Exception as e: logger.warning(f"{func.__name__}: {_i} :{e}") if _i == _retry: raise time.sleep(retry_sleep) return _result return wrapper return deco_func(retry) if callable(retry) else deco_func The provided code snippet includes necessary dependencies for implementing the `get_in_stock_symbols` function. 
Write a Python function `def get_in_stock_symbols(qlib_data_path: [str, Path] = None) -> list` to solve the following problem: get IN stock symbols Returns ------- stock symbols Here is the function: def get_in_stock_symbols(qlib_data_path: [str, Path] = None) -> list: """get IN stock symbols Returns ------- stock symbols """ global _IN_SYMBOLS # pylint: disable=W0603 @deco_retry def _get_nifty(): url = f"https://www1.nseindia.com/content/equities/EQUITY_L.csv" df = pd.read_csv(url) df = df.rename(columns={"SYMBOL": "Symbol"}) df["Symbol"] = df["Symbol"] + ".NS" _symbols = df["Symbol"].dropna() _symbols = _symbols.unique().tolist() return _symbols if _IN_SYMBOLS is None: _all_symbols = _get_nifty() if qlib_data_path is not None: for _index in ["nifty"]: ins_df = pd.read_csv( Path(qlib_data_path).joinpath(f"instruments/{_index}.txt"), sep="\t", names=["symbol", "start_date", "end_date"], ) _all_symbols += ins_df["symbol"].unique().tolist() def _format(s_): s_ = s_.replace(".", "-") s_ = s_.strip("$") s_ = s_.strip("*") return s_ _IN_SYMBOLS = sorted(set(_all_symbols)) return _IN_SYMBOLS
get IN stock symbols Returns ------- stock symbols
19,744
import re import copy import importlib import time import bisect import pickle import random import requests import functools from pathlib import Path from typing import Iterable, Tuple, List import numpy as np import pandas as pd from lxml import etree from loguru import logger from yahooquery import Ticker from tqdm import tqdm from functools import partial from concurrent.futures import ProcessPoolExecutor from bs4 import BeautifulSoup _BR_SYMBOLS = None def deco_retry(retry: int = 5, retry_sleep: int = 3): def deco_func(func): def wrapper(*args, **kwargs): _retry = 5 if callable(retry) else retry _result = None for _i in range(1, _retry + 1): try: _result = func(*args, **kwargs) break except Exception as e: logger.warning(f"{func.__name__}: {_i} :{e}") if _i == _retry: raise time.sleep(retry_sleep) return _result return wrapper return deco_func(retry) if callable(retry) else deco_func The provided code snippet includes necessary dependencies for implementing the `get_br_stock_symbols` function. 
Write a Python function `def get_br_stock_symbols(qlib_data_path: [str, Path] = None) -> list` to solve the following problem: get Brazil(B3) stock symbols Returns ------- B3 stock symbols Here is the function: def get_br_stock_symbols(qlib_data_path: [str, Path] = None) -> list: """get Brazil(B3) stock symbols Returns ------- B3 stock symbols """ global _BR_SYMBOLS # pylint: disable=W0603 @deco_retry def _get_ibovespa(): _symbols = [] url = "https://www.fundamentus.com.br/detalhes.php?papel=" # Request agent = {"User-Agent": "Mozilla/5.0"} page = requests.get(url, headers=agent, timeout=None) # BeautifulSoup soup = BeautifulSoup(page.content, "html.parser") tbody = soup.find("tbody") children = tbody.findChildren("a", recursive=True) for child in children: _symbols.append(str(child).rsplit('"', maxsplit=1)[-1].split(">")[1].split("<")[0]) return _symbols if _BR_SYMBOLS is None: _all_symbols = _get_ibovespa() if qlib_data_path is not None: for _index in ["ibov"]: ins_df = pd.read_csv( Path(qlib_data_path).joinpath(f"instruments/{_index}.txt"), sep="\t", names=["symbol", "start_date", "end_date"], ) _all_symbols += ins_df["symbol"].unique().tolist() def _format(s_): s_ = s_.strip() s_ = s_.strip("$") s_ = s_.strip("*") s_ = s_ + ".SA" return s_ _BR_SYMBOLS = sorted(set(map(_format, _all_symbols))) return _BR_SYMBOLS
get Brazil(B3) stock symbols Returns ------- B3 stock symbols
19,745
import re import copy import importlib import time import bisect import pickle import random import requests import functools from pathlib import Path from typing import Iterable, Tuple, List import numpy as np import pandas as pd from lxml import etree from loguru import logger from yahooquery import Ticker from tqdm import tqdm from functools import partial from concurrent.futures import ProcessPoolExecutor from bs4 import BeautifulSoup _EN_FUND_SYMBOLS = None def deco_retry(retry: int = 5, retry_sleep: int = 3): def deco_func(func): def wrapper(*args, **kwargs): _retry = 5 if callable(retry) else retry _result = None for _i in range(1, _retry + 1): try: _result = func(*args, **kwargs) break except Exception as e: logger.warning(f"{func.__name__}: {_i} :{e}") if _i == _retry: raise time.sleep(retry_sleep) return _result return wrapper return deco_func(retry) if callable(retry) else deco_func The provided code snippet includes necessary dependencies for implementing the `get_en_fund_symbols` function. 
Write a Python function `def get_en_fund_symbols(qlib_data_path: [str, Path] = None) -> list` to solve the following problem: get en fund symbols Returns ------- fund symbols in China Here is the function: def get_en_fund_symbols(qlib_data_path: [str, Path] = None) -> list: """get en fund symbols Returns ------- fund symbols in China """ global _EN_FUND_SYMBOLS # pylint: disable=W0603 @deco_retry def _get_eastmoney(): url = "http://fund.eastmoney.com/js/fundcode_search.js" resp = requests.get(url, timeout=None) if resp.status_code != 200: raise ValueError("request error") try: _symbols = [] for sub_data in re.findall(r"[\[](.*?)[\]]", resp.content.decode().split("= [")[-1].replace("];", "")): data = sub_data.replace('"', "").replace("'", "") # TODO: do we need other information, like fund_name from ['000001', 'HXCZHH', '华夏成长混合', '混合型', 'HUAXIACHENGZHANGHUNHE'] _symbols.append(data.split(",")[0]) except Exception as e: logger.warning(f"request error: {e}") raise if len(_symbols) < 8000: raise ValueError("request error") return _symbols if _EN_FUND_SYMBOLS is None: _all_symbols = _get_eastmoney() _EN_FUND_SYMBOLS = sorted(set(_all_symbols)) return _EN_FUND_SYMBOLS
get en fund symbols Returns ------- fund symbols in China
19,746
import re import copy import importlib import time import bisect import pickle import random import requests import functools from pathlib import Path from typing import Iterable, Tuple, List import numpy as np import pandas as pd from lxml import etree from loguru import logger from yahooquery import Ticker from tqdm import tqdm from functools import partial from concurrent.futures import ProcessPoolExecutor from bs4 import BeautifulSoup The provided code snippet includes necessary dependencies for implementing the `symbol_suffix_to_prefix` function. Write a Python function `def symbol_suffix_to_prefix(symbol: str, capital: bool = True) -> str` to solve the following problem: symbol suffix to prefix Parameters ---------- symbol: str symbol capital : bool by default True Returns ------- Here is the function: def symbol_suffix_to_prefix(symbol: str, capital: bool = True) -> str: """symbol suffix to prefix Parameters ---------- symbol: str symbol capital : bool by default True Returns ------- """ code, exchange = symbol.split(".") if exchange.lower() in ["sh", "ss"]: res = f"sh{code}" else: res = f"{exchange}{code}" return res.upper() if capital else res.lower()
symbol suffix to prefix Parameters ---------- symbol: str symbol capital : bool by default True Returns -------
19,747
import re import copy import importlib import time import bisect import pickle import random import requests import functools from pathlib import Path from typing import Iterable, Tuple, List import numpy as np import pandas as pd from lxml import etree from loguru import logger from yahooquery import Ticker from tqdm import tqdm from functools import partial from concurrent.futures import ProcessPoolExecutor from bs4 import BeautifulSoup The provided code snippet includes necessary dependencies for implementing the `symbol_prefix_to_sufix` function. Write a Python function `def symbol_prefix_to_sufix(symbol: str, capital: bool = True) -> str` to solve the following problem: symbol prefix to sufix Parameters ---------- symbol: str symbol capital : bool by default True Returns ------- Here is the function: def symbol_prefix_to_sufix(symbol: str, capital: bool = True) -> str: """symbol prefix to sufix Parameters ---------- symbol: str symbol capital : bool by default True Returns ------- """ res = f"{symbol[:-2]}.{symbol[-2:]}" return res.upper() if capital else res.lower()
symbol prefix to sufix Parameters ---------- symbol: str symbol capital : bool by default True Returns -------
19,748
import re import copy import importlib import time import bisect import pickle import random import requests import functools from pathlib import Path from typing import Iterable, Tuple, List import numpy as np import pandas as pd from lxml import etree from loguru import logger from yahooquery import Ticker from tqdm import tqdm from functools import partial from concurrent.futures import ProcessPoolExecutor from bs4 import BeautifulSoup The provided code snippet includes necessary dependencies for implementing the `get_trading_date_by_shift` function. Write a Python function `def get_trading_date_by_shift(trading_list: list, trading_date: pd.Timestamp, shift: int = 1)` to solve the following problem: get trading date by shift Parameters ---------- trading_list: list trading calendar list shift : int shift, default is 1 trading_date : pd.Timestamp trading date Returns ------- Here is the function: def get_trading_date_by_shift(trading_list: list, trading_date: pd.Timestamp, shift: int = 1): """get trading date by shift Parameters ---------- trading_list: list trading calendar list shift : int shift, default is 1 trading_date : pd.Timestamp trading date Returns ------- """ trading_date = pd.Timestamp(trading_date) left_index = bisect.bisect_left(trading_list, trading_date) try: res = trading_list[left_index + shift] except IndexError: res = trading_date return res
get trading date by shift Parameters ---------- trading_list: list trading calendar list shift : int shift, default is 1 trading_date : pd.Timestamp trading date Returns -------
19,749
def get_instruments(
    qlib_dir: str,
    index_name: str,
    method: str = "parse_instruments",
    freq: str = "day",
    request_retry: int = 5,
    retry_sleep: int = 3,
    market_index: str = "cn_index",
):
    """Instantiate an index collector class and invoke one of its methods.

    Dynamically imports ``data_collector.<market_index>.collector`` and looks
    up the class named ``<INDEX_NAME>Index`` inside it.

    Parameters
    ----------
    qlib_dir: str
        qlib data dir, default "Path(__file__).parent/qlib_data"
    index_name: str
        index name, value from ["csi100", "csi300"]
    method: str
        method, value from ["parse_instruments", "save_new_companies"]
    freq: str
        freq, value from ["day", "1min"]
    request_retry: int
        request retry, by default 5
    retry_sleep: int
        request sleep, by default 3
    market_index: str
        package under ``data_collector`` that holds the index collector
        module, e.g. ``cn_index``

    Examples
    -------
        # parse instruments
        $ python collector.py --index_name CSI300 --qlib_dir ~/.qlib/qlib_data/cn_data --method parse_instruments

        # parse new companies
        $ python collector.py --index_name CSI300 --qlib_dir ~/.qlib/qlib_data/cn_data --method save_new_companies
    """
    collector_module = importlib.import_module("data_collector.{}.collector".format(market_index))
    index_class = getattr(collector_module, f"{index_name.upper()}Index")
    index_obj = index_class(
        qlib_dir=qlib_dir,
        index_name=index_name,
        freq=freq,
        request_retry=request_retry,
        retry_sleep=retry_sleep,
    )
    getattr(index_obj, method)()
Parameters ---------- qlib_dir: str qlib data dir, default "Path(__file__).parent/qlib_data" index_name: str index name, value from ["csi100", "csi300"] method: str method, value from ["parse_instruments", "save_new_companies"] freq: str freq, value from ["day", "1min"] request_retry: int request retry, by default 5 retry_sleep: int request sleep, by default 3 market_index: str Where the files to obtain the index are located, for example data_collector.cn_index.collector Examples ------- # parse instruments $ python collector.py --index_name CSI300 --qlib_dir ~/.qlib/qlib_data/cn_data --method parse_instruments # parse new companies $ python collector.py --index_name CSI300 --qlib_dir ~/.qlib/qlib_data/cn_data --method save_new_companies
19,750
import re import copy import importlib import time import bisect import pickle import random import requests import functools from pathlib import Path from typing import Iterable, Tuple, List import numpy as np import pandas as pd from lxml import etree from loguru import logger from yahooquery import Ticker from tqdm import tqdm from functools import partial from concurrent.futures import ProcessPoolExecutor from bs4 import BeautifulSoup def generate_minutes_calendar_from_daily( calendars: Iterable, freq: str = "1min", am_range: Tuple[str, str] = ("09:30:00", "11:29:00"), pm_range: Tuple[str, str] = ("13:00:00", "14:59:00"), ) -> pd.Index: """generate minutes calendar Parameters ---------- calendars: Iterable daily calendar freq: str by default 1min am_range: Tuple[str, str] AM Time Range, by default China-Stock: ("09:30:00", "11:29:00") pm_range: Tuple[str, str] PM Time Range, by default China-Stock: ("13:00:00", "14:59:00") """ daily_format: str = "%Y-%m-%d" res = [] for _day in calendars: for _range in [am_range, pm_range]: res.append( pd.date_range( f"{pd.Timestamp(_day).strftime(daily_format)} {_range[0]}", f"{pd.Timestamp(_day).strftime(daily_format)} {_range[1]}", freq=freq, ) ) return pd.Index(sorted(set(np.hstack(res)))) def get_1d_data( _date_field_name: str, _symbol_field_name: str, symbol: str, start: str, end: str, _1d_data_all: pd.DataFrame, ) -> pd.DataFrame: """get 1d data Returns ------ data_1d: pd.DataFrame data_1d.columns = [_date_field_name, _symbol_field_name, "paused", "volume", "factor", "close"] """ _all_1d_data = _get_all_1d_data(_date_field_name, _symbol_field_name, _1d_data_all) return _all_1d_data[ (_all_1d_data[_symbol_field_name] == symbol.upper()) & (_all_1d_data[_date_field_name] >= pd.Timestamp(start)) & (_all_1d_data[_date_field_name] < pd.Timestamp(end)) ] def calc_paused_num(df: pd.DataFrame, _date_field_name, _symbol_field_name): """calc paused num This method adds the paused_num field - The `paused_num` is the number of 
consecutive days of trading suspension. """ _symbol = df.iloc[0][_symbol_field_name] df = df.copy() df["_tmp_date"] = df[_date_field_name].apply(lambda x: pd.Timestamp(x).date()) # remove data that starts and ends with `np.nan` all day all_data = [] # Record the number of consecutive trading days where the whole day is nan, to remove the last trading day where the whole day is nan all_nan_nums = 0 # Record the number of consecutive occurrences of trading days that are not nan throughout the day not_nan_nums = 0 for _date, _df in df.groupby("_tmp_date"): _df["paused"] = 0 if not _df.loc[_df["volume"] < 0].empty: logger.warning(f"volume < 0, will fill np.nan: {_date} {_symbol}") _df.loc[_df["volume"] < 0, "volume"] = np.nan check_fields = set(_df.columns) - { "_tmp_date", "paused", "factor", _date_field_name, _symbol_field_name, } if _df.loc[:, list(check_fields)].isna().values.all() or (_df["volume"] == 0).all(): all_nan_nums += 1 not_nan_nums = 0 _df["paused"] = 1 if all_data: _df["paused_num"] = not_nan_nums all_data.append(_df) else: all_nan_nums = 0 not_nan_nums += 1 _df["paused_num"] = not_nan_nums all_data.append(_df) all_data = all_data[: len(all_data) - all_nan_nums] if all_data: df = pd.concat(all_data, sort=False) else: logger.warning(f"data is empty: {_symbol}") df = pd.DataFrame() return df del df["_tmp_date"] return df The provided code snippet includes necessary dependencies for implementing the `calc_adjusted_price` function. Write a Python function `def calc_adjusted_price( df: pd.DataFrame, _1d_data_all: pd.DataFrame, _date_field_name: str, _symbol_field_name: str, frequence: str, consistent_1d: bool = True, calc_paused: bool = True, ) -> pd.DataFrame` to solve the following problem: calc adjusted price This method does 4 things. 1. Adds the `paused` field. - The added paused field comes from the paused field of the 1d data. 2. Aligns the time of the 1d data. 3. The data is reweighted. 
def calc_adjusted_price(
    df: pd.DataFrame,
    _1d_data_all: pd.DataFrame,
    _date_field_name: str,
    _symbol_field_name: str,
    frequence: str,
    consistent_1d: bool = True,
    calc_paused: bool = True,
) -> pd.DataFrame:
    """calc adjusted price
    This method does 4 things.
    1. Adds the `paused` field.
        - The added paused field comes from the paused field of the 1d data.
    2. Aligns the time of the 1d data.
    3. The data is reweighted.
        - The reweighting method:
            - volume / factor
            - open * factor
            - high * factor
            - low * factor
            - close * factor
    4. Called `calc_paused_num` method to add the `paused_num` field.
        - The `paused_num` is the number of consecutive days of trading suspension.
    """
    # TODO: using daily data factor
    if df.empty:
        return df
    df = df.copy()
    # one row per timestamp, in chronological order
    df.drop_duplicates(subset=_date_field_name, inplace=True)
    df.sort_values(_date_field_name, inplace=True)
    symbol = df.iloc[0][_symbol_field_name]
    df[_date_field_name] = pd.to_datetime(df[_date_field_name])
    # get 1d data from qlib; window is [min date, max date + 1 day)
    _start = pd.Timestamp(df[_date_field_name].min()).strftime("%Y-%m-%d")
    _end = (pd.Timestamp(df[_date_field_name].max()) + pd.Timedelta(days=1)).strftime("%Y-%m-%d")
    data_1d: pd.DataFrame = get_1d_data(_date_field_name, _symbol_field_name, symbol, _start, _end, _1d_data_all)
    data_1d = data_1d.copy()
    if data_1d is None or data_1d.empty:
        # no 1d reference: normalize prices so the first valid close becomes 1
        df["factor"] = 1 / df.loc[df["close"].first_valid_index()]["close"]
        # TODO: np.nan or 1 or 0
        df["paused"] = np.nan
    else:
        # NOTE: volume is np.nan or volume <= 0, paused = 1
        # FIXME: find a more accurate data source
        data_1d["paused"] = 0
        data_1d.loc[(data_1d["volume"].isna()) | (data_1d["volume"] <= 0), "paused"] = 1
        data_1d = data_1d.set_index(_date_field_name)

        # add factor from 1d data
        # NOTE: 1d data info:
        #   - Close price adjusted for splits. Adjusted close price adjusted for both dividends and splits.
        #   - data_1d.adjclose: Adjusted close price adjusted for both dividends and splits.
        #   - data_1d.close: `data_1d.adjclose / (close for the first trading day that is not np.nan)`
        def _calc_factor(df_1d: pd.DataFrame):
            # per-day factor: daily reference close / last valid intraday close
            # of that day; the day's paused flag is copied from the 1d data
            try:
                _date = pd.Timestamp(pd.Timestamp(df_1d[_date_field_name].iloc[0]).date())
                df_1d["factor"] = data_1d.loc[_date]["close"] / df_1d.loc[df_1d["close"].last_valid_index()]["close"]
                df_1d["paused"] = data_1d.loc[_date]["paused"]
            except Exception:
                # day missing from the 1d reference: leave factor/paused unknown
                df_1d["factor"] = np.nan
                df_1d["paused"] = np.nan
            return df_1d

        df = df.groupby([df[_date_field_name].dt.date], group_keys=False).apply(_calc_factor)

        if consistent_1d:
            # the date sequence is consistent with 1d: reindex the intraday
            # rows onto the canonical minute grid derived from the 1d dates
            # (session windows below are China A-share AM/PM ranges)
            df.set_index(_date_field_name, inplace=True)
            df = df.reindex(
                generate_minutes_calendar_from_daily(
                    calendars=pd.to_datetime(data_1d.reset_index()[_date_field_name].drop_duplicates()),
                    freq=frequence,
                    am_range=("09:30:00", "11:29:00"),
                    pm_range=("13:00:00", "14:59:00"),
                )
            )
            # backfill the symbol column on the newly inserted rows
            df[_symbol_field_name] = df.loc[df[_symbol_field_name].first_valid_index()][_symbol_field_name]
            df.index.names = [_date_field_name]
            df.reset_index(inplace=True)
    # apply the adjustment: prices are scaled up by factor, volume scaled down
    for _col in ["open", "close", "high", "low", "volume"]:
        if _col not in df.columns:
            continue
        if _col == "volume":
            df[_col] = df[_col] / df["factor"]
        else:
            df[_col] = df[_col] * df["factor"]
    if calc_paused:
        df = calc_paused_num(df, _date_field_name, _symbol_field_name)
    return df
calc adjusted price This method does 4 things. 1. Adds the `paused` field. - The added paused field comes from the paused field of the 1d data. 2. Aligns the time of the 1d data. 3. The data is reweighted. - The reweighting method: - volume / factor - open * factor - high * factor - low * factor - close * factor 4. Called `calc_paused_num` method to add the `paused_num` field. - The `paused_num` is the number of consecutive days of trading suspension.
19,751
import sys from pathlib import Path from concurrent.futures import ThreadPoolExecutor import fire import qlib import pandas as pd from tqdm import tqdm from qlib.data import D from loguru import logger from data_collector.utils import generate_minutes_calendar_from_daily def get_date_range(data_1min_dir: Path, max_workers: int = 16, date_field_name: str = "date"): csv_files = list(data_1min_dir.glob("*.csv")) min_date = None max_date = None with tqdm(total=len(csv_files)) as p_bar: with ThreadPoolExecutor(max_workers=max_workers) as executor: for _file, _result in zip(csv_files, executor.map(pd.read_csv, csv_files)): if not _result.empty: _dates = pd.to_datetime(_result[date_field_name]) _tmp_min = _dates.min() min_date = min(min_date, _tmp_min) if min_date is not None else _tmp_min _tmp_max = _dates.max() max_date = max(max_date, _tmp_max) if max_date is not None else _tmp_max p_bar.update() return min_date, max_date def get_symbols(data_1min_dir: Path): return list(map(lambda x: x.name[:-4].upper(), data_1min_dir.glob("*.csv"))) def generate_minutes_calendar_from_daily( calendars: Iterable, freq: str = "1min", am_range: Tuple[str, str] = ("09:30:00", "11:29:00"), pm_range: Tuple[str, str] = ("13:00:00", "14:59:00"), ) -> pd.Index: """generate minutes calendar Parameters ---------- calendars: Iterable daily calendar freq: str by default 1min am_range: Tuple[str, str] AM Time Range, by default China-Stock: ("09:30:00", "11:29:00") pm_range: Tuple[str, str] PM Time Range, by default China-Stock: ("13:00:00", "14:59:00") """ daily_format: str = "%Y-%m-%d" res = [] for _day in calendars: for _range in [am_range, pm_range]: res.append( pd.date_range( f"{pd.Timestamp(_day).strftime(daily_format)} {_range[0]}", f"{pd.Timestamp(_day).strftime(daily_format)} {_range[1]}", freq=freq, ) ) return pd.Index(sorted(set(np.hstack(res)))) The provided code snippet includes necessary dependencies for implementing the `fill_1min_using_1d` function. 
def fill_1min_using_1d(
    data_1min_dir: [str, Path],
    qlib_data_1d_dir: [str, Path],
    max_workers: int = 16,
    date_field_name: str = "date",
    symbol_field_name: str = "symbol",
):
    """Use 1d data to fill in the missing symbols relative to 1min

    For every instrument that exists in the 1d qlib data but has no csv in
    ``data_1min_dir``, writes a placeholder 1min csv whose rows cover the
    instrument's trading minutes (all value columns empty, ``paused_num`` = 0).

    Parameters
    ----------
    data_1min_dir: str
        1min data dir
    qlib_data_1d_dir: str
        1d qlib data(bin data) dir, from: https://qlib.readthedocs.io/en/latest/component/data.html#converting-csv-format-into-qlib-format
    max_workers: int
        ThreadPoolExecutor(max_workers), by default 16
    date_field_name: str
        date field name, by default date
    symbol_field_name: str
        symbol field name, by default symbol
    """
    data_1min_dir = Path(data_1min_dir).expanduser().resolve()
    qlib_data_1d_dir = Path(qlib_data_1d_dir).expanduser().resolve()
    # overall date span covered by the existing 1min csv files
    min_date, max_date = get_date_range(data_1min_dir, max_workers, date_field_name)
    symbols_1min = get_symbols(data_1min_dir)
    qlib.init(provider_uri=str(qlib_data_1d_dir))
    data_1d = D.features(D.instruments("all"), ["$close"], min_date, max_date, freq="day")
    # instruments present in the 1d data but without a 1min csv
    # NOTE(review): assumes 1d instrument codes are upper-case, matching
    # get_symbols()'s upper() — confirm for non-China data sources.
    miss_symbols = set(data_1d.index.get_level_values(level="instrument").unique()) - set(symbols_1min)
    if not miss_symbols:
        logger.warning("More symbols in 1min than 1d, no padding required")
        return
    logger.info(f"miss_symbols {len(miss_symbols)}: {miss_symbols}")
    # use an arbitrary existing csv as the column template, and copy its
    # symbol-casing convention for the generated files
    tmp_df = pd.read_csv(list(data_1min_dir.glob("*.csv"))[0])
    columns = tmp_df.columns
    _si = tmp_df[symbol_field_name].first_valid_index()
    is_lower = tmp_df.loc[_si][symbol_field_name].islower()
    for symbol in tqdm(miss_symbols):
        if is_lower:
            symbol = symbol.lower()
        # expand the instrument's 1d trading dates into a minute-level index
        index_1d = data_1d.loc(axis=0)[symbol.upper()].index
        index_1min = generate_minutes_calendar_from_daily(index_1d)
        index_1min.name = date_field_name
        _df = pd.DataFrame(columns=columns, index=index_1min)
        # the date lives in the index; drop any duplicate date column before
        # resetting the index back into a column
        if date_field_name in _df.columns:
            del _df[date_field_name]
        _df.reset_index(inplace=True)
        _df[symbol_field_name] = symbol
        _df["paused_num"] = 0
        _df.to_csv(data_1min_dir.joinpath(f"{symbol}.csv"), index=False)
Use 1d data to fill in the missing symbols relative to 1min Parameters ---------- data_1min_dir: str 1min data dir qlib_data_1d_dir: str 1d qlib data(bin data) dir, from: https://qlib.readthedocs.io/en/latest/component/data.html#converting-csv-format-into-qlib-format max_workers: int ThreadPoolExecutor(max_workers), by default 16 date_field_name: str date field name, by default date symbol_field_name: str symbol field name, by default symbol
19,752
import sys from typing import List from pathlib import Path import fire import numpy as np import pandas as pd from loguru import logger import baostock as bs from data_collector.utils import generate_minutes_calendar_from_daily def read_calendar_from_qlib(qlib_dir: Path) -> pd.DataFrame: calendar_path = qlib_dir.joinpath("calendars").joinpath("day.txt") if not calendar_path.exists(): return pd.DataFrame() return pd.read_csv(calendar_path, header=None) def write_calendar_to_qlib(qlib_dir: Path, date_list: List[str], freq: str = "day"): calendar_path = str(qlib_dir.joinpath("calendars").joinpath(f"{freq}_future.txt")) np.savetxt(calendar_path, date_list, fmt="%s", encoding="utf-8") logger.info(f"write future calendars success: {calendar_path}") def generate_qlib_calendar(date_list: List[str], freq: str) -> List[str]: print(freq) if freq == "day": return date_list elif freq == "1min": date_list = generate_minutes_calendar_from_daily(date_list, freq=freq).tolist() return list(map(lambda x: pd.Timestamp(x).strftime("%Y-%m-%d %H:%M:%S"), date_list)) else: raise ValueError(f"Unsupported freq: {freq}") The provided code snippet includes necessary dependencies for implementing the `future_calendar_collector` function. 
Write a Python function `def future_calendar_collector(qlib_dir: [str, Path], freq: str = "day")` to solve the following problem: get future calendar Parameters ---------- qlib_dir: str or Path qlib data directory freq: str value from ["day", "1min"], by default day Here is the function: def future_calendar_collector(qlib_dir: [str, Path], freq: str = "day"): """get future calendar Parameters ---------- qlib_dir: str or Path qlib data directory freq: str value from ["day", "1min"], by default day """ qlib_dir = Path(qlib_dir).expanduser().resolve() if not qlib_dir.exists(): raise FileNotFoundError(str(qlib_dir)) lg = bs.login() if lg.error_code != "0": logger.error(f"login error: {lg.error_msg}") return # read daily calendar daily_calendar = read_calendar_from_qlib(qlib_dir) end_year = pd.Timestamp.now().year if daily_calendar.empty: start_year = pd.Timestamp.now().year else: start_year = pd.Timestamp(daily_calendar.iloc[-1, 0]).year rs = bs.query_trade_dates(start_date=pd.Timestamp(f"{start_year}-01-01"), end_date=f"{end_year}-12-31") data_list = [] while (rs.error_code == "0") & rs.next(): _row_data = rs.get_row_data() if int(_row_data[1]) == 1: data_list.append(_row_data[0]) data_list = sorted(data_list) date_list = generate_qlib_calendar(data_list, freq=freq) date_list = sorted(set(daily_calendar.loc[:, 0].values.tolist() + date_list)) write_calendar_to_qlib(qlib_dir, date_list, freq=freq) bs.logout() logger.info(f"get trading dates success: {start_year}-01-01 to {end_year}-12-31")
get future calendar Parameters ---------- qlib_dir: str or Path qlib data directory freq: str value from ["day", "1min"], by default day
19,753
import abc import sys import datetime from abc import ABC from pathlib import Path import fire import pandas as pd from loguru import logger from dateutil.tz import tzlocal from data_collector.base import BaseCollector, BaseNormalize, BaseRun from data_collector.utils import deco_retry from pycoingecko import CoinGeckoAPI from time import mktime from datetime import datetime as dt import time _CG_CRYPTO_SYMBOLS = None def deco_retry(retry: int = 5, retry_sleep: int = 3): def deco_func(func): def wrapper(*args, **kwargs): _retry = 5 if callable(retry) else retry _result = None for _i in range(1, _retry + 1): try: _result = func(*args, **kwargs) break except Exception as e: logger.warning(f"{func.__name__}: {_i} :{e}") if _i == _retry: raise time.sleep(retry_sleep) return _result return wrapper return deco_func(retry) if callable(retry) else deco_func The provided code snippet includes necessary dependencies for implementing the `get_cg_crypto_symbols` function. Write a Python function `def get_cg_crypto_symbols(qlib_data_path: [str, Path] = None) -> list` to solve the following problem: get crypto symbols in coingecko Returns ------- crypto symbols in given exchanges list of coingecko Here is the function: def get_cg_crypto_symbols(qlib_data_path: [str, Path] = None) -> list: """get crypto symbols in coingecko Returns ------- crypto symbols in given exchanges list of coingecko """ global _CG_CRYPTO_SYMBOLS # pylint: disable=W0603 @deco_retry def _get_coingecko(): try: cg = CoinGeckoAPI() resp = pd.DataFrame(cg.get_coins_markets(vs_currency="usd")) except Exception as e: raise ValueError("request error") from e try: _symbols = resp["id"].to_list() except Exception as e: logger.warning(f"request error: {e}") raise return _symbols if _CG_CRYPTO_SYMBOLS is None: _all_symbols = _get_coingecko() _CG_CRYPTO_SYMBOLS = sorted(set(_all_symbols)) return _CG_CRYPTO_SYMBOLS
get crypto symbols in coingecko Returns ------- crypto symbols in given exchanges list of coingecko
19,754
import re import abc import sys from io import BytesIO from typing import List, Iterable from pathlib import Path import fire import requests import pandas as pd import baostock as bs from tqdm import tqdm from loguru import logger from data_collector.index import IndexBase from data_collector.utils import get_calendar_list, get_trading_date_by_shift, deco_retry from data_collector.utils import get_instruments REQ_HEADERS = { "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.101 Safari/537.36 Edg/91.0.864.48" } def retry_request(url: str, method: str = "get", exclude_status: List = None): if exclude_status is None: exclude_status = [] method_func = getattr(requests, method) _resp = method_func(url, headers=REQ_HEADERS, timeout=None) _status = _resp.status_code if _status not in exclude_status and _status != 200: raise ValueError(f"response status: {_status}, url={url}") return _resp
null
19,755
import mysql.connector def print_user(user): config = { "host": "127.0.0.1", "port": "3306", "database": "hello_mysql", "user": "root", "password": "root1234" } # config = { # "host": "bpw0hq9h09e7mqicjhtl-mysql.services.clever-cloud.com", # "port": "3306", # "database": "bpw0hq9h09e7mqicjhtl", # "user": "uqzby88erlhvkrty", # "password": "oePXiCOHdU1WRV80NPyv" # } connection = mysql.connector.connect(**config) cursor = connection.cursor() query = "SELECT * FROM users WHERE name=%s;" print(query) cursor.execute(query, (user,)) result = cursor.fetchall() for row in result: print(row) cursor.close() connection.close()
null
19,756
from hatchway import QueryOrBody, api_view from api import schemas from api.decorators import scope_required from api.models import Application def add_app( request, client_name: QueryOrBody[str], redirect_uris: QueryOrBody[str], scopes: QueryOrBody[None | str] = None, website: QueryOrBody[None | str] = None, ) -> schemas.Application: application = Application.create( client_name=client_name, website=website, redirect_uris=redirect_uris, scopes=scopes, ) return schemas.Application.from_application(application)
null
19,757
from hatchway import QueryOrBody, api_view from api import schemas from api.decorators import scope_required from api.models import Application def verify_credentials( request, ) -> schemas.Application: return schemas.Application.from_application_no_keys(request.token.application)
null
19,758
from django.core.files import File from django.shortcuts import get_object_or_404 from hatchway import ApiError, QueryOrBody, api_view from activities.models import PostAttachment, PostAttachmentStates from api import schemas from core.files import blurhash_image, resize_image from ..decorators import scope_required def resize_image( image: File, *, size: tuple[int, int], cover=True, keep_format=False, ) -> ImageFile: """ Resizes an image to fit insize the given size (cropping one dimension to fit if needed) """ with Image.open(image) as img: try: # Take any orientation EXIF data, apply it, and strip the # orientation data from the new image. img = ImageOps.exif_transpose(img) except Exception: # noqa # exif_transpose can crash with different errors depending on # the EXIF keys. Just ignore them all, better to have a rotated # image than no image. pass if cover: resized_image = ImageOps.fit(img, size, method=Image.Resampling.BILINEAR) else: resized_image = img.copy() resized_image.thumbnail(size, resample=Image.Resampling.BILINEAR) new_image_bytes = io.BytesIO() if keep_format: resized_image.save(new_image_bytes, format=img.format) file = ImageFile(new_image_bytes) else: resized_image.save(new_image_bytes, format="webp", save_all=True) file = ImageFile(new_image_bytes, name="image.webp") file.image = resized_image return file def blurhash_image(file) -> str: """ Returns the blurhash for an image """ return blurhash.encode(file, 4, 4) def upload_media( request, file: File, description: QueryOrBody[str] = "", focus: QueryOrBody[str] = "0,0", ) -> schemas.MediaAttachment: main_file = resize_image( file, size=(2000, 2000), cover=False, ) thumbnail_file = resize_image( file, size=(400, 225), cover=True, ) attachment = PostAttachment.objects.create( blurhash=blurhash_image(thumbnail_file), mimetype="image/webp", width=main_file.image.width, height=main_file.image.height, name=description or None, state=PostAttachmentStates.fetched, author=request.identity, ) 
attachment.file.save( main_file.name, main_file, ) attachment.thumbnail.save( thumbnail_file.name, thumbnail_file, ) attachment.save() return schemas.MediaAttachment.from_post_attachment(attachment)
null
19,759
from django.core.files import File from django.shortcuts import get_object_or_404 from hatchway import ApiError, QueryOrBody, api_view from activities.models import PostAttachment, PostAttachmentStates from api import schemas from core.files import blurhash_image, resize_image from ..decorators import scope_required def get_media( request, id: str, ) -> schemas.MediaAttachment: attachment = get_object_or_404(PostAttachment, pk=id) if attachment.post: if attachment.post.author != request.identity: raise ApiError(401, "Not the author of this attachment") elif attachment.author and attachment.author != request.identity: raise ApiError(401, "Not the author of this attachment") return schemas.MediaAttachment.from_post_attachment(attachment)
null
19,760
from django.core.files import File from django.shortcuts import get_object_or_404 from hatchway import ApiError, QueryOrBody, api_view from activities.models import PostAttachment, PostAttachmentStates from api import schemas from core.files import blurhash_image, resize_image from ..decorators import scope_required def update_media( request, id: str, description: QueryOrBody[str] = "", focus: QueryOrBody[str] = "0,0", ) -> schemas.MediaAttachment: attachment = get_object_or_404(PostAttachment, pk=id) if attachment.post: if attachment.post.author != request.identity: raise ApiError(401, "Not the author of this attachment") elif attachment.author != request.identity: raise ApiError(401, "Not the author of this attachment") attachment.name = description or None attachment.save() return schemas.MediaAttachment.from_post_attachment(attachment)
null
19,761
from hatchway import api_view from activities.models import Emoji from api.schemas import CustomEmoji class CustomEmoji(Schema): def from_emoji(cls, emoji: activities_models.Emoji) -> "CustomEmoji": def emojis(request) -> list[CustomEmoji]: return [ CustomEmoji.from_emoji(e) for e in Emoji.objects.usable().filter(local=True) ]
null
19,762
from django.http import HttpRequest from django.shortcuts import get_object_or_404 from hatchway import ApiResponse, api_view from activities.models import PostInteraction, TimelineEvent from activities.services import TimelineService from api import schemas from api.decorators import scope_required from api.pagination import MastodonPaginator, PaginatingApiResponse, PaginationResult NOTIFICATION_TYPES = { "favourite": TimelineEvent.Types.liked, "reblog": TimelineEvent.Types.boosted, "mention": TimelineEvent.Types.mentioned, "follow": TimelineEvent.Types.followed, "admin.sign_up": TimelineEvent.Types.identity_created, } def notifications( request: HttpRequest, max_id: str | None = None, since_id: str | None = None, min_id: str | None = None, limit: int = 20, account_id: str | None = None, ) -> ApiResponse[list[schemas.Notification]]: def get_notification( request: HttpRequest, id: str, ) -> schemas.Notification: notification = get_object_or_404( TimelineService(request.identity).notifications( list(NOTIFICATION_TYPES.values()) ), id=id, ) return schemas.Notification.from_timeline_event(notification)
null
19,763
from django.http import HttpRequest from django.shortcuts import get_object_or_404 from hatchway import ApiResponse, api_view from activities.models import PostInteraction, TimelineEvent from activities.services import TimelineService from api import schemas from api.decorators import scope_required from api.pagination import MastodonPaginator, PaginatingApiResponse, PaginationResult NOTIFICATION_TYPES = { "favourite": TimelineEvent.Types.liked, "reblog": TimelineEvent.Types.boosted, "mention": TimelineEvent.Types.mentioned, "follow": TimelineEvent.Types.followed, "admin.sign_up": TimelineEvent.Types.identity_created, } def notifications( request: HttpRequest, max_id: str | None = None, since_id: str | None = None, min_id: str | None = None, limit: int = 20, account_id: str | None = None, ) -> ApiResponse[list[schemas.Notification]]: requested_types = set(request.GET.getlist("types[]")) excluded_types = set(request.GET.getlist("exclude_types[]")) if not requested_types: requested_types = set(NOTIFICATION_TYPES.keys()) requested_types.difference_update(excluded_types) # Use that to pull relevant events queryset = TimelineService(request.identity).notifications( [NOTIFICATION_TYPES[r] for r in requested_types if r in NOTIFICATION_TYPES] ) paginator = MastodonPaginator() pager: PaginationResult[TimelineEvent] = paginator.paginate( queryset, min_id=min_id, max_id=max_id, since_id=since_id, limit=limit, ) interactions = PostInteraction.get_event_interactions( pager.results, request.identity, ) return PaginatingApiResponse( [ schemas.Notification.from_timeline_event(event, interactions=interactions) for event in pager.results ], request=request, include_params=["limit", "account_id"], ) def dismiss_notifications(request: HttpRequest) -> dict: TimelineService(request.identity).notifications( list(NOTIFICATION_TYPES.values()) ).update(dismissed=True) return {}
null
19,764
from django.http import HttpRequest from django.shortcuts import get_object_or_404 from hatchway import ApiResponse, api_view from activities.models import PostInteraction, TimelineEvent from activities.services import TimelineService from api import schemas from api.decorators import scope_required from api.pagination import MastodonPaginator, PaginatingApiResponse, PaginationResult NOTIFICATION_TYPES = { "favourite": TimelineEvent.Types.liked, "reblog": TimelineEvent.Types.boosted, "mention": TimelineEvent.Types.mentioned, "follow": TimelineEvent.Types.followed, "admin.sign_up": TimelineEvent.Types.identity_created, } def notifications( request: HttpRequest, max_id: str | None = None, since_id: str | None = None, min_id: str | None = None, limit: int = 20, account_id: str | None = None, ) -> ApiResponse[list[schemas.Notification]]: requested_types = set(request.GET.getlist("types[]")) excluded_types = set(request.GET.getlist("exclude_types[]")) if not requested_types: requested_types = set(NOTIFICATION_TYPES.keys()) requested_types.difference_update(excluded_types) # Use that to pull relevant events queryset = TimelineService(request.identity).notifications( [NOTIFICATION_TYPES[r] for r in requested_types if r in NOTIFICATION_TYPES] ) paginator = MastodonPaginator() pager: PaginationResult[TimelineEvent] = paginator.paginate( queryset, min_id=min_id, max_id=max_id, since_id=since_id, limit=limit, ) interactions = PostInteraction.get_event_interactions( pager.results, request.identity, ) return PaginatingApiResponse( [ schemas.Notification.from_timeline_event(event, interactions=interactions) for event in pager.results ], request=request, include_params=["limit", "account_id"], ) def dismiss_notification(request: HttpRequest, id: str) -> dict: notification = get_object_or_404( TimelineService(request.identity).notifications( list(NOTIFICATION_TYPES.values()) ), id=id, ) notification.dismissed = True notification.save() return {}
null
19,765
from django.http import HttpRequest from django.shortcuts import get_object_or_404 from hatchway import api_view from activities.models import Hashtag from api import schemas from api.decorators import scope_required from api.pagination import MastodonPaginator, PaginatingApiResponse, PaginationResult from users.models import HashtagFollow class PaginatingApiResponse(ApiResponse[list[TI]]): """ An ApiResponse subclass that also handles pagination link headers """ def __init__( self, data: list[TI], request: HttpRequest, include_params: list[str], **kwargs, ): # Call superclass super().__init__(data, **kwargs) # Figure out if we need link headers self._request = request self.extra_params = self.filter_params(self._request, include_params) link_header = self.build_link_header() if link_header: self.headers["link"] = link_header def filter_params(request: HttpRequest, allowed_params: list[str]): params = {} for key in allowed_params: value = request.GET.get(key, None) if value: params[key] = value return params def get_part(self, data_index: int, param_name: str, rel: str) -> str | None: """ Used to get next/prev URLs """ if not self.data: return None # Use the ID of the last object for the next page start params = dict(self.extra_params) params[param_name] = self.data[data_index].id return ( "<" + self._request.build_absolute_uri(self._request.path) + "?" + urllib.parse.urlencode(params) + f'>; rel="{rel}"' ) def build_link_header(self): parts = [ entry for entry in [ self.get_part(-1, "max_id", "next"), self.get_part(0, "min_id", "prev"), ] if entry ] if not parts: return None return ", ".join(parts) class PaginationResult(Generic[T]): """ Represents a pagination result for Mastodon (it does Link header stuff) """ #: A list of objects that matched the pagination query. results: list[T] #: The actual applied limit, which may be different from what was requested. 
limit: int #: A list of transformed JSON objects json_results: list[dict] | None = None def empty(cls): return cls(results=[], limit=20) def next(self, request: HttpRequest, allowed_params: list[str]): """ Returns a URL to the next page of results. """ if not self.results: return None if self.json_results is None: raise ValueError("You must JSONify the results first") params = self.filter_params(request, allowed_params) params["max_id"] = self.json_results[-1]["id"] return f"{request.build_absolute_uri(request.path)}?{urllib.parse.urlencode(params)}" def prev(self, request: HttpRequest, allowed_params: list[str]): """ Returns a URL to the previous page of results. """ if not self.results: return None if self.json_results is None: raise ValueError("You must JSONify the results first") params = self.filter_params(request, allowed_params) params["min_id"] = self.json_results[0]["id"] return f"{request.build_absolute_uri(request.path)}?{urllib.parse.urlencode(params)}" def link_header(self, request: HttpRequest, allowed_params: list[str]): """ Creates a link header for the given request """ return ", ".join( ( f'<{self.next(request, allowed_params)}>; rel="next"', f'<{self.prev(request, allowed_params)}>; rel="prev"', ) ) def jsonify_results(self, map_function: Callable[[Any], Any]): """ Replaces our results with ones transformed via map_function """ self.json_results = [map_function(result) for result in self.results] def jsonify_posts(self, identity): """ Predefined way of JSON-ifying Post objects """ interactions = PostInteraction.get_post_interactions(self.results, identity) self.jsonify_results( lambda post: post.to_mastodon_json( interactions=interactions, identity=identity ) ) def jsonify_status_events(self, identity): """ Predefined way of JSON-ifying TimelineEvent objects representing statuses """ interactions = PostInteraction.get_event_interactions(self.results, identity) self.jsonify_results( lambda event: event.to_mastodon_status_json( 
interactions=interactions, identity=identity ) ) def jsonify_notification_events(self, identity): """ Predefined way of JSON-ifying TimelineEvent objects representing notifications """ interactions = PostInteraction.get_event_interactions(self.results, identity) self.jsonify_results( lambda event: event.to_mastodon_notification_json(interactions=interactions) ) def jsonify_identities(self): """ Predefined way of JSON-ifying Identity objects """ self.jsonify_results(lambda identity: identity.to_mastodon_json()) def filter_params(request: HttpRequest, allowed_params: list[str]): params = {} for key in allowed_params: value = request.GET.get(key, None) if value: params[key] = value return params class MastodonPaginator: """ Paginates in the Mastodon style (max_id, min_id, etc). Note that this basically _requires_ us to always do it on IDs, so we do. """ def __init__( self, default_limit: int = 20, max_limit: int = 40, ): self.default_limit = default_limit self.max_limit = max_limit def paginate( self, queryset: models.QuerySet[TM], min_id: str | None, max_id: str | None, since_id: str | None, limit: int | None, home: bool = False, ) -> PaginationResult[TM]: limit = min(limit or self.default_limit, self.max_limit) filters = {} id_field = "id" reverse = False if home: # The home timeline interleaves Post IDs and PostInteraction IDs in an # annotated field called "subject_id". id_field = "subject_id" queryset = queryset.annotate( subject_id=Case( When(type=TimelineEvent.Types.post, then=F("subject_post_id")), default=F("subject_post_interaction"), ) ) # These "does not start with interaction" checks can be removed after a # couple months, when clients have flushed them out. 
if max_id and not max_id.startswith("interaction"): filters[f"{id_field}__lt"] = max_id if since_id and not since_id.startswith("interaction"): filters[f"{id_field}__gt"] = since_id if min_id and not min_id.startswith("interaction"): # Min ID requires items _immediately_ newer than specified, so we # invert the ordering to accommodate filters[f"{id_field}__gt"] = min_id reverse = True # Default is to order by ID descending (newest first), except for min_id # queries, which should order by ID for limiting, then reverse the results to be # consistent. The clearest explanation of this I've found so far is this: # https://mastodon.social/@Gargron/100846335353411164 ordering = id_field if reverse else f"-{id_field}" results = list(queryset.filter(**filters).order_by(ordering)[:limit]) if reverse: results.reverse() return PaginationResult( results=results, limit=limit, ) def followed_tags( request: HttpRequest, max_id: str | None = None, since_id: str | None = None, min_id: str | None = None, limit: int = 100, ) -> list[schemas.Tag]: queryset = HashtagFollow.objects.by_identity(request.identity) paginator = MastodonPaginator() pager: PaginationResult[HashtagFollow] = paginator.paginate( queryset, min_id=min_id, max_id=max_id, since_id=since_id, limit=limit, ) return PaginatingApiResponse( schemas.FollowedTag.map_from_follows(pager.results), request=request, include_params=["limit"], )
null
19,766
from django.http import HttpRequest from django.shortcuts import get_object_or_404 from hatchway import api_view from activities.models import Hashtag from api import schemas from api.decorators import scope_required from api.pagination import MastodonPaginator, PaginatingApiResponse, PaginationResult from users.models import HashtagFollow def hashtag(request: HttpRequest, hashtag: str) -> schemas.Tag: tag = get_object_or_404( Hashtag, pk=hashtag.lower(), ) following = None if request.identity: following = tag.followers.filter(identity=request.identity).exists() return schemas.Tag.from_hashtag( tag, following=following, ) def follow( request: HttpRequest, id: str, ) -> schemas.Tag: hashtag = get_object_or_404( Hashtag, pk=id.lower(), ) request.identity.hashtag_follows.get_or_create(hashtag=hashtag) return schemas.Tag.from_hashtag( hashtag, following=True, )
null
19,767
from django.http import HttpRequest from django.shortcuts import get_object_or_404 from hatchway import api_view from activities.models import Hashtag from api import schemas from api.decorators import scope_required from api.pagination import MastodonPaginator, PaginatingApiResponse, PaginationResult from users.models import HashtagFollow def hashtag(request: HttpRequest, hashtag: str) -> schemas.Tag: tag = get_object_or_404( Hashtag, pk=hashtag.lower(), ) following = None if request.identity: following = tag.followers.filter(identity=request.identity).exists() return schemas.Tag.from_hashtag( tag, following=following, ) def unfollow( request: HttpRequest, id: str, ) -> schemas.Tag: hashtag = get_object_or_404( Hashtag, pk=id.lower(), ) request.identity.hashtag_follows.filter(hashtag=hashtag).delete() return schemas.Tag.from_hashtag( hashtag, following=False, )
null
19,768
from django.http import HttpRequest from hatchway import api_view from activities.models import Post from activities.services import TimelineService from api import schemas from api.decorators import scope_required from api.pagination import MastodonPaginator, PaginatingApiResponse, PaginationResult class PaginatingApiResponse(ApiResponse[list[TI]]): """ An ApiResponse subclass that also handles pagination link headers """ def __init__( self, data: list[TI], request: HttpRequest, include_params: list[str], **kwargs, ): # Call superclass super().__init__(data, **kwargs) # Figure out if we need link headers self._request = request self.extra_params = self.filter_params(self._request, include_params) link_header = self.build_link_header() if link_header: self.headers["link"] = link_header def filter_params(request: HttpRequest, allowed_params: list[str]): params = {} for key in allowed_params: value = request.GET.get(key, None) if value: params[key] = value return params def get_part(self, data_index: int, param_name: str, rel: str) -> str | None: """ Used to get next/prev URLs """ if not self.data: return None # Use the ID of the last object for the next page start params = dict(self.extra_params) params[param_name] = self.data[data_index].id return ( "<" + self._request.build_absolute_uri(self._request.path) + "?" + urllib.parse.urlencode(params) + f'>; rel="{rel}"' ) def build_link_header(self): parts = [ entry for entry in [ self.get_part(-1, "max_id", "next"), self.get_part(0, "min_id", "prev"), ] if entry ] if not parts: return None return ", ".join(parts) class PaginationResult(Generic[T]): """ Represents a pagination result for Mastodon (it does Link header stuff) """ #: A list of objects that matched the pagination query. results: list[T] #: The actual applied limit, which may be different from what was requested. 
limit: int #: A list of transformed JSON objects json_results: list[dict] | None = None def empty(cls): return cls(results=[], limit=20) def next(self, request: HttpRequest, allowed_params: list[str]): """ Returns a URL to the next page of results. """ if not self.results: return None if self.json_results is None: raise ValueError("You must JSONify the results first") params = self.filter_params(request, allowed_params) params["max_id"] = self.json_results[-1]["id"] return f"{request.build_absolute_uri(request.path)}?{urllib.parse.urlencode(params)}" def prev(self, request: HttpRequest, allowed_params: list[str]): """ Returns a URL to the previous page of results. """ if not self.results: return None if self.json_results is None: raise ValueError("You must JSONify the results first") params = self.filter_params(request, allowed_params) params["min_id"] = self.json_results[0]["id"] return f"{request.build_absolute_uri(request.path)}?{urllib.parse.urlencode(params)}" def link_header(self, request: HttpRequest, allowed_params: list[str]): """ Creates a link header for the given request """ return ", ".join( ( f'<{self.next(request, allowed_params)}>; rel="next"', f'<{self.prev(request, allowed_params)}>; rel="prev"', ) ) def jsonify_results(self, map_function: Callable[[Any], Any]): """ Replaces our results with ones transformed via map_function """ self.json_results = [map_function(result) for result in self.results] def jsonify_posts(self, identity): """ Predefined way of JSON-ifying Post objects """ interactions = PostInteraction.get_post_interactions(self.results, identity) self.jsonify_results( lambda post: post.to_mastodon_json( interactions=interactions, identity=identity ) ) def jsonify_status_events(self, identity): """ Predefined way of JSON-ifying TimelineEvent objects representing statuses """ interactions = PostInteraction.get_event_interactions(self.results, identity) self.jsonify_results( lambda event: event.to_mastodon_status_json( 
interactions=interactions, identity=identity ) ) def jsonify_notification_events(self, identity): """ Predefined way of JSON-ifying TimelineEvent objects representing notifications """ interactions = PostInteraction.get_event_interactions(self.results, identity) self.jsonify_results( lambda event: event.to_mastodon_notification_json(interactions=interactions) ) def jsonify_identities(self): """ Predefined way of JSON-ifying Identity objects """ self.jsonify_results(lambda identity: identity.to_mastodon_json()) def filter_params(request: HttpRequest, allowed_params: list[str]): params = {} for key in allowed_params: value = request.GET.get(key, None) if value: params[key] = value return params class MastodonPaginator: """ Paginates in the Mastodon style (max_id, min_id, etc). Note that this basically _requires_ us to always do it on IDs, so we do. """ def __init__( self, default_limit: int = 20, max_limit: int = 40, ): self.default_limit = default_limit self.max_limit = max_limit def paginate( self, queryset: models.QuerySet[TM], min_id: str | None, max_id: str | None, since_id: str | None, limit: int | None, home: bool = False, ) -> PaginationResult[TM]: limit = min(limit or self.default_limit, self.max_limit) filters = {} id_field = "id" reverse = False if home: # The home timeline interleaves Post IDs and PostInteraction IDs in an # annotated field called "subject_id". id_field = "subject_id" queryset = queryset.annotate( subject_id=Case( When(type=TimelineEvent.Types.post, then=F("subject_post_id")), default=F("subject_post_interaction"), ) ) # These "does not start with interaction" checks can be removed after a # couple months, when clients have flushed them out. 
if max_id and not max_id.startswith("interaction"): filters[f"{id_field}__lt"] = max_id if since_id and not since_id.startswith("interaction"): filters[f"{id_field}__gt"] = since_id if min_id and not min_id.startswith("interaction"): # Min ID requires items _immediately_ newer than specified, so we # invert the ordering to accommodate filters[f"{id_field}__gt"] = min_id reverse = True # Default is to order by ID descending (newest first), except for min_id # queries, which should order by ID for limiting, then reverse the results to be # consistent. The clearest explanation of this I've found so far is this: # https://mastodon.social/@Gargron/100846335353411164 ordering = id_field if reverse else f"-{id_field}" results = list(queryset.filter(**filters).order_by(ordering)[:limit]) if reverse: results.reverse() return PaginationResult( results=results, limit=limit, ) def bookmarks( request: HttpRequest, max_id: str | None = None, since_id: str | None = None, min_id: str | None = None, limit: int = 20, ) -> list[schemas.Status]: queryset = TimelineService(request.identity).bookmarks() paginator = MastodonPaginator() pager: PaginationResult[Post] = paginator.paginate( queryset, min_id=min_id, max_id=max_id, since_id=since_id, limit=limit, ) return PaginatingApiResponse( schemas.Status.map_from_post(pager.results, request.identity), request=request, include_params=["limit"], )
null
19,769
from django.http import HttpRequest
from django.shortcuts import get_object_or_404
from hatchway import api_view

from api import schemas
from api.decorators import scope_required
from api.pagination import MastodonPaginator, PaginatingApiResponse, PaginationResult
from users.models.identity import Identity
from users.services.identity import IdentityService


# NOTE(review): this chunk appears to concatenate content from more than one
# module (view imports above, pagination classes below) — likely an extraction
# artifact; the definitions below shadow the api.pagination imports.
class PaginatingApiResponse(ApiResponse[list[TI]]):
    """
    An ApiResponse subclass that also handles pagination link headers
    """

    def __init__(
        self,
        data: list[TI],
        request: HttpRequest,
        include_params: list[str],
        **kwargs,
    ):
        # Call superclass
        super().__init__(data, **kwargs)
        # Figure out if we need link headers
        self._request = request
        self.extra_params = self.filter_params(self._request, include_params)
        link_header = self.build_link_header()
        if link_header:
            self.headers["link"] = link_header

    # NOTE(review): defined without `self` but invoked as
    # `self.filter_params(...)` above — presumably a @staticmethod whose
    # decorator was lost in extraction; confirm against the original module.
    def filter_params(request: HttpRequest, allowed_params: list[str]):
        """
        Copies only the allow-listed, non-empty GET parameters for reuse
        in the generated next/prev page URLs.
        """
        params = {}
        for key in allowed_params:
            value = request.GET.get(key, None)
            if value:
                params[key] = value
        return params

    def get_part(self, data_index: int, param_name: str, rel: str) -> str | None:
        """
        Used to get next/prev URLs
        """
        if not self.data:
            return None
        # Use the ID of the last object for the next page start
        params = dict(self.extra_params)
        params[param_name] = self.data[data_index].id
        return (
            "<"
            + self._request.build_absolute_uri(self._request.path)
            + "?"
            + urllib.parse.urlencode(params)
            + f'>; rel="{rel}"'
        )

    def build_link_header(self):
        """
        Assembles the RFC 8288 Link header value from the next/prev parts,
        or returns None if there is no page data.
        """
        parts = [
            entry
            for entry in [
                self.get_part(-1, "max_id", "next"),
                self.get_part(0, "min_id", "prev"),
            ]
            if entry
        ]
        if not parts:
            return None
        return ", ".join(parts)


class PaginationResult(Generic[T]):
    """
    Represents a pagination result for Mastodon (it does Link header stuff)
    """

    #: A list of objects that matched the pagination query.
    results: list[T]

    #: The actual applied limit, which may be different from what was requested.
    limit: int

    #: A list of transformed JSON objects
    json_results: list[dict] | None = None

    # NOTE(review): takes `cls` and constructs an instance — almost certainly a
    # @classmethod whose decorator was lost in extraction; confirm.
    def empty(cls):
        """Returns an empty page with the default limit."""
        return cls(results=[], limit=20)

    def next(self, request: HttpRequest, allowed_params: list[str]):
        """
        Returns a URL to the next page of results.
        """
        if not self.results:
            return None
        if self.json_results is None:
            raise ValueError("You must JSONify the results first")
        # NOTE(review): `self.filter_params` is not defined on this class in
        # this chunk — presumably resolves to a (static) helper elsewhere;
        # verify against the original module.
        params = self.filter_params(request, allowed_params)
        params["max_id"] = self.json_results[-1]["id"]
        return f"{request.build_absolute_uri(request.path)}?{urllib.parse.urlencode(params)}"

    def prev(self, request: HttpRequest, allowed_params: list[str]):
        """
        Returns a URL to the previous page of results.
        """
        if not self.results:
            return None
        if self.json_results is None:
            raise ValueError("You must JSONify the results first")
        params = self.filter_params(request, allowed_params)
        params["min_id"] = self.json_results[0]["id"]
        return f"{request.build_absolute_uri(request.path)}?{urllib.parse.urlencode(params)}"

    def link_header(self, request: HttpRequest, allowed_params: list[str]):
        """
        Creates a link header for the given request
        """
        # NOTE(review): next()/prev() return None for empty pages, which would
        # render literally as '<None>; rel="next"' here — confirm callers only
        # use this with non-empty result sets.
        return ", ".join(
            (
                f'<{self.next(request, allowed_params)}>; rel="next"',
                f'<{self.prev(request, allowed_params)}>; rel="prev"',
            )
        )

    def jsonify_results(self, map_function: Callable[[Any], Any]):
        """
        Replaces our results with ones transformed via map_function
        """
        self.json_results = [map_function(result) for result in self.results]

    def jsonify_posts(self, identity):
        """
        Predefined way of JSON-ifying Post objects
        """
        interactions = PostInteraction.get_post_interactions(self.results, identity)
        self.jsonify_results(
            lambda post: post.to_mastodon_json(
                interactions=interactions, identity=identity
            )
        )

    def jsonify_status_events(self, identity):
        """
        Predefined way of JSON-ifying TimelineEvent objects representing statuses
        """
        interactions = PostInteraction.get_event_interactions(self.results, identity)
        self.jsonify_results(
            lambda event: event.to_mastodon_status_json(
                interactions=interactions, identity=identity
            )
        )

    def jsonify_notification_events(self, identity):
        """
        Predefined way of JSON-ifying TimelineEvent objects representing notifications
        """
        interactions = PostInteraction.get_event_interactions(self.results, identity)
        self.jsonify_results(
            lambda event: event.to_mastodon_notification_json(interactions=interactions)
        )

    def jsonify_identities(self):
        """
        Predefined way of JSON-ifying Identity objects
        """
        self.jsonify_results(lambda identity: identity.to_mastodon_json())


def filter_params(request: HttpRequest, allowed_params: list[str]):
    """
    Module-level variant: copies only the allow-listed, non-empty GET
    parameters from the request.
    """
    params = {}
    for key in allowed_params:
        value = request.GET.get(key, None)
        if value:
            params[key] = value
    return params


class MastodonPaginator:
    """
    Paginates in the Mastodon style (max_id, min_id, etc).
    Note that this basically _requires_ us to always do it on IDs, so we do.
    """

    def __init__(
        self,
        default_limit: int = 20,
        max_limit: int = 40,
    ):
        # default_limit: used when the client omits ?limit=; max_limit: hard cap.
        self.default_limit = default_limit
        self.max_limit = max_limit

    def paginate(
        self,
        queryset: models.QuerySet[TM],
        min_id: str | None,
        max_id: str | None,
        since_id: str | None,
        limit: int | None,
        home: bool = False,
    ) -> PaginationResult[TM]:
        """
        Applies Mastodon-style keyset pagination to the queryset.
        """
        # Clamp the requested page size to the configured maximum.
        limit = min(limit or self.default_limit, self.max_limit)
        filters = {}
        id_field = "id"
        reverse = False
        if home:
            # The home timeline interleaves Post IDs and PostInteraction IDs in an
            # annotated field called "subject_id".
            id_field = "subject_id"
            queryset = queryset.annotate(
                subject_id=Case(
                    When(type=TimelineEvent.Types.post, then=F("subject_post_id")),
                    default=F("subject_post_interaction"),
                )
            )
        # These "does not start with interaction" checks can be removed after a
        # couple months, when clients have flushed them out.
        if max_id and not max_id.startswith("interaction"):
            filters[f"{id_field}__lt"] = max_id
        if since_id and not since_id.startswith("interaction"):
            filters[f"{id_field}__gt"] = since_id
        if min_id and not min_id.startswith("interaction"):
            # Min ID requires items _immediately_ newer than specified, so we
            # invert the ordering to accommodate
            filters[f"{id_field}__gt"] = min_id
            reverse = True
        # Default is to order by ID descending (newest first), except for min_id
        # queries, which should order by ID for limiting, then reverse the results to be
        # consistent. The clearest explanation of this I've found so far is this:
        # https://mastodon.social/@Gargron/100846335353411164
        ordering = id_field if reverse else f"-{id_field}"
        results = list(queryset.filter(**filters).order_by(ordering)[:limit])
        if reverse:
            # Restore newest-first order for min_id pages.
            results.reverse()
        return PaginationResult(
            results=results,
            limit=limit,
        )


class Identity(StatorModel):
    """
    Represents both local and remote Fediverse identities (actors)
    """

    # Moderation restriction levels applied by admins.
    class Restriction(models.IntegerChoices):
        none = 0
        limited = 1
        blocked = 2

    # ActivityPub actor types this model recognises (stored lowercased).
    ACTOR_TYPES = ["person", "service", "application", "group", "organization"]

    id = models.BigIntegerField(primary_key=True, default=Snowflake.generate_identity)

    # The Actor URI is essentially also a PK - we keep the default numeric
    # one around as well for making nice URLs etc.
    actor_uri = models.CharField(max_length=500, unique=True)

    state = StateField(IdentityStates)

    local = models.BooleanField(db_index=True)
    users = models.ManyToManyField(
        "users.User",
        related_name="identities",
        blank=True,
    )

    username = models.CharField(max_length=500, blank=True, null=True)
    # Must be a display domain if present
    domain = models.ForeignKey(
        "users.Domain",
        blank=True,
        null=True,
        on_delete=models.PROTECT,
        related_name="identities",
    )

    name = models.CharField(max_length=500, blank=True, null=True)
    summary = models.TextField(blank=True, null=True)
    manually_approves_followers = models.BooleanField(blank=True, null=True)
    discoverable = models.BooleanField(default=True)

    # Remote-side URIs as discovered from the actor document.
    profile_uri = models.CharField(max_length=500, blank=True, null=True)
    inbox_uri = models.CharField(max_length=500, blank=True, null=True)
    shared_inbox_uri = models.CharField(max_length=500, blank=True, null=True)
    outbox_uri = models.CharField(max_length=500, blank=True, null=True)
    icon_uri = models.CharField(max_length=500, blank=True, null=True)
    image_uri = models.CharField(max_length=500, blank=True, null=True)
    followers_uri = models.CharField(max_length=500, blank=True, null=True)
    following_uri = models.CharField(max_length=500, blank=True, null=True)
    featured_collection_uri = models.CharField(max_length=500, blank=True, null=True)
    actor_type = models.CharField(max_length=100, default="person")

    # Locally-stored media (for local identities).
    icon = models.ImageField(
        upload_to=partial(upload_namer, "profile_images"), blank=True, null=True
    )
    image = models.ImageField(
        upload_to=partial(upload_namer, "background_images"), blank=True, null=True
    )

    # Should be a list of {"name":..., "value":...} dicts
    metadata = models.JSONField(blank=True, null=True)

    # Should be a list of object URIs (we don't want a full M2M here)
    pinned = models.JSONField(blank=True, null=True)

    # A list of other actor URIs - if this account was moved, should contain
    # the one URI it was moved to.
    aliases = models.JSONField(blank=True, null=True)

    # Admin-only moderation fields
    sensitive = models.BooleanField(default=False)
    restriction = models.IntegerField(
        choices=Restriction.choices, default=Restriction.none, db_index=True
    )
    admin_notes = models.TextField(null=True, blank=True)

    # HTTP-signature keypair (private_key only present for local identities).
    private_key = models.TextField(null=True, blank=True)
    public_key = models.TextField(null=True, blank=True)
    public_key_id = models.TextField(null=True, blank=True)

    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)
    fetched = models.DateTimeField(null=True, blank=True)
    deleted = models.DateTimeField(null=True, blank=True)

    objects = IdentityManager()

    ### Model attributes ###

    class Meta:
        verbose_name_plural = "identities"
        unique_together = [("username", "domain")]
        indexes: list = []  # We need this so Stator can add its own

    class urls(urlman.Urls):
        # urlman URL templates for this identity's pages.
        view = "/@{self.username}@{self.domain_id}/"
        replies = "{view}replies/"
        settings = "{view}settings/"
        action = "{view}action/"
        followers = "{view}followers/"
        following = "{view}following/"
        search = "{view}search/"
        activate = "{view}activate/"
        admin = "/admin/identities/"
        admin_edit = "{admin}{self.pk}/"
        djadmin_edit = "/djadmin/users/identity/{self.id}/change/"

        def get_scheme(self, url):
            # All identity URLs are served over HTTPS.
            return "https"

        def get_hostname(self, url):
            return self.instance.domain.uri_domain

    def __str__(self):
        # Prefer the human handle; fall back to the actor URI for partial rows.
        if self.username and self.domain_id:
            return self.handle
        return self.actor_uri

    def absolute_profile_uri(self):
        """
        Returns a profile URI that is always absolute, for sending out to
        other servers.
        """
        if self.local:
            return f"https://{self.domain.uri_domain}/@{self.username}/"
        else:
            return self.profile_uri

    def all_absolute_profile_uris(self) -> list[str]:
        """
        Returns a list of profile URIs that are always absolute. For local addresses,
        this includes the short and long form URIs.
        """
        if not self.local:
            return [self.profile_uri]
        return [
            f"https://{self.domain.uri_domain}/@{self.username}/",
            f"https://{self.domain.uri_domain}/@{self.username}@{self.domain_id}/",
        ]

    def local_icon_url(self) -> RelativeAbsoluteUrl:
        """
        Returns an icon for use by us, with fallbacks to a placeholder
        """
        if self.icon:
            return RelativeAbsoluteUrl(self.icon.url)
        elif self.icon_uri:
            # Remote icon: serve via our media proxy.
            return ProxyAbsoluteUrl(
                f"/proxy/identity_icon/{self.pk}/",
                remote_url=self.icon_uri,
            )
        else:
            return StaticAbsoluteUrl("img/unknown-icon-128.png")

    def local_image_url(self) -> RelativeAbsoluteUrl | None:
        """
        Returns a background image for us, returning None if there isn't one
        """
        if self.image:
            return AutoAbsoluteUrl(self.image.url)
        elif self.image_uri:
            return ProxyAbsoluteUrl(
                f"/proxy/identity_image/{self.pk}/",
                remote_url=self.image_uri,
            )
        return None

    def safe_summary(self):
        """Renders the summary as sanitised HTML for local display."""
        return ContentRenderer(local=True).render_identity_summary(self.summary, self)

    def safe_metadata(self):
        """Renders the metadata name/value pairs as sanitised text."""
        renderer = ContentRenderer(local=True)
        if not self.metadata:
            return []
        return [
            {
                "name": renderer.render_identity_data(data["name"], self, strip=True),
                "value": renderer.render_identity_data(data["value"], self, strip=True),
            }
            for data in self.metadata
        ]

    def ensure_uris(self):
        """
        Ensures that local identities have all the URIs populated on their
        fields (this lets us add new ones easily)
        """
        if self.local:
            self.inbox_uri = self.actor_uri + "inbox/"
            self.outbox_uri = self.actor_uri + "outbox/"
            self.featured_collection_uri = self.actor_uri + "collections/featured/"
            self.followers_uri = self.actor_uri + "followers/"
            self.following_uri = self.actor_uri + "following/"
            self.shared_inbox_uri = f"https://{self.domain.uri_domain}/inbox/"

    def add_alias(self, actor_uri: str):
        """Appends an actor URI to the alias list and saves."""
        self.aliases = (self.aliases or []) + [actor_uri]
        self.save()

    def remove_alias(self, actor_uri: str):
        """Removes an actor URI from the alias list and saves."""
        self.aliases = [x for x in (self.aliases or []) if x != actor_uri]
        self.save()

    ### Alternate constructors/fetchers ###

    # NOTE(review): takes `cls` — likely a @classmethod whose decorator was
    # lost in extraction; the same applies to the other cls-taking defs below.
    def by_handle(cls, handle,
                  fetch: bool = False) -> Optional["Identity"]:
        """Looks up an Identity from a `user@domain` handle (leading @ optional)."""
        username, domain = handle.lstrip("@").split("@", 1)
        return cls.by_username_and_domain(username=username, domain=domain, fetch=fetch)

    def by_username_and_domain(
        cls,
        username: str,
        domain: str | Domain,
        fetch: bool = False,
        local: bool = False,
    ) -> Optional["Identity"]:
        """
        Get an Identity by username and domain.

        When fetch is True, a failed lookup will do a webfinger lookup to attempt to do
        a lookup by actor_uri, creating an Identity record if one does not exist. When
        local is True, lookups will be restricted to local domains.

        If domain is a Domain, domain.local is used instead of passed local.
        """
        if username.startswith("@"):
            raise ValueError("Username must not start with @")
        domain_instance = None
        if isinstance(domain, Domain):
            domain_instance = domain
            local = domain.local
            domain = domain.domain
        else:
            domain = domain.lower()
            domain_instance = Domain.get_domain(domain)
            local = domain_instance.local if domain_instance else local
        with transaction.atomic():
            try:
                if local:
                    return cls.objects.get(
                        username__iexact=username,
                        domain_id=domain,
                        local=True,
                    )
                else:
                    return cls.objects.get(
                        username__iexact=username,
                        domain_id=domain,
                    )
            except cls.DoesNotExist:
                if fetch and not local:
                    # Resolve via WebFinger, then retry by actor URI before
                    # creating a fresh remote Identity row.
                    actor_uri, handle = cls.fetch_webfinger(f"{username}@{domain}")
                    if handle is None:
                        return None
                    # See if this actually does match an existing actor
                    try:
                        return cls.objects.get(actor_uri=actor_uri)
                    except cls.DoesNotExist:
                        pass
                    # OK, make one
                    username, domain = handle.split("@")
                    if not domain_instance:
                        domain_instance = Domain.get_remote_domain(domain)
                    # NOTE(review): passes a Domain instance to the `domain_id`
                    # kwarg (rather than `domain=`) — confirm this is intended.
                    return cls.objects.create(
                        actor_uri=actor_uri,
                        username=username,
                        domain_id=domain_instance,
                        local=False,
                    )
        return None

    def by_actor_uri(cls, uri, create=False, transient=False) -> "Identity":
        """
        Looks up an Identity by actor URI, optionally creating one (transient
        instances are not saved to the database).
        """
        try:
            return cls.objects.get(actor_uri=uri)
        except cls.DoesNotExist:
            if create:
                if transient:
                    # Some code (like inbox fetching) doesn't need this saved
                    # to the DB until the fetch succeeds
                    return cls(actor_uri=uri, local=False)
                else:
                    # parallelism may cause another simultaneous worker thread
                    # to try to create the same identity - so use database level
                    # constructs to avoid an integrity error
                    identity, created = cls.objects.update_or_create(
                        actor_uri=uri, local=False
                    )
                    return identity
            else:
                raise cls.DoesNotExist(f"No identity found with actor_uri {uri}")

    ### Dynamic properties ###
    # NOTE(review): the defs below are used elsewhere as plain attributes
    # (e.g. `self.handle`, `self.data_age > ...`) — they were presumably
    # @property methods whose decorators were lost in extraction; confirm.

    def name_or_handle(self):
        """The display name if set, otherwise the handle."""
        return self.name or self.handle

    def html_name_or_handle(self):
        """
        Return the name_or_handle with any HTML substitutions made
        """
        return ContentRenderer(local=True).render_identity_data(
            self.name_or_handle, self, strip=True
        )

    def handle(self):
        """The `user@domain` handle, with placeholders for missing parts."""
        if self.username is None:
            return "(unknown user)"
        if self.domain_id:
            return f"{self.username}@{self.domain_id}"
        return f"{self.username}@(unknown server)"

    def data_age(self) -> float:
        """
        How old our copy of this data is, in seconds
        """
        if self.local:
            return 0
        if self.fetched is None:
            # Never fetched: effectively infinitely old.
            return 10000000000
        return (timezone.now() - self.fetched).total_seconds()

    def outdated(self) -> bool:
        """Whether the cached remote data is old enough to refetch."""
        # TODO: Setting
        return self.data_age > 60 * 24 * 24

    def blocked(self) -> bool:
        """Whether an admin has blocked this identity."""
        return self.restriction == self.Restriction.blocked

    def limited(self) -> bool:
        """Whether an admin has limited this identity."""
        return self.restriction == self.Restriction.limited

    ### ActivityPub (outbound) ###

    def to_webfinger(self):
        """Builds the WebFinger (RFC 7033) JRD document for this identity."""
        aliases = [self.absolute_profile_uri()]
        actor_links = []
        if self.restriction != Identity.Restriction.blocked:
            # Blocked users don't get a profile page
            actor_links.append(
                {
                    "rel": "http://webfinger.net/rel/profile-page",
                    "type": "text/html",
                    "href": self.absolute_profile_uri(),
                },
            )
        # TODO: How to handle Restriction.limited and Restriction.blocked?
        # Exposing the activity+json will allow migrating off server
        actor_links.extend(
            [
                {
                    "rel": "self",
                    "type": "application/activity+json",
                    "href": self.actor_uri,
                }
            ]
        )
        return {
            "subject": f"acct:{self.handle}",
            "aliases": aliases,
            "links": actor_links,
        }

    def to_ap(self):
        """Builds the ActivityPub actor document for this identity."""
        from activities.models import Emoji

        self.ensure_uris()
        response = {
            "id": self.actor_uri,
            "type": self.actor_type.title(),
            "inbox": self.inbox_uri,
            "outbox": self.outbox_uri,
            "featured": self.featured_collection_uri,
            "followers": self.followers_uri,
            "following": self.following_uri,
            "preferredUsername": self.username,
            "publicKey": {
                "id": self.public_key_id,
                "owner": self.actor_uri,
                "publicKeyPem": self.public_key,
            },
            "published": self.created.strftime("%Y-%m-%dT%H:%M:%SZ"),
            "url": self.absolute_profile_uri(),
            "toot:discoverable": self.discoverable,
        }
        if self.name:
            response["name"] = self.name
        if self.summary:
            response["summary"] = self.summary
        if self.icon:
            response["icon"] = {
                "type": "Image",
                "mediaType": media_type_from_filename(self.icon.name),
                "url": self.icon.url,
            }
        if self.image:
            response["image"] = {
                "type": "Image",
                "mediaType": media_type_from_filename(self.image.name),
                "url": self.image.url,
            }
        if self.shared_inbox_uri:
            response["endpoints"] = {
                "sharedInbox": self.shared_inbox_uri,
            }
        if self.metadata:
            response["attachment"] = [
                {
                    "type": "PropertyValue",
                    "name": FediverseHtmlParser(item["name"]).plain_text,
                    "value": FediverseHtmlParser(item["value"]).html,
                }
                for item in self.metadata
            ]
        if self.aliases:
            response["alsoKnownAs"] = self.aliases
        # Emoji
        emojis = Emoji.emojis_from_content(
            (self.name or "") + " " + (self.summary or ""), None
        )
        if emojis:
            response["tag"] = []
            for emoji in emojis:
                response["tag"].append(emoji.to_ap_tag())
        return response

    def to_ap_tag(self):
        """
        Return this Identity as an ActivityPub Tag
        """
        return {
            "href": self.actor_uri,
            "name": "@" + self.handle,
            "type": "Mention",
        }

    def to_update_ap(self):
        """
        Returns the AP JSON to update this object
        """
        object =
 self.to_ap()
        return {
            "type": "Update",
            "id": self.actor_uri + "#update",
            "actor": self.actor_uri,
            "object": object,
        }

    def to_delete_ap(self):
        """
        Returns the AP JSON to delete this object
        """
        object = self.to_ap()
        return {
            "type": "Delete",
            "id": self.actor_uri + "#delete",
            "actor": self.actor_uri,
            "object": object,
        }

    ### ActivityPub (inbound) ###
    # NOTE(review): the handlers below take `cls` — likely @classmethods whose
    # decorators were lost in extraction; confirm.

    def handle_update_ap(cls, data):
        """
        Takes an incoming update.person message and just forces us to add it
        to our fetch queue (don't want to bother with two load paths right now)
        """
        # Find by actor
        try:
            actor = cls.by_actor_uri(data["actor"])
            actor.transition_perform(IdentityStates.outdated)
        except cls.DoesNotExist:
            # Unknown actor: nothing to refresh.
            pass

    def handle_delete_ap(cls, data):
        """
        Takes an incoming delete.person message and removes the matching
        identity, after verifying the actor is deleting itself.
        """
        # Assert that the actor matches the object
        if data["actor"] != data["object"]:
            raise ActorMismatchError(
                f"Actor {data['actor']} trying to delete identity {data['object']}"
            )
        # Find by actor
        try:
            actor = cls.by_actor_uri(data["actor"])
            actor.delete()
        except cls.DoesNotExist:
            # Already gone; deletion is idempotent.
            pass

    ### Deletion ###

    def mark_deleted(self):
        """
        Marks the identity and all of its related content as deleted.
        """
        from api.models import Authorization, Token

        # Remove all login tokens
        Authorization.objects.filter(identity=self).delete()
        Token.objects.filter(identity=self).delete()
        # Remove all users from ourselves and mark deletion date
        self.users.set([])
        self.deleted = timezone.now()
        self.save()
        # Move ourselves to deleted
        self.transition_perform(IdentityStates.deleted)

    ### Actor/Webfinger fetching ###

    def fetch_webfinger_url(cls, domain: str):
        """
        Given a domain (hostname), returns the correct webfinger URL to use
        based on probing host-meta.
        """
        with httpx.Client(
            timeout=settings.SETUP.REMOTE_TIMEOUT,
            headers={"User-Agent": settings.TAKAHE_USER_AGENT},
        ) as client:
            try:
                response = client.get(
                    f"https://{domain}/.well-known/host-meta",
                    follow_redirects=True,
                    headers={"Accept": "application/xml"},
                )
                # In the case of anything other than a success, we'll still try
                # hitting the webfinger URL on the domain we were given to handle
                # incorrectly setup servers.
                if response.status_code == 200 and response.content.strip():
                    tree = etree.fromstring(response.content)
                    # XRD Link with rel="lrdd" carries the webfinger template.
                    template = tree.xpath(
                        "string(.//*[local-name() = 'Link' and @rel='lrdd' and (not(@type) or @type='application/jrd+json')]/@template)"
                    )
                    if template:
                        return template
            except (httpx.RequestError, etree.ParseError):
                # Fall through to the standard well-known URL below.
                pass
        return f"https://{domain}/.well-known/webfinger?resource={{uri}}"

    def fetch_webfinger(cls, handle: str) -> tuple[str | None, str | None]:
        """
        Given a username@domain handle, returns a tuple of
        (actor uri, canonical handle) or None, None if it does not resolve.
        """
        domain = handle.split("@")[1].lower()
        try:
            webfinger_url = cls.fetch_webfinger_url(domain)
        except ssl.SSLCertVerificationError:
            return None, None
        # Go make a Webfinger request
        with httpx.Client(
            timeout=settings.SETUP.REMOTE_TIMEOUT,
            headers={"User-Agent": settings.TAKAHE_USER_AGENT},
        ) as client:
            try:
                response = client.get(
                    webfinger_url.format(uri=f"acct:{handle}"),
                    follow_redirects=True,
                    headers={"Accept": "application/json"},
                )
                response.raise_for_status()
            except (httpx.HTTPError, ssl.SSLCertVerificationError) as ex:
                response = getattr(ex, "response", None)
                # Transient failures (timeouts, 408/429/504) get retried later.
                if isinstance(ex, httpx.TimeoutException) or (
                    response and response.status_code in [408, 429, 504]
                ):
                    raise TryAgainLater() from ex
                # Unexpected 4xx responses are surfaced; the listed status
                # codes are treated as a plain "does not resolve".
                elif (
                    response
                    and response.status_code < 500
                    and response.status_code not in [400, 401, 403, 404, 406, 410]
                ):
                    raise ValueError(
                        f"Client error fetching webfinger: {response.status_code}",
                        response.content,
                    )
                return None, None
        try:
            data = response.json()
        except ValueError:
            # Some servers return these with a 200 status code!
            if b"not found" in response.content.lower():
                return None, None
            raise ValueError(
                "JSON parse error fetching webfinger",
                response.content,
            )
        try:
            # Normalise "acct:user@domain" subjects to bare handles.
            if data["subject"].startswith("acct:"):
                data["subject"] = data["subject"][5:]
            for link in data["links"]:
                if (
                    link.get("type") == "application/activity+json"
                    and link.get("rel") == "self"
                ):
                    return link["href"], data["subject"]
        except KeyError:
            # Server returning wrong payload structure
            pass
        return None, None

    def fetch_pinned_post_uris(cls, uri: str) -> list[str]:
        """
        Fetch an identity's featured collection.
        """
        with httpx.Client(
            timeout=settings.SETUP.REMOTE_TIMEOUT,
            headers={"User-Agent": settings.TAKAHE_USER_AGENT},
        ) as client:
            try:
                response = client.get(
                    uri,
                    follow_redirects=True,
                    headers={"Accept": "application/activity+json"},
                )
                response.raise_for_status()
            except (httpx.HTTPError, ssl.SSLCertVerificationError) as ex:
                response = getattr(ex, "response", None)
                if isinstance(ex, httpx.TimeoutException) or (
                    response and response.status_code in [408, 429, 504]
                ):
                    raise TryAgainLater() from ex
                elif (
                    response
                    and response.status_code < 500
                    and response.status_code not in [401, 403, 404, 406, 410]
                ):
                    raise ValueError(
                        f"Client error fetching featured collection: {response.status_code}",
                        response.content,
                    )
                return []
        try:
            data = canonicalise(response.json(), include_security=True)
            items: list[dict | str] = []
            # Collections can be ordered or unordered; ordered ones are
            # reversed so the oldest pin comes first.
            if "orderedItems" in data:
                items = list(reversed(data["orderedItems"]))
            elif "items" in data:
                items = list(data["items"])
            ids = []
            for item in items:
                if not isinstance(item, dict):
                    continue
                post_obj: dict | None = item
                # Items may be bare posts or wrapped in Create/Update activities.
                if item["type"] in ["Create", "Update"]:
                    post_obj = item.get("object")
                if post_obj:
                    ids.append(post_obj["id"])
            return ids
        except ValueError:
            # Some servers return these with a 200 status code!
            if b"not found" in response.content.lower():
                return []
            raise ValueError(
                "JSON parse error fetching featured collection",
                response.content,
            )

    def fetch_actor(self) -> bool:
        """
        Fetches the user's actor information, as well as their domain from
        webfinger if it's available.
""" from activities.models import Emoji if self.local: raise ValueError("Cannot fetch local identities") try: response = SystemActor().signed_request( method="get", uri=self.actor_uri, ) except httpx.TimeoutException: raise TryAgainLater() except (httpx.RequestError, ssl.SSLCertVerificationError): return False content_type = response.headers.get("content-type") if content_type and "html" in content_type: # Some servers don't properly handle "application/activity+json" return False status_code = response.status_code if status_code >= 400: if status_code in [408, 429, 504]: raise TryAgainLater() if status_code == 410 and self.pk: # Their account got deleted, so let's do the same. Identity.objects.filter(pk=self.pk).delete() if status_code < 500 and status_code not in [401, 403, 404, 406, 410]: logger.info( "Client error fetching actor: %d %s", status_code, self.actor_uri ) return False json_data = json_from_response(response) if not json_data: return False try: document = canonicalise(json_data, include_security=True) except ValueError: # servers with empty or invalid responses are inevitable logger.info( "Invalid response fetching actor %s", self.actor_uri, extra={ "content": response.content, }, ) return False if "type" not in document: return False self.name = document.get("name") self.profile_uri = document.get("url") self.inbox_uri = document.get("inbox") self.outbox_uri = document.get("outbox") self.followers_uri = document.get("followers") self.following_uri = document.get("following") self.featured_collection_uri = document.get("featured") self.actor_type = document["type"].lower() self.shared_inbox_uri = document.get("endpoints", {}).get("sharedInbox") self.summary = document.get("summary") self.username = document.get("preferredUsername") if self.username and "@value" in self.username: self.username = self.username["@value"] if self.username: self.username = self.username self.manually_approves_followers = document.get("manuallyApprovesFollowers") 
self.public_key = document.get("publicKey", {}).get("publicKeyPem") self.public_key_id = document.get("publicKey", {}).get("id") # Sometimes the public key PEM is in a language construct? if isinstance(self.public_key, dict): self.public_key = self.public_key["@value"] self.icon_uri = get_first_image_url(document.get("icon", None)) self.image_uri = get_first_image_url(document.get("image", None)) self.discoverable = document.get("toot:discoverable", True) # Profile links/metadata self.metadata = [] for attachment in get_list(document, "attachment"): if ( attachment["type"] == "PropertyValue" and "name" in attachment and "value" in attachment ): self.metadata.append( { "name": attachment["name"], "value": FediverseHtmlParser(attachment["value"]).html, } ) # Now go do webfinger with that info to see if we can get a canonical domain actor_url_parts = urlparse(self.actor_uri) self.domain = Domain.get_remote_domain(actor_url_parts.hostname) if self.username: try: webfinger_actor, webfinger_handle = self.fetch_webfinger( f"{self.username}@{actor_url_parts.hostname}" ) if webfinger_handle: webfinger_username, webfinger_domain = webfinger_handle.split("@") self.username = webfinger_username self.domain = Domain.get_remote_domain(webfinger_domain) except TryAgainLater: # continue with original domain when webfinger times out logger.info("WebFinger timed out: %s", self.actor_uri) pass except ValueError as exc: logger.info( "Can't parse WebFinger: %s %s", exc.args[0], self.actor_uri, exc_info=exc, ) return False # Emojis (we need the domain so we do them here) for tag in get_list(document, "tag"): if tag["type"].lower() in ["toot:emoji", "emoji"]: Emoji.by_ap_tag(self.domain, tag, create=True) # Mark as fetched self.fetched = timezone.now() try: with transaction.atomic(): # if we don't wrap this in its own transaction, the exception # handler is guaranteed to fail self.save() except IntegrityError as e: # See if we can fetch a PK and save there if self.pk is None: try: 
other_row = Identity.objects.get(actor_uri=self.actor_uri) except Identity.DoesNotExist: raise ValueError( f"Could not save Identity at end of actor fetch: {e}" ) self.pk: int | None = other_row.pk with transaction.atomic(): self.save() # Fetch pinned posts in a followup task if self.featured_collection_uri: InboxMessage.create_internal( { "type": "SyncPins", "identity": self.pk, } ) return True ### OpenGraph API ### def to_opengraph_dict(self) -> dict: return { "og:title": f"{self.name} (@{self.handle})", "og:type": "profile", "og:description": self.summary, "og:profile:username": self.handle, "og:image:url": self.local_icon_url().absolute, "og:image:height": 85, "og:image:width": 85, } ### Mastodon Client API ### def to_mastodon_mention_json(self): return { "id": self.id, "username": self.username or "", "url": self.absolute_profile_uri() or "", "acct": self.handle or "", } def to_mastodon_json(self, source=False, include_counts=True): from activities.models import Emoji, Post header_image = self.local_image_url() missing = StaticAbsoluteUrl("img/missing.png").absolute metadata_value_text = ( " ".join([m["value"] for m in self.metadata]) if self.metadata else "" ) emojis = Emoji.emojis_from_content( f"{self.name} {self.summary} {metadata_value_text}", self.domain ) renderer = ContentRenderer(local=False) result = { "id": self.pk, "username": self.username or "", "acct": self.username if source else self.handle, "url": self.absolute_profile_uri() or "", "display_name": self.name or "", "note": self.summary or "", "avatar": self.local_icon_url().absolute, "avatar_static": self.local_icon_url().absolute, "header": header_image.absolute if header_image else missing, "header_static": header_image.absolute if header_image else missing, "locked": bool(self.manually_approves_followers), "fields": ( [ { "name": m["name"], "value": renderer.render_identity_data(m["value"], self), "verified_at": None, } for m in self.metadata ] if self.metadata else [] ), "emojis": 
[emoji.to_mastodon_json() for emoji in emojis], "bot": self.actor_type.lower() in ["service", "application"], "group": self.actor_type.lower() == "group", "discoverable": self.discoverable, "suspended": False, "limited": False, "created_at": format_ld_date( self.created.replace(hour=0, minute=0, second=0, microsecond=0) ), "last_status_at": None, # TODO: populate "statuses_count": self.posts.count() if include_counts else 0, "followers_count": self.inbound_follows.count() if include_counts else 0, "following_count": self.outbound_follows.count() if include_counts else 0, } if source: privacy_map = { Post.Visibilities.public: "public", Post.Visibilities.unlisted: "unlisted", Post.Visibilities.local_only: "unlisted", Post.Visibilities.followers: "private", Post.Visibilities.mentioned: "direct", } result["source"] = { "note": FediverseHtmlParser(self.summary).plain_text if self.summary else "", "fields": ( [ { "name": m["name"], "value": FediverseHtmlParser(m["value"]).plain_text, "verified_at": None, } for m in self.metadata ] if self.metadata else [] ), "privacy": privacy_map[ Config.load_identity(self).default_post_visibility ], "sensitive": False, "language": "unk", "follow_requests_count": 0, } return result ### Cryptography ### def signed_request( self, method: Literal["get", "post"], uri: str, body: dict | None = None, ): """ Performs a signed request on behalf of the System Actor. 
""" return HttpSignature.signed_request( method=method, uri=uri, body=body, private_key=self.private_key, key_id=self.public_key_id, ) def generate_keypair(self): if not self.local: raise ValueError("Cannot generate keypair for remote user") self.private_key, self.public_key = RsaKeys.generate_keypair() self.public_key_id = self.actor_uri + "#main-key" self.save() ### Config ### def config_identity(self) -> Config.IdentityOptions: return Config.load_identity(self) def lazy_config_value(self, key: str): """ Lazily load a config value for this Identity """ if key not in Config.IdentityOptions.__fields__: raise KeyError(f"Undefined IdentityOption for {key}") return lazy(lambda: getattr(self.config_identity, key)) class IdentityService: """ High-level helper methods for doing things to identities """ def __init__(self, identity: Identity): self.identity = identity def create( cls, user: User, username: str, domain: Domain, name: str, discoverable: bool = True, ) -> Identity: identity = Identity.objects.create( actor_uri=f"https://{domain.uri_domain}/@{username}@{domain.domain}/", username=username, domain=domain, name=name, local=True, discoverable=discoverable, ) identity.users.add(user) identity.generate_keypair() # Send fanouts to all admin identities for admin_identity in cls.admin_identities(): FanOut.objects.create( type=FanOut.Types.identity_created, identity=admin_identity, subject_identity=identity, ) return identity def admin_identities(cls) -> models.QuerySet[Identity]: return Identity.objects.filter(users__admin=True).distinct() def following(self) -> models.QuerySet[Identity]: return ( Identity.objects.filter( inbound_follows__source=self.identity, inbound_follows__state__in=FollowStates.group_active(), ) .not_deleted() .distinct() .order_by("username") .select_related("domain") ) def followers(self) -> models.QuerySet[Identity]: return ( Identity.objects.filter( outbound_follows__target=self.identity, outbound_follows__state=FollowStates.accepted, ) 
.not_deleted() .distinct() .order_by("username") .select_related("domain") ) def follow_requests(self) -> models.QuerySet[Identity]: return ( Identity.objects.filter( outbound_follows__target=self.identity, outbound_follows__state=FollowStates.pending_approval, ) .not_deleted() .distinct() .order_by("username") .select_related("domain") ) def accept_follow_request(self, source_identity): existing_follow = Follow.maybe_get(source_identity, self.identity) if existing_follow: existing_follow.transition_perform(FollowStates.accepting) def reject_follow_request(self, source_identity): existing_follow = Follow.maybe_get(source_identity, self.identity) if existing_follow: existing_follow.transition_perform(FollowStates.rejecting) def follow(self, target_identity: Identity, boosts=True) -> Follow: """ Follows a user (or does nothing if already followed). Returns the follow. """ if target_identity == self.identity: raise ValueError("You cannot follow yourself") return Follow.create_local(self.identity, target_identity, boosts=boosts) def unfollow(self, target_identity: Identity): """ Unfollows a user (or does nothing if not followed). """ if target_identity == self.identity: raise ValueError("You cannot unfollow yourself") existing_follow = Follow.maybe_get(self.identity, target_identity) if existing_follow: existing_follow.transition_perform(FollowStates.undone) InboxMessage.create_internal( { "type": "ClearTimeline", "object": target_identity.pk, "actor": self.identity.pk, } ) def block(self, target_identity: Identity) -> Block: """ Blocks a user. 
""" if target_identity == self.identity: raise ValueError("You cannot block yourself") self.unfollow(target_identity) self.reject_follow_request(target_identity) block = Block.create_local_block(self.identity, target_identity) InboxMessage.create_internal( { "type": "ClearTimeline", "actor": self.identity.pk, "object": target_identity.pk, "fullErase": True, } ) return block def unblock(self, target_identity: Identity): """ Unlocks a user """ if target_identity == self.identity: raise ValueError("You cannot unblock yourself") existing_block = Block.maybe_get(self.identity, target_identity, mute=False) if existing_block and existing_block.active: existing_block.transition_perform(BlockStates.undone) def mute( self, target_identity: Identity, duration: int = 0, include_notifications: bool = False, ) -> Block: """ Mutes a user. """ if target_identity == self.identity: raise ValueError("You cannot mute yourself") return Block.create_local_mute( self.identity, target_identity, duration=duration or None, include_notifications=include_notifications, ) def unmute(self, target_identity: Identity): """ Unmutes a user """ if target_identity == self.identity: raise ValueError("You cannot unmute yourself") existing_block = Block.maybe_get(self.identity, target_identity, mute=True) if existing_block and existing_block.active: existing_block.transition_perform(BlockStates.undone) def relationships(self, from_identity: Identity): """ Returns a dict of any active relationships from the given identity. 
""" return { "outbound_follow": Follow.maybe_get( from_identity, self.identity, require_active=True ), "inbound_follow": Follow.maybe_get( self.identity, from_identity, require_active=True ), "outbound_block": Block.maybe_get( from_identity, self.identity, mute=False, require_active=True ), "inbound_block": Block.maybe_get( self.identity, from_identity, mute=False, require_active=True ), "outbound_mute": Block.maybe_get( from_identity, self.identity, mute=True, require_active=True ), } def sync_pins(self, object_uris): if not object_uris or self.identity.domain.blocked: return with transaction.atomic(): for object_uri in object_uris: try: post = Post.by_object_uri(object_uri, fetch=True) PostInteraction.objects.get_or_create( type=PostInteraction.Types.pin, identity=self.identity, post=post, state__in=PostInteractionStates.group_active(), ) except MultipleObjectsReturned as exc: logger.exception("%s on %s", exc, object_uri) pass except Post.DoesNotExist: # ignore 404s... pass except TryAgainLater: # don't wait for it now, it'll be synced on next refresh pass for removed in PostInteraction.objects.filter( type=PostInteraction.Types.pin, identity=self.identity, state__in=PostInteractionStates.group_active(), ).exclude(post__object_uri__in=object_uris): removed.transition_perform(PostInteractionStates.undone_fanned_out) def mastodon_json_relationship(self, from_identity: Identity): """ Returns a Relationship object for the from_identity's relationship with this identity. 
""" relationships = self.relationships(from_identity) return { "id": self.identity.pk, "following": relationships["outbound_follow"] is not None and relationships["outbound_follow"].accepted, "followed_by": relationships["inbound_follow"] is not None and relationships["inbound_follow"].accepted, "showing_reblogs": ( relationships["outbound_follow"] and relationships["outbound_follow"].boosts or False ), "notifying": False, "blocking": relationships["outbound_block"] is not None, "blocked_by": relationships["inbound_block"] is not None, "muting": relationships["outbound_mute"] is not None, "muting_notifications": False, "requested": relationships["outbound_follow"] is not None and relationships["outbound_follow"].state == FollowStates.pending_approval, "domain_blocking": False, "endorsed": False, "note": ( relationships["outbound_follow"] and relationships["outbound_follow"].note or "" ), } def set_summary(self, summary: str): """ Safely sets a summary and turns linebreaks into HTML """ if summary: self.identity.summary = FediverseHtmlParser(linebreaks_filter(summary)).html else: self.identity.summary = None self.identity.save() def set_icon(self, file): """ Sets the user's avatar image """ self.identity.icon.save( file.name, resize_image(file, size=(400, 400)), ) def set_image(self, file): """ Sets the user's header image """ self.identity.image.save( file.name, resize_image(file, size=(1500, 500)), ) def handle_internal_add_follow(cls, payload): """ Handles an inbox message saying we need to follow a handle Message format: { "type": "AddFollow", "source": "90310938129083", "target_handle": "andrew@aeracode.org", "boosts": true, } """ # Retrieve ourselves self = cls(Identity.objects.get(pk=payload["source"])) # Get the remote end (may need a fetch) username, domain = payload["target_handle"].split("@") target_identity = Identity.by_username_and_domain(username, domain, fetch=True) if target_identity is None: raise ValueError(f"Cannot find identity to follow: 
{target_identity}") # Follow! self.follow(target_identity=target_identity, boosts=payload.get("boosts", True)) def handle_internal_sync_pins(cls, payload): """ Handles an inbox message saying we need to sync featured posts Message format: { "type": "SyncPins", "identity": "90310938129083", } """ # Retrieve ourselves actor = Identity.objects.get(pk=payload["identity"]) self = cls(actor) # Get the remote end (may need a fetch) if actor.featured_collection_uri: featured = actor.fetch_pinned_post_uris(actor.featured_collection_uri) self.sync_pins(featured) def follow_requests( request: HttpRequest, max_id: str | None = None, since_id: str | None = None, min_id: str | None = None, limit: int = 40, ) -> list[schemas.Account]: service = IdentityService(request.identity) paginator = MastodonPaginator(max_limit=80) pager: PaginationResult[Identity] = paginator.paginate( service.follow_requests(), min_id=min_id, max_id=max_id, since_id=since_id, limit=limit, ) return PaginatingApiResponse( [schemas.Account.from_identity(i) for i in pager.results], request=request, include_params=["limit"], )
null
19,770
from django.http import HttpRequest from django.shortcuts import get_object_or_404 from hatchway import api_view from api import schemas from api.decorators import scope_required from api.pagination import MastodonPaginator, PaginatingApiResponse, PaginationResult from users.models.identity import Identity from users.services.identity import IdentityService class Identity(StatorModel): """ Represents both local and remote Fediverse identities (actors) """ class Restriction(models.IntegerChoices): none = 0 limited = 1 blocked = 2 ACTOR_TYPES = ["person", "service", "application", "group", "organization"] id = models.BigIntegerField(primary_key=True, default=Snowflake.generate_identity) # The Actor URI is essentially also a PK - we keep the default numeric # one around as well for making nice URLs etc. actor_uri = models.CharField(max_length=500, unique=True) state = StateField(IdentityStates) local = models.BooleanField(db_index=True) users = models.ManyToManyField( "users.User", related_name="identities", blank=True, ) username = models.CharField(max_length=500, blank=True, null=True) # Must be a display domain if present domain = models.ForeignKey( "users.Domain", blank=True, null=True, on_delete=models.PROTECT, related_name="identities", ) name = models.CharField(max_length=500, blank=True, null=True) summary = models.TextField(blank=True, null=True) manually_approves_followers = models.BooleanField(blank=True, null=True) discoverable = models.BooleanField(default=True) profile_uri = models.CharField(max_length=500, blank=True, null=True) inbox_uri = models.CharField(max_length=500, blank=True, null=True) shared_inbox_uri = models.CharField(max_length=500, blank=True, null=True) outbox_uri = models.CharField(max_length=500, blank=True, null=True) icon_uri = models.CharField(max_length=500, blank=True, null=True) image_uri = models.CharField(max_length=500, blank=True, null=True) followers_uri = models.CharField(max_length=500, blank=True, null=True) following_uri 
= models.CharField(max_length=500, blank=True, null=True) featured_collection_uri = models.CharField(max_length=500, blank=True, null=True) actor_type = models.CharField(max_length=100, default="person") icon = models.ImageField( upload_to=partial(upload_namer, "profile_images"), blank=True, null=True ) image = models.ImageField( upload_to=partial(upload_namer, "background_images"), blank=True, null=True ) # Should be a list of {"name":..., "value":...} dicts metadata = models.JSONField(blank=True, null=True) # Should be a list of object URIs (we don't want a full M2M here) pinned = models.JSONField(blank=True, null=True) # A list of other actor URIs - if this account was moved, should contain # the one URI it was moved to. aliases = models.JSONField(blank=True, null=True) # Admin-only moderation fields sensitive = models.BooleanField(default=False) restriction = models.IntegerField( choices=Restriction.choices, default=Restriction.none, db_index=True ) admin_notes = models.TextField(null=True, blank=True) private_key = models.TextField(null=True, blank=True) public_key = models.TextField(null=True, blank=True) public_key_id = models.TextField(null=True, blank=True) created = models.DateTimeField(auto_now_add=True) updated = models.DateTimeField(auto_now=True) fetched = models.DateTimeField(null=True, blank=True) deleted = models.DateTimeField(null=True, blank=True) objects = IdentityManager() ### Model attributes ### class Meta: verbose_name_plural = "identities" unique_together = [("username", "domain")] indexes: list = [] # We need this so Stator can add its own class urls(urlman.Urls): view = "/@{self.username}@{self.domain_id}/" replies = "{view}replies/" settings = "{view}settings/" action = "{view}action/" followers = "{view}followers/" following = "{view}following/" search = "{view}search/" activate = "{view}activate/" admin = "/admin/identities/" admin_edit = "{admin}{self.pk}/" djadmin_edit = "/djadmin/users/identity/{self.id}/change/" def 
get_scheme(self, url): return "https" def get_hostname(self, url): return self.instance.domain.uri_domain def __str__(self): if self.username and self.domain_id: return self.handle return self.actor_uri def absolute_profile_uri(self): """ Returns a profile URI that is always absolute, for sending out to other servers. """ if self.local: return f"https://{self.domain.uri_domain}/@{self.username}/" else: return self.profile_uri def all_absolute_profile_uris(self) -> list[str]: """ Returns alist of profile URIs that are always absolute. For local addresses, this includes the short and long form URIs. """ if not self.local: return [self.profile_uri] return [ f"https://{self.domain.uri_domain}/@{self.username}/", f"https://{self.domain.uri_domain}/@{self.username}@{self.domain_id}/", ] def local_icon_url(self) -> RelativeAbsoluteUrl: """ Returns an icon for use by us, with fallbacks to a placeholder """ if self.icon: return RelativeAbsoluteUrl(self.icon.url) elif self.icon_uri: return ProxyAbsoluteUrl( f"/proxy/identity_icon/{self.pk}/", remote_url=self.icon_uri, ) else: return StaticAbsoluteUrl("img/unknown-icon-128.png") def local_image_url(self) -> RelativeAbsoluteUrl | None: """ Returns a background image for us, returning None if there isn't one """ if self.image: return AutoAbsoluteUrl(self.image.url) elif self.image_uri: return ProxyAbsoluteUrl( f"/proxy/identity_image/{self.pk}/", remote_url=self.image_uri, ) return None def safe_summary(self): return ContentRenderer(local=True).render_identity_summary(self.summary, self) def safe_metadata(self): renderer = ContentRenderer(local=True) if not self.metadata: return [] return [ { "name": renderer.render_identity_data(data["name"], self, strip=True), "value": renderer.render_identity_data(data["value"], self, strip=True), } for data in self.metadata ] def ensure_uris(self): """ Ensures that local identities have all the URIs populated on their fields (this lets us add new ones easily) """ if self.local: 
self.inbox_uri = self.actor_uri + "inbox/" self.outbox_uri = self.actor_uri + "outbox/" self.featured_collection_uri = self.actor_uri + "collections/featured/" self.followers_uri = self.actor_uri + "followers/" self.following_uri = self.actor_uri + "following/" self.shared_inbox_uri = f"https://{self.domain.uri_domain}/inbox/" def add_alias(self, actor_uri: str): self.aliases = (self.aliases or []) + [actor_uri] self.save() def remove_alias(self, actor_uri: str): self.aliases = [x for x in (self.aliases or []) if x != actor_uri] self.save() ### Alternate constructors/fetchers ### def by_handle(cls, handle, fetch: bool = False) -> Optional["Identity"]: username, domain = handle.lstrip("@").split("@", 1) return cls.by_username_and_domain(username=username, domain=domain, fetch=fetch) def by_username_and_domain( cls, username: str, domain: str | Domain, fetch: bool = False, local: bool = False, ) -> Optional["Identity"]: """ Get an Identity by username and domain. When fetch is True, a failed lookup will do a webfinger lookup to attempt to do a lookup by actor_uri, creating an Identity record if one does not exist. When local is True, lookups will be restricted to local domains. If domain is a Domain, domain.local is used instead of passsed local. 
""" if username.startswith("@"): raise ValueError("Username must not start with @") domain_instance = None if isinstance(domain, Domain): domain_instance = domain local = domain.local domain = domain.domain else: domain = domain.lower() domain_instance = Domain.get_domain(domain) local = domain_instance.local if domain_instance else local with transaction.atomic(): try: if local: return cls.objects.get( username__iexact=username, domain_id=domain, local=True, ) else: return cls.objects.get( username__iexact=username, domain_id=domain, ) except cls.DoesNotExist: if fetch and not local: actor_uri, handle = cls.fetch_webfinger(f"{username}@{domain}") if handle is None: return None # See if this actually does match an existing actor try: return cls.objects.get(actor_uri=actor_uri) except cls.DoesNotExist: pass # OK, make one username, domain = handle.split("@") if not domain_instance: domain_instance = Domain.get_remote_domain(domain) return cls.objects.create( actor_uri=actor_uri, username=username, domain_id=domain_instance, local=False, ) return None def by_actor_uri(cls, uri, create=False, transient=False) -> "Identity": try: return cls.objects.get(actor_uri=uri) except cls.DoesNotExist: if create: if transient: # Some code (like inbox fetching) doesn't need this saved # to the DB until the fetch succeeds return cls(actor_uri=uri, local=False) else: # parallelism may cause another simultaneous worker thread # to try to create the same identity - so use database level # constructs to avoid an integrity error identity, created = cls.objects.update_or_create( actor_uri=uri, local=False ) return identity else: raise cls.DoesNotExist(f"No identity found with actor_uri {uri}") ### Dynamic properties ### def name_or_handle(self): return self.name or self.handle def html_name_or_handle(self): """ Return the name_or_handle with any HTML substitutions made """ return ContentRenderer(local=True).render_identity_data( self.name_or_handle, self, strip=True ) def handle(self): 
if self.username is None: return "(unknown user)" if self.domain_id: return f"{self.username}@{self.domain_id}" return f"{self.username}@(unknown server)" def data_age(self) -> float: """ How old our copy of this data is, in seconds """ if self.local: return 0 if self.fetched is None: return 10000000000 return (timezone.now() - self.fetched).total_seconds() def outdated(self) -> bool: # TODO: Setting return self.data_age > 60 * 24 * 24 def blocked(self) -> bool: return self.restriction == self.Restriction.blocked def limited(self) -> bool: return self.restriction == self.Restriction.limited ### ActivityPub (outbound) ### def to_webfinger(self): aliases = [self.absolute_profile_uri()] actor_links = [] if self.restriction != Identity.Restriction.blocked: # Blocked users don't get a profile page actor_links.append( { "rel": "http://webfinger.net/rel/profile-page", "type": "text/html", "href": self.absolute_profile_uri(), }, ) # TODO: How to handle Restriction.limited and Restriction.blocked? 
# Exposing the activity+json will allow migrating off server actor_links.extend( [ { "rel": "self", "type": "application/activity+json", "href": self.actor_uri, } ] ) return { "subject": f"acct:{self.handle}", "aliases": aliases, "links": actor_links, } def to_ap(self): from activities.models import Emoji self.ensure_uris() response = { "id": self.actor_uri, "type": self.actor_type.title(), "inbox": self.inbox_uri, "outbox": self.outbox_uri, "featured": self.featured_collection_uri, "followers": self.followers_uri, "following": self.following_uri, "preferredUsername": self.username, "publicKey": { "id": self.public_key_id, "owner": self.actor_uri, "publicKeyPem": self.public_key, }, "published": self.created.strftime("%Y-%m-%dT%H:%M:%SZ"), "url": self.absolute_profile_uri(), "toot:discoverable": self.discoverable, } if self.name: response["name"] = self.name if self.summary: response["summary"] = self.summary if self.icon: response["icon"] = { "type": "Image", "mediaType": media_type_from_filename(self.icon.name), "url": self.icon.url, } if self.image: response["image"] = { "type": "Image", "mediaType": media_type_from_filename(self.image.name), "url": self.image.url, } if self.shared_inbox_uri: response["endpoints"] = { "sharedInbox": self.shared_inbox_uri, } if self.metadata: response["attachment"] = [ { "type": "PropertyValue", "name": FediverseHtmlParser(item["name"]).plain_text, "value": FediverseHtmlParser(item["value"]).html, } for item in self.metadata ] if self.aliases: response["alsoKnownAs"] = self.aliases # Emoji emojis = Emoji.emojis_from_content( (self.name or "") + " " + (self.summary or ""), None ) if emojis: response["tag"] = [] for emoji in emojis: response["tag"].append(emoji.to_ap_tag()) return response def to_ap_tag(self): """ Return this Identity as an ActivityPub Tag """ return { "href": self.actor_uri, "name": "@" + self.handle, "type": "Mention", } def to_update_ap(self): """ Returns the AP JSON to update this object """ object = 
self.to_ap() return { "type": "Update", "id": self.actor_uri + "#update", "actor": self.actor_uri, "object": object, } def to_delete_ap(self): """ Returns the AP JSON to delete this object """ object = self.to_ap() return { "type": "Delete", "id": self.actor_uri + "#delete", "actor": self.actor_uri, "object": object, } ### ActivityPub (inbound) ### def handle_update_ap(cls, data): """ Takes an incoming update.person message and just forces us to add it to our fetch queue (don't want to bother with two load paths right now) """ # Find by actor try: actor = cls.by_actor_uri(data["actor"]) actor.transition_perform(IdentityStates.outdated) except cls.DoesNotExist: pass def handle_delete_ap(cls, data): """ Takes an incoming update.person message and just forces us to add it to our fetch queue (don't want to bother with two load paths right now) """ # Assert that the actor matches the object if data["actor"] != data["object"]: raise ActorMismatchError( f"Actor {data['actor']} trying to delete identity {data['object']}" ) # Find by actor try: actor = cls.by_actor_uri(data["actor"]) actor.delete() except cls.DoesNotExist: pass ### Deletion ### def mark_deleted(self): """ Marks the identity and all of its related content as deleted. """ from api.models import Authorization, Token # Remove all login tokens Authorization.objects.filter(identity=self).delete() Token.objects.filter(identity=self).delete() # Remove all users from ourselves and mark deletion date self.users.set([]) self.deleted = timezone.now() self.save() # Move ourselves to deleted self.transition_perform(IdentityStates.deleted) ### Actor/Webfinger fetching ### def fetch_webfinger_url(cls, domain: str): """ Given a domain (hostname), returns the correct webfinger URL to use based on probing host-meta. 
""" with httpx.Client( timeout=settings.SETUP.REMOTE_TIMEOUT, headers={"User-Agent": settings.TAKAHE_USER_AGENT}, ) as client: try: response = client.get( f"https://{domain}/.well-known/host-meta", follow_redirects=True, headers={"Accept": "application/xml"}, ) # In the case of anything other than a success, we'll still try # hitting the webfinger URL on the domain we were given to handle # incorrectly setup servers. if response.status_code == 200 and response.content.strip(): tree = etree.fromstring(response.content) template = tree.xpath( "string(.//*[local-name() = 'Link' and @rel='lrdd' and (not(@type) or @type='application/jrd+json')]/@template)" ) if template: return template except (httpx.RequestError, etree.ParseError): pass return f"https://{domain}/.well-known/webfinger?resource={{uri}}" def fetch_webfinger(cls, handle: str) -> tuple[str | None, str | None]: """ Given a username@domain handle, returns a tuple of (actor uri, canonical handle) or None, None if it does not resolve. 
""" domain = handle.split("@")[1].lower() try: webfinger_url = cls.fetch_webfinger_url(domain) except ssl.SSLCertVerificationError: return None, None # Go make a Webfinger request with httpx.Client( timeout=settings.SETUP.REMOTE_TIMEOUT, headers={"User-Agent": settings.TAKAHE_USER_AGENT}, ) as client: try: response = client.get( webfinger_url.format(uri=f"acct:{handle}"), follow_redirects=True, headers={"Accept": "application/json"}, ) response.raise_for_status() except (httpx.HTTPError, ssl.SSLCertVerificationError) as ex: response = getattr(ex, "response", None) if isinstance(ex, httpx.TimeoutException) or ( response and response.status_code in [408, 429, 504] ): raise TryAgainLater() from ex elif ( response and response.status_code < 500 and response.status_code not in [400, 401, 403, 404, 406, 410] ): raise ValueError( f"Client error fetching webfinger: {response.status_code}", response.content, ) return None, None try: data = response.json() except ValueError: # Some servers return these with a 200 status code! if b"not found" in response.content.lower(): return None, None raise ValueError( "JSON parse error fetching webfinger", response.content, ) try: if data["subject"].startswith("acct:"): data["subject"] = data["subject"][5:] for link in data["links"]: if ( link.get("type") == "application/activity+json" and link.get("rel") == "self" ): return link["href"], data["subject"] except KeyError: # Server returning wrong payload structure pass return None, None def fetch_pinned_post_uris(cls, uri: str) -> list[str]: """ Fetch an identity's featured collection. 
""" with httpx.Client( timeout=settings.SETUP.REMOTE_TIMEOUT, headers={"User-Agent": settings.TAKAHE_USER_AGENT}, ) as client: try: response = client.get( uri, follow_redirects=True, headers={"Accept": "application/activity+json"}, ) response.raise_for_status() except (httpx.HTTPError, ssl.SSLCertVerificationError) as ex: response = getattr(ex, "response", None) if isinstance(ex, httpx.TimeoutException) or ( response and response.status_code in [408, 429, 504] ): raise TryAgainLater() from ex elif ( response and response.status_code < 500 and response.status_code not in [401, 403, 404, 406, 410] ): raise ValueError( f"Client error fetching featured collection: {response.status_code}", response.content, ) return [] try: data = canonicalise(response.json(), include_security=True) items: list[dict | str] = [] if "orderedItems" in data: items = list(reversed(data["orderedItems"])) elif "items" in data: items = list(data["items"]) ids = [] for item in items: if not isinstance(item, dict): continue post_obj: dict | None = item if item["type"] in ["Create", "Update"]: post_obj = item.get("object") if post_obj: ids.append(post_obj["id"]) return ids except ValueError: # Some servers return these with a 200 status code! if b"not found" in response.content.lower(): return [] raise ValueError( "JSON parse error fetching featured collection", response.content, ) def fetch_actor(self) -> bool: """ Fetches the user's actor information, as well as their domain from webfinger if it's available. 
""" from activities.models import Emoji if self.local: raise ValueError("Cannot fetch local identities") try: response = SystemActor().signed_request( method="get", uri=self.actor_uri, ) except httpx.TimeoutException: raise TryAgainLater() except (httpx.RequestError, ssl.SSLCertVerificationError): return False content_type = response.headers.get("content-type") if content_type and "html" in content_type: # Some servers don't properly handle "application/activity+json" return False status_code = response.status_code if status_code >= 400: if status_code in [408, 429, 504]: raise TryAgainLater() if status_code == 410 and self.pk: # Their account got deleted, so let's do the same. Identity.objects.filter(pk=self.pk).delete() if status_code < 500 and status_code not in [401, 403, 404, 406, 410]: logger.info( "Client error fetching actor: %d %s", status_code, self.actor_uri ) return False json_data = json_from_response(response) if not json_data: return False try: document = canonicalise(json_data, include_security=True) except ValueError: # servers with empty or invalid responses are inevitable logger.info( "Invalid response fetching actor %s", self.actor_uri, extra={ "content": response.content, }, ) return False if "type" not in document: return False self.name = document.get("name") self.profile_uri = document.get("url") self.inbox_uri = document.get("inbox") self.outbox_uri = document.get("outbox") self.followers_uri = document.get("followers") self.following_uri = document.get("following") self.featured_collection_uri = document.get("featured") self.actor_type = document["type"].lower() self.shared_inbox_uri = document.get("endpoints", {}).get("sharedInbox") self.summary = document.get("summary") self.username = document.get("preferredUsername") if self.username and "@value" in self.username: self.username = self.username["@value"] if self.username: self.username = self.username self.manually_approves_followers = document.get("manuallyApprovesFollowers") 
self.public_key = document.get("publicKey", {}).get("publicKeyPem") self.public_key_id = document.get("publicKey", {}).get("id") # Sometimes the public key PEM is in a language construct? if isinstance(self.public_key, dict): self.public_key = self.public_key["@value"] self.icon_uri = get_first_image_url(document.get("icon", None)) self.image_uri = get_first_image_url(document.get("image", None)) self.discoverable = document.get("toot:discoverable", True) # Profile links/metadata self.metadata = [] for attachment in get_list(document, "attachment"): if ( attachment["type"] == "PropertyValue" and "name" in attachment and "value" in attachment ): self.metadata.append( { "name": attachment["name"], "value": FediverseHtmlParser(attachment["value"]).html, } ) # Now go do webfinger with that info to see if we can get a canonical domain actor_url_parts = urlparse(self.actor_uri) self.domain = Domain.get_remote_domain(actor_url_parts.hostname) if self.username: try: webfinger_actor, webfinger_handle = self.fetch_webfinger( f"{self.username}@{actor_url_parts.hostname}" ) if webfinger_handle: webfinger_username, webfinger_domain = webfinger_handle.split("@") self.username = webfinger_username self.domain = Domain.get_remote_domain(webfinger_domain) except TryAgainLater: # continue with original domain when webfinger times out logger.info("WebFinger timed out: %s", self.actor_uri) pass except ValueError as exc: logger.info( "Can't parse WebFinger: %s %s", exc.args[0], self.actor_uri, exc_info=exc, ) return False # Emojis (we need the domain so we do them here) for tag in get_list(document, "tag"): if tag["type"].lower() in ["toot:emoji", "emoji"]: Emoji.by_ap_tag(self.domain, tag, create=True) # Mark as fetched self.fetched = timezone.now() try: with transaction.atomic(): # if we don't wrap this in its own transaction, the exception # handler is guaranteed to fail self.save() except IntegrityError as e: # See if we can fetch a PK and save there if self.pk is None: try: 
other_row = Identity.objects.get(actor_uri=self.actor_uri) except Identity.DoesNotExist: raise ValueError( f"Could not save Identity at end of actor fetch: {e}" ) self.pk: int | None = other_row.pk with transaction.atomic(): self.save() # Fetch pinned posts in a followup task if self.featured_collection_uri: InboxMessage.create_internal( { "type": "SyncPins", "identity": self.pk, } ) return True ### OpenGraph API ### def to_opengraph_dict(self) -> dict: return { "og:title": f"{self.name} (@{self.handle})", "og:type": "profile", "og:description": self.summary, "og:profile:username": self.handle, "og:image:url": self.local_icon_url().absolute, "og:image:height": 85, "og:image:width": 85, } ### Mastodon Client API ### def to_mastodon_mention_json(self): return { "id": self.id, "username": self.username or "", "url": self.absolute_profile_uri() or "", "acct": self.handle or "", } def to_mastodon_json(self, source=False, include_counts=True): from activities.models import Emoji, Post header_image = self.local_image_url() missing = StaticAbsoluteUrl("img/missing.png").absolute metadata_value_text = ( " ".join([m["value"] for m in self.metadata]) if self.metadata else "" ) emojis = Emoji.emojis_from_content( f"{self.name} {self.summary} {metadata_value_text}", self.domain ) renderer = ContentRenderer(local=False) result = { "id": self.pk, "username": self.username or "", "acct": self.username if source else self.handle, "url": self.absolute_profile_uri() or "", "display_name": self.name or "", "note": self.summary or "", "avatar": self.local_icon_url().absolute, "avatar_static": self.local_icon_url().absolute, "header": header_image.absolute if header_image else missing, "header_static": header_image.absolute if header_image else missing, "locked": bool(self.manually_approves_followers), "fields": ( [ { "name": m["name"], "value": renderer.render_identity_data(m["value"], self), "verified_at": None, } for m in self.metadata ] if self.metadata else [] ), "emojis": 
[emoji.to_mastodon_json() for emoji in emojis], "bot": self.actor_type.lower() in ["service", "application"], "group": self.actor_type.lower() == "group", "discoverable": self.discoverable, "suspended": False, "limited": False, "created_at": format_ld_date( self.created.replace(hour=0, minute=0, second=0, microsecond=0) ), "last_status_at": None, # TODO: populate "statuses_count": self.posts.count() if include_counts else 0, "followers_count": self.inbound_follows.count() if include_counts else 0, "following_count": self.outbound_follows.count() if include_counts else 0, } if source: privacy_map = { Post.Visibilities.public: "public", Post.Visibilities.unlisted: "unlisted", Post.Visibilities.local_only: "unlisted", Post.Visibilities.followers: "private", Post.Visibilities.mentioned: "direct", } result["source"] = { "note": FediverseHtmlParser(self.summary).plain_text if self.summary else "", "fields": ( [ { "name": m["name"], "value": FediverseHtmlParser(m["value"]).plain_text, "verified_at": None, } for m in self.metadata ] if self.metadata else [] ), "privacy": privacy_map[ Config.load_identity(self).default_post_visibility ], "sensitive": False, "language": "unk", "follow_requests_count": 0, } return result ### Cryptography ### def signed_request( self, method: Literal["get", "post"], uri: str, body: dict | None = None, ): """ Performs a signed request on behalf of the System Actor. 
""" return HttpSignature.signed_request( method=method, uri=uri, body=body, private_key=self.private_key, key_id=self.public_key_id, ) def generate_keypair(self): if not self.local: raise ValueError("Cannot generate keypair for remote user") self.private_key, self.public_key = RsaKeys.generate_keypair() self.public_key_id = self.actor_uri + "#main-key" self.save() ### Config ### def config_identity(self) -> Config.IdentityOptions: return Config.load_identity(self) def lazy_config_value(self, key: str): """ Lazily load a config value for this Identity """ if key not in Config.IdentityOptions.__fields__: raise KeyError(f"Undefined IdentityOption for {key}") return lazy(lambda: getattr(self.config_identity, key)) class IdentityService: """ High-level helper methods for doing things to identities """ def __init__(self, identity: Identity): self.identity = identity def create( cls, user: User, username: str, domain: Domain, name: str, discoverable: bool = True, ) -> Identity: identity = Identity.objects.create( actor_uri=f"https://{domain.uri_domain}/@{username}@{domain.domain}/", username=username, domain=domain, name=name, local=True, discoverable=discoverable, ) identity.users.add(user) identity.generate_keypair() # Send fanouts to all admin identities for admin_identity in cls.admin_identities(): FanOut.objects.create( type=FanOut.Types.identity_created, identity=admin_identity, subject_identity=identity, ) return identity def admin_identities(cls) -> models.QuerySet[Identity]: return Identity.objects.filter(users__admin=True).distinct() def following(self) -> models.QuerySet[Identity]: return ( Identity.objects.filter( inbound_follows__source=self.identity, inbound_follows__state__in=FollowStates.group_active(), ) .not_deleted() .distinct() .order_by("username") .select_related("domain") ) def followers(self) -> models.QuerySet[Identity]: return ( Identity.objects.filter( outbound_follows__target=self.identity, outbound_follows__state=FollowStates.accepted, ) 
.not_deleted() .distinct() .order_by("username") .select_related("domain") ) def follow_requests(self) -> models.QuerySet[Identity]: return ( Identity.objects.filter( outbound_follows__target=self.identity, outbound_follows__state=FollowStates.pending_approval, ) .not_deleted() .distinct() .order_by("username") .select_related("domain") ) def accept_follow_request(self, source_identity): existing_follow = Follow.maybe_get(source_identity, self.identity) if existing_follow: existing_follow.transition_perform(FollowStates.accepting) def reject_follow_request(self, source_identity): existing_follow = Follow.maybe_get(source_identity, self.identity) if existing_follow: existing_follow.transition_perform(FollowStates.rejecting) def follow(self, target_identity: Identity, boosts=True) -> Follow: """ Follows a user (or does nothing if already followed). Returns the follow. """ if target_identity == self.identity: raise ValueError("You cannot follow yourself") return Follow.create_local(self.identity, target_identity, boosts=boosts) def unfollow(self, target_identity: Identity): """ Unfollows a user (or does nothing if not followed). """ if target_identity == self.identity: raise ValueError("You cannot unfollow yourself") existing_follow = Follow.maybe_get(self.identity, target_identity) if existing_follow: existing_follow.transition_perform(FollowStates.undone) InboxMessage.create_internal( { "type": "ClearTimeline", "object": target_identity.pk, "actor": self.identity.pk, } ) def block(self, target_identity: Identity) -> Block: """ Blocks a user. 
""" if target_identity == self.identity: raise ValueError("You cannot block yourself") self.unfollow(target_identity) self.reject_follow_request(target_identity) block = Block.create_local_block(self.identity, target_identity) InboxMessage.create_internal( { "type": "ClearTimeline", "actor": self.identity.pk, "object": target_identity.pk, "fullErase": True, } ) return block def unblock(self, target_identity: Identity): """ Unlocks a user """ if target_identity == self.identity: raise ValueError("You cannot unblock yourself") existing_block = Block.maybe_get(self.identity, target_identity, mute=False) if existing_block and existing_block.active: existing_block.transition_perform(BlockStates.undone) def mute( self, target_identity: Identity, duration: int = 0, include_notifications: bool = False, ) -> Block: """ Mutes a user. """ if target_identity == self.identity: raise ValueError("You cannot mute yourself") return Block.create_local_mute( self.identity, target_identity, duration=duration or None, include_notifications=include_notifications, ) def unmute(self, target_identity: Identity): """ Unmutes a user """ if target_identity == self.identity: raise ValueError("You cannot unmute yourself") existing_block = Block.maybe_get(self.identity, target_identity, mute=True) if existing_block and existing_block.active: existing_block.transition_perform(BlockStates.undone) def relationships(self, from_identity: Identity): """ Returns a dict of any active relationships from the given identity. 
""" return { "outbound_follow": Follow.maybe_get( from_identity, self.identity, require_active=True ), "inbound_follow": Follow.maybe_get( self.identity, from_identity, require_active=True ), "outbound_block": Block.maybe_get( from_identity, self.identity, mute=False, require_active=True ), "inbound_block": Block.maybe_get( self.identity, from_identity, mute=False, require_active=True ), "outbound_mute": Block.maybe_get( from_identity, self.identity, mute=True, require_active=True ), } def sync_pins(self, object_uris): if not object_uris or self.identity.domain.blocked: return with transaction.atomic(): for object_uri in object_uris: try: post = Post.by_object_uri(object_uri, fetch=True) PostInteraction.objects.get_or_create( type=PostInteraction.Types.pin, identity=self.identity, post=post, state__in=PostInteractionStates.group_active(), ) except MultipleObjectsReturned as exc: logger.exception("%s on %s", exc, object_uri) pass except Post.DoesNotExist: # ignore 404s... pass except TryAgainLater: # don't wait for it now, it'll be synced on next refresh pass for removed in PostInteraction.objects.filter( type=PostInteraction.Types.pin, identity=self.identity, state__in=PostInteractionStates.group_active(), ).exclude(post__object_uri__in=object_uris): removed.transition_perform(PostInteractionStates.undone_fanned_out) def mastodon_json_relationship(self, from_identity: Identity): """ Returns a Relationship object for the from_identity's relationship with this identity. 
""" relationships = self.relationships(from_identity) return { "id": self.identity.pk, "following": relationships["outbound_follow"] is not None and relationships["outbound_follow"].accepted, "followed_by": relationships["inbound_follow"] is not None and relationships["inbound_follow"].accepted, "showing_reblogs": ( relationships["outbound_follow"] and relationships["outbound_follow"].boosts or False ), "notifying": False, "blocking": relationships["outbound_block"] is not None, "blocked_by": relationships["inbound_block"] is not None, "muting": relationships["outbound_mute"] is not None, "muting_notifications": False, "requested": relationships["outbound_follow"] is not None and relationships["outbound_follow"].state == FollowStates.pending_approval, "domain_blocking": False, "endorsed": False, "note": ( relationships["outbound_follow"] and relationships["outbound_follow"].note or "" ), } def set_summary(self, summary: str): """ Safely sets a summary and turns linebreaks into HTML """ if summary: self.identity.summary = FediverseHtmlParser(linebreaks_filter(summary)).html else: self.identity.summary = None self.identity.save() def set_icon(self, file): """ Sets the user's avatar image """ self.identity.icon.save( file.name, resize_image(file, size=(400, 400)), ) def set_image(self, file): """ Sets the user's header image """ self.identity.image.save( file.name, resize_image(file, size=(1500, 500)), ) def handle_internal_add_follow(cls, payload): """ Handles an inbox message saying we need to follow a handle Message format: { "type": "AddFollow", "source": "90310938129083", "target_handle": "andrew@aeracode.org", "boosts": true, } """ # Retrieve ourselves self = cls(Identity.objects.get(pk=payload["source"])) # Get the remote end (may need a fetch) username, domain = payload["target_handle"].split("@") target_identity = Identity.by_username_and_domain(username, domain, fetch=True) if target_identity is None: raise ValueError(f"Cannot find identity to follow: 
{target_identity}") # Follow! self.follow(target_identity=target_identity, boosts=payload.get("boosts", True)) def handle_internal_sync_pins(cls, payload): """ Handles an inbox message saying we need to sync featured posts Message format: { "type": "SyncPins", "identity": "90310938129083", } """ # Retrieve ourselves actor = Identity.objects.get(pk=payload["identity"]) self = cls(actor) # Get the remote end (may need a fetch) if actor.featured_collection_uri: featured = actor.fetch_pinned_post_uris(actor.featured_collection_uri) self.sync_pins(featured) def accept_follow_request( request: HttpRequest, id: str | None = None, ) -> schemas.Relationship: source_identity = get_object_or_404( Identity.objects.exclude(restriction=Identity.Restriction.blocked), pk=id ) IdentityService(request.identity).accept_follow_request(source_identity) return IdentityService(source_identity).mastodon_json_relationship(request.identity)
null
19,771
from django.http import HttpRequest from django.shortcuts import get_object_or_404 from hatchway import api_view from api import schemas from api.decorators import scope_required from api.pagination import MastodonPaginator, PaginatingApiResponse, PaginationResult from users.models.identity import Identity from users.services.identity import IdentityService class Identity(StatorModel): def get_scheme(self, url): def get_hostname(self, url): def __str__(self): def absolute_profile_uri(self): def all_absolute_profile_uris(self) -> list[str]: def local_icon_url(self) -> RelativeAbsoluteUrl: def local_image_url(self) -> RelativeAbsoluteUrl | None: def safe_summary(self): def safe_metadata(self): def ensure_uris(self): def add_alias(self, actor_uri: str): def remove_alias(self, actor_uri: str): def by_handle(cls, handle, fetch: bool = False) -> Optional["Identity"]: def by_username_and_domain( cls, username: str, domain: str | Domain, fetch: bool = False, local: bool = False, ) -> Optional["Identity"]: def by_actor_uri(cls, uri, create=False, transient=False) -> "Identity": def name_or_handle(self): def html_name_or_handle(self): def handle(self): def data_age(self) -> float: def outdated(self) -> bool: def blocked(self) -> bool: def limited(self) -> bool: def to_webfinger(self): def to_ap(self): def to_ap_tag(self): def to_update_ap(self): def to_delete_ap(self): def handle_update_ap(cls, data): def handle_delete_ap(cls, data): def mark_deleted(self): def fetch_webfinger_url(cls, domain: str): def fetch_webfinger(cls, handle: str) -> tuple[str | None, str | None]: def fetch_pinned_post_uris(cls, uri: str) -> list[str]: def fetch_actor(self) -> bool: def to_opengraph_dict(self) -> dict: def to_mastodon_mention_json(self): def to_mastodon_json(self, source=False, include_counts=True): def signed_request( self, method: Literal["get", "post"], uri: str, body: dict | None = None, ): def generate_keypair(self): def config_identity(self) -> Config.IdentityOptions: def 
lazy_config_value(self, key: str): class IdentityService: def __init__(self, identity: Identity): def create( cls, user: User, username: str, domain: Domain, name: str, discoverable: bool = True, ) -> Identity: def admin_identities(cls) -> models.QuerySet[Identity]: def following(self) -> models.QuerySet[Identity]: def followers(self) -> models.QuerySet[Identity]: def follow_requests(self) -> models.QuerySet[Identity]: def accept_follow_request(self, source_identity): def reject_follow_request(self, source_identity): def follow(self, target_identity: Identity, boosts=True) -> Follow: def unfollow(self, target_identity: Identity): def block(self, target_identity: Identity) -> Block: def unblock(self, target_identity: Identity): def mute( self, target_identity: Identity, duration: int = 0, include_notifications: bool = False, ) -> Block: def unmute(self, target_identity: Identity): def relationships(self, from_identity: Identity): def sync_pins(self, object_uris): def mastodon_json_relationship(self, from_identity: Identity): def set_summary(self, summary: str): def set_icon(self, file): def set_image(self, file): def handle_internal_add_follow(cls, payload): def handle_internal_sync_pins(cls, payload): def reject_follow_request( request: HttpRequest, id: str | None = None, ) -> schemas.Relationship: source_identity = get_object_or_404( Identity.objects.exclude(restriction=Identity.Restriction.blocked), pk=id ) IdentityService(request.identity).reject_follow_request(source_identity) return IdentityService(source_identity).mastodon_json_relationship(request.identity)
null
19,772
from typing import Any from django.core.files import File from django.http import HttpRequest from django.shortcuts import get_object_or_404 from hatchway import ApiResponse, QueryOrBody, api_view from activities.models import Post, PostInteraction, PostInteractionStates from activities.services import SearchService from api import schemas from api.decorators import scope_required from api.pagination import MastodonPaginator, PaginatingApiResponse, PaginationResult from core.models import Config from users.models import Identity, IdentityStates from users.services import IdentityService from users.shortcuts import by_handle_or_404 def verify_credentials(request) -> schemas.Account: return schemas.Account.from_identity(request.identity, source=True)
null
19,773
from typing import Any from django.core.files import File from django.http import HttpRequest from django.shortcuts import get_object_or_404 from hatchway import ApiResponse, QueryOrBody, api_view from activities.models import Post, PostInteraction, PostInteractionStates from activities.services import SearchService from api import schemas from api.decorators import scope_required from api.pagination import MastodonPaginator, PaginatingApiResponse, PaginationResult from core.models import Config from users.models import Identity, IdentityStates from users.services import IdentityService from users.shortcuts import by_handle_or_404 def update_credentials( request, display_name: QueryOrBody[str | None] = None, note: QueryOrBody[str | None] = None, discoverable: QueryOrBody[bool | None] = None, locked: QueryOrBody[bool | None] = None, source: QueryOrBody[dict[str, Any] | None] = None, fields_attributes: QueryOrBody[dict[str, dict[str, str]] | None] = None, avatar: File | None = None, header: File | None = None, ) -> schemas.Account: identity = request.identity service = IdentityService(identity) if display_name is not None: identity.name = display_name if note is not None: service.set_summary(note) if discoverable is not None: identity.discoverable = discoverable if locked is not None: identity.manually_approves_followers = locked if source: if "privacy" in source: privacy_map = { "public": Post.Visibilities.public, "unlisted": Post.Visibilities.unlisted, "private": Post.Visibilities.followers, "direct": Post.Visibilities.mentioned, } Config.set_identity( identity, "default_post_visibility", privacy_map[source["privacy"]], ) if fields_attributes: identity.metadata = [] for attribute in fields_attributes.values(): attr_name = attribute.get("name", None) attr_value = attribute.get("value", None) if attr_name: # Empty value means delete this item if not attr_value: break identity.metadata.append({"name": attr_name, "value": attr_value}) if avatar: 
service.set_icon(avatar) if header: service.set_image(header) identity.save() identity.transition_perform(IdentityStates.edited) return schemas.Account.from_identity(identity, source=True)
null
19,774
from typing import Any from django.core.files import File from django.http import HttpRequest from django.shortcuts import get_object_or_404 from hatchway import ApiResponse, QueryOrBody, api_view from activities.models import Post, PostInteraction, PostInteractionStates from activities.services import SearchService from api import schemas from api.decorators import scope_required from api.pagination import MastodonPaginator, PaginatingApiResponse, PaginationResult from core.models import Config from users.models import Identity, IdentityStates from users.services import IdentityService from users.shortcuts import by_handle_or_404 def account_relationships( request, id: list[str] | str | None ) -> list[schemas.Relationship]: result = [] if isinstance(id, str): ids = [id] elif id is None: ids = [] else: ids = id for actual_id in ids: identity = get_object_or_404(Identity, pk=actual_id) result.append( IdentityService(identity).mastodon_json_relationship(request.identity) ) return result
null
19,775
from typing import Any from django.core.files import File from django.http import HttpRequest from django.shortcuts import get_object_or_404 from hatchway import ApiResponse, QueryOrBody, api_view from activities.models import Post, PostInteraction, PostInteractionStates from activities.services import SearchService from api import schemas from api.decorators import scope_required from api.pagination import MastodonPaginator, PaginatingApiResponse, PaginationResult from core.models import Config from users.models import Identity, IdentityStates from users.services import IdentityService from users.shortcuts import by_handle_or_404 The provided code snippet includes necessary dependencies for implementing the `familiar_followers` function. Write a Python function `def familiar_followers( request, id: list[str] | str | None ) -> list[schemas.FamiliarFollowers]` to solve the following problem: Returns people you follow that also follow given account IDs Here is the function: def familiar_followers( request, id: list[str] | str | None ) -> list[schemas.FamiliarFollowers]: """ Returns people you follow that also follow given account IDs """ if isinstance(id, str): ids = [id] elif id is None: ids = [] else: ids = id result = [] for actual_id in ids: target_identity = get_object_or_404(Identity, pk=actual_id) result.append( schemas.FamiliarFollowers( id=actual_id, accounts=[ schemas.Account.from_identity(identity) for identity in Identity.objects.filter( inbound_follows__source=request.identity, outbound_follows__target=target_identity, )[:20] ], ) ) return result
Returns people you follow that also follow given account IDs