hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7fcc95deeeeb42de9b52e629a5151cc201fa630 | 2,161 | py | Python | mello/member/models.py | mmbln/mello | baf13571fd6b14130a586cca40ef9c1d739faa60 | [
"Apache-2.0"
] | null | null | null | mello/member/models.py | mmbln/mello | baf13571fd6b14130a586cca40ef9c1d739faa60 | [
"Apache-2.0"
] | null | null | null | mello/member/models.py | mmbln/mello | baf13571fd6b14130a586cca40ef9c1d739faa60 | [
"Apache-2.0"
] | null | null | null | # member/models.py
# -*- coding: utf-8 -*-
from django.db import models
from django.contrib.auth.models import (
AbstractBaseUser, BaseUserManager
)
class MemberManager(BaseUserManager):
    """Manager that creates regular ``Member`` users and superusers."""

    def create_user(self,
                    login_name,
                    email,
                    full_name='',
                    member_img='',
                    password=None):
        """Create and persist a ``Member``.

        :param login_name: unique login handle (the ``USERNAME_FIELD``).
        :param email: e-mail address, normalized via ``normalize_email``.
        :param full_name: optional display name.
        :param member_img: optional value stored on the ``image`` field.
        :param password: if given, hashed via ``set_password``; otherwise the
            user is saved without a usable password.
        :return: the saved user instance.
        """
        email_ = self.normalize_email(email)
        user = self.model(login_name=login_name,
                          email=email_,
                          full_name=full_name,
                          image=member_img)
        if password:
            user.set_password(password)
        user.save()
        return user

    def create_superuser(self,
                         login_name,
                         email,
                         full_name='',
                         member_img='',
                         password=None):
        """Create a superuser.

        Delegates to :meth:`create_user`, then grants admin/staff rights.
        """
        user = self.create_user(login_name,
                                email,
                                full_name=full_name,
                                member_img=member_img,
                                password=password)
        user.is_admin = True
        # Bug fix: a superuser must also be staff to log into the Django admin
        # site; the original code never set this flag.
        user.is_staff = True
        user.save()
        return user
# (code, human-readable label) pairs for Member.status.
STATUS_CHOICES = (
    ('en', 'Entered'),
    ('au', 'Authorized'),
    ('bl', 'Blocked'),
    ('pr', 'Password recover'),  # fixed typo: was "Password rocover"
    ('il', 'Illegal'),
)
class Member(AbstractBaseUser):
    """Custom user model authenticated by ``login_name`` instead of username."""

    login_name = models.CharField(max_length=32, unique=True)
    is_admin = models.BooleanField(default=False)
    email = models.EmailField(unique=True)
    full_name = models.CharField(max_length=64)
    # Account lifecycle state; new accounts start as 'en' (Entered).
    status = models.CharField(max_length=2,
                              choices=STATUS_CHOICES,
                              default='en')
    is_staff = models.BooleanField(default=False)
    image = models.ImageField(upload_to='member')

    objects = MemberManager()

    USERNAME_FIELD = 'login_name'
    REQUIRED_FIELDS = ['email']

    def __str__(self):
        return self.login_name
| 27.705128 | 54 | 0.49329 |
from django.db import models
from django.contrib.auth.models import (
AbstractBaseUser, BaseUserManager
)
class MemberManager(BaseUserManager):
    """Manager that creates regular ``Member`` users and superusers."""

    def create_user(self,
                    login_name,
                    email,
                    full_name='',
                    member_img='',
                    password=None):
        # Normalize the e-mail, build the model instance, optionally hash the
        # password, persist, and return the saved user.
        email_ = self.normalize_email(email)
        user = self.model(login_name=login_name,
                          email=email_,
                          full_name=full_name,
                          image=member_img)
        if password:
            user.set_password(password)
        user.save()
        return user

    def create_superuser(self,
                         login_name,
                         email,
                         full_name='',
                         member_img='',
                         password=None):
        user = self.create_user(login_name,
                                email,
                                full_name=full_name,
                                member_img=member_img,
                                password=password)
        user.is_admin = True
        # Bug fix: superusers must also be staff to access the Django admin.
        user.is_staff = True
        user.save()
        return user
STATUS_CHOICES = (
    ('en', 'Entered'),
    ('au', 'Authorized'),
    ('bl', 'Blocked'),
    ('pr', 'Password recover'),  # fixed typo: was "Password rocover"
    ('il', 'Illegal'),
)
class Member(AbstractBaseUser):
    """Custom user model keyed on ``login_name``."""

    login_name = models.CharField(max_length=32, unique=True)
    is_admin = models.BooleanField(default=False)
    email = models.EmailField(unique=True)
    full_name = models.CharField(max_length=64)
    status = models.CharField(max_length=2,
                              choices=STATUS_CHOICES,
                              default='en')
    is_staff = models.BooleanField(default=False)
    image = models.ImageField(upload_to='member')

    objects = MemberManager()

    USERNAME_FIELD = 'login_name'
    REQUIRED_FIELDS = ['email']

    def __str__(self):
        return self.login_name
| true | true |
f7fcca0a35e1be8d8151f161a1c5a5e661c20009 | 2,211 | py | Python | archives/tk/mixin.py | mcxiaoke/python-labs | 61c0a1f91008ba82fc2f5a5deb19e60aec9df960 | [
"Apache-2.0"
] | 7 | 2016-07-08T10:53:13.000Z | 2021-07-20T00:20:10.000Z | archives/tk/mixin.py | mcxiaoke/python-labs | 61c0a1f91008ba82fc2f5a5deb19e60aec9df960 | [
"Apache-2.0"
] | 1 | 2021-05-11T05:20:18.000Z | 2021-05-11T05:20:18.000Z | archives/tk/mixin.py | mcxiaoke/python-labs | 61c0a1f91008ba82fc2f5a5deb19e60aec9df960 | [
"Apache-2.0"
] | 7 | 2016-10-31T06:31:54.000Z | 2020-08-31T20:55:00.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: mcxiaoke
# @Date: 2015-08-10 21:24:18
# GUI编码技术
from __future__ import print_function
import sys
import os
from Tkinter import *
from lib import *
from tkSimpleDialog import *
from tkFileDialog import *
from tkMessageBox import *
from launchmodes import PortableLauncher,System
class GuiMixin:
    """Mixin adding common dialog and utility helpers to Tkinter widgets.

    Expected to be mixed into a Tkinter widget class (e.g. a Frame) so that
    methods such as ``quit`` can delegate to the widget base class.
    """

    def infobox(self, title, text, *args):
        """Show an informational dialog."""
        return showinfo(title, text)

    def errorbox(self, text):
        """Show an error dialog."""
        showerror('Error!', text)

    def question(self, title, text, *args):
        """Ask a yes/no question; return True when the user answers yes."""
        return askyesno(title, text)

    def notdone(self):
        """Placeholder handler for unimplemented options."""
        showerror('Not implemented', 'Option not available')

    def quit(self):
        """Ask for confirmation, then quit the enclosing Frame."""
        ans = self.question('Verify quit', 'Are you sure you want to quit?')
        if ans:
            Frame.quit(self)

    def help(self):
        """Show a minimal help dialog."""
        self.infobox('RTFM', 'See figure 1...')

    def selectOpenFile(self, file='', dir=''):
        """Prompt for a file to open; return the chosen path."""
        return askopenfilename(initialdir=dir, initialfile=file)

    def selectSaveFile(self, file='', dir=''):
        """Prompt for a save destination; return the chosen path."""
        return asksaveasfilename(initialfile=file, initialdir=dir)

    def clone(self, args=()):
        """Open a new top-level window running another instance of this class."""
        new = Toplevel()
        myclass = self.__class__
        myclass(new, *args)

    def spawn(self, pycmdline, wait=False):
        """Run a Python command line, optionally blocking until it exits."""
        if not wait:
            PortableLauncher(pycmdline, pycmdline)()
        else:
            System(pycmdline, pycmdline)()

    def browser(self, filename):
        """Open *filename* in a read-only scrolled-text viewer window."""
        new = Toplevel()
        view = ScrolledText(new, file=filename)
        view.text.config(height=30, width=85)
        view.text.config(font=('courier', 12, 'normal'))
        new.title('Text Viewer')
        new.iconname('browser')  # fixed typo: was 'brower'
def _mixin_demo():
    """Self-test: open a tiny window exercising the GuiMixin helpers."""
    class TestMixin(GuiMixin, Frame):
        def __init__(self, parent=None):
            Frame.__init__(self, parent)
            self.pack()
            # Same four buttons, same order, as the hand-written version.
            for label, handler in (('quit', self.quit),
                                   ('help', self.help),
                                   ('clone', self.clone),
                                   ('spawn', self.other)):
                Button(self, text=label, command=handler).pack(fill=X)

        def other(self):
            self.spawn('mixin.py')

    TestMixin().mainloop()
if __name__ == '__main__':
    _mixin_demo()
| 26.638554 | 73 | 0.631841 |
from __future__ import print_function
import sys
import os
from Tkinter import *
from lib import *
from tkSimpleDialog import *
from tkFileDialog import *
from tkMessageBox import *
from launchmodes import PortableLauncher,System
class GuiMixin:
    """Mixin adding common dialog and utility helpers to Tkinter widgets."""

    def infobox(self, title, text, *args):
        return showinfo(title, text)

    def errorbox(self, text):
        showerror('Error!', text)

    def question(self, title, text, *args):
        return askyesno(title, text)

    def notdone(self):
        showerror('Not implemented', 'Option not available')

    def quit(self):
        # Confirm before delegating to the Frame base class.
        ans = self.question('Verify quit', 'Are you sure you want to quit?')
        if ans:
            Frame.quit(self)

    def help(self):
        self.infobox('RTFM', 'See figure 1...')

    def selectOpenFile(self, file='', dir=''):
        return askopenfilename(initialdir=dir, initialfile=file)

    def selectSaveFile(self, file='', dir=''):
        return asksaveasfilename(initialfile=file, initialdir=dir)

    def clone(self, args=()):
        new = Toplevel()
        myclass = self.__class__
        myclass(new, *args)

    def spawn(self, pycmdline, wait=False):
        if not wait:
            PortableLauncher(pycmdline, pycmdline)()
        else:
            System(pycmdline, pycmdline)()

    def browser(self, filename):
        new = Toplevel()
        view = ScrolledText(new, file=filename)
        view.text.config(height=30, width=85)
        view.text.config(font=('courier', 12, 'normal'))
        new.title('Text Viewer')
        new.iconname('browser')  # fixed typo: was 'brower'
def _mixin_demo():
    """Demo window wiring each GuiMixin helper to a button."""
    class TestMixin(GuiMixin, Frame):
        def __init__(self, parent=None):
            Frame.__init__(self, parent)
            self.pack()
            specs = (('quit', self.quit),
                     ('help', self.help),
                     ('clone', self.clone),
                     ('spawn', self.other))
            for label, handler in specs:
                Button(self, text=label, command=handler).pack(fill=X)

        def other(self):
            self.spawn('mixin.py')

    TestMixin().mainloop()
if __name__ == '__main__':
    _mixin_demo()
| true | true |
f7fcca1eb0939f053393119dec322ffe3e75b582 | 76,611 | py | Python | mlflow/pyfunc/__init__.py | sniafas/mlflow | c577c7f199d9efa076344785dabb2121edb7e6c8 | [
"Apache-2.0"
] | null | null | null | mlflow/pyfunc/__init__.py | sniafas/mlflow | c577c7f199d9efa076344785dabb2121edb7e6c8 | [
"Apache-2.0"
] | null | null | null | mlflow/pyfunc/__init__.py | sniafas/mlflow | c577c7f199d9efa076344785dabb2121edb7e6c8 | [
"Apache-2.0"
] | null | null | null | """
The ``python_function`` model flavor serves as a default model interface for MLflow Python models.
Any MLflow Python model is expected to be loadable as a ``python_function`` model.
In addition, the ``mlflow.pyfunc`` module defines a generic :ref:`filesystem format
<pyfunc-filesystem-format>` for Python models and provides utilities for saving to and loading from
this format. The format is self contained in the sense that it includes all necessary information
for anyone to load it and use it. Dependencies are either stored directly with the model or
referenced via a Conda environment.
The ``mlflow.pyfunc`` module also defines utilities for creating custom ``pyfunc`` models
using frameworks and inference logic that may not be natively included in MLflow. See
:ref:`pyfunc-create-custom`.
.. _pyfunc-inference-api:
*************
Inference API
*************
Python function models are loaded as an instance of :py:class:`PyFuncModel
<mlflow.pyfunc.PyFuncModel>`, which is an MLflow wrapper around the model implementation and model
metadata (MLmodel file). You can score the model by calling the :py:func:`predict()
<mlflow.pyfunc.PyFuncModel.predict>` method, which has the following signature::
predict(
model_input: [pandas.DataFrame, numpy.ndarray, scipy.sparse.(csc.csc_matrix | csr.csr_matrix),
List[Any], Dict[str, Any]]
) -> [numpy.ndarray | pandas.(Series | DataFrame) | List]
All PyFunc models will support `pandas.DataFrame` as input and DL PyFunc models will also support
tensor inputs in the form of Dict[str, numpy.ndarray] (named tensors) and `numpy.ndarrays`
(unnamed tensors).
.. _pyfunc-filesystem-format:
*****************
Filesystem format
*****************
The Pyfunc format is defined as a directory structure containing all required data, code, and
configuration::
./dst-path/
./MLmodel: configuration
<code>: code packaged with the model (specified in the MLmodel file)
<data>: data packaged with the model (specified in the MLmodel file)
<env>: Conda environment definition (specified in the MLmodel file)
The directory structure may contain additional contents that can be referenced by the ``MLmodel``
configuration.
.. _pyfunc-model-config:
MLModel configuration
#####################
A Python model contains an ``MLmodel`` file in **python_function** format in its root with the
following parameters:
- loader_module [required]:
Python module that can load the model. Expected as module identifier
e.g. ``mlflow.sklearn``, it will be imported using ``importlib.import_module``.
The imported module must contain a function with the following signature::
_load_pyfunc(path: string) -> <pyfunc model implementation>
The path argument is specified by the ``data`` parameter and may refer to a file or
directory. The model implementation is expected to be an object with a
``predict`` method with the following signature::
predict(
model_input: [pandas.DataFrame, numpy.ndarray,
scipy.sparse.(csc.csc_matrix | csr.csr_matrix), List[Any], Dict[str, Any]]
) -> [numpy.ndarray | pandas.(Series | DataFrame) | List]
- code [optional]:
Relative path to a directory containing the code packaged with this model.
All files and directories inside this directory are added to the Python path
prior to importing the model loader.
- data [optional]:
Relative path to a file or directory containing model data.
The path is passed to the model loader.
- env [optional]:
Relative path to an exported Conda environment. If present this environment
should be activated prior to running the model.
- Optionally, any additional parameters necessary for interpreting the serialized model in
``pyfunc`` format.
.. rubric:: Example
::
tree example/sklearn_iris/mlruns/run1/outputs/linear-lr
::
├── MLmodel
├── code
│ ├── sklearn_iris.py
│
├── data
│ └── model.pkl
└── mlflow_env.yml
::
cat example/sklearn_iris/mlruns/run1/outputs/linear-lr/MLmodel
::
python_function:
code: code
data: data/model.pkl
loader_module: mlflow.sklearn
env: mlflow_env.yml
main: sklearn_iris
.. _pyfunc-create-custom:
******************************
Creating custom Pyfunc models
******************************
MLflow's persistence modules provide convenience functions for creating models with the
``pyfunc`` flavor in a variety of machine learning frameworks (scikit-learn, Keras, Pytorch, and
more); however, they do not cover every use case. For example, you may want to create an MLflow
model with the ``pyfunc`` flavor using a framework that MLflow does not natively support.
Alternatively, you may want to build an MLflow model that executes custom logic when evaluating
queries, such as preprocessing and postprocessing routines. Therefore, ``mlflow.pyfunc``
provides utilities for creating ``pyfunc`` models from arbitrary code and model data.
The :meth:`save_model()` and :meth:`log_model()` methods are designed to support multiple workflows
for creating custom ``pyfunc`` models that incorporate custom inference logic and artifacts
that the logic may require.
An `artifact` is a file or directory, such as a serialized model or a CSV. For example, a
serialized TensorFlow graph is an artifact. An MLflow model directory is also an artifact.
.. _pyfunc-create-custom-workflows:
Workflows
#########
:meth:`save_model()` and :meth:`log_model()` support the following workflows:
1. Programmatically defining a new MLflow model, including its attributes and artifacts.
Given a set of artifact URIs, :meth:`save_model()` and :meth:`log_model()` can
automatically download artifacts from their URIs and create an MLflow model directory.
In this case, you must define a Python class which inherits from :class:`~PythonModel`,
defining ``predict()`` and, optionally, ``load_context()``. An instance of this class is
specified via the ``python_model`` parameter; it is automatically serialized and deserialized
as a Python class, including all of its attributes.
2. Interpreting pre-existing data as an MLflow model.
If you already have a directory containing model data, :meth:`save_model()` and
:meth:`log_model()` can import the data as an MLflow model. The ``data_path`` parameter
specifies the local filesystem path to the directory containing model data.
In this case, you must provide a Python module, called a `loader module`. The
loader module defines a ``_load_pyfunc()`` method that performs the following tasks:
- Load data from the specified ``data_path``. For example, this process may include
deserializing pickled Python objects or models or parsing CSV files.
- Construct and return a pyfunc-compatible model wrapper. As in the first
use case, this wrapper must define a ``predict()`` method that is used to evaluate
queries. ``predict()`` must adhere to the :ref:`pyfunc-inference-api`.
The ``loader_module`` parameter specifies the name of your loader module.
For an example loader module implementation, refer to the `loader module
implementation in mlflow.keras <https://github.com/mlflow/mlflow/blob/
74d75109aaf2975f5026104d6125bb30f4e3f744/mlflow/keras.py#L157-L187>`_.
.. _pyfunc-create-custom-selecting-workflow:
Which workflow is right for my use case?
########################################
We consider the first workflow to be more user-friendly and generally recommend it for the
following reasons:
- It automatically resolves and collects specified model artifacts.
- It automatically serializes and deserializes the ``python_model`` instance and all of
its attributes, reducing the amount of user logic that is required to load the model
- You can create Models using logic that is defined in the ``__main__`` scope. This allows
custom models to be constructed in interactive environments, such as notebooks and the Python
REPL.
You may prefer the second, lower-level workflow for the following reasons:
- Inference logic is always persisted as code, rather than a Python object. This makes logic
easier to inspect and modify later.
- If you have already collected all of your model data in a single location, the second
workflow allows it to be saved in MLflow format directly, without enumerating constituent
artifacts.
"""
import importlib
import tempfile
import signal
import sys
import numpy as np
import os
import pandas
import yaml
from copy import deepcopy
import logging
import threading
import collections
import subprocess
from typing import Any, Union, List, Dict, Iterator, Tuple
import mlflow
import mlflow.pyfunc.model
from mlflow.models import Model, ModelSignature, ModelInputExample
from mlflow.models.model import MLMODEL_FILE_NAME
from mlflow.models.utils import _save_example
from mlflow.pyfunc.model import ( # pylint: disable=unused-import
PythonModel,
PythonModelContext,
get_default_conda_env,
)
from mlflow.pyfunc.model import get_default_pip_requirements
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.types import DataType, Schema, TensorSpec
from mlflow.types.utils import clean_tensor_type
from mlflow.utils import PYTHON_VERSION, get_major_minor_py_version, _is_in_ipython_notebook
from mlflow.utils.annotations import deprecated
from mlflow.utils.file_utils import _copy_file_or_tree, write_to
from mlflow.utils.model_utils import (
_get_flavor_configuration,
_validate_and_copy_code_paths,
_add_code_from_conf_to_system_path,
_get_flavor_configuration_from_uri,
_validate_and_prepare_target_save_path,
)
from mlflow.utils.uri import append_to_uri_path
from mlflow.utils.environment import (
_validate_env_arguments,
_process_pip_requirements,
_process_conda_env,
_CONDA_ENV_FILE_NAME,
_REQUIREMENTS_FILE_NAME,
_CONSTRAINTS_FILE_NAME,
_PYTHON_ENV_FILE_NAME,
_PythonEnv,
)
from mlflow.utils import env_manager as _EnvManager
from mlflow.utils.docstring_utils import format_docstring, LOG_MODEL_PARAM_DOCS
from mlflow.utils.databricks_utils import is_in_databricks_runtime
from mlflow.utils.file_utils import get_or_create_tmp_dir, get_or_create_nfs_tmp_dir
from mlflow.utils.process import cache_return_value_per_process
from mlflow.exceptions import MlflowException
from mlflow.tracking._model_registry import DEFAULT_AWAIT_MAX_SLEEP_SECONDS
from mlflow.protos.databricks_pb2 import (
INVALID_PARAMETER_VALUE,
RESOURCE_DOES_NOT_EXIST,
)
from scipy.sparse import csc_matrix, csr_matrix
from mlflow.utils.requirements_utils import (
_check_requirement_satisfied,
_parse_requirements,
)
from mlflow.utils import find_free_port
from mlflow.utils.nfs_on_spark import get_nfs_cache_root_dir
FLAVOR_NAME = "python_function"
MAIN = "loader_module"
CODE = "code"
DATA = "data"
ENV = "env"
PY_VERSION = "python_version"
_logger = logging.getLogger(__name__)
PyFuncInput = Union[pandas.DataFrame, np.ndarray, csc_matrix, csr_matrix, List[Any], Dict[str, Any]]
PyFuncOutput = Union[pandas.DataFrame, pandas.Series, np.ndarray, list]
def add_to_model(model, loader_module, data=None, code=None, env=None, **kwargs):
    """
    Add a ``pyfunc`` spec to the model configuration.
    Defines ``pyfunc`` configuration schema. Caller can use this to create a valid ``pyfunc`` model
    flavor out of an existing directory structure. For example, other model flavors can use this to
    specify how to use their output as a ``pyfunc``.
    NOTE:
        All paths are relative to the exported model root directory.
    :param model: Existing model.
    :param loader_module: The module to be used to load the model.
    :param data: Path to the model data.
    :param code: Path to the code dependencies.
    :param env: Conda environment.
    :param kwargs: Additional key-value pairs to include in the ``pyfunc`` flavor specification.
                   Values must be YAML-serializable.
    :return: Updated model configuration.
    """
    # Deep-copy so caller-supplied kwargs are never mutated or aliased.
    flavor_conf = deepcopy(kwargs)
    flavor_conf[MAIN] = loader_module
    flavor_conf[PY_VERSION] = PYTHON_VERSION
    # Optional entries are only recorded when truthy, matching the MLmodel schema.
    for key, value in ((CODE, code), (DATA, data), (ENV, env)):
        if value:
            flavor_conf[key] = value
    return model.add_flavor(FLAVOR_NAME, **flavor_conf)
def _load_model_env(path):
    """
    Get ENV file string from a model configuration stored in Python Function format.
    Returned value is a model-relative path to a Conda Environment file,
    or None if none was specified at model save time
    """
    flavor_conf = _get_flavor_configuration(model_path=path, flavor_name=FLAVOR_NAME)
    return flavor_conf.get(ENV, None)
def _enforce_mlflow_datatype(name, values: pandas.Series, t: DataType):
    """
    Enforce the input column type matches the declared in model input schema.
    The following type conversions are allowed:
    1. object -> string
    2. int -> long (upcast)
    3. float -> double (upcast)
    4. int -> double (safe conversion)
    5. np.datetime64[x] -> datetime (any precision)
    6. object -> datetime
    Any other type mismatch will raise error.

    :param name: Column name, used only in error messages.
    :param values: Input column to validate/convert.
    :param t: MLflow ``DataType`` declared for this column in the model schema.
    :return: ``values``, converted (or returned as-is) to a dtype compatible with ``t``.
    :raises MlflowException: if no safe conversion to ``t`` exists.
    """
    if values.dtype == object and t not in (DataType.binary, DataType.string):
        # Let pandas infer a more specific dtype before comparing against the schema type.
        values = values.infer_objects()
    if t == DataType.string and values.dtype == object:
        # NB: the object can contain any type and we currently cannot cast to pandas Strings
        # due to how None is cast
        return values
    # NB: Comparison of pandas and numpy data type fails when numpy data type is on the left hand
    # side of the comparison operator. It works, however, if pandas type is on the left hand side.
    # That is because pandas is aware of numpy.
    if t.to_pandas() == values.dtype or t.to_numpy() == values.dtype:
        # The types are already compatible => conversion is not necessary.
        return values
    if t == DataType.binary and values.dtype.kind == t.binary.to_numpy().kind:
        # NB: bytes in numpy have variable itemsize depending on the length of the longest
        # element in the array (column). Since MLflow binary type is length agnostic, we ignore
        # itemsize when matching binary columns.
        return values
    if t == DataType.datetime and values.dtype.kind == t.to_numpy().kind:
        # NB: datetime values have variable precision denoted by brackets, e.g. datetime64[ns]
        # denotes nanosecond precision. Since MLflow datetime type is precision agnostic, we
        # ignore precision when matching datetime columns.
        return values
    if t == DataType.datetime and values.dtype == object:
        # NB: Pyspark date columns get converted to object when converted to a pandas
        # DataFrame. To respect the original typing, we convert the column to datetime.
        try:
            return values.astype(np.datetime64, errors="raise")
        except ValueError:
            raise MlflowException(
                "Failed to convert column {0} from type {1} to {2}.".format(name, values.dtype, t)
            )
    numpy_type = t.to_numpy()
    # Decide whether the requested conversion is a lossless upcast: widening within the
    # same dtype kind, unsigned -> strictly wider signed int, or small (u)int -> float64.
    if values.dtype.kind == numpy_type.kind:
        is_upcast = values.dtype.itemsize <= numpy_type.itemsize
    elif values.dtype.kind == "u" and numpy_type.kind == "i":
        is_upcast = values.dtype.itemsize < numpy_type.itemsize
    elif values.dtype.kind in ("i", "u") and numpy_type == np.float64:
        # allow (u)int => double conversion
        is_upcast = values.dtype.itemsize <= 6
    else:
        is_upcast = False
    if is_upcast:
        return values.astype(numpy_type, errors="raise")
    else:
        # NB: conversion between incompatible types (e.g. floats -> ints or
        # double -> float) are not allowed. While supported by pandas and numpy,
        # these conversions alter the values significantly.
        def all_ints(xs):
            return all(pandas.isnull(x) or int(x) == x for x in xs)

        hint = ""
        if (
            values.dtype == np.float64
            and numpy_type.kind in ("i", "u")
            and values.hasnans
            and all_ints(values)
        ):
            hint = (
                " Hint: the type mismatch is likely caused by missing values. "
                "Integer columns in python can not represent missing values and are therefore "
                "encoded as floats. The best way to avoid this problem is to infer the model "
                "schema based on a realistic data sample (training dataset) that includes missing "
                "values. Alternatively, you can declare integer columns as doubles (float64) "
                "whenever these columns may have missing values. See `Handling Integers With "
                "Missing Values <https://www.mlflow.org/docs/latest/models.html#"
                "handling-integers-with-missing-values>`_ for more details."
            )
        raise MlflowException(
            "Incompatible input types for column {0}. "
            "Can not safely convert {1} to {2}.{3}".format(name, values.dtype, numpy_type, hint)
        )
def _enforce_tensor_spec(
    values: Union[np.ndarray, csc_matrix, csr_matrix], tensor_spec: TensorSpec
):
    """
    Enforce the input tensor shape and type matches the provided tensor spec.
    """
    expected_shape = tensor_spec.shape
    actual_shape = values.shape
    # Sparse matrices expose the element dtype on .data rather than the matrix itself.
    actual_type = values.dtype if isinstance(values, np.ndarray) else values.data.dtype
    # A dimension declared as -1 in the spec accepts any size.
    shape_mismatch = len(expected_shape) != len(actual_shape) or any(
        expected not in (-1, actual)
        for expected, actual in zip(expected_shape, actual_shape)
    )
    if shape_mismatch:
        raise MlflowException(
            "Shape of input {0} does not match expected shape {1}.".format(
                actual_shape, expected_shape
            )
        )
    if clean_tensor_type(actual_type) != tensor_spec.type:
        raise MlflowException(
            "dtype of input {0} does not match expected dtype {1}".format(
                values.dtype, tensor_spec.type
            )
        )
    return values
def _enforce_col_schema(pfInput: PyFuncInput, input_schema: Schema):
    """Enforce the input columns conform to the model's column-based signature."""
    # When the schema is unnamed, positionally take as many columns as it declares.
    if input_schema.has_input_names():
        input_names = input_schema.input_names()
    else:
        input_names = pfInput.columns[: len(input_schema.inputs)]
    input_types = input_schema.input_types()
    coerced = pandas.DataFrame()
    for col_name, col_type in zip(input_names, input_types):
        coerced[col_name] = _enforce_mlflow_datatype(col_name, pfInput[col_name], col_type)
    return coerced
def _enforce_tensor_schema(pfInput: PyFuncInput, input_schema: Schema):
    """Enforce the input tensor(s) conforms to the model's tensor-based signature.

    Named tensor schemas accept a dict of name -> numpy array, or a DataFrame whose
    columns are converted per-spec; unnamed schemas accept a single array (or sparse
    matrix, or a DataFrame converted to one). Raises MlflowException otherwise.
    """
    if input_schema.has_input_names():
        # Named tensors: validate each declared input by name.
        if isinstance(pfInput, dict):
            new_pfInput = dict()
            for col_name, tensor_spec in zip(input_schema.input_names(), input_schema.inputs):
                if not isinstance(pfInput[col_name], np.ndarray):
                    raise MlflowException(
                        "This model contains a tensor-based model signature with input names,"
                        " which suggests a dictionary input mapping input name to a numpy"
                        " array, but a dict with value type {0} was found.".format(
                            type(pfInput[col_name])
                        )
                    )
                new_pfInput[col_name] = _enforce_tensor_spec(pfInput[col_name], tensor_spec)
        elif isinstance(pfInput, pandas.DataFrame):
            # DataFrame input: convert each column to an ndarray of the declared dtype
            # before applying the per-tensor shape/type check.
            new_pfInput = dict()
            for col_name, tensor_spec in zip(input_schema.input_names(), input_schema.inputs):
                new_pfInput[col_name] = _enforce_tensor_spec(
                    np.array(pfInput[col_name], dtype=tensor_spec.type), tensor_spec
                )
        else:
            raise MlflowException(
                "This model contains a tensor-based model signature with input names, which"
                " suggests a dictionary input mapping input name to tensor, but an input of"
                " type {0} was found.".format(type(pfInput))
            )
    else:
        # Unnamed tensor: the schema declares exactly one spec; match the whole input to it.
        if isinstance(pfInput, pandas.DataFrame):
            new_pfInput = _enforce_tensor_spec(pfInput.to_numpy(), input_schema.inputs[0])
        elif isinstance(pfInput, (np.ndarray, csc_matrix, csr_matrix)):
            new_pfInput = _enforce_tensor_spec(pfInput, input_schema.inputs[0])
        else:
            raise MlflowException(
                "This model contains a tensor-based model signature with no input names,"
                " which suggests a numpy array input, but an input of type {0} was"
                " found.".format(type(pfInput))
            )
    return new_pfInput
def _enforce_schema(pfInput: PyFuncInput, input_schema: Schema):
    """
    Enforces the provided input matches the model's input schema,
    For signatures with input names, we check there are no missing inputs and reorder the inputs to
    match the ordering declared in schema if necessary. Any extra columns are ignored.
    For column-based signatures, we make sure the types of the input match the type specified in
    the schema or if it can be safely converted to match the input schema.
    For tensor-based signatures, we make sure the shape and type of the input matches the shape
    and type specified in model's input schema.
    """
    if not input_schema.is_tensor_spec():
        # Column-based signature: normalize list/ndarray/dict inputs to a DataFrame first.
        if isinstance(pfInput, (list, np.ndarray, dict)):
            try:
                pfInput = pandas.DataFrame(pfInput)
            except Exception as e:
                raise MlflowException(
                    "This model contains a column-based signature, which suggests a DataFrame"
                    " input. There was an error casting the input data to a DataFrame:"
                    " {0}".format(str(e))
                )
        if not isinstance(pfInput, pandas.DataFrame):
            raise MlflowException(
                "Expected input to be DataFrame or list. Found: %s" % type(pfInput).__name__
            )
    if input_schema.has_input_names():
        # make sure there are no missing columns
        input_names = input_schema.input_names()
        expected_cols = set(input_names)
        actual_cols = set()
        if len(expected_cols) == 1 and isinstance(pfInput, np.ndarray):
            # for schemas with a single column, match input with column
            pfInput = {input_names[0]: pfInput}
            actual_cols = expected_cols
        elif isinstance(pfInput, pandas.DataFrame):
            actual_cols = set(pfInput.columns)
        elif isinstance(pfInput, dict):
            actual_cols = set(pfInput.keys())
        missing_cols = expected_cols - actual_cols
        extra_cols = actual_cols - expected_cols
        # Preserve order from the original columns, since missing/extra columns are likely to
        # be in same order.
        missing_cols = [c for c in input_names if c in missing_cols]
        extra_cols = [c for c in actual_cols if c in extra_cols]
        if missing_cols:
            raise MlflowException(
                "Model is missing inputs {0}."
                " Note that there were extra inputs: {1}".format(missing_cols, extra_cols)
            )
    elif not input_schema.is_tensor_spec():
        # The model signature does not specify column names => we can only verify column count.
        num_actual_columns = len(pfInput.columns)
        if num_actual_columns < len(input_schema.inputs):
            raise MlflowException(
                "Model inference is missing inputs. The model signature declares "
                "{0} inputs but the provided value only has "
                "{1} inputs. Note: the inputs were not named in the signature so we can "
                "only verify their count.".format(len(input_schema.inputs), num_actual_columns)
            )
    # Delegate the actual coercion to the tensor- or column-based enforcer.
    return (
        _enforce_tensor_schema(pfInput, input_schema)
        if input_schema.is_tensor_spec()
        else _enforce_col_schema(pfInput, input_schema)
    )
class PyFuncModel:
    """
    MLflow 'python function' model.
    Wrapper around model implementation and metadata. This class is not meant to be constructed
    directly. Instead, instances of this class are constructed and returned from
    :py:func:`load_model() <mlflow.pyfunc.load_model>`.
    ``model_impl`` can be any Python object that implements the `Pyfunc interface
    <https://mlflow.org/docs/latest/python_api/mlflow.pyfunc.html#pyfunc-inference-api>`_, and is
    returned by invoking the model's ``loader_module``.
    ``model_meta`` contains model metadata loaded from the MLmodel file.
    """

    def __init__(self, model_meta: Model, model_impl: Any):
        # Fail fast: the wrapped implementation must expose predict() and metadata must exist.
        if not hasattr(model_impl, "predict"):
            raise MlflowException("Model implementation is missing required predict method.")
        if not model_meta:
            raise MlflowException("Model is missing metadata.")
        self._model_meta = model_meta
        self._model_impl = model_impl

    def predict(self, data: PyFuncInput) -> PyFuncOutput:
        """
        Generate model predictions.
        If the model contains signature, enforce the input schema first before calling the model
        implementation with the sanitized input. If the pyfunc model does not include model schema,
        the input is passed to the model implementation as is. See `Model Signature Enforcement
        <https://www.mlflow.org/docs/latest/models.html#signature-enforcement>`_ for more details."
        :param data: Model input as one of pandas.DataFrame, numpy.ndarray,
                     scipy.sparse.(csc.csc_matrix | csr.csr_matrix), List[Any], or
                     Dict[str, numpy.ndarray]
        :return: Model predictions as one of pandas.DataFrame, pandas.Series, numpy.ndarray or list.
        """
        input_schema = self.metadata.get_input_schema()
        if input_schema is not None:
            # Validate/coerce the input against the declared signature before delegating.
            data = _enforce_schema(data, input_schema)
        return self._model_impl.predict(data)

    @property
    def metadata(self):
        """Model metadata."""
        if self._model_meta is None:
            raise MlflowException("Model is missing metadata.")
        return self._model_meta

    def __repr__(self):
        # Summarize run id, artifact path, and flavor loader for debugging output.
        info = {}
        if self._model_meta is not None:
            if hasattr(self._model_meta, "run_id") and self._model_meta.run_id is not None:
                info["run_id"] = self._model_meta.run_id
            if (
                hasattr(self._model_meta, "artifact_path")
                and self._model_meta.artifact_path is not None
            ):
                info["artifact_path"] = self._model_meta.artifact_path
            info["flavor"] = self._model_meta.flavors[FLAVOR_NAME]["loader_module"]
        return yaml.safe_dump({"mlflow.pyfunc.loaded_model": info}, default_flow_style=False)
def _warn_dependency_requirement_mismatches(model_path):
    """
    Inspects the model's dependencies and prints a warning if the current Python environment
    doesn't satisfy them.
    """
    req_file_path = os.path.join(model_path, _REQUIREMENTS_FILE_NAME)
    if not os.path.exists(req_file_path):
        # No requirements file recorded with the model -- nothing to compare against.
        return
    try:
        # Collect a human-readable description of every unsatisfied requirement.
        mismatch_infos = [
            str(info)
            for info in (
                _check_requirement_satisfied(req.req_str)
                for req in _parse_requirements(req_file_path, is_constraint=False)
            )
            if info is not None
        ]
        if mismatch_infos:
            mismatch_str = " - " + "\n - ".join(mismatch_infos)
            warning_msg = (
                "Detected one or more mismatches between the model's dependencies and the current "
                f"Python environment:\n{mismatch_str}\n"
                "To fix the mismatches, call `mlflow.pyfunc.get_model_dependencies(model_uri)` "
                "to fetch the model's environment and install dependencies using the resulting "
                "environment file."
            )
            _logger.warning(warning_msg)
    except Exception as e:
        # This check is best-effort: never let a detection failure break model loading.
        _logger.warning(
            f"Encountered an unexpected error ({repr(e)}) while detecting model dependency "
            "mismatches. Set logging level to DEBUG to see the full traceback."
        )
        _logger.debug("", exc_info=True)
def load_model(
    model_uri: str, suppress_warnings: bool = False, dst_path: str = None
) -> PyFuncModel:
    """
    Load a model stored in Python function format.

    :param model_uri: The location, in URI format, of the MLflow model. For example:

                      - ``/Users/me/path/to/local/model``
                      - ``relative/path/to/local/model``
                      - ``s3://my_bucket/path/to/model``
                      - ``runs:/<mlflow_run_id>/run-relative/path/to/model``
                      - ``models:/<model_name>/<model_version>``
                      - ``models:/<model_name>/<stage>``
                      - ``mlflow-artifacts:/path/to/model``

                      For more information about supported URI schemes, see
                      `Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
                      artifact-locations>`_.
    :param suppress_warnings: If ``True``, non-fatal warning messages associated with the model
                              loading process will be suppressed. If ``False``, these warning
                              messages will be emitted.
    :param dst_path: The local filesystem path to which to download the model artifact.
                     This directory must already exist. If unspecified, a local output
                     path will be created.
    :return: A :py:class:`PyFuncModel` wrapping the loaded implementation and metadata.
    """
    local_model_path = _download_artifact_from_uri(artifact_uri=model_uri, output_path=dst_path)

    if not suppress_warnings:
        _warn_dependency_requirement_mismatches(local_model_path)

    model_meta = Model.load(os.path.join(local_model_path, MLMODEL_FILE_NAME))
    conf = model_meta.flavors.get(FLAVOR_NAME)
    if conf is None:
        raise MlflowException(
            'Model does not have the "{flavor_name}" flavor'.format(flavor_name=FLAVOR_NAME),
            RESOURCE_DOES_NOT_EXIST,
        )

    if not suppress_warnings:
        _warn_potentially_incompatible_py_version_if_necessary(
            model_py_version=conf.get(PY_VERSION)
        )

    # Make any model-bundled code importable before the loader module runs.
    _add_code_from_conf_to_system_path(local_model_path, conf, code_key=CODE)
    data_path = os.path.join(local_model_path, conf[DATA]) if DATA in conf else local_model_path
    loader_module = importlib.import_module(conf[MAIN])
    return PyFuncModel(model_meta=model_meta, model_impl=loader_module._load_pyfunc(data_path))
def _download_model_conda_env(model_uri):
    # Look up the conda env file name recorded in the model's pyfunc flavor
    # configuration, then download that artifact and return its local path.
    flavor_conf = _get_flavor_configuration_from_uri(model_uri, FLAVOR_NAME)
    return _download_artifact_from_uri(append_to_uri_path(model_uri, flavor_conf[ENV]))
def _get_model_dependencies(model_uri, format="pip"):  # pylint: disable=redefined-builtin
    # Resolve a local dependency file for the model at `model_uri`.
    # `format` selects the returned artifact: "pip" yields a requirements.txt
    # path, "conda" yields the conda.yaml path; any other value raises.
    if format == "pip":
        req_file_uri = append_to_uri_path(model_uri, _REQUIREMENTS_FILE_NAME)
        try:
            return _download_artifact_from_uri(req_file_uri)
        except Exception as e:
            # fallback to download conda.yaml file and parse the "pip" section from it.
            _logger.info(
                f"Downloading model '{_REQUIREMENTS_FILE_NAME}' file failed, error is {repr(e)}. "
                "Falling back to fetching pip requirements from the model's 'conda.yaml' file. "
                "Other conda dependencies will be ignored."
            )
            conda_yml_path = _download_model_conda_env(model_uri)
            with open(conda_yml_path, "r") as yf:
                conda_yml = yaml.safe_load(yf)
            conda_deps = conda_yml.get("dependencies", [])
            # Locate the dict entry of the form {"pip": [...]} in the conda
            # dependency list; the for/else raises when no such entry exists.
            for index, dep in enumerate(conda_deps):
                if isinstance(dep, dict) and "pip" in dep:
                    pip_deps_index = index
                    break
            else:
                raise MlflowException(
                    "No pip section found in conda.yaml file in the model directory.",
                    error_code=RESOURCE_DOES_NOT_EXIST,
                )
            # Remove the pip section and synthesize a requirements.txt in a fresh
            # temp directory. The directory is intentionally not cleaned up here:
            # the caller receives (and owns) the returned file path.
            pip_deps = conda_deps.pop(pip_deps_index)["pip"]
            tmp_dir = tempfile.mkdtemp()
            pip_file_path = os.path.join(tmp_dir, _REQUIREMENTS_FILE_NAME)
            with open(pip_file_path, "w") as f:
                f.write("\n".join(pip_deps) + "\n")
            if len(conda_deps) > 0:
                # NOTE(review): the remaining entries are assumed to be plain
                # strings; a second dict entry here would break the join -- confirm.
                _logger.warning(
                    "The following conda dependencies have been excluded from the environment file:"
                    f" {', '.join(conda_deps)}."
                )
            return pip_file_path
    elif format == "conda":
        conda_yml_path = _download_model_conda_env(model_uri)
        return conda_yml_path
    else:
        raise MlflowException(
            f"Illegal format argument '{format}'.", error_code=INVALID_PARAMETER_VALUE
        )
def get_model_dependencies(model_uri, format="pip"):  # pylint: disable=redefined-builtin
    """
    Fetch the model's dependency file and return its local filesystem path.

    :param model_uri: The uri of the model to get dependencies from.
    :param format: The format of the returned dependency file. With ``"pip"``, the path
                   to a pip ``requirements.txt`` file is returned; with ``"conda"``,
                   the path to a ``"conda.yaml"`` file is returned. If ``"pip"`` is
                   requested but the model was not saved with a ``requirements.txt``
                   file, the ``pip`` section of the model's ``conda.yaml`` file is
                   extracted instead and any additional conda dependencies are
                   ignored. Default value is ``"pip"``.
    :return: The local filesystem path to either a pip ``requirements.txt`` file
             (if ``format="pip"``) or a ``conda.yaml`` file (if ``format="conda"``)
             specifying the model's dependencies.
    """
    dep_file = _get_model_dependencies(model_uri, format)
    if format == "pip":
        # In notebooks, prefix the suggested command with "%" so it runs as a magic.
        if _is_in_ipython_notebook():
            prefix = "%"
        else:
            prefix = ""
        _logger.info(
            "To install the dependencies that were used to train the model, run the "
            f"following command: '{prefix}pip install -r {dep_file}'."
        )
    return dep_file
@deprecated("mlflow.pyfunc.load_model", 1.0)
def load_pyfunc(model_uri, suppress_warnings=False):
    """
    Load a model stored in Python function format.

    Deprecated alias for :py:func:`load_model`; delegates directly to it.

    :param model_uri: The location, in URI format, of the MLflow model. For example:

                      - ``/Users/me/path/to/local/model``
                      - ``relative/path/to/local/model``
                      - ``s3://my_bucket/path/to/model``
                      - ``runs:/<mlflow_run_id>/run-relative/path/to/model``
                      - ``models:/<model_name>/<model_version>``
                      - ``models:/<model_name>/<stage>``
                      - ``mlflow-artifacts:/path/to/model``

                      For more information about supported URI schemes, see
                      `Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
                      artifact-locations>`_.
    :param suppress_warnings: If ``True``, non-fatal warning messages associated with the model
                              loading process will be suppressed. If ``False``, these warning
                              messages will be emitted.
    """
    return load_model(model_uri, suppress_warnings=suppress_warnings)
def _warn_potentially_incompatible_py_version_if_necessary(model_py_version=None):
    """
    Compares the version of Python that was used to save a given model with the version
    of Python that is currently running. If a major or minor version difference is detected,
    logs an appropriate warning.
    """
    if model_py_version is None:
        _logger.warning(
            "The specified model does not have a specified Python version. It may be"
            " incompatible with the version of Python that is currently running: Python %s",
            PYTHON_VERSION,
        )
        return
    # Only major.minor is compared; patch-level differences are considered compatible.
    saved = get_major_minor_py_version(model_py_version)
    running = get_major_minor_py_version(PYTHON_VERSION)
    if saved != running:
        _logger.warning(
            "The version of Python that the model was saved in, `Python %s`, differs"
            " from the version of Python that is currently running, `Python %s`,"
            " and may be incompatible",
            model_py_version,
            PYTHON_VERSION,
        )
def _create_model_downloading_tmp_dir(should_use_nfs):
    """Create and return a fresh temp directory under the model download cache root."""
    # Downloads go under the NFS-backed temp root when NFS is available so that
    # all cluster nodes can see them; otherwise under the local temp root.
    root_tmp_dir = get_or_create_nfs_tmp_dir() if should_use_nfs else get_or_create_tmp_dir()
    root_model_cache_dir = os.path.join(root_tmp_dir, "models")
    os.makedirs(root_model_cache_dir, exist_ok=True)

    tmp_model_dir = tempfile.mkdtemp(dir=root_model_cache_dir)
    # mkdtemp creates a directory with permission 0o700
    # change it to be 0o777 to ensure it can be seen in spark UDF
    os.chmod(tmp_model_dir, 0o777)
    return tmp_model_dir
@cache_return_value_per_process
def _get_or_create_env_root_dir(should_use_nfs):
    """Return the per-process root directory under which restored envs are created."""
    # Prefer the NFS-backed temp root when available so executors share envs.
    root_tmp_dir = get_or_create_nfs_tmp_dir() if should_use_nfs else get_or_create_tmp_dir()
    env_root_dir = os.path.join(root_tmp_dir, "envs")
    os.makedirs(env_root_dir, exist_ok=True)
    return env_root_dir
# Number of trailing lines of scoring-server output retained in memory, for
# inclusion in the error message when the server fails to launch in a UDF task.
_MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP = 200
def spark_udf(spark, model_uri, result_type="double", env_manager="local"):
    """
    A Spark UDF that can be used to invoke the Python function formatted model.

    Parameters passed to the UDF are forwarded to the model as a DataFrame where the column names
    are ordinals (0, 1, ...). On some versions of Spark (3.0 and above), it is also possible to
    wrap the input in a struct. In that case, the data will be passed as a DataFrame with column
    names given by the struct definition (e.g. when invoked as my_udf(struct('x', 'y')), the model
    will get the data as a pandas DataFrame with 2 columns 'x' and 'y').

    If a model contains a signature, the UDF can be called without specifying column name
    arguments. In this case, the UDF will be called with column names from signature, so the
    evaluation dataframe's column names must match the model signature's column names.

    The predictions are filtered to contain only the columns that can be represented as the
    ``result_type``. If the ``result_type`` is string or array of strings, all predictions are
    converted to string. If the result type is not an array type, the left most column with
    matching type is returned.

    NOTE: Inputs of type ``pyspark.sql.types.DateType`` are not supported on earlier versions of
    Spark (2.4 and below).

    .. code-block:: python
        :caption: Example

        from pyspark.sql.functions import struct

        predict = mlflow.pyfunc.spark_udf(spark, "/my/local/model")
        df.withColumn("prediction", predict(struct("name", "age"))).show()

    :param spark: A SparkSession object.
    :param model_uri: The location, in URI format, of the MLflow model with the
                      :py:mod:`mlflow.pyfunc` flavor. For example:

                      - ``/Users/me/path/to/local/model``
                      - ``relative/path/to/local/model``
                      - ``s3://my_bucket/path/to/model``
                      - ``runs:/<mlflow_run_id>/run-relative/path/to/model``
                      - ``models:/<model_name>/<model_version>``
                      - ``models:/<model_name>/<stage>``
                      - ``mlflow-artifacts:/path/to/model``

                      For more information about supported URI schemes, see
                      `Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
                      artifact-locations>`_.

    :param result_type: the return type of the user-defined function. The value can be either a
        ``pyspark.sql.types.DataType`` object or a DDL-formatted type string. Only a primitive
        type or an array ``pyspark.sql.types.ArrayType`` of primitive type are allowed.
        The following classes of result type are supported:

        - "int" or ``pyspark.sql.types.IntegerType``: The leftmost integer that can fit in an
          ``int32`` or an exception if there is none.

        - "long" or ``pyspark.sql.types.LongType``: The leftmost long integer that can fit in an
          ``int64`` or an exception if there is none.

        - ``ArrayType(IntegerType|LongType)``: All integer columns that can fit into the requested
          size.

        - "float" or ``pyspark.sql.types.FloatType``: The leftmost numeric result cast to
          ``float32`` or an exception if there is none.

        - "double" or ``pyspark.sql.types.DoubleType``: The leftmost numeric result cast to
          ``double`` or an exception if there is none.

        - ``ArrayType(FloatType|DoubleType)``: All numeric columns cast to the requested type or
          an exception if there are no numeric columns.

        - "string" or ``pyspark.sql.types.StringType``: The leftmost column converted to ``string``.

        - ``ArrayType(StringType)``: All columns converted to ``string``.

    :param env_manager: The environment manager to use in order to create the python environment
                        for model inference. Note that environment is only restored in the context
                        of the PySpark UDF; the software environment outside of the UDF is
                        unaffected. Default value is ``local``, and the following values are
                        supported:

                        - ``conda``: (Recommended) Use Conda to restore the software environment
                          that was used to train the model.
                        - ``virtualenv``: Use virtualenv to restore the python environment that
                          was used to train the model.
                        - ``local``: Use the current Python environment for model inference, which
                          may differ from the environment used to train the model and may lead to
                          errors or invalid predictions.

    :return: Spark UDF that applies the model's ``predict`` method to the data and returns a
             type specified by ``result_type``, which by default is a double.
    """
    # Scope Spark import to this method so users don't need pyspark to use non-Spark-related
    # functionality.
    import functools
    from mlflow.pyfunc.spark_model_cache import SparkModelCache
    from mlflow.utils._spark_utils import _SparkDirectoryDistributor
    from pyspark.sql.functions import pandas_udf
    from pyspark.sql.types import _parse_datatype_string
    from pyspark.sql.types import (
        ArrayType,
        DataType as SparkDataType,
        StructType as SparkStructType,
    )
    from pyspark.sql.types import DoubleType, IntegerType, FloatType, LongType, StringType
    from mlflow.models.cli import _get_flavor_backend

    _EnvManager.validate(env_manager)

    # Check whether spark is in local or local-cluster mode
    # this case all executors and driver share the same filesystem
    is_spark_in_local_mode = spark.conf.get("spark.master").startswith("local")

    nfs_root_dir = get_nfs_cache_root_dir()
    should_use_nfs = nfs_root_dir is not None
    # Only broadcast model files via Spark when executors cannot reach the
    # driver's filesystem directly (i.e. neither local mode nor NFS).
    should_use_spark_to_broadcast_file = not (is_spark_in_local_mode or should_use_nfs)
    env_root_dir = _get_or_create_env_root_dir(should_use_nfs)

    # --- validate the requested result type ---------------------------------
    if not isinstance(result_type, SparkDataType):
        result_type = _parse_datatype_string(result_type)

    elem_type = result_type
    if isinstance(elem_type, ArrayType):
        elem_type = elem_type.elementType

    supported_types = [IntegerType, LongType, FloatType, DoubleType, StringType]

    if not any(isinstance(elem_type, x) for x in supported_types):
        raise MlflowException(
            message="Invalid result_type '{}'. Result type can only be one of or an array of one "
            "of the following types: {}".format(str(elem_type), str(supported_types)),
            error_code=INVALID_PARAMETER_VALUE,
        )

    # --- fetch the model to the driver and set up the inference environment --
    local_model_path = _download_artifact_from_uri(
        artifact_uri=model_uri, output_path=_create_model_downloading_tmp_dir(should_use_nfs)
    )

    if env_manager == _EnvManager.LOCAL:
        # Assume spark executor python environment is the same with spark driver side.
        _warn_dependency_requirement_mismatches(local_model_path)
        _logger.warning(
            'Calling `spark_udf()` with `env_manager="local"` does not recreate the same '
            "environment that was used during training, which may lead to errors or inaccurate "
            'predictions. We recommend specifying `env_manager="conda"`, which automatically '
            "recreates the environment that was used to train the model and performs inference "
            "in the recreated environment."
        )
    else:
        _logger.info(
            "This UDF will use Conda to recreate the model's software environment for inference. "
            "This may take extra time during execution."
        )
        if not sys.platform.startswith("linux"):
            # TODO: support killing mlflow server launched in UDF task when spark job canceled
            #  for non-linux system.
            #  https://stackoverflow.com/questions/53208/how-do-i-automatically-destroy-child-processes-in-windows
            _logger.warning(
                "In order to run inference code in restored python environment, PySpark UDF "
                "processes spawn MLflow Model servers as child processes. Due to system "
                "limitations with handling SIGKILL signals, these MLflow Model server child "
                "processes cannot be cleaned up if the Spark Job is canceled."
            )

    if not should_use_spark_to_broadcast_file:
        # Prepare restored environment in driver side if possible.
        # Note: In databricks runtime, because databricks notebook cell output cannot capture
        # child process output, so that set capture_output to be True so that when `conda prepare
        # env` command failed, the exception message will include command stdout/stderr output.
        # Otherwise user have to check cluster driver log to find command stdout/stderr output.
        # In non-databricks runtime, set capture_output to be False, because the benefit of
        # "capture_output=False" is the output will be printed immediately, otherwise you have
        # to wait conda command fail and suddenly get all output printed (included in error
        # message).
        if env_manager != _EnvManager.LOCAL:
            _get_flavor_backend(
                local_model_path,
                env_manager=env_manager,
                install_mlflow=False,
                env_root_dir=env_root_dir,
            ).prepare_env(model_uri=local_model_path, capture_output=is_in_databricks_runtime())

    # Broadcast local model directory to remote worker if needed.
    if should_use_spark_to_broadcast_file:
        archive_path = SparkModelCache.add_local_model(spark, local_model_path)

    model_metadata = Model.load(os.path.join(local_model_path, MLMODEL_FILE_NAME))

    def _predict_row_batch(predict_fn, args):
        # Assemble the UDF arguments into a pandas DataFrame, run predict_fn on
        # it, and coerce the result to the requested Spark result type.
        input_schema = model_metadata.get_input_schema()
        pdf = None

        for x in args:
            if type(x) == pandas.DataFrame:
                if len(args) != 1:
                    raise Exception(
                        "If passing a StructType column, there should be only one "
                        "input column, but got %d" % len(args)
                    )
                pdf = x
        if pdf is None:
            args = list(args)
            if input_schema is None:
                # No signature: columns are named by their ordinal position.
                names = [str(i) for i in range(len(args))]
            else:
                names = input_schema.input_names()
                if len(args) > len(names):
                    args = args[: len(names)]
                if len(args) < len(names):
                    raise MlflowException(
                        "Model input is missing columns. Expected {0} input columns {1},"
                        " but the model received only {2} unnamed input columns"
                        " (Since the columns were passed unnamed they are expected to be in"
                        " the order specified by the schema).".format(len(names), names, len(args))
                    )
            pdf = pandas.DataFrame(data={names[i]: x for i, x in enumerate(args)}, columns=names)

        result = predict_fn(pdf)

        if not isinstance(result, pandas.DataFrame):
            result = pandas.DataFrame(data=result)

        elem_type = result_type.elementType if isinstance(result_type, ArrayType) else result_type

        # Keep only columns representable as the requested element type.
        if type(elem_type) == IntegerType:
            result = result.select_dtypes(
                [np.byte, np.ubyte, np.short, np.ushort, np.int32]
            ).astype(np.int32)

        elif type(elem_type) == LongType:
            result = result.select_dtypes([np.byte, np.ubyte, np.short, np.ushort, int])

        elif type(elem_type) == FloatType:
            result = result.select_dtypes(include=(np.number,)).astype(np.float32)

        elif type(elem_type) == DoubleType:
            result = result.select_dtypes(include=(np.number,)).astype(np.float64)

        if len(result.columns) == 0:
            raise MlflowException(
                message="The the model did not produce any values compatible with the requested "
                "type '{}'. Consider requesting udf with StringType or "
                "Arraytype(StringType).".format(str(elem_type)),
                error_code=INVALID_PARAMETER_VALUE,
            )

        if type(elem_type) == StringType:
            result = result.applymap(str)

        if type(result_type) == ArrayType:
            return pandas.Series(result.to_numpy().tolist())
        else:
            # Non-array result type: return the leftmost matching column.
            return result[result.columns[0]]

    result_type_hint = (
        pandas.DataFrame if isinstance(result_type, SparkStructType) else pandas.Series
    )

    @pandas_udf(result_type)
    def udf(
        iterator: Iterator[Tuple[Union[pandas.Series, pandas.DataFrame], ...]]
    ) -> Iterator[result_type_hint]:
        # importing here to prevent circular import
        from mlflow.pyfunc.scoring_server.client import ScoringServerClient

        # Note: this is a pandas udf function in iteration style, which takes an iterator of
        # tuple of pandas.Series and outputs an iterator of pandas.Series.

        scoring_server_proc = None

        if env_manager != _EnvManager.LOCAL:
            # Restored-environment path: serve the model in a child process and
            # score over HTTP via ScoringServerClient.
            if should_use_spark_to_broadcast_file:
                local_model_path_on_executor = _SparkDirectoryDistributor.get_or_extract(
                    archive_path
                )
                # Create individual conda_env_root_dir for each spark UDF task process.
                env_root_dir_on_executor = _get_or_create_env_root_dir(should_use_nfs)
            else:
                local_model_path_on_executor = local_model_path
                env_root_dir_on_executor = env_root_dir

            pyfunc_backend = _get_flavor_backend(
                local_model_path_on_executor,
                workers=1,
                install_mlflow=False,
                env_manager=env_manager,
                env_root_dir=env_root_dir_on_executor,
            )

            if should_use_spark_to_broadcast_file:
                # Call "prepare_env" in advance in order to reduce scoring server launch time.
                # So that we can use a shorter timeout when call `client.wait_server_ready`,
                # otherwise we have to set a long timeout for `client.wait_server_ready` time,
                # this prevents spark UDF task failing fast if other exception raised when scoring
                # server launching.
                # Set "capture_output" so that if "conda env create" command failed, the command
                # stdout/stderr output will be attached to the exception message and included in
                # driver side exception.
                pyfunc_backend.prepare_env(
                    model_uri=local_model_path_on_executor, capture_output=True
                )

            # launch scoring server
            server_port = find_free_port()
            scoring_server_proc = pyfunc_backend.serve(
                model_uri=local_model_path_on_executor,
                port=server_port,
                host="127.0.0.1",
                timeout=60,
                enable_mlserver=False,
                synchronous=False,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
            )

            server_tail_logs = collections.deque(maxlen=_MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP)

            def server_redirect_log_thread_func(child_stdout):
                # Tee the server's combined stdout/stderr into this task's stdout
                # while retaining the last N lines for error reporting.
                for line in child_stdout:
                    if isinstance(line, bytes):
                        decoded = line.decode()
                    else:
                        decoded = line
                    server_tail_logs.append(decoded)
                    sys.stdout.write("[model server] " + decoded)

            server_redirect_log_thread = threading.Thread(
                target=server_redirect_log_thread_func, args=(scoring_server_proc.stdout,)
            )
            server_redirect_log_thread.setDaemon(True)
            server_redirect_log_thread.start()

            client = ScoringServerClient("127.0.0.1", server_port)

            try:
                client.wait_server_ready(timeout=90, scoring_server_proc=scoring_server_proc)
            except Exception:
                err_msg = "During spark UDF task execution, mlflow model server failed to launch. "
                if len(server_tail_logs) == _MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP:
                    err_msg += (
                        f"Last {_MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP} "
                        "lines of MLflow model server output:\n"
                    )
                else:
                    err_msg += "MLflow model server output:\n"
                err_msg += "".join(server_tail_logs)
                raise MlflowException(err_msg)

            def batch_predict_fn(pdf):
                return client.invoke(pdf)

        elif env_manager == _EnvManager.LOCAL:
            # Local-environment path: load the pyfunc model in-process.
            if should_use_spark_to_broadcast_file:
                loaded_model, _ = SparkModelCache.get_or_load(archive_path)
            else:
                loaded_model = mlflow.pyfunc.load_model(local_model_path)

            def batch_predict_fn(pdf):
                return loaded_model.predict(pdf)

        try:
            for input_batch in iterator:
                # If the UDF is called with only multiple arguments,
                # the `input_batch` is a tuple which composes of several pd.Series/pd.DataFrame
                # objects.
                # If the UDF is called with only one argument,
                # the `input_batch` instance will be an instance of `pd.Series`/`pd.DataFrame`,
                if isinstance(input_batch, (pandas.Series, pandas.DataFrame)):
                    # UDF is called with only one argument
                    row_batch_args = (input_batch,)
                else:
                    row_batch_args = input_batch

                yield _predict_row_batch(batch_predict_fn, row_batch_args)
        finally:
            # Ensure the child scoring server does not outlive the UDF task.
            if scoring_server_proc is not None:
                os.kill(scoring_server_proc.pid, signal.SIGTERM)

    udf.metadata = model_metadata

    @functools.wraps(udf)
    def udf_with_default_cols(*args):
        # When called with no columns, derive them from the model signature.
        if len(args) == 0:
            input_schema = model_metadata.get_input_schema()

            if input_schema and len(input_schema.inputs) > 0:
                if input_schema.has_input_names():
                    input_names = input_schema.input_names()
                    return udf(*input_names)
                else:
                    raise MlflowException(
                        message="Cannot apply udf because no column names specified. The udf "
                        "expects {} columns with types: {}. Input column names could not be "
                        "inferred from the model signature (column names not found).".format(
                            len(input_schema.inputs),
                            input_schema.inputs,
                        ),
                        error_code=INVALID_PARAMETER_VALUE,
                    )
            else:
                raise MlflowException(
                    "Attempting to apply udf on zero columns because no column names were "
                    "specified as arguments or inferred from the model signature.",
                    error_code=INVALID_PARAMETER_VALUE,
                )
        else:
            return udf(*args)

    return udf_with_default_cols
@format_docstring(LOG_MODEL_PARAM_DOCS.format(package_name="scikit-learn"))
def save_model(
    path,
    loader_module=None,
    data_path=None,
    code_path=None,
    conda_env=None,
    mlflow_model=None,
    python_model=None,
    artifacts=None,
    signature: ModelSignature = None,
    input_example: ModelInputExample = None,
    pip_requirements=None,
    extra_pip_requirements=None,
    **kwargs,
):
    """
    save_model(path, loader_module=None, data_path=None, code_path=None, conda_env=None,\
               mlflow_model=Model(), python_model=None, artifacts=None)

    Save a Pyfunc model with custom inference logic and optional data dependencies to a path on the
    local filesystem.

    For information about the workflows that this method supports, please see :ref:`"workflows for
    creating custom pyfunc models" <pyfunc-create-custom-workflows>` and
    :ref:`"which workflow is right for my use case?" <pyfunc-create-custom-selecting-workflow>`.
    Note that the parameters for the second workflow: ``loader_module``, ``data_path`` and the
    parameters for the first workflow: ``python_model``, ``artifacts``, cannot be
    specified together.

    :param path: The path to which to save the Python model.
    :param loader_module: The name of the Python module that is used to load the model
                          from ``data_path``. This module must define a method with the prototype
                          ``_load_pyfunc(data_path)``. If not ``None``, this module and its
                          dependencies must be included in one of the following locations:

                          - The MLflow library.
                          - Package(s) listed in the model's Conda environment, specified by
                            the ``conda_env`` parameter.
                          - One or more of the files specified by the ``code_path`` parameter.

    :param data_path: Path to a file or directory containing model data.
    :param code_path: A list of local filesystem paths to Python file dependencies (or directories
                      containing file dependencies). These files are *prepended* to the system
                      path before the model is loaded.
    :param conda_env: {{ conda_env }}
    :param mlflow_model: :py:mod:`mlflow.models.Model` configuration to which to add the
                         **python_function** flavor.
    :param python_model: An instance of a subclass of :class:`~PythonModel`. This class is
                         serialized using the CloudPickle library. Any dependencies of the class
                         should be included in one of the following locations:

                         - The MLflow library.
                         - Package(s) listed in the model's Conda environment, specified by
                           the ``conda_env`` parameter.
                         - One or more of the files specified by the ``code_path`` parameter.

                         Note: If the class is imported from another module, as opposed to being
                         defined in the ``__main__`` scope, the defining module should also be
                         included in one of the listed locations.
    :param artifacts: A dictionary containing ``<name, artifact_uri>`` entries. Remote artifact URIs
                      are resolved to absolute filesystem paths, producing a dictionary of
                      ``<name, absolute_path>`` entries. ``python_model`` can reference these
                      resolved entries as the ``artifacts`` property of the ``context`` parameter
                      in :func:`PythonModel.load_context() <mlflow.pyfunc.PythonModel.load_context>`
                      and :func:`PythonModel.predict() <mlflow.pyfunc.PythonModel.predict>`.
                      For example, consider the following ``artifacts`` dictionary::

                        {
                            "my_file": "s3://my-bucket/path/to/my/file"
                        }

                      In this case, the ``"my_file"`` artifact is downloaded from S3. The
                      ``python_model`` can then refer to ``"my_file"`` as an absolute filesystem
                      path via ``context.artifacts["my_file"]``.

                      If ``None``, no artifacts are added to the model.
    :param signature: :py:class:`ModelSignature <mlflow.models.ModelSignature>`
                      describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
                      The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
                      from datasets with valid model input (e.g. the training dataset with target
                      column omitted) and valid model output (e.g. model predictions generated on
                      the training dataset), for example:

                      .. code-block:: python

                        from mlflow.models.signature import infer_signature

                        train = df.drop_column("target_label")
                        predictions = ...  # compute model predictions
                        signature = infer_signature(train, predictions)
    :param input_example: Input example provides one or several instances of valid
                          model input. The example can be used as a hint of what data to feed the
                          model. The given example can be a Pandas DataFrame where the given
                          example will be serialized to json using the Pandas split-oriented
                          format, or a numpy array where the example will be serialized to json
                          by converting it to a list. Bytes are base64-encoded.
    :param pip_requirements: {{ pip_requirements }}
    :param extra_pip_requirements: {{ extra_pip_requirements }}
    """
    _validate_env_arguments(conda_env, pip_requirements, extra_pip_requirements)
    # Accept the legacy keyword name "model" as an alias for mlflow_model; any
    # other keyword argument is an error.
    mlflow_model = kwargs.pop("model", mlflow_model)
    if len(kwargs) > 0:
        raise TypeError("save_model() got unexpected keyword arguments: {}".format(kwargs))
    if code_path is not None:
        if not isinstance(code_path, list):
            raise TypeError("Argument code_path should be a list, not {}".format(type(code_path)))

    # The two save workflows are mutually exclusive: (loader_module, data_path)
    # vs. (python_model, artifacts). Exactly one set may be populated.
    first_argument_set = {
        "loader_module": loader_module,
        "data_path": data_path,
    }
    second_argument_set = {
        "artifacts": artifacts,
        "python_model": python_model,
    }
    first_argument_set_specified = any(item is not None for item in first_argument_set.values())
    second_argument_set_specified = any(item is not None for item in second_argument_set.values())
    if first_argument_set_specified and second_argument_set_specified:
        raise MlflowException(
            message=(
                "The following sets of parameters cannot be specified together: {first_set_keys}"
                " and {second_set_keys}. All parameters in one set must be `None`. Instead, found"
                " the following values: {first_set_entries} and {second_set_entries}".format(
                    first_set_keys=first_argument_set.keys(),
                    second_set_keys=second_argument_set.keys(),
                    first_set_entries=first_argument_set,
                    second_set_entries=second_argument_set,
                )
            ),
            error_code=INVALID_PARAMETER_VALUE,
        )
    elif (loader_module is None) and (python_model is None):
        msg = (
            "Either `loader_module` or `python_model` must be specified. A `loader_module` "
            "should be a python module. A `python_model` should be a subclass of PythonModel"
        )
        raise MlflowException(message=msg, error_code=INVALID_PARAMETER_VALUE)

    _validate_and_prepare_target_save_path(path)

    if mlflow_model is None:
        mlflow_model = Model()
    if signature is not None:
        mlflow_model.signature = signature
    if input_example is not None:
        _save_example(mlflow_model, input_example, path)

    # Dispatch to the saver matching whichever workflow was specified.
    if first_argument_set_specified:
        return _save_model_with_loader_module_and_data_path(
            path=path,
            loader_module=loader_module,
            data_path=data_path,
            code_paths=code_path,
            conda_env=conda_env,
            mlflow_model=mlflow_model,
            pip_requirements=pip_requirements,
            extra_pip_requirements=extra_pip_requirements,
        )
    elif second_argument_set_specified:
        return mlflow.pyfunc.model._save_model_with_class_artifacts_params(
            path=path,
            python_model=python_model,
            artifacts=artifacts,
            conda_env=conda_env,
            code_paths=code_path,
            mlflow_model=mlflow_model,
            pip_requirements=pip_requirements,
            extra_pip_requirements=extra_pip_requirements,
        )
@format_docstring(LOG_MODEL_PARAM_DOCS.format(package_name="scikit-learn"))
def log_model(
    artifact_path,
    loader_module=None,
    data_path=None,
    code_path=None,
    conda_env=None,
    python_model=None,
    artifacts=None,
    registered_model_name=None,
    signature: ModelSignature = None,
    input_example: ModelInputExample = None,
    await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS,
    pip_requirements=None,
    extra_pip_requirements=None,
):
    """
    Log a Pyfunc model with custom inference logic and optional data dependencies as an MLflow
    artifact for the current run.

    Two mutually exclusive workflows are supported; see :ref:`Workflows for creating custom
    pyfunc models <pyfunc-create-custom-workflows>` and :ref:`Which workflow is right for my
    use case? <pyfunc-create-custom-selecting-workflow>`. Either supply ``loader_module``
    (optionally with ``data_path``) or supply ``python_model`` (optionally with ``artifacts``);
    parameters from the two workflows may not be combined.

    :param artifact_path: The run-relative artifact path to which to log the Python model.
    :param loader_module: The name of the Python module that is used to load the model from
                          ``data_path``. This module must define a method with the prototype
                          ``_load_pyfunc(data_path)``. If not ``None``, this module and its
                          dependencies must be included in one of the following locations:

                          - The MLflow library.
                          - Package(s) listed in the model's Conda environment, specified by
                            the ``conda_env`` parameter.
                          - One or more of the files specified by the ``code_path`` parameter.

    :param data_path: Path to a file or directory containing model data.
    :param code_path: A list of local filesystem paths to Python file dependencies (or directories
                      containing file dependencies). These files are *prepended* to the system
                      path before the model is loaded.
    :param conda_env: {{ conda_env }}
    :param python_model: An instance of a subclass of :class:`~PythonModel`. This class is
                         serialized using the CloudPickle library. Any dependencies of the class
                         should be included in one of the locations listed for ``loader_module``
                         above. Note: if the class is imported from another module, as opposed to
                         being defined in the ``__main__`` scope, the defining module should also
                         be included in one of the listed locations.
    :param artifacts: A dictionary containing ``<name, artifact_uri>`` entries. Remote artifact
                      URIs are resolved to absolute filesystem paths, producing a dictionary of
                      ``<name, absolute_path>`` entries. ``python_model`` can reference these
                      resolved entries as the ``artifacts`` property of the ``context`` parameter
                      in :func:`PythonModel.load_context() <mlflow.pyfunc.PythonModel.load_context>`
                      and :func:`PythonModel.predict() <mlflow.pyfunc.PythonModel.predict>`.
                      If ``None``, no artifacts are added to the model.
    :param registered_model_name: This argument may change or be removed in a future release
                                  without warning. If given, create a model version under
                                  ``registered_model_name``, also creating a registered model if
                                  one with the given name does not exist.
    :param signature: :py:class:`ModelSignature <mlflow.models.ModelSignature>` describing model
                      input and output :py:class:`Schema <mlflow.types.Schema>`. The signature can
                      be :py:func:`inferred <mlflow.models.infer_signature>` from datasets with
                      valid model input and output, for example:

                      .. code-block:: python

                        from mlflow.models.signature import infer_signature
                        train = df.drop_column("target_label")
                        predictions = ...  # compute model predictions
                        signature = infer_signature(train, predictions)
    :param input_example: Input example provides one or several instances of valid model input.
                          The example can be used as a hint of what data to feed the model. A
                          Pandas DataFrame is serialized to json using the Pandas split-oriented
                          format; a numpy array is serialized to json by converting it to a list.
                          Bytes are base64-encoded.
    :param await_registration_for: Number of seconds to wait for the model version to finish
                                   being created and is in ``READY`` status. By default, the
                                   function waits for five minutes. Specify 0 or None to skip
                                   waiting.
    :param pip_requirements: {{ pip_requirements }}
    :param extra_pip_requirements: {{ extra_pip_requirements }}
    :return: A :py:class:`ModelInfo <mlflow.models.model.ModelInfo>` instance that contains the
             metadata of the logged model.
    """
    # Collect the flavor-specific arguments and delegate to the generic Model.log entry
    # point, passing this module (mlflow.pyfunc) as the flavor implementation.
    log_kwargs = dict(
        artifact_path=artifact_path,
        flavor=mlflow.pyfunc,
        loader_module=loader_module,
        data_path=data_path,
        code_path=code_path,
        python_model=python_model,
        artifacts=artifacts,
        conda_env=conda_env,
        registered_model_name=registered_model_name,
        signature=signature,
        input_example=input_example,
        await_registration_for=await_registration_for,
        pip_requirements=pip_requirements,
        extra_pip_requirements=extra_pip_requirements,
    )
    return Model.log(**log_kwargs)
def _save_model_with_loader_module_and_data_path(
    path,
    loader_module,
    data_path=None,
    code_paths=None,
    conda_env=None,
    mlflow_model=None,
    pip_requirements=None,
    extra_pip_requirements=None,
):
    """
    Export model as a generic Python function model.

    :param path: The path to which to save the Python model.
    :param loader_module: The name of the Python module that is used to load the model
                          from ``data_path``. This module must define a method with the prototype
                          ``_load_pyfunc(data_path)``.
    :param data_path: Path to a file or directory containing model data; copied under
                      ``<path>/data`` so the saved artifact is self-contained.
    :param code_paths: A list of local filesystem paths to Python file dependencies (or directories
                       containing file dependencies). These files are *prepended* to the system
                       path before the model is loaded.
    :param conda_env: Either a dictionary representation of a Conda environment or the path to a
                      Conda environment yaml file. If provided, this describes the environment
                      this model should be run in.
    :param mlflow_model: Existing :class:`Model` configuration to add the flavor to; a fresh one
                         is created when ``None``.
    :param pip_requirements: Explicit pip requirements; when ``None`` (and no ``conda_env``),
                             requirements are inferred from the saved model.
    :param extra_pip_requirements: Additional pip requirements merged into the inferred/default
                                   set by ``_process_pip_requirements``.
    :return: Model configuration containing model info.
    """
    data = None
    if data_path is not None:
        # Copy the model data into the export directory; the returned relative path is what
        # gets recorded in the flavor configuration.
        model_file = _copy_file_or_tree(src=data_path, dst=path, dst_dir="data")
        data = model_file
    code_dir_subpath = _validate_and_copy_code_paths(code_paths, path)
    if mlflow_model is None:
        mlflow_model = Model()
    # Record the python_function flavor entries (loader module, code/data subpaths, env file).
    mlflow.pyfunc.add_to_model(
        mlflow_model,
        loader_module=loader_module,
        code=code_dir_subpath,
        data=data,
        env=_CONDA_ENV_FILE_NAME,
    )
    mlflow_model.save(os.path.join(path, MLMODEL_FILE_NAME))
    if conda_env is None:
        if pip_requirements is None:
            default_reqs = get_default_pip_requirements()
            # To ensure `_load_pyfunc` can successfully load the model during the dependency
            # inference, `mlflow_model.save` must be called beforehand to save an MLmodel file.
            inferred_reqs = mlflow.models.infer_pip_requirements(
                path,
                FLAVOR_NAME,
                fallback=default_reqs,
            )
            default_reqs = sorted(set(inferred_reqs).union(default_reqs))
        else:
            # Explicit pip requirements take precedence; no defaults are merged in.
            default_reqs = None
        conda_env, pip_requirements, pip_constraints = _process_pip_requirements(
            default_reqs,
            pip_requirements,
            extra_pip_requirements,
        )
    else:
        conda_env, pip_requirements, pip_constraints = _process_conda_env(conda_env)
    with open(os.path.join(path, _CONDA_ENV_FILE_NAME), "w") as f:
        yaml.safe_dump(conda_env, stream=f, default_flow_style=False)
    # Save `constraints.txt` if necessary
    if pip_constraints:
        write_to(os.path.join(path, _CONSTRAINTS_FILE_NAME), "\n".join(pip_constraints))
    # Save `requirements.txt`
    write_to(os.path.join(path, _REQUIREMENTS_FILE_NAME), "\n".join(pip_requirements))
    # Record the exact Python environment (interpreter version etc.) alongside the model.
    _PythonEnv.current().to_yaml(os.path.join(path, _PYTHON_ENV_FILE_NAME))
    return mlflow_model
loader_template = """
import importlib
import os
import sys
def load_pyfunc():
{update_path}return importlib.import_module('{main}')._load_pyfunc('{data_path}')
"""
| 45.251624 | 114 | 0.649933 |
import importlib
import tempfile
import signal
import sys
import numpy as np
import os
import pandas
import yaml
from copy import deepcopy
import logging
import threading
import collections
import subprocess
from typing import Any, Union, List, Dict, Iterator, Tuple
import mlflow
import mlflow.pyfunc.model
from mlflow.models import Model, ModelSignature, ModelInputExample
from mlflow.models.model import MLMODEL_FILE_NAME
from mlflow.models.utils import _save_example
from mlflow.pyfunc.model import (
PythonModel,
PythonModelContext,
get_default_conda_env,
)
from mlflow.pyfunc.model import get_default_pip_requirements
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.types import DataType, Schema, TensorSpec
from mlflow.types.utils import clean_tensor_type
from mlflow.utils import PYTHON_VERSION, get_major_minor_py_version, _is_in_ipython_notebook
from mlflow.utils.annotations import deprecated
from mlflow.utils.file_utils import _copy_file_or_tree, write_to
from mlflow.utils.model_utils import (
_get_flavor_configuration,
_validate_and_copy_code_paths,
_add_code_from_conf_to_system_path,
_get_flavor_configuration_from_uri,
_validate_and_prepare_target_save_path,
)
from mlflow.utils.uri import append_to_uri_path
from mlflow.utils.environment import (
_validate_env_arguments,
_process_pip_requirements,
_process_conda_env,
_CONDA_ENV_FILE_NAME,
_REQUIREMENTS_FILE_NAME,
_CONSTRAINTS_FILE_NAME,
_PYTHON_ENV_FILE_NAME,
_PythonEnv,
)
from mlflow.utils import env_manager as _EnvManager
from mlflow.utils.docstring_utils import format_docstring, LOG_MODEL_PARAM_DOCS
from mlflow.utils.databricks_utils import is_in_databricks_runtime
from mlflow.utils.file_utils import get_or_create_tmp_dir, get_or_create_nfs_tmp_dir
from mlflow.utils.process import cache_return_value_per_process
from mlflow.exceptions import MlflowException
from mlflow.tracking._model_registry import DEFAULT_AWAIT_MAX_SLEEP_SECONDS
from mlflow.protos.databricks_pb2 import (
INVALID_PARAMETER_VALUE,
RESOURCE_DOES_NOT_EXIST,
)
from scipy.sparse import csc_matrix, csr_matrix
from mlflow.utils.requirements_utils import (
_check_requirement_satisfied,
_parse_requirements,
)
from mlflow.utils import find_free_port
from mlflow.utils.nfs_on_spark import get_nfs_cache_root_dir
# Flavor name recorded in the MLmodel configuration for generic Python-function models.
FLAVOR_NAME = "python_function"

# Keys used within the python_function flavor configuration of an MLmodel file.
MAIN = "loader_module"
CODE = "code"
DATA = "data"
ENV = "env"
PY_VERSION = "python_version"

_logger = logging.getLogger(__name__)

# Input/output types accepted and produced by :meth:`PyFuncModel.predict`.
PyFuncInput = Union[pandas.DataFrame, np.ndarray, csc_matrix, csr_matrix, List[Any], Dict[str, Any]]
PyFuncOutput = Union[pandas.DataFrame, pandas.Series, np.ndarray, list]
def add_to_model(model, loader_module, data=None, code=None, env=None, **kwargs):
    """
    Add a ``python_function`` flavor to the given model configuration.

    :param model: Model configuration to which the flavor is added (via ``add_flavor``).
    :param loader_module: The name of the Python module used to load the model.
    :param data: Relative path to the model data within the model directory, if any.
    :param code: Relative path to the model's code directory, if any.
    :param env: Relative path to the model's environment file, if any.
    :param kwargs: Additional key/value pairs recorded in the flavor configuration.
    :return: The result of ``model.add_flavor``.
    """
    # Copy so the caller's kwargs dict is never mutated.
    flavor_params = deepcopy(kwargs)
    flavor_params[MAIN] = loader_module
    flavor_params[PY_VERSION] = PYTHON_VERSION
    # Optional entries are only recorded when truthy, mirroring the MLmodel format.
    for key, value in ((CODE, code), (DATA, data), (ENV, env)):
        if value:
            flavor_params[key] = value
    return model.add_flavor(FLAVOR_NAME, **flavor_params)
def _load_model_env(path):
    """Return the ``env`` entry of the model's python_function flavor, or ``None``."""
    flavor_conf = _get_flavor_configuration(model_path=path, flavor_name=FLAVOR_NAME)
    return flavor_conf.get(ENV, None)
def _enforce_mlflow_datatype(name, values: pandas.Series, t: DataType):
    """
    Coerce the pandas Series ``values`` to the MLflow :class:`DataType` ``t`` declared for
    column ``name``, or raise :class:`MlflowException` when the conversion is not safe.

    Only lossless "upcasts" are performed; anything else is rejected so that schema
    enforcement never silently corrupts data.
    """
    if values.dtype == object and t not in (DataType.binary, DataType.string):
        # Object columns may hold unparsed values; let pandas infer a concrete dtype first.
        values = values.infer_objects()
    if t == DataType.string and values.dtype == object:
        # Strings are stored as object dtype in pandas; accept as-is.
        return values
    if t.to_pandas() == values.dtype or t.to_numpy() == values.dtype:
        # Exact dtype match: nothing to do.
        return values
    if t == DataType.binary and values.dtype.kind == t.binary.to_numpy().kind:
        # Binary data: a matching dtype kind is sufficient.
        return values
    if t == DataType.datetime and values.dtype.kind == t.to_numpy().kind:
        # Datetime column with a different representation but the same dtype kind; accept.
        return values
    if t == DataType.datetime and values.dtype == object:
        # Attempt to parse object-typed values (e.g. strings) into datetimes.
        try:
            return values.astype(np.datetime64, errors="raise")
        except ValueError:
            raise MlflowException(
                "Failed to convert column {0} from type {1} to {2}.".format(name, values.dtype, t)
            )
    numpy_type = t.to_numpy()
    # Decide whether converting values.dtype -> numpy_type is a lossless upcast.
    if values.dtype.kind == numpy_type.kind:
        is_upcast = values.dtype.itemsize <= numpy_type.itemsize
    elif values.dtype.kind == "u" and numpy_type.kind == "i":
        # unsigned -> signed is only safe when the signed type is strictly wider.
        is_upcast = values.dtype.itemsize < numpy_type.itemsize
    elif values.dtype.kind in ("i", "u") and numpy_type == np.float64:
        # Integers up to 6 bytes (48 bits) fit exactly in a float64 mantissa.
        is_upcast = values.dtype.itemsize <= 6
    else:
        is_upcast = False
    if is_upcast:
        return values.astype(numpy_type, errors="raise")
    else:
        # Helper: True when every non-null value is a whole number.
        def all_ints(xs):
            return all(pandas.isnull(x) or int(x) == x for x in xs)
        hint = ""
        if (
            values.dtype == np.float64
            and numpy_type.kind in ("i", "u")
            and values.hasnans
            and all_ints(values)
        ):
            # Float columns of whole numbers containing NaNs typically originate from integer
            # columns with missing values (pandas cannot store NA in plain integer dtypes).
            hint = (
                " Hint: the type mismatch is likely caused by missing values. "
                "Integer columns in python can not represent missing values and are therefore "
                "encoded as floats. The best way to avoid this problem is to infer the model "
                "schema based on a realistic data sample (training dataset) that includes missing "
                "values. Alternatively, you can declare integer columns as doubles (float64) "
                "whenever these columns may have missing values. See `Handling Integers With "
                "Missing Values <https://www.mlflow.org/docs/latest/models.html#"
                "handling-integers-with-missing-values>`_ for more details."
            )
        raise MlflowException(
            "Incompatible input types for column {0}. "
            "Can not safely convert {1} to {2}.{3}".format(name, values.dtype, numpy_type, hint)
        )
def _enforce_tensor_spec(
    values: Union[np.ndarray, csc_matrix, csr_matrix], tensor_spec: TensorSpec
):
    """
    Validate that ``values`` matches ``tensor_spec``'s shape and dtype and return it unchanged.

    A ``-1`` in the expected shape acts as a wildcard for that dimension. Raises
    :class:`MlflowException` on any shape or dtype mismatch.
    """
    expected_shape = tensor_spec.shape
    actual_shape = values.shape
    # Sparse matrices carry their element dtype on the underlying ``data`` array.
    actual_type = values.dtype if isinstance(values, np.ndarray) else values.data.dtype
    if len(expected_shape) != len(actual_shape):
        raise MlflowException(
            "Shape of input {0} does not match expected shape {1}.".format(
                actual_shape, expected_shape
            )
        )
    for expected, actual in zip(expected_shape, actual_shape):
        if expected == -1:
            # -1 denotes a variable-size dimension; skip the check.
            continue
        if expected != actual:
            raise MlflowException(
                "Shape of input {0} does not match expected shape {1}.".format(
                    actual_shape, expected_shape
                )
            )
    if clean_tensor_type(actual_type) != tensor_spec.type:
        raise MlflowException(
            "dtype of input {0} does not match expected dtype {1}".format(
                values.dtype, tensor_spec.type
            )
        )
    return values
def _enforce_col_schema(pfInput: PyFuncInput, input_schema: Schema):
    """
    Enforce a column-based signature on ``pfInput``: select the schema's columns (by name
    when the schema declares names, otherwise positionally) and coerce each one to its
    declared type, returning a new DataFrame in schema order.
    """
    if input_schema.has_input_names():
        column_names = input_schema.input_names()
    else:
        # Unnamed schema: take the first N columns positionally.
        column_names = pfInput.columns[: len(input_schema.inputs)]
    declared_types = input_schema.input_types()
    coerced = pandas.DataFrame()
    for column_name, declared_type in zip(column_names, declared_types):
        coerced[column_name] = _enforce_mlflow_datatype(
            column_name, pfInput[column_name], declared_type
        )
    return coerced
def _enforce_tensor_schema(pfInput: PyFuncInput, input_schema: Schema):
    """
    Enforce a tensor-based signature on ``pfInput``.

    Named tensor signatures accept a dict mapping input name to numpy array, or a DataFrame
    whose columns are converted per-spec; unnamed signatures accept a single numpy array,
    sparse matrix, or DataFrame (converted to one array). Raises :class:`MlflowException`
    for unsupported input types or spec mismatches.
    """
    if input_schema.has_input_names():
        if isinstance(pfInput, dict):
            new_pfInput = dict()
            for col_name, tensor_spec in zip(input_schema.input_names(), input_schema.inputs):
                if not isinstance(pfInput[col_name], np.ndarray):
                    raise MlflowException(
                        "This model contains a tensor-based model signature with input names,"
                        " which suggests a dictionary input mapping input name to a numpy"
                        " array, but a dict with value type {0} was found.".format(
                            type(pfInput[col_name])
                        )
                    )
                new_pfInput[col_name] = _enforce_tensor_spec(pfInput[col_name], tensor_spec)
        elif isinstance(pfInput, pandas.DataFrame):
            new_pfInput = dict()
            for col_name, tensor_spec in zip(input_schema.input_names(), input_schema.inputs):
                # Convert each DataFrame column to an array of the spec's dtype before checking.
                new_pfInput[col_name] = _enforce_tensor_spec(
                    np.array(pfInput[col_name], dtype=tensor_spec.type), tensor_spec
                )
        else:
            raise MlflowException(
                "This model contains a tensor-based model signature with input names, which"
                " suggests a dictionary input mapping input name to tensor, but an input of"
                " type {0} was found.".format(type(pfInput))
            )
    else:
        # Unnamed signature: exactly one tensor spec applies to the whole input.
        if isinstance(pfInput, pandas.DataFrame):
            new_pfInput = _enforce_tensor_spec(pfInput.to_numpy(), input_schema.inputs[0])
        elif isinstance(pfInput, (np.ndarray, csc_matrix, csr_matrix)):
            new_pfInput = _enforce_tensor_spec(pfInput, input_schema.inputs[0])
        else:
            raise MlflowException(
                "This model contains a tensor-based model signature with no input names,"
                " which suggests a numpy array input, but an input of type {0} was"
                " found.".format(type(pfInput))
            )
    return new_pfInput
def _enforce_schema(pfInput: PyFuncInput, input_schema: Schema):
    """
    Enforce the model's input schema on ``pfInput`` and return the (possibly converted) input.

    For column-based signatures the input is converted to a DataFrame, checked for missing
    columns, and type-coerced via :func:`_enforce_col_schema`. For tensor-based signatures
    shapes/dtypes are validated via :func:`_enforce_tensor_schema`.
    """
    if not input_schema.is_tensor_spec():
        # Column-based signature: normalize list/array/dict inputs into a DataFrame.
        if isinstance(pfInput, (list, np.ndarray, dict)):
            try:
                pfInput = pandas.DataFrame(pfInput)
            except Exception as e:
                raise MlflowException(
                    "This model contains a column-based signature, which suggests a DataFrame"
                    " input. There was an error casting the input data to a DataFrame:"
                    " {0}".format(str(e))
                )
        if not isinstance(pfInput, pandas.DataFrame):
            raise MlflowException(
                "Expected input to be DataFrame or list. Found: %s" % type(pfInput).__name__
            )
    if input_schema.has_input_names():
        # Named schema: verify every declared input is present.
        input_names = input_schema.input_names()
        expected_cols = set(input_names)
        actual_cols = set()
        if len(expected_cols) == 1 and isinstance(pfInput, np.ndarray):
            # Single named tensor input provided as a bare array: wrap it in a dict.
            pfInput = {input_names[0]: pfInput}
            actual_cols = expected_cols
        elif isinstance(pfInput, pandas.DataFrame):
            actual_cols = set(pfInput.columns)
        elif isinstance(pfInput, dict):
            actual_cols = set(pfInput.keys())
        missing_cols = expected_cols - actual_cols
        extra_cols = actual_cols - expected_cols
        # Preserve the schema's declared ordering in the error message.
        missing_cols = [c for c in input_names if c in missing_cols]
        extra_cols = [c for c in actual_cols if c in extra_cols]
        if missing_cols:
            raise MlflowException(
                "Model is missing inputs {0}."
                " Note that there were extra inputs: {1}".format(missing_cols, extra_cols)
            )
    elif not input_schema.is_tensor_spec():
        # Unnamed column-based schema: only the column count can be verified.
        num_actual_columns = len(pfInput.columns)
        if num_actual_columns < len(input_schema.inputs):
            raise MlflowException(
                "Model inference is missing inputs. The model signature declares "
                "{0} inputs but the provided value only has "
                "{1} inputs. Note: the inputs were not named in the signature so we can "
                "only verify their count.".format(len(input_schema.inputs), num_actual_columns)
            )
    return (
        _enforce_tensor_schema(pfInput, input_schema)
        if input_schema.is_tensor_spec()
        else _enforce_col_schema(pfInput, input_schema)
    )
class PyFuncModel:
    """
    MLflow "python function" model.

    Pairs the model's metadata (the parsed ``MLmodel`` configuration) with a loaded
    flavor-specific implementation object exposing a ``predict`` method. When the model was
    saved with an input signature, inputs passed to :meth:`predict` are validated and coerced
    against it first.
    """

    def __init__(self, model_meta: Model, model_impl: Any):
        # ``model_impl`` may be any object, as long as it exposes a ``predict`` method.
        if not hasattr(model_impl, "predict"):
            raise MlflowException("Model implementation is missing required predict method.")
        if not model_meta:
            raise MlflowException("Model is missing metadata.")
        self._model_meta = model_meta
        self._model_impl = model_impl

    def predict(self, data: PyFuncInput) -> PyFuncOutput:
        """
        Generate model predictions for ``data``, first enforcing the model's input schema
        (column names/types or tensor specs) when one was recorded at save time.
        """
        input_schema = self.metadata.get_input_schema()
        if input_schema is not None:
            data = _enforce_schema(data, input_schema)
        return self._model_impl.predict(data)

    @property
    def metadata(self):
        """Model metadata (the parsed ``MLmodel`` configuration)."""
        if self._model_meta is None:
            raise MlflowException("Model is missing metadata.")
        return self._model_meta

    def __repr__(self):
        # Render the identifying metadata (run id, artifact path, loader module) as YAML.
        info = {}
        if self._model_meta is not None:
            if hasattr(self._model_meta, "run_id") and self._model_meta.run_id is not None:
                info["run_id"] = self._model_meta.run_id
            if (
                hasattr(self._model_meta, "artifact_path")
                and self._model_meta.artifact_path is not None
            ):
                info["artifact_path"] = self._model_meta.artifact_path
            info["flavor"] = self._model_meta.flavors[FLAVOR_NAME]["loader_module"]
        return yaml.safe_dump({"mlflow.pyfunc.loaded_model": info}, default_flow_style=False)
def _warn_dependency_requirement_mismatches(model_path):
    """
    Compare the model's recorded pip requirements against the packages installed in the
    current Python environment and log a warning describing any mismatches.

    Best-effort: when no requirements file exists this is a no-op, and any unexpected
    failure is logged (WARNING + DEBUG traceback) rather than raised.
    """
    requirements_file = os.path.join(model_path, _REQUIREMENTS_FILE_NAME)
    if not os.path.exists(requirements_file):
        # Nothing was recorded for this model; nothing to check.
        return
    try:
        mismatch_infos = [
            str(mismatch)
            for mismatch in (
                _check_requirement_satisfied(req.req_str)
                for req in _parse_requirements(requirements_file, is_constraint=False)
            )
            if mismatch is not None
        ]
        if mismatch_infos:
            mismatch_str = " - " + "\n - ".join(mismatch_infos)
            warning_msg = (
                "Detected one or more mismatches between the model's dependencies and the current "
                f"Python environment:\n{mismatch_str}\n"
                "To fix the mismatches, call `mlflow.pyfunc.get_model_dependencies(model_uri)` "
                "to fetch the model's environment and install dependencies using the resulting "
                "environment file."
            )
            _logger.warning(warning_msg)
    except Exception as e:
        _logger.warning(
            f"Encountered an unexpected error ({repr(e)}) while detecting model dependency "
            "mismatches. Set logging level to DEBUG to see the full traceback."
        )
        _logger.debug("", exc_info=True)
def load_model(
    model_uri: str, suppress_warnings: bool = False, dst_path: str = None
) -> PyFuncModel:
    """
    Load a model stored in Python function format.

    :param model_uri: URI of the model artifact to load.
    :param suppress_warnings: When ``True``, skips both the dependency-mismatch warning and
                              the Python-version-compatibility warning.
    :param dst_path: Local directory to download the model artifact into; a default location
                     is used when ``None``.
    :return: A :class:`PyFuncModel` wrapping the loaded implementation and its metadata.
    :raises MlflowException: if the model has no ``python_function`` flavor.
    """
    local_path = _download_artifact_from_uri(artifact_uri=model_uri, output_path=dst_path)
    if not suppress_warnings:
        _warn_dependency_requirement_mismatches(local_path)
    model_meta = Model.load(os.path.join(local_path, MLMODEL_FILE_NAME))
    conf = model_meta.flavors.get(FLAVOR_NAME)
    if conf is None:
        raise MlflowException(
            'Model does not have the "{flavor_name}" flavor'.format(flavor_name=FLAVOR_NAME),
            RESOURCE_DOES_NOT_EXIST,
        )
    model_py_version = conf.get(PY_VERSION)
    if not suppress_warnings:
        _warn_potentially_incompatible_py_version_if_necessary(model_py_version=model_py_version)
    # Prepend any model-specific code directories to sys.path BEFORE importing the loader module.
    _add_code_from_conf_to_system_path(local_path, conf, code_key=CODE)
    # The loader receives the model's "data" subdirectory when one was recorded, else the root.
    data_path = os.path.join(local_path, conf[DATA]) if (DATA in conf) else local_path
    model_impl = importlib.import_module(conf[MAIN])._load_pyfunc(data_path)
    return PyFuncModel(model_meta=model_meta, model_impl=model_impl)
def _download_model_conda_env(model_uri):
    """Download the model's conda environment file and return its local path."""
    env_file_name = _get_flavor_configuration_from_uri(model_uri, FLAVOR_NAME)[ENV]
    env_uri = append_to_uri_path(model_uri, env_file_name)
    return _download_artifact_from_uri(env_uri)
def _get_model_dependencies(model_uri, format="pip"):
    """
    Download the model's dependency specification and return a local file path to it.

    :param model_uri: URI of the model whose dependencies to fetch.
    :param format: ``"pip"`` returns a requirements.txt-style file, falling back to the pip
                   section of ``conda.yaml`` when no requirements file was logged;
                   ``"conda"`` returns the ``conda.yaml`` file itself.
    :raises MlflowException: if ``format`` is unrecognized, or if no pip section exists
                             during the conda.yaml fallback.
    """
    if format == "pip":
        req_file_uri = append_to_uri_path(model_uri, _REQUIREMENTS_FILE_NAME)
        try:
            return _download_artifact_from_uri(req_file_uri)
        except Exception as e:
            # Older models may lack a requirements file; fall back to conda.yaml below.
            _logger.info(
                f"Downloading model '{_REQUIREMENTS_FILE_NAME}' file failed, error is {repr(e)}. "
                "Falling back to fetching pip requirements from the model's 'conda.yaml' file. "
                "Other conda dependencies will be ignored."
            )
        conda_yml_path = _download_model_conda_env(model_uri)
        with open(conda_yml_path, "r") as yf:
            conda_yml = yaml.safe_load(yf)
        conda_deps = conda_yml.get("dependencies", [])
        # Locate the nested {"pip": [...]} entry within the conda dependency list; the
        # for/else raises when no such entry is found.
        for index, dep in enumerate(conda_deps):
            if isinstance(dep, dict) and "pip" in dep:
                pip_deps_index = index
                break
        else:
            raise MlflowException(
                "No pip section found in conda.yaml file in the model directory.",
                error_code=RESOURCE_DOES_NOT_EXIST,
            )
        pip_deps = conda_deps.pop(pip_deps_index)["pip"]
        # The temp dir is intentionally left in place: the caller keeps using the file.
        tmp_dir = tempfile.mkdtemp()
        pip_file_path = os.path.join(tmp_dir, _REQUIREMENTS_FILE_NAME)
        with open(pip_file_path, "w") as f:
            f.write("\n".join(pip_deps) + "\n")
        if len(conda_deps) > 0:
            # NOTE(review): assumes the remaining conda deps are plain strings — a second
            # dict entry here would break the join; verify the conda.yaml format if this fires.
            _logger.warning(
                "The following conda dependencies have been excluded from the environment file:"
                f" {', '.join(conda_deps)}."
            )
        return pip_file_path
    elif format == "conda":
        conda_yml_path = _download_model_conda_env(model_uri)
        return conda_yml_path
    else:
        raise MlflowException(
            f"Illegal format argument '{format}'.", error_code=INVALID_PARAMETER_VALUE
        )
def get_model_dependencies(model_uri, format="pip"):  # pylint: disable=redefined-builtin
    """
    Fetch the model's dependency specification file and return its local path.

    When ``format`` is ``"pip"``, also log a hint showing how to install the dependencies.
    """
    dependencies_path = _get_model_dependencies(model_uri, format)
    if format == "pip":
        # Inside an IPython/Jupyter notebook the pip command is run as a magic, hence the "%".
        prefix = "%" if _is_in_ipython_notebook() else ""
        _logger.info(
            "To install the dependencies that were used to train the model, run the "
            f"following command: '{prefix}pip install -r {dependencies_path}'."
        )
    return dependencies_path
@deprecated("mlflow.pyfunc.load_model", 1.0)
def load_pyfunc(model_uri, suppress_warnings=False):
    """Deprecated alias for :func:`load_model`; use ``mlflow.pyfunc.load_model`` instead."""
    return load_model(model_uri=model_uri, suppress_warnings=suppress_warnings)
def _warn_potentially_incompatible_py_version_if_necessary(model_py_version=None):
    """
    Log a warning when the Python version the model was saved under is unknown, or differs
    (at major.minor granularity) from the running interpreter's version.
    """
    if model_py_version is None:
        _logger.warning(
            "The specified model does not have a specified Python version. It may be"
            " incompatible with the version of Python that is currently running: Python %s",
            PYTHON_VERSION,
        )
        return
    saved_version = get_major_minor_py_version(model_py_version)
    running_version = get_major_minor_py_version(PYTHON_VERSION)
    if saved_version != running_version:
        _logger.warning(
            "The version of Python that the model was saved in, `Python %s`, differs"
            " from the version of Python that is currently running, `Python %s`,"
            " and may be incompatible",
            model_py_version,
            PYTHON_VERSION,
        )
def _create_model_downloading_tmp_dir(should_use_nfs):
    """
    Create and return a fresh temporary directory (under the NFS tmp root when
    ``should_use_nfs`` is truthy, else the local tmp root) for downloading a model into.
    """
    base_tmp_dir = get_or_create_nfs_tmp_dir() if should_use_nfs else get_or_create_tmp_dir()
    model_cache_dir = os.path.join(base_tmp_dir, "models")
    os.makedirs(model_cache_dir, exist_ok=True)
    tmp_model_dir = tempfile.mkdtemp(dir=model_cache_dir)
    # mkdtemp creates a directory with permission 0o700;
    # change it to be 0o777 to ensure it can be seen in spark UDF
    os.chmod(tmp_model_dir, 0o777)
    return tmp_model_dir
@cache_return_value_per_process
def _get_or_create_env_root_dir(should_use_nfs):
    """
    Return the per-process root directory under which restored model environments are
    created (under the NFS tmp root when ``should_use_nfs`` is truthy, else the local
    tmp root). Cached so each process resolves it once.
    """
    base_tmp_dir = get_or_create_nfs_tmp_dir() if should_use_nfs else get_or_create_tmp_dir()
    env_root_dir = os.path.join(base_tmp_dir, "envs")
    os.makedirs(env_root_dir, exist_ok=True)
    return env_root_dir
# Maximum number of trailing stdout/stderr lines from a spawned MLflow scoring server that
# are retained (see the deque in `spark_udf`) for surfacing when diagnosing failures.
_MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP = 200
def spark_udf(spark, model_uri, result_type="double", env_manager="local"):
# Scope Spark import to this method so users don't need pyspark to use non-Spark-related
import functools
from mlflow.pyfunc.spark_model_cache import SparkModelCache
from mlflow.utils._spark_utils import _SparkDirectoryDistributor
from pyspark.sql.functions import pandas_udf
from pyspark.sql.types import _parse_datatype_string
from pyspark.sql.types import (
ArrayType,
DataType as SparkDataType,
StructType as SparkStructType,
)
from pyspark.sql.types import DoubleType, IntegerType, FloatType, LongType, StringType
from mlflow.models.cli import _get_flavor_backend
_EnvManager.validate(env_manager)
is_spark_in_local_mode = spark.conf.get("spark.master").startswith("local")
nfs_root_dir = get_nfs_cache_root_dir()
should_use_nfs = nfs_root_dir is not None
should_use_spark_to_broadcast_file = not (is_spark_in_local_mode or should_use_nfs)
env_root_dir = _get_or_create_env_root_dir(should_use_nfs)
if not isinstance(result_type, SparkDataType):
result_type = _parse_datatype_string(result_type)
elem_type = result_type
if isinstance(elem_type, ArrayType):
elem_type = elem_type.elementType
supported_types = [IntegerType, LongType, FloatType, DoubleType, StringType]
if not any(isinstance(elem_type, x) for x in supported_types):
raise MlflowException(
message="Invalid result_type '{}'. Result type can only be one of or an array of one "
"of the following types: {}".format(str(elem_type), str(supported_types)),
error_code=INVALID_PARAMETER_VALUE,
)
local_model_path = _download_artifact_from_uri(
artifact_uri=model_uri, output_path=_create_model_downloading_tmp_dir(should_use_nfs)
)
if env_manager == _EnvManager.LOCAL:
_warn_dependency_requirement_mismatches(local_model_path)
_logger.warning(
'Calling `spark_udf()` with `env_manager="local"` does not recreate the same '
"environment that was used during training, which may lead to errors or inaccurate "
'predictions. We recommend specifying `env_manager="conda"`, which automatically '
"recreates the environment that was used to train the model and performs inference "
"in the recreated environment."
)
else:
_logger.info(
"This UDF will use Conda to recreate the model's software environment for inference. "
"This may take extra time during execution."
)
if not sys.platform.startswith("linux"):
# TODO: support killing mlflow server launched in UDF task when spark job canceled
# for non-linux system.
# https://stackoverflow.com/questions/53208/how-do-i-automatically-destroy-child-processes-in-windows
_logger.warning(
"In order to run inference code in restored python environment, PySpark UDF "
"processes spawn MLflow Model servers as child processes. Due to system "
"limitations with handling SIGKILL signals, these MLflow Model server child "
"processes cannot be cleaned up if the Spark Job is canceled."
)
if not should_use_spark_to_broadcast_file:
# Prepare restored environment in driver side if possible.
# Note: In databricks runtime, because databricks notebook cell output cannot capture
# child process output, so that set capture_output to be True so that when `conda prepare
# env` command failed, the exception message will include command stdout/stderr output.
# Otherwise user have to check cluster driver log to find command stdout/stderr output.
# In non-databricks runtime, set capture_output to be False, because the benefit of
# "capture_output=False" is the output will be printed immediately, otherwise you have
# to wait conda command fail and suddenly get all output printed (included in error
# message).
if env_manager != _EnvManager.LOCAL:
_get_flavor_backend(
local_model_path,
env_manager=env_manager,
install_mlflow=False,
env_root_dir=env_root_dir,
).prepare_env(model_uri=local_model_path, capture_output=is_in_databricks_runtime())
# Broadcast local model directory to remote worker if needed.
if should_use_spark_to_broadcast_file:
archive_path = SparkModelCache.add_local_model(spark, local_model_path)
model_metadata = Model.load(os.path.join(local_model_path, MLMODEL_FILE_NAME))
def _predict_row_batch(predict_fn, args):
input_schema = model_metadata.get_input_schema()
pdf = None
for x in args:
if type(x) == pandas.DataFrame:
if len(args) != 1:
raise Exception(
"If passing a StructType column, there should be only one "
"input column, but got %d" % len(args)
)
pdf = x
if pdf is None:
args = list(args)
if input_schema is None:
names = [str(i) for i in range(len(args))]
else:
names = input_schema.input_names()
if len(args) > len(names):
args = args[: len(names)]
if len(args) < len(names):
raise MlflowException(
"Model input is missing columns. Expected {0} input columns {1},"
" but the model received only {2} unnamed input columns"
" (Since the columns were passed unnamed they are expected to be in"
" the order specified by the schema).".format(len(names), names, len(args))
)
pdf = pandas.DataFrame(data={names[i]: x for i, x in enumerate(args)}, columns=names)
result = predict_fn(pdf)
if not isinstance(result, pandas.DataFrame):
result = pandas.DataFrame(data=result)
elem_type = result_type.elementType if isinstance(result_type, ArrayType) else result_type
if type(elem_type) == IntegerType:
result = result.select_dtypes(
[np.byte, np.ubyte, np.short, np.ushort, np.int32]
).astype(np.int32)
elif type(elem_type) == LongType:
result = result.select_dtypes([np.byte, np.ubyte, np.short, np.ushort, int])
elif type(elem_type) == FloatType:
result = result.select_dtypes(include=(np.number,)).astype(np.float32)
elif type(elem_type) == DoubleType:
result = result.select_dtypes(include=(np.number,)).astype(np.float64)
if len(result.columns) == 0:
raise MlflowException(
message="The the model did not produce any values compatible with the requested "
"type '{}'. Consider requesting udf with StringType or "
"Arraytype(StringType).".format(str(elem_type)),
error_code=INVALID_PARAMETER_VALUE,
)
if type(elem_type) == StringType:
result = result.applymap(str)
if type(result_type) == ArrayType:
return pandas.Series(result.to_numpy().tolist())
else:
return result[result.columns[0]]
result_type_hint = (
pandas.DataFrame if isinstance(result_type, SparkStructType) else pandas.Series
)
@pandas_udf(result_type)
def udf(
iterator: Iterator[Tuple[Union[pandas.Series, pandas.DataFrame], ...]]
) -> Iterator[result_type_hint]:
# importing here to prevent circular import
from mlflow.pyfunc.scoring_server.client import ScoringServerClient
# Note: this is a pandas udf function in iteration style, which takes an iterator of
# tuple of pandas.Series and outputs an iterator of pandas.Series.
scoring_server_proc = None
if env_manager != _EnvManager.LOCAL:
if should_use_spark_to_broadcast_file:
local_model_path_on_executor = _SparkDirectoryDistributor.get_or_extract(
archive_path
)
# Create individual conda_env_root_dir for each spark UDF task process.
env_root_dir_on_executor = _get_or_create_env_root_dir(should_use_nfs)
else:
local_model_path_on_executor = local_model_path
env_root_dir_on_executor = env_root_dir
pyfunc_backend = _get_flavor_backend(
local_model_path_on_executor,
workers=1,
install_mlflow=False,
env_manager=env_manager,
env_root_dir=env_root_dir_on_executor,
)
if should_use_spark_to_broadcast_file:
# Call "prepare_env" in advance in order to reduce scoring server launch time.
# So that we can use a shorter timeout when call `client.wait_server_ready`,
# otherwise we have to set a long timeout for `client.wait_server_ready` time,
# this prevents spark UDF task failing fast if other exception raised when scoring
# server launching.
# Set "capture_output" so that if "conda env create" command failed, the command
# stdout/stderr output will be attached to the exception message and included in
# driver side exception.
pyfunc_backend.prepare_env(
model_uri=local_model_path_on_executor, capture_output=True
)
# launch scoring server
server_port = find_free_port()
scoring_server_proc = pyfunc_backend.serve(
model_uri=local_model_path_on_executor,
port=server_port,
host="127.0.0.1",
timeout=60,
enable_mlserver=False,
synchronous=False,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
server_tail_logs = collections.deque(maxlen=_MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP)
def server_redirect_log_thread_func(child_stdout):
for line in child_stdout:
if isinstance(line, bytes):
decoded = line.decode()
else:
decoded = line
server_tail_logs.append(decoded)
sys.stdout.write("[model server] " + decoded)
server_redirect_log_thread = threading.Thread(
target=server_redirect_log_thread_func, args=(scoring_server_proc.stdout,)
)
server_redirect_log_thread.setDaemon(True)
server_redirect_log_thread.start()
client = ScoringServerClient("127.0.0.1", server_port)
try:
client.wait_server_ready(timeout=90, scoring_server_proc=scoring_server_proc)
except Exception:
err_msg = "During spark UDF task execution, mlflow model server failed to launch. "
if len(server_tail_logs) == _MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP:
err_msg += (
f"Last {_MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP} "
"lines of MLflow model server output:\n"
)
else:
err_msg += "MLflow model server output:\n"
err_msg += "".join(server_tail_logs)
raise MlflowException(err_msg)
def batch_predict_fn(pdf):
return client.invoke(pdf)
elif env_manager == _EnvManager.LOCAL:
if should_use_spark_to_broadcast_file:
loaded_model, _ = SparkModelCache.get_or_load(archive_path)
else:
loaded_model = mlflow.pyfunc.load_model(local_model_path)
def batch_predict_fn(pdf):
return loaded_model.predict(pdf)
try:
for input_batch in iterator:
# If the UDF is called with only multiple arguments,
# the `input_batch` is a tuple which composes of several pd.Series/pd.DataFrame
# objects.
# If the UDF is called with only one argument,
# the `input_batch` instance will be an instance of `pd.Series`/`pd.DataFrame`,
if isinstance(input_batch, (pandas.Series, pandas.DataFrame)):
# UDF is called with only one argument
row_batch_args = (input_batch,)
else:
row_batch_args = input_batch
yield _predict_row_batch(batch_predict_fn, row_batch_args)
finally:
if scoring_server_proc is not None:
os.kill(scoring_server_proc.pid, signal.SIGTERM)
udf.metadata = model_metadata
@functools.wraps(udf)
def udf_with_default_cols(*args):
if len(args) == 0:
input_schema = model_metadata.get_input_schema()
if input_schema and len(input_schema.inputs) > 0:
if input_schema.has_input_names():
input_names = input_schema.input_names()
return udf(*input_names)
else:
raise MlflowException(
message="Cannot apply udf because no column names specified. The udf "
"expects {} columns with types: {}. Input column names could not be "
"inferred from the model signature (column names not found).".format(
len(input_schema.inputs),
input_schema.inputs,
),
error_code=INVALID_PARAMETER_VALUE,
)
else:
raise MlflowException(
"Attempting to apply udf on zero columns because no column names were "
"specified as arguments or inferred from the model signature.",
error_code=INVALID_PARAMETER_VALUE,
)
else:
return udf(*args)
return udf_with_default_cols
@format_docstring(LOG_MODEL_PARAM_DOCS.format(package_name="scikit-learn"))
def save_model(
    path,
    loader_module=None,
    data_path=None,
    code_path=None,
    conda_env=None,
    mlflow_model=None,
    python_model=None,
    artifacts=None,
    signature: ModelSignature = None,
    input_example: ModelInputExample = None,
    pip_requirements=None,
    extra_pip_requirements=None,
    **kwargs,
):
    """Save a custom ``python_function`` model to the local path ``path``.

    The model may be defined in exactly one of two mutually exclusive styles:

    - "loader module" style: ``loader_module`` (a python module that exposes
      ``_load_pyfunc``) plus an optional ``data_path``;
    - "python model" style: ``python_model`` plus optional ``artifacts``.

    Specifying parameters from both sets, or from neither, raises
    ``MlflowException`` with ``INVALID_PARAMETER_VALUE``.

    :param path: Local destination directory for the serialized model.
    :param conda_env: Conda environment spec; mutually exclusive with
        ``pip_requirements``/``extra_pip_requirements`` (validated below).
    :param mlflow_model: Optional existing :py:class:`Model` config to
        populate; a fresh one is created when omitted.
    :param signature: Optional model signature attached to the MLmodel file.
    :param input_example: Optional example input persisted alongside the model.
    :return: The result of the style-specific save helper.
    """
    _validate_env_arguments(conda_env, pip_requirements, extra_pip_requirements)
    # "model" is accepted as a legacy alias for `mlflow_model`; any other
    # leftover keyword argument is an error.
    mlflow_model = kwargs.pop("model", mlflow_model)
    if len(kwargs) > 0:
        raise TypeError("save_model() got unexpected keyword arguments: {}".format(kwargs))
    if code_path is not None:
        if not isinstance(code_path, list):
            raise TypeError("Argument code_path should be a list, not {}".format(type(code_path)))
    # The two mutually exclusive parameter sets; at most one of them may
    # contain non-None values.
    first_argument_set = {
        "loader_module": loader_module,
        "data_path": data_path,
    }
    second_argument_set = {
        "artifacts": artifacts,
        "python_model": python_model,
    }
    first_argument_set_specified = any(item is not None for item in first_argument_set.values())
    second_argument_set_specified = any(item is not None for item in second_argument_set.values())
    if first_argument_set_specified and second_argument_set_specified:
        raise MlflowException(
            message=(
                "The following sets of parameters cannot be specified together: {first_set_keys}"
                " and {second_set_keys}. All parameters in one set must be `None`. Instead, found"
                " the following values: {first_set_entries} and {second_set_entries}".format(
                    first_set_keys=first_argument_set.keys(),
                    second_set_keys=second_argument_set.keys(),
                    first_set_entries=first_argument_set,
                    second_set_entries=second_argument_set,
                )
            ),
            error_code=INVALID_PARAMETER_VALUE,
        )
    elif (loader_module is None) and (python_model is None):
        msg = (
            "Either `loader_module` or `python_model` must be specified. A `loader_module` "
            "should be a python module. A `python_model` should be a subclass of PythonModel"
        )
        raise MlflowException(message=msg, error_code=INVALID_PARAMETER_VALUE)
    _validate_and_prepare_target_save_path(path)
    if mlflow_model is None:
        mlflow_model = Model()
    if signature is not None:
        mlflow_model.signature = signature
    if input_example is not None:
        _save_example(mlflow_model, input_example, path)
    # Dispatch to the style-specific serializer.
    if first_argument_set_specified:
        return _save_model_with_loader_module_and_data_path(
            path=path,
            loader_module=loader_module,
            data_path=data_path,
            code_paths=code_path,
            conda_env=conda_env,
            mlflow_model=mlflow_model,
            pip_requirements=pip_requirements,
            extra_pip_requirements=extra_pip_requirements,
        )
    elif second_argument_set_specified:
        return mlflow.pyfunc.model._save_model_with_class_artifacts_params(
            path=path,
            python_model=python_model,
            artifacts=artifacts,
            conda_env=conda_env,
            code_paths=code_path,
            mlflow_model=mlflow_model,
            pip_requirements=pip_requirements,
            extra_pip_requirements=extra_pip_requirements,
        )
@format_docstring(LOG_MODEL_PARAM_DOCS.format(package_name="scikit-learn"))
def log_model(
    artifact_path,
    loader_module=None,
    data_path=None,
    code_path=None,
    conda_env=None,
    python_model=None,
    artifacts=None,
    registered_model_name=None,
    signature: ModelSignature = None,
    input_example: ModelInputExample = None,
    await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS,
    pip_requirements=None,
    extra_pip_requirements=None,
):
    """Log a custom ``python_function`` model as an artifact of the current run.

    Thin convenience wrapper around :py:meth:`Model.log` with the ``pyfunc``
    flavor pre-selected; ``Model.log`` calls back into this flavor's
    ``save_model`` to perform the actual serialization, so the
    model-definition parameters follow the same rules as ``save_model``.
    """
    # Collect everything into a single kwargs mapping and delegate to the
    # generic Model.log entry point.
    log_kwargs = dict(
        artifact_path=artifact_path,
        flavor=mlflow.pyfunc,
        loader_module=loader_module,
        data_path=data_path,
        code_path=code_path,
        python_model=python_model,
        artifacts=artifacts,
        conda_env=conda_env,
        registered_model_name=registered_model_name,
        signature=signature,
        input_example=input_example,
        await_registration_for=await_registration_for,
        pip_requirements=pip_requirements,
        extra_pip_requirements=extra_pip_requirements,
    )
    return Model.log(**log_kwargs)
def _save_model_with_loader_module_and_data_path(
    path,
    loader_module,
    data_path=None,
    code_paths=None,
    conda_env=None,
    mlflow_model=None,
    pip_requirements=None,
    extra_pip_requirements=None,
):
    """Serialize a "loader module"-style pyfunc model under ``path``.

    Copies ``data_path`` (if any) and ``code_paths`` into the model directory,
    writes the MLmodel file, and then materializes the environment files
    (``conda.yaml``, ``requirements.txt``, optional ``constraints.txt`` and
    the python-env descriptor).

    :param loader_module: Name of the python module whose ``_load_pyfunc``
        deserializes the model.
    :return: The populated :py:class:`Model` configuration.
    """
    data = None
    if data_path is not None:
        # Copy the model data into the "data" subdirectory and record its
        # in-model relative path.
        model_file = _copy_file_or_tree(src=data_path, dst=path, dst_dir="data")
        data = model_file
    code_dir_subpath = _validate_and_copy_code_paths(code_paths, path)
    if mlflow_model is None:
        mlflow_model = Model()
    mlflow.pyfunc.add_to_model(
        mlflow_model,
        loader_module=loader_module,
        code=code_dir_subpath,
        data=data,
        env=_CONDA_ENV_FILE_NAME,
    )
    mlflow_model.save(os.path.join(path, MLMODEL_FILE_NAME))
    if conda_env is None:
        if pip_requirements is None:
            default_reqs = get_default_pip_requirements()
            # To ensure `_load_pyfunc` can successfully load the model during the dependency
            # inference, `mlflow_model.save` must be called beforehand to save an MLmodel file.
            inferred_reqs = mlflow.models.infer_pip_requirements(
                path,
                FLAVOR_NAME,
                fallback=default_reqs,
            )
            default_reqs = sorted(set(inferred_reqs).union(default_reqs))
        else:
            # Explicit pip_requirements were given; no defaults/inference.
            default_reqs = None
        conda_env, pip_requirements, pip_constraints = _process_pip_requirements(
            default_reqs,
            pip_requirements,
            extra_pip_requirements,
        )
    else:
        conda_env, pip_requirements, pip_constraints = _process_conda_env(conda_env)
    with open(os.path.join(path, _CONDA_ENV_FILE_NAME), "w") as f:
        yaml.safe_dump(conda_env, stream=f, default_flow_style=False)
    # Save `constraints.txt` if necessary
    if pip_constraints:
        write_to(os.path.join(path, _CONSTRAINTS_FILE_NAME), "\n".join(pip_constraints))
    # Save `requirements.txt`
    write_to(os.path.join(path, _REQUIREMENTS_FILE_NAME), "\n".join(pip_requirements))
    _PythonEnv.current().to_yaml(os.path.join(path, _PYTHON_ENV_FILE_NAME))
    return mlflow_model
loader_template = """
import importlib
import os
import sys
def load_pyfunc():
{update_path}return importlib.import_module('{main}')._load_pyfunc('{data_path}')
"""
| true | true |
f7fccaa7e6f13b7526e7dafd7be37ba892507bd8 | 19,447 | py | Python | src/genie/libs/parser/iosxr/tests/test_show_static_routing.py | jeremyschulman/genieparser | a8c0e48a75f22b633834d0b25ff95b4a0a7f89f9 | [
"Apache-2.0"
] | null | null | null | src/genie/libs/parser/iosxr/tests/test_show_static_routing.py | jeremyschulman/genieparser | a8c0e48a75f22b633834d0b25ff95b4a0a7f89f9 | [
"Apache-2.0"
] | null | null | null | src/genie/libs/parser/iosxr/tests/test_show_static_routing.py | jeremyschulman/genieparser | a8c0e48a75f22b633834d0b25ff95b4a0a7f89f9 | [
"Apache-2.0"
] | 1 | 2021-07-07T18:07:56.000Z | 2021-07-07T18:07:56.000Z | import unittest
from unittest.mock import Mock
# ATS
from ats.topology import Device
from genie.metaparser.util.exceptions import SchemaEmptyParserError, \
SchemaMissingKeyError
from genie.libs.parser.iosxr.show_static_routing import ShowStaticTopologyDetail
# ============================================
# unit test for 'show ip static route'
# =============================================
class test_show_static_topology_detail(unittest.TestCase):
'''
unit test for show static topology detail
'''
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_output_1 = {'execute.return_value': '''
RP/0/0/CPU0:R2_xrv#show static vrf all ipv4 topology detail
Thu Dec 7 22:09:55.169 UTC
VRF: default Table Id: 0xe0000000 AFI: IPv4 SAFI: Unicast
Last path event occured at Dec 7 21:52:00.853
Prefix/Len Interface Nexthop Object Explicit-path Metrics
10.4.1.1/32 GigabitEthernet0_0_0_3 None None None [0/4096/1/0/1]
Path is installed into RIB at Dec 7 21:52:00.853
Path version: 1, Path status: 0x21
Path has best tag: 0
GigabitEthernet0_0_0_0 None None None [0/4096/1/0/1]
Path is installed into RIB at Dec 7 21:52:00.733
Path version: 1, Path status: 0x21
Path has best tag: 0
10.36.3.3/32 GigabitEthernet0_0_0_2 10.229.3.3 None None [0/0/1/0/1]
Path is installed into RIB at Dec 7 21:52:00.843
Path version: 1, Path status: 0xa1
Path has best tag: 0
Path contains both next-hop and outbound interface.
None 10.229.3.3 None None [0/0/3/0/1]
Path is configured at Dec 7 21:47:43.624
Path version: 0, Path status: 0x0
GigabitEthernet0_0_0_1 10.2.3.3 1 None [7/0/17/0/1]
Path is configured at Dec 7 21:47:43.624
Path version: 0, Path status: 0x80
Path contains both next-hop and outbound interface.
'''}
golden_parsed_output_1 = {
'vrf': {
'default': {
'address_family': {
'ipv4': {
'safi': 'unicast',
'table_id': '0xe0000000',
'routes': {
'10.4.1.1/32': {
'route': '10.4.1.1/32',
'next_hop': {
'outgoing_interface': {
'GigabitEthernet0/0/0/3': {
'outgoing_interface': 'GigabitEthernet0/0/0/3',
'metrics': 1,
'preference': 1,
'active': True,
'path_event': 'Path is installed into RIB at Dec 7 21:52:00.853',
'path_version': 1,
'path_status': '0x21',
'tag': 0,
},
'GigabitEthernet0/0/0/0': {
'outgoing_interface': 'GigabitEthernet0/0/0/0',
'metrics': 1,
'preference': 1,
'active': True,
'path_event': 'Path is installed into RIB at Dec 7 21:52:00.733',
'path_version': 1,
'path_status': '0x21',
'tag': 0,
},
},
},
},
'10.36.3.3/32': {
'route': '10.36.3.3/32',
'next_hop': {
'next_hop_list': {
1: {
'index': 1,
'next_hop': '10.229.3.3',
'outgoing_interface': 'GigabitEthernet0/0/0/2',
'metrics': 1,
'preference': 1,
'active': True,
'path_event': 'Path is installed into RIB at Dec 7 21:52:00.843',
'path_version': 1,
'path_status': '0xa1',
'tag': 0,
},
2: {
'index': 2,
'next_hop': '10.229.3.3',
'metrics': 1,
'preference': 3,
'active': False,
'path_event': 'Path is configured at Dec 7 21:47:43.624',
'path_version': 0,
'path_status': '0x0',
},
3: {
'index': 3,
'next_hop': '10.2.3.3',
'outgoing_interface': 'GigabitEthernet0/0/0/1',
'metrics': 1,
'preference': 17,
'track': 1,
'active': False,
'path_event': 'Path is configured at Dec 7 21:47:43.624',
'path_version': 0,
'path_status': '0x80',
},
},
},
},
},
},
},
},
},
}
golden_output_vrf_af = {'execute.return_value': '''
RP/0/0/CPU0:R2_xrv#show static vrf all ipv6 topology detail
Thu Dec 7 22:10:18.618 UTC
VRF: default Table Id: 0xe0800000 AFI: IPv6 SAFI: Unicast
Last path event occured at Dec 7 21:52:00.843
Prefix/Len Interface Nexthop Object Explicit-path Metrics
2001:1:1:1::1/128 GigabitEthernet0_0_0_3 2001:10:1:2::1 None None [0/0/1/0/1]
Path is installed into RIB at Dec 7 21:52:00.843
Path version: 1, Path status: 0xa1
Path has best tag: 0
Path contains both next-hop and outbound interface.
GigabitEthernet0_0_0_0 2001:20:1:2::1 None None [0/0/1/0/1]
Path is installed into RIB at Dec 7 21:52:00.733
Path version: 1, Path status: 0xa1
Path has best tag: 0
Path contains both next-hop and outbound interface.
2001:3:3:3::3/128 GigabitEthernet0_0_0_2 2001:20:2:3::3 None None [0/0/1/0/1]
Path is installed into RIB at Dec 7 21:52:00.763
Path version: 1, Path status: 0xa1
Path has best tag: 0
Path contains both next-hop and outbound interface.
GigabitEthernet0_0_0_1 2001:10:2:3::3 None None [0/0/1/0/1]
Path is installed into RIB at Dec 7 21:52:00.753
Path version: 1, Path status: 0xa1
Path has best tag: 0
Path contains both next-hop and outbound interface.
None 2001:20:2:3::3 None None [0/0/3/0/1]
Path is configured at Dec 7 21:47:43.624
Path version: 0, Path status: 0x0
VRF: VRF1 Table Id: 0xe0800010 AFI: IPv6 SAFI: Unicast
Last path event occured at Dec 7 21:51:47.424
Prefix/Len Interface Nexthop Object Explicit-path Metrics
2001:1:1:1::1/128 Null0 None None None [0/4096/99/0/1234]
Path is installed into RIB at Dec 7 21:51:47.424
Path version: 1, Path status: 0x21
Path has best tag: 0
2001:2:2:2::2/128 Null0 None None None [0/4096/101/0/3456]
Path is installed into RIB at Dec 7 21:51:47.424
Path version: 1, Path status: 0x21
Path has best tag: 0
'''}
golden_parsed_output_vrf_af = {
'vrf': {
'default': {
'address_family': {
'ipv6': {
'safi': 'unicast',
'table_id': '0xe0800000',
'routes': {
'2001:1:1:1::1/128': {
'route': '2001:1:1:1::1/128',
'next_hop': {
'next_hop_list': {
1: {
'index': 1,
'next_hop': '2001:10:1:2::1',
'outgoing_interface': 'GigabitEthernet0/0/0/3',
'metrics': 1,
'preference': 1,
'active': True,
'path_event': 'Path is installed into RIB at Dec 7 21:52:00.843',
'path_version': 1,
'path_status': '0xa1',
'tag': 0,
},
2: {
'index': 2,
'next_hop': '2001:20:1:2::1',
'outgoing_interface': 'GigabitEthernet0/0/0/0',
'metrics': 1,
'preference': 1,
'active': True,
'path_event': 'Path is installed into RIB at Dec 7 21:52:00.733',
'path_version': 1,
'path_status': '0xa1',
'tag': 0,
},
},
},
},
'2001:3:3:3::3/128': {
'route': '2001:3:3:3::3/128',
'next_hop': {
'next_hop_list': {
1: {
'index': 1,
'next_hop': '2001:20:2:3::3',
'outgoing_interface': 'GigabitEthernet0/0/0/2',
'metrics': 1,
'preference': 1,
'active': True,
'path_event': 'Path is installed into RIB at Dec 7 21:52:00.763',
'path_version': 1,
'path_status': '0xa1',
'tag': 0,
},
2: {
'index': 2,
'next_hop': '2001:10:2:3::3',
'outgoing_interface': 'GigabitEthernet0/0/0/1',
'metrics': 1,
'preference': 1,
'active': True,
'path_event': 'Path is installed into RIB at Dec 7 21:52:00.753',
'path_version': 1,
'path_status': '0xa1',
'tag': 0,
},
3: {
'index': 3,
'next_hop': '2001:20:2:3::3',
'metrics': 1,
'preference': 3,
'active': False,
'path_event': 'Path is configured at Dec 7 21:47:43.624',
'path_version': 0,
'path_status': '0x0',
},
},
},
},
},
},
},
},
'VRF1': {
'address_family': {
'ipv6': {
'safi': 'unicast',
'table_id': '0xe0800010',
'routes': {
'2001:1:1:1::1/128': {
'route': '2001:1:1:1::1/128',
'next_hop': {
'outgoing_interface': {
'Null0': {
'outgoing_interface': 'Null0',
'metrics': 1234,
'preference': 99,
'active': True,
'path_event': 'Path is installed into RIB at Dec 7 21:51:47.424',
'path_version': 1,
'path_status': '0x21',
'tag': 0,
},
},
},
},
'2001:2:2:2::2/128': {
'route': '2001:2:2:2::2/128',
'next_hop': {
'outgoing_interface': {
'Null0': {
'outgoing_interface': 'Null0',
'metrics': 3456,
'preference': 101,
'active': True,
'path_event': 'Path is installed into RIB at Dec 7 21:51:47.424',
'path_version': 1,
'path_status': '0x21',
'tag': 0,
},
},
},
},
},
},
},
},
},
}
golden_parsed_output2 = {
'vrf': {
'default': {
'address_family': {
'ipv4': {
'safi': 'unicast',
'table_id': '0xe0000000',
'routes': {
'172.16.0.89/32': {
'route': '172.16.0.89/32',
'next_hop': {
'outgoing_interface': {
'TenGigE0/0/1/2': {
'outgoing_interface': 'TenGigE0/0/1/2',
'metrics': 1,
'preference': 1,
'local_label': 'No label',
'path_event': 'Path is configured at Sep 11 08:29:25.605',
'path_version': 0,
'path_status': '0x0',
},
},
},
},
},
},
},
},
},
}
golden_output2 = {'execute.return_value': '''
show static topology detail
Wed Oct 9 14:34:58.699 EDT
VRF: default Table Id: 0xe0000000 AFI: IPv4 SAFI: Unicast
Last path event occured at Sep 11 08:29:25.605
Prefix/Len Interface Nexthop Object Explicit-path Metrics Local-Label
172.16.0.89/32 TenGigE0_0_1_2 None None None [0/4096/1/0/1] No label Path is configured at Sep 11 08:29:25.605
Path version: 0, Path status: 0x0
'''}
def test_empty_1(self):
self.device = Mock(**self.empty_output)
obj = ShowStaticTopologyDetail(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_show_ip_static_route_1(self):
self.maxDiff = None
self.device = Mock(**self.golden_output_1)
obj = ShowStaticTopologyDetail(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output_1)
def test_show_ip_static_route_2(self):
self.maxDiff = None
self.device = Mock(**self.golden_output_vrf_af)
obj = ShowStaticTopologyDetail(device=self.device)
parsed_output = obj.parse(vrf='all',af='ipv6')
self.assertEqual(parsed_output,self.golden_parsed_output_vrf_af)
def test_show_ip_static_route_3(self):
self.maxDiff = None
self.device = Mock(**self.golden_output2)
obj = ShowStaticTopologyDetail(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output2)
if __name__ == '__main__':
unittest.main() | 50.643229 | 152 | 0.329871 | import unittest
from unittest.mock import Mock
from ats.topology import Device
from genie.metaparser.util.exceptions import SchemaEmptyParserError, \
SchemaMissingKeyError
from genie.libs.parser.iosxr.show_static_routing import ShowStaticTopologyDetail
class test_show_static_topology_detail(unittest.TestCase):
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_output_1 = {'execute.return_value': '''
RP/0/0/CPU0:R2_xrv#show static vrf all ipv4 topology detail
Thu Dec 7 22:09:55.169 UTC
VRF: default Table Id: 0xe0000000 AFI: IPv4 SAFI: Unicast
Last path event occured at Dec 7 21:52:00.853
Prefix/Len Interface Nexthop Object Explicit-path Metrics
10.4.1.1/32 GigabitEthernet0_0_0_3 None None None [0/4096/1/0/1]
Path is installed into RIB at Dec 7 21:52:00.853
Path version: 1, Path status: 0x21
Path has best tag: 0
GigabitEthernet0_0_0_0 None None None [0/4096/1/0/1]
Path is installed into RIB at Dec 7 21:52:00.733
Path version: 1, Path status: 0x21
Path has best tag: 0
10.36.3.3/32 GigabitEthernet0_0_0_2 10.229.3.3 None None [0/0/1/0/1]
Path is installed into RIB at Dec 7 21:52:00.843
Path version: 1, Path status: 0xa1
Path has best tag: 0
Path contains both next-hop and outbound interface.
None 10.229.3.3 None None [0/0/3/0/1]
Path is configured at Dec 7 21:47:43.624
Path version: 0, Path status: 0x0
GigabitEthernet0_0_0_1 10.2.3.3 1 None [7/0/17/0/1]
Path is configured at Dec 7 21:47:43.624
Path version: 0, Path status: 0x80
Path contains both next-hop and outbound interface.
'''}
golden_parsed_output_1 = {
'vrf': {
'default': {
'address_family': {
'ipv4': {
'safi': 'unicast',
'table_id': '0xe0000000',
'routes': {
'10.4.1.1/32': {
'route': '10.4.1.1/32',
'next_hop': {
'outgoing_interface': {
'GigabitEthernet0/0/0/3': {
'outgoing_interface': 'GigabitEthernet0/0/0/3',
'metrics': 1,
'preference': 1,
'active': True,
'path_event': 'Path is installed into RIB at Dec 7 21:52:00.853',
'path_version': 1,
'path_status': '0x21',
'tag': 0,
},
'GigabitEthernet0/0/0/0': {
'outgoing_interface': 'GigabitEthernet0/0/0/0',
'metrics': 1,
'preference': 1,
'active': True,
'path_event': 'Path is installed into RIB at Dec 7 21:52:00.733',
'path_version': 1,
'path_status': '0x21',
'tag': 0,
},
},
},
},
'10.36.3.3/32': {
'route': '10.36.3.3/32',
'next_hop': {
'next_hop_list': {
1: {
'index': 1,
'next_hop': '10.229.3.3',
'outgoing_interface': 'GigabitEthernet0/0/0/2',
'metrics': 1,
'preference': 1,
'active': True,
'path_event': 'Path is installed into RIB at Dec 7 21:52:00.843',
'path_version': 1,
'path_status': '0xa1',
'tag': 0,
},
2: {
'index': 2,
'next_hop': '10.229.3.3',
'metrics': 1,
'preference': 3,
'active': False,
'path_event': 'Path is configured at Dec 7 21:47:43.624',
'path_version': 0,
'path_status': '0x0',
},
3: {
'index': 3,
'next_hop': '10.2.3.3',
'outgoing_interface': 'GigabitEthernet0/0/0/1',
'metrics': 1,
'preference': 17,
'track': 1,
'active': False,
'path_event': 'Path is configured at Dec 7 21:47:43.624',
'path_version': 0,
'path_status': '0x80',
},
},
},
},
},
},
},
},
},
}
golden_output_vrf_af = {'execute.return_value': '''
RP/0/0/CPU0:R2_xrv#show static vrf all ipv6 topology detail
Thu Dec 7 22:10:18.618 UTC
VRF: default Table Id: 0xe0800000 AFI: IPv6 SAFI: Unicast
Last path event occured at Dec 7 21:52:00.843
Prefix/Len Interface Nexthop Object Explicit-path Metrics
2001:1:1:1::1/128 GigabitEthernet0_0_0_3 2001:10:1:2::1 None None [0/0/1/0/1]
Path is installed into RIB at Dec 7 21:52:00.843
Path version: 1, Path status: 0xa1
Path has best tag: 0
Path contains both next-hop and outbound interface.
GigabitEthernet0_0_0_0 2001:20:1:2::1 None None [0/0/1/0/1]
Path is installed into RIB at Dec 7 21:52:00.733
Path version: 1, Path status: 0xa1
Path has best tag: 0
Path contains both next-hop and outbound interface.
2001:3:3:3::3/128 GigabitEthernet0_0_0_2 2001:20:2:3::3 None None [0/0/1/0/1]
Path is installed into RIB at Dec 7 21:52:00.763
Path version: 1, Path status: 0xa1
Path has best tag: 0
Path contains both next-hop and outbound interface.
GigabitEthernet0_0_0_1 2001:10:2:3::3 None None [0/0/1/0/1]
Path is installed into RIB at Dec 7 21:52:00.753
Path version: 1, Path status: 0xa1
Path has best tag: 0
Path contains both next-hop and outbound interface.
None 2001:20:2:3::3 None None [0/0/3/0/1]
Path is configured at Dec 7 21:47:43.624
Path version: 0, Path status: 0x0
VRF: VRF1 Table Id: 0xe0800010 AFI: IPv6 SAFI: Unicast
Last path event occured at Dec 7 21:51:47.424
Prefix/Len Interface Nexthop Object Explicit-path Metrics
2001:1:1:1::1/128 Null0 None None None [0/4096/99/0/1234]
Path is installed into RIB at Dec 7 21:51:47.424
Path version: 1, Path status: 0x21
Path has best tag: 0
2001:2:2:2::2/128 Null0 None None None [0/4096/101/0/3456]
Path is installed into RIB at Dec 7 21:51:47.424
Path version: 1, Path status: 0x21
Path has best tag: 0
'''}
golden_parsed_output_vrf_af = {
'vrf': {
'default': {
'address_family': {
'ipv6': {
'safi': 'unicast',
'table_id': '0xe0800000',
'routes': {
'2001:1:1:1::1/128': {
'route': '2001:1:1:1::1/128',
'next_hop': {
'next_hop_list': {
1: {
'index': 1,
'next_hop': '2001:10:1:2::1',
'outgoing_interface': 'GigabitEthernet0/0/0/3',
'metrics': 1,
'preference': 1,
'active': True,
'path_event': 'Path is installed into RIB at Dec 7 21:52:00.843',
'path_version': 1,
'path_status': '0xa1',
'tag': 0,
},
2: {
'index': 2,
'next_hop': '2001:20:1:2::1',
'outgoing_interface': 'GigabitEthernet0/0/0/0',
'metrics': 1,
'preference': 1,
'active': True,
'path_event': 'Path is installed into RIB at Dec 7 21:52:00.733',
'path_version': 1,
'path_status': '0xa1',
'tag': 0,
},
},
},
},
'2001:3:3:3::3/128': {
'route': '2001:3:3:3::3/128',
'next_hop': {
'next_hop_list': {
1: {
'index': 1,
'next_hop': '2001:20:2:3::3',
'outgoing_interface': 'GigabitEthernet0/0/0/2',
'metrics': 1,
'preference': 1,
'active': True,
'path_event': 'Path is installed into RIB at Dec 7 21:52:00.763',
'path_version': 1,
'path_status': '0xa1',
'tag': 0,
},
2: {
'index': 2,
'next_hop': '2001:10:2:3::3',
'outgoing_interface': 'GigabitEthernet0/0/0/1',
'metrics': 1,
'preference': 1,
'active': True,
'path_event': 'Path is installed into RIB at Dec 7 21:52:00.753',
'path_version': 1,
'path_status': '0xa1',
'tag': 0,
},
3: {
'index': 3,
'next_hop': '2001:20:2:3::3',
'metrics': 1,
'preference': 3,
'active': False,
'path_event': 'Path is configured at Dec 7 21:47:43.624',
'path_version': 0,
'path_status': '0x0',
},
},
},
},
},
},
},
},
'VRF1': {
'address_family': {
'ipv6': {
'safi': 'unicast',
'table_id': '0xe0800010',
'routes': {
'2001:1:1:1::1/128': {
'route': '2001:1:1:1::1/128',
'next_hop': {
'outgoing_interface': {
'Null0': {
'outgoing_interface': 'Null0',
'metrics': 1234,
'preference': 99,
'active': True,
'path_event': 'Path is installed into RIB at Dec 7 21:51:47.424',
'path_version': 1,
'path_status': '0x21',
'tag': 0,
},
},
},
},
'2001:2:2:2::2/128': {
'route': '2001:2:2:2::2/128',
'next_hop': {
'outgoing_interface': {
'Null0': {
'outgoing_interface': 'Null0',
'metrics': 3456,
'preference': 101,
'active': True,
'path_event': 'Path is installed into RIB at Dec 7 21:51:47.424',
'path_version': 1,
'path_status': '0x21',
'tag': 0,
},
},
},
},
},
},
},
},
},
}
golden_parsed_output2 = {
'vrf': {
'default': {
'address_family': {
'ipv4': {
'safi': 'unicast',
'table_id': '0xe0000000',
'routes': {
'172.16.0.89/32': {
'route': '172.16.0.89/32',
'next_hop': {
'outgoing_interface': {
'TenGigE0/0/1/2': {
'outgoing_interface': 'TenGigE0/0/1/2',
'metrics': 1,
'preference': 1,
'local_label': 'No label',
'path_event': 'Path is configured at Sep 11 08:29:25.605',
'path_version': 0,
'path_status': '0x0',
},
},
},
},
},
},
},
},
},
}
golden_output2 = {'execute.return_value': '''
show static topology detail
Wed Oct 9 14:34:58.699 EDT
VRF: default Table Id: 0xe0000000 AFI: IPv4 SAFI: Unicast
Last path event occured at Sep 11 08:29:25.605
Prefix/Len Interface Nexthop Object Explicit-path Metrics Local-Label
172.16.0.89/32 TenGigE0_0_1_2 None None None [0/4096/1/0/1] No label Path is configured at Sep 11 08:29:25.605
Path version: 0, Path status: 0x0
'''}
def test_empty_1(self):
self.device = Mock(**self.empty_output)
obj = ShowStaticTopologyDetail(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_show_ip_static_route_1(self):
self.maxDiff = None
self.device = Mock(**self.golden_output_1)
obj = ShowStaticTopologyDetail(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output_1)
def test_show_ip_static_route_2(self):
self.maxDiff = None
self.device = Mock(**self.golden_output_vrf_af)
obj = ShowStaticTopologyDetail(device=self.device)
parsed_output = obj.parse(vrf='all',af='ipv6')
self.assertEqual(parsed_output,self.golden_parsed_output_vrf_af)
def test_show_ip_static_route_3(self):
self.maxDiff = None
self.device = Mock(**self.golden_output2)
obj = ShowStaticTopologyDetail(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output2)
if __name__ == '__main__':
unittest.main() | true | true |
f7fcce2ab6dd01c89a0f5e80b3b1129040edbfaf | 5,626 | py | Python | platformio/app.py | seryoni/platformio | 35a602cfefde288ffe72f6d21436ac6785ffcab4 | [
"Apache-2.0"
] | null | null | null | platformio/app.py | seryoni/platformio | 35a602cfefde288ffe72f6d21436ac6785ffcab4 | [
"Apache-2.0"
] | null | null | null | platformio/app.py | seryoni/platformio | 35a602cfefde288ffe72f6d21436ac6785ffcab4 | [
"Apache-2.0"
] | 1 | 2019-07-17T07:16:24.000Z | 2019-07-17T07:16:24.000Z | # Copyright 2014-2016 Ivan Kravets <me@ikravets.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from copy import deepcopy
from os import environ, getenv
from os.path import getmtime, isfile, join
from time import time
from lockfile import LockFile
from platformio import __version__, util
from platformio.exception import InvalidSettingName, InvalidSettingValue
# Registry of all user-tunable settings: maps setting name to a dict with a
# human-readable "description" and the default "value". The default's type
# (bool/int) drives the coercion performed by sanitize_setting().
DEFAULT_SETTINGS = {
    "check_platformio_interval": {
        "description": "Check for the new PlatformIO interval (days)",
        "value": 3
    },
    "check_platforms_interval": {
        "description": "Check for the platform updates interval (days)",
        "value": 7
    },
    "check_libraries_interval": {
        "description": "Check for the library updates interval (days)",
        "value": 7
    },
    "auto_update_platforms": {
        "description": "Automatically update platforms (Yes/No)",
        "value": False
    },
    "auto_update_libraries": {
        "description": "Automatically update libraries (Yes/No)",
        "value": False
    },
    "enable_telemetry": {
        "description": (
            "Telemetry service <http://docs.platformio.org/en/latest/"
            "userguide/cmd_settings.html?#enable-telemetry> (Yes/No)"),
        "value": True
    },
    "enable_prompts": {
        "description": (
            "Can PlatformIO communicate with you via prompts: "
            "propose to install platforms which aren't installed yet, "
            "paginate over library search results and etc.)? ATTENTION!!! "
            "If you call PlatformIO like subprocess, "
            "please disable prompts to avoid blocking (Yes/No)"),
        "value": True
    }
}
# Process-lifetime variables (never persisted to appstate.json); presumably
# read/written via get_session_var()/set_session_var() defined elsewhere in
# this module — confirm against the full file.
SESSION_VARS = {
    "command_ctx": None,
    "force_option": False,
    "caller_id": None
}
class State(object):
    """Context manager for the persisted application state (appstate.json).

    ``with State() as data:`` yields the state dict loaded from disk; on exit
    the dict is written back only if it was modified. Pass ``lock=True`` to
    serialize concurrent access through a lock file.
    """

    def __init__(self, path=None, lock=False):
        # path: state file location, defaults to <home>/appstate.json
        # lock: whether to guard the file with a LockFile while in the context
        self.path = path
        self.lock = lock
        if not self.path:
            self.path = join(util.get_home_dir(), "appstate.json")
        self._state = {}
        self._prev_state = {}
        self._lockfile = None

    def __enter__(self):
        """Acquire the lock (if requested), load the state, return the dict."""
        try:
            self._lock_state_file()
            if isfile(self.path):
                self._state = util.load_json(self.path)
        except ValueError:
            # Corrupted/unparsable state file: start from an empty state.
            self._state = {}
        # Deep copy so __exit__ can detect whether the caller mutated nested
        # structures, not just top-level keys.
        self._prev_state = deepcopy(self._state)
        return self._state

    def __exit__(self, type_, value, traceback):
        """Write the state back if it changed, then release the lock.

        NOTE(review): this also writes when the with-block exits via an
        exception — the exception info is ignored here.
        """
        if self._prev_state != self._state:
            with open(self.path, "w") as fp:
                if "dev" in __version__:
                    # Pretty-print in development builds for easier debugging.
                    json.dump(self._state, fp, indent=4)
                else:
                    json.dump(self._state, fp)
        self._unlock_state_file()

    def _lock_state_file(self):
        """Acquire the lock file, breaking locks that look stale (>10s old)."""
        if not self.lock:
            return
        self._lockfile = LockFile(self.path)

        if (self._lockfile.is_locked() and
                (time() - getmtime(self._lockfile.lock_file)) > 10):
            # An existing lock older than 10 seconds is assumed abandoned.
            self._lockfile.break_lock()

        self._lockfile.acquire()

    def _unlock_state_file(self):
        # Release only if a lock was actually acquired in _lock_state_file().
        if self._lockfile:
            self._lockfile.release()
def sanitize_setting(name, value):
if name not in DEFAULT_SETTINGS:
raise InvalidSettingName(name)
defdata = DEFAULT_SETTINGS[name]
try:
if "validator" in defdata:
value = defdata['validator']()
elif isinstance(defdata['value'], bool):
if not isinstance(value, bool):
value = str(value).lower() in ("true", "yes", "y", "1")
elif isinstance(defdata['value'], int):
value = int(value)
except Exception:
raise InvalidSettingValue(value, name)
return value
def get_state_item(name, default=None):
with State() as data:
return data.get(name, default)
def set_state_item(name, value):
with State(lock=True) as data:
data[name] = value
def get_setting(name):
if name == "enable_prompts":
# disable prompts for Continuous Integration systems
# and when global "--force" option is set
if any([util.is_ci(), get_session_var("force_option")]):
return False
_env_name = "PLATFORMIO_SETTING_%s" % name.upper()
if _env_name in environ:
return sanitize_setting(name, getenv(_env_name))
with State() as data:
if "settings" in data and name in data['settings']:
return data['settings'][name]
return DEFAULT_SETTINGS[name]['value']
def set_setting(name, value):
with State(lock=True) as data:
if "settings" not in data:
data['settings'] = {}
data['settings'][name] = sanitize_setting(name, value)
def reset_settings():
with State(lock=True) as data:
if "settings" in data:
del data['settings']
def get_session_var(name, default=None):
return SESSION_VARS.get(name, default)
def set_session_var(name, value):
assert name in SESSION_VARS
SESSION_VARS[name] = value
def is_disabled_progressbar():
return (not get_setting("enable_prompts") or
getenv("PLATFORMIO_DISABLE_PROGRESSBAR") == "true")
| 29.767196 | 75 | 0.6262 |
import json
from copy import deepcopy
from os import environ, getenv
from os.path import getmtime, isfile, join
from time import time
from lockfile import LockFile
from platformio import __version__, util
from platformio.exception import InvalidSettingName, InvalidSettingValue
DEFAULT_SETTINGS = {
"check_platformio_interval": {
"description": "Check for the new PlatformIO interval (days)",
"value": 3
},
"check_platforms_interval": {
"description": "Check for the platform updates interval (days)",
"value": 7
},
"check_libraries_interval": {
"description": "Check for the library updates interval (days)",
"value": 7
},
"auto_update_platforms": {
"description": "Automatically update platforms (Yes/No)",
"value": False
},
"auto_update_libraries": {
"description": "Automatically update libraries (Yes/No)",
"value": False
},
"enable_telemetry": {
"description": (
"Telemetry service <http://docs.platformio.org/en/latest/"
"userguide/cmd_settings.html?#enable-telemetry> (Yes/No)"),
"value": True
},
"enable_prompts": {
"description": (
"Can PlatformIO communicate with you via prompts: "
"propose to install platforms which aren't installed yet, "
"paginate over library search results and etc.)? ATTENTION!!! "
"If you call PlatformIO like subprocess, "
"please disable prompts to avoid blocking (Yes/No)"),
"value": True
}
}
SESSION_VARS = {
"command_ctx": None,
"force_option": False,
"caller_id": None
}
class State(object):
def __init__(self, path=None, lock=False):
self.path = path
self.lock = lock
if not self.path:
self.path = join(util.get_home_dir(), "appstate.json")
self._state = {}
self._prev_state = {}
self._lockfile = None
def __enter__(self):
try:
self._lock_state_file()
if isfile(self.path):
self._state = util.load_json(self.path)
except ValueError:
self._state = {}
self._prev_state = deepcopy(self._state)
return self._state
def __exit__(self, type_, value, traceback):
if self._prev_state != self._state:
with open(self.path, "w") as fp:
if "dev" in __version__:
json.dump(self._state, fp, indent=4)
else:
json.dump(self._state, fp)
self._unlock_state_file()
def _lock_state_file(self):
if not self.lock:
return
self._lockfile = LockFile(self.path)
if (self._lockfile.is_locked() and
(time() - getmtime(self._lockfile.lock_file)) > 10):
self._lockfile.break_lock()
self._lockfile.acquire()
def _unlock_state_file(self):
if self._lockfile:
self._lockfile.release()
def sanitize_setting(name, value):
if name not in DEFAULT_SETTINGS:
raise InvalidSettingName(name)
defdata = DEFAULT_SETTINGS[name]
try:
if "validator" in defdata:
value = defdata['validator']()
elif isinstance(defdata['value'], bool):
if not isinstance(value, bool):
value = str(value).lower() in ("true", "yes", "y", "1")
elif isinstance(defdata['value'], int):
value = int(value)
except Exception:
raise InvalidSettingValue(value, name)
return value
def get_state_item(name, default=None):
with State() as data:
return data.get(name, default)
def set_state_item(name, value):
with State(lock=True) as data:
data[name] = value
def get_setting(name):
if name == "enable_prompts":
# disable prompts for Continuous Integration systems
# and when global "--force" option is set
if any([util.is_ci(), get_session_var("force_option")]):
return False
_env_name = "PLATFORMIO_SETTING_%s" % name.upper()
if _env_name in environ:
return sanitize_setting(name, getenv(_env_name))
with State() as data:
if "settings" in data and name in data['settings']:
return data['settings'][name]
return DEFAULT_SETTINGS[name]['value']
def set_setting(name, value):
with State(lock=True) as data:
if "settings" not in data:
data['settings'] = {}
data['settings'][name] = sanitize_setting(name, value)
def reset_settings():
with State(lock=True) as data:
if "settings" in data:
del data['settings']
def get_session_var(name, default=None):
return SESSION_VARS.get(name, default)
def set_session_var(name, value):
assert name in SESSION_VARS
SESSION_VARS[name] = value
def is_disabled_progressbar():
return (not get_setting("enable_prompts") or
getenv("PLATFORMIO_DISABLE_PROGRESSBAR") == "true")
| true | true |
f7fcce60aa6bf1d2109d51dcafd5d010200ebbaf | 680 | py | Python | challanges/find_maximum_value_binary_tree/test_find_maximum_value_binary_tree.py | Patricia888/data-structures-and-algorithms | 8963acf857b9f7069eeeea2884b41376986c3d7c | [
"MIT"
] | null | null | null | challanges/find_maximum_value_binary_tree/test_find_maximum_value_binary_tree.py | Patricia888/data-structures-and-algorithms | 8963acf857b9f7069eeeea2884b41376986c3d7c | [
"MIT"
] | null | null | null | challanges/find_maximum_value_binary_tree/test_find_maximum_value_binary_tree.py | Patricia888/data-structures-and-algorithms | 8963acf857b9f7069eeeea2884b41376986c3d7c | [
"MIT"
] | null | null | null | from .find_maximum_value_binary_tree import find_maximum_value, BST
def test_find_maximum_value_tree_with_one_value():
one_value = BST([5])
assert find_maximum_value(one_value) == 5
def test_find_maximum_value_tree_with_two_values():
one_value = BST([10, 2])
assert find_maximum_value(one_value) == 10
def test_find_maximum_value_balanced():
balanced = BST([10, 7, 3, 16, 12, 8, 20])
assert find_maximum_value(balanced) == 20
def test_find_maximum_value_left():
left = BST([10, 8, 6, 4])
assert find_maximum_value(left) == 10
def test_find_maximum_value_right():
right = BST([1, 3, 5, 7, 9])
assert find_maximum_value(right) == 9
| 25.185185 | 67 | 0.720588 | from .find_maximum_value_binary_tree import find_maximum_value, BST
def test_find_maximum_value_tree_with_one_value():
one_value = BST([5])
assert find_maximum_value(one_value) == 5
def test_find_maximum_value_tree_with_two_values():
one_value = BST([10, 2])
assert find_maximum_value(one_value) == 10
def test_find_maximum_value_balanced():
balanced = BST([10, 7, 3, 16, 12, 8, 20])
assert find_maximum_value(balanced) == 20
def test_find_maximum_value_left():
left = BST([10, 8, 6, 4])
assert find_maximum_value(left) == 10
def test_find_maximum_value_right():
right = BST([1, 3, 5, 7, 9])
assert find_maximum_value(right) == 9
| true | true |
f7fccf66051670cc2221f2bdee989c483ebfd7d3 | 2,154 | py | Python | tungsten_tempest_plugin/services/contrail/json/virtual_network_client.py | Vegasq/tungsten-tempest | 624584cd1a2298b92a44223de1ca7ae23c6f3476 | [
"Apache-2.0"
] | 1 | 2019-04-29T09:00:16.000Z | 2019-04-29T09:00:16.000Z | tungsten_tempest_plugin/services/contrail/json/virtual_network_client.py | Vegasq/tungsten-tempest | 624584cd1a2298b92a44223de1ca7ae23c6f3476 | [
"Apache-2.0"
] | 11 | 2018-12-04T14:20:27.000Z | 2019-05-30T14:37:13.000Z | tungsten_tempest_plugin/services/contrail/json/virtual_network_client.py | Vegasq/tungsten-tempest | 624584cd1a2298b92a44223de1ca7ae23c6f3476 | [
"Apache-2.0"
] | 9 | 2018-07-26T18:20:45.000Z | 2020-03-27T17:40:56.000Z | # Copyright 2016 AT&T Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tempest service class for virtual network test cases
"""
from oslo_serialization import jsonutils as json
from tungsten_tempest_plugin.services.contrail.json import base
class VirtualNetworkClient(base.BaseContrailClient):
"""
Service class for virtual n/w test cases
"""
def list_virtual_networks(self):
"""
:return:
"""
url = '/virtual-networks'
return self.get(url)
def create_virtual_networks(self, **kwargs):
"""
:param kwargs:
:return:
"""
url = '/virtual-networks'
post_body = json.dumps({'virtual-network': kwargs})
resp, body = self.post(url, post_body)
body = json.loads(body)
return base.ResponseBody(resp, body)
def update_virtual_network(self, uuid, **kwargs):
"""
:param uuid:
:param kwargs:
:return:
"""
url = '/virtual-network/%s' % uuid
post_data = {'virtual-network': kwargs}
req_post_data = json.dumps(post_data)
resp, body = self.put(url, req_post_data)
body = json.loads(body)
return base.ResponseBody(resp, body)
def delete_virtual_network(self, uuid):
"""
:param uuid:
:return:
"""
url = '/virtual-network/%s' % uuid
return self.delete(url)
def show_virtual_network(self, uuid):
"""
:param uuid:
:return:
"""
url = '/virtual-network/%s' % uuid
return self.get(url)
| 28.342105 | 78 | 0.616527 |
from oslo_serialization import jsonutils as json
from tungsten_tempest_plugin.services.contrail.json import base
class VirtualNetworkClient(base.BaseContrailClient):
def list_virtual_networks(self):
url = '/virtual-networks'
return self.get(url)
def create_virtual_networks(self, **kwargs):
url = '/virtual-networks'
post_body = json.dumps({'virtual-network': kwargs})
resp, body = self.post(url, post_body)
body = json.loads(body)
return base.ResponseBody(resp, body)
def update_virtual_network(self, uuid, **kwargs):
url = '/virtual-network/%s' % uuid
post_data = {'virtual-network': kwargs}
req_post_data = json.dumps(post_data)
resp, body = self.put(url, req_post_data)
body = json.loads(body)
return base.ResponseBody(resp, body)
def delete_virtual_network(self, uuid):
url = '/virtual-network/%s' % uuid
return self.delete(url)
def show_virtual_network(self, uuid):
url = '/virtual-network/%s' % uuid
return self.get(url)
| true | true |
f7fcd003bc9e3f57ec944b1eb041edb86ca93e84 | 6,381 | py | Python | rafiki/model/model.py | dcslin/rafiki | b617ac2536ac13095c4930d6d3f1f9b3c231b5e7 | [
"Apache-2.0"
] | null | null | null | rafiki/model/model.py | dcslin/rafiki | b617ac2536ac13095c4930d6d3f1f9b3c231b5e7 | [
"Apache-2.0"
] | null | null | null | rafiki/model/model.py | dcslin/rafiki | b617ac2536ac13095c4930d6d3f1f9b3c231b5e7 | [
"Apache-2.0"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import abc
import numpy as np
from typing import Union, Dict, Optional, Any, List
from .knob import BaseKnob
KnobConfig = Dict[str, BaseKnob]
Knobs = Dict[str, Any]
Params = Dict[str, Union[str, int, float, np.ndarray]]
class BaseModel(abc.ABC):
'''
Rafiki's base model class that Rafiki models must extend.
Rafiki models must implement all abstract methods below, according to the specification of its associated task (see :ref:`tasks`).
They configure how this model template will be trained, evaluated, tuned, serialized and served on Rafiki.
In the model's ``__init__`` method, call ``super().__init__(**knobs)`` as the first line,
followed by the model's initialization logic. The model should be initialize itself with ``knobs``,
a set of generated knob values for the created model instance.
These knob values are chosen by Rafiki based on the model's knob configuration (defined in :meth:`rafiki.model.BaseModel.get_knob_config`).
For example:
::
def __init__(self, **knobs):
super().__init__(**knobs)
self.__dict__.update(knobs)
...
self._build_model(self.knob1, self.knob2)
:param knobs: Dictionary mapping knob names to knob values
:type knobs: :obj:`rafiki.model.Knobs`
'''
def __init__(self, **knobs: Knobs):
pass
@abc.abstractstaticmethod
def get_knob_config() -> KnobConfig:
'''
Return a dictionary that defines the search space for this model template's knobs
(i.e. knobs' names, their types & their ranges).
Over the course of training, your model will be initialized with different values of knobs within this search space
to maximize this model’s performance.
Refer to :ref:`model-tuning` to understand more about how this works.
:returns: Dictionary mapping knob names to knob specifications
'''
raise NotImplementedError()
@abc.abstractmethod
def train(self, dataset_path: str, shared_params: Optional[Params] = None, **train_args):
'''
Train this model instance with the given traing dataset and initialized knob values.
Additional keyword arguments could be passed depending on the task's specification.
Additionally, trained parameters shared from previous trials could be passed,
as part of the ``SHARE_PARAMS`` policy (see :ref:`model-policies`).
Subsequently, the model is considered *trained*.
:param dataset_path: File path of the train dataset file in the *local filesystem*, in a format specified by the task
:param shared_params: Dictionary mapping parameter names to values, as produced by your model's :meth:`rafiki.model.BaseModel.dump_parameters`.
'''
raise NotImplementedError()
@abc.abstractmethod
def evaluate(self, dataset_path: str) -> float:
'''
Evaluate this model instance with the given validation dataset after training.
This will be called only when model is *trained*.
:param dataset_path: File path of the validation dataset file in the *local filesystem*, in a format specified by the task
:returns: A score associated with the validation performance for the trained model instance, the higher the better e.g. classification accuracy.
'''
raise NotImplementedError()
@abc.abstractmethod
def predict(self, queries: List[Any]) -> List[Any]:
'''
Make predictions on a batch of queries after training.
This will be called only when model is *trained*.
:param queries: List of queries, where a query is in the format specified by the task
:returns: List of predictions, in an order corresponding to the queries, where a prediction is in the format specified by the task
'''
raise NotImplementedError()
@abc.abstractmethod
def dump_parameters(self) -> Params:
'''
Returns a dictionary of model parameters that *fully define the trained state of the model*.
This dictionary must conform to the format :obj:`rafiki.model.Params`.
This will be used to save the trained model in Rafiki.
Additionally, trained parameters produced by this method could be shared with future trials, as
part of the ``SHARE_PARAMS`` policy (see :ref:`model-policies`).
This will be called only when model is *trained*.
:returns: Dictionary mapping parameter names to values
'''
raise NotImplementedError()
@abc.abstractmethod
def load_parameters(self, params: Params):
'''
Loads this model instance with previously trained model parameters produced by your model's :meth:`rafiki.model.BaseModel.dump_parameters`.
*This model instance's initialized knob values will match those during training*.
Subsequently, the model is considered *trained*.
'''
raise NotImplementedError()
def destroy(self):
'''
Destroy this model instance, freeing any resources held by this model instance.
No other instance methods will be called subsequently.
'''
pass
@staticmethod
def teardown():
'''
Runs class-wide teardown logic (e.g. close a training session shared across trials).
'''
pass
class PandaModel(BaseModel):
def __init__(self, **knobs: Knobs):
super().__init__(**knobs)
@abc.abstractmethod
def local_explain(self, queries, params: Params):
raise NotImplementedError()
| 39.147239 | 152 | 0.692681 |
import abc
import numpy as np
from typing import Union, Dict, Optional, Any, List
from .knob import BaseKnob
KnobConfig = Dict[str, BaseKnob]
Knobs = Dict[str, Any]
Params = Dict[str, Union[str, int, float, np.ndarray]]
class BaseModel(abc.ABC):
def __init__(self, **knobs: Knobs):
pass
@abc.abstractstaticmethod
def get_knob_config() -> KnobConfig:
raise NotImplementedError()
@abc.abstractmethod
def train(self, dataset_path: str, shared_params: Optional[Params] = None, **train_args):
raise NotImplementedError()
@abc.abstractmethod
def evaluate(self, dataset_path: str) -> float:
raise NotImplementedError()
@abc.abstractmethod
def predict(self, queries: List[Any]) -> List[Any]:
raise NotImplementedError()
@abc.abstractmethod
def dump_parameters(self) -> Params:
raise NotImplementedError()
@abc.abstractmethod
def load_parameters(self, params: Params):
raise NotImplementedError()
def destroy(self):
pass
@staticmethod
def teardown():
pass
class PandaModel(BaseModel):
def __init__(self, **knobs: Knobs):
super().__init__(**knobs)
@abc.abstractmethod
def local_explain(self, queries, params: Params):
raise NotImplementedError()
| true | true |
f7fcd065302faab42706ab33888811125efb3b4f | 2,177 | py | Python | tslearn/metrics/sax.py | hoangph3/tslearn | c589de380398379f2587f8cc812571d2a6d75938 | [
"BSD-2-Clause"
] | null | null | null | tslearn/metrics/sax.py | hoangph3/tslearn | c589de380398379f2587f8cc812571d2a6d75938 | [
"BSD-2-Clause"
] | null | null | null | tslearn/metrics/sax.py | hoangph3/tslearn | c589de380398379f2587f8cc812571d2a6d75938 | [
"BSD-2-Clause"
] | null | null | null | from .utils import _cdist_generic
from .cysax import cydist_sax
__author__ = 'Romain Tavenard romain.tavenard[at]univ-rennes2.fr'
def cdist_sax(dataset1, breakpoints_avg, size_fitted, dataset2=None,
n_jobs=None, verbose=0):
r"""Calculates a matrix of distances (MINDIST) on SAX-transformed data,
as presented in [1]_. It is important to note that this function
expects the timeseries in dataset1 and dataset2 to be normalized
to each have zero mean and unit variance.
Parameters
----------
dataset1 : array-like
A dataset of time series
breakpoints_avg : array-like
The breakpoints used to assign the alphabet symbols.
size_fitted: int
The original timesteps in the timeseries, before
discretizing through SAX.
dataset2 : array-like (default: None)
Another dataset of time series. If `None`, self-similarity of
`dataset1` is returned.
n_jobs : int or None, optional (default=None)
The number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See scikit-learns'
`Glossary <https://scikit-learn.org/stable/glossary.html#term-n-jobs>`__
for more details.
verbose : int, optional (default=0)
The verbosity level: if non zero, progress messages are printed.
Above 50, the output is sent to stdout.
The frequency of the messages increases with the verbosity level.
If it more than 10, all iterations are reported.
`Glossary <https://joblib.readthedocs.io/en/latest/parallel.html#parallel-reference-documentation>`__
for more details.
Returns
-------
cdist : numpy.ndarray
Cross-similarity matrix
References
----------
.. [1] Lin, Jessica, et al. "Experiencing SAX: a novel symbolic
representation of time series." Data Mining and knowledge
discovery 15.2 (2007): 107-144.
""" # noqa: E501
return _cdist_generic(cydist_sax, dataset1, dataset2, n_jobs, verbose,
False, int, breakpoints_avg, size_fitted)
| 36.898305 | 109 | 0.66881 | from .utils import _cdist_generic
from .cysax import cydist_sax
__author__ = 'Romain Tavenard romain.tavenard[at]univ-rennes2.fr'
def cdist_sax(dataset1, breakpoints_avg, size_fitted, dataset2=None,
n_jobs=None, verbose=0):
return _cdist_generic(cydist_sax, dataset1, dataset2, n_jobs, verbose,
False, int, breakpoints_avg, size_fitted)
| true | true |
f7fcd07c385c54db18a51dac00e4537cf5d590f6 | 4,792 | py | Python | tests/orchestrator/orchestrator_test_utils.py | priyaananthasankar/azure-functions-durable-python-1 | 5494962583ff12794d3c856f098e0bded8d74069 | [
"MIT"
] | null | null | null | tests/orchestrator/orchestrator_test_utils.py | priyaananthasankar/azure-functions-durable-python-1 | 5494962583ff12794d3c856f098e0bded8d74069 | [
"MIT"
] | null | null | null | tests/orchestrator/orchestrator_test_utils.py | priyaananthasankar/azure-functions-durable-python-1 | 5494962583ff12794d3c856f098e0bded8d74069 | [
"MIT"
] | null | null | null | import json
from typing import Callable, Iterator, Any, Dict, List
from jsonschema import validate
from azure.durable_functions.models import DurableOrchestrationContext, DurableEntityContext
from azure.durable_functions.orchestrator import Orchestrator
from azure.durable_functions.entity import Entity
from .schemas.OrchetrationStateSchema import schema
def assert_orchestration_state_equals(expected, result):
"""Ensure that the observable OrchestratorState matches the expected result.
"""
assert_attribute_equal(expected, result, "isDone")
assert_attribute_equal(expected, result, "schemaVersion")
assert_actions_are_equal(expected, result)
assert_attribute_equal(expected, result, "output")
assert_attribute_equal(expected, result, "error")
assert_attribute_equal(expected, result, "customStatus")
def assert_entity_state_equals(expected, result):
"""Ensure the that the observable EntityState json matches the expected result.
"""
assert_attribute_equal(expected, result,"entityExists")
assert "results" in result
observed_results = result["results"]
expected_results = expected["results"]
assert_results_are_equal(expected_results, observed_results)
assert_attribute_equal(expected, result, "entityState")
assert_attribute_equal(expected, result, "signals")
def assert_results_are_equal(expected: Dict[str, Any], result: Dict[str, Any]) -> bool:
assert_attribute_equal(expected, result, "result")
assert_attribute_equal(expected, result, "isError")
def assert_attribute_equal(expected, result, attribute):
if attribute in expected:
assert result.get(attribute) == expected.get(attribute)
else:
assert attribute not in result
def assert_actions_are_equal(expected, result):
expected_actions = expected.get("actions")
result_actions = result.get("actions")
assert len(expected_actions) == len(result_actions)
for index in range(len(expected_actions)):
assert len(expected_actions[index]) == len(result_actions[index])
for action_index in range(len(expected_actions[index])):
expected_action = expected_actions[index][action_index]
result_action = result_actions[index][action_index]
assert_action_is_equal(expected_action, result_action)
def assert_action_is_equal(expected_action, result_action):
assert_attribute_equal(expected_action, result_action, "functionName")
assert_attribute_equal(expected_action, result_action, "input")
assert_attribute_equal(expected_action, result_action, "actionType")
def get_orchestration_state_result(
context_builder,
activity_func: Callable[[DurableOrchestrationContext], Iterator[Any]]):
context_as_string = context_builder.to_json_string()
orchestrator = Orchestrator(activity_func)
result_of_handle = orchestrator.handle(
DurableOrchestrationContext.from_json(context_as_string))
result = json.loads(result_of_handle)
return result
def get_entity_state_result(
context_builder: DurableEntityContext,
user_code: Callable[[DurableEntityContext], Any],
) -> Dict[str, Any]:
"""Simulate the result of running the entity function with the provided context and batch.
Parameters
----------
context_builder: DurableEntityContext
A mocked entity context
user_code: Callable[[DurableEntityContext], Any]
A function implementing an entity
Returns:
-------
Dict[str, Any]:
JSON-response of the entity
"""
# The durable-extension automatically wraps the data within a 'self' key
context_as_string = context_builder.to_json_string()
entity = Entity(user_code)
context, batch = DurableEntityContext.from_json(context_as_string)
result_of_handle = entity.handle(context, batch)
result = json.loads(result_of_handle)
return result
def get_orchestration_property(
context_builder,
activity_func: Callable[[DurableOrchestrationContext], Iterator[Any]],
prop: str):
context_as_string = context_builder.to_json_string()
orchestrator = Orchestrator(activity_func)
result_of_handle = orchestrator.handle(
DurableOrchestrationContext.from_json(context_as_string))
result = getattr(orchestrator, prop)
return result
def assert_valid_schema(orchestration_state):
validation_results = validate(instance=orchestration_state, schema=schema)
assert validation_results is None
def assert_dict_are_equal(expected: Dict[Any, Any], result: Dict[Any, Any]):
assert len(expected.keys()) == len(result.keys())
for key in expected.keys():
assert expected[key] == result[key]
for key in result.keys():
assert result[key] == expected[key]
| 39.603306 | 94 | 0.750417 | import json
from typing import Callable, Iterator, Any, Dict, List
from jsonschema import validate
from azure.durable_functions.models import DurableOrchestrationContext, DurableEntityContext
from azure.durable_functions.orchestrator import Orchestrator
from azure.durable_functions.entity import Entity
from .schemas.OrchetrationStateSchema import schema
def assert_orchestration_state_equals(expected, result):
assert_attribute_equal(expected, result, "isDone")
assert_attribute_equal(expected, result, "schemaVersion")
assert_actions_are_equal(expected, result)
assert_attribute_equal(expected, result, "output")
assert_attribute_equal(expected, result, "error")
assert_attribute_equal(expected, result, "customStatus")
def assert_entity_state_equals(expected, result):
assert_attribute_equal(expected, result,"entityExists")
assert "results" in result
observed_results = result["results"]
expected_results = expected["results"]
assert_results_are_equal(expected_results, observed_results)
assert_attribute_equal(expected, result, "entityState")
assert_attribute_equal(expected, result, "signals")
def assert_results_are_equal(expected: Dict[str, Any], result: Dict[str, Any]) -> bool:
assert_attribute_equal(expected, result, "result")
assert_attribute_equal(expected, result, "isError")
def assert_attribute_equal(expected, result, attribute):
if attribute in expected:
assert result.get(attribute) == expected.get(attribute)
else:
assert attribute not in result
def assert_actions_are_equal(expected, result):
expected_actions = expected.get("actions")
result_actions = result.get("actions")
assert len(expected_actions) == len(result_actions)
for index in range(len(expected_actions)):
assert len(expected_actions[index]) == len(result_actions[index])
for action_index in range(len(expected_actions[index])):
expected_action = expected_actions[index][action_index]
result_action = result_actions[index][action_index]
assert_action_is_equal(expected_action, result_action)
def assert_action_is_equal(expected_action, result_action):
assert_attribute_equal(expected_action, result_action, "functionName")
assert_attribute_equal(expected_action, result_action, "input")
assert_attribute_equal(expected_action, result_action, "actionType")
def get_orchestration_state_result(
context_builder,
activity_func: Callable[[DurableOrchestrationContext], Iterator[Any]]):
context_as_string = context_builder.to_json_string()
orchestrator = Orchestrator(activity_func)
result_of_handle = orchestrator.handle(
DurableOrchestrationContext.from_json(context_as_string))
result = json.loads(result_of_handle)
return result
def get_entity_state_result(
context_builder: DurableEntityContext,
user_code: Callable[[DurableEntityContext], Any],
) -> Dict[str, Any]:
context_as_string = context_builder.to_json_string()
entity = Entity(user_code)
context, batch = DurableEntityContext.from_json(context_as_string)
result_of_handle = entity.handle(context, batch)
result = json.loads(result_of_handle)
return result
def get_orchestration_property(
context_builder,
activity_func: Callable[[DurableOrchestrationContext], Iterator[Any]],
prop: str):
context_as_string = context_builder.to_json_string()
orchestrator = Orchestrator(activity_func)
result_of_handle = orchestrator.handle(
DurableOrchestrationContext.from_json(context_as_string))
result = getattr(orchestrator, prop)
return result
def assert_valid_schema(orchestration_state):
validation_results = validate(instance=orchestration_state, schema=schema)
assert validation_results is None
def assert_dict_are_equal(expected: Dict[Any, Any], result: Dict[Any, Any]):
    """Assert that two dictionaries hold exactly the same key/value pairs.

    Raises AssertionError (never KeyError) on any mismatch so test
    failures surface uniformly. The previous reverse-direction loop was
    redundant: equal lengths plus every expected key matching in result
    already imply the two key sets are identical.
    """
    assert len(expected) == len(result)
    for key, value in expected.items():
        # Check membership explicitly so a missing key fails the assert
        # instead of raising KeyError.
        assert key in result
        assert result[key] == value
| true | true |
f7fcd19b730da61f68f260162f74a6b533ba7c7b | 5,640 | py | Python | configs/representation/ssb/ssb_r18_nc_sgd_cos_100e_r5_8x8x1_k400.py | happywu/mmaction2-CycleContrast | 019734e471dffd1161b7a9c617ba862d2349a96c | [
"Apache-2.0"
] | null | null | null | configs/representation/ssb/ssb_r18_nc_sgd_cos_100e_r5_8x8x1_k400.py | happywu/mmaction2-CycleContrast | 019734e471dffd1161b7a9c617ba862d2349a96c | [
"Apache-2.0"
] | null | null | null | configs/representation/ssb/ssb_r18_nc_sgd_cos_100e_r5_8x8x1_k400.py | happywu/mmaction2-CycleContrast | 019734e471dffd1161b7a9c617ba862d2349a96c | [
"Apache-2.0"
] | null | null | null | # model settings
temperature = 0.2
with_norm = True
query_dim = 128
model = dict(
type='SimSiamBaseTracker',
backbone=dict(
type='ResNet',
pretrained=None,
depth=18,
out_indices=(3, ),
# strides=(1, 2, 1, 1),
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
zero_init_residual=True),
# cls_head=None,
# patch_head=None,
img_head=dict(
type='SimSiamHead',
in_channels=512,
norm_cfg=dict(type='SyncBN'),
num_projection_fcs=3,
projection_mid_channels=512,
projection_out_channels=512,
num_predictor_fcs=2,
predictor_mid_channels=128,
predictor_out_channels=512,
with_norm=True,
loss_feat=dict(type='CosineSimLoss', negative=False),
spatial_type='avg'))
# model training and testing settings
train_cfg = dict(intra_video=True)
test_cfg = dict(
precede_frames=20,
topk=10,
temperature=0.2,
strides=(1, 2, 1, 1),
out_indices=(2, 3),
neighbor_range=24,
with_first=True,
with_first_neighbor=True,
output_dir='eval_results')
# dataset settings
dataset_type = 'VideoDataset'
dataset_type_val = 'DavisDataset'
data_prefix = 'data/kinetics400/videos_train'
ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt'
data_prefix_val = 'data/davis/DAVIS/JPEGImages/480p'
anno_prefix_val = 'data/davis/DAVIS/Annotations/480p'
data_root_val = 'data/davis/DAVIS'
ann_file_val = 'data/davis/DAVIS/ImageSets/davis2017_val_list_rawframes.txt'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
dict(type='DecordInit'),
dict(
type='SampleFrames',
clip_len=8,
frame_interval=8,
num_clips=1,
out_of_bound_opt='repeat_last'),
dict(type='Clip2Frame', clip_len=4),
# dict(type='DuplicateFrames', times=2),
dict(type='DecordDecode'),
dict(
type='RandomResizedCrop',
area_range=(0.2, 1.),
same_across_clip=False,
same_on_clip=False),
dict(type='Resize', scale=(224, 224), keep_ratio=False),
dict(
type='Flip',
flip_ratio=0.5,
same_across_clip=False,
same_on_clip=False),
# dict(
# type='ColorJitter',
# brightness=0.4,
# contrast=0.4,
# saturation=0.4,
# hue=0.1,
# p=0.8,
# same_across_clip=False,
# same_on_clip=False),
# dict(
# type='RandomGrayScale',
# p=0.2,
# same_across_clip=False,
# same_on_clip=False),
# dict(
# type='RandomGaussianBlur',
# p=0.5,
# same_across_clip=False,
# same_on_clip=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
dict(type='SequentialSampleFrames', frame_interval=1),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 480), keep_ratio=True),
dict(type='Flip', flip_ratio=0),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(
type='Collect',
keys=['imgs', 'ref_seg_map'],
meta_keys=('frame_dir', 'frame_inds', 'original_shape', 'seg_map')),
dict(type='ToTensor', keys=['imgs', 'ref_seg_map'])
]
data = dict(
videos_per_gpu=32,
workers_per_gpu=16,
val_workers_per_gpu=1,
train=dict(
type='RepeatDataset',
times=5,
dataset=dict(
type=dataset_type,
ann_file=ann_file_train,
data_prefix=data_prefix,
pipeline=train_pipeline)),
val=dict(
type=dataset_type_val,
ann_file=ann_file_val,
data_prefix=data_prefix_val,
data_root=data_root_val,
anno_prefix=anno_prefix_val,
pipeline=val_pipeline,
test_mode=True),
test=dict(
type=dataset_type_val,
ann_file=ann_file_val,
data_prefix=data_prefix_val,
data_root=data_root_val,
anno_prefix=anno_prefix_val,
pipeline=val_pipeline,
test_mode=True))
# optimizer
# optimizer = dict(type='Adam', lr=1e-4)
optimizer = dict(type='SGD', lr=0.05, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0, by_epoch=False)
# lr_config = dict(policy='Fixed')
# lr_config = dict(
# policy='step',
# warmup='linear',
# warmup_iters=100,
# warmup_ratio=0.001,
# step=[1, 2])
total_epochs = 100
checkpoint_config = dict(interval=1)
evaluation = dict(
interval=1,
metrics='davis',
key_indicator='feat_1.J&F-Mean',
rule='greater')
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook'),
dict(
type='WandbLoggerHook',
init_kwargs=dict(
project='mmaction2',
name='{{fileBasenameNoExtension}}',
resume=True,
tags=['ssb'],
dir='wandb/{{fileBasenameNoExtension}}',
config=dict(
model=model,
train_cfg=train_cfg,
test_cfg=test_cfg,
data=data))),
])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
find_unused_parameters = False
| 30.160428 | 78 | 0.616312 |
temperature = 0.2
with_norm = True
query_dim = 128
model = dict(
type='SimSiamBaseTracker',
backbone=dict(
type='ResNet',
pretrained=None,
depth=18,
out_indices=(3, ),
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
zero_init_residual=True),
img_head=dict(
type='SimSiamHead',
in_channels=512,
norm_cfg=dict(type='SyncBN'),
num_projection_fcs=3,
projection_mid_channels=512,
projection_out_channels=512,
num_predictor_fcs=2,
predictor_mid_channels=128,
predictor_out_channels=512,
with_norm=True,
loss_feat=dict(type='CosineSimLoss', negative=False),
spatial_type='avg'))
train_cfg = dict(intra_video=True)
test_cfg = dict(
precede_frames=20,
topk=10,
temperature=0.2,
strides=(1, 2, 1, 1),
out_indices=(2, 3),
neighbor_range=24,
with_first=True,
with_first_neighbor=True,
output_dir='eval_results')
dataset_type = 'VideoDataset'
dataset_type_val = 'DavisDataset'
data_prefix = 'data/kinetics400/videos_train'
ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt'
data_prefix_val = 'data/davis/DAVIS/JPEGImages/480p'
anno_prefix_val = 'data/davis/DAVIS/Annotations/480p'
data_root_val = 'data/davis/DAVIS'
ann_file_val = 'data/davis/DAVIS/ImageSets/davis2017_val_list_rawframes.txt'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
dict(type='DecordInit'),
dict(
type='SampleFrames',
clip_len=8,
frame_interval=8,
num_clips=1,
out_of_bound_opt='repeat_last'),
dict(type='Clip2Frame', clip_len=4),
dict(type='DecordDecode'),
dict(
type='RandomResizedCrop',
area_range=(0.2, 1.),
same_across_clip=False,
same_on_clip=False),
dict(type='Resize', scale=(224, 224), keep_ratio=False),
dict(
type='Flip',
flip_ratio=0.5,
same_across_clip=False,
same_on_clip=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
dict(type='SequentialSampleFrames', frame_interval=1),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 480), keep_ratio=True),
dict(type='Flip', flip_ratio=0),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(
type='Collect',
keys=['imgs', 'ref_seg_map'],
meta_keys=('frame_dir', 'frame_inds', 'original_shape', 'seg_map')),
dict(type='ToTensor', keys=['imgs', 'ref_seg_map'])
]
data = dict(
videos_per_gpu=32,
workers_per_gpu=16,
val_workers_per_gpu=1,
train=dict(
type='RepeatDataset',
times=5,
dataset=dict(
type=dataset_type,
ann_file=ann_file_train,
data_prefix=data_prefix,
pipeline=train_pipeline)),
val=dict(
type=dataset_type_val,
ann_file=ann_file_val,
data_prefix=data_prefix_val,
data_root=data_root_val,
anno_prefix=anno_prefix_val,
pipeline=val_pipeline,
test_mode=True),
test=dict(
type=dataset_type_val,
ann_file=ann_file_val,
data_prefix=data_prefix_val,
data_root=data_root_val,
anno_prefix=anno_prefix_val,
pipeline=val_pipeline,
test_mode=True))
optimizer = dict(type='SGD', lr=0.05, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
lr_config = dict(policy='CosineAnnealing', min_lr=0, by_epoch=False)
total_epochs = 100
checkpoint_config = dict(interval=1)
evaluation = dict(
interval=1,
metrics='davis',
key_indicator='feat_1.J&F-Mean',
rule='greater')
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
dict(
type='WandbLoggerHook',
init_kwargs=dict(
project='mmaction2',
name='{{fileBasenameNoExtension}}',
resume=True,
tags=['ssb'],
dir='wandb/{{fileBasenameNoExtension}}',
config=dict(
model=model,
train_cfg=train_cfg,
test_cfg=test_cfg,
data=data))),
])
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
find_unused_parameters = False
| true | true |
f7fcd38e1cc8fb21f3696adb053153039060e030 | 6,249 | py | Python | kademlia/crawling.py | Ketherz/kademlia | 1df52909f8cee33b659ac4e60c1acf1e88d118a6 | [
"MIT"
] | 745 | 2015-01-05T13:38:18.000Z | 2022-03-30T11:43:42.000Z | kademlia/crawling.py | Ketherz/kademlia | 1df52909f8cee33b659ac4e60c1acf1e88d118a6 | [
"MIT"
] | 86 | 2015-06-04T01:35:12.000Z | 2021-10-12T12:15:35.000Z | kademlia/crawling.py | Ketherz/kademlia | 1df52909f8cee33b659ac4e60c1acf1e88d118a6 | [
"MIT"
] | 234 | 2015-01-09T18:16:13.000Z | 2022-01-30T22:03:03.000Z | from collections import Counter
import logging
from kademlia.node import Node, NodeHeap
from kademlia.utils import gather_dict
log = logging.getLogger(__name__) # pylint: disable=invalid-name
# pylint: disable=too-few-public-methods
class SpiderCrawl:
    """Base class for iterative lookups of 160-bit keys across the network."""

    def __init__(self, protocol, node, peers, ksize, alpha):
        """Set up a crawl for *node* seeded by the given entry *peers*.

        Args:
            protocol: the :class:`~kademlia.protocol.KademliaProtocol`
                instance used to issue RPCs.
            node: a :class:`~kademlia.node.Node` representing the key we
                are looking for.
            peers: initial :class:`~kademlia.node.Node` contacts that seed
                the search.
            ksize: the k parameter from the kademlia paper.
            alpha: the alpha parameter from the kademlia paper.
        """
        self.protocol = protocol
        self.node = node
        self.ksize = ksize
        self.alpha = alpha
        self.last_ids_crawled = []
        self.nearest = NodeHeap(self.node, self.ksize)
        log.info("creating spider with peers: %s", peers)
        self.nearest.push(peers)

    async def _find(self, rpcmethod):
        """Run one query round with *rpcmethod* and delegate the results.

        Each round queries up to alpha of the nearest not-yet-contacted
        peers, adding what they return to the nearest list. If the nearest
        list did not change since the previous round, the round instead
        queries every remaining uncontacted peer. The gathered responses
        are handed to :meth:`_nodes_found`, which recurses until the
        nearest list has been fully contacted.
        """
        log.info("crawling network with nearest: %s", str(tuple(self.nearest)))
        current_ids = self.nearest.get_ids()
        if current_ids == self.last_ids_crawled:
            # No progress last round: widen the query to everyone left.
            query_count = len(self.nearest)
        else:
            query_count = self.alpha
        self.last_ids_crawled = current_ids
        pending = {}
        for peer in self.nearest.get_uncontacted()[:query_count]:
            pending[peer.id] = rpcmethod(peer, self.node)
            self.nearest.mark_contacted(peer)
        responses = await gather_dict(pending)
        return await self._nodes_found(responses)

    async def _nodes_found(self, responses):
        raise NotImplementedError
class ValueSpiderCrawl(SpiderCrawl):
    """Crawl that resolves a key to the value stored for it."""

    def __init__(self, protocol, node, peers, ksize, alpha):
        super().__init__(protocol, node, peers, ksize, alpha)
        # Track the single closest node that did NOT hold the value so we
        # can replicate the key there once found (paper section 2.3).
        self.nearest_without_value = NodeHeap(self.node, 1)

    async def find(self):
        """Resolve the key: return its value, or ``None`` if nobody has it."""
        return await self._find(self.protocol.call_find_value)

    async def _nodes_found(self, responses):
        """Fold one round of find-value responses into the crawl state."""
        unresponsive = []
        values = []
        for peer_id, raw in responses.items():
            reply = RPCFindResponse(raw)
            if not reply.happened():
                unresponsive.append(peer_id)
            elif reply.has_value():
                values.append(reply.get_value())
            else:
                # Peer answered with more nodes, not the value.
                self.nearest_without_value.push(self.nearest.get_node(peer_id))
                self.nearest.push(reply.get_node_list())
        self.nearest.remove(unresponsive)
        if values:
            return await self._handle_found_values(values)
        if self.nearest.have_contacted_all():
            # Everyone answered and nobody had the value.
            return None
        return await self.find()

    async def _handle_found_values(self, values):
        """Pick the most common reported value, replicate it to the
        nearest node that lacked it, and return it."""
        tally = Counter(values)
        if len(tally) != 1:
            log.warning("Got multiple values for key %i: %s",
                        self.node.long_id, str(values))
        value = tally.most_common(1)[0][0]
        peer = self.nearest_without_value.popleft()
        if peer:
            await self.protocol.call_store(peer, self.node.id, value)
        return value
class NodeSpiderCrawl(SpiderCrawl):
    """Crawl that locates the closest nodes to a key."""

    async def find(self):
        """Return the closest nodes found for the target key."""
        return await self._find(self.protocol.call_find_node)

    async def _nodes_found(self, responses):
        """Fold one round of find-node responses into the nearest heap."""
        unresponsive = []
        for peer_id, raw in responses.items():
            reply = RPCFindResponse(raw)
            if reply.happened():
                self.nearest.push(reply.get_node_list())
            else:
                unresponsive.append(peer_id)
        self.nearest.remove(unresponsive)
        if self.nearest.have_contacted_all():
            return list(self.nearest)
        return await self.find()
class RPCFindResponse:
    """Interpret the (happened, payload) tuple an RPC find call returns."""

    def __init__(self, response):
        """Wrap a raw RPC result.

        Args:
            response: a tuple of (<response received>, <value>) where
                <value> is a list of node tuples when the key was not
                found, or a dict of {'value': v} when it was.
        """
        self.response = response

    def happened(self):
        """Return whether the remote host actually responded."""
        return self.response[0]

    def has_value(self):
        """Return True when the payload carries a found value."""
        return isinstance(self.response[1], dict)

    def get_value(self):
        """Return the found value from the payload."""
        return self.response[1]['value']

    def get_node_list(self):
        """Return the payload's node tuples as Node instances.

        Yields an empty list when the payload is empty or absent.
        """
        entries = self.response[1] or []
        return [Node(*entry) for entry in entries]
| 33.961957 | 79 | 0.601056 | from collections import Counter
import logging
from kademlia.node import Node, NodeHeap
from kademlia.utils import gather_dict
log = logging.getLogger(__name__)
class SpiderCrawl:
def __init__(self, protocol, node, peers, ksize, alpha):
self.protocol = protocol
self.ksize = ksize
self.alpha = alpha
self.node = node
self.nearest = NodeHeap(self.node, self.ksize)
self.last_ids_crawled = []
log.info("creating spider with peers: %s", peers)
self.nearest.push(peers)
async def _find(self, rpcmethod):
log.info("crawling network with nearest: %s", str(tuple(self.nearest)))
count = self.alpha
if self.nearest.get_ids() == self.last_ids_crawled:
count = len(self.nearest)
self.last_ids_crawled = self.nearest.get_ids()
dicts = {}
for peer in self.nearest.get_uncontacted()[:count]:
dicts[peer.id] = rpcmethod(peer, self.node)
self.nearest.mark_contacted(peer)
found = await gather_dict(dicts)
return await self._nodes_found(found)
async def _nodes_found(self, responses):
raise NotImplementedError
class ValueSpiderCrawl(SpiderCrawl):
def __init__(self, protocol, node, peers, ksize, alpha):
SpiderCrawl.__init__(self, protocol, node, peers, ksize, alpha)
self.nearest_without_value = NodeHeap(self.node, 1)
async def find(self):
return await self._find(self.protocol.call_find_value)
async def _nodes_found(self, responses):
toremove = []
found_values = []
for peerid, response in responses.items():
response = RPCFindResponse(response)
if not response.happened():
toremove.append(peerid)
elif response.has_value():
found_values.append(response.get_value())
else:
peer = self.nearest.get_node(peerid)
self.nearest_without_value.push(peer)
self.nearest.push(response.get_node_list())
self.nearest.remove(toremove)
if found_values:
return await self._handle_found_values(found_values)
if self.nearest.have_contacted_all():
return None
return await self.find()
async def _handle_found_values(self, values):
value_counts = Counter(values)
if len(value_counts) != 1:
log.warning("Got multiple values for key %i: %s",
self.node.long_id, str(values))
value = value_counts.most_common(1)[0][0]
peer = self.nearest_without_value.popleft()
if peer:
await self.protocol.call_store(peer, self.node.id, value)
return value
class NodeSpiderCrawl(SpiderCrawl):
async def find(self):
return await self._find(self.protocol.call_find_node)
async def _nodes_found(self, responses):
toremove = []
for peerid, response in responses.items():
response = RPCFindResponse(response)
if not response.happened():
toremove.append(peerid)
else:
self.nearest.push(response.get_node_list())
self.nearest.remove(toremove)
if self.nearest.have_contacted_all():
return list(self.nearest)
return await self.find()
class RPCFindResponse:
def __init__(self, response):
self.response = response
def happened(self):
return self.response[0]
def has_value(self):
return isinstance(self.response[1], dict)
def get_value(self):
return self.response[1]['value']
def get_node_list(self):
nodelist = self.response[1] or []
return [Node(*nodeple) for nodeple in nodelist]
| true | true |
f7fcd3baf0a28568187795a29f36ab0bff17aaae | 50 | py | Python | add 2 numbers.py | SimranSwain/Days_of_Code | bce154a40c816ddef70a7262a9ed0c7bc6d1bed3 | [
"MIT"
] | null | null | null | add 2 numbers.py | SimranSwain/Days_of_Code | bce154a40c816ddef70a7262a9ed0c7bc6d1bed3 | [
"MIT"
] | null | null | null | add 2 numbers.py | SimranSwain/Days_of_Code | bce154a40c816ddef70a7262a9ed0c7bc6d1bed3 | [
"MIT"
] | null | null | null | x,y=input().split()
sum=int(x)+int(y)
print(sum) | 16.666667 | 20 | 0.62 | x,y=input().split()
sum=int(x)+int(y)
print(sum) | true | true |
f7fcd4435f07b49fbaf12abc8035608cae23a7a9 | 41,617 | py | Python | google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py | mf2199/python-bigtable | ee3a6c4c5f810fab08671db3407195864ecc1972 | [
"Apache-2.0"
] | 34 | 2020-07-27T19:14:01.000Z | 2022-03-31T14:46:53.000Z | google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py | mf2199/python-bigtable | ee3a6c4c5f810fab08671db3407195864ecc1972 | [
"Apache-2.0"
] | 254 | 2020-01-31T23:44:06.000Z | 2022-03-23T22:52:49.000Z | google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py | mf2199/python-bigtable | ee3a6c4c5f810fab08671db3407195864ecc1972 | [
"Apache-2.0"
] | 30 | 2020-01-31T20:45:34.000Z | 2022-03-23T19:56:42.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.bigtable_admin_v2.types import bigtable_table_admin
from google.cloud.bigtable_admin_v2.types import table
from google.cloud.bigtable_admin_v2.types import table as gba_table
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from .base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO
from .grpc import BigtableTableAdminGrpcTransport
class BigtableTableAdminGrpcAsyncIOTransport(BigtableTableAdminTransport):
"""gRPC AsyncIO backend transport for BigtableTableAdmin.
Service for creating, configuring, and deleting Cloud
Bigtable tables.
Provides access to the table schemas only, not the data stored
within the tables.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "bigtableadmin.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "bigtableadmin.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def create_table(
self,
) -> Callable[
[bigtable_table_admin.CreateTableRequest], Awaitable[gba_table.Table]
]:
r"""Return a callable for the create table method over gRPC.
Creates a new table in the specified instance.
The table can be created with a full set of initial
column families, specified in the request.
Returns:
Callable[[~.CreateTableRequest],
Awaitable[~.Table]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_table" not in self._stubs:
self._stubs["create_table"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable",
request_serializer=bigtable_table_admin.CreateTableRequest.serialize,
response_deserializer=gba_table.Table.deserialize,
)
return self._stubs["create_table"]
@property
def create_table_from_snapshot(
self,
) -> Callable[
[bigtable_table_admin.CreateTableFromSnapshotRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the create table from snapshot method over gRPC.
Creates a new table from the specified snapshot. The
target table must not exist. The snapshot and the table
must be in the same instance.
Note: This is a private alpha release of Cloud Bigtable
snapshots. This feature is not currently available to
most Cloud Bigtable customers. This feature might be
changed in backward-incompatible ways and is not
recommended for production use. It is not subject to any
SLA or deprecation policy.
Returns:
Callable[[~.CreateTableFromSnapshotRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_table_from_snapshot" not in self._stubs:
self._stubs["create_table_from_snapshot"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot",
request_serializer=bigtable_table_admin.CreateTableFromSnapshotRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_table_from_snapshot"]
@property
def list_tables(
self,
) -> Callable[
[bigtable_table_admin.ListTablesRequest],
Awaitable[bigtable_table_admin.ListTablesResponse],
]:
r"""Return a callable for the list tables method over gRPC.
Lists all tables served from a specified instance.
Returns:
Callable[[~.ListTablesRequest],
Awaitable[~.ListTablesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_tables" not in self._stubs:
self._stubs["list_tables"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/ListTables",
request_serializer=bigtable_table_admin.ListTablesRequest.serialize,
response_deserializer=bigtable_table_admin.ListTablesResponse.deserialize,
)
return self._stubs["list_tables"]
@property
def get_table(
self,
) -> Callable[[bigtable_table_admin.GetTableRequest], Awaitable[table.Table]]:
r"""Return a callable for the get table method over gRPC.
Gets metadata information about the specified table.
Returns:
Callable[[~.GetTableRequest],
Awaitable[~.Table]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_table" not in self._stubs:
self._stubs["get_table"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/GetTable",
request_serializer=bigtable_table_admin.GetTableRequest.serialize,
response_deserializer=table.Table.deserialize,
)
return self._stubs["get_table"]
@property
def delete_table(
self,
) -> Callable[
[bigtable_table_admin.DeleteTableRequest], Awaitable[empty_pb2.Empty]
]:
r"""Return a callable for the delete table method over gRPC.
Permanently deletes a specified table and all of its
data.
Returns:
Callable[[~.DeleteTableRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_table" not in self._stubs:
self._stubs["delete_table"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable",
request_serializer=bigtable_table_admin.DeleteTableRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_table"]
@property
def modify_column_families(
self,
) -> Callable[
[bigtable_table_admin.ModifyColumnFamiliesRequest], Awaitable[table.Table]
]:
r"""Return a callable for the modify column families method over gRPC.
Performs a series of column family modifications on
the specified table. Either all or none of the
modifications will occur before this method returns, but
data requests received prior to that point may see a
table where only some modifications have taken effect.
Returns:
Callable[[~.ModifyColumnFamiliesRequest],
Awaitable[~.Table]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "modify_column_families" not in self._stubs:
self._stubs["modify_column_families"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies",
request_serializer=bigtable_table_admin.ModifyColumnFamiliesRequest.serialize,
response_deserializer=table.Table.deserialize,
)
return self._stubs["modify_column_families"]
@property
def drop_row_range(
self,
) -> Callable[
[bigtable_table_admin.DropRowRangeRequest], Awaitable[empty_pb2.Empty]
]:
r"""Return a callable for the drop row range method over gRPC.
Permanently drop/delete a row range from a specified
table. The request can specify whether to delete all
rows in a table, or only those that match a particular
prefix.
Returns:
Callable[[~.DropRowRangeRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "drop_row_range" not in self._stubs:
self._stubs["drop_row_range"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange",
request_serializer=bigtable_table_admin.DropRowRangeRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["drop_row_range"]
@property
def generate_consistency_token(
self,
) -> Callable[
[bigtable_table_admin.GenerateConsistencyTokenRequest],
Awaitable[bigtable_table_admin.GenerateConsistencyTokenResponse],
]:
r"""Return a callable for the generate consistency token method over gRPC.
Generates a consistency token for a Table, which can
be used in CheckConsistency to check whether mutations
to the table that finished before this call started have
been replicated. The tokens will be available for 90
days.
Returns:
Callable[[~.GenerateConsistencyTokenRequest],
Awaitable[~.GenerateConsistencyTokenResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "generate_consistency_token" not in self._stubs:
self._stubs["generate_consistency_token"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/GenerateConsistencyToken",
request_serializer=bigtable_table_admin.GenerateConsistencyTokenRequest.serialize,
response_deserializer=bigtable_table_admin.GenerateConsistencyTokenResponse.deserialize,
)
return self._stubs["generate_consistency_token"]
@property
def check_consistency(
self,
) -> Callable[
[bigtable_table_admin.CheckConsistencyRequest],
Awaitable[bigtable_table_admin.CheckConsistencyResponse],
]:
r"""Return a callable for the check consistency method over gRPC.
Checks replication consistency based on a consistency
token, that is, if replication has caught up based on
the conditions specified in the token and the check
request.
Returns:
Callable[[~.CheckConsistencyRequest],
Awaitable[~.CheckConsistencyResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "check_consistency" not in self._stubs:
self._stubs["check_consistency"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/CheckConsistency",
request_serializer=bigtable_table_admin.CheckConsistencyRequest.serialize,
response_deserializer=bigtable_table_admin.CheckConsistencyResponse.deserialize,
)
return self._stubs["check_consistency"]
@property
def snapshot_table(
self,
) -> Callable[
[bigtable_table_admin.SnapshotTableRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the snapshot table method over gRPC.
Creates a new snapshot in the specified cluster from
the specified source table. The cluster and the table
must be in the same instance.
Note: This is a private alpha release of Cloud Bigtable
snapshots. This feature is not currently available to
most Cloud Bigtable customers. This feature might be
changed in backward-incompatible ways and is not
recommended for production use. It is not subject to any
SLA or deprecation policy.
Returns:
Callable[[~.SnapshotTableRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "snapshot_table" not in self._stubs:
self._stubs["snapshot_table"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable",
request_serializer=bigtable_table_admin.SnapshotTableRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["snapshot_table"]
@property
def get_snapshot(
self,
) -> Callable[[bigtable_table_admin.GetSnapshotRequest], Awaitable[table.Snapshot]]:
r"""Return a callable for the get snapshot method over gRPC.
Gets metadata information about the specified
snapshot.
Note: This is a private alpha release of Cloud Bigtable
snapshots. This feature is not currently available to
most Cloud Bigtable customers. This feature might be
changed in backward-incompatible ways and is not
recommended for production use. It is not subject to any
SLA or deprecation policy.
Returns:
Callable[[~.GetSnapshotRequest],
Awaitable[~.Snapshot]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_snapshot" not in self._stubs:
self._stubs["get_snapshot"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/GetSnapshot",
request_serializer=bigtable_table_admin.GetSnapshotRequest.serialize,
response_deserializer=table.Snapshot.deserialize,
)
return self._stubs["get_snapshot"]
@property
def list_snapshots(
self,
) -> Callable[
[bigtable_table_admin.ListSnapshotsRequest],
Awaitable[bigtable_table_admin.ListSnapshotsResponse],
]:
r"""Return a callable for the list snapshots method over gRPC.
Lists all snapshots associated with the specified
cluster.
Note: This is a private alpha release of Cloud Bigtable
snapshots. This feature is not currently available to
most Cloud Bigtable customers. This feature might be
changed in backward-incompatible ways and is not
recommended for production use. It is not subject to any
SLA or deprecation policy.
Returns:
Callable[[~.ListSnapshotsRequest],
Awaitable[~.ListSnapshotsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_snapshots" not in self._stubs:
self._stubs["list_snapshots"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/ListSnapshots",
request_serializer=bigtable_table_admin.ListSnapshotsRequest.serialize,
response_deserializer=bigtable_table_admin.ListSnapshotsResponse.deserialize,
)
return self._stubs["list_snapshots"]
@property
def delete_snapshot(
self,
) -> Callable[
[bigtable_table_admin.DeleteSnapshotRequest], Awaitable[empty_pb2.Empty]
]:
r"""Return a callable for the delete snapshot method over gRPC.
Permanently deletes the specified snapshot.
Note: This is a private alpha release of Cloud Bigtable
snapshots. This feature is not currently available to
most Cloud Bigtable customers. This feature might be
changed in backward-incompatible ways and is not
recommended for production use. It is not subject to any
SLA or deprecation policy.
Returns:
Callable[[~.DeleteSnapshotRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_snapshot" not in self._stubs:
self._stubs["delete_snapshot"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot",
request_serializer=bigtable_table_admin.DeleteSnapshotRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_snapshot"]
@property
def create_backup(
self,
) -> Callable[
[bigtable_table_admin.CreateBackupRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the create backup method over gRPC.
Starts creating a new Cloud Bigtable Backup. The returned backup
[long-running operation][google.longrunning.Operation] can be
used to track creation of the backup. The
[metadata][google.longrunning.Operation.metadata] field type is
[CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata].
The [response][google.longrunning.Operation.response] field type
is [Backup][google.bigtable.admin.v2.Backup], if successful.
Cancelling the returned operation will stop the creation and
delete the backup.
Returns:
Callable[[~.CreateBackupRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_backup" not in self._stubs:
self._stubs["create_backup"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/CreateBackup",
request_serializer=bigtable_table_admin.CreateBackupRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_backup"]
@property
def get_backup(
self,
) -> Callable[[bigtable_table_admin.GetBackupRequest], Awaitable[table.Backup]]:
r"""Return a callable for the get backup method over gRPC.
Gets metadata on a pending or completed Cloud
Bigtable Backup.
Returns:
Callable[[~.GetBackupRequest],
Awaitable[~.Backup]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_backup" not in self._stubs:
self._stubs["get_backup"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/GetBackup",
request_serializer=bigtable_table_admin.GetBackupRequest.serialize,
response_deserializer=table.Backup.deserialize,
)
return self._stubs["get_backup"]
@property
def update_backup(
self,
) -> Callable[[bigtable_table_admin.UpdateBackupRequest], Awaitable[table.Backup]]:
r"""Return a callable for the update backup method over gRPC.
Updates a pending or completed Cloud Bigtable Backup.
Returns:
Callable[[~.UpdateBackupRequest],
Awaitable[~.Backup]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_backup" not in self._stubs:
self._stubs["update_backup"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/UpdateBackup",
request_serializer=bigtable_table_admin.UpdateBackupRequest.serialize,
response_deserializer=table.Backup.deserialize,
)
return self._stubs["update_backup"]
@property
def delete_backup(
self,
) -> Callable[
[bigtable_table_admin.DeleteBackupRequest], Awaitable[empty_pb2.Empty]
]:
r"""Return a callable for the delete backup method over gRPC.
Deletes a pending or completed Cloud Bigtable backup.
Returns:
Callable[[~.DeleteBackupRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_backup" not in self._stubs:
self._stubs["delete_backup"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/DeleteBackup",
request_serializer=bigtable_table_admin.DeleteBackupRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_backup"]
@property
def list_backups(
self,
) -> Callable[
[bigtable_table_admin.ListBackupsRequest],
Awaitable[bigtable_table_admin.ListBackupsResponse],
]:
r"""Return a callable for the list backups method over gRPC.
Lists Cloud Bigtable backups. Returns both completed
and pending backups.
Returns:
Callable[[~.ListBackupsRequest],
Awaitable[~.ListBackupsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_backups" not in self._stubs:
self._stubs["list_backups"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/ListBackups",
request_serializer=bigtable_table_admin.ListBackupsRequest.serialize,
response_deserializer=bigtable_table_admin.ListBackupsResponse.deserialize,
)
return self._stubs["list_backups"]
@property
def restore_table(
self,
) -> Callable[
[bigtable_table_admin.RestoreTableRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the restore table method over gRPC.
Create a new table by restoring from a completed backup. The new
table must be in the same project as the instance containing the
backup. The returned table [long-running
operation][google.longrunning.Operation] can be used to track
the progress of the operation, and to cancel it. The
[metadata][google.longrunning.Operation.metadata] field type is
[RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata].
The [response][google.longrunning.Operation.response] type is
[Table][google.bigtable.admin.v2.Table], if successful.
Returns:
Callable[[~.RestoreTableRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "restore_table" not in self._stubs:
self._stubs["restore_table"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/RestoreTable",
request_serializer=bigtable_table_admin.RestoreTableRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["restore_table"]
@property
def get_iam_policy(
self,
) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], Awaitable[policy_pb2.Policy]]:
r"""Return a callable for the get iam policy method over gRPC.
Gets the access control policy for a Table or Backup
resource. Returns an empty policy if the resource exists
but does not have a policy set.
Returns:
Callable[[~.GetIamPolicyRequest],
Awaitable[~.Policy]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_iam_policy" not in self._stubs:
self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/GetIamPolicy",
request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs["get_iam_policy"]
@property
def set_iam_policy(
self,
) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], Awaitable[policy_pb2.Policy]]:
r"""Return a callable for the set iam policy method over gRPC.
Sets the access control policy on a Table or Backup
resource. Replaces any existing policy.
Returns:
Callable[[~.SetIamPolicyRequest],
Awaitable[~.Policy]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "set_iam_policy" not in self._stubs:
self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/SetIamPolicy",
request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs["set_iam_policy"]
@property
def test_iam_permissions(
self,
) -> Callable[
[iam_policy_pb2.TestIamPermissionsRequest],
Awaitable[iam_policy_pb2.TestIamPermissionsResponse],
]:
r"""Return a callable for the test iam permissions method over gRPC.
Returns permissions that the caller has on the
specified Table or Backup resource.
Returns:
Callable[[~.TestIamPermissionsRequest],
Awaitable[~.TestIamPermissionsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "test_iam_permissions" not in self._stubs:
self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/TestIamPermissions",
request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString,
response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString,
)
return self._stubs["test_iam_permissions"]
    def close(self):
        """Close the transport's underlying gRPC channel."""
        return self.grpc_channel.close()
# Explicit public API of this module.
__all__ = ("BigtableTableAdminGrpcAsyncIOTransport",)
| 43.85353 | 104 | 0.645698 |
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials
from google.auth.transport.grpc import SslCredentials
import grpc
from grpc.experimental import aio
from google.cloud.bigtable_admin_v2.types import bigtable_table_admin
from google.cloud.bigtable_admin_v2.types import table
from google.cloud.bigtable_admin_v2.types import table as gba_table
from google.iam.v1 import iam_policy_pb2
from google.iam.v1 import policy_pb2
from google.longrunning import operations_pb2
from google.protobuf import empty_pb2
from .base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO
from .grpc import BigtableTableAdminGrpcTransport
class BigtableTableAdminGrpcAsyncIOTransport(BigtableTableAdminTransport):
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "bigtableadmin.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "bigtableadmin.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
credentials = False
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
return self._operations_client
@property
def create_table(
self,
) -> Callable[
[bigtable_table_admin.CreateTableRequest], Awaitable[gba_table.Table]
]:
if "create_table" not in self._stubs:
self._stubs["create_table"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable",
request_serializer=bigtable_table_admin.CreateTableRequest.serialize,
response_deserializer=gba_table.Table.deserialize,
)
return self._stubs["create_table"]
@property
def create_table_from_snapshot(
self,
) -> Callable[
[bigtable_table_admin.CreateTableFromSnapshotRequest],
Awaitable[operations_pb2.Operation],
]:
if "create_table_from_snapshot" not in self._stubs:
self._stubs["create_table_from_snapshot"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot",
request_serializer=bigtable_table_admin.CreateTableFromSnapshotRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_table_from_snapshot"]
@property
def list_tables(
self,
) -> Callable[
[bigtable_table_admin.ListTablesRequest],
Awaitable[bigtable_table_admin.ListTablesResponse],
]:
if "list_tables" not in self._stubs:
self._stubs["list_tables"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/ListTables",
request_serializer=bigtable_table_admin.ListTablesRequest.serialize,
response_deserializer=bigtable_table_admin.ListTablesResponse.deserialize,
)
return self._stubs["list_tables"]
@property
def get_table(
self,
) -> Callable[[bigtable_table_admin.GetTableRequest], Awaitable[table.Table]]:
if "get_table" not in self._stubs:
self._stubs["get_table"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/GetTable",
request_serializer=bigtable_table_admin.GetTableRequest.serialize,
response_deserializer=table.Table.deserialize,
)
return self._stubs["get_table"]
@property
def delete_table(
self,
) -> Callable[
[bigtable_table_admin.DeleteTableRequest], Awaitable[empty_pb2.Empty]
]:
if "delete_table" not in self._stubs:
self._stubs["delete_table"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable",
request_serializer=bigtable_table_admin.DeleteTableRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_table"]
@property
def modify_column_families(
self,
) -> Callable[
[bigtable_table_admin.ModifyColumnFamiliesRequest], Awaitable[table.Table]
]:
if "modify_column_families" not in self._stubs:
self._stubs["modify_column_families"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies",
request_serializer=bigtable_table_admin.ModifyColumnFamiliesRequest.serialize,
response_deserializer=table.Table.deserialize,
)
return self._stubs["modify_column_families"]
@property
def drop_row_range(
self,
) -> Callable[
[bigtable_table_admin.DropRowRangeRequest], Awaitable[empty_pb2.Empty]
]:
if "drop_row_range" not in self._stubs:
self._stubs["drop_row_range"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange",
request_serializer=bigtable_table_admin.DropRowRangeRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["drop_row_range"]
@property
def generate_consistency_token(
self,
) -> Callable[
[bigtable_table_admin.GenerateConsistencyTokenRequest],
Awaitable[bigtable_table_admin.GenerateConsistencyTokenResponse],
]:
if "generate_consistency_token" not in self._stubs:
self._stubs["generate_consistency_token"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/GenerateConsistencyToken",
request_serializer=bigtable_table_admin.GenerateConsistencyTokenRequest.serialize,
response_deserializer=bigtable_table_admin.GenerateConsistencyTokenResponse.deserialize,
)
return self._stubs["generate_consistency_token"]
@property
def check_consistency(
self,
) -> Callable[
[bigtable_table_admin.CheckConsistencyRequest],
Awaitable[bigtable_table_admin.CheckConsistencyResponse],
]:
if "check_consistency" not in self._stubs:
self._stubs["check_consistency"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/CheckConsistency",
request_serializer=bigtable_table_admin.CheckConsistencyRequest.serialize,
response_deserializer=bigtable_table_admin.CheckConsistencyResponse.deserialize,
)
return self._stubs["check_consistency"]
@property
def snapshot_table(
self,
) -> Callable[
[bigtable_table_admin.SnapshotTableRequest], Awaitable[operations_pb2.Operation]
]:
if "snapshot_table" not in self._stubs:
self._stubs["snapshot_table"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable",
request_serializer=bigtable_table_admin.SnapshotTableRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["snapshot_table"]
@property
def get_snapshot(
self,
) -> Callable[[bigtable_table_admin.GetSnapshotRequest], Awaitable[table.Snapshot]]:
if "get_snapshot" not in self._stubs:
self._stubs["get_snapshot"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/GetSnapshot",
request_serializer=bigtable_table_admin.GetSnapshotRequest.serialize,
response_deserializer=table.Snapshot.deserialize,
)
return self._stubs["get_snapshot"]
@property
def list_snapshots(
self,
) -> Callable[
[bigtable_table_admin.ListSnapshotsRequest],
Awaitable[bigtable_table_admin.ListSnapshotsResponse],
]:
if "list_snapshots" not in self._stubs:
self._stubs["list_snapshots"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/ListSnapshots",
request_serializer=bigtable_table_admin.ListSnapshotsRequest.serialize,
response_deserializer=bigtable_table_admin.ListSnapshotsResponse.deserialize,
)
return self._stubs["list_snapshots"]
@property
def delete_snapshot(
self,
) -> Callable[
[bigtable_table_admin.DeleteSnapshotRequest], Awaitable[empty_pb2.Empty]
]:
if "delete_snapshot" not in self._stubs:
self._stubs["delete_snapshot"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot",
request_serializer=bigtable_table_admin.DeleteSnapshotRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_snapshot"]
@property
def create_backup(
self,
) -> Callable[
[bigtable_table_admin.CreateBackupRequest], Awaitable[operations_pb2.Operation]
]:
if "create_backup" not in self._stubs:
self._stubs["create_backup"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/CreateBackup",
request_serializer=bigtable_table_admin.CreateBackupRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_backup"]
@property
def get_backup(
self,
) -> Callable[[bigtable_table_admin.GetBackupRequest], Awaitable[table.Backup]]:
if "get_backup" not in self._stubs:
self._stubs["get_backup"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/GetBackup",
request_serializer=bigtable_table_admin.GetBackupRequest.serialize,
response_deserializer=table.Backup.deserialize,
)
return self._stubs["get_backup"]
@property
def update_backup(
self,
) -> Callable[[bigtable_table_admin.UpdateBackupRequest], Awaitable[table.Backup]]:
if "update_backup" not in self._stubs:
self._stubs["update_backup"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/UpdateBackup",
request_serializer=bigtable_table_admin.UpdateBackupRequest.serialize,
response_deserializer=table.Backup.deserialize,
)
return self._stubs["update_backup"]
@property
def delete_backup(
self,
) -> Callable[
[bigtable_table_admin.DeleteBackupRequest], Awaitable[empty_pb2.Empty]
]:
if "delete_backup" not in self._stubs:
self._stubs["delete_backup"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/DeleteBackup",
request_serializer=bigtable_table_admin.DeleteBackupRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_backup"]
@property
def list_backups(
self,
) -> Callable[
[bigtable_table_admin.ListBackupsRequest],
Awaitable[bigtable_table_admin.ListBackupsResponse],
]:
if "list_backups" not in self._stubs:
self._stubs["list_backups"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/ListBackups",
request_serializer=bigtable_table_admin.ListBackupsRequest.serialize,
response_deserializer=bigtable_table_admin.ListBackupsResponse.deserialize,
)
return self._stubs["list_backups"]
@property
def restore_table(
self,
) -> Callable[
[bigtable_table_admin.RestoreTableRequest], Awaitable[operations_pb2.Operation]
]:
if "restore_table" not in self._stubs:
self._stubs["restore_table"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/RestoreTable",
request_serializer=bigtable_table_admin.RestoreTableRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["restore_table"]
@property
def get_iam_policy(
self,
) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], Awaitable[policy_pb2.Policy]]:
if "get_iam_policy" not in self._stubs:
self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/GetIamPolicy",
request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs["get_iam_policy"]
@property
def set_iam_policy(
self,
) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], Awaitable[policy_pb2.Policy]]:
if "set_iam_policy" not in self._stubs:
self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/SetIamPolicy",
request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs["set_iam_policy"]
@property
def test_iam_permissions(
self,
) -> Callable[
[iam_policy_pb2.TestIamPermissionsRequest],
Awaitable[iam_policy_pb2.TestIamPermissionsResponse],
]:
if "test_iam_permissions" not in self._stubs:
self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/TestIamPermissions",
request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString,
response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString,
)
return self._stubs["test_iam_permissions"]
def close(self):
return self.grpc_channel.close()
__all__ = ("BigtableTableAdminGrpcAsyncIOTransport",)
| true | true |
f7fcd50af5aae83442cbf89fb1dca61f56cd3e54 | 881 | py | Python | services/dynamic-sidecar/tests/unit/test_api_mocked.py | colinRawlings/osparc-simcore | bf2f18d5bc1e574d5f4c238d08ad15156184c310 | [
"MIT"
] | 25 | 2018-04-13T12:44:12.000Z | 2022-03-12T15:01:17.000Z | services/dynamic-sidecar/tests/unit/test_api_mocked.py | colinRawlings/osparc-simcore | bf2f18d5bc1e574d5f4c238d08ad15156184c310 | [
"MIT"
] | 2,553 | 2018-01-18T17:11:55.000Z | 2022-03-31T16:26:40.000Z | services/dynamic-sidecar/tests/unit/test_api_mocked.py | colinRawlings/osparc-simcore | bf2f18d5bc1e574d5f4c238d08ad15156184c310 | [
"MIT"
] | 20 | 2018-01-18T19:45:33.000Z | 2022-03-29T07:08:47.000Z | # pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
import json
import pytest
from async_asgi_testclient import TestClient
from async_asgi_testclient.response import Response
pytestmark = pytest.mark.asyncio
def assert_200_empty(response: Response) -> bool:
assert response.status_code == 200, response.text
assert json.loads(response.text) == ""
return True
@pytest.mark.parametrize(
"route,method",
[
# push api module
("/push", "POST"),
# retrive api module
("/retrieve", "GET"),
("/retrieve", "POST"),
# state api module
("/state", "GET"),
("/state", "POST"),
],
)
async def test_mocked_modules(test_client: TestClient, route: str, method: str) -> None:
response = await test_client.open(route, method=method)
assert assert_200_empty(response) is True
| 24.472222 | 88 | 0.662883 |
import json
import pytest
from async_asgi_testclient import TestClient
from async_asgi_testclient.response import Response
pytestmark = pytest.mark.asyncio
def assert_200_empty(response: Response) -> bool:
assert response.status_code == 200, response.text
assert json.loads(response.text) == ""
return True
@pytest.mark.parametrize(
"route,method",
[
("/push", "POST"),
("/retrieve", "GET"),
("/retrieve", "POST"),
("/state", "GET"),
("/state", "POST"),
],
)
async def test_mocked_modules(test_client: TestClient, route: str, method: str) -> None:
response = await test_client.open(route, method=method)
assert assert_200_empty(response) is True
| true | true |
f7fcd52a0c4a9306acb7afa6782f59365c264c82 | 599 | py | Python | app/backend/src/couchers/constants.py | foormea/couchers | 4015769e5cdfbb9b9e10460fd979cccc5f203b88 | [
"MIT"
] | 226 | 2020-12-01T23:46:57.000Z | 2022-03-30T20:48:48.000Z | app/backend/src/couchers/constants.py | stifler6005/couchers | bf3d1f0fda0f1734aa83f18959639cb64ee2a25d | [
"MIT"
] | 1,713 | 2020-10-06T14:20:02.000Z | 2022-03-31T17:22:49.000Z | app/backend/src/couchers/constants.py | stifler6005/couchers | bf3d1f0fda0f1734aa83f18959639cb64ee2a25d | [
"MIT"
] | 80 | 2020-11-19T00:12:55.000Z | 2022-03-27T19:21:26.000Z | from datetime import timedelta
# terms of service version
TOS_VERSION = 2
# community guidelines version
GUIDELINES_VERSION = 1
EMAIL_REGEX = r"^[0-9a-z][0-9a-z\-\_\+\.]*@([0-9a-z\-]+\.)*[0-9a-z\-]+\.[a-z]{2,}$"
# expiry time for a verified phone number
PHONE_VERIFICATION_LIFETIME = timedelta(days=2 * 365)
# shortest period between phone verification code requests
PHONE_REVERIFICATION_INTERVAL = timedelta(days=180)
# expiry time for an sms code
SMS_CODE_LIFETIME = timedelta(hours=24)
# max attempts to enter the sms code
SMS_CODE_ATTEMPTS = 3
EMAIL_TOKEN_VALIDITY = timedelta(hours=48)
| 24.958333 | 83 | 0.744574 | from datetime import timedelta
TOS_VERSION = 2
GUIDELINES_VERSION = 1
EMAIL_REGEX = r"^[0-9a-z][0-9a-z\-\_\+\.]*@([0-9a-z\-]+\.)*[0-9a-z\-]+\.[a-z]{2,}$"
PHONE_VERIFICATION_LIFETIME = timedelta(days=2 * 365)
PHONE_REVERIFICATION_INTERVAL = timedelta(days=180)
SMS_CODE_LIFETIME = timedelta(hours=24)
SMS_CODE_ATTEMPTS = 3
EMAIL_TOKEN_VALIDITY = timedelta(hours=48)
| true | true |
f7fcd5d094d4527c5717a303d4a027799ab3d53a | 1,899 | py | Python | sa/profiles/Linksys/SPS2xx/get_interface_status.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 84 | 2017-10-22T11:01:39.000Z | 2022-02-27T03:43:48.000Z | sa/profiles/Linksys/SPS2xx/get_interface_status.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 22 | 2017-12-11T07:21:56.000Z | 2021-09-23T02:53:50.000Z | sa/profiles/Linksys/SPS2xx/get_interface_status.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 23 | 2017-12-06T06:59:52.000Z | 2022-02-24T00:02:25.000Z | # ---------------------------------------------------------------------
# Linksys.SPS2xx.get_interface_status
# ---------------------------------------------------------------------
# Copyright (C) 2007-2011 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetinterfacestatus import IGetInterfaceStatus
class Script(BaseScript):
name = "Linksys.SPS2xx.get_interface_status"
interface = IGetInterfaceStatus
rx_interface_status = re.compile(
r"^(?P<interface>\S+)\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+(?P<status>Up|Down)\s+\S+\s+\S.*$",
re.MULTILINE,
)
def execute(self, interface=None):
r = []
# Try snmp first
if self.has_snmp():
try:
for n, s in self.snmp.join_tables(
"1.3.6.1.2.1.31.1.1.1.1", "1.3.6.1.2.1.2.2.1.8"
): # IF-MIB
if n[:1] == "e" or n[:1] == "g":
if interface:
if n == interface:
r += [{"interface": n, "status": int(s) == 1}]
else:
r += [{"interface": n, "status": int(s) == 1}]
return r
except self.snmp.TimeOutError:
pass
# Fallback to CLI
if interface:
cmd = "show interfaces status ethernet %s" % interface
else:
cmd = "show interfaces status"
for match in self.rx_interface_status.finditer(self.cli(cmd)):
r += [
{
"interface": match.group("interface"),
"status": match.group("status").lower() == "up",
}
]
return r
| 33.910714 | 97 | 0.4297 |
import re
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetinterfacestatus import IGetInterfaceStatus
class Script(BaseScript):
name = "Linksys.SPS2xx.get_interface_status"
interface = IGetInterfaceStatus
rx_interface_status = re.compile(
r"^(?P<interface>\S+)\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+(?P<status>Up|Down)\s+\S+\s+\S.*$",
re.MULTILINE,
)
def execute(self, interface=None):
r = []
if self.has_snmp():
try:
for n, s in self.snmp.join_tables(
"1.3.6.1.2.1.31.1.1.1.1", "1.3.6.1.2.1.2.2.1.8"
):
if n[:1] == "e" or n[:1] == "g":
if interface:
if n == interface:
r += [{"interface": n, "status": int(s) == 1}]
else:
r += [{"interface": n, "status": int(s) == 1}]
return r
except self.snmp.TimeOutError:
pass
if interface:
cmd = "show interfaces status ethernet %s" % interface
else:
cmd = "show interfaces status"
for match in self.rx_interface_status.finditer(self.cli(cmd)):
r += [
{
"interface": match.group("interface"),
"status": match.group("status").lower() == "up",
}
]
return r
| true | true |
f7fcd83f1113785621f870aec8de831dccc8f971 | 2,131 | py | Python | Person Matching Code/code/DeepFeatures/sample.py | pavitradangati/PersonMatching | 6d8a9689193fed0ef0e3e9f3fad19710e595d9e0 | [
"MIT"
] | null | null | null | Person Matching Code/code/DeepFeatures/sample.py | pavitradangati/PersonMatching | 6d8a9689193fed0ef0e3e9f3fad19710e595d9e0 | [
"MIT"
] | null | null | null | Person Matching Code/code/DeepFeatures/sample.py | pavitradangati/PersonMatching | 6d8a9689193fed0ef0e3e9f3fad19710e595d9e0 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
feature_array = np.load('./faceFeaturesGT.npy')
indices_array = (pd.read_csv('./imageListGT.csv')).values
print(feature_array.shape, indices_array.shape)
print(indices_array[:10, :])
final_array = np.concatenate((feature_array, indices_array), axis = 1)
print(final_array.shape)
print(final_array[:10, 512])
people_ids = np.unique(final_array[:,513])
#print(people_ids.shape, people_ids)
gallery = None
probe = None
gallery_features = None
gallery_indices = None
probe_features = None
probe_indices = None
for i in people_ids:
count =np.where(final_array[:,513]==i))[0]
lower = int(0.8*count.shape[0])
gallery_idx = count[:lower]
if all(x==np.Inf for x in final_array[count, :512]):
continue
probe_idx = count[lower:]
if gallery is None:
gallery = final_array[gallery_idx, :]
gallery_features = final_array[gallery_idx, :-2]
gallery_indices = final_array[gallery_idx, -2:]
else:
gallery = np.concatenate((gallery, final_array[gallery_idx, :]), axis=0)
gallery_features = np.concatenate((gallery_features , final_array[gallery_idx, :-2]), axis=0)
gallery_indices = np.concatenate(( gallery_indices, final_array[gallery_idx, -2:]), axis=0)
if probe is None:
probe = final_array[probe_idx, :]
probe_features = final_array[probe_idx, :-2]
probe_indices = final_array[probe_idx, -2:]
else:
probe = np.concatenate((probe, final_array[probe_idx, :]), axis=0)
probe_features = np.concatenate((probe_features , final_array[probe_idx, :-2]), axis=0)
probe_indices = np.concatenate(( probe_indices, final_array[probe_idx, -2:]), axis=0)
np.save('./gallery_combined.npy', gallery)
np.save('./probe_combined.npy', probe)
np.save('./gallery_features.npy', gallery_features)
np.save('./gallery_list.npy', gallery_indices)
np.save('./probe_features.npy', probe_features)
np.save('./probe_list.npy', probe_indices)
print(gallery.shape, probe.shape)
print(gallery_features.shape, probe_features.shape)
print(gallery_indices.shape, probe_indices.shape)
print('Done')
| 40.980769 | 101 | 0.711872 | import numpy as np
import pandas as pd
feature_array = np.load('./faceFeaturesGT.npy')
indices_array = (pd.read_csv('./imageListGT.csv')).values
print(feature_array.shape, indices_array.shape)
print(indices_array[:10, :])
final_array = np.concatenate((feature_array, indices_array), axis = 1)
print(final_array.shape)
print(final_array[:10, 512])
people_ids = np.unique(final_array[:,513])
gallery = None
probe = None
gallery_features = None
gallery_indices = None
probe_features = None
probe_indices = None
for i in people_ids:
count =np.where(final_array[:,513]==i))[0]
lower = int(0.8*count.shape[0])
gallery_idx = count[:lower]
if all(x==np.Inf for x in final_array[count, :512]):
continue
probe_idx = count[lower:]
if gallery is None:
gallery = final_array[gallery_idx, :]
gallery_features = final_array[gallery_idx, :-2]
gallery_indices = final_array[gallery_idx, -2:]
else:
gallery = np.concatenate((gallery, final_array[gallery_idx, :]), axis=0)
gallery_features = np.concatenate((gallery_features , final_array[gallery_idx, :-2]), axis=0)
gallery_indices = np.concatenate(( gallery_indices, final_array[gallery_idx, -2:]), axis=0)
if probe is None:
probe = final_array[probe_idx, :]
probe_features = final_array[probe_idx, :-2]
probe_indices = final_array[probe_idx, -2:]
else:
probe = np.concatenate((probe, final_array[probe_idx, :]), axis=0)
probe_features = np.concatenate((probe_features , final_array[probe_idx, :-2]), axis=0)
probe_indices = np.concatenate(( probe_indices, final_array[probe_idx, -2:]), axis=0)
np.save('./gallery_combined.npy', gallery)
np.save('./probe_combined.npy', probe)
np.save('./gallery_features.npy', gallery_features)
np.save('./gallery_list.npy', gallery_indices)
np.save('./probe_features.npy', probe_features)
np.save('./probe_list.npy', probe_indices)
print(gallery.shape, probe.shape)
print(gallery_features.shape, probe_features.shape)
print(gallery_indices.shape, probe_indices.shape)
print('Done')
| false | true |
f7fcd879a9f448ab8fd8168d030097c7aef4b611 | 812 | py | Python | manage.py | managedbyq/mbq.ranch | 3bd66c2ce6b508c4b0acdfc97775c04e024bd1ff | [
"Apache-2.0"
] | null | null | null | manage.py | managedbyq/mbq.ranch | 3bd66c2ce6b508c4b0acdfc97775c04e024bd1ff | [
"Apache-2.0"
] | 1 | 2019-03-06T19:57:31.000Z | 2019-03-06T19:57:31.000Z | manage.py | managedbyq/mbq.ranch | 3bd66c2ce6b508c4b0acdfc97775c04e024bd1ff | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tests.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django # noqa
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| 33.833333 | 77 | 0.639163 |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tests.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| true | true |
f7fcd8c13db638ebba08b092eacf442cf7166f9c | 12,628 | py | Python | neo/io/neuroexplorerio.py | pearsonlab/python-neo | 8915dfe9e55fd3a36be83d820bdd83ab085e9402 | [
"BSD-3-Clause"
] | null | null | null | neo/io/neuroexplorerio.py | pearsonlab/python-neo | 8915dfe9e55fd3a36be83d820bdd83ab085e9402 | [
"BSD-3-Clause"
] | null | null | null | neo/io/neuroexplorerio.py | pearsonlab/python-neo | 8915dfe9e55fd3a36be83d820bdd83ab085e9402 | [
"BSD-3-Clause"
] | 1 | 2018-04-13T04:48:48.000Z | 2018-04-13T04:48:48.000Z | # -*- coding: utf-8 -*-
"""
Class for reading data from NeuroExplorer (.nex)
Documentation for dev :
http://www.neuroexplorer.com/code.html
Depend on:
Supported : Read
Author: sgarcia,luc estebanez
"""
import os
import struct
import numpy as np
import quantities as pq
from neo.io.baseio import BaseIO
from neo.core import Segment, AnalogSignal, SpikeTrain, Epoch, Event
class NeuroExplorerIO(BaseIO):
"""
Class for reading nex files.
Usage:
>>> from neo import io
>>> r = io.NeuroExplorerIO(filename='File_neuroexplorer_1.nex')
>>> seg = r.read_segment(lazy=False, cascade=True)
>>> print seg.analogsignals # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[<AnalogSignal(array([ 39.0625 , 0. , 0. , ...,
>>> print seg.spiketrains # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[<SpikeTrain(array([ 2.29499992e-02, 6.79249987e-02, ...
>>> print seg.events # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[<Event: @21.1967754364 s, @21.2993755341 s, @21.350725174 s, ...
>>> print seg.epochs # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[<neo.core.epoch.Epoch object at 0x10561ba90>,
<neo.core.epoch.Epoch object at 0x10561bad0>]
"""
is_readable = True
is_writable = False
supported_objects = [Segment, AnalogSignal, SpikeTrain, Event, Epoch]
readable_objects = [Segment]
writeable_objects = []
has_header = False
is_streameable = False
# This is for GUI stuff: a definition for parameters when reading.
read_params = {Segment: []}
write_params = None
name = 'NeuroExplorer'
extensions = ['nex']
mode = 'file'
def __init__(self, filename=None):
"""
This class read a nex file.
Arguments:
filename: the filename to read
"""
BaseIO.__init__(self)
self.filename = filename
def read_segment(self, lazy=False, cascade=True):
fid = open(self.filename, 'rb')
global_header = HeaderReader(fid, GlobalHeader).read_f(offset=0)
# ~ print globalHeader
#~ print 'version' , globalHeader['version']
seg = Segment()
seg.file_origin = os.path.basename(self.filename)
seg.annotate(neuroexplorer_version=global_header['version'])
seg.annotate(comment=global_header['comment'])
if not cascade:
return seg
offset = 544
for i in range(global_header['nvar']):
entity_header = HeaderReader(fid, EntityHeader).read_f(
offset=offset + i * 208)
entity_header['name'] = entity_header['name'].replace('\x00', '')
#print 'i',i, entityHeader['type']
if entity_header['type'] == 0:
# neuron
if lazy:
spike_times = [] * pq.s
else:
spike_times = np.memmap(self.filename, np.dtype('i4'), 'r',
shape=(entity_header['n']),
offset=entity_header['offset'])
spike_times = spike_times.astype('f8') / global_header[
'freq'] * pq.s
sptr = SpikeTrain(
times=spike_times,
t_start=global_header['tbeg'] /
global_header['freq'] * pq.s,
t_stop=global_header['tend'] /
global_header['freq'] * pq.s,
name=entity_header['name'])
if lazy:
sptr.lazy_shape = entity_header['n']
sptr.annotate(channel_index=entity_header['WireNumber'])
seg.spiketrains.append(sptr)
if entity_header['type'] == 1:
# event
if lazy:
event_times = [] * pq.s
else:
event_times = np.memmap(self.filename, np.dtype('i4'), 'r',
shape=(entity_header['n']),
offset=entity_header['offset'])
event_times = event_times.astype('f8') / global_header[
'freq'] * pq.s
labels = np.array([''] * event_times.size, dtype='S')
evar = Event(times=event_times, labels=labels,
channel_name=entity_header['name'])
if lazy:
evar.lazy_shape = entity_header['n']
seg.events.append(evar)
if entity_header['type'] == 2:
# interval
if lazy:
start_times = [] * pq.s
stop_times = [] * pq.s
else:
start_times = np.memmap(self.filename, np.dtype('i4'), 'r',
shape=(entity_header['n']),
offset=entity_header['offset'])
start_times = start_times.astype('f8') / global_header[
'freq'] * pq.s
stop_times = np.memmap(self.filename, np.dtype('i4'), 'r',
shape=(entity_header['n']),
offset=entity_header['offset'] +
entity_header['n'] * 4)
stop_times = stop_times.astype('f') / global_header[
'freq'] * pq.s
epar = Epoch(times=start_times,
durations=stop_times - start_times,
labels=np.array([''] * start_times.size,
dtype='S'),
channel_name=entity_header['name'])
if lazy:
epar.lazy_shape = entity_header['n']
seg.epochs.append(epar)
if entity_header['type'] == 3:
# spiketrain and wavefoms
if lazy:
spike_times = [] * pq.s
waveforms = None
else:
spike_times = np.memmap(self.filename, np.dtype('i4'), 'r',
shape=(entity_header['n']),
offset=entity_header['offset'])
spike_times = spike_times.astype('f8') / global_header[
'freq'] * pq.s
waveforms = np.memmap(self.filename, np.dtype('i2'), 'r',
shape=(entity_header['n'], 1,
entity_header['NPointsWave']),
offset=entity_header['offset'] +
entity_header['n'] * 4)
waveforms = (waveforms.astype('f') *
entity_header['ADtoMV'] +
entity_header['MVOffset']) * pq.mV
t_stop = global_header['tend'] / global_header['freq'] * pq.s
if spike_times.size > 0:
t_stop = max(t_stop, max(spike_times))
sptr = SpikeTrain(
times=spike_times,
t_start=global_header['tbeg'] /
global_header['freq'] * pq.s,
#~ t_stop = max(globalHeader['tend']/
#~ globalHeader['freq']*pq.s,max(spike_times)),
t_stop=t_stop, name=entity_header['name'],
waveforms=waveforms,
sampling_rate=entity_header['WFrequency'] * pq.Hz,
left_sweep=0 * pq.ms)
if lazy:
sptr.lazy_shape = entity_header['n']
sptr.annotate(channel_index=entity_header['WireNumber'])
seg.spiketrains.append(sptr)
if entity_header['type'] == 4:
# popvectors
pass
if entity_header['type'] == 5:
# analog
timestamps = np.memmap(self.filename, np.dtype('i4'), 'r',
shape=(entity_header['n']),
offset=entity_header['offset'])
timestamps = timestamps.astype('f8') / global_header['freq']
fragment_starts = np.memmap(self.filename, np.dtype('i4'), 'r',
shape=(entity_header['n']),
offset=entity_header['offset'])
fragment_starts = fragment_starts.astype('f8') / global_header[
'freq']
t_start = timestamps[0] - fragment_starts[0] / float(
entity_header['WFrequency'])
del timestamps, fragment_starts
if lazy:
signal = [] * pq.mV
else:
signal = np.memmap(self.filename, np.dtype('i2'), 'r',
shape=(entity_header['NPointsWave']),
offset=entity_header['offset'])
signal = signal.astype('f')
signal *= entity_header['ADtoMV']
signal += entity_header['MVOffset']
signal = signal * pq.mV
ana_sig = AnalogSignal(
signal=signal, t_start=t_start * pq.s,
sampling_rate=entity_header['WFrequency'] * pq.Hz,
name=entity_header['name'],
channel_index=entity_header['WireNumber'])
if lazy:
ana_sig.lazy_shape = entity_header['NPointsWave']
seg.analogsignals.append(ana_sig)
if entity_header['type'] == 6:
# markers : TO TEST
if lazy:
times = [] * pq.s
labels = np.array([], dtype='S')
markertype = None
else:
times = np.memmap(self.filename, np.dtype('i4'), 'r',
shape=(entity_header['n']),
offset=entity_header['offset'])
times = times.astype('f8') / global_header['freq'] * pq.s
fid.seek(entity_header['offset'] + entity_header['n'] * 4)
markertype = fid.read(64).replace('\x00', '')
labels = np.memmap(
self.filename, np.dtype(
'S' + str(entity_header['MarkerLength'])),
'r', shape=(entity_header['n']),
offset=entity_header['offset'] +
entity_header['n'] * 4 + 64)
ea = Event(times=times,
labels=labels.view(np.ndarray),
name=entity_header['name'],
channel_index=entity_header['WireNumber'],
marker_type=markertype)
if lazy:
ea.lazy_shape = entity_header['n']
seg.events.append(ea)
seg.create_many_to_one_relationship()
return seg
GlobalHeader = [
('signature', '4s'),
('version', 'i'),
('comment', '256s'),
('freq', 'd'),
('tbeg', 'i'),
('tend', 'i'),
('nvar', 'i'),
]
EntityHeader = [
('type', 'i'),
('varVersion', 'i'),
('name', '64s'),
('offset', 'i'),
('n', 'i'),
('WireNumber', 'i'),
('UnitNumber', 'i'),
('Gain', 'i'),
('Filter', 'i'),
('XPos', 'd'),
('YPos', 'd'),
('WFrequency', 'd'),
('ADtoMV', 'd'),
('NPointsWave', 'i'),
('NMarkers', 'i'),
('MarkerLength', 'i'),
('MVOffset', 'd'),
('dummy', '60s'),
]
MarkerHeader = [
('type', 'i'),
('varVersion', 'i'),
('name', '64s'),
('offset', 'i'),
('n', 'i'),
('WireNumber', 'i'),
('UnitNumber', 'i'),
('Gain', 'i'),
('Filter', 'i'),
]
class HeaderReader():
def __init__(self, fid, description):
self.fid = fid
self.description = description
def read_f(self, offset=0):
self.fid.seek(offset)
d = {}
for key, fmt in self.description:
val = struct.unpack(fmt, self.fid.read(struct.calcsize(fmt)))
if len(val) == 1:
val = val[0]
else:
val = list(val)
d[key] = val
return d
| 38.266667 | 79 | 0.460881 |
import os
import struct
import numpy as np
import quantities as pq
from neo.io.baseio import BaseIO
from neo.core import Segment, AnalogSignal, SpikeTrain, Epoch, Event
class NeuroExplorerIO(BaseIO):
is_readable = True
is_writable = False
supported_objects = [Segment, AnalogSignal, SpikeTrain, Event, Epoch]
readable_objects = [Segment]
writeable_objects = []
has_header = False
is_streameable = False
read_params = {Segment: []}
write_params = None
name = 'NeuroExplorer'
extensions = ['nex']
mode = 'file'
def __init__(self, filename=None):
BaseIO.__init__(self)
self.filename = filename
def read_segment(self, lazy=False, cascade=True):
fid = open(self.filename, 'rb')
global_header = HeaderReader(fid, GlobalHeader).read_f(offset=0)
seg = Segment()
seg.file_origin = os.path.basename(self.filename)
seg.annotate(neuroexplorer_version=global_header['version'])
seg.annotate(comment=global_header['comment'])
if not cascade:
return seg
offset = 544
for i in range(global_header['nvar']):
entity_header = HeaderReader(fid, EntityHeader).read_f(
offset=offset + i * 208)
entity_header['name'] = entity_header['name'].replace('\x00', '')
if entity_header['type'] == 0:
if lazy:
spike_times = [] * pq.s
else:
spike_times = np.memmap(self.filename, np.dtype('i4'), 'r',
shape=(entity_header['n']),
offset=entity_header['offset'])
spike_times = spike_times.astype('f8') / global_header[
'freq'] * pq.s
sptr = SpikeTrain(
times=spike_times,
t_start=global_header['tbeg'] /
global_header['freq'] * pq.s,
t_stop=global_header['tend'] /
global_header['freq'] * pq.s,
name=entity_header['name'])
if lazy:
sptr.lazy_shape = entity_header['n']
sptr.annotate(channel_index=entity_header['WireNumber'])
seg.spiketrains.append(sptr)
if entity_header['type'] == 1:
if lazy:
event_times = [] * pq.s
else:
event_times = np.memmap(self.filename, np.dtype('i4'), 'r',
shape=(entity_header['n']),
offset=entity_header['offset'])
event_times = event_times.astype('f8') / global_header[
'freq'] * pq.s
labels = np.array([''] * event_times.size, dtype='S')
evar = Event(times=event_times, labels=labels,
channel_name=entity_header['name'])
if lazy:
evar.lazy_shape = entity_header['n']
seg.events.append(evar)
if entity_header['type'] == 2:
if lazy:
start_times = [] * pq.s
stop_times = [] * pq.s
else:
start_times = np.memmap(self.filename, np.dtype('i4'), 'r',
shape=(entity_header['n']),
offset=entity_header['offset'])
start_times = start_times.astype('f8') / global_header[
'freq'] * pq.s
stop_times = np.memmap(self.filename, np.dtype('i4'), 'r',
shape=(entity_header['n']),
offset=entity_header['offset'] +
entity_header['n'] * 4)
stop_times = stop_times.astype('f') / global_header[
'freq'] * pq.s
epar = Epoch(times=start_times,
durations=stop_times - start_times,
labels=np.array([''] * start_times.size,
dtype='S'),
channel_name=entity_header['name'])
if lazy:
epar.lazy_shape = entity_header['n']
seg.epochs.append(epar)
if entity_header['type'] == 3:
if lazy:
spike_times = [] * pq.s
waveforms = None
else:
spike_times = np.memmap(self.filename, np.dtype('i4'), 'r',
shape=(entity_header['n']),
offset=entity_header['offset'])
spike_times = spike_times.astype('f8') / global_header[
'freq'] * pq.s
waveforms = np.memmap(self.filename, np.dtype('i2'), 'r',
shape=(entity_header['n'], 1,
entity_header['NPointsWave']),
offset=entity_header['offset'] +
entity_header['n'] * 4)
waveforms = (waveforms.astype('f') *
entity_header['ADtoMV'] +
entity_header['MVOffset']) * pq.mV
t_stop = global_header['tend'] / global_header['freq'] * pq.s
if spike_times.size > 0:
t_stop = max(t_stop, max(spike_times))
sptr = SpikeTrain(
times=spike_times,
t_start=global_header['tbeg'] /
global_header['freq'] * pq.s,
t_stop=t_stop, name=entity_header['name'],
waveforms=waveforms,
sampling_rate=entity_header['WFrequency'] * pq.Hz,
left_sweep=0 * pq.ms)
if lazy:
sptr.lazy_shape = entity_header['n']
sptr.annotate(channel_index=entity_header['WireNumber'])
seg.spiketrains.append(sptr)
if entity_header['type'] == 4:
pass
if entity_header['type'] == 5:
timestamps = np.memmap(self.filename, np.dtype('i4'), 'r',
shape=(entity_header['n']),
offset=entity_header['offset'])
timestamps = timestamps.astype('f8') / global_header['freq']
fragment_starts = np.memmap(self.filename, np.dtype('i4'), 'r',
shape=(entity_header['n']),
offset=entity_header['offset'])
fragment_starts = fragment_starts.astype('f8') / global_header[
'freq']
t_start = timestamps[0] - fragment_starts[0] / float(
entity_header['WFrequency'])
del timestamps, fragment_starts
if lazy:
signal = [] * pq.mV
else:
signal = np.memmap(self.filename, np.dtype('i2'), 'r',
shape=(entity_header['NPointsWave']),
offset=entity_header['offset'])
signal = signal.astype('f')
signal *= entity_header['ADtoMV']
signal += entity_header['MVOffset']
signal = signal * pq.mV
ana_sig = AnalogSignal(
signal=signal, t_start=t_start * pq.s,
sampling_rate=entity_header['WFrequency'] * pq.Hz,
name=entity_header['name'],
channel_index=entity_header['WireNumber'])
if lazy:
ana_sig.lazy_shape = entity_header['NPointsWave']
seg.analogsignals.append(ana_sig)
if entity_header['type'] == 6:
if lazy:
times = [] * pq.s
labels = np.array([], dtype='S')
markertype = None
else:
times = np.memmap(self.filename, np.dtype('i4'), 'r',
shape=(entity_header['n']),
offset=entity_header['offset'])
times = times.astype('f8') / global_header['freq'] * pq.s
fid.seek(entity_header['offset'] + entity_header['n'] * 4)
markertype = fid.read(64).replace('\x00', '')
labels = np.memmap(
self.filename, np.dtype(
'S' + str(entity_header['MarkerLength'])),
'r', shape=(entity_header['n']),
offset=entity_header['offset'] +
entity_header['n'] * 4 + 64)
ea = Event(times=times,
labels=labels.view(np.ndarray),
name=entity_header['name'],
channel_index=entity_header['WireNumber'],
marker_type=markertype)
if lazy:
ea.lazy_shape = entity_header['n']
seg.events.append(ea)
seg.create_many_to_one_relationship()
return seg
GlobalHeader = [
('signature', '4s'),
('version', 'i'),
('comment', '256s'),
('freq', 'd'),
('tbeg', 'i'),
('tend', 'i'),
('nvar', 'i'),
]
EntityHeader = [
('type', 'i'),
('varVersion', 'i'),
('name', '64s'),
('offset', 'i'),
('n', 'i'),
('WireNumber', 'i'),
('UnitNumber', 'i'),
('Gain', 'i'),
('Filter', 'i'),
('XPos', 'd'),
('YPos', 'd'),
('WFrequency', 'd'),
('ADtoMV', 'd'),
('NPointsWave', 'i'),
('NMarkers', 'i'),
('MarkerLength', 'i'),
('MVOffset', 'd'),
('dummy', '60s'),
]
MarkerHeader = [
('type', 'i'),
('varVersion', 'i'),
('name', '64s'),
('offset', 'i'),
('n', 'i'),
('WireNumber', 'i'),
('UnitNumber', 'i'),
('Gain', 'i'),
('Filter', 'i'),
]
class HeaderReader():
def __init__(self, fid, description):
self.fid = fid
self.description = description
def read_f(self, offset=0):
self.fid.seek(offset)
d = {}
for key, fmt in self.description:
val = struct.unpack(fmt, self.fid.read(struct.calcsize(fmt)))
if len(val) == 1:
val = val[0]
else:
val = list(val)
d[key] = val
return d
| true | true |
f7fcd8eaa37b662acbcbb74a9b239347427d0730 | 4,060 | py | Python | threathunter_common_python/threathunter_common/geo/ip.py | threathunterX/python_lib | e2d4052de04c82cb7bccd08042f28db824cab442 | [
"Apache-2.0"
] | 2 | 2019-03-17T04:03:08.000Z | 2019-05-01T09:42:23.000Z | threathunter_common_python/threathunter_common/geo/ip.py | threathunterX/python_lib | e2d4052de04c82cb7bccd08042f28db824cab442 | [
"Apache-2.0"
] | null | null | null | threathunter_common_python/threathunter_common/geo/ip.py | threathunterX/python_lib | e2d4052de04c82cb7bccd08042f28db824cab442 | [
"Apache-2.0"
] | 4 | 2019-06-24T05:47:24.000Z | 2020-09-29T05:00:31.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import array
__author__ = "nebula"
import os
import socket
import struct
try:
import mmap
except ImportError:
mmap = None
__all__ = ['IPv4Database', 'find']
_unpack_L = lambda b: struct.unpack("<L", b)[0]
_unpack_B = lambda b: struct.unpack(">L", b)[0]
def ip2int(addr):
    """Convert a dotted-quad IPv4 string to its 32-bit integer value."""
    packed = socket.inet_aton(addr)
    # "!I" = network byte order, unsigned 32-bit.
    return struct.unpack("!I", packed)[0]
def n_to_a(ip):
    """Format a 32-bit integer as a dotted-quad IPv4 string."""
    octets = [(ip >> shift) % 256 for shift in (24, 16, 8, 0)]
    return "%d.%d.%d.%d" % tuple(octets)
def _unpack_C(b):
if isinstance(b, int):
return b
return struct.unpack("B", b)[0]
net_192_168 = (192 << 8) + 168
net_10_0 = 10
# 172.16.0.0/12: the top 12 bits are 172's 8 bits plus the high nibble of 16.
net_172_16 = (172 << 4) + 1


def is_private_ip(ip):
    """Return True if dotted-quad string *ip* is in an RFC 1918 private range.

    Unparseable input returns False rather than raising.
    """
    try:
        i = ip2int(ip)
    except Exception:
        # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        return False
    if i >> 16 == net_192_168:  # 192.168.0.0/16
        return True
    if i >> 24 == net_10_0:     # 10.0.0.0/8
        return True
    if i >> 20 == net_172_16:   # 172.16.0.0/12
        return True
    return False
# Bundled 17MonIP database file shipped next to this module.
datfile = os.path.join(os.path.dirname(__file__), "17monipdb.dat")
# Lazily created singleton used by the module-level find() below.
instance = None
class IPv4Database(object):
    """Database for searching IPv4 addresses (17mon "17monipdb.dat" format).

    The 17mon dat file format in bytes::

        -----------
        | 4 bytes |                     <- offset number
        -----------------
        | 256 * 4 bytes |               <- first ip number index
        -----------------------
        | offset - 1028 bytes |         <- ip index
        -----------------------
        |    data  storage    |
        -----------------------
    """

    def __init__(self, filename=None):
        """Load and index *filename* (defaults to the bundled dat file)."""
        if filename is None:
            filename = datfile
        # store the index by the first number of ip address
        self.first_indexes = None
        # start ip / offset information parsed from the db file
        self.ip_num = 0
        self.ip_array = None
        self.offset_array = None
        self.length_array = None
        # store the record data blob
        self.data = None
        self._parse(filename)

    def _parse(self, filename):
        """Parse the dat file into the in-memory index arrays."""
        # "with open" replaces the old leaking file(...).read() call and
        # guarantees the handle is closed even if parsing fails.
        with open(filename, "rb") as f:
            buf = f.read()
        offset = _unpack_B(buf[:4])
        offset -= 1024
        self.indexes = [None] * 257
        for i in range(256):
            i_start = i * 4 + 4
            i_end = i_start + 4
            self.indexes[i] = _unpack_L(buf[i_start:i_end])
        # "//" keeps integer semantics on both Python 2 and Python 3.
        self.indexes[256] = (offset - 1028) // 8
        self.ip_num = (offset - 1028) // 8
        self.ip_array = array.array("L", [0] * self.ip_num)
        self.offset_array = array.array("L", [0] * self.ip_num)
        # NOTE: the "c" typecode is Python 2 only.
        self.length_array = array.array("c", [chr(0)] * self.ip_num)
        self.data = buf[offset:]
        for i in range(0, self.ip_num):
            pos = 1028 + 8 * i
            ip_int = _unpack_B(buf[pos:pos + 4])
            # Offsets are stored as 3-byte little-endian; pad to 4 bytes.
            data_offset = _unpack_L(buf[pos + 4:pos + 7] + b'\0')
            data_len = _unpack_C(buf[pos + 7])
            self.ip_array[i] = ip_int
            self.offset_array[i] = data_offset
            self.length_array[i] = chr(data_len)
        # The old debug dump of every record to the hard-coded path
        # /Users/lw/ips.txt was removed: it was developer-machine specific
        # and slowed down every startup.

    def close(self):
        """Kept for API compatibility; nothing to release."""
        pass

    def _lookup_ipv4(self, ip):
        """Binary-search the sorted ip index; return the decoded record or None."""
        nip = ip2int(ip)
        first_number = (nip >> 24) % 256
        # Narrow the search window using the per-first-octet index.
        lo, hi = self.indexes[first_number:first_number + 2]
        while lo < hi:
            mid = (lo + hi) // 2
            if self.ip_array[mid] < nip:
                lo = mid + 1
            else:
                hi = mid
        if lo >= self.ip_num:
            return None
        offset = self.offset_array[lo]
        length = ord(self.length_array[lo])
        value = self.data[offset:offset + length]
        return value.decode('utf-8')

    def find(self, ip):
        """Return the location record for dotted-quad *ip*, or None."""
        return self._lookup_ipv4(ip)
def find(ip):
    """Resolve *ip* (host name or dotted quad) and look it up in the database.

    Kept at module level for backwards compatibility; lazily builds the
    shared IPv4Database singleton on first use.
    """
    global instance
    try:
        resolved = socket.gethostbyname(ip)
    except socket.gaierror:
        return None
    if instance is None:
        instance = IPv4Database()
    return instance.find(resolved)
| 25.217391 | 90 | 0.526847 |
import array
__author__ = "nebula"
import os
import socket
import struct
try:
import mmap
except ImportError:
mmap = None
__all__ = ['IPv4Database', 'find']
_unpack_L = lambda b: struct.unpack("<L", b)[0]
_unpack_B = lambda b: struct.unpack(">L", b)[0]
def ip2int(addr):
return struct.unpack("!I", socket.inet_aton(addr))[0]
def n_to_a(ip):
return "%d.%d.%d.%d" % ((ip >> 24) % 256, (ip >> 16) % 256, (ip >> 8) % 256, ip % 256)
def _unpack_C(b):
if isinstance(b, int):
return b
return struct.unpack("B", b)[0]
net_192_168 = (192 << 8) + 168
net_10_0 = 10
net_172_16 = (172 << 4) + 1
def is_private_ip(ip):
try:
i = ip2int(ip)
if i >> 16 == net_192_168:
return True
if i >> 24 == net_10_0:
return True
if i >> 20 == net_172_16:
return True
return False
except:
return False
datfile = os.path.join(os.path.dirname(__file__), "17monipdb.dat")
instance = None
class IPv4Database(object):
    """Search index over the 17mon "17monipdb.dat" IPv4 database file."""
    def __init__(self, filename=None):
        # Defaults to the dat file bundled next to this module.
        if filename is None:
            filename = datfile
        self.first_indexes = None
        self.ip_num = 0
        self.ip_array = None
        self.offset_array = None
        self.length_array = None
        self.data = None
        self._parse(filename)
    def _parse(self, filename):
        # NOTE(review): Python 2 only -- file(), "print >>" and the array
        # "c" typecode below are all invalid on Python 3.  The file handle
        # is also never closed.
        buf = file(filename, "rb").read()
        offset = _unpack_B(buf[:4])
        offset -= 1024
        # 256-entry index keyed by the first octet of the address.
        self.indexes = [None] * 257
        for i in range(256):
            i_start = i * 4 + 4
            i_end = i_start + 4
            self.indexes[i] = _unpack_L(buf[i_start:i_end])
        self.indexes[256] = (offset - 1028) / 8
        self.ip_num = (offset - 1028) / 8
        self.ip_array = array.array("L", [0] * self.ip_num)
        self.offset_array = array.array("L", [0] * self.ip_num)
        self.length_array = array.array("c", [chr(0)] * self.ip_num)
        self.data = buf[offset:]
        # NOTE(review): debug leftover -- dumps every record to a
        # hard-coded developer path on every startup; should be removed.
        o = file("/Users/lw/ips.txt", "w")
        for i in range(0, self.ip_num):
            pos = 1028 + 8 * i
            ip_int = _unpack_B(buf[pos:pos+4])
            # 3-byte little-endian offset, padded to 4 bytes.
            data_offset = _unpack_L(buf[pos+4:pos+7]+b'\0')
            data_len = _unpack_C(buf[pos + 7])
            self.ip_array[i] = ip_int
            self.offset_array[i] = data_offset
            self.length_array[i] = chr(data_len)
            print >> o, n_to_a(ip_int), self.data[data_offset:data_offset+data_len]
        o.close()
    def close(self):
        # Kept for API compatibility; nothing to release.
        pass
    def _lookup_ipv4(self, ip):
        # Binary search of the sorted ip index, narrowed by first octet.
        nip = ip2int(ip)
        first_number = (nip >> 24) % 256
        lo, hi = self.indexes[first_number:first_number+2]
        while lo < hi:
            mid = (lo + hi) // 2
            mid_val = self.ip_array[mid]
            if mid_val < nip:
                lo = mid + 1
            else:
                hi = mid
        if lo >= self.ip_num:
            return None
        offset = self.offset_array[lo]
        length = ord(self.length_array[lo])
        value = self.data[offset:offset+length]
        return value.decode('utf-8')
    def find(self, ip):
        # Public lookup: location record for dotted-quad *ip*, or None.
        return self._lookup_ipv4(ip)
def find(ip):
    # Module-level lookup kept for compatibility: resolves host names,
    # then queries a lazily created IPv4Database singleton.
    try:
        ip = socket.gethostbyname(ip)
    except socket.gaierror:
        # Name did not resolve -- implicitly returns None.
        return
    global instance
    if instance is None:
        instance = IPv4Database()
    return instance.find(ip)
| true | true |
f7fcd8f7daaba05dcd3deb235395446c973c7019 | 1,300 | py | Python | logistic_lda/utils.py | sam-cts/logistic_lda | 405caf212ba0def212feb82f8d9aaec1e491f735 | [
"Apache-2.0"
] | 16 | 2019-10-17T16:29:28.000Z | 2021-12-09T12:45:18.000Z | logistic_lda/utils.py | sam-cts/logistic_lda | 405caf212ba0def212feb82f8d9aaec1e491f735 | [
"Apache-2.0"
] | null | null | null | logistic_lda/utils.py | sam-cts/logistic_lda | 405caf212ba0def212feb82f8d9aaec1e491f735 | [
"Apache-2.0"
] | 3 | 2020-02-19T07:12:25.000Z | 2020-09-14T08:45:44.000Z | """
Copyright 2019 Twitter, Inc.
Licensed under the Apache License, Version 2.0
http://www.apache.org/licenses/LICENSE-2.0
"""
import numpy as np
import tensorflow as tf
def softmax_cross_entropy(targets, logits):
  r"""Implements a simple softmax cross entropy.

  $$-\sum_i t_{ni} \cdot (l_{ni} - \ln \sum_j \exp l_{nj})$$

  Targets can be arbitrary vectors and do not have to be one-hot encodings
  or normalized, unlike in some other implementations of cross-entropy.

  Args:
    targets: A float tensor of shape [B, K]
    logits: A float tensor of shape [B, K]

  Returns:
    A float tensor of shape [B]
  """
  # log softmax = logits minus the per-row log normalizer.
  log_normalizer = tf.reduce_logsumexp(logits, axis=1, keepdims=True)
  log_probs = logits - log_normalizer
  return -tf.reduce_sum(targets * log_probs, axis=1)
def create_table(keys, values=None, name=None):
  """Creates a hash table which maps the given keys to integers.

  Missing keys look up as -1.

  Args:
    keys: A list containing possible keys
    values: An list of corresponding values (optional)
    name: A name for the operation (optional)

  Returns:
    A `tf.contrib.lookup.HashTable` mapping keys to integers
  """
  if values is None:
    # Default mapping: key i -> i.
    values = np.arange(len(keys), dtype=np.int64)
  initializer = tf.contrib.lookup.KeyValueTensorInitializer(keys=keys, values=values)
  return tf.contrib.lookup.HashTable(initializer, -1, name=name)
| 26 | 89 | 0.708462 |
import numpy as np
import tensorflow as tf
def softmax_cross_entropy(targets, logits):
  # Cross entropy against (possibly unnormalized) targets:
  # -sum_i t_i * log softmax(logits)_i, one value per batch row.
  logprobs = logits - tf.reduce_logsumexp(logits, axis=1, keepdims=True)
  return -tf.reduce_sum(targets * logprobs, axis=1)
def create_table(keys, values=None, name=None):
  # Hash table mapping keys -> int64 ids (0..len(keys)-1 when values is
  # omitted); missing keys look up as -1.
  if values is None:
    values = np.arange(len(keys), dtype=np.int64)
  return tf.contrib.lookup.HashTable(
      tf.contrib.lookup.KeyValueTensorInitializer(keys=keys, values=values), -1, name=name)
| true | true |
f7fcd9abd569bddb9f6c8e830195823b867c12ee | 2,486 | py | Python | courses/machine_learning/deepdive/06_structured/labs/serving/application/main.py | laurenzberger/training-data-analyst | 3e2ef4668c5088ab50ad50a4f29673c88fb1bcd3 | [
"Apache-2.0"
] | 6,140 | 2016-05-23T16:09:35.000Z | 2022-03-30T19:00:46.000Z | courses/machine_learning/deepdive/06_structured/labs/serving/application/main.py | laurenzberger/training-data-analyst | 3e2ef4668c5088ab50ad50a4f29673c88fb1bcd3 | [
"Apache-2.0"
] | 1,384 | 2016-07-08T22:26:41.000Z | 2022-03-24T16:39:43.000Z | courses/machine_learning/deepdive/06_structured/labs/serving/application/main.py | laurenzberger/training-data-analyst | 3e2ef4668c5088ab50ad50a4f29673c88fb1bcd3 | [
"Apache-2.0"
] | 5,110 | 2016-05-27T13:45:18.000Z | 2022-03-31T18:40:42.000Z | # -*- coding: utf-8 -*-
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from flask import Flask
from flask import jsonify
from flask import render_template
from flask import request
from flask import url_for
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
from google.appengine.api import app_identity
# authenticate against the Cloud ML Engine prediction service using
# application-default credentials (filled-in lab solution; the original
# "# TODO" placeholders left these assignments syntactically invalid).
credentials = GoogleCredentials.get_application_default()
api = discovery.build('ml', 'v1', credentials=credentials)

# On App Engine the application id doubles as the GCP project id.
project = app_identity.get_application_id()
model_name = os.getenv('MODEL_NAME', 'babyweight')
version_name = os.getenv('VERSION_NAME', 'ml_on_gcp')

app = Flask(__name__)
def get_prediction(features):
    """Send one feature dict to the deployed model and return its prediction.

    Args:
      features: dict of model inputs for a single instance.

    Returns:
      The scalar predicted baby weight (lbs) for that instance.
    """
    input_data = {'instances': [features]}
    # Fully-qualified version resource name (filled-in lab TODO).
    parent = 'projects/%s/models/%s/versions/%s' % (project, model_name,
                                                    version_name)
    prediction = api.projects().predict(body=input_data, name=parent).execute()
    return prediction['predictions'][0]['predictions'][0]
@app.route('/')
def index():
    # Landing page.
    return render_template('index.html')


@app.route('/form')
def input_form():
    # Form collecting the prediction inputs.
    return render_template('form.html')
@app.route('/api/predict', methods=['POST'])
def predict():
    """Validate the posted fields and return the predicted weight as JSON."""

    def gender2str(val):
        # Map UI values to the string labels the model was trained on.
        genders = {'unknown': 'Unknown', 'male': 'True', 'female': 'False'}
        return genders[val]

    def plurality2str(val):
        # Unknown gender with multiple babies was bucketed as 'Multiple(2+)'
        # in the training data.
        pluralities = {'1': 'Single(1)', '2': 'Twins(2)', '3': 'Triplets(3)'}
        if features['is_male'] == 'Unknown' and int(val) > 1:
            return 'Multiple(2+)'
        return pluralities[val]

    data = json.loads(request.data.decode())
    mandatory_items = ['baby_gender', 'mother_age',
                       'plurality', 'gestation_weeks']
    for item in mandatory_items:
        if item not in data.keys():
            return jsonify({'result': 'Set all items.'})

    features = {}
    features['key'] = 'nokey'
    features['is_male'] = gender2str(data['baby_gender'])
    features['mother_age'] = float(data['mother_age'])
    features['plurality'] = plurality2str(data['plurality'])
    # Filled-in lab TODO: cast gestation_weeks to float like mother_age.
    features['gestation_weeks'] = float(data['gestation_weeks'])

    prediction = get_prediction(features)
    return jsonify({'result': '{:.2f} lbs.'.format(prediction)})
| 28.574713 | 77 | 0.706356 |
import json
import os
from flask import Flask
from flask import jsonify
from flask import render_template
from flask import request
from flask import url_for
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
from google.appengine.api import app_identity
# NOTE(review): this is the comment-stripped lab skeleton -- the original
# "# TODO" right-hand sides were removed, leaving the two assignments below
# syntactically incomplete; the module does not parse until they are filled
# in (application-default credentials and a discovery client for 'ml' v1).
credentials =
api =
# App Engine application id doubles as the GCP project id.
project = app_identity.get_application_id()
model_name = os.getenv('MODEL_NAME', 'babyweight')
version_name = os.getenv('VERSION_NAME', 'ml_on_gcp')
app = Flask(__name__)
def get_prediction(features):
    # Sends one instance to the deployed model and unwraps the scalar
    # prediction.  NOTE(review): parent/prediction are stripped TODOs and
    # are syntactically incomplete.
    input_data = {'instances': [features]}
    parent =
    prediction =
    return prediction['predictions'][0]['predictions'][0]
@app.route('/')
def index():
    # Landing page.
    return render_template('index.html')
@app.route('/form')
def input_form():
    # Form collecting the prediction inputs.
    return render_template('form.html')
@app.route('/api/predict', methods=['POST'])
def predict():
    # Validates the posted fields and returns the predicted weight as JSON.
    def gender2str(val):
        # Map UI values to the string labels the model was trained on.
        genders = {'unknown': 'Unknown', 'male': 'True', 'female': 'False'}
        return genders[val]
    def plurality2str(val):
        # Unknown gender with multiple babies was bucketed as 'Multiple(2+)'.
        pluralities = {'1': 'Single(1)', '2': 'Twins(2)', '3': 'Triplets(3)'}
        if features['is_male'] == 'Unknown' and int(val) > 1:
            return 'Multiple(2+)'
        return pluralities[val]
    data = json.loads(request.data.decode())
    mandatory_items = ['baby_gender', 'mother_age',
                       'plurality', 'gestation_weeks']
    for item in mandatory_items:
        if item not in data.keys():
            return jsonify({'result': 'Set all items.'})
    features = {}
    features['key'] = 'nokey'
    features['is_male'] = gender2str(data['baby_gender'])
    features['mother_age'] = float(data['mother_age'])
    features['plurality'] = plurality2str(data['plurality'])
    # NOTE(review): stripped TODO -- the right-hand side is missing
    # (should cast data['gestation_weeks'] to float).
    features['gestation_weeks'] =
    prediction = get_prediction(features)
    return jsonify({'result': '{:.2f} lbs.'.format(prediction)})
| false | true |
f7fcdb8edfb45efe56cc2373fb6484374d41a107 | 12,455 | py | Python | scripts/tutorial_simulation.py | Agnarsh/functions | 64a408ecf55773f38c5ce3b2fe75119e7235e9c9 | [
"Apache-2.0"
] | null | null | null | scripts/tutorial_simulation.py | Agnarsh/functions | 64a408ecf55773f38c5ce3b2fe75119e7235e9c9 | [
"Apache-2.0"
] | null | null | null | scripts/tutorial_simulation.py | Agnarsh/functions | 64a408ecf55773f38c5ce3b2fe75119e7235e9c9 | [
"Apache-2.0"
] | null | null | null | # *****************************************************************************
# © Copyright IBM Corp. 2018. All Rights Reserved.
#
# This program and the accompanying materials
# are made available under the terms of the Apache V2.0 license
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
# *****************************************************************************
import json
import logging
from sqlalchemy import Column, Integer, String, Float, DateTime, Boolean, func
from iotfunctions import bif
from iotfunctions.metadata import EntityType
from iotfunctions.db import Database
from iotfunctions.enginelog import EngineLogging
import datetime as dt
EngineLogging.configure_console_logging(logging.DEBUG)
# replace with a credentials dictionary or provide a credentials file
with open('credentials_as_dev.json', encoding='utf-8') as F:
credentials = json.loads(F.read())
'''
Simulation Tutorial
-------------------------
Don't have a live data feed? Need to prototype an AS-based solution? You can use
simulation to build simple or elaborate data feeds for AS prototypes and demos.
The easiest way to get started with simulation is to use the built in
EntityDataGenerator function. You can use this function directly from the UI
and run it without config parameters. When used in this way, it will generate
basic numeric and categorical data items. It assumes that numeric data items
are continuous time series and that string data items are categorical time
series. This tutorial will explain how to setup parameters to get more
realistic outputs.
The EntityDataGenerator is designed to mimic an IoT source. IoT sources feed
a time series table that contains input data for function processing. The
EntityDataGenerator operates in the same way.
We will start by looking at vanilla results without parameters. The first thing
that we will need is an EntityType with some numeric and string items.
'''
entity_name = 'sim_test'  # you can give your entity type a better name
db = Database(credentials=credentials)
db_schema = None  # set if you are not using the default schema
# Two numeric (continuous) and two string (categorical) data items.
entity = EntityType(entity_name, db, Column('temp', Float()), Column('pressure', Float()),
                    Column('company_code', String(50)), Column('category_code', String(5)),
                    **{'_timestamp': 'evt_timestamp', '_db_schema': db_schema})
entity.register(raise_error=True)
'''
To build historical data, you can use the entity's "generate_data" method.
'''
# Generate half a day of historical data, replacing any earlier rows.
entity.generate_data(days=0.5, drop_existing=True)
'''
To see the data you just loaded, ask the db object to read the database
table and produce a pandas dataframe.
'''
# Read the freshly generated rows back into a pandas DataFrame to inspect.
df = db.read_table(table_name=entity_name, schema=db_schema)
print(df.head())
'''
You should see a new database table - named the same way as your entity. This table
will be loaded with a half day's worth of data.
The numeric columns like 'temp' and 'pressure' will have timeseries data with a mean
of 0. Data will be generated with 5 devices.
temp pressure devicid evt-timetamp
-0.36397491820566885 0.26423994956819163 73003 2019-08-19-08.10.23.860123
0.6568787527068098 1.0140367243229087 73000 2019-08-19-08.11.23.860123
The test entity type includes a "company_code". This is a special column with default
categorical values: ACME, JDI and ABC
The other string we created "category_code" will have random categorical data like "yc",
"ea". The categories are build from the column name so that they repeat over subsequent
executions of the EntityDataGenerator.
The fact that "temp" and "pressure" don't have realistic scaling detracts from their
value as demo aids. Let's scale them more appropriately. While we are at it, we will
define some better values for "category_code".
Want to change the frequency of generation? Set the value of freq. Use a valid pandas frequency string.
'''
# Realistic scaling: per-item means/standard deviations, a fixed category
# domain, and a 30-second sampling frequency (pandas frequency string).
sim_parameters = {"data_item_mean": {'temp': 22, 'pressure': 320}, "data_item_sd": {'temp': 2, 'pressure': 5},
                  "data_item_domain": {'category_code': ['A', 'B', 'C']}, "freq": '30S'  # 30 sec
                  }
entity.generate_data(days=0.5, drop_existing=True, **sim_parameters)
'''
Looking at the sim_test table, we now see:
temp pressure deviceid evt-timetamp category_code
22.60692494267075 318.4117321035006 73004 2019-08-19-10.03.44.721861 C
24.300509140926817 314.64777038989394 73004 2019-08-19-10.04.44.721861 A
You can try the same from the UI using the parameters above when configuring the EntityDataGenerator function.
Alternatively we can add the EntityDataGenerator to the entity type as follows:
While we are at it, let's change the number of entity types we are simulating
-- Need more entity types? Change the value of auto_entity_count
-- Want a different starting range for entity type ids? set start_entity_id
'''
# Same scaling plus 10 simulated devices whose ids start at 1000; the
# EntityDataGenerator function attached below regenerates data on every
# pipeline run, flagging rows via the 'is_generated' data item.
sim_parameters = {"data_item_mean": {'temp': 22, 'pressure': 320}, "data_item_sd": {'temp': 2, 'pressure': 5},
                  "data_item_domain": {'category_code': ['A', 'B', 'C']}, "start_entity_id": 1000,
                  "auto_entity_count": 10, "freq": '30S'  # 30 sec
                  }
entity = EntityType(entity_name, db, Column('temp', Float()), Column('pressure', Float()),
                    Column('company_code', String(50)), Column('category_code', String(5)),
                    bif.EntityDataGenerator(parameters=sim_parameters, data_item='is_generated'),
                    **{'_timestamp': 'evt_timestamp', '_db_schema': db_schema})
'''
When creating this entity type we included the EntityDataGenerator function and set
the the arguments "data_item" and "parameters". The "data_item" argument allows you to
name the dummy data item that the EntityDataGenerator adds that signifies that it has run.
The "parameters" argument is configured with the value of sim_parameters dictionary.
By adding the EntityDataGenerator to the entity type it will run each time the
entity type's calc pipeline runs.
The EntityDataGenerator works differently from regular "transformer" functions.
Transformer functions apply transformations to incoming entity data by creating new
derived data items. The EntityDataGenerator doesn't have an incoming entity data to
work with, so it builds its own. It loads this data directly into the entity's time
series input data where is can be read by transform functions.
To test the execution of kpi calculations defined for the entity type locally
use 'test_local_pipeline'.
'''
# Run the entity type's calculation pipeline locally; this also triggers
# the attached EntityDataGenerator.
entity.exec_local_pipeline()
'''
So far we have only looked at the ability to create numerical and categorical time
series data. You can also automatically generate dimensional attributes.
'''
# Attach a dimension table; 'manufacturer' receives default categorical
# values when data is generated.
entity.make_dimension('sim_test_dimension', Column('manufacturer', String(50)), )
entity.register(raise_error=True)
entity.generate_data(days=0.5, drop_existing=True, **sim_parameters)
'''
Look at the data that was loaded into sim_test_dimension.
You should see something like this.
device_id manufacturer
73004 GHI Industries
73000 GHI Industries
73003 Rentech
73002 GHI Industries
73001 Rentech
"manufacturer" is another one of those magic column names that will use a default
domain of values.
Let's add additional numeric and non-numeric data to the dimension and set
simulation parameters for them.
load_rating is a numeric dimension property with a mean of 500 and a default standard deviation
maintenance_org has a custom domain
Important: When altering the dimension it is important to drop it first. The make_dimension
method attempts to reuse an existing table if there is one. In this case there is one and it
contains the wrong columns. We must drop it first so that make_dimension can create
a new dimension.
'''
# Drop first: make_dimension reuses an existing table, which would be
# missing the two new columns added below.
db.drop_table('sim_test_dimension', schema=db_schema)
entity.make_dimension('sim_test_dimension', Column('manufacturer', String(50)), Column('load_rating', Float()),
                      Column('maintenance_org', String(50)))
# load_rating gets a mean of 500 (default sd); maintenance_org a custom domain.
sim_parameters = {"data_item_mean": {'temp': 22, 'pressure': 320, 'load_rating': 500},
                  "data_item_sd": {'temp': 2, 'pressure': 5},
                  "data_item_domain": {'category_code': ['A', 'B', 'C'], 'maintenance_org': ['Lunar Parts', 'Relco']},
                  "start_entity_id": 1000, "auto_entity_count": 10, "freq": '30S'  # 30 sec
                  }
entity.register(raise_error=True)
entity.generate_data(days=0.5, drop_existing=True, **sim_parameters)
'''
This is what the new dimension looks like
devicid manufacturer load_rating maintenance_org
1004 GHI Industries 500.89830511533046 Relco
1005 Rentech 499.9407055104487 Lunar Parts
1008 GHI Industries 500.79630237063606 Relco
1001 Rentech 500.00507522504734 Relco
1006 Rentech 498.48278537970765 Lunar Parts
1007 GHI Industries 499.2788278282351 Relco
1002 GHI Industries 501.1063668253522 Relco
1000 GHI Industries 501.1359844685311 Relco
1003 Rentech 501.45477263284033 Relco
1009 Rentech 499.94029397932167 Lunar Parts
With what you have seen so far, generate_data and the AS function EntityDataGenerator allow
for the simulation independent variables. Real world systems have a mix of independent and
dependent variables. You can use AS functions to simulate dependent variables.
Consider and extension to this example where operating temperature is dependent ambient
temperature (temp) and load. We can model the relationship between these variables using
an AS function. In this example the relationship is simple enough to be modeled using a
PythonExpression. You could PythonFunctions or custom functions to model more complex
relationships.
We will also add some random noise to the result of the expression. This will allow our
simulation to retain some of the random variation typically seen in the real world.
'''
# Model a dependent variable: operating temperature as a function of
# ambient temp and pressure, with random noise added so the simulation
# keeps real-world variation.
temp_function = bif.PythonExpression(expression='df["temp"]+df["pressure"]/300*5',
                                     output_name='operating_temperature_work')
entity = EntityType(entity_name, db, Column('temp', Float()), Column('pressure', Float()),
                    Column('company_code', String(50)), Column('category_code', String(5)),
                    bif.EntityDataGenerator(parameters=sim_parameters, data_item='is_generated'), temp_function,
                    bif.RandomNoise(input_items=['operating_temperature_work'], standard_deviation=1,
                                    output_items=['operating_temperature']),
                    **{'_timestamp': 'evt_timestamp', '_db_schema': db_schema})
entity.exec_local_pipeline()
'''
Note:
entity.generate_data only writes simulated random data to the AS input table. It
does not retrieve this data and apply AS functions to it.
id evt_timestamp temp pressure deviceid _timestamp entitydatagenerator operating_temperature_work operating_temperature
1005 04:38.5 22.82177094 327.0609021 1005 04:38.5 TRUE 28.27278598 27.9994426
1004 05:08.5 22.78203275 321.8376423 1004 05:08.5 TRUE 28.14599345 27.95332118
1006 05:38.5 23.77231385 313.3748436 1006 05:38.5 TRUE 28.99522791 30.04482662
1000 06:08.5 24.23746302 329.5324336 1000 06:08.5 TRUE 29.72967024 27.95621538
1006 06:38.5 24.26086898 321.2665546 1006 06:38.5 TRUE 29.61531155 29.18479368
1009 07:08.5 26.14706462 321.1545257 1009 07:08.5 TRUE 31.49964005 32.32444398
1000 07:38.5 20.27024524 313.5222948 1000 07:38.5 TRUE 25.49561682 25.74210273
When executing AS functions locally it is necessary to execute an AS pipeline as above
In this tutorial you learned:
-- How to load independent numeric and categorical values into AS input tables
-- You saw how this applied to both time series data and dimension data
-- You also saw how to model dependent variables using AS functions.
This tutorial showed how to model a very simple system. You can use the exact same
techniques to build realistic simulations of much more complex systems.
'''
| 45.290909 | 127 | 0.708551 |
import json
import logging
from sqlalchemy import Column, Integer, String, Float, DateTime, Boolean, func
from iotfunctions import bif
from iotfunctions.metadata import EntityType
from iotfunctions.db import Database
from iotfunctions.enginelog import EngineLogging
import datetime as dt
EngineLogging.configure_console_logging(logging.DEBUG)
with open('credentials_as_dev.json', encoding='utf-8') as F:
credentials = json.loads(F.read())
entity_name = 'sim_test'
db = Database(credentials=credentials)
db_schema = None
entity = EntityType(entity_name, db, Column('temp', Float()), Column('pressure', Float()),
Column('company_code', String(50)), Column('category_code', String(5)),
**{'_timestamp': 'evt_timestamp', '_db_schema': db_schema})
entity.register(raise_error=True)
entity.generate_data(days=0.5, drop_existing=True)
df = db.read_table(table_name=entity_name, schema=db_schema)
print(df.head())
sim_parameters = {"data_item_mean": {'temp': 22, 'pressure': 320}, "data_item_sd": {'temp': 2, 'pressure': 5},
"data_item_domain": {'category_code': ['A', 'B', 'C']}, "freq": '30S'
}
entity.generate_data(days=0.5, drop_existing=True, **sim_parameters)
sim_parameters = {"data_item_mean": {'temp': 22, 'pressure': 320}, "data_item_sd": {'temp': 2, 'pressure': 5},
"data_item_domain": {'category_code': ['A', 'B', 'C']}, "start_entity_id": 1000,
"auto_entity_count": 10, "freq": '30S'
}
entity = EntityType(entity_name, db, Column('temp', Float()), Column('pressure', Float()),
Column('company_code', String(50)), Column('category_code', String(5)),
bif.EntityDataGenerator(parameters=sim_parameters, data_item='is_generated'),
**{'_timestamp': 'evt_timestamp', '_db_schema': db_schema})
entity.exec_local_pipeline()
entity.make_dimension('sim_test_dimension', Column('manufacturer', String(50)), )
entity.register(raise_error=True)
entity.generate_data(days=0.5, drop_existing=True, **sim_parameters)
db.drop_table('sim_test_dimension', schema=db_schema)
entity.make_dimension('sim_test_dimension', Column('manufacturer', String(50)), Column('load_rating', Float()),
Column('maintenance_org', String(50)))
sim_parameters = {"data_item_mean": {'temp': 22, 'pressure': 320, 'load_rating': 500},
"data_item_sd": {'temp': 2, 'pressure': 5},
"data_item_domain": {'category_code': ['A', 'B', 'C'], 'maintenance_org': ['Lunar Parts', 'Relco']},
"start_entity_id": 1000, "auto_entity_count": 10, "freq": '30S'
}
entity.register(raise_error=True)
entity.generate_data(days=0.5, drop_existing=True, **sim_parameters)
temp_function = bif.PythonExpression(expression='df["temp"]+df["pressure"]/300*5',
output_name='operating_temperature_work')
entity = EntityType(entity_name, db, Column('temp', Float()), Column('pressure', Float()),
Column('company_code', String(50)), Column('category_code', String(5)),
bif.EntityDataGenerator(parameters=sim_parameters, data_item='is_generated'), temp_function,
bif.RandomNoise(input_items=['operating_temperature_work'], standard_deviation=1,
output_items=['operating_temperature']),
**{'_timestamp': 'evt_timestamp', '_db_schema': db_schema})
entity.exec_local_pipeline()
| true | true |
f7fcdbfb237b3efaece2844f03a4bba4be6be0b5 | 7,867 | py | Python | circuit_training/learning/train_ppo_lib.py | sguada/circuit_training | 220ca925c83cdc6e67181c305da577f305c602b3 | [
"Apache-2.0"
] | null | null | null | circuit_training/learning/train_ppo_lib.py | sguada/circuit_training | 220ca925c83cdc6e67181c305da577f305c602b3 | [
"Apache-2.0"
] | 1 | 2022-01-18T23:08:19.000Z | 2022-01-19T03:04:29.000Z | circuit_training/learning/train_ppo_lib.py | sguada/circuit_training | 220ca925c83cdc6e67181c305da577f305c602b3 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sample training with distributed collection using a variable container."""
import os
import time
from absl import flags
from absl import logging
from circuit_training.learning import agent
from circuit_training.learning import learner as learner_lib
import reverb
import tensorflow as tf
from tf_agents.experimental.distributed import reverb_variable_container
from tf_agents.replay_buffers import reverb_replay_buffer
from tf_agents.train import learner as actor_learner
from tf_agents.train import triggers
from tf_agents.train.utils import spec_utils
from tf_agents.train.utils import train_utils
from tf_agents.utils import common
# Command-line flags shared by the distributed PPO training job.
flags.DEFINE_string('netlist_file', '',
                    'File path to the netlist file.')
flags.DEFINE_string('init_placement', '',
                    'File path to the init placement file.')
flags.DEFINE_string('root_dir', os.getenv('TEST_UNDECLARED_OUTPUTS_DIR'),
                    'Root directory for writing logs/summaries/checkpoints.')
flags.DEFINE_string('replay_buffer_server_address', None,
                    'Replay buffer server address.')
flags.DEFINE_string('variable_container_server_address', None,
                    'Variable container server address.')
flags.DEFINE_integer('num_iterations', 10000,
                     'Total number train/eval iterations to perform.')
flags.DEFINE_integer(
    'sequence_length', 134,
    'The sequence length to estimate shuffle size. Depends on the environment.'
    'Max horizon = T translates to sequence_length T+1 because of the '
    'additional boundary step (last -> first).')
flags.DEFINE_integer(
    'num_episodes_per_iteration', 1024,
    'This is the number of episodes we train on in each iteration.')
flags.DEFINE_integer(
    'global_batch_size', 1024,
    'Global batch size across all replicas.')
flags.DEFINE_integer(
    'global_seed', 111,
    'Used in env and weight initialization, does not impact action sampling.')
FLAGS = flags.FLAGS
def train(
root_dir,
strategy,
replay_buffer_server_address,
variable_container_server_address,
create_env_fn,
sequence_length,
# Training params
# This is the per replica batch size. The global batch size can be computed
# by this number multiplied by the number of replicas (8 in the case of 2x2
# TPUs).
per_replica_batch_size=32,
num_epochs=4,
num_iterations=10000,
# This is the number of episodes we train on in each iteration.
# num_episodes_per_iteration * epsisode_length * num_epochs =
# global_step (number of gradient updates) * per_replica_batch_size *
# num_replicas.
num_episodes_per_iteration=1024,
use_model_tpu=False):
"""Trains a PPO agent."""
# Get the specs from the environment.
env = create_env_fn()
observation_tensor_spec, action_tensor_spec, time_step_tensor_spec = (
spec_utils.get_tensor_specs(env))
# Create the agent.
with strategy.scope():
train_step = train_utils.create_train_step()
model_id = common.create_variable('model_id')
logging.info('Using GRL agent networks.')
static_features = env.wrapped_env().get_static_obs()
tf_agent = agent.create_circuit_ppo_grl_agent(
train_step,
observation_tensor_spec,
action_tensor_spec,
time_step_tensor_spec,
strategy,
static_features=static_features,
use_model_tpu=use_model_tpu)
tf_agent.initialize()
# Create the policy saver which saves the initial model now, then it
# periodically checkpoints the policy weights.
saved_model_dir = os.path.join(root_dir, actor_learner.POLICY_SAVED_MODEL_DIR)
save_model_trigger = triggers.PolicySavedModelTrigger(
saved_model_dir,
tf_agent,
train_step,
start=-num_episodes_per_iteration,
interval=num_episodes_per_iteration)
# Create the variable container.
variables = {
reverb_variable_container.POLICY_KEY: tf_agent.collect_policy.variables(),
reverb_variable_container.TRAIN_STEP_KEY: train_step,
'model_id': model_id,
}
variable_container = reverb_variable_container.ReverbVariableContainer(
variable_container_server_address,
table_names=[reverb_variable_container.DEFAULT_TABLE])
variable_container.push(variables)
# Create the replay buffer.
reverb_replay_train = reverb_replay_buffer.ReverbReplayBuffer(
tf_agent.collect_data_spec,
sequence_length=None,
table_name='training_table',
server_address=replay_buffer_server_address)
# Initialize the dataset.
def experience_dataset_fn():
get_dtype = lambda x: x.dtype
get_shape = lambda x: (None,) + x.shape
shapes = tf.nest.map_structure(get_shape, tf_agent.collect_data_spec)
dtypes = tf.nest.map_structure(get_dtype, tf_agent.collect_data_spec)
dataset = reverb.TrajectoryDataset(
server_address=replay_buffer_server_address,
table='training_table',
dtypes=dtypes,
shapes=shapes,
# Menger uses learner_iterations_per_call (256). Using 8 here instead
# because we do not need that much data in the buffer (they have to be
# filtered out for the next iteration anyways). The rule of thumb is
# 2-3x batch_size.
max_in_flight_samples_per_worker=8,
num_workers_per_iterator=-1,
max_samples_per_stream=-1,
rate_limiter_timeout_ms=-1,
)
def broadcast_info(info_traj):
# Assumes that the first element of traj is shaped
# (sequence_length, ...); and we extract this length.
info, traj = info_traj
first_elem = tf.nest.flatten(traj)[0]
length = first_elem.shape[0] or tf.shape(first_elem)[0]
info = tf.nest.map_structure(lambda t: tf.repeat(t, [length]), info)
return reverb.ReplaySample(info, traj)
dataset = dataset.map(broadcast_info)
return dataset
# Create the learner.
learning_triggers = [
save_model_trigger,
triggers.StepPerSecondLogTrigger(train_step, interval=1000),
]
def per_sequence_fn(sample):
# At this point, each sample data contains a sequence of trajectories.
data, info = sample.data, sample.info
data = tf_agent.preprocess_sequence(data)
return data, info
learner = learner_lib.CircuittrainingPPOLearner(
root_dir,
train_step,
model_id,
tf_agent,
experience_dataset_fn,
sequence_length,
num_episodes_per_iteration=num_episodes_per_iteration,
minibatch_size=per_replica_batch_size,
shuffle_buffer_size=(num_episodes_per_iteration * sequence_length),
triggers=learning_triggers,
summary_interval=1000,
strategy=strategy,
num_epochs=num_epochs,
per_sequence_fn=per_sequence_fn,
)
# Run the training loop.
for i in range(num_iterations):
step_val = train_step.numpy()
logging.info('Training. Iteration: %d', i)
start_time = time.time()
learner.run()
num_steps = train_step.numpy() - step_val
run_time = time.time() - start_time
logging.info('Steps per sec: %s', num_steps / run_time)
logging.info('Pushing variables at model_id: %d', model_id.numpy())
variable_container.push(variables)
logging.info('clearing replay buffer')
reverb_replay_train.clear()
| 35.922374 | 80 | 0.729376 |
import os
import time
from absl import flags
from absl import logging
from circuit_training.learning import agent
from circuit_training.learning import learner as learner_lib
import reverb
import tensorflow as tf
from tf_agents.experimental.distributed import reverb_variable_container
from tf_agents.replay_buffers import reverb_replay_buffer
from tf_agents.train import learner as actor_learner
from tf_agents.train import triggers
from tf_agents.train.utils import spec_utils
from tf_agents.train.utils import train_utils
from tf_agents.utils import common
flags.DEFINE_string('netlist_file', '',
'File path to the netlist file.')
flags.DEFINE_string('init_placement', '',
'File path to the init placement file.')
flags.DEFINE_string('root_dir', os.getenv('TEST_UNDECLARED_OUTPUTS_DIR'),
'Root directory for writing logs/summaries/checkpoints.')
flags.DEFINE_string('replay_buffer_server_address', None,
'Replay buffer server address.')
flags.DEFINE_string('variable_container_server_address', None,
'Variable container server address.')
flags.DEFINE_integer('num_iterations', 10000,
'Total number train/eval iterations to perform.')
flags.DEFINE_integer(
'sequence_length', 134,
'The sequence length to estimate shuffle size. Depends on the environment.'
'Max horizon = T translates to sequence_length T+1 because of the '
'additional boundary step (last -> first).')
flags.DEFINE_integer(
'num_episodes_per_iteration', 1024,
'This is the number of episodes we train on in each iteration.')
flags.DEFINE_integer(
'global_batch_size', 1024,
'Global batch size across all replicas.')
flags.DEFINE_integer(
'global_seed', 111,
'Used in env and weight initialization, does not impact action sampling.')
FLAGS = flags.FLAGS
def train(
root_dir,
strategy,
replay_buffer_server_address,
variable_container_server_address,
create_env_fn,
sequence_length,
per_replica_batch_size=32,
num_epochs=4,
num_iterations=10000,
num_episodes_per_iteration=1024,
use_model_tpu=False):
env = create_env_fn()
observation_tensor_spec, action_tensor_spec, time_step_tensor_spec = (
spec_utils.get_tensor_specs(env))
with strategy.scope():
train_step = train_utils.create_train_step()
model_id = common.create_variable('model_id')
logging.info('Using GRL agent networks.')
static_features = env.wrapped_env().get_static_obs()
tf_agent = agent.create_circuit_ppo_grl_agent(
train_step,
observation_tensor_spec,
action_tensor_spec,
time_step_tensor_spec,
strategy,
static_features=static_features,
use_model_tpu=use_model_tpu)
tf_agent.initialize()
saved_model_dir = os.path.join(root_dir, actor_learner.POLICY_SAVED_MODEL_DIR)
save_model_trigger = triggers.PolicySavedModelTrigger(
saved_model_dir,
tf_agent,
train_step,
start=-num_episodes_per_iteration,
interval=num_episodes_per_iteration)
variables = {
reverb_variable_container.POLICY_KEY: tf_agent.collect_policy.variables(),
reverb_variable_container.TRAIN_STEP_KEY: train_step,
'model_id': model_id,
}
variable_container = reverb_variable_container.ReverbVariableContainer(
variable_container_server_address,
table_names=[reverb_variable_container.DEFAULT_TABLE])
variable_container.push(variables)
reverb_replay_train = reverb_replay_buffer.ReverbReplayBuffer(
tf_agent.collect_data_spec,
sequence_length=None,
table_name='training_table',
server_address=replay_buffer_server_address)
def experience_dataset_fn():
get_dtype = lambda x: x.dtype
get_shape = lambda x: (None,) + x.shape
shapes = tf.nest.map_structure(get_shape, tf_agent.collect_data_spec)
dtypes = tf.nest.map_structure(get_dtype, tf_agent.collect_data_spec)
dataset = reverb.TrajectoryDataset(
server_address=replay_buffer_server_address,
table='training_table',
dtypes=dtypes,
shapes=shapes,
max_in_flight_samples_per_worker=8,
num_workers_per_iterator=-1,
max_samples_per_stream=-1,
rate_limiter_timeout_ms=-1,
)
def broadcast_info(info_traj):
info, traj = info_traj
first_elem = tf.nest.flatten(traj)[0]
length = first_elem.shape[0] or tf.shape(first_elem)[0]
info = tf.nest.map_structure(lambda t: tf.repeat(t, [length]), info)
return reverb.ReplaySample(info, traj)
dataset = dataset.map(broadcast_info)
return dataset
learning_triggers = [
save_model_trigger,
triggers.StepPerSecondLogTrigger(train_step, interval=1000),
]
def per_sequence_fn(sample):
data, info = sample.data, sample.info
data = tf_agent.preprocess_sequence(data)
return data, info
learner = learner_lib.CircuittrainingPPOLearner(
root_dir,
train_step,
model_id,
tf_agent,
experience_dataset_fn,
sequence_length,
num_episodes_per_iteration=num_episodes_per_iteration,
minibatch_size=per_replica_batch_size,
shuffle_buffer_size=(num_episodes_per_iteration * sequence_length),
triggers=learning_triggers,
summary_interval=1000,
strategy=strategy,
num_epochs=num_epochs,
per_sequence_fn=per_sequence_fn,
)
for i in range(num_iterations):
step_val = train_step.numpy()
logging.info('Training. Iteration: %d', i)
start_time = time.time()
learner.run()
num_steps = train_step.numpy() - step_val
run_time = time.time() - start_time
logging.info('Steps per sec: %s', num_steps / run_time)
logging.info('Pushing variables at model_id: %d', model_id.numpy())
variable_container.push(variables)
logging.info('clearing replay buffer')
reverb_replay_train.clear()
| true | true |
f7fcde68ea974a3a794023ab79e0f6b36b8568ab | 2,541 | py | Python | webscripts/prepdata.py | KathrynDH/DataDashboard | 1bf61497480f778a1c7cc9ce9fc7fb48b3067606 | [
"MIT"
] | null | null | null | webscripts/prepdata.py | KathrynDH/DataDashboard | 1bf61497480f778a1c7cc9ce9fc7fb48b3067606 | [
"MIT"
] | null | null | null | webscripts/prepdata.py | KathrynDH/DataDashboard | 1bf61497480f778a1c7cc9ce9fc7fb48b3067606 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 21 11:04:16 2021
@author: Kathryn Haske
Load data and create graphs for webpage
Function get_graphs called by myapp/routes.py
"""
from webscripts.getdata import read_data
from webscripts.plotlygraphs import line_graph, scatter_plot, bar_chart
def get_graphs():
"""
Function to load data and create graphs
called by myapp/routes.py
Args:
none
Returns:
list of plotly figures
"""
#files to load for first four graphs: youth literacy and population data
files = ['data/worldbanklit.csv','data/worldbankpop.csv']
#duplicate columns in files
drop_dup_col = 'country'
df = read_data(files, drop_dup_col).dropna()
figures = []
year_list = ['2000', '2005', '2010', '2015']
# figure-0
figures.append(
line_graph(
year_list, df, 'country', slice(1,5,1),
'Literacy rate, youth (ages 15-24) <br>by country income category',
'year', 'youth literacy rate %'
)
)
# figure-1
figures.append(
bar_chart(
df.country.tolist(), df.iloc[:,4].tolist(),
'2015 literacy rate, youth (ages 15-24) <br>by country income category',
None, 'youth literacy rate %'
)
)
# figure-2
figures.append(
line_graph(
year_list, df, 'country', slice(5,9,1),
'Population by country income category',
'year', 'population'
)
)
# figure-3
figures.append(
bar_chart(
df.country.tolist(), df.iloc[:,8].tolist(),
'2015 population by country income category',
None, 'population'
)
)
#file to load for graph: GNP and youth literacy
files = ['data/worldbank-lit-inc.csv', 'data/country-list.csv']
df2 = read_data(files, drop_dup_col,'inner').dropna()
# figure-4
figures.append(
scatter_plot(
df2.iloc[:,1], df2.iloc[:,3], df2.iloc[:,0],
'Youth literacy rate and GNI USD',
'GNI USD', 'youth literacy rate %'
)
)
#get GNP under 5k and youth literacy
df3 = df2[df2['gnp']<5000]
# figure-5
figures.append(
scatter_plot(
df3.iloc[:,1], df3.iloc[:,3], df3.iloc[:,0],
'Youth literacy rate and GNI under 5,000 USD',
'GNI', 'youth literacy rate %'
)
)
return figures
| 26.46875 | 84 | 0.552145 |
from webscripts.getdata import read_data
from webscripts.plotlygraphs import line_graph, scatter_plot, bar_chart
def get_graphs():
files = ['data/worldbanklit.csv','data/worldbankpop.csv']
drop_dup_col = 'country'
df = read_data(files, drop_dup_col).dropna()
figures = []
year_list = ['2000', '2005', '2010', '2015']
figures.append(
line_graph(
year_list, df, 'country', slice(1,5,1),
'Literacy rate, youth (ages 15-24) <br>by country income category',
'year', 'youth literacy rate %'
)
)
figures.append(
bar_chart(
df.country.tolist(), df.iloc[:,4].tolist(),
'2015 literacy rate, youth (ages 15-24) <br>by country income category',
None, 'youth literacy rate %'
)
)
figures.append(
line_graph(
year_list, df, 'country', slice(5,9,1),
'Population by country income category',
'year', 'population'
)
)
figures.append(
bar_chart(
df.country.tolist(), df.iloc[:,8].tolist(),
'2015 population by country income category',
None, 'population'
)
)
files = ['data/worldbank-lit-inc.csv', 'data/country-list.csv']
df2 = read_data(files, drop_dup_col,'inner').dropna()
figures.append(
scatter_plot(
df2.iloc[:,1], df2.iloc[:,3], df2.iloc[:,0],
'Youth literacy rate and GNI USD',
'GNI USD', 'youth literacy rate %'
)
)
df3 = df2[df2['gnp']<5000]
figures.append(
scatter_plot(
df3.iloc[:,1], df3.iloc[:,3], df3.iloc[:,0],
'Youth literacy rate and GNI under 5,000 USD',
'GNI', 'youth literacy rate %'
)
)
return figures
| true | true |
f7fcdf63ae1110dd019ce632cb5036fbadd85949 | 10,585 | py | Python | mosec/server.py | secsilm/mosec | 89b7d8e6c763b22dcf49dad4ace3796e3ffccb0a | [
"Apache-2.0"
] | null | null | null | mosec/server.py | secsilm/mosec | 89b7d8e6c763b22dcf49dad4ace3796e3ffccb0a | [
"Apache-2.0"
] | null | null | null | mosec/server.py | secsilm/mosec | 89b7d8e6c763b22dcf49dad4ace3796e3ffccb0a | [
"Apache-2.0"
] | null | null | null | import logging
import multiprocessing as mp
import os
import signal
import subprocess
import traceback
from contextlib import ContextDecorator
from multiprocessing.synchronize import Event
from os.path import exists
from pathlib import Path
from shutil import rmtree
from time import monotonic, sleep
from typing import Dict, List, Optional, Type, Union
import pkg_resources
from .args import ArgParser
from .coordinator import STAGE_EGRESS, STAGE_INGRESS, Coordinator
from .worker import Worker
logger = logging.getLogger(__name__)
GUARD_CHECK_INTERVAL = 1
NEW_PROCESS_METHOD = {"spawn", "fork"}
class Server:
"""
This public class defines the mosec server interface. It allows
users to sequentially append workers they implemented, builds
the workflow pipeline automatically and starts up the server.
###### Batching
> The user may enable the batching feature for any stage when the
corresponding worker is appended, by setting the `max_batch_size`.
###### Multiprocess
> The user may spawn multiple processes for any stage when the
corresponding worker is appended, by setting the `num`.
"""
def __init__(self):
self._worker_cls: List[Type[Worker]] = []
self._worker_num: List[int] = []
self._worker_mbs: List[int] = []
self._coordinator_env: List[Union[None, List[Dict[str, str]]]] = []
self._coordinator_ctx: List[str] = []
self._coordinator_pools: List[List[Union[mp.Process, None]]] = []
self._coordinator_shutdown: Event = mp.get_context("spawn").Event()
self._coordinator_shutdown_notify: Event = mp.get_context("spawn").Event()
self._controller_process: Optional[mp.Process] = None
self._configs: dict = {}
self._server_shutdown: bool = False
signal.signal(signal.SIGTERM, self._terminate)
signal.signal(signal.SIGINT, self._terminate)
def _validate_server(self):
assert len(self._worker_cls) > 0, (
"no worker registered\n"
"help: use `.append_worker(...)` to register at least one worker"
)
@staticmethod
def _validate_arguments(
worker,
num,
max_batch_size,
start_method,
env,
):
def validate_int_ge_1(number, name):
assert isinstance(
number, int
), f"{name} must be integer but you give {type(number)}"
assert number >= 1, f"{name} must be greater than 1"
def validate_env():
if env is None:
return
def validate_str_dict(dictionary: Dict):
for k, v in dictionary.items():
if not (isinstance(k, str) and isinstance(v, str)):
return False
return True
assert len(env) == num, "len(env) must equal to num"
valid = True
if not isinstance(env, List):
valid = False
elif not all([isinstance(x, Dict) and validate_str_dict(x) for x in env]):
valid = False
assert valid, "env must be a list of string dictionary"
validate_env()
assert issubclass(worker, Worker), "worker must be inherited from mosec.Worker"
validate_int_ge_1(num, "worker number")
validate_int_ge_1(max_batch_size, "maximum batch size")
assert (
start_method in NEW_PROCESS_METHOD
), f"start method must be one of {NEW_PROCESS_METHOD}"
def _parse_args(self):
self._configs = vars(ArgParser.parse())
logger.info(f"Mosec Server Configurations: {self._configs}")
def _controller_args(self):
args = []
for k, v in self._configs.items():
args.extend([f"--{k}", str(v)])
args.extend(["--batches"] + list(map(str, self._worker_mbs)))
return args
def _start_controller(self):
"""Subprocess to start controller program"""
if not self._server_shutdown:
path = self._configs["path"]
if exists(path):
logger.info(f"path already exists, try to remove it: {path}")
rmtree(path)
path = Path(pkg_resources.resource_filename("mosec", "bin"), "mosec")
self._controller_process = subprocess.Popen(
[path] + self._controller_args()
)
def _terminate(self, signum, framestack):
logger.info(f"[{signum}] terminating server [{framestack}] ...")
self._server_shutdown = True
@staticmethod
def _clean_pools(
processes: List[Union[mp.Process, None]],
) -> List[Union[mp.Process, None]]:
for i, p in enumerate(processes):
if p is None or p.exitcode is not None:
processes[i] = None
return processes
def _manage_coordinators(self):
first = True
while not self._server_shutdown:
for stage_id, (w_cls, w_num, w_mbs, c_ctx, c_env) in enumerate(
zip(
self._worker_cls,
self._worker_num,
self._worker_mbs,
self._coordinator_ctx,
self._coordinator_env,
)
):
# for every sequential stage
self._coordinator_pools[stage_id] = self._clean_pools(
self._coordinator_pools[stage_id]
)
if all(self._coordinator_pools[stage_id]):
# this stage is healthy
continue
if not first and not any(self._coordinator_pools[stage_id]):
# this stage might contain bugs
self._terminate(
1,
f"all workers at stage {stage_id} exited;"
" please check for bugs or socket connection issues",
)
break
stage = ""
if stage_id == 0:
stage += STAGE_INGRESS
if stage_id == len(self._worker_cls) - 1:
stage += STAGE_EGRESS
for worker_id in range(w_num):
# for every worker in each stage
if self._coordinator_pools[stage_id][worker_id] is not None:
continue
coordinator_process = mp.get_context(c_ctx).Process(
target=Coordinator,
args=(
w_cls,
w_mbs,
stage,
self._coordinator_shutdown,
self._coordinator_shutdown_notify,
self._configs["path"],
stage_id + 1,
worker_id + 1,
),
daemon=True,
)
with EnvContext(c_env, worker_id):
coordinator_process.start()
self._coordinator_pools[stage_id][worker_id] = coordinator_process
first = False
if self._controller_process:
ctr_exitcode = self._controller_process.poll()
if ctr_exitcode:
self._terminate(
ctr_exitcode,
f"mosec controller exited on error: {ctr_exitcode}",
)
sleep(GUARD_CHECK_INTERVAL)
def _halt(self):
"""Graceful shutdown"""
# notify coordinators for the shutdown
self._coordinator_shutdown_notify.set()
# terminate controller first and wait for a graceful period
if self._controller_process:
self._controller_process.terminate()
graceful_period = monotonic() + self._configs["timeout"] / 1000
while monotonic() < graceful_period:
ctr_exitcode = self._controller_process.poll()
if ctr_exitcode is not None: # exited
if ctr_exitcode: # on error
logger.error(
f"mosec controller halted on error: {ctr_exitcode}"
)
else:
logger.info("mosec controller halted normally")
break
sleep(0.1)
# shutdown coordinators
self._coordinator_shutdown.set()
logger.info("mosec server exited. see you.")
def append_worker(
self,
worker: Type[Worker],
num: int = 1,
max_batch_size: int = 1,
start_method: str = "spawn",
env: Union[None, List[Dict[str, str]]] = None,
):
"""
This method sequentially appends workers to the workflow pipeline.
Arguments:
worker: the class you inherit from `Worker` which implements
the `forward` method
num: the number of processes for parallel computing (>=1)
max_batch_size: the maximum batch size allowed (>=1)
start_method: the process starting method ("spawn" or "fork")
env: the environment variables to set before starting the process
"""
self._validate_arguments(worker, num, max_batch_size, start_method, env)
self._worker_cls.append(worker)
self._worker_num.append(num)
self._worker_mbs.append(max_batch_size)
self._coordinator_env.append(env)
self._coordinator_ctx.append(start_method)
self._coordinator_pools.append([None] * num)
def run(self):
"""
This method starts the mosec model server!
"""
self._validate_server()
self._parse_args()
self._start_controller()
try:
self._manage_coordinators()
except Exception:
logger.error(traceback.format_exc().replace("\n", " "))
self._halt()
class EnvContext(ContextDecorator):
def __init__(self, env: Union[None, List[Dict[str, str]]], id: int) -> None:
super().__init__()
self.default: Dict = {}
self.env = env
self.id = id
def __enter__(self):
if self.env is not None:
for k, v in self.env[self.id].items():
self.default[k] = os.getenv(k, "")
os.environ[k] = v
return self
def __exit__(self, *exc):
for k, v in self.default.items():
os.environ[k] = v
return False
| 35.166113 | 87 | 0.558904 | import logging
import multiprocessing as mp
import os
import signal
import subprocess
import traceback
from contextlib import ContextDecorator
from multiprocessing.synchronize import Event
from os.path import exists
from pathlib import Path
from shutil import rmtree
from time import monotonic, sleep
from typing import Dict, List, Optional, Type, Union
import pkg_resources
from .args import ArgParser
from .coordinator import STAGE_EGRESS, STAGE_INGRESS, Coordinator
from .worker import Worker
logger = logging.getLogger(__name__)
GUARD_CHECK_INTERVAL = 1
NEW_PROCESS_METHOD = {"spawn", "fork"}
class Server:
def __init__(self):
self._worker_cls: List[Type[Worker]] = []
self._worker_num: List[int] = []
self._worker_mbs: List[int] = []
self._coordinator_env: List[Union[None, List[Dict[str, str]]]] = []
self._coordinator_ctx: List[str] = []
self._coordinator_pools: List[List[Union[mp.Process, None]]] = []
self._coordinator_shutdown: Event = mp.get_context("spawn").Event()
self._coordinator_shutdown_notify: Event = mp.get_context("spawn").Event()
self._controller_process: Optional[mp.Process] = None
self._configs: dict = {}
self._server_shutdown: bool = False
signal.signal(signal.SIGTERM, self._terminate)
signal.signal(signal.SIGINT, self._terminate)
def _validate_server(self):
assert len(self._worker_cls) > 0, (
"no worker registered\n"
"help: use `.append_worker(...)` to register at least one worker"
)
@staticmethod
def _validate_arguments(
worker,
num,
max_batch_size,
start_method,
env,
):
def validate_int_ge_1(number, name):
assert isinstance(
number, int
), f"{name} must be integer but you give {type(number)}"
assert number >= 1, f"{name} must be greater than 1"
def validate_env():
if env is None:
return
def validate_str_dict(dictionary: Dict):
for k, v in dictionary.items():
if not (isinstance(k, str) and isinstance(v, str)):
return False
return True
assert len(env) == num, "len(env) must equal to num"
valid = True
if not isinstance(env, List):
valid = False
elif not all([isinstance(x, Dict) and validate_str_dict(x) for x in env]):
valid = False
assert valid, "env must be a list of string dictionary"
validate_env()
assert issubclass(worker, Worker), "worker must be inherited from mosec.Worker"
validate_int_ge_1(num, "worker number")
validate_int_ge_1(max_batch_size, "maximum batch size")
assert (
start_method in NEW_PROCESS_METHOD
), f"start method must be one of {NEW_PROCESS_METHOD}"
def _parse_args(self):
self._configs = vars(ArgParser.parse())
logger.info(f"Mosec Server Configurations: {self._configs}")
def _controller_args(self):
args = []
for k, v in self._configs.items():
args.extend([f"--{k}", str(v)])
args.extend(["--batches"] + list(map(str, self._worker_mbs)))
return args
def _start_controller(self):
if not self._server_shutdown:
path = self._configs["path"]
if exists(path):
logger.info(f"path already exists, try to remove it: {path}")
rmtree(path)
path = Path(pkg_resources.resource_filename("mosec", "bin"), "mosec")
self._controller_process = subprocess.Popen(
[path] + self._controller_args()
)
def _terminate(self, signum, framestack):
logger.info(f"[{signum}] terminating server [{framestack}] ...")
self._server_shutdown = True
@staticmethod
def _clean_pools(
processes: List[Union[mp.Process, None]],
) -> List[Union[mp.Process, None]]:
for i, p in enumerate(processes):
if p is None or p.exitcode is not None:
processes[i] = None
return processes
def _manage_coordinators(self):
first = True
while not self._server_shutdown:
for stage_id, (w_cls, w_num, w_mbs, c_ctx, c_env) in enumerate(
zip(
self._worker_cls,
self._worker_num,
self._worker_mbs,
self._coordinator_ctx,
self._coordinator_env,
)
):
self._coordinator_pools[stage_id] = self._clean_pools(
self._coordinator_pools[stage_id]
)
if all(self._coordinator_pools[stage_id]):
continue
if not first and not any(self._coordinator_pools[stage_id]):
self._terminate(
1,
f"all workers at stage {stage_id} exited;"
" please check for bugs or socket connection issues",
)
break
stage = ""
if stage_id == 0:
stage += STAGE_INGRESS
if stage_id == len(self._worker_cls) - 1:
stage += STAGE_EGRESS
for worker_id in range(w_num):
if self._coordinator_pools[stage_id][worker_id] is not None:
continue
coordinator_process = mp.get_context(c_ctx).Process(
target=Coordinator,
args=(
w_cls,
w_mbs,
stage,
self._coordinator_shutdown,
self._coordinator_shutdown_notify,
self._configs["path"],
stage_id + 1,
worker_id + 1,
),
daemon=True,
)
with EnvContext(c_env, worker_id):
coordinator_process.start()
self._coordinator_pools[stage_id][worker_id] = coordinator_process
first = False
if self._controller_process:
ctr_exitcode = self._controller_process.poll()
if ctr_exitcode:
self._terminate(
ctr_exitcode,
f"mosec controller exited on error: {ctr_exitcode}",
)
sleep(GUARD_CHECK_INTERVAL)
def _halt(self):
self._coordinator_shutdown_notify.set()
if self._controller_process:
self._controller_process.terminate()
graceful_period = monotonic() + self._configs["timeout"] / 1000
while monotonic() < graceful_period:
ctr_exitcode = self._controller_process.poll()
if ctr_exitcode is not None:
if ctr_exitcode:
logger.error(
f"mosec controller halted on error: {ctr_exitcode}"
)
else:
logger.info("mosec controller halted normally")
break
sleep(0.1)
self._coordinator_shutdown.set()
logger.info("mosec server exited. see you.")
def append_worker(
self,
worker: Type[Worker],
num: int = 1,
max_batch_size: int = 1,
start_method: str = "spawn",
env: Union[None, List[Dict[str, str]]] = None,
):
self._validate_arguments(worker, num, max_batch_size, start_method, env)
self._worker_cls.append(worker)
self._worker_num.append(num)
self._worker_mbs.append(max_batch_size)
self._coordinator_env.append(env)
self._coordinator_ctx.append(start_method)
self._coordinator_pools.append([None] * num)
def run(self):
self._validate_server()
self._parse_args()
self._start_controller()
try:
self._manage_coordinators()
except Exception:
logger.error(traceback.format_exc().replace("\n", " "))
self._halt()
class EnvContext(ContextDecorator):
def __init__(self, env: Union[None, List[Dict[str, str]]], id: int) -> None:
super().__init__()
self.default: Dict = {}
self.env = env
self.id = id
def __enter__(self):
if self.env is not None:
for k, v in self.env[self.id].items():
self.default[k] = os.getenv(k, "")
os.environ[k] = v
return self
def __exit__(self, *exc):
for k, v in self.default.items():
os.environ[k] = v
return False
| true | true |
f7fcdf736be5df3949e0c92adefb6b4899fbc42d | 17,866 | py | Python | vyperlogix/crontab/scheduler.py | raychorn/chrome_gui | f1fade70b61af12ee43c55c075aa9cfd32caa962 | [
"CC0-1.0"
] | 1 | 2020-09-29T01:36:33.000Z | 2020-09-29T01:36:33.000Z | vyperlogix/crontab/scheduler.py | raychorn/chrome_gui | f1fade70b61af12ee43c55c075aa9cfd32caa962 | [
"CC0-1.0"
] | null | null | null | vyperlogix/crontab/scheduler.py | raychorn/chrome_gui | f1fade70b61af12ee43c55c075aa9cfd32caa962 | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python
"""
See also: http://code.activestate.com/recipes/577466-cron-like-triggers/
This module provides a class for cron-like scheduling systems, and
exposes the function used to convert static cron expressions to Python
sets.
CronExpression objects are instantiated with a cron formatted string
that represents the times when the trigger is active. When using
expressions that contain periodic terms, an extension of cron created
for this module, a starting epoch should be explicitly defined. When the
epoch is not explicitly defined, it defaults to the Unix epoch. Periodic
terms provide a method of recurring triggers based on arbitrary time
periods.
Standard Cron Triggers:
>>> job = CronExpression("0 0 * * 1-5/2 find /var/log -delete")
>>> job.check_trigger((2010, 11, 17, 0, 0))
True
>>> job.check_trigger((2012, 12, 21, 0, 0))
False
Periodic Trigger:
>>> job = CronExpression("0 %9 * * * Feed 'it'", (2010, 5, 1, 7, 0, -6))
>>> job.comment
"Feed 'it'"
>>> job.check_trigger((2010, 5, 1, 7, 0), utc_offset=-6)
True
>>> job.check_trigger((2010, 5, 1, 16, 0), utc_offset=-6)
True
>>> job.check_trigger((2010, 5, 2, 1, 0), utc_offset=-6)
True
"""
import datetime
import calendar
from vyperlogix import misc
from vyperlogix.misc import ObjectTypeName
from vyperlogix.classes.SmartObject import SmartObject
__all__ = ["CronExpression", "parse_atom", "DEFAULT_EPOCH", "SUBSTITUTIONS"]
__license__ = "Public Domain"
# (name, number) lookup pairs for weekday and month names in cron fields.
# Materialised with tuple(zip(...)) instead of a bare zip()/xrange() pair:
# under Python 3 a bare zip() is a one-shot iterator (and xrange is gone),
# but __init__ iterates these tables once per CronExpression instantiation.
# Under Python 2 the result is unchanged.
DAY_NAMES = tuple(zip(('sun', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat'),
                      range(7)))
MINUTES = (0, 59)
HOURS = (0, 23)
DAYS_OF_MONTH = (1, 31)
MONTHS = (1, 12)
DAYS_OF_WEEK = (0, 6)
L_FIELDS = (DAYS_OF_WEEK, DAYS_OF_MONTH)
# Inclusive (low, high) bounds of the five cron fields, in field order:
# minute, hour, day-of-month, month, day-of-week.
FIELD_RANGES = (MINUTES, HOURS, DAYS_OF_MONTH, MONTHS, DAYS_OF_WEEK)
MONTH_NAMES = tuple(zip(('jan', 'feb', 'mar', 'apr', 'may', 'jun',
                         'jul', 'aug', 'sep', 'oct', 'nov', 'dec'),
                        range(1, 13)))
# Default epoch for periodic ('%') terms: the Unix epoch, UTC.
DEFAULT_EPOCH = (1970, 1, 1, 0, 0, 0)
# Vixie-cron style @-shortcuts expanded to their five-field equivalents.
# The misspelt "@anually" is kept for backward compatibility; the correct
# crontab(5) spelling "@annually" is accepted as well.
SUBSTITUTIONS = {
    "@yearly": "0 0 1 1 *",
    "@annually": "0 0 1 1 *",
    "@anually": "0 0 1 1 *",
    "@monthly": "0 0 1 * *",
    "@weekly": "0 0 * * 0",
    "@daily": "0 0 * * *",
    "@midnight": "0 0 * * *",
    "@hourly": "0 * * * *"
}
class CronExpression(object):
    """
    Parsed representation of a single crontab line.

    string_tab holds the five normalized field strings (minute, hour,
    day-of-month, month, day-of-week); numerical_tab holds the matching
    sets of static values computed by compute_numtab().  Any trailing
    text on the line is kept in self.comment (crontab() below uses it as
    the command to execute).
    """
    def __init__(self, line, epoch=DEFAULT_EPOCH, epoch_utc_offset=0):
        """
        Instantiates a CronExpression object with an optionally defined epoch.
        If the epoch is defined, the UTC offset can be specified one of two
        ways: as the sixth element in 'epoch' or supplied in epoch_utc_offset.
        The epoch should be defined down to the minute sorted by
        descending significance.
        """
        # Expand @yearly/@daily/... shortcuts into their five-field forms.
        for key, value in SUBSTITUTIONS.items():
            if line.startswith(key):
                line = line.replace(key, value)
                break
        # Split into at most six parts: five fields plus the trailing command.
        fields = line.split(None, 5)
        if len(fields) == 5:
            fields.append('')
        minutes, hours, dom, months, dow, self.comment = fields if ((misc.isTuple(fields) or misc.isList(fields)) and (len(fields) == 6)) else ('', '', '', '', '', '')
        # Normalize aliases: 7 == Sunday == 0, and '?' behaves like '*'.
        dow = dow.replace('7', '0').replace('?', '*')
        dom = dom.replace('?', '*')
        # Translate month and weekday names into their numeric equivalents.
        # NOTE(review): MONTH_NAMES/DAY_NAMES are built with zip() at module
        # level; iterating them repeatedly only works under Python 2 (lists).
        # On Python 3 the zip iterators would be exhausted after one parse.
        for monthstr, monthnum in MONTH_NAMES:
            months = months.lower().replace(monthstr, str(monthnum))
        for dowstr, downum in DAY_NAMES:
            dow = dow.lower().replace(dowstr, str(downum))
        # Re-upper-case dom/dow so the 'L'/'W' comparisons in check_trigger
        # match regardless of the case the user typed.
        self.string_tab = [minutes, hours, dom.upper(), months, dow.upper()]
        self.compute_numtab()
        # A five element epoch lacks a UTC offset; append the explicit one.
        if len(epoch) == 5:
            y, mo, d, h, m = epoch
            self.epoch = (y, mo, d, h, m, epoch_utc_offset)
        else:
            self.epoch = epoch
    def __str__(self):
        # Render something resembling the constructor call that built us.
        base = self.__class__.__name__ + "(%s)"
        cron_line = self.string_tab + [str(self.comment)]
        if not self.comment:
            cron_line.pop()
        arguments = '"' + ' '.join(cron_line) + '"'
        if self.epoch != DEFAULT_EPOCH:
            return base % (arguments + ", epoch=" + repr(self.epoch))
        else:
            return base % arguments
    def __repr__(self):
        return str(self)
    def compute_numtab(self):
        """
        Recomputes the sets for the static ranges of the trigger time.
        This method should only be called by the user if the string_tab
        member is modified.
        """
        self.numerical_tab = []
        for field_str, span in zip(self.string_tab, FIELD_RANGES):
            split_field_str = field_str.split(',')
            if len(split_field_str) > 1 and "*" in split_field_str:
                raise ValueError("\"*\" must be alone in a field.")
            unified = set()
            for cron_atom in split_field_str:
                # parse_atom only handles static cases; atoms containing the
                # context/epoch sensitive characters below are evaluated
                # lazily inside check_trigger() instead.
                for special_char in ('%', '#', 'L', 'W'):
                    if special_char in cron_atom:
                        break
                else:
                    __atom__ = parse_atom(cron_atom, span)
                    if (__atom__):
                        unified.update(__atom__)
            self.numerical_tab.append(unified)
        # When dom is '*' but dow is restricted, empty dom's static set so
        # only the day-of-week logic in check_trigger can match the day.
        if self.string_tab[2] == "*" and self.string_tab[4] != "*":
            self.numerical_tab[2] = set()
    def check_trigger(self, date_tuple, utc_offset=0):
        """
        Returns boolean indicating if the trigger is active at the given time.
        The date tuple should be in the local time. Unless periodicities are
        used, utc_offset does not need to be specified. If periodicities are
        used, specifically in the hour and minutes fields, it is crucial that
        the utc_offset is specified.
        """
        year, month, day, hour, mins = date_tuple
        given_date = datetime.date(year, month, day)
        zeroday = datetime.date(*self.epoch[:3])
        last_dom = calendar.monthrange(year, month)[-1]
        dom_matched = True
        # In calendar and datetime.date.weekday, Monday = 0; shift so that
        # Sunday = 0 to match cron conventions.
        given_dow = (datetime.date.weekday(given_date) + 1) % 7
        # Weekday (Sunday = 0) of the first day of the given month.
        first_dow = (given_dow + 1 - day) % 7
        # Figure out how much time has passed from the epoch to the given date
        utc_diff = utc_offset - self.epoch[5]
        mod_delta_yrs = year - self.epoch[0]
        mod_delta_mon = month - self.epoch[1] + mod_delta_yrs * 12
        mod_delta_day = (given_date - zeroday).days
        mod_delta_hrs = hour - self.epoch[3] + mod_delta_day * 24 + utc_diff
        mod_delta_min = mins - self.epoch[4] + mod_delta_hrs * 60
        # Makes iterating through like components easier.
        quintuple = zip(
            (mins, hour, day, month, given_dow),
            self.numerical_tab,
            self.string_tab,
            (mod_delta_min, mod_delta_hrs, mod_delta_day, mod_delta_mon,
                mod_delta_day),
            FIELD_RANGES)
        for value, valid_values, field_str, delta_t, field_type in quintuple:
            # All valid, static values for the fields are stored in sets
            if value in valid_values:
                continue
            # The following for loop implements the logic for context
            # sensitive and epoch sensitive constraints. break statements,
            # which are executed when a match is found, lead to a continue
            # in the outer loop. If there are no matches found, the given date
            # does not match expression constraints, so the function returns
            # False as seen at the end of this for...else... construct.
            for cron_atom in field_str.split(','):
                if (misc.isStringValid(cron_atom)) and (cron_atom[0] == '%'):
                    # '%n' periodicity: fires whenever delta_t is a multiple
                    # of n units since the epoch.
                    if not(delta_t % int(cron_atom[1:])):
                        break
                elif field_type == DAYS_OF_WEEK and '#' in cron_atom:
                    D, N = int(cron_atom[0]), int(cron_atom[2])
                    # Computes Nth occurence of D day of the week
                    if (((D - first_dow) % 7) + 1 + 7 * (N - 1)) == day:
                        break
                elif field_type == DAYS_OF_MONTH and cron_atom[-1] == 'W':
                    # 'nW': the weekday nearest day n, clamped to this month.
                    target = min(int(cron_atom[:-1]), last_dom)
                    lands_on = (first_dow + target - 1) % 7
                    if lands_on == 0:
                        # Shift from Sun. to Mon. unless Mon. is next month
                        target += 1 if target < last_dom else -2
                    elif lands_on == 6:
                        # Shift from Sat. to Fri. unless Fri. in prior month
                        target += -1 if target > 1 else 2
                    # Break if the day is correct, and target is a weekday
                    if target == day and (first_dow + target - 7) % 7 > 1:
                        break
                elif field_type in L_FIELDS and cron_atom.endswith('L'):
                    # In dom field, L means the last day of the month
                    target = last_dom
                    if field_type == DAYS_OF_WEEK:
                        # Calculates the last occurence of given day of week
                        desired_dow = int(cron_atom[:-1])
                        target = (((desired_dow - first_dow) % 7) + 29)
                        target -= 7 if target > last_dom else 0
                    if target == day:
                        break
            else:
                # See 2010.11.15 of CHANGELOG
                if field_type == DAYS_OF_MONTH and self.string_tab[4] != '*':
                    # Defer: give the day-of-week field a chance to match.
                    dom_matched = False
                    continue
                elif field_type == DAYS_OF_WEEK and self.string_tab[2] != '*':
                    # If we got here, then days of months validated so it does
                    # not matter that days of the week failed.
                    return dom_matched
                # None of the expressions matched which means this field fails
                return False
        # Arriving at this point means the date landed within the constraints
        # of all fields; the associated trigger should be fired.
        return True
def parse_atom(parse, minmax):
    """
    Returns a set containing valid values for a given cron-style range of
    numbers. The 'minmax' arguments is a two element iterable containing the
    inclusive upper and lower limits of the expression.

    Supported forms: "*", a single number, "a-b" ranges (which may wrap
    around the field, e.g. "18-6"), and a "/n" step suffix.  Atoms that
    match none of these forms yield None so callers (compute_numtab) can
    skip them; context sensitive atoms ('L', 'W', '#', '%') are handled
    elsewhere.

    Examples:
    >>> parse_atom("1-5",(0,6))
    set([1, 2, 3, 4, 5])
    >>> parse_atom("*/6",(0,23))
    set([0, 6, 12, 18])
    >>> parse_atom("18-6/4",(0,23))
    set([18, 22, 2, 6])
    >>> parse_atom("*/9",(0,23))
    set([0, 9, 18])
    """
    parse = parse.strip()
    increment = 1
    if parse == '*':
        # Every value within the field's bounds.
        return set(range(minmax[0], minmax[1] + 1))
    elif parse.isdigit():
        # A single number still needs to be returned as a set
        value = int(parse)
        if minmax[0] <= value <= minmax[1]:
            return set((value,))
        else:
            raise ValueError("Invalid bounds: \"%s\"" % parse)
    elif '-' in parse or '/' in parse:
        divide = parse.split('/')
        subrange = divide[0]
        if len(divide) == 2:
            # Example: 1-3/5 or */7 increment should be 5 and 7 respectively
            increment = int(divide[1])
        if '-' in subrange:
            # Example: a-b
            prefix, suffix = [int(n) for n in subrange.split('-')]
            if prefix < minmax[0] or suffix > minmax[1]:
                raise ValueError("Invalid bounds: \"%s\"" % parse)
        elif subrange == '*':
            # Include all values with the given range
            prefix, suffix = minmax
        else:
            raise ValueError("Unrecognized symbol: \"%s\"" % subrange)
        if prefix <= suffix:
            # Ascending (or single value) range, e.g. 7-10 or 5-5.
            # BUGFIX: the previous "prefix < suffix" test sent single-value
            # ranges like "5-5" down the wrap-around branch, wrongly
            # producing the whole field instead of {5}.
            return set(range(prefix, suffix + 1, increment))
        else:
            # Example: 12-4/2; (12, 12 + n, ..., 12 + m*n) U (n_0, ..., 4)
            # Walk to the top of the field, continue from the bottom, then
            # apply the step across the combined sequence.
            noskips = list(range(prefix, minmax[1] + 1))
            noskips += list(range(minmax[0], suffix + 1))
            return set(noskips[::increment])
from vyperlogix.misc import threadpool
__Q__ = threadpool.ThreadQueue(1)
def crontab(config,jsonHandler=None,callback=None,logging_callback=None,default=None,threaded=False):
    """
    Execute shell commands scheduled by a crontab-style file.

    config is an object exposing (at least) the attributes read below:
    verbose, jsonFpath, schedulefpath, isRunning, dryrun and resolution.
    jsonHandler(config.jsonFpath), when callable, reloads an auxiliary JSON
    config each cycle; callback(jobs), when callable, can stop the loop by
    returning a falsy value; logging_callback receives every log message.
    With threaded=True the polling loop runs on the module-level thread
    queue (__Q__); otherwise a single, non-looping pass is performed.
    Returns nothing; in threaded mode the process is killed with SIGTERM
    if the callback vetoes execution.
    """
    import os, sys, time, signal
    from vyperlogix.misc import _utils
    from vyperlogix.process.shell import SmartShell
    from vyperlogix.lists.ListWrapper import ListWrapper
    # Strip '#' comments and drop blank lines from the crontab's lines.
    normalize = lambda items:[s for s in [''.join(ll[0:ll.findFirstMatching('#') if (ll.findFirstMatching('#') > -1) else len(ll)]).strip() for ll in [ListWrapper(l) for l in items if (len(l) > 0)]] if (len(s) > 0)]
    def __logger__(msg):
        # Best-effort logging: never let a logging failure kill the loop.
        if (callable(logging_callback)):
            try:
                logging_callback(msg)
            except:
                pass
    def __crontab__(config,jsonHandler=jsonHandler,callback=callback,logging_callback=logging_callback,default=default):
        # Core worker: read the schedule file once, then (when threaded)
        # poll it, firing any job whose trigger matches the current minute.
        __lines__ = ''
        __logger__('INFO.1.1: verbose="%s" (%s).' % (config.verbose,ObjectTypeName.typeClassName(config.verbose)))
        try:
            __logger__('INFO.1.2: config="%s".' % (config))
            if (config.verbose):
                __logger__('INFO.1.3: JSON FPath ?: "%s".' % (config.jsonFpath))
            if (callable(jsonHandler)):
                try:
                    __config__ = jsonHandler(config.jsonFpath)
                except Exception as ex:
                    __config__ = SmartObject()
            __file__ = config.schedulefpath if (misc.isStringValid(config.schedulefpath)) else None
            if (config.verbose):
                __logger__('INFO.1.4: Crontab ?: "%s".' % (__file__))
            if (os.path.exists(__file__)):
                if (config.verbose):
                    __logger__('INFO.1.5: Crontab Exists: "%s".' % (__file__))
                __lines__ = _utils._readFileFrom(__file__)
                if (config.verbose):
                    __logger__('INFO.1.6: Crontab Content: "%s".' % (__lines__))
        except Exception as ex:
            __logger__('EXCEPTION.1: "%s".' % (_utils.formattedException(details=ex)))
        __logger__('INFO.1.6.1: config.isRunning="%s".' % (config.isRunning))
        while (config.isRunning and threaded):
            # Re-parse the schedule every cycle; the callback may veto.
            jobs = [CronExpression(__line__) for __line__ in normalize(__lines__) if (misc.isStringValid(__line__))]
            config.isRunning = callback(jobs) if (callable(callback)) else True
            if (config.isRunning):
                for job in jobs:
                    if (config.verbose):
                        __logger__('INFO.1.7: Job: "%s".' % (job))
                    if job.check_trigger(time.gmtime(time.time())[:5]):
                        if (config.dryrun):
                            __logger__('INFO.1.8: Execute: %s' % (job.comment))
                        else:
                            # Write the job's command into a throw-away batch
                            # script and run it through SmartShell; the
                            # script is removed again in __onExit__.
                            import tempfile
                            __cmd__ = tempfile.NamedTemporaryFile().name
                            __sysout__ = _utils.stringIO()
                            def __callback__(ss,data=None):
                                global __begin__
                                if (data) and (misc.isString(data)) and (len(data) > 0):
                                    __logger__('INFO.1.9: %s' % (data))
                                return
                            def __onExit__(ss):
                                __logger__('INFO.1.10: __onExit__')
                                __logger__('INFO.1.11: %s' % (__sysout__.getvalue()))
                                if (os.path.exists(__cmd__)):
                                    os.remove(__cmd__)
                            wfHandle = open(__cmd__,'w')
                            print >>wfHandle, '@echo on\n'
                            print >>wfHandle, '%s\n' % (job.comment)
                            wfHandle.flush()
                            wfHandle.close()
                            ss = SmartShell(__cmd__,callback=__callback__,isDebugging=True,onExit=__onExit__,sysout=__sysout__)
                            ss.execute()
                if (threaded):
                    # Sleep one resolution interval, then reload the JSON
                    # config and the schedule file for the next cycle.
                    if (config.verbose):
                        __logger__('INFO.1.12: Sleeping for %s secs...' % (config.resolution))
                    time.sleep(config.resolution if (isinstance(config.resolution,float) or isinstance(config.resolution,int)) else 60)
                    if (callable(jsonHandler)):
                        try:
                            __config__ = jsonHandler(config.jsonFpath)
                        except Exception as ex:
                            __config__ = SmartObject()
                    __file__ = config.schedulefpath if (misc.isStringValid(config.schedulefpath)) else None
                    if (os.path.exists(__file__)):
                        if (config.verbose):
                            __logger__('INFO.1.13: Crontab Exists: "%s".' % (__file__))
                        __lines__ = _utils._readFileFrom(__file__)
                        if (config.verbose):
                            __logger__('INFO.1.14: Crontab Content: "%s".' % (__lines__))
            else:
                __logger__('WARNING.1.15: Cannot execute crontab unless threaded is %s (true).' % (threaded))
        return config.isRunning
    __logger__('INFO.1: threaded="%s".' % (threaded))
    if (threaded):
        # Run the worker on the module-level single-slot thread queue.
        @threadpool.threadify(__Q__)
        def threaded_crontab(config,jsonHandler=jsonHandler,callback=callback,logging_callback=logging_callback,default=default):
            return __crontab__(config,jsonHandler=jsonHandler,callback=callback,logging_callback=logging_callback,default=default)
        threaded_crontab(config,jsonHandler=jsonHandler,callback=callback,logging_callback=logging_callback,default=default)
        __logger__('INFO.2: isRunning="%s".' % (config.isRunning))
        if (not config.isRunning):
            if (config.verbose):
                if (callable(logging_callback)):
                    try:
                        logging_callback('INFO: Cannot run due to application defined criteria expressed via the callback.')
                    except:
                        pass
            __logger__('INFO.3: TERMINATING !!!')
            pid = os.getpid()
            os.kill(pid,signal.SIGTERM)
    else:
        __logger__('INFO.3: threaded="%s".' % (threaded))
        __crontab__(config,jsonHandler=jsonHandler,callback=callback,logging_callback=logging_callback,default=default)
if (__name__ == '__main__'):
    # Ad-hoc manual smoke test for the scheduler.
    import time
    __crontab__ = '''
    # Minute Hour Day of Month Month Day of Week Command
    # (0-59) (0-23) (1-31) (1-12 or Jan-Dec) (0-6 or Sun-Sat)
    */5 * * * * dir # comment this
    '''
    __file__ = r'C:\@2\crontab'
    # NOTE(review): crontab() is declared as crontab(config, jsonHandler=...,
    # callback=..., logging_callback=..., default=..., threaded=...); it has
    # no dry_run/verbose keywords and expects a config object rather than a
    # file path, so this call raises TypeError as written -- confirm intent.
    # The sample __crontab__ string above is also never used.
    crontab(__file__,dry_run=True,threaded=True,verbose=True)
    s_begin = time.time()
    # Keep the process alive for ~15 minutes so the threaded loop can run.
    while ((time.time() - s_begin) < 900.0):
        print '(+++)'
        time.sleep(10)
"""
See also: http://code.activestate.com/recipes/577466-cron-like-triggers/
This module provides a class for cron-like scheduling systems, and
exposes the function used to convert static cron expressions to Python
sets.
CronExpression objects are instantiated with a cron formatted string
that represents the times when the trigger is active. When using
expressions that contain periodic terms, an extension of cron created
for this module, a starting epoch should be explicitly defined. When the
epoch is not explicitly defined, it defaults to the Unix epoch. Periodic
terms provide a method of recurring triggers based on arbitrary time
periods.
Standard Cron Triggers:
>>> job = CronExpression("0 0 * * 1-5/2 find /var/log -delete")
>>> job.check_trigger((2010, 11, 17, 0, 0))
True
>>> job.check_trigger((2012, 12, 21, 0 , 0))
False
Periodic Trigger:
>>> job = CronExpression("0 %9 * * * Feed 'it'", (2010, 5, 1, 7, 0, -6))
>>> job.comment
"Feed 'it'"
>>> job.check_trigger((2010, 5, 1, 7, 0), utc_offset=-6)
True
>>> job.check_trigger((2010, 5, 1, 16, 0), utc_offset=-6)
True
>>> job.check_trigger((2010, 5, 2, 1, 0), utc_offset=-6)
True
"""
import datetime
import calendar
from vyperlogix import misc
from vyperlogix.misc import ObjectTypeName
from vyperlogix.classes.SmartObject import SmartObject
__all__ = ["CronExpression", "parse_atom", "DEFAULT_EPOCH", "SUBSTITUTIONS"]
__license__ = "Public Domain"
DAY_NAMES = zip(('sun', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat'), xrange(7))
MINUTES = (0, 59)
HOURS = (0, 23)
DAYS_OF_MONTH = (1, 31)
MONTHS = (1, 12)
DAYS_OF_WEEK = (0, 6)
L_FIELDS = (DAYS_OF_WEEK, DAYS_OF_MONTH)
FIELD_RANGES = (MINUTES, HOURS, DAYS_OF_MONTH, MONTHS, DAYS_OF_WEEK)
MONTH_NAMES = zip(('jan', 'feb', 'mar', 'apr', 'may', 'jun',
'jul', 'aug', 'sep', 'oct', 'nov', 'dec'), xrange(1, 13))
DEFAULT_EPOCH = (1970, 1, 1, 0, 0, 0)
SUBSTITUTIONS = {
"@yearly": "0 0 1 1 *",
"@anually": "0 0 1 1 *",
"@monthly": "0 0 1 * *",
"@weekly": "0 0 * * 0",
"@daily": "0 0 * * *",
"@midnight": "0 0 * * *",
"@hourly": "0 * * * *"
}
class CronExpression(object):
def __init__(self, line, epoch=DEFAULT_EPOCH, epoch_utc_offset=0):
"""
Instantiates a CronExpression object with an optionally defined epoch.
If the epoch is defined, the UTC offset can be specified one of two
ways: as the sixth element in 'epoch' or supplied in epoch_utc_offset.
The epoch should be defined down to the minute sorted by
descending significance.
"""
for key, value in SUBSTITUTIONS.items():
if line.startswith(key):
line = line.replace(key, value)
break
fields = line.split(None, 5)
if len(fields) == 5:
fields.append('')
minutes, hours, dom, months, dow, self.comment = fields if ((misc.isTuple(fields) or misc.isList(fields)) and (len(fields) == 6)) else ('', '', '', '', '', '')
dow = dow.replace('7', '0').replace('?', '*')
dom = dom.replace('?', '*')
for monthstr, monthnum in MONTH_NAMES:
months = months.lower().replace(monthstr, str(monthnum))
for dowstr, downum in DAY_NAMES:
dow = dow.lower().replace(dowstr, str(downum))
self.string_tab = [minutes, hours, dom.upper(), months, dow.upper()]
self.compute_numtab()
if len(epoch) == 5:
y, mo, d, h, m = epoch
self.epoch = (y, mo, d, h, m, epoch_utc_offset)
else:
self.epoch = epoch
def __str__(self):
base = self.__class__.__name__ + "(%s)"
cron_line = self.string_tab + [str(self.comment)]
if not self.comment:
cron_line.pop()
arguments = '"' + ' '.join(cron_line) + '"'
if self.epoch != DEFAULT_EPOCH:
return base % (arguments + ", epoch=" + repr(self.epoch))
else:
return base % arguments
def __repr__(self):
return str(self)
def compute_numtab(self):
"""
Recomputes the sets for the static ranges of the trigger time.
This method should only be called by the user if the string_tab
member is modified.
"""
self.numerical_tab = []
for field_str, span in zip(self.string_tab, FIELD_RANGES):
split_field_str = field_str.split(',')
if len(split_field_str) > 1 and "*" in split_field_str:
raise ValueError("\"*\" must be alone in a field.")
unified = set()
for cron_atom in split_field_str:
for special_char in ('%', '#', 'L', 'W'):
if special_char in cron_atom:
break
else:
__atom__ = parse_atom(cron_atom, span)
if (__atom__):
unified.update(__atom__)
self.numerical_tab.append(unified)
if self.string_tab[2] == "*" and self.string_tab[4] != "*":
self.numerical_tab[2] = set()
def check_trigger(self, date_tuple, utc_offset=0):
"""
Returns boolean indicating if the trigger is active at the given time.
The date tuple should be in the local time. Unless periodicities are
used, utc_offset does not need to be specified. If periodicities are
used, specifically in the hour and minutes fields, it is crucial that
the utc_offset is specified.
"""
year, month, day, hour, mins = date_tuple
given_date = datetime.date(year, month, day)
zeroday = datetime.date(*self.epoch[:3])
last_dom = calendar.monthrange(year, month)[-1]
dom_matched = True
given_dow = (datetime.date.weekday(given_date) + 1) % 7
first_dow = (given_dow + 1 - day) % 7
utc_diff = utc_offset - self.epoch[5]
mod_delta_yrs = year - self.epoch[0]
mod_delta_mon = month - self.epoch[1] + mod_delta_yrs * 12
mod_delta_day = (given_date - zeroday).days
mod_delta_hrs = hour - self.epoch[3] + mod_delta_day * 24 + utc_diff
mod_delta_min = mins - self.epoch[4] + mod_delta_hrs * 60
quintuple = zip(
(mins, hour, day, month, given_dow),
self.numerical_tab,
self.string_tab,
(mod_delta_min, mod_delta_hrs, mod_delta_day, mod_delta_mon,
mod_delta_day),
FIELD_RANGES)
for value, valid_values, field_str, delta_t, field_type in quintuple:
if value in valid_values:
continue
for cron_atom in field_str.split(','):
if (misc.isStringValid(cron_atom)) and (cron_atom[0] == '%'):
if not(delta_t % int(cron_atom[1:])):
break
elif field_type == DAYS_OF_WEEK and '#' in cron_atom:
D, N = int(cron_atom[0]), int(cron_atom[2])
if (((D - first_dow) % 7) + 1 + 7 * (N - 1)) == day:
break
elif field_type == DAYS_OF_MONTH and cron_atom[-1] == 'W':
target = min(int(cron_atom[:-1]), last_dom)
lands_on = (first_dow + target - 1) % 7
if lands_on == 0:
target += 1 if target < last_dom else -2
elif lands_on == 6:
target += -1 if target > 1 else 2
if target == day and (first_dow + target - 7) % 7 > 1:
break
elif field_type in L_FIELDS and cron_atom.endswith('L'):
target = last_dom
if field_type == DAYS_OF_WEEK:
desired_dow = int(cron_atom[:-1])
target = (((desired_dow - first_dow) % 7) + 29)
target -= 7 if target > last_dom else 0
if target == day:
break
else:
if field_type == DAYS_OF_MONTH and self.string_tab[4] != '*':
dom_matched = False
continue
elif field_type == DAYS_OF_WEEK and self.string_tab[2] != '*':
return dom_matched
return False
return True
def parse_atom(parse, minmax):
"""
Returns a set containing valid values for a given cron-style range of
numbers. The 'minmax' arguments is a two element iterable containing the
inclusive upper and lower limits of the expression.
Examples:
>>> parse_atom("1-5",(0,6))
set([1, 2, 3, 4, 5])
>>> parse_atom("*/6",(0,23))
set([0, 6, 12, 18])
>>> parse_atom("18-6/4",(0,23))
set([18, 22, 0, 4])
>>> parse_atom("*/9",(0,23))
set([0, 9, 18])
"""
parse = parse.strip()
increment = 1
if parse == '*':
return set(xrange(minmax[0], minmax[1] + 1))
elif parse.isdigit():
value = int(parse)
if value >= minmax[0] and value <= minmax[1]:
return set((value,))
else:
raise ValueError("Invalid bounds: \"%s\"" % parse)
elif '-' in parse or '/' in parse:
divide = parse.split('/')
subrange = divide[0]
if len(divide) == 2:
increment = int(divide[1])
if '-' in subrange:
prefix, suffix = [int(n) for n in subrange.split('-')]
if prefix < minmax[0] or suffix > minmax[1]:
raise ValueError("Invalid bounds: \"%s\"" % parse)
elif subrange == '*':
prefix, suffix = minmax
else:
raise ValueError("Unrecognized symbol: \"%s\"" % subrange)
if prefix < suffix:
return set(xrange(prefix, suffix + 1, increment))
else:
noskips = list(xrange(prefix, minmax[1] + 1))
noskips+= list(xrange(minmax[0], suffix + 1))
return set(noskips[::increment])
from vyperlogix.misc import threadpool
__Q__ = threadpool.ThreadQueue(1)
def crontab(config,jsonHandler=None,callback=None,logging_callback=None,default=None,threaded=False):
import os, sys, time, signal
from vyperlogix.misc import _utils
from vyperlogix.process.shell import SmartShell
from vyperlogix.lists.ListWrapper import ListWrapper
normalize = lambda items:[s for s in [''.join(ll[0:ll.findFirstMatching('#') if (ll.findFirstMatching('#') > -1) else len(ll)]).strip() for ll in [ListWrapper(l) for l in items if (len(l) > 0)]] if (len(s) > 0)]
def __logger__(msg):
if (callable(logging_callback)):
try:
logging_callback(msg)
except:
pass
def __crontab__(config,jsonHandler=jsonHandler,callback=callback,logging_callback=logging_callback,default=default):
__lines__ = ''
__logger__('INFO.1.1: verbose="%s" (%s).' % (config.verbose,ObjectTypeName.typeClassName(config.verbose)))
try:
__logger__('INFO.1.2: config="%s".' % (config))
if (config.verbose):
__logger__('INFO.1.3: JSON FPath ?: "%s".' % (config.jsonFpath))
if (callable(jsonHandler)):
try:
__config__ = jsonHandler(config.jsonFpath)
except Exception as ex:
__config__ = SmartObject()
__file__ = config.schedulefpath if (misc.isStringValid(config.schedulefpath)) else None
if (config.verbose):
__logger__('INFO.1.4: Crontab ?: "%s".' % (__file__))
if (os.path.exists(__file__)):
if (config.verbose):
__logger__('INFO.1.5: Crontab Exists: "%s".' % (__file__))
__lines__ = _utils._readFileFrom(__file__)
if (config.verbose):
__logger__('INFO.1.6: Crontab Content: "%s".' % (__lines__))
except Exception as ex:
__logger__('EXCEPTION.1: "%s".' % (_utils.formattedException(details=ex)))
__logger__('INFO.1.6.1: config.isRunning="%s".' % (config.isRunning))
while (config.isRunning and threaded):
jobs = [CronExpression(__line__) for __line__ in normalize(__lines__) if (misc.isStringValid(__line__))]
config.isRunning = callback(jobs) if (callable(callback)) else True
if (config.isRunning):
for job in jobs:
if (config.verbose):
__logger__('INFO.1.7: Job: "%s".' % (job))
if job.check_trigger(time.gmtime(time.time())[:5]):
if (config.dryrun):
__logger__('INFO.1.8: Execute: %s' % (job.comment))
else:
import tempfile
__cmd__ = tempfile.NamedTemporaryFile().name
__sysout__ = _utils.stringIO()
def __callback__(ss,data=None):
global __begin__
if (data) and (misc.isString(data)) and (len(data) > 0):
__logger__('INFO.1.9: %s' % (data))
return
def __onExit__(ss):
__logger__('INFO.1.10: __onExit__')
__logger__('INFO.1.11: %s' % (__sysout__.getvalue()))
if (os.path.exists(__cmd__)):
os.remove(__cmd__)
wfHandle = open(__cmd__,'w')
print >>wfHandle, '@echo on\n'
print >>wfHandle, '%s\n' % (job.comment)
wfHandle.flush()
wfHandle.close()
ss = SmartShell(__cmd__,callback=__callback__,isDebugging=True,onExit=__onExit__,sysout=__sysout__)
ss.execute()
if (threaded):
if (config.verbose):
__logger__('INFO.1.12: Sleeping for %s secs...' % (config.resolution))
time.sleep(config.resolution if (isinstance(config.resolution,float) or isinstance(config.resolution,int)) else 60)
if (callable(jsonHandler)):
try:
__config__ = jsonHandler(config.jsonFpath)
except Exception as ex:
__config__ = SmartObject()
__file__ = config.schedulefpath if (misc.isStringValid(config.schedulefpath)) else None
if (os.path.exists(__file__)):
if (config.verbose):
__logger__('INFO.1.13: Crontab Exists: "%s".' % (__file__))
__lines__ = _utils._readFileFrom(__file__)
if (config.verbose):
__logger__('INFO.1.14: Crontab Content: "%s".' % (__lines__))
else:
__logger__('WARNING.1.15: Cannot execute crontab unless threaded is %s (true).' % (threaded))
return config.isRunning
__logger__('INFO.1: threaded="%s".' % (threaded))
if (threaded):
@threadpool.threadify(__Q__)
def threaded_crontab(config,jsonHandler=jsonHandler,callback=callback,logging_callback=logging_callback,default=default):
return __crontab__(config,jsonHandler=jsonHandler,callback=callback,logging_callback=logging_callback,default=default)
threaded_crontab(config,jsonHandler=jsonHandler,callback=callback,logging_callback=logging_callback,default=default)
__logger__('INFO.2: isRunning="%s".' % (config.isRunning))
if (not config.isRunning):
if (config.verbose):
if (callable(logging_callback)):
try:
logging_callback('INFO: Cannot run due to application defined criteria expressed via the callback.')
except:
pass
__logger__('INFO.3: TERMINATING !!!')
pid = os.getpid()
os.kill(pid,signal.SIGTERM)
else:
__logger__('INFO.3: threaded="%s".' % (threaded))
__crontab__(config,jsonHandler=jsonHandler,callback=callback,logging_callback=logging_callback,default=default)
if (__name__ == '__main__'):
import time
__crontab__ = '''
# Minute Hour Day of Month Month Day of Week Command
# (0-59) (0-23) (1-31) (1-12 or Jan-Dec) (0-6 or Sun-Sat)
*/5 * * * * dir # comment this
'''
__file__ = r'C:\@2\crontab'
crontab(__file__,dry_run=True,threaded=True,verbose=True)
s_begin = time.time()
while ((time.time() - s_begin) < 900.0):
print '(+++)'
time.sleep(10) | false | true |
f7fce01e5b9264f55562fa8ffe6cb7e02ca3c449 | 44 | py | Python | tests/dummy/__init__.py | fossabot/mfm | 16fb084402ad6239a5b4fb913376392ed859f144 | [
"MIT"
] | 1 | 2018-08-11T19:04:40.000Z | 2018-08-11T19:04:40.000Z | tests/dummy/__init__.py | fossabot/mfm | 16fb084402ad6239a5b4fb913376392ed859f144 | [
"MIT"
] | 68 | 2016-05-22T19:56:59.000Z | 2018-08-11T18:40:44.000Z | tests/dummy/__init__.py | fossabot/mfm | 16fb084402ad6239a5b4fb913376392ed859f144 | [
"MIT"
] | 2 | 2016-12-28T17:33:22.000Z | 2018-08-11T18:36:35.000Z | from .View import View
__all__ = ['View']
| 8.8 | 22 | 0.659091 | from .View import View
__all__ = ['View']
| true | true |
f7fce06752c7e0b73a802944e6a8c60b5b50b9f6 | 2,178 | py | Python | gunnery/task/forms.py | dholdaway/gunnery | 87664986cd8bf1a17a9cbfb98fb1012cef9adaec | [
"Apache-2.0"
] | null | null | null | gunnery/task/forms.py | dholdaway/gunnery | 87664986cd8bf1a17a9cbfb98fb1012cef9adaec | [
"Apache-2.0"
] | 1 | 2021-06-10T23:55:50.000Z | 2021-06-10T23:55:50.000Z | gunnery/task/forms.py | dholdaway/gunnery | 87664986cd8bf1a17a9cbfb98fb1012cef9adaec | [
"Apache-2.0"
] | null | null | null | from django.forms import *
from django.forms.widgets import Textarea, SelectMultiple, HiddenInput
from django.forms.models import modelformset_factory, BaseModelFormSet
from django.core.exceptions import ValidationError
from django.db import models
from crispy_forms.helper import FormHelper
from crispy_forms.layout import *
from .models import *
from core.forms import PageForm, TagSelect, create_form
class TaskForm(PageForm):
    """Create/edit form for a Task; the owning application is set by the
    view, so it travels through a hidden input rather than a visible field."""
    class Meta:
        model = Task
        fields = ['name', 'description', 'application']
        widgets = {'description': Textarea(attrs={'rows': 2}),
                   'application': HiddenInput() }
class TaskParameterForm(ModelForm):
    """Row form for one named parameter of a task (used in a formset)."""
    class Meta:
        model = TaskParameter
        fields = ['name', 'description']
        # Single-line input: parameter descriptions are short.
        widgets = {'description': TextInput() }
class TaskCommandForm(ModelForm):
    """Row form for one of a task's commands and the roles it targets."""
    class Meta:
        model = TaskCommand
        fields = ['command', 'roles']
        widgets = {'roles': TagSelect(attrs={'data-placeholder': 'Roles'}) }
class ExecutionForm(ModelForm):
    """Form for launching an execution; only the environment is chosen here."""
    class Meta:
        model = Execution
        fields = ['environment']
class ExecutionParameterForm(ModelForm):
    """Row form for one name/value parameter supplied to an execution."""
    class Meta:
        model = ExecutionParameter
        fields = ['name', 'value']
class RequireFirst(BaseModelFormSet):
    """Formset that requires at least one non-deleted command row."""
    def clean(self, *args, **kwargs):
        """
        Raise ValidationError unless at least one form supplies a command
        and is not flagged for deletion.
        """
        super(RequireFirst, self).clean()
        for form in self.forms:
            # .get() guards against forms whose cleaned_data lacks the
            # DELETE key (extra or invalid forms); the original subscript
            # access raised KeyError in that case.
            if 'command' in form.cleaned_data and not form.cleaned_data.get('DELETE', False):
                # Found a live command; nothing more to check.
                return
        raise ValidationError('At least one command must be specified')
# Formset for a task's parameters: rows may be reordered and deleted; one
# blank extra row is offered for adding a new parameter.
TaskParameterFormset = modelformset_factory(TaskParameter,
                                            form=TaskParameterForm,
                                            can_order=True,
                                            can_delete=True,
                                            extra=1)
# Formset for a task's commands: RequireFirst enforces that at least one
# non-deleted command remains after editing.
TaskCommandFormset = modelformset_factory(TaskCommand,
                                          form=TaskCommandForm,
                                          can_order=True,
                                          can_delete=True,
                                          extra=2,
                                          formset=RequireFirst)
def task_create_form(name, request, id, args=None):
    """
    Build the task page form via the shared create_form() helper.

    name selects which form to build ('task'); request/id/args are passed
    straight through to create_form().  args defaults to an empty dict.
    """
    # None sentinel instead of a mutable default argument ({} is shared
    # across calls); create_form still receives a dict either way.
    if args is None:
        args = {}
    form_objects = {
        'task': TaskForm,
    }
    return create_form(form_objects, name, request, id, args)
from django.forms.widgets import Textarea, SelectMultiple, HiddenInput
from django.forms.models import modelformset_factory, BaseModelFormSet
from django.core.exceptions import ValidationError
from django.db import models
from crispy_forms.helper import FormHelper
from crispy_forms.layout import *
from .models import *
from core.forms import PageForm, TagSelect, create_form
class TaskForm(PageForm):
class Meta:
model = Task
fields = ['name', 'description', 'application']
widgets = {'description': Textarea(attrs={'rows': 2}),
'application': HiddenInput() }
class TaskParameterForm(ModelForm):
class Meta:
model = TaskParameter
fields = ['name', 'description']
widgets = {'description': TextInput() }
class TaskCommandForm(ModelForm):
class Meta:
model = TaskCommand
fields = ['command', 'roles']
widgets = {'roles': TagSelect(attrs={'data-placeholder': 'Roles'}) }
class ExecutionForm(ModelForm):
class Meta:
model = Execution
fields = ['environment']
class ExecutionParameterForm(ModelForm):
class Meta:
model = ExecutionParameter
fields = ['name', 'value']
class RequireFirst(BaseModelFormSet):
def clean(self, *args, **kwargs):
super(RequireFirst, self).clean()
has_one = False
for form in self.forms:
if 'command' in form.cleaned_data and form.cleaned_data['DELETE']==False :
has_one = True
if not has_one:
raise ValidationError('At least one command must be specified')
TaskParameterFormset = modelformset_factory(TaskParameter,
form=TaskParameterForm,
can_order=True,
can_delete=True,
extra=1)
TaskCommandFormset = modelformset_factory(TaskCommand,
form=TaskCommandForm,
can_order=True,
can_delete=True,
extra=2,
formset=RequireFirst)
def task_create_form(name, request, id, args={}):
form_objects = {
'task': TaskForm,
}
return create_form(form_objects, name, request, id, args) | true | true |
f7fce09ddc8c9c02aff75f9bc6492a310f90bd8c | 81,526 | py | Python | django/contrib/admin/options.py | sublime1809/django | 9a5fe5b29fd431431a53da63ad8825d878ee5878 | [
"BSD-3-Clause"
] | 2 | 2015-01-21T15:45:07.000Z | 2015-02-21T02:38:13.000Z | django/contrib/admin/options.py | HenriqueLR/django | d1ca70110f49f0be90206c8da516ac16aebc8c75 | [
"BSD-3-Clause"
] | null | null | null | django/contrib/admin/options.py | HenriqueLR/django | d1ca70110f49f0be90206c8da516ac16aebc8c75 | [
"BSD-3-Clause"
] | null | null | null | from collections import OrderedDict
import copy
import operator
from functools import partial, reduce, update_wrapper
import warnings
from django import forms
from django.conf import settings
from django.contrib import messages
from django.contrib.admin import widgets, helpers
from django.contrib.admin import validation
from django.contrib.admin.checks import (BaseModelAdminChecks, ModelAdminChecks,
InlineModelAdminChecks)
from django.contrib.admin.exceptions import DisallowedModelAdminToField
from django.contrib.admin.utils import (quote, unquote, flatten_fieldsets,
get_deleted_objects, model_format_dict, NestedObjects,
lookup_needs_distinct)
from django.contrib.admin.templatetags.admin_static import static
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from django.contrib.auth import get_permission_codename
from django.core import checks
from django.core.exceptions import (PermissionDenied, ValidationError,
FieldError, ImproperlyConfigured)
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse
from django.db import models, transaction, router
from django.db.models.constants import LOOKUP_SEP
from django.db.models.related import RelatedObject
from django.db.models.fields import BLANK_CHOICE_DASH, FieldDoesNotExist
from django.db.models.sql.constants import QUERY_TERMS
from django.forms.formsets import all_valid, DELETION_FIELD_NAME
from django.forms.models import (modelform_factory, modelformset_factory,
inlineformset_factory, BaseInlineFormSet, modelform_defines_fields)
from django.forms.widgets import SelectMultiple, CheckboxSelectMultiple
from django.http import Http404, HttpResponseRedirect
from django.http.response import HttpResponseBase
from django.shortcuts import get_object_or_404
from django.template.response import SimpleTemplateResponse, TemplateResponse
from django.utils import six
from django.utils.decorators import method_decorator
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.html import escape, escapejs
from django.utils.http import urlencode
from django.utils.text import capfirst, get_text_list
from django.utils.translation import string_concat
from django.utils.translation import ugettext as _
from django.utils.translation import ungettext
from django.utils.safestring import mark_safe
from django.views.decorators.csrf import csrf_protect
# Query-string parameter names the admin uses to flag popup windows and
# "to field" redirect targets (also exposed to templates in render_change_form).
IS_POPUP_VAR = '_popup'
TO_FIELD_VAR = '_to_field'
# Orientation constants for ModelAdmin.radio_fields.
HORIZONTAL, VERTICAL = 1, 2
def get_content_type_for_model(obj):
    """Return the ContentType for ``obj``'s model (non-concrete lookup)."""
    # Deferred import: this module gets imported in the application's root
    # package, so it cannot import other applications' models at module level.
    from django.contrib.contenttypes.models import ContentType
    return ContentType.objects.get_for_model(obj, for_concrete_model=False)
def get_ul_class(radio_style):
    """Map a radio_fields orientation constant to its admin CSS class."""
    if radio_style == VERTICAL:
        return 'radiolist'
    return 'radiolist inline'
class IncorrectLookupParameters(Exception):
    """Signals that changelist query-string lookup parameters were invalid."""
    pass
# Defaults for formfield_overrides. ModelAdmin subclasses can change this
# by adding to ModelAdmin.formfield_overrides.
# Maps a model field class to the kwargs passed to its formfield() call;
# consumed by BaseModelAdmin.formfield_for_dbfield (which walks the MRO,
# so subclasses of these field types are matched too).
FORMFIELD_FOR_DBFIELD_DEFAULTS = {
    models.DateTimeField: {
        'form_class': forms.SplitDateTimeField,
        'widget': widgets.AdminSplitDateTime
    },
    models.DateField: {'widget': widgets.AdminDateWidget},
    models.TimeField: {'widget': widgets.AdminTimeWidget},
    models.TextField: {'widget': widgets.AdminTextareaWidget},
    models.URLField: {'widget': widgets.AdminURLFieldWidget},
    models.IntegerField: {'widget': widgets.AdminIntegerFieldWidget},
    models.BigIntegerField: {'widget': widgets.AdminBigIntegerFieldWidget},
    models.CharField: {'widget': widgets.AdminTextInputWidget},
    models.ImageField: {'widget': widgets.AdminFileWidget},
    models.FileField: {'widget': widgets.AdminFileWidget},
    models.EmailField: {'widget': widgets.AdminEmailInputWidget},
}
# Method decorator that applies CSRF protection to admin view methods.
csrf_protect_m = method_decorator(csrf_protect)
class BaseModelAdmin(six.with_metaclass(forms.MediaDefiningClass)):
    """Functionality common to both ModelAdmin and InlineAdmin."""
    # Form/field layout options, overridable per subclass.
    raw_id_fields = ()
    fields = None
    exclude = None
    fieldsets = None
    form = forms.ModelForm
    filter_vertical = ()
    filter_horizontal = ()
    radio_fields = {}
    prepopulated_fields = {}
    # Per-class additions merged over FORMFIELD_FOR_DBFIELD_DEFAULTS in __init__.
    formfield_overrides = {}
    readonly_fields = ()
    ordering = None
    # True, False, or a callable taking the object (see get_view_on_site_url).
    view_on_site = True
    # Validation of ModelAdmin definitions
    # Old, deprecated style:
    validator_class = None
    default_validator_class = validation.BaseValidator
    # New style:
    checks_class = BaseModelAdminChecks
@classmethod
def validate(cls, model):
warnings.warn(
'ModelAdmin.validate() is deprecated. Use "check()" instead.',
RemovedInDjango19Warning)
if cls.validator_class:
validator = cls.validator_class()
else:
validator = cls.default_validator_class()
validator.validate(cls, model)
@classmethod
def check(cls, model, **kwargs):
if cls.validator_class:
warnings.warn(
'ModelAdmin.validator_class is deprecated. '
'ModeAdmin validators must be converted to use '
'the system check framework.',
RemovedInDjango19Warning)
validator = cls.validator_class()
try:
validator.validate(cls, model)
except ImproperlyConfigured as e:
return [checks.Error(e.args[0], hint=None, obj=cls)]
else:
return []
else:
return cls.checks_class().check(cls, model, **kwargs)
    def __init__(self):
        # Merge this class's formfield_overrides on top of the module-level
        # defaults so lookups see a single combined mapping per instance.
        overrides = FORMFIELD_FOR_DBFIELD_DEFAULTS.copy()
        overrides.update(self.formfield_overrides)
        self.formfield_overrides = overrides
    def formfield_for_dbfield(self, db_field, **kwargs):
        """
        Hook for specifying the form Field instance for a given database Field
        instance.
        If kwargs are given, they're passed to the form Field's constructor.
        """
        request = kwargs.pop("request", None)
        # If the field specifies choices, we don't need to look for special
        # admin widgets - we just need to use a select widget of some kind.
        if db_field.choices:
            return self.formfield_for_choice_field(db_field, request, **kwargs)
        # ForeignKey or ManyToManyFields
        if isinstance(db_field, (models.ForeignKey, models.ManyToManyField)):
            # Combine the field kwargs with any options for formfield_overrides.
            # Make sure the passed in **kwargs override anything in
            # formfield_overrides because **kwargs is more specific, and should
            # always win.
            if db_field.__class__ in self.formfield_overrides:
                kwargs = dict(self.formfield_overrides[db_field.__class__], **kwargs)
            # Get the correct formfield.
            if isinstance(db_field, models.ForeignKey):
                formfield = self.formfield_for_foreignkey(db_field, request, **kwargs)
            elif isinstance(db_field, models.ManyToManyField):
                formfield = self.formfield_for_manytomany(db_field, request, **kwargs)
            # For non-raw_id fields, wrap the widget with a wrapper that adds
            # extra HTML -- the "add other" interface -- to the end of the
            # rendered output. formfield can be None if it came from a
            # OneToOneField with parent_link=True or a M2M intermediary.
            if formfield and db_field.name not in self.raw_id_fields:
                related_modeladmin = self.admin_site._registry.get(db_field.rel.to)
                can_add_related = bool(related_modeladmin and
                                       related_modeladmin.has_add_permission(request))
                formfield.widget = widgets.RelatedFieldWidgetWrapper(
                    formfield.widget, db_field.rel, self.admin_site,
                    can_add_related=can_add_related)
            return formfield
        # If we've got overrides for the formfield defined, use 'em. **kwargs
        # passed to formfield_for_dbfield override the defaults.
        for klass in db_field.__class__.mro():
            if klass in self.formfield_overrides:
                # Deep copy so mutable widget instances held in the overrides
                # mapping are not shared between generated form fields.
                kwargs = dict(copy.deepcopy(self.formfield_overrides[klass]), **kwargs)
                return db_field.formfield(**kwargs)
        # For any other type of field, just call its formfield() method.
        return db_field.formfield(**kwargs)
    def formfield_for_choice_field(self, db_field, request=None, **kwargs):
        """
        Get a form Field for a database Field that has declared choices.
        """
        # If the field is named as a radio_field, use a RadioSelect
        if db_field.name in self.radio_fields:
            # Avoid stomping on custom widget/choices arguments.
            if 'widget' not in kwargs:
                kwargs['widget'] = widgets.AdminRadioSelect(attrs={
                    'class': get_ul_class(self.radio_fields[db_field.name]),
                })
            if 'choices' not in kwargs:
                kwargs['choices'] = db_field.get_choices(
                    include_blank=db_field.blank,
                    blank_choice=[('', _('None'))]
                )
        # Let the model field build its own form field with our kwargs.
        return db_field.formfield(**kwargs)
    def get_field_queryset(self, db, db_field, request):
        """
        If the ModelAdmin specifies ordering, the queryset should respect that
        ordering. Otherwise don't specify the queryset, let the field decide
        (returns None in that case).
        """
        related_admin = self.admin_site._registry.get(db_field.rel.to, None)
        if related_admin is not None:
            ordering = related_admin.get_ordering(request)
            if ordering is not None and ordering != ():
                # Apply the related admin's ordering to the related model's
                # default manager on the requested database alias.
                return db_field.rel.to._default_manager.using(db).order_by(*ordering)
        return None
    def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
        """
        Get a form Field for a ForeignKey.
        """
        db = kwargs.get('using')
        # raw_id and radio_fields each select a dedicated widget.
        if db_field.name in self.raw_id_fields:
            kwargs['widget'] = widgets.ForeignKeyRawIdWidget(db_field.rel,
                                                             self.admin_site, using=db)
        elif db_field.name in self.radio_fields:
            kwargs['widget'] = widgets.AdminRadioSelect(attrs={
                'class': get_ul_class(self.radio_fields[db_field.name]),
            })
            kwargs['empty_label'] = _('None') if db_field.blank else None
        if 'queryset' not in kwargs:
            # Respect the related admin's default ordering when available.
            queryset = self.get_field_queryset(db, db_field, request)
            if queryset is not None:
                kwargs['queryset'] = queryset
        return db_field.formfield(**kwargs)
    def formfield_for_manytomany(self, db_field, request=None, **kwargs):
        """
        Get a form Field for a ManyToManyField.
        """
        # If it uses an intermediary model that isn't auto created, don't show
        # a field in admin.
        if not db_field.rel.through._meta.auto_created:
            return None
        db = kwargs.get('using')
        if db_field.name in self.raw_id_fields:
            kwargs['widget'] = widgets.ManyToManyRawIdWidget(db_field.rel,
                                                             self.admin_site, using=db)
            kwargs['help_text'] = ''
        elif db_field.name in (list(self.filter_vertical) + list(self.filter_horizontal)):
            kwargs['widget'] = widgets.FilteredSelectMultiple(
                db_field.verbose_name,
                db_field.name in self.filter_vertical
            )
        if 'queryset' not in kwargs:
            # Respect the related admin's default ordering when available.
            queryset = self.get_field_queryset(db, db_field, request)
            if queryset is not None:
                kwargs['queryset'] = queryset
        form_field = db_field.formfield(**kwargs)
        # Append the multi-select usage hint, unless the widget renders
        # checkboxes (where the hint would be wrong).
        if isinstance(form_field.widget, SelectMultiple) and not isinstance(form_field.widget, CheckboxSelectMultiple):
            msg = _('Hold down "Control", or "Command" on a Mac, to select more than one.')
            help_text = form_field.help_text
            form_field.help_text = string_concat(help_text, ' ', msg) if help_text else msg
        return form_field
    def get_view_on_site_url(self, obj=None):
        """
        Return the "view on site" URL for ``obj``.
        A callable view_on_site is invoked directly with the object; a truthy
        view_on_site plus a get_absolute_url() method routes through the
        admin's content-type redirect view. Falls through (returns None)
        when neither applies.
        """
        if obj is None or not self.view_on_site:
            return None
        if callable(self.view_on_site):
            return self.view_on_site(obj)
        elif self.view_on_site and hasattr(obj, 'get_absolute_url'):
            # use the ContentType lookup if view_on_site is True
            return reverse('admin:view_on_site', kwargs={
                'content_type_id': get_content_type_for_model(obj).pk,
                'object_id': obj.pk
            })
    @property
    def declared_fieldsets(self):
        # Deprecated accessor. It warns on *every* access so get_fieldsets()
        # can detect (by counting warnings) whether a subclass overrode it.
        warnings.warn(
            "ModelAdmin.declared_fieldsets is deprecated and "
            "will be removed in Django 1.9.",
            RemovedInDjango19Warning, stacklevel=2
        )
        if self.fieldsets:
            return self.fieldsets
        elif self.fields:
            # Synthesize a single unnamed fieldset from the flat fields list.
            return [(None, {'fields': self.fields})]
        return None
    def get_fields(self, request, obj=None):
        """
        Hook for specifying fields.
        """
        return self.fields
    def get_fieldsets(self, request, obj=None):
        """
        Hook for specifying fieldsets.
        """
        # We access the property and check if it triggers a warning.
        # If it does, then it's ours and we can safely ignore it, but if
        # it doesn't then it has been overridden so we must warn about the
        # deprecation.
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            declared_fieldsets = self.declared_fieldsets
        # Exactly one RemovedInDjango19Warning means the base property ran
        # untouched; anything else indicates a subclass override.
        if len(w) != 1 or not issubclass(w[0].category, RemovedInDjango19Warning):
            warnings.warn(
                "ModelAdmin.declared_fieldsets is deprecated and "
                "will be removed in Django 1.9.",
                RemovedInDjango19Warning
            )
            if declared_fieldsets:
                return declared_fieldsets
        if self.fieldsets:
            return self.fieldsets
        # Default: one unnamed fieldset holding all fields.
        return [(None, {'fields': self.get_fields(request, obj)})]
def get_ordering(self, request):
"""
Hook for specifying field ordering.
"""
return self.ordering or () # otherwise we might try to *None, which is bad ;)
def get_readonly_fields(self, request, obj=None):
"""
Hook for specifying custom readonly fields.
"""
return self.readonly_fields
def get_prepopulated_fields(self, request, obj=None):
"""
Hook for specifying custom prepopulated fields.
"""
return self.prepopulated_fields
def get_queryset(self, request):
"""
Returns a QuerySet of all model instances that can be edited by the
admin site. This is used by changelist_view.
"""
qs = self.model._default_manager.get_queryset()
# TODO: this should be handled by some parameter to the ChangeList.
ordering = self.get_ordering(request)
if ordering:
qs = qs.order_by(*ordering)
return qs
    def lookup_allowed(self, lookup, value):
        """
        Return True when the changelist query-string ``lookup`` (with the
        given ``value``) is permitted for this admin.
        """
        from django.contrib.admin.filters import SimpleListFilter
        model = self.model
        # Check FKey lookups that are allowed, so that popups produced by
        # ForeignKeyRawIdWidget, on the basis of ForeignKey.limit_choices_to,
        # are allowed to work.
        for l in model._meta.related_fkey_lookups:
            # As ``limit_choices_to`` can be a callable, invoke it here.
            if callable(l):
                l = l()
            for k, v in widgets.url_params_from_lookup_dict(l).items():
                if k == lookup and v == value:
                    return True
        parts = lookup.split(LOOKUP_SEP)
        # Last term in lookup is a query term (__exact, __startswith etc)
        # This term can be ignored.
        if len(parts) > 1 and parts[-1] in QUERY_TERMS:
            parts.pop()
        # Special case -- foo__id__exact and foo__id queries are implied
        # if foo has been specifically included in the lookup list; so
        # drop __id if it is the last part. However, first we need to find
        # the pk attribute name.
        rel_name = None
        for part in parts[:-1]:
            try:
                field, _, _, _ = model._meta.get_field_by_name(part)
            except FieldDoesNotExist:
                # Lookups on non-existent fields are ok, since they're ignored
                # later.
                return True
            if hasattr(field, 'rel'):
                if field.rel is None:
                    # This property or relation doesn't exist, but it's allowed
                    # since it's ignored in ChangeList.get_filters().
                    return True
                model = field.rel.to
                rel_name = field.rel.get_related_field().name
            elif isinstance(field, RelatedObject):
                model = field.model
                rel_name = model._meta.pk.name
            else:
                rel_name = None
        if rel_name and len(parts) > 1 and parts[-1] == rel_name:
            parts.pop()
        if len(parts) == 1:
            # Plain field lookups on the model itself are always allowed.
            return True
        clean_lookup = LOOKUP_SEP.join(parts)
        # Otherwise the lookup must be one named by date_hierarchy/list_filter.
        valid_lookups = [self.date_hierarchy]
        for filter_item in self.list_filter:
            if isinstance(filter_item, type) and issubclass(filter_item, SimpleListFilter):
                valid_lookups.append(filter_item.parameter_name)
            elif isinstance(filter_item, (list, tuple)):
                valid_lookups.append(filter_item[0])
            else:
                valid_lookups.append(filter_item)
        return clean_lookup in valid_lookups
    def to_field_allowed(self, request, to_field):
        """
        Returns True if the model associated with this admin should be
        allowed to be referenced by the specified field.
        """
        opts = self.model._meta
        try:
            field = opts.get_field(to_field)
        except FieldDoesNotExist:
            return False
        # Make sure at least one of the models registered for this site
        # references this field through a FK or a M2M relationship.
        registered_models = set()
        for model, admin in self.admin_site._registry.items():
            registered_models.add(model)
            for inline in admin.inlines:
                registered_models.add(inline.model)
        for related_object in (opts.get_all_related_objects(include_hidden=True) +
                               opts.get_all_related_many_to_many_objects()):
            related_model = related_object.model
            # A registered subclass of the related model also counts.
            if (any(issubclass(model, related_model) for model in registered_models) and
                    related_object.field.rel.get_related_field() == field):
                return True
        return False
def has_add_permission(self, request):
"""
Returns True if the given request has permission to add an object.
Can be overridden by the user in subclasses.
"""
opts = self.opts
codename = get_permission_codename('add', opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_change_permission(self, request, obj=None):
"""
Returns True if the given request has permission to change the given
Django model instance, the default implementation doesn't examine the
`obj` parameter.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to change the `obj`
model instance. If `obj` is None, this should return True if the given
request has permission to change *any* object of the given type.
"""
opts = self.opts
codename = get_permission_codename('change', opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_delete_permission(self, request, obj=None):
"""
Returns True if the given request has permission to change the given
Django model instance, the default implementation doesn't examine the
`obj` parameter.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to delete the `obj`
model instance. If `obj` is None, this should return True if the given
request has permission to delete *any* object of the given type.
"""
opts = self.opts
codename = get_permission_codename('delete', opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_module_permission(self, request):
"""
Returns True if the given request has any permission in the given
app label.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to view the module on
the admin index page and access the module's index page. Overriding it
does not restrict access to the add, change or delete views. Use
`ModelAdmin.has_(add|change|delete)_permission` for that.
"""
return request.user.has_module_perms(self.opts.app_label)
@python_2_unicode_compatible
class ModelAdmin(BaseModelAdmin):
    "Encapsulates all admin options and functionality for a given model."
    # Changelist presentation options.
    list_display = ('__str__',)
    list_display_links = ()
    list_filter = ()
    list_select_related = False
    list_per_page = 100
    list_max_show_all = 200
    list_editable = ()
    search_fields = ()
    date_hierarchy = None
    # Save-button and pagination behaviour.
    save_as = False
    save_on_top = False
    paginator = Paginator
    preserve_filters = True
    inlines = []
    # Custom templates (designed to be over-ridden in subclasses)
    add_form_template = None
    change_form_template = None
    change_list_template = None
    delete_confirmation_template = None
    delete_selected_confirmation_template = None
    object_history_template = None
    # Actions
    actions = []
    action_form = helpers.ActionForm
    actions_on_top = True
    actions_on_bottom = False
    actions_selection_counter = True
    # validation
    # Old, deprecated style:
    default_validator_class = validation.ModelAdminValidator
    # New style:
    checks_class = ModelAdminChecks
    def __init__(self, model, admin_site):
        # Bind this admin to a concrete model and the site that registered it.
        self.model = model
        self.opts = model._meta
        self.admin_site = admin_site
        super(ModelAdmin, self).__init__()
    def __str__(self):
        """Return "app_label.AdminClassName" for this admin instance."""
        return "%s.%s" % (self.model._meta.app_label, self.__class__.__name__)
    def get_inline_instances(self, request, obj=None):
        """
        Instantiate this admin's inline classes. Inlines for which the user
        holds no add/change/delete permission at all are dropped; inlines
        without add permission get max_num=0 (no empty extra forms).
        """
        inline_instances = []
        for inline_class in self.inlines:
            inline = inline_class(self.model, self.admin_site)
            if request:
                if not (inline.has_add_permission(request) or
                        inline.has_change_permission(request, obj) or
                        inline.has_delete_permission(request, obj)):
                    continue
                if not inline.has_add_permission(request):
                    inline.max_num = 0
            inline_instances.append(inline)
        return inline_instances
    def get_urls(self):
        """Build the urlpatterns for this model's admin views."""
        from django.conf.urls import url
        def wrap(view):
            # Route the view through admin_view() while preserving the
            # wrapped view's metadata for introspection.
            def wrapper(*args, **kwargs):
                return self.admin_site.admin_view(view)(*args, **kwargs)
            return update_wrapper(wrapper, view)
        info = self.model._meta.app_label, self.model._meta.model_name
        # NOTE: the catch-all change-view pattern must stay last; it would
        # otherwise shadow the add/history/delete URLs.
        urlpatterns = [
            url(r'^$', wrap(self.changelist_view), name='%s_%s_changelist' % info),
            url(r'^add/$', wrap(self.add_view), name='%s_%s_add' % info),
            url(r'^(.+)/history/$', wrap(self.history_view), name='%s_%s_history' % info),
            url(r'^(.+)/delete/$', wrap(self.delete_view), name='%s_%s_delete' % info),
            url(r'^(.+)/$', wrap(self.change_view), name='%s_%s_change' % info),
        ]
        return urlpatterns
    def urls(self):
        # Exposed as a read-only property below.
        return self.get_urls()
    urls = property(urls)
    @property
    def media(self):
        """Collect the JavaScript assets this admin's pages need."""
        # Serve minified assets unless DEBUG is on.
        extra = '' if settings.DEBUG else '.min'
        js = [
            'core.js',
            'admin/RelatedObjectLookups.js',
            'jquery%s.js' % extra,
            'jquery.init.js'
        ]
        if self.actions is not None:
            js.append('actions%s.js' % extra)
        if self.prepopulated_fields:
            js.extend(['urlify.js', 'prepopulate%s.js' % extra])
        return forms.Media(js=[static('admin/js/%s' % url) for url in js])
    def get_model_perms(self, request):
        """
        Returns a dict of all perms for this model. This dict has the keys
        ``add``, ``change``, and ``delete`` mapping to the True/False for each
        of those actions.
        """
        return {
            'add': self.has_add_permission(request),
            'change': self.has_change_permission(request),
            'delete': self.has_delete_permission(request),
        }
    def get_fields(self, request, obj=None):
        # Overrides BaseModelAdmin.get_fields: when no explicit ``fields``
        # are configured, derive them from the form plus readonly fields.
        if self.fields:
            return self.fields
        form = self.get_form(request, obj, fields=None)
        return list(form.base_fields) + list(self.get_readonly_fields(request, obj))
    def get_form(self, request, obj=None, **kwargs):
        """
        Returns a Form class for use in the admin add view. This is used by
        add_view and change_view.
        """
        if 'fields' in kwargs:
            fields = kwargs.pop('fields')
        else:
            fields = flatten_fieldsets(self.get_fieldsets(request, obj))
        if self.exclude is None:
            exclude = []
        else:
            exclude = list(self.exclude)
        # Readonly fields are never rendered as form inputs.
        exclude.extend(self.get_readonly_fields(request, obj))
        if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude:
            # Take the custom ModelForm's Meta.exclude into account only if the
            # ModelAdmin doesn't define its own.
            exclude.extend(self.form._meta.exclude)
        # if exclude is an empty list we pass None to be consistent with the
        # default on modelform_factory
        exclude = exclude or None
        defaults = {
            "form": self.form,
            "fields": fields,
            "exclude": exclude,
            "formfield_callback": partial(self.formfield_for_dbfield, request=request),
        }
        defaults.update(kwargs)
        if defaults['fields'] is None and not modelform_defines_fields(defaults['form']):
            # No field restriction anywhere: let the factory include them all.
            defaults['fields'] = forms.ALL_FIELDS
        try:
            return modelform_factory(self.model, **defaults)
        except FieldError as e:
            # Re-raise with a pointer at this admin class to ease debugging.
            raise FieldError('%s. Check fields/fieldsets/exclude attributes of class %s.'
                             % (e, self.__class__.__name__))
    def get_changelist(self, request, **kwargs):
        """
        Returns the ChangeList class for use on the changelist page.
        """
        from django.contrib.admin.views.main import ChangeList
        return ChangeList
    def get_object(self, request, object_id):
        """
        Returns an instance matching the primary key provided. ``None`` is
        returned if no match is found (or the object_id failed validation
        against the primary key field).
        """
        queryset = self.get_queryset(request)
        model = queryset.model
        try:
            object_id = model._meta.pk.to_python(object_id)
            return queryset.get(pk=object_id)
        except (model.DoesNotExist, ValidationError, ValueError):
            # Invalid or unknown primary keys are reported as "not found".
            return None
    def get_changelist_form(self, request, **kwargs):
        """
        Returns a Form class for use in the Formset on the changelist page.
        """
        defaults = {
            "formfield_callback": partial(self.formfield_for_dbfield, request=request),
        }
        defaults.update(kwargs)
        if (defaults.get('fields') is None
                and not modelform_defines_fields(defaults.get('form'))):
            # Neither caller nor form restricts fields: include them all.
            defaults['fields'] = forms.ALL_FIELDS
        return modelform_factory(self.model, **defaults)
    def get_changelist_formset(self, request, **kwargs):
        """
        Returns a FormSet class for use on the changelist page if list_editable
        is used.
        """
        defaults = {
            "formfield_callback": partial(self.formfield_for_dbfield, request=request),
        }
        defaults.update(kwargs)
        # extra=0: edit existing rows only, never render blank add-forms.
        return modelformset_factory(self.model,
            self.get_changelist_form(request), extra=0,
            fields=self.list_editable, **defaults)
    def _get_formsets(self, request, obj):
        """
        Helper function that exists to allow the deprecation warning to be
        executed while this function continues to return a generator.
        """
        for inline in self.get_inline_instances(request, obj):
            yield inline.get_formset(request, obj)
    def get_formsets(self, request, obj=None):
        # Deprecated shim: warn eagerly here, then delegate to the generator
        # helper above (a warning inside the generator would fire too late).
        warnings.warn(
            "ModelAdmin.get_formsets() is deprecated and will be removed in "
            "Django 1.9. Use ModelAdmin.get_formsets_with_inlines() instead.",
            RemovedInDjango19Warning, stacklevel=2
        )
        return self._get_formsets(request, obj)
    def get_formsets_with_inlines(self, request, obj=None):
        """
        Yields formsets and the corresponding inlines.
        """
        # We call get_formsets() [deprecated] and check if it triggers a
        # warning. If it does, then it's ours and we can safely ignore it, but
        # if it doesn't then it has been overridden so we must warn about the
        # deprecation.
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            formsets = self.get_formsets(request, obj)
        if len(w) != 1 or not issubclass(w[0].category, RemovedInDjango19Warning):
            warnings.warn(
                "ModelAdmin.get_formsets() is deprecated and will be removed in "
                "Django 1.9. Use ModelAdmin.get_formsets_with_inlines() instead.",
                RemovedInDjango19Warning
            )
            if formsets:
                # Overridden get_formsets(): pair its formsets with the
                # inline instances positionally.
                zipped = zip(formsets, self.get_inline_instances(request, None))
                for formset, inline in zipped:
                    yield formset, inline
        else:
            for inline in self.get_inline_instances(request, obj):
                yield inline.get_formset(request, obj), inline
    def get_paginator(self, request, queryset, per_page, orphans=0, allow_empty_first_page=True):
        """Instantiate the paginator class used by the changelist."""
        return self.paginator(queryset, per_page, orphans, allow_empty_first_page)
    def log_addition(self, request, object):
        """
        Log that an object has been successfully added.
        The default implementation creates an admin LogEntry object.
        """
        from django.contrib.admin.models import LogEntry, ADDITION
        LogEntry.objects.log_action(
            user_id=request.user.pk,
            content_type_id=get_content_type_for_model(object).pk,
            object_id=object.pk,
            object_repr=force_text(object),
            action_flag=ADDITION
        )
    def log_change(self, request, object, message):
        """
        Log that an object has been successfully changed.
        The default implementation creates an admin LogEntry object.
        """
        from django.contrib.admin.models import LogEntry, CHANGE
        LogEntry.objects.log_action(
            user_id=request.user.pk,
            content_type_id=get_content_type_for_model(object).pk,
            object_id=object.pk,
            object_repr=force_text(object),
            action_flag=CHANGE,
            change_message=message
        )
    def log_deletion(self, request, object, object_repr):
        """
        Log that an object will be deleted. Note that this method must be
        called before the deletion.
        The default implementation creates an admin LogEntry object.
        """
        from django.contrib.admin.models import LogEntry, DELETION
        LogEntry.objects.log_action(
            user_id=request.user.pk,
            content_type_id=get_content_type_for_model(object).pk,
            object_id=object.pk,
            # The repr is passed in because the object is about to disappear.
            object_repr=object_repr,
            action_flag=DELETION
        )
    def action_checkbox(self, obj):
        """
        A list_display column containing a checkbox widget.
        """
        return helpers.checkbox.render(helpers.ACTION_CHECKBOX_NAME, force_text(obj.pk))
    # Column header renders the "select all" toggle; allow_tags keeps the
    # raw HTML from being escaped.
    action_checkbox.short_description = mark_safe('<input type="checkbox" id="action-toggle" />')
    action_checkbox.allow_tags = True
    def get_actions(self, request):
        """
        Return a dictionary mapping the names of all actions for this
        ModelAdmin to a tuple of (callable, name, description) for each action.
        """
        # If self.actions is explicitly set to None that means that we don't
        # want *any* actions enabled on this page.
        if self.actions is None or IS_POPUP_VAR in request.GET:
            return OrderedDict()
        actions = []
        # Gather actions from the admin site first
        for (name, func) in self.admin_site.actions:
            description = getattr(func, 'short_description', name.replace('_', ' '))
            actions.append((func, name, description))
        # Then gather them from the model admin and all parent classes,
        # starting with self and working back up.
        for klass in self.__class__.mro()[::-1]:
            class_actions = getattr(klass, 'actions', [])
            # Avoid trying to iterate over None
            if not class_actions:
                continue
            actions.extend(self.get_action(action) for action in class_actions)
        # get_action might have returned None, so filter any of those out.
        actions = filter(None, actions)
        # Convert the actions into an OrderedDict keyed by name.
        # Later entries win, so subclass actions override site/base ones.
        actions = OrderedDict(
            (name, (func, name, desc))
            for func, name, desc in actions
        )
        return actions
def get_action_choices(self, request, default_choices=BLANK_CHOICE_DASH):
"""
Return a list of choices for use in a form object. Each choice is a
tuple (name, description).
"""
choices = [] + default_choices
for func, name, description in six.itervalues(self.get_actions(request)):
choice = (name, description % model_format_dict(self.opts))
choices.append(choice)
return choices
    def get_action(self, action):
        """
        Return a given action from a parameter, which can either be a callable,
        or the name of a method on the ModelAdmin. Return is a tuple of
        (callable, name, description).
        """
        # If the action is a callable, just use it.
        if callable(action):
            func = action
            action = action.__name__
        # Next, look for a method. Grab it off self.__class__ to get an unbound
        # method instead of a bound one; this ensures that the calling
        # conventions are the same for functions and methods.
        elif hasattr(self.__class__, action):
            func = getattr(self.__class__, action)
        # Finally, look for a named method on the admin site
        else:
            try:
                func = self.admin_site.get_action(action)
            except KeyError:
                # Unknown action name: signal to callers by returning None.
                return None
        if hasattr(func, 'short_description'):
            description = func.short_description
        else:
            description = capfirst(action.replace('_', ' '))
        return func, action, description
def get_list_display(self, request):
"""
Return a sequence containing the fields to be displayed on the
changelist.
"""
return self.list_display
def get_list_display_links(self, request, list_display):
"""
Return a sequence containing the fields to be displayed as links
on the changelist. The list_display parameter is the list of fields
returned by get_list_display().
"""
if self.list_display_links or self.list_display_links is None or not list_display:
return self.list_display_links
else:
# Use only the first item in list_display as link
return list(list_display)[:1]
def get_list_filter(self, request):
"""
Returns a sequence containing the fields to be displayed as filters in
the right sidebar of the changelist page.
"""
return self.list_filter
def get_search_fields(self, request):
"""
Returns a sequence containing the fields to be searched whenever
somebody submits a search query.
"""
return self.search_fields
    def get_search_results(self, request, queryset, search_term):
        """
        Returns a tuple containing a queryset to implement the search,
        and a boolean indicating if the results may contain duplicates.
        """
        # Apply keyword searches.
        def construct_search(field_name):
            # Prefix selects the lookup: '^' -> istartswith, '=' -> iexact,
            # '@' -> search; otherwise icontains.
            if field_name.startswith('^'):
                return "%s__istartswith" % field_name[1:]
            elif field_name.startswith('='):
                return "%s__iexact" % field_name[1:]
            elif field_name.startswith('@'):
                return "%s__search" % field_name[1:]
            else:
                return "%s__icontains" % field_name
        use_distinct = False
        search_fields = self.get_search_fields(request)
        if search_fields and search_term:
            orm_lookups = [construct_search(str(search_field))
                           for search_field in search_fields]
            # Each whitespace-separated bit must match at least one field
            # (ORs within a bit, ANDs across bits via chained filters).
            for bit in search_term.split():
                or_queries = [models.Q(**{orm_lookup: bit})
                              for orm_lookup in orm_lookups]
                queryset = queryset.filter(reduce(operator.or_, or_queries))
            if not use_distinct:
                for search_spec in orm_lookups:
                    if lookup_needs_distinct(self.opts, search_spec):
                        use_distinct = True
                        break
        return queryset, use_distinct
    def get_preserved_filters(self, request):
        """
        Returns the preserved filters querystring.
        """
        match = request.resolver_match
        if self.preserve_filters and match:
            opts = self.model._meta
            current_url = '%s:%s' % (match.app_name, match.url_name)
            changelist_url = 'admin:%s_%s_changelist' % (opts.app_label, opts.model_name)
            if current_url == changelist_url:
                # On the changelist itself the filters are the current GET.
                preserved_filters = request.GET.urlencode()
            else:
                # Elsewhere they travel in the _changelist_filters parameter.
                preserved_filters = request.GET.get('_changelist_filters')
            if preserved_filters:
                return urlencode({'_changelist_filters': preserved_filters})
        return ''
    def construct_change_message(self, request, form, formsets):
        """
        Construct a change message from a changed object.
        """
        # Collect human-readable sentences: one for the form's changed
        # fields, then one per inline add/change/delete.
        change_message = []
        if form.changed_data:
            change_message.append(_('Changed %s.') % get_text_list(form.changed_data, _('and')))
        if formsets:
            for formset in formsets:
                for added_object in formset.new_objects:
                    change_message.append(_('Added %(name)s "%(object)s".')
                                          % {'name': force_text(added_object._meta.verbose_name),
                                             'object': force_text(added_object)})
                for changed_object, changed_fields in formset.changed_objects:
                    change_message.append(_('Changed %(list)s for %(name)s "%(object)s".')
                                          % {'list': get_text_list(changed_fields, _('and')),
                                             'name': force_text(changed_object._meta.verbose_name),
                                             'object': force_text(changed_object)})
                for deleted_object in formset.deleted_objects:
                    change_message.append(_('Deleted %(name)s "%(object)s".')
                                          % {'name': force_text(deleted_object._meta.verbose_name),
                                             'object': force_text(deleted_object)})
        change_message = ' '.join(change_message)
        return change_message or _('No fields changed.')
    def message_user(self, request, message, level=messages.INFO, extra_tags='',
                     fail_silently=False):
        """
        Send a message to the user. The default implementation
        posts a message using the django.contrib.messages backend.
        Exposes almost the same API as messages.add_message(), but accepts the
        positional arguments in a different order to maintain backwards
        compatibility. For convenience, it accepts the `level` argument as
        a string rather than the usual level number.
        """
        if not isinstance(level, int):
            # attempt to get the level if passed a string
            try:
                level = getattr(messages.constants, level.upper())
            except AttributeError:
                # Unknown level name: raise, listing the valid names.
                levels = messages.constants.DEFAULT_TAGS.values()
                levels_repr = ', '.join('`%s`' % l for l in levels)
                raise ValueError('Bad message level string: `%s`. '
                                 'Possible values are: %s' % (level, levels_repr))
        messages.add_message(request, level, message, extra_tags=extra_tags,
                             fail_silently=fail_silently)
def save_form(self, request, form, change):
"""
Given a ModelForm return an unsaved instance. ``change`` is True if
the object is being changed, and False if it's being added.
"""
return form.save(commit=False)
def save_model(self, request, obj, form, change):
"""
Given a model instance save it to the database.
"""
obj.save()
def delete_model(self, request, obj):
"""
Given a model instance delete it from the database.
"""
obj.delete()
def save_formset(self, request, form, formset, change):
"""
Given an inline formset save it to the database.
"""
formset.save()
def save_related(self, request, form, formsets, change):
"""
Given the ``HttpRequest``, the parent ``ModelForm`` instance, the
list of inline formsets and a boolean value based on whether the
parent is being added or changed, save the related objects to the
database. Note that at this point save_form() and save_model() have
already been called.
"""
form.save_m2m()
for formset in formsets:
self.save_formset(request, form, formset, change=change)
    def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None):
        """
        Render the add/change form page.

        Fills ``context`` with permission flags, URLs and admin options,
        then renders either the add or change template, falling back from
        a model-specific template to an app-level one to the global
        default.
        """
        opts = self.model._meta
        app_label = opts.app_label
        # Re-apply any changelist filters carried through the request so
        # the form POSTs back to a URL that preserves them.
        preserved_filters = self.get_preserved_filters(request)
        form_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, form_url)
        view_on_site_url = self.get_view_on_site_url(obj)
        context.update({
            'add': add,
            'change': change,
            'has_add_permission': self.has_add_permission(request),
            'has_change_permission': self.has_change_permission(request, obj),
            'has_delete_permission': self.has_delete_permission(request, obj),
            'has_file_field': True, # FIXME - this should check if form or formsets have a FileField,
            'has_absolute_url': view_on_site_url is not None,
            'absolute_url': view_on_site_url,
            'form_url': form_url,
            'opts': opts,
            'content_type_id': get_content_type_for_model(self.model).pk,
            'save_as': self.save_as,
            'save_on_top': self.save_on_top,
            'to_field_var': TO_FIELD_VAR,
            'is_popup_var': IS_POPUP_VAR,
            'app_label': app_label,
        })
        # A custom add template, when set, is only used for the add view;
        # otherwise both views share change_form_template (or the default).
        if add and self.add_form_template is not None:
            form_template = self.add_form_template
        else:
            form_template = self.change_form_template
        return TemplateResponse(request, form_template or [
            "admin/%s/%s/change_form.html" % (app_label, opts.model_name),
            "admin/%s/change_form.html" % app_label,
            "admin/change_form.html"
        ], context, current_app=self.admin_site.name)
    def response_add(self, request, obj, post_url_continue=None):
        """
        Determines the HttpResponse for the add_view stage.

        Dispatches on which save button was pressed (popup, "Save and
        continue editing", "Save and add another", or plain "Save") and
        returns the matching redirect or popup response, flashing a
        success message where appropriate.
        """
        opts = obj._meta
        pk_value = obj._get_pk_val()
        preserved_filters = self.get_preserved_filters(request)
        msg_dict = {'name': force_text(opts.verbose_name), 'obj': force_text(obj)}
        # Here, we distinguish between different save types by checking for
        # the presence of keys in request.POST.
        if IS_POPUP_VAR in request.POST:
            # Saved from a related-widget popup: return a page that hands
            # the new value back to the opener instead of redirecting.
            to_field = request.POST.get(TO_FIELD_VAR)
            if to_field:
                attr = str(to_field)
            else:
                attr = obj._meta.pk.attname
            value = obj.serializable_value(attr)
            return SimpleTemplateResponse('admin/popup_response.html', {
                'pk_value': escape(pk_value), # for possible backwards-compatibility
                'value': escape(value),
                'obj': escapejs(obj)
            })
        elif "_continue" in request.POST:
            msg = _('The %(name)s "%(obj)s" was added successfully. You may edit it again below.') % msg_dict
            self.message_user(request, msg, messages.SUCCESS)
            if post_url_continue is None:
                # Default "continue" target is the change view of the new object.
                post_url_continue = reverse('admin:%s_%s_change' %
                                            (opts.app_label, opts.model_name),
                                            args=(quote(pk_value),),
                                            current_app=self.admin_site.name)
            post_url_continue = add_preserved_filters(
                {'preserved_filters': preserved_filters, 'opts': opts},
                post_url_continue
            )
            return HttpResponseRedirect(post_url_continue)
        elif "_addanother" in request.POST:
            msg = _('The %(name)s "%(obj)s" was added successfully. You may add another %(name)s below.') % msg_dict
            self.message_user(request, msg, messages.SUCCESS)
            # Redirect back to the (same) add form, keeping list filters.
            redirect_url = request.path
            redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
            return HttpResponseRedirect(redirect_url)
        else:
            msg = _('The %(name)s "%(obj)s" was added successfully.') % msg_dict
            self.message_user(request, msg, messages.SUCCESS)
            return self.response_post_save_add(request, obj)
    def response_change(self, request, obj):
        """
        Determines the HttpResponse for the change_view stage.

        Dispatches on which save button was pressed ("Save and continue
        editing", "Save as new", "Save and add another", or plain "Save")
        and returns the matching redirect, flashing a success message.
        """
        opts = self.model._meta
        pk_value = obj._get_pk_val()
        preserved_filters = self.get_preserved_filters(request)
        msg_dict = {'name': force_text(opts.verbose_name), 'obj': force_text(obj)}
        if "_continue" in request.POST:
            msg = _('The %(name)s "%(obj)s" was changed successfully. You may edit it again below.') % msg_dict
            self.message_user(request, msg, messages.SUCCESS)
            # Stay on the same change form, keeping any changelist filters.
            redirect_url = request.path
            redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
            return HttpResponseRedirect(redirect_url)
        elif "_saveasnew" in request.POST:
            # The object was duplicated; go to the change form of the copy.
            msg = _('The %(name)s "%(obj)s" was added successfully. You may edit it again below.') % msg_dict
            self.message_user(request, msg, messages.SUCCESS)
            redirect_url = reverse('admin:%s_%s_change' %
                                   (opts.app_label, opts.model_name),
                                   args=(pk_value,),
                                   current_app=self.admin_site.name)
            redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
            return HttpResponseRedirect(redirect_url)
        elif "_addanother" in request.POST:
            msg = _('The %(name)s "%(obj)s" was changed successfully. You may add another %(name)s below.') % msg_dict
            self.message_user(request, msg, messages.SUCCESS)
            redirect_url = reverse('admin:%s_%s_add' %
                                   (opts.app_label, opts.model_name),
                                   current_app=self.admin_site.name)
            redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
            return HttpResponseRedirect(redirect_url)
        else:
            msg = _('The %(name)s "%(obj)s" was changed successfully.') % msg_dict
            self.message_user(request, msg, messages.SUCCESS)
            return self.response_post_save_change(request, obj)
def response_post_save_add(self, request, obj):
"""
Figure out where to redirect after the 'Save' button has been pressed
when adding a new object.
"""
opts = self.model._meta
if self.has_change_permission(request, None):
post_url = reverse('admin:%s_%s_changelist' %
(opts.app_label, opts.model_name),
current_app=self.admin_site.name)
preserved_filters = self.get_preserved_filters(request)
post_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, post_url)
else:
post_url = reverse('admin:index',
current_app=self.admin_site.name)
return HttpResponseRedirect(post_url)
def response_post_save_change(self, request, obj):
"""
Figure out where to redirect after the 'Save' button has been pressed
when editing an existing object.
"""
opts = self.model._meta
if self.has_change_permission(request, None):
post_url = reverse('admin:%s_%s_changelist' %
(opts.app_label, opts.model_name),
current_app=self.admin_site.name)
preserved_filters = self.get_preserved_filters(request)
post_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, post_url)
else:
post_url = reverse('admin:index',
current_app=self.admin_site.name)
return HttpResponseRedirect(post_url)
    def response_action(self, request, queryset):
        """
        Handle an admin action. This is called if a request is POSTed to the
        changelist; it returns an HttpResponse if the action was handled, and
        None otherwise.

        ``queryset`` is the full changelist queryset; it is narrowed to the
        selected rows unless "select across" was checked.
        """
        # There can be multiple action forms on the page (at the top
        # and bottom of the change list, for example). Get the action
        # whose button was pushed.
        try:
            action_index = int(request.POST.get('index', 0))
        except ValueError:
            action_index = 0
        # Construct the action form.
        data = request.POST.copy()
        data.pop(helpers.ACTION_CHECKBOX_NAME, None)
        data.pop("index", None)
        # Use the action whose button was pushed
        try:
            data.update({'action': data.getlist('action')[action_index]})
        except IndexError:
            # If we didn't get an action from the chosen form that's invalid
            # POST data, so by deleting action it'll fail the validation check
            # below. So no need to do anything here
            pass
        action_form = self.action_form(data, auto_id=None)
        action_form.fields['action'].choices = self.get_action_choices(request)
        # If the form's valid we can handle the action.
        if action_form.is_valid():
            action = action_form.cleaned_data['action']
            select_across = action_form.cleaned_data['select_across']
            # get_actions() maps name -> (callable, name, description);
            # we only need the callable here.
            func = self.get_actions(request)[action][0]
            # Get the list of selected PKs. If nothing's selected, we can't
            # perform an action on it, so bail. Except we want to perform
            # the action explicitly on all objects.
            selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
            if not selected and not select_across:
                # Reminder that something needs to be selected or nothing will happen
                msg = _("Items must be selected in order to perform "
                        "actions on them. No items have been changed.")
                self.message_user(request, msg, messages.WARNING)
                return None
            if not select_across:
                # Perform the action only on the selected objects
                queryset = queryset.filter(pk__in=selected)
            response = func(self, request, queryset)
            # Actions may return an HttpResponse-like object, which will be
            # used as the response from the POST. If not, we'll be a good
            # little HTTP citizen and redirect back to the changelist page.
            if isinstance(response, HttpResponseBase):
                return response
            else:
                return HttpResponseRedirect(request.get_full_path())
        else:
            msg = _("No action selected.")
            self.message_user(request, msg, messages.WARNING)
            return None
def response_delete(self, request, obj_display):
"""
Determines the HttpResponse for the delete_view stage.
"""
opts = self.model._meta
self.message_user(request,
_('The %(name)s "%(obj)s" was deleted successfully.') % {
'name': force_text(opts.verbose_name),
'obj': force_text(obj_display)
}, messages.SUCCESS)
if self.has_change_permission(request, None):
post_url = reverse('admin:%s_%s_changelist' %
(opts.app_label, opts.model_name),
current_app=self.admin_site.name)
preserved_filters = self.get_preserved_filters(request)
post_url = add_preserved_filters(
{'preserved_filters': preserved_filters, 'opts': opts}, post_url
)
else:
post_url = reverse('admin:index',
current_app=self.admin_site.name)
return HttpResponseRedirect(post_url)
def render_delete_form(self, request, context):
opts = self.model._meta
app_label = opts.app_label
return TemplateResponse(request,
self.delete_confirmation_template or [
"admin/{}/{}/delete_confirmation.html".format(app_label, opts.model_name),
"admin/{}/delete_confirmation.html".format(app_label),
"admin/delete_confirmation.html"
], context, current_app=self.admin_site.name)
def get_inline_formsets(self, request, formsets, inline_instances,
obj=None):
inline_admin_formsets = []
for inline, formset in zip(inline_instances, formsets):
fieldsets = list(inline.get_fieldsets(request, obj))
readonly = list(inline.get_readonly_fields(request, obj))
prepopulated = dict(inline.get_prepopulated_fields(request, obj))
inline_admin_formset = helpers.InlineAdminFormSet(inline, formset,
fieldsets, prepopulated, readonly, model_admin=self)
inline_admin_formsets.append(inline_admin_formset)
return inline_admin_formsets
def get_changeform_initial_data(self, request):
"""
Get the initial form data.
Unless overridden, this populates from the GET params.
"""
initial = dict(request.GET.items())
for k in initial:
try:
f = self.model._meta.get_field(k)
except models.FieldDoesNotExist:
continue
# We have to special-case M2Ms as a list of comma-separated PKs.
if isinstance(f, models.ManyToManyField):
initial[k] = initial[k].split(",")
return initial
    @csrf_protect_m
    @transaction.atomic
    def changeform_view(self, request, object_id=None, form_url='', extra_context=None):
        """
        Shared implementation of the add and change views.

        ``object_id`` of None means an add; otherwise the object is looked
        up and changed. On a valid POST the object, its m2m data and all
        inline formsets are saved atomically and the matching response_*
        hook is returned; otherwise the (re)bound form page is rendered.
        """
        # Validate any _to_field override before doing anything else.
        to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR))
        if to_field and not self.to_field_allowed(request, to_field):
            raise DisallowedModelAdminToField("The field %s cannot be referenced." % to_field)
        model = self.model
        opts = model._meta
        add = object_id is None
        if add:
            if not self.has_add_permission(request):
                raise PermissionDenied
            obj = None
        else:
            obj = self.get_object(request, unquote(object_id))
            if not self.has_change_permission(request, obj):
                raise PermissionDenied
            if obj is None:
                raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {
                    'name': force_text(opts.verbose_name), 'key': escape(object_id)})
            # "Save as new" re-enters through the add view so the POST is
            # treated as a brand new object.
            if request.method == 'POST' and "_saveasnew" in request.POST:
                return self.add_view(request, form_url=reverse('admin:%s_%s_add' % (
                    opts.app_label, opts.model_name),
                    current_app=self.admin_site.name))
        ModelForm = self.get_form(request, obj)
        if request.method == 'POST':
            form = ModelForm(request.POST, request.FILES, instance=obj)
            if form.is_valid():
                form_validated = True
                new_object = self.save_form(request, form, change=not add)
            else:
                form_validated = False
                new_object = form.instance
            formsets, inline_instances = self._create_formsets(request, new_object)
            # Only persist when the parent form AND every inline formset
            # validate; otherwise fall through and re-render with errors.
            if all_valid(formsets) and form_validated:
                self.save_model(request, new_object, form, not add)
                self.save_related(request, form, formsets, not add)
                if add:
                    self.log_addition(request, new_object)
                    return self.response_add(request, new_object)
                else:
                    change_message = self.construct_change_message(request, form, formsets)
                    self.log_change(request, new_object, change_message)
                    return self.response_change(request, new_object)
        else:
            if add:
                initial = self.get_changeform_initial_data(request)
                form = ModelForm(initial=initial)
                formsets, inline_instances = self._create_formsets(request, self.model())
            else:
                form = ModelForm(instance=obj)
                formsets, inline_instances = self._create_formsets(request, obj)
        adminForm = helpers.AdminForm(
            form,
            list(self.get_fieldsets(request, obj)),
            self.get_prepopulated_fields(request, obj),
            self.get_readonly_fields(request, obj),
            model_admin=self)
        # Collect media from the admin form and every inline formset.
        media = self.media + adminForm.media
        inline_formsets = self.get_inline_formsets(request, formsets, inline_instances, obj)
        for inline_formset in inline_formsets:
            media = media + inline_formset.media
        context = dict(self.admin_site.each_context(),
            title=(_('Add %s') if add else _('Change %s')) % force_text(opts.verbose_name),
            adminform=adminForm,
            object_id=object_id,
            original=obj,
            is_popup=(IS_POPUP_VAR in request.POST or
                      IS_POPUP_VAR in request.GET),
            to_field=to_field,
            media=media,
            inline_admin_formsets=inline_formsets,
            errors=helpers.AdminErrorList(form, formsets),
            preserved_filters=self.get_preserved_filters(request),
        )
        context.update(extra_context or {})
        return self.render_change_form(request, context, add=add, change=not add, obj=obj, form_url=form_url)
def add_view(self, request, form_url='', extra_context=None):
return self.changeform_view(request, None, form_url, extra_context)
def change_view(self, request, object_id, form_url='', extra_context=None):
return self.changeform_view(request, object_id, form_url, extra_context)
    @csrf_protect_m
    def changelist_view(self, request, extra_context=None):
        """
        The 'change list' admin view for this model.

        Builds the ChangeList, dispatches any POSTed bulk action (with or
        without a confirmation step), processes list_editable bulk edits,
        and finally renders the changelist template.
        """
        from django.contrib.admin.views.main import ERROR_FLAG
        opts = self.model._meta
        app_label = opts.app_label
        if not self.has_change_permission(request, None):
            raise PermissionDenied
        list_display = self.get_list_display(request)
        list_display_links = self.get_list_display_links(request, list_display)
        list_filter = self.get_list_filter(request)
        search_fields = self.get_search_fields(request)
        # Check actions to see if any are available on this changelist
        actions = self.get_actions(request)
        if actions:
            # Add the action checkboxes if there are any actions available.
            list_display = ['action_checkbox'] + list(list_display)
        ChangeList = self.get_changelist(request)
        try:
            cl = ChangeList(request, self.model, list_display,
                list_display_links, list_filter, self.date_hierarchy,
                search_fields, self.list_select_related, self.list_per_page,
                self.list_max_show_all, self.list_editable, self)
        except IncorrectLookupParameters:
            # Wacky lookup parameters were given, so redirect to the main
            # changelist page, without parameters, and pass an 'invalid=1'
            # parameter via the query string. If wacky parameters were given
            # and the 'invalid=1' parameter was already in the query string,
            # something is screwed up with the database, so display an error
            # page.
            if ERROR_FLAG in request.GET.keys():
                return SimpleTemplateResponse('admin/invalid_setup.html', {
                    'title': _('Database error'),
                })
            return HttpResponseRedirect(request.path + '?' + ERROR_FLAG + '=1')
        # If the request was POSTed, this might be a bulk action or a bulk
        # edit. Try to look up an action or confirmation first, but if this
        # isn't an action the POST will fall through to the bulk edit check,
        # below.
        action_failed = False
        selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
        # Actions with no confirmation
        if (actions and request.method == 'POST' and
                'index' in request.POST and '_save' not in request.POST):
            if selected:
                response = self.response_action(request, queryset=cl.get_queryset(request))
                if response:
                    return response
                else:
                    action_failed = True
            else:
                msg = _("Items must be selected in order to perform "
                        "actions on them. No items have been changed.")
                self.message_user(request, msg, messages.WARNING)
                action_failed = True
        # Actions with confirmation
        if (actions and request.method == 'POST' and
                helpers.ACTION_CHECKBOX_NAME in request.POST and
                'index' not in request.POST and '_save' not in request.POST):
            if selected:
                response = self.response_action(request, queryset=cl.get_queryset(request))
                if response:
                    return response
                else:
                    action_failed = True
        # If we're allowing changelist editing, we need to construct a formset
        # for the changelist given all the fields to be edited. Then we'll
        # use the formset to validate/process POSTed data.
        formset = cl.formset = None
        # Handle POSTed bulk-edit data.
        if (request.method == "POST" and cl.list_editable and
                '_save' in request.POST and not action_failed):
            FormSet = self.get_changelist_formset(request)
            formset = cl.formset = FormSet(request.POST, request.FILES, queryset=cl.result_list)
            if formset.is_valid():
                changecount = 0
                # Save each changed row through the regular save hooks so
                # subclass overrides and logging still apply.
                for form in formset.forms:
                    if form.has_changed():
                        obj = self.save_form(request, form, change=True)
                        self.save_model(request, obj, form, change=True)
                        self.save_related(request, form, formsets=[], change=True)
                        change_msg = self.construct_change_message(request, form, None)
                        self.log_change(request, obj, change_msg)
                        changecount += 1
                if changecount:
                    if changecount == 1:
                        name = force_text(opts.verbose_name)
                    else:
                        name = force_text(opts.verbose_name_plural)
                    msg = ungettext("%(count)s %(name)s was changed successfully.",
                                    "%(count)s %(name)s were changed successfully.",
                                    changecount) % {'count': changecount,
                                                    'name': name,
                                                    'obj': force_text(obj)}
                    self.message_user(request, msg, messages.SUCCESS)
                return HttpResponseRedirect(request.get_full_path())
        # Handle GET -- construct a formset for display.
        elif cl.list_editable:
            FormSet = self.get_changelist_formset(request)
            formset = cl.formset = FormSet(queryset=cl.result_list)
        # Build the list of media to be used by the formset.
        if formset:
            media = self.media + formset.media
        else:
            media = self.media
        # Build the action form and populate it with available actions.
        if actions:
            action_form = self.action_form(auto_id=None)
            action_form.fields['action'].choices = self.get_action_choices(request)
        else:
            action_form = None
        selection_note_all = ungettext('%(total_count)s selected',
            'All %(total_count)s selected', cl.result_count)
        context = dict(
            self.admin_site.each_context(),
            module_name=force_text(opts.verbose_name_plural),
            selection_note=_('0 of %(cnt)s selected') % {'cnt': len(cl.result_list)},
            selection_note_all=selection_note_all % {'total_count': cl.result_count},
            title=cl.title,
            is_popup=cl.is_popup,
            to_field=cl.to_field,
            cl=cl,
            media=media,
            has_add_permission=self.has_add_permission(request),
            opts=cl.opts,
            action_form=action_form,
            actions_on_top=self.actions_on_top,
            actions_on_bottom=self.actions_on_bottom,
            actions_selection_counter=self.actions_selection_counter,
            preserved_filters=self.get_preserved_filters(request),
        )
        context.update(extra_context or {})
        return TemplateResponse(request, self.change_list_template or [
            'admin/%s/%s/change_list.html' % (app_label, opts.model_name),
            'admin/%s/change_list.html' % app_label,
            'admin/change_list.html'
        ], context, current_app=self.admin_site.name)
    @csrf_protect_m
    @transaction.atomic
    def delete_view(self, request, object_id, extra_context=None):
        """
        The 'delete' admin view for this model.

        GET renders a confirmation page listing all related objects that
        would also be deleted; POST performs the deletion (if permitted)
        and delegates the redirect to response_delete().
        """
        opts = self.model._meta
        app_label = opts.app_label
        obj = self.get_object(request, unquote(object_id))
        if not self.has_delete_permission(request, obj):
            raise PermissionDenied
        if obj is None:
            raise Http404(
                _('%(name)s object with primary key %(key)r does not exist.') %
                {'name': force_text(opts.verbose_name), 'key': escape(object_id)}
            )
        using = router.db_for_write(self.model)
        # Populate deleted_objects, a data structure of all related objects that
        # will also be deleted.
        (deleted_objects, model_count, perms_needed, protected) = get_deleted_objects(
            [obj], opts, request.user, self.admin_site, using)
        if request.POST: # The user has already confirmed the deletion.
            if perms_needed:
                raise PermissionDenied
            obj_display = force_text(obj)
            self.log_deletion(request, obj, obj_display)
            self.delete_model(request, obj)
            return self.response_delete(request, obj_display)
        object_name = force_text(opts.verbose_name)
        # perms_needed / protected mean the deletion cannot proceed; the
        # template shows what is blocking it instead of a confirm button.
        if perms_needed or protected:
            title = _("Cannot delete %(name)s") % {"name": object_name}
        else:
            title = _("Are you sure?")
        context = dict(
            self.admin_site.each_context(),
            title=title,
            object_name=object_name,
            object=obj,
            deleted_objects=deleted_objects,
            model_count=dict(model_count),
            perms_lacking=perms_needed,
            protected=protected,
            opts=opts,
            app_label=app_label,
            preserved_filters=self.get_preserved_filters(request),
        )
        context.update(extra_context or {})
        return self.render_delete_form(request, context)
    def history_view(self, request, object_id, extra_context=None):
        """
        The 'history' admin view for this model: renders the LogEntry
        records for one object, oldest first.
        """
        from django.contrib.admin.models import LogEntry
        # First check if the user can see this history.
        model = self.model
        obj = get_object_or_404(self.get_queryset(request), pk=unquote(object_id))
        if not self.has_change_permission(request, obj):
            raise PermissionDenied
        # Then get the history for this object.
        opts = model._meta
        app_label = opts.app_label
        action_list = LogEntry.objects.filter(
            object_id=unquote(object_id),
            content_type=get_content_type_for_model(model)
        ).select_related().order_by('action_time')
        context = dict(self.admin_site.each_context(),
            title=_('Change history: %s') % force_text(obj),
            action_list=action_list,
            module_name=capfirst(force_text(opts.verbose_name_plural)),
            object=obj,
            opts=opts,
            preserved_filters=self.get_preserved_filters(request),
        )
        context.update(extra_context or {})
        return TemplateResponse(request, self.object_history_template or [
            "admin/%s/%s/object_history.html" % (app_label, opts.model_name),
            "admin/%s/object_history.html" % app_label,
            "admin/object_history.html"
        ], context, current_app=self.admin_site.name)
    def _create_formsets(self, request, obj):
        """
        Helper function to generate formsets for add/change_view.

        Returns a (formsets, inline_instances) pair of parallel lists,
        one entry per inline returned by get_formsets_with_inlines().
        """
        formsets = []
        inline_instances = []
        prefixes = {}
        # get_formsets_with_inlines() is only passed obj when it exists in
        # the database (i.e. on a change, not an add).
        get_formsets_args = [request]
        if obj.pk:
            get_formsets_args.append(obj)
        for FormSet, inline in self.get_formsets_with_inlines(*get_formsets_args):
            prefix = FormSet.get_default_prefix()
            # Count prefix occurrences and suffix duplicates (or empty
            # prefixes) with a counter so each formset's prefix is unique.
            prefixes[prefix] = prefixes.get(prefix, 0) + 1
            if prefixes[prefix] != 1 or not prefix:
                prefix = "%s-%s" % (prefix, prefixes[prefix])
            formset_params = {
                'instance': obj,
                'prefix': prefix,
                'queryset': inline.get_queryset(request),
            }
            if request.method == 'POST':
                # Bind POSTed data/files so the formset can validate/save.
                formset_params.update({
                    'data': request.POST,
                    'files': request.FILES,
                    'save_as_new': '_saveasnew' in request.POST
                })
            formsets.append(FormSet(**formset_params))
            inline_instances.append(inline)
        return formsets, inline_instances
class InlineModelAdmin(BaseModelAdmin):
    """
    Options for inline editing of ``model`` instances.

    Provide ``fk_name`` to specify the attribute name of the ``ForeignKey``
    from ``model`` to its parent. This is required if ``model`` has more than
    one ``ForeignKey`` to its parent.
    """
    # The inline model; must be set by subclasses.
    model = None
    # Name of the FK back to the parent (needed only when ambiguous).
    fk_name = None
    # Formset class used to build the inline formset.
    formset = BaseInlineFormSet
    # Number of extra empty forms shown.
    extra = 3
    # Minimum / maximum number of forms (None = formset defaults).
    min_num = None
    max_num = None
    # Template used to render the inline (set by Stacked/TabularInline).
    template = None
    # Display names; default to the model's verbose names in __init__.
    verbose_name = None
    verbose_name_plural = None
    # Whether a delete checkbox is offered (still permission-gated).
    can_delete = True
    show_change_link = False
    checks_class = InlineModelAdminChecks
    def __init__(self, parent_model, admin_site):
        """Bind this inline to its parent model and admin site."""
        self.admin_site = admin_site
        self.parent_model = parent_model
        self.opts = self.model._meta
        self.has_registered_model = admin_site.is_registered(self.model)
        super(InlineModelAdmin, self).__init__()
        if self.verbose_name is None:
            self.verbose_name = self.model._meta.verbose_name
        if self.verbose_name_plural is None:
            self.verbose_name_plural = self.model._meta.verbose_name_plural
    @property
    def media(self):
        """JS media required by the inline widgets (minified unless DEBUG)."""
        extra = '' if settings.DEBUG else '.min'
        js = ['jquery%s.js' % extra, 'jquery.init.js', 'inlines%s.js' % extra]
        if self.prepopulated_fields:
            js.extend(['urlify.js', 'prepopulate%s.js' % extra])
        if self.filter_vertical or self.filter_horizontal:
            js.extend(['SelectBox.js', 'SelectFilter2.js'])
        return forms.Media(js=[static('admin/js/%s' % url) for url in js])
    def get_extra(self, request, obj=None, **kwargs):
        """Hook for customizing the number of extra inline forms."""
        return self.extra
    def get_min_num(self, request, obj=None, **kwargs):
        """Hook for customizing the min number of inline forms."""
        return self.min_num
    def get_max_num(self, request, obj=None, **kwargs):
        """Hook for customizing the max number of extra inline forms."""
        return self.max_num
    def get_formset(self, request, obj=None, **kwargs):
        """Returns a BaseInlineFormSet class for use in admin add/change views."""
        if 'fields' in kwargs:
            fields = kwargs.pop('fields')
        else:
            fields = flatten_fieldsets(self.get_fieldsets(request, obj))
        if self.exclude is None:
            exclude = []
        else:
            exclude = list(self.exclude)
        exclude.extend(self.get_readonly_fields(request, obj))
        if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude:
            # Take the custom ModelForm's Meta.exclude into account only if the
            # InlineModelAdmin doesn't define its own.
            exclude.extend(self.form._meta.exclude)
        # If exclude is an empty list we use None, since that's the actual
        # default.
        exclude = exclude or None
        can_delete = self.can_delete and self.has_delete_permission(request, obj)
        defaults = {
            "form": self.form,
            "formset": self.formset,
            "fk_name": self.fk_name,
            "fields": fields,
            "exclude": exclude,
            "formfield_callback": partial(self.formfield_for_dbfield, request=request),
            "extra": self.get_extra(request, obj, **kwargs),
            "min_num": self.get_min_num(request, obj, **kwargs),
            "max_num": self.get_max_num(request, obj, **kwargs),
            "can_delete": can_delete,
        }
        defaults.update(kwargs)
        base_model_form = defaults['form']
        # Wrap the form so that deleting an inline is blocked when it would
        # cascade into protected related objects.
        class DeleteProtectedModelForm(base_model_form):
            def hand_clean_DELETE(self):
                """
                We don't validate the 'DELETE' field itself because on
                templates it's not rendered using the field information, but
                just using a generic "deletion_field" of the InlineModelAdmin.
                """
                if self.cleaned_data.get(DELETION_FIELD_NAME, False):
                    using = router.db_for_write(self._meta.model)
                    collector = NestedObjects(using=using)
                    collector.collect([self.instance])
                    if collector.protected:
                        objs = []
                        for p in collector.protected:
                            objs.append(
                                # Translators: Model verbose name and instance representation,
                                # suitable to be an item in a list.
                                _('%(class_name)s %(instance)s') % {
                                    'class_name': p._meta.verbose_name,
                                    'instance': p}
                            )
                        params = {'class_name': self._meta.model._meta.verbose_name,
                                  'instance': self.instance,
                                  'related_objects': get_text_list(objs, _('and'))}
                        msg = _("Deleting %(class_name)s %(instance)s would require "
                                "deleting the following protected related objects: "
                                "%(related_objects)s")
                        raise ValidationError(msg, code='deleting_protected', params=params)
            def is_valid(self):
                result = super(DeleteProtectedModelForm, self).is_valid()
                self.hand_clean_DELETE()
                return result
        defaults['form'] = DeleteProtectedModelForm
        if defaults['fields'] is None and not modelform_defines_fields(defaults['form']):
            defaults['fields'] = forms.ALL_FIELDS
        return inlineformset_factory(self.parent_model, self.model, **defaults)
    def get_fields(self, request, obj=None):
        """Return the fields shown: explicit ``fields`` or form + read-only."""
        if self.fields:
            return self.fields
        form = self.get_formset(request, obj, fields=None).form
        return list(form.base_fields) + list(self.get_readonly_fields(request, obj))
    def get_queryset(self, request):
        """Return the inline queryset; empty when change permission is missing."""
        queryset = super(InlineModelAdmin, self).get_queryset(request)
        if not self.has_change_permission(request):
            queryset = queryset.none()
        return queryset
    def has_add_permission(self, request):
        if self.opts.auto_created:
            # We're checking the rights to an auto-created intermediate model,
            # which doesn't have its own individual permissions. The user needs
            # to have the change permission for the related model in order to
            # be able to do anything with the intermediate model.
            return self.has_change_permission(request)
        return super(InlineModelAdmin, self).has_add_permission(request)
    def has_change_permission(self, request, obj=None):
        opts = self.opts
        if opts.auto_created:
            # The model was auto-created as intermediary for a
            # ManyToMany-relationship, find the target model
            for field in opts.fields:
                if field.rel and field.rel.to != self.parent_model:
                    opts = field.rel.to._meta
                    break
        codename = get_permission_codename('change', opts)
        return request.user.has_perm("%s.%s" % (opts.app_label, codename))
    def has_delete_permission(self, request, obj=None):
        if self.opts.auto_created:
            # We're checking the rights to an auto-created intermediate model,
            # which doesn't have its own individual permissions. The user needs
            # to have the change permission for the related model in order to
            # be able to do anything with the intermediate model.
            return self.has_change_permission(request, obj)
        return super(InlineModelAdmin, self).has_delete_permission(request, obj)
class StackedInline(InlineModelAdmin):
    """InlineModelAdmin rendered with the stacked (full-width fieldset) template."""
    template = 'admin/edit_inline/stacked.html'
class TabularInline(InlineModelAdmin):
    """InlineModelAdmin rendered with the tabular (one-row-per-form) template."""
    template = 'admin/edit_inline/tabular.html'
| 42.32918 | 119 | 0.615889 | from collections import OrderedDict
import copy
import operator
from functools import partial, reduce, update_wrapper
import warnings
from django import forms
from django.conf import settings
from django.contrib import messages
from django.contrib.admin import widgets, helpers
from django.contrib.admin import validation
from django.contrib.admin.checks import (BaseModelAdminChecks, ModelAdminChecks,
InlineModelAdminChecks)
from django.contrib.admin.exceptions import DisallowedModelAdminToField
from django.contrib.admin.utils import (quote, unquote, flatten_fieldsets,
get_deleted_objects, model_format_dict, NestedObjects,
lookup_needs_distinct)
from django.contrib.admin.templatetags.admin_static import static
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from django.contrib.auth import get_permission_codename
from django.core import checks
from django.core.exceptions import (PermissionDenied, ValidationError,
FieldError, ImproperlyConfigured)
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse
from django.db import models, transaction, router
from django.db.models.constants import LOOKUP_SEP
from django.db.models.related import RelatedObject
from django.db.models.fields import BLANK_CHOICE_DASH, FieldDoesNotExist
from django.db.models.sql.constants import QUERY_TERMS
from django.forms.formsets import all_valid, DELETION_FIELD_NAME
from django.forms.models import (modelform_factory, modelformset_factory,
inlineformset_factory, BaseInlineFormSet, modelform_defines_fields)
from django.forms.widgets import SelectMultiple, CheckboxSelectMultiple
from django.http import Http404, HttpResponseRedirect
from django.http.response import HttpResponseBase
from django.shortcuts import get_object_or_404
from django.template.response import SimpleTemplateResponse, TemplateResponse
from django.utils import six
from django.utils.decorators import method_decorator
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.html import escape, escapejs
from django.utils.http import urlencode
from django.utils.text import capfirst, get_text_list
from django.utils.translation import string_concat
from django.utils.translation import ugettext as _
from django.utils.translation import ungettext
from django.utils.safestring import mark_safe
from django.views.decorators.csrf import csrf_protect
IS_POPUP_VAR = '_popup'
TO_FIELD_VAR = '_to_field'
HORIZONTAL, VERTICAL = 1, 2
def get_content_type_for_model(obj):
    """Return the ContentType for ``obj``'s exact (non-concrete) model."""
    # Since this module gets imported in the application's root package,
    # it cannot import models from other applications at the module level.
    from django.contrib.contenttypes.models import ContentType
    return ContentType.objects.get_for_model(obj, for_concrete_model=False)
def get_ul_class(radio_style):
    """Map a ``radio_fields`` style constant to the admin's CSS class."""
    if radio_style == VERTICAL:
        return 'radiolist'
    return 'radiolist inline'
class IncorrectLookupParameters(Exception):
    """Raised by the changelist when query-string lookup parameters are invalid."""
    pass
# Defaults for formfield_overrides. ModelAdmin subclasses can change this
# by adding to ModelAdmin.formfield_overrides.
FORMFIELD_FOR_DBFIELD_DEFAULTS = {
    models.DateTimeField: {
        'form_class': forms.SplitDateTimeField,
        'widget': widgets.AdminSplitDateTime
    },
    models.DateField: {'widget': widgets.AdminDateWidget},
    models.TimeField: {'widget': widgets.AdminTimeWidget},
    models.TextField: {'widget': widgets.AdminTextareaWidget},
    models.URLField: {'widget': widgets.AdminURLFieldWidget},
    models.IntegerField: {'widget': widgets.AdminIntegerFieldWidget},
    models.BigIntegerField: {'widget': widgets.AdminBigIntegerFieldWidget},
    models.CharField: {'widget': widgets.AdminTextInputWidget},
    models.ImageField: {'widget': widgets.AdminFileWidget},
    models.FileField: {'widget': widgets.AdminFileWidget},
    models.EmailField: {'widget': widgets.AdminEmailInputWidget},
}

# method_decorator lets csrf_protect be applied to instance methods.
csrf_protect_m = method_decorator(csrf_protect)
class BaseModelAdmin(six.with_metaclass(forms.MediaDefiningClass)):
    """Functionality common to both ModelAdmin and InlineModelAdmin."""

    raw_id_fields = ()
    fields = None
    exclude = None
    fieldsets = None
    form = forms.ModelForm
    filter_vertical = ()
    filter_horizontal = ()
    radio_fields = {}
    prepopulated_fields = {}
    formfield_overrides = {}
    readonly_fields = ()
    ordering = None
    view_on_site = True

    # Validation of ModelAdmin definitions
    # Old, deprecated style:
    validator_class = None
    default_validator_class = validation.BaseValidator
    # New style:
    checks_class = BaseModelAdminChecks

    @classmethod
    def validate(cls, model):
        """Deprecated entry point for the old validator machinery."""
        warnings.warn(
            'ModelAdmin.validate() is deprecated. Use "check()" instead.',
            RemovedInDjango19Warning)
        if cls.validator_class:
            validator = cls.validator_class()
        else:
            validator = cls.default_validator_class()
        validator.validate(cls, model)

    @classmethod
    def check(cls, model, **kwargs):
        """
        Run system checks on this ModelAdmin class, falling back to a legacy
        ``validator_class`` (with a deprecation warning) if one is set.
        """
        if cls.validator_class:
            warnings.warn(
                'ModelAdmin.validator_class is deprecated. '
                'ModeAdmin validators must be converted to use '
                'the system check framework.',
                RemovedInDjango19Warning)
            validator = cls.validator_class()
            try:
                validator.validate(cls, model)
            except ImproperlyConfigured as e:
                # Surface legacy validation failures as check errors.
                return [checks.Error(e.args[0], hint=None, obj=cls)]
            else:
                return []
        else:
            return cls.checks_class().check(cls, model, **kwargs)

    def __init__(self):
        # Merge class-level overrides on top of the global defaults so that
        # per-admin customisations win.
        overrides = FORMFIELD_FOR_DBFIELD_DEFAULTS.copy()
        overrides.update(self.formfield_overrides)
        self.formfield_overrides = overrides

    def formfield_for_dbfield(self, db_field, **kwargs):
        """
        Hook for specifying the form Field instance for a given database Field
        instance. If kwargs are given, they're passed to the form Field's
        constructor.
        """
        request = kwargs.pop("request", None)

        # If the field specifies choices, we don't need to look for special
        # admin widgets - just use the choice field machinery.
        if db_field.choices:
            return self.formfield_for_choice_field(db_field, request, **kwargs)

        # ForeignKey or ManyToManyFields
        if isinstance(db_field, (models.ForeignKey, models.ManyToManyField)):
            # Combine the field kwargs with any options for formfield_overrides.
            if db_field.__class__ in self.formfield_overrides:
                kwargs = dict(self.formfield_overrides[db_field.__class__], **kwargs)

            # Get the correct formfield.
            if isinstance(db_field, models.ForeignKey):
                formfield = self.formfield_for_foreignkey(db_field, request, **kwargs)
            elif isinstance(db_field, models.ManyToManyField):
                formfield = self.formfield_for_manytomany(db_field, request, **kwargs)

            # For non-raw_id fields, wrap the widget with a wrapper that adds
            # the "add related" interface to the end of the rendered output.
            if formfield and db_field.name not in self.raw_id_fields:
                related_modeladmin = self.admin_site._registry.get(db_field.rel.to)
                can_add_related = bool(related_modeladmin and
                                       related_modeladmin.has_add_permission(request))
                formfield.widget = widgets.RelatedFieldWidgetWrapper(
                    formfield.widget, db_field.rel, self.admin_site,
                    can_add_related=can_add_related)

            return formfield

        # If we've got overrides for the formfield defined, use them. kwargs
        # passed to formfield_for_dbfield override the defaults.
        for klass in db_field.__class__.mro():
            if klass in self.formfield_overrides:
                kwargs = dict(copy.deepcopy(self.formfield_overrides[klass]), **kwargs)
                return db_field.formfield(**kwargs)

        # For any other type of field, just call its formfield() method.
        return db_field.formfield(**kwargs)

    def formfield_for_choice_field(self, db_field, request=None, **kwargs):
        """Get a form Field for a database Field that has declared choices."""
        # If the field is named as a radio_field, use a RadioSelect.
        if db_field.name in self.radio_fields:
            # Avoid stomping on custom widget/choices arguments.
            if 'widget' not in kwargs:
                kwargs['widget'] = widgets.AdminRadioSelect(attrs={
                    'class': get_ul_class(self.radio_fields[db_field.name]),
                })
            if 'choices' not in kwargs:
                kwargs['choices'] = db_field.get_choices(
                    include_blank=db_field.blank,
                    blank_choice=[('', _('None'))]
                )
        return db_field.formfield(**kwargs)

    def get_field_queryset(self, db, db_field, request):
        """
        If the related model's admin declares an ordering, return a queryset
        honouring it; otherwise return None to use the field's default.
        """
        related_admin = self.admin_site._registry.get(db_field.rel.to, None)
        if related_admin is not None:
            ordering = related_admin.get_ordering(request)
            if ordering is not None and ordering != ():
                return db_field.rel.to._default_manager.using(db).order_by(*ordering)
        return None

    def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
        """Get a form Field for a ForeignKey."""
        db = kwargs.get('using')
        if db_field.name in self.raw_id_fields:
            kwargs['widget'] = widgets.ForeignKeyRawIdWidget(db_field.rel,
                                                             self.admin_site, using=db)
        elif db_field.name in self.radio_fields:
            kwargs['widget'] = widgets.AdminRadioSelect(attrs={
                'class': get_ul_class(self.radio_fields[db_field.name]),
            })
            kwargs['empty_label'] = _('None') if db_field.blank else None

        if 'queryset' not in kwargs:
            queryset = self.get_field_queryset(db, db_field, request)
            if queryset is not None:
                kwargs['queryset'] = queryset

        return db_field.formfield(**kwargs)

    def formfield_for_manytomany(self, db_field, request=None, **kwargs):
        """Get a form Field for a ManyToManyField."""
        # If it uses an intermediary model that isn't auto created, don't show
        # a field in admin.
        if not db_field.rel.through._meta.auto_created:
            return None
        db = kwargs.get('using')

        if db_field.name in self.raw_id_fields:
            kwargs['widget'] = widgets.ManyToManyRawIdWidget(db_field.rel,
                                                             self.admin_site, using=db)
            kwargs['help_text'] = ''
        elif db_field.name in (list(self.filter_vertical) + list(self.filter_horizontal)):
            kwargs['widget'] = widgets.FilteredSelectMultiple(
                db_field.verbose_name,
                db_field.name in self.filter_vertical
            )

        if 'queryset' not in kwargs:
            queryset = self.get_field_queryset(db, db_field, request)
            if queryset is not None:
                kwargs['queryset'] = queryset

        form_field = db_field.formfield(**kwargs)
        # Append the multi-select usage hint unless a checkbox widget is used.
        if isinstance(form_field.widget, SelectMultiple) and not isinstance(form_field.widget, CheckboxSelectMultiple):
            msg = _('Hold down "Control", or "Command" on a Mac, to select more than one.')
            help_text = form_field.help_text
            form_field.help_text = string_concat(help_text, ' ', msg) if help_text else msg
        return form_field

    def get_view_on_site_url(self, obj=None):
        """Return the "View on site" URL for ``obj``, or None if unavailable."""
        if obj is None or not self.view_on_site:
            return None

        if callable(self.view_on_site):
            return self.view_on_site(obj)
        elif self.view_on_site and hasattr(obj, 'get_absolute_url'):
            # use the ContentType lookup if view_on_site is True
            return reverse('admin:view_on_site', kwargs={
                'content_type_id': get_content_type_for_model(obj).pk,
                'object_id': obj.pk
            })

    @property
    def declared_fieldsets(self):
        """Deprecated accessor for fieldsets declared directly on the class."""
        warnings.warn(
            "ModelAdmin.declared_fieldsets is deprecated and "
            "will be removed in Django 1.9.",
            RemovedInDjango19Warning, stacklevel=2
        )

        if self.fieldsets:
            return self.fieldsets
        elif self.fields:
            return [(None, {'fields': self.fields})]
        return None

    def get_fields(self, request, obj=None):
        """Hook for specifying fields."""
        return self.fields

    def get_fieldsets(self, request, obj=None):
        """Hook for specifying fieldsets."""
        # We access the deprecated declared_fieldsets property and record
        # whether it triggered a warning. If it did, it is ours; if
        # it didn't then it has been overridden so we must warn about the
        # deprecation ourselves.
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            declared_fieldsets = self.declared_fieldsets
        if len(w) != 1 or not issubclass(w[0].category, RemovedInDjango19Warning):
            warnings.warn(
                "ModelAdmin.declared_fieldsets is deprecated and "
                "will be removed in Django 1.9.",
                RemovedInDjango19Warning
            )
        if declared_fieldsets:
            return declared_fieldsets

        if self.fieldsets:
            return self.fieldsets
        return [(None, {'fields': self.get_fields(request, obj)})]

    def get_ordering(self, request):
        """Hook for specifying field ordering."""
        return self.ordering or ()  # otherwise we might try to *None, which is bad ;)

    def get_readonly_fields(self, request, obj=None):
        """Hook for specifying custom readonly fields."""
        return self.readonly_fields

    def get_prepopulated_fields(self, request, obj=None):
        """Hook for specifying custom prepopulated fields."""
        return self.prepopulated_fields

    def get_queryset(self, request):
        """
        Return a QuerySet of all model instances that can be edited by the
        admin site.
        """
        qs = self.model._default_manager.get_queryset()
        ordering = self.get_ordering(request)
        if ordering:
            qs = qs.order_by(*ordering)
        return qs

    def lookup_allowed(self, lookup, value):
        """Return True if the given changelist filter lookup is permitted."""
        from django.contrib.admin.filters import SimpleListFilter

        model = self.model
        # Check FKey lookups that are allowed, so that popups produced by
        # ForeignKeyRawIdWidget, on the basis of ForeignKey.limit_choices_to,
        # are allowed to work.
        for l in model._meta.related_fkey_lookups:
            # As limit_choices_to can be a callable, invoke it here.
            if callable(l):
                l = l()
            for k, v in widgets.url_params_from_lookup_dict(l).items():
                if k == lookup and v == value:
                    return True

        parts = lookup.split(LOOKUP_SEP)

        # Last term in lookup is a query term (__exact, __startswith etc)
        # and can be ignored.
        if len(parts) > 1 and parts[-1] in QUERY_TERMS:
            parts.pop()

        # Special case -- foo__id__exact and foo__id queries are implied
        # if foo has been specifically included in the lookup list; so
        # drop __id if it is the last part. First find the pk attribute name.
        # NOTE(review): the throwaway "_" names below shadow ugettext's alias
        # for the remainder of this method.
        rel_name = None
        for part in parts[:-1]:
            try:
                field, _, _, _ = model._meta.get_field_by_name(part)
            except FieldDoesNotExist:
                # Lookups on non-existent fields are ok, since they're ignored
                # later.
                return True
            if hasattr(field, 'rel'):
                if field.rel is None:
                    # This property or relation doesn't exist, but it's allowed
                    # since it's ignored in ChangeList.get_filters().
                    return True
                model = field.rel.to
                rel_name = field.rel.get_related_field().name
            elif isinstance(field, RelatedObject):
                model = field.model
                rel_name = model._meta.pk.name
            else:
                rel_name = None
        if rel_name and len(parts) > 1 and parts[-1] == rel_name:
            parts.pop()

        if len(parts) == 1:
            return True
        clean_lookup = LOOKUP_SEP.join(parts)
        # A lookup is valid if it is the date hierarchy or one of the
        # configured list filters.
        valid_lookups = [self.date_hierarchy]
        for filter_item in self.list_filter:
            if isinstance(filter_item, type) and issubclass(filter_item, SimpleListFilter):
                valid_lookups.append(filter_item.parameter_name)
            elif isinstance(filter_item, (list, tuple)):
                valid_lookups.append(filter_item[0])
            else:
                valid_lookups.append(filter_item)
        return clean_lookup in valid_lookups

    def to_field_allowed(self, request, to_field):
        """
        Return True if the model associated with this admin may be referenced
        through the given ``to_field`` by some registered admin or inline.
        """
        opts = self.model._meta

        try:
            field = opts.get_field(to_field)
        except FieldDoesNotExist:
            return False

        # Make sure at least one of the models registered for this site
        # references this field through a FK or an M2M relationship.
        registered_models = set()
        for model, admin in self.admin_site._registry.items():
            registered_models.add(model)
            for inline in admin.inlines:
                registered_models.add(inline.model)

        for related_object in (opts.get_all_related_objects(include_hidden=True) +
                               opts.get_all_related_many_to_many_objects()):
            related_model = related_object.model
            if (any(issubclass(model, related_model) for model in registered_models) and
                    related_object.field.rel.get_related_field() == field):
                return True

        return False

    def has_add_permission(self, request):
        """
        Returns True if the given request has permission to add an object.
        Can be overridden by the user in subclasses.
        """
        opts = self.opts
        codename = get_permission_codename('add', opts)
        return request.user.has_perm("%s.%s" % (opts.app_label, codename))

    def has_change_permission(self, request, obj=None):
        """
        Returns True if the given request has permission to change an object.
        The default implementation doesn't examine the `obj` parameter.
        """
        opts = self.opts
        codename = get_permission_codename('change', opts)
        return request.user.has_perm("%s.%s" % (opts.app_label, codename))

    def has_delete_permission(self, request, obj=None):
        """
        Returns True if the given request has permission to delete an object.
        The default implementation doesn't examine the `obj` parameter.
        """
        opts = self.opts
        codename = get_permission_codename('delete', opts)
        return request.user.has_perm("%s.%s" % (opts.app_label, codename))

    def has_module_permission(self, request):
        """
        Returns True if the given request has any permission in this
        model's app.
        """
        return request.user.has_module_perms(self.opts.app_label)
@python_2_unicode_compatible
class ModelAdmin(BaseModelAdmin):
    """Encapsulates all admin options and functionality for a given model."""

    list_display = ('__str__',)
    list_display_links = ()
    list_filter = ()
    list_select_related = False
    list_per_page = 100
    list_max_show_all = 200
    list_editable = ()
    search_fields = ()
    date_hierarchy = None
    save_as = False
    save_on_top = False
    paginator = Paginator
    preserve_filters = True
    inlines = []

    # Custom templates (designed to be over-ridden in subclasses)
    add_form_template = None
    change_form_template = None
    change_list_template = None
    delete_confirmation_template = None
    delete_selected_confirmation_template = None
    object_history_template = None

    # Actions
    actions = []
    action_form = helpers.ActionForm
    actions_on_top = True
    actions_on_bottom = False
    actions_selection_counter = True

    # Validation (old deprecated style and new checks style):
    default_validator_class = validation.ModelAdminValidator
    checks_class = ModelAdminChecks
    def __init__(self, model, admin_site):
        # Bind this admin to a concrete model and the site it is registered on.
        self.model = model
        self.opts = model._meta
        self.admin_site = admin_site
        super(ModelAdmin, self).__init__()

    def __str__(self):
        # e.g. "myapp.MyModelAdmin" -- used in debug output and checks.
        return "%s.%s" % (self.model._meta.app_label, self.__class__.__name__)
    def get_inline_instances(self, request, obj=None):
        """Instantiate the configured inlines, filtered by the user's permissions."""
        inline_instances = []
        for inline_class in self.inlines:
            inline = inline_class(self.model, self.admin_site)
            if request:
                # Skip inlines the user can't interact with at all.
                if not (inline.has_add_permission(request) or
                        inline.has_change_permission(request, obj) or
                        inline.has_delete_permission(request, obj)):
                    continue
                if not inline.has_add_permission(request):
                    # No "extra" blank forms for users who may not add.
                    inline.max_num = 0
            inline_instances.append(inline)

        return inline_instances
    def get_urls(self):
        """Return the admin URL patterns for this ModelAdmin."""
        from django.conf.urls import url

        def wrap(view):
            # Route every view through admin_view so permission and csrf
            # handling are applied uniformly.
            def wrapper(*args, **kwargs):
                return self.admin_site.admin_view(view)(*args, **kwargs)
            return update_wrapper(wrapper, view)

        info = self.model._meta.app_label, self.model._meta.model_name

        # NOTE: the catch-all change pattern must stay last.
        urlpatterns = [
            url(r'^$', wrap(self.changelist_view), name='%s_%s_changelist' % info),
            url(r'^add/$', wrap(self.add_view), name='%s_%s_add' % info),
            url(r'^(.+)/history/$', wrap(self.history_view), name='%s_%s_history' % info),
            url(r'^(.+)/delete/$', wrap(self.delete_view), name='%s_%s_delete' % info),
            url(r'^(.+)/$', wrap(self.change_view), name='%s_%s_change' % info),
        ]
        return urlpatterns
def urls(self):
return self.get_urls()
urls = property(urls)
    @property
    def media(self):
        """JavaScript media required by the changelist/change-form pages."""
        # Use minified assets outside of DEBUG.
        extra = '' if settings.DEBUG else '.min'
        js = [
            'core.js',
            'admin/RelatedObjectLookups.js',
            'jquery%s.js' % extra,
            'jquery.init.js'
        ]
        if self.actions is not None:
            js.append('actions%s.js' % extra)
        if self.prepopulated_fields:
            js.extend(['urlify.js', 'prepopulate%s.js' % extra])
        return forms.Media(js=[static('admin/js/%s' % url) for url in js])
def get_model_perms(self, request):
return {
'add': self.has_add_permission(request),
'change': self.has_change_permission(request),
'delete': self.has_delete_permission(request),
}
    def get_fields(self, request, obj=None):
        """Hook for specifying fields; defaults to the generated form's fields."""
        if self.fields:
            return self.fields
        form = self.get_form(request, obj, fields=None)
        return list(form.base_fields) + list(self.get_readonly_fields(request, obj))
    def get_form(self, request, obj=None, **kwargs):
        """
        Returns a Form class for use in the admin add view. This is used by
        add_view and change_view.
        """
        if 'fields' in kwargs:
            fields = kwargs.pop('fields')
        else:
            fields = flatten_fieldsets(self.get_fieldsets(request, obj))
        if self.exclude is None:
            exclude = []
        else:
            exclude = list(self.exclude)
        exclude.extend(self.get_readonly_fields(request, obj))
        if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude:
            # Take the custom ModelForm's Meta.exclude into account only if the
            # ModelAdmin doesn't define its own.
            exclude.extend(self.form._meta.exclude)
        # If exclude is an empty list we pass None to be consistent with the
        # modelform_factory default.
        exclude = exclude or None
        defaults = {
            "form": self.form,
            "fields": fields,
            "exclude": exclude,
            "formfield_callback": partial(self.formfield_for_dbfield, request=request),
        }
        defaults.update(kwargs)

        if defaults['fields'] is None and not modelform_defines_fields(defaults['form']):
            defaults['fields'] = forms.ALL_FIELDS

        try:
            return modelform_factory(self.model, **defaults)
        except FieldError as e:
            # Re-raise with a hint pointing at the misconfigured admin class.
            raise FieldError('%s. Check fields/fieldsets/exclude attributes of class %s.'
                             % (e, self.__class__.__name__))
    def get_changelist(self, request, **kwargs):
        """
        Returns the ChangeList class for use on the changelist page.
        """
        from django.contrib.admin.views.main import ChangeList
        return ChangeList

    def get_object(self, request, object_id):
        """
        Returns an instance matching the primary key provided. ``None`` is
        returned if no match is found (or the object_id fails validation
        against the primary key field).
        """
        queryset = self.get_queryset(request)
        model = queryset.model
        try:
            object_id = model._meta.pk.to_python(object_id)
            return queryset.get(pk=object_id)
        except (model.DoesNotExist, ValidationError, ValueError):
            return None
    def get_changelist_form(self, request, **kwargs):
        """
        Returns a Form class for use in the Formset on the changelist page.
        """
        defaults = {
            "formfield_callback": partial(self.formfield_for_dbfield, request=request),
        }
        defaults.update(kwargs)
        if (defaults.get('fields') is None
                and not modelform_defines_fields(defaults.get('form'))):
            defaults['fields'] = forms.ALL_FIELDS

        return modelform_factory(self.model, **defaults)

    def get_changelist_formset(self, request, **kwargs):
        """
        Returns a FormSet class for use on the changelist page when
        list_editable is used.
        """
        defaults = {
            "formfield_callback": partial(self.formfield_for_dbfield, request=request),
        }
        defaults.update(kwargs)
        return modelformset_factory(self.model,
                                    self.get_changelist_form(request), extra=0,
                                    fields=self.list_editable, **defaults)
    def _get_formsets(self, request, obj):
        """
        Helper that yields the formsets for each inline; kept separate so the
        deprecation warning in get_formsets() fires immediately while this
        function remains a generator.
        """
        for inline in self.get_inline_instances(request, obj):
            yield inline.get_formset(request, obj)

    def get_formsets(self, request, obj=None):
        """Deprecated in favour of get_formsets_with_inlines()."""
        warnings.warn(
            "ModelAdmin.get_formsets() is deprecated and will be removed in "
            "Django 1.9. Use ModelAdmin.get_formsets_with_inlines() instead.",
            RemovedInDjango19Warning, stacklevel=2
        )
        return self._get_formsets(request, obj)

    def get_formsets_with_inlines(self, request, obj=None):
        """
        Yields (formset, inline) pairs for this admin.
        """
        # We call the deprecated get_formsets() and record whether it triggers
        # a warning. If it does, then it's the stock implementation; if
        # it doesn't then it has been overridden so we must warn about the
        # deprecation and honour the override's formsets.
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            formsets = self.get_formsets(request, obj)
        if len(w) != 1 or not issubclass(w[0].category, RemovedInDjango19Warning):
            warnings.warn(
                "ModelAdmin.get_formsets() is deprecated and will be removed in "
                "Django 1.9. Use ModelAdmin.get_formsets_with_inlines() instead.",
                RemovedInDjango19Warning
            )
            if formsets:
                zipped = zip(formsets, self.get_inline_instances(request, None))
                for formset, inline in zipped:
                    yield formset, inline
        else:
            for inline in self.get_inline_instances(request, obj):
                yield inline.get_formset(request, obj), inline
def get_paginator(self, request, queryset, per_page, orphans=0, allow_empty_first_page=True):
return self.paginator(queryset, per_page, orphans, allow_empty_first_page)
    def log_addition(self, request, object):
        """
        Log that an object has been successfully added.

        The default implementation creates an admin LogEntry object.
        """
        from django.contrib.admin.models import LogEntry, ADDITION
        LogEntry.objects.log_action(
            user_id=request.user.pk,
            content_type_id=get_content_type_for_model(object).pk,
            object_id=object.pk,
            object_repr=force_text(object),
            action_flag=ADDITION
        )

    def log_change(self, request, object, message):
        """
        Log that an object has been successfully changed.

        The default implementation creates an admin LogEntry object.
        """
        from django.contrib.admin.models import LogEntry, CHANGE
        LogEntry.objects.log_action(
            user_id=request.user.pk,
            content_type_id=get_content_type_for_model(object).pk,
            object_id=object.pk,
            object_repr=force_text(object),
            action_flag=CHANGE,
            change_message=message
        )

    def log_deletion(self, request, object, object_repr):
        """
        Log that an object will be deleted. Note that this method must be
        called before the deletion.

        The default implementation creates an admin LogEntry object.
        """
        from django.contrib.admin.models import LogEntry, DELETION
        LogEntry.objects.log_action(
            user_id=request.user.pk,
            content_type_id=get_content_type_for_model(object).pk,
            object_id=object.pk,
            object_repr=object_repr,
            action_flag=DELETION
        )
    def action_checkbox(self, obj):
        """
        A list_display column containing a checkbox widget.
        """
        return helpers.checkbox.render(helpers.ACTION_CHECKBOX_NAME, force_text(obj.pk))
    # Column header is the "select all" toggle checkbox.
    action_checkbox.short_description = mark_safe('<input type="checkbox" id="action-toggle" />')
    action_checkbox.allow_tags = True
    def get_actions(self, request):
        """
        Return a dictionary mapping the names of all actions for this
        ModelAdmin to (callable, name, description) tuples.
        """
        # If self.actions is explicitly set to None that means that we don't
        # want *any* actions enabled on this page.
        if self.actions is None or IS_POPUP_VAR in request.GET:
            return OrderedDict()

        actions = []

        # Gather actions from the admin site first
        for (name, func) in self.admin_site.actions:
            description = getattr(func, 'short_description', name.replace('_', ' '))
            actions.append((func, name, description))

        # Then gather them from the model admin and all parent classes,
        # starting with self and working back up.
        for klass in self.__class__.mro()[::-1]:
            class_actions = getattr(klass, 'actions', [])
            # Avoid trying to iterate over None
            if not class_actions:
                continue
            actions.extend(self.get_action(action) for action in class_actions)

        # get_action might have returned None, so filter any of those out.
        actions = filter(None, actions)

        # Convert the actions into an OrderedDict keyed by name.
        actions = OrderedDict(
            (name, (func, name, desc))
            for func, name, desc in actions
        )

        return actions
def get_action_choices(self, request, default_choices=BLANK_CHOICE_DASH):
choices = [] + default_choices
for func, name, description in six.itervalues(self.get_actions(request)):
choice = (name, description % model_format_dict(self.opts))
choices.append(choice)
return choices
    def get_action(self, action):
        """
        Return a given action from a parameter, which can either be a callable,
        or the name of a method on the ModelAdmin, or the name of an admin-site
        action. Returns a (callable, name, description) tuple, or None.
        """
        # If the action is a callable, just use it.
        if callable(action):
            func = action
            action = action.__name__

        # Next, look for a method. Grab it off self.__class__ to get an unbound
        # method instead of a bound one; this ensures that the calling
        # conventions are the same for functions and methods.
        elif hasattr(self.__class__, action):
            func = getattr(self.__class__, action)

        # Finally, look for a named method on the admin site
        else:
            try:
                func = self.admin_site.get_action(action)
            except KeyError:
                return None

        if hasattr(func, 'short_description'):
            description = func.short_description
        else:
            description = capfirst(action.replace('_', ' '))
        return func, action, description
    def get_list_display(self, request):
        """
        Return a sequence containing the fields to be displayed on the
        changelist.
        """
        return self.list_display

    def get_list_display_links(self, request, list_display):
        """
        Return a sequence containing the fields to be displayed as links on
        the changelist. ``list_display`` is the result of get_list_display().
        """
        if self.list_display_links or self.list_display_links is None or not list_display:
            return self.list_display_links
        else:
            # Use only the first item in list_display as link
            return list(list_display)[:1]

    def get_list_filter(self, request):
        """
        Return a sequence containing the fields to be displayed as filters in
        the right sidebar of the changelist page.
        """
        return self.list_filter

    def get_search_fields(self, request):
        """
        Return a sequence containing the fields to be searched whenever
        somebody submits a search query.
        """
        return self.search_fields
    def get_search_results(self, request, queryset, search_term):
        """
        Return a tuple of (queryset implementing the search, bool indicating
        whether the results may contain duplicates).
        """
        # Apply keyword searches.
        def construct_search(field_name):
            # A leading ^, = or @ selects istartswith/iexact/search lookups.
            if field_name.startswith('^'):
                return "%s__istartswith" % field_name[1:]
            elif field_name.startswith('='):
                return "%s__iexact" % field_name[1:]
            elif field_name.startswith('@'):
                return "%s__search" % field_name[1:]
            else:
                return "%s__icontains" % field_name

        use_distinct = False
        search_fields = self.get_search_fields(request)
        if search_fields and search_term:
            orm_lookups = [construct_search(str(search_field))
                           for search_field in search_fields]
            # Each whitespace-separated bit must match at least one field.
            for bit in search_term.split():
                or_queries = [models.Q(**{orm_lookup: bit})
                              for orm_lookup in orm_lookups]
                queryset = queryset.filter(reduce(operator.or_, or_queries))
            if not use_distinct:
                # Searching across a to-many relation can duplicate rows.
                for search_spec in orm_lookups:
                    if lookup_needs_distinct(self.opts, search_spec):
                        use_distinct = True
                        break

        return queryset, use_distinct
    def get_preserved_filters(self, request):
        """
        Return the preserved filters querystring.
        """
        match = request.resolver_match
        if self.preserve_filters and match:
            opts = self.model._meta
            current_url = '%s:%s' % (match.app_name, match.url_name)
            changelist_url = 'admin:%s_%s_changelist' % (opts.app_label, opts.model_name)
            if current_url == changelist_url:
                # On the changelist itself: preserve the current GET filters.
                preserved_filters = request.GET.urlencode()
            else:
                # Elsewhere: propagate the filters carried in the URL.
                preserved_filters = request.GET.get('_changelist_filters')

            if preserved_filters:
                return urlencode({'_changelist_filters': preserved_filters})
        return ''
    def construct_change_message(self, request, form, formsets):
        """
        Construct a change message from a changed object.
        """
        change_message = []
        if form.changed_data:
            change_message.append(_('Changed %s.') % get_text_list(form.changed_data, _('and')))

        if formsets:
            for formset in formsets:
                for added_object in formset.new_objects:
                    change_message.append(_('Added %(name)s "%(object)s".')
                                          % {'name': force_text(added_object._meta.verbose_name),
                                             'object': force_text(added_object)})
                for changed_object, changed_fields in formset.changed_objects:
                    change_message.append(_('Changed %(list)s for %(name)s "%(object)s".')
                                          % {'list': get_text_list(changed_fields, _('and')),
                                             'name': force_text(changed_object._meta.verbose_name),
                                             'object': force_text(changed_object)})
                for deleted_object in formset.deleted_objects:
                    change_message.append(_('Deleted %(name)s "%(object)s".')
                                          % {'name': force_text(deleted_object._meta.verbose_name),
                                             'object': force_text(deleted_object)})
        change_message = ' '.join(change_message)
        return change_message or _('No fields changed.')
    def message_user(self, request, message, level=messages.INFO, extra_tags='',
                     fail_silently=False):
        """
        Send a message to the user via the django.contrib.messages backend.

        Exposes almost the same API as messages.add_message(), but accepts the
        positional arguments in a different order to maintain backwards
        compatibility. For convenience, it accepts the `level` argument as
        a string rather than the usual level number.
        """
        if not isinstance(level, int):
            # attempt to get the level if passed a string
            try:
                level = getattr(messages.constants, level.upper())
            except AttributeError:
                levels = messages.constants.DEFAULT_TAGS.values()
                levels_repr = ', '.join('`%s`' % l for l in levels)
                raise ValueError('Bad message level string: `%s`. '
                                 'Possible values are: %s' % (level, levels_repr))

        messages.add_message(request, level, message, extra_tags=extra_tags,
                             fail_silently=fail_silently)
    def save_form(self, request, form, change):
        """
        Given a ModelForm return an unsaved instance. ``change`` is True if
        the object is being changed, and False if it's being added.
        """
        return form.save(commit=False)

    def save_model(self, request, obj, form, change):
        """
        Given a model instance save it to the database.
        """
        obj.save()

    def delete_model(self, request, obj):
        """
        Given a model instance delete it from the database.
        """
        obj.delete()

    def save_formset(self, request, form, formset, change):
        """
        Given an inline formset save it to the database.
        """
        formset.save()

    def save_related(self, request, form, formsets, change):
        """
        Given the HttpRequest, the parent ModelForm instance, the list of
        inline formsets and a boolean for add vs. change, save the related
        objects. At this point save_form() and save_model() have already
        been called.
        """
        form.save_m2m()
        for formset in formsets:
            self.save_formset(request, form, formset, change=change)
    def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None):
        """Render the add/change form using the configured template."""
        opts = self.model._meta
        app_label = opts.app_label
        preserved_filters = self.get_preserved_filters(request)
        form_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, form_url)
        view_on_site_url = self.get_view_on_site_url(obj)
        context.update({
            'add': add,
            'change': change,
            'has_add_permission': self.has_add_permission(request),
            'has_change_permission': self.has_change_permission(request, obj),
            'has_delete_permission': self.has_delete_permission(request, obj),
            'has_file_field': True,  # FIXME - this should check if form or formsets have a FileField,
            'has_absolute_url': view_on_site_url is not None,
            'absolute_url': view_on_site_url,
            'form_url': form_url,
            'opts': opts,
            'content_type_id': get_content_type_for_model(self.model).pk,
            'save_as': self.save_as,
            'save_on_top': self.save_on_top,
            'to_field_var': TO_FIELD_VAR,
            'is_popup_var': IS_POPUP_VAR,
            'app_label': app_label,
        })
        if add and self.add_form_template is not None:
            form_template = self.add_form_template
        else:
            form_template = self.change_form_template

        # Fall back through model-, app- and site-level templates.
        return TemplateResponse(request, form_template or [
            "admin/%s/%s/change_form.html" % (app_label, opts.model_name),
            "admin/%s/change_form.html" % app_label,
            "admin/change_form.html"
        ], context, current_app=self.admin_site.name)
    def response_add(self, request, obj, post_url_continue=None):
        """
        Determines the HttpResponse for the add_view stage.
        """
        opts = obj._meta
        pk_value = obj._get_pk_val()
        preserved_filters = self.get_preserved_filters(request)

        msg_dict = {'name': force_text(opts.verbose_name), 'obj': force_text(obj)}
        # Here, we distinguish between different save types by checking for
        # the presence of keys in request.POST.

        if IS_POPUP_VAR in request.POST:
            # Popup: report the saved value back to the opener page.
            to_field = request.POST.get(TO_FIELD_VAR)
            if to_field:
                attr = str(to_field)
            else:
                attr = obj._meta.pk.attname
            value = obj.serializable_value(attr)
            return SimpleTemplateResponse('admin/popup_response.html', {
                'pk_value': escape(pk_value),  # for possible backwards-compatibility
                'value': escape(value),
                'obj': escapejs(obj)
            })

        elif "_continue" in request.POST:
            # "Save and continue editing": stay on the change form.
            msg = _('The %(name)s "%(obj)s" was added successfully. You may edit it again below.') % msg_dict
            self.message_user(request, msg, messages.SUCCESS)
            if post_url_continue is None:
                post_url_continue = reverse('admin:%s_%s_change' %
                                            (opts.app_label, opts.model_name),
                                            args=(quote(pk_value),),
                                            current_app=self.admin_site.name)
            post_url_continue = add_preserved_filters(
                {'preserved_filters': preserved_filters, 'opts': opts},
                post_url_continue
            )
            return HttpResponseRedirect(post_url_continue)

        elif "_addanother" in request.POST:
            # "Save and add another": back to an empty add form.
            msg = _('The %(name)s "%(obj)s" was added successfully. You may add another %(name)s below.') % msg_dict
            self.message_user(request, msg, messages.SUCCESS)
            redirect_url = request.path
            redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
            return HttpResponseRedirect(redirect_url)

        else:
            msg = _('The %(name)s "%(obj)s" was added successfully.') % msg_dict
            self.message_user(request, msg, messages.SUCCESS)
            return self.response_post_save_add(request, obj)
    def response_change(self, request, obj):
        """
        Determines the HttpResponse for the change_view stage.
        """
        opts = self.model._meta
        pk_value = obj._get_pk_val()
        preserved_filters = self.get_preserved_filters(request)

        msg_dict = {'name': force_text(opts.verbose_name), 'obj': force_text(obj)}
        if "_continue" in request.POST:
            # "Save and continue editing": stay on this change form.
            msg = _('The %(name)s "%(obj)s" was changed successfully. You may edit it again below.') % msg_dict
            self.message_user(request, msg, messages.SUCCESS)
            redirect_url = request.path
            redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
            return HttpResponseRedirect(redirect_url)

        elif "_saveasnew" in request.POST:
            # "Save as new": obj was saved as a brand new object.
            msg = _('The %(name)s "%(obj)s" was added successfully. You may edit it again below.') % msg_dict
            self.message_user(request, msg, messages.SUCCESS)
            redirect_url = reverse('admin:%s_%s_change' %
                                   (opts.app_label, opts.model_name),
                                   args=(pk_value,),
                                   current_app=self.admin_site.name)
            redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
            return HttpResponseRedirect(redirect_url)

        elif "_addanother" in request.POST:
            # "Save and add another": go to the empty add form.
            msg = _('The %(name)s "%(obj)s" was changed successfully. You may add another %(name)s below.') % msg_dict
            self.message_user(request, msg, messages.SUCCESS)
            redirect_url = reverse('admin:%s_%s_add' %
                                   (opts.app_label, opts.model_name),
                                   current_app=self.admin_site.name)
            redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
            return HttpResponseRedirect(redirect_url)

        else:
            msg = _('The %(name)s "%(obj)s" was changed successfully.') % msg_dict
            self.message_user(request, msg, messages.SUCCESS)
            return self.response_post_save_change(request, obj)
def response_post_save_add(self, request, obj):
opts = self.model._meta
if self.has_change_permission(request, None):
post_url = reverse('admin:%s_%s_changelist' %
(opts.app_label, opts.model_name),
current_app=self.admin_site.name)
preserved_filters = self.get_preserved_filters(request)
post_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, post_url)
else:
post_url = reverse('admin:index',
current_app=self.admin_site.name)
return HttpResponseRedirect(post_url)
def response_post_save_change(self, request, obj):
opts = self.model._meta
if self.has_change_permission(request, None):
post_url = reverse('admin:%s_%s_changelist' %
(opts.app_label, opts.model_name),
current_app=self.admin_site.name)
preserved_filters = self.get_preserved_filters(request)
post_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, post_url)
else:
post_url = reverse('admin:index',
current_app=self.admin_site.name)
return HttpResponseRedirect(post_url)
    def response_action(self, request, queryset):
        """
        Handle an admin action. This is called if a request is POSTed to the
        changelist; it returns an HttpResponse if the action was handled, and
        None otherwise.
        """
        # There can be multiple action forms on the page (at the top
        # and bottom of the change list, for example). Get the action
        # whose button was pushed.
        try:
            action_index = int(request.POST.get('index', 0))
        except ValueError:
            action_index = 0

        # Construct the action form.
        data = request.POST.copy()
        data.pop(helpers.ACTION_CHECKBOX_NAME, None)
        data.pop("index", None)

        # Use the action whose button was pushed
        try:
            data.update({'action': data.getlist('action')[action_index]})
        except IndexError:
            # If we didn't get an action from the chosen form that's invalid
            # POST data, so by deleting action it'll fail the validation check
            # below -- nothing to do here.
            pass

        action_form = self.action_form(data, auto_id=None)
        action_form.fields['action'].choices = self.get_action_choices(request)

        # If the form is valid we can handle the action.
        if action_form.is_valid():
            action = action_form.cleaned_data['action']
            select_across = action_form.cleaned_data['select_across']
            func = self.get_actions(request)[action][0]

            # Get the list of selected PKs. If nothing's selected, we can't
            # perform an action on it, so bail. Except we want to perform
            # the action explicitly on all objects.
            selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
            if not selected and not select_across:
                # Reminder that something needs to be selected or nothing will happen
                msg = _("Items must be selected in order to perform "
                        "actions on them. No items have been changed.")
                self.message_user(request, msg, messages.WARNING)
                return None

            if not select_across:
                # Perform the action only on the selected objects
                queryset = queryset.filter(pk__in=selected)

            response = func(self, request, queryset)

            # Actions may return an HttpResponse-like object, which will be
            # used as the response from the POST. If not, we'll be a good
            # HTTP citizen and redirect back to the changelist page.
            if isinstance(response, HttpResponseBase):
                return response
            else:
                return HttpResponseRedirect(request.get_full_path())
        else:
            msg = _("No action selected.")
            self.message_user(request, msg, messages.WARNING)
            return None
    def response_delete(self, request, obj_display):
        """
        Determines the HttpResponse for the delete_view stage.
        """
        opts = self.model._meta

        self.message_user(request,
            _('The %(name)s "%(obj)s" was deleted successfully.') % {
                'name': force_text(opts.verbose_name),
                'obj': force_text(obj_display)
            }, messages.SUCCESS)

        if self.has_change_permission(request, None):
            # Back to the (filter-preserving) changelist.
            post_url = reverse('admin:%s_%s_changelist' %
                               (opts.app_label, opts.model_name),
                               current_app=self.admin_site.name)
            preserved_filters = self.get_preserved_filters(request)
            post_url = add_preserved_filters(
                {'preserved_filters': preserved_filters, 'opts': opts}, post_url
            )
        else:
            post_url = reverse('admin:index',
                               current_app=self.admin_site.name)
        return HttpResponseRedirect(post_url)
def render_delete_form(self, request, context):
opts = self.model._meta
app_label = opts.app_label
return TemplateResponse(request,
self.delete_confirmation_template or [
"admin/{}/{}/delete_confirmation.html".format(app_label, opts.model_name),
"admin/{}/delete_confirmation.html".format(app_label),
"admin/delete_confirmation.html"
], context, current_app=self.admin_site.name)
def get_inline_formsets(self, request, formsets, inline_instances,
obj=None):
inline_admin_formsets = []
for inline, formset in zip(inline_instances, formsets):
fieldsets = list(inline.get_fieldsets(request, obj))
readonly = list(inline.get_readonly_fields(request, obj))
prepopulated = dict(inline.get_prepopulated_fields(request, obj))
inline_admin_formset = helpers.InlineAdminFormSet(inline, formset,
fieldsets, prepopulated, readonly, model_admin=self)
inline_admin_formsets.append(inline_admin_formset)
return inline_admin_formsets
def get_changeform_initial_data(self, request):
initial = dict(request.GET.items())
for k in initial:
try:
f = self.model._meta.get_field(k)
except models.FieldDoesNotExist:
continue
if isinstance(f, models.ManyToManyField):
initial[k] = initial[k].split(",")
return initial
    @csrf_protect_m
    @transaction.atomic
    def changeform_view(self, request, object_id=None, form_url='', extra_context=None):
        """Shared implementation of the admin "add" and "change" pages.

        When ``object_id`` is None the view behaves as an add form, otherwise
        it edits the existing object.  Handles permission checks, form and
        inline-formset validation, saving, admin logging and rendering.
        """
        to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR))
        if to_field and not self.to_field_allowed(request, to_field):
            raise DisallowedModelAdminToField("The field %s cannot be referenced." % to_field)
        model = self.model
        opts = model._meta
        add = object_id is None
        if add:
            if not self.has_add_permission(request):
                raise PermissionDenied
            obj = None
        else:
            obj = self.get_object(request, unquote(object_id))
            if not self.has_change_permission(request, obj):
                raise PermissionDenied
            if obj is None:
                raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {
                    'name': force_text(opts.verbose_name), 'key': escape(object_id)})
            # "Save as new" re-submits the form through the add view so that
            # a fresh object is created instead of updating this one.
            if request.method == 'POST' and "_saveasnew" in request.POST:
                return self.add_view(request, form_url=reverse('admin:%s_%s_add' % (
                    opts.app_label, opts.model_name),
                    current_app=self.admin_site.name))
        ModelForm = self.get_form(request, obj)
        if request.method == 'POST':
            form = ModelForm(request.POST, request.FILES, instance=obj)
            if form.is_valid():
                form_validated = True
                new_object = self.save_form(request, form, change=not add)
            else:
                form_validated = False
                new_object = form.instance
            formsets, inline_instances = self._create_formsets(request, new_object)
            # Persist only when the main form AND every inline formset passed
            # validation; otherwise fall through and re-render with errors.
            if all_valid(formsets) and form_validated:
                self.save_model(request, new_object, form, not add)
                self.save_related(request, form, formsets, not add)
                if add:
                    self.log_addition(request, new_object)
                    return self.response_add(request, new_object)
                else:
                    change_message = self.construct_change_message(request, form, formsets)
                    self.log_change(request, new_object, change_message)
                    return self.response_change(request, new_object)
        else:
            # GET: build unbound form/formsets for initial display.
            if add:
                initial = self.get_changeform_initial_data(request)
                form = ModelForm(initial=initial)
                formsets, inline_instances = self._create_formsets(request, self.model())
            else:
                form = ModelForm(instance=obj)
                formsets, inline_instances = self._create_formsets(request, obj)
        adminForm = helpers.AdminForm(
            form,
            list(self.get_fieldsets(request, obj)),
            self.get_prepopulated_fields(request, obj),
            self.get_readonly_fields(request, obj),
            model_admin=self)
        media = self.media + adminForm.media
        inline_formsets = self.get_inline_formsets(request, formsets, inline_instances, obj)
        for inline_formset in inline_formsets:
            media = media + inline_formset.media
        context = dict(self.admin_site.each_context(),
            title=(_('Add %s') if add else _('Change %s')) % force_text(opts.verbose_name),
            adminform=adminForm,
            object_id=object_id,
            original=obj,
            is_popup=(IS_POPUP_VAR in request.POST or
                      IS_POPUP_VAR in request.GET),
            to_field=to_field,
            media=media,
            inline_admin_formsets=inline_formsets,
            errors=helpers.AdminErrorList(form, formsets),
            preserved_filters=self.get_preserved_filters(request),
        )
        context.update(extra_context or {})
        return self.render_change_form(request, context, add=add, change=not add, obj=obj, form_url=form_url)
def add_view(self, request, form_url='', extra_context=None):
return self.changeform_view(request, None, form_url, extra_context)
def change_view(self, request, object_id, form_url='', extra_context=None):
return self.changeform_view(request, object_id, form_url, extra_context)
    @csrf_protect_m
    def changelist_view(self, request, extra_context=None):
        """The "change list" admin view for this model.

        Handles listing, filtering, searching, bulk actions (with and without
        confirmation) and, when ``list_editable`` is set, inline bulk editing.
        """
        from django.contrib.admin.views.main import ERROR_FLAG
        opts = self.model._meta
        app_label = opts.app_label
        if not self.has_change_permission(request, None):
            raise PermissionDenied
        list_display = self.get_list_display(request)
        list_display_links = self.get_list_display_links(request, list_display)
        list_filter = self.get_list_filter(request)
        search_fields = self.get_search_fields(request)
        actions = self.get_actions(request)
        if actions:
            # Prepend the per-row action checkbox column.
            list_display = ['action_checkbox'] + list(list_display)
        ChangeList = self.get_changelist(request)
        try:
            cl = ChangeList(request, self.model, list_display,
                list_display_links, list_filter, self.date_hierarchy,
                search_fields, self.list_select_related, self.list_per_page,
                self.list_max_show_all, self.list_editable, self)
        except IncorrectLookupParameters:
            # Bad lookup parameters were given. Redirect to the plain
            # changelist page with ERROR_FLAG appended; if the flag is
            # already present we are looping, so show the error page instead.
            if ERROR_FLAG in request.GET.keys():
                return SimpleTemplateResponse('admin/invalid_setup.html', {
                    'title': _('Database error'),
                })
            return HttpResponseRedirect(request.path + '?' + ERROR_FLAG + '=1')
        # If the request was POSTed this might be a bulk action or a bulk
        # edit. Try an action first; if it is not one, the POST falls through
        # to the bulk-edit handling below.
        action_failed = False
        selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
        # Actions with no confirmation
        if (actions and request.method == 'POST' and
                'index' in request.POST and '_save' not in request.POST):
            if selected:
                response = self.response_action(request, queryset=cl.get_queryset(request))
                if response:
                    return response
                else:
                    action_failed = True
            else:
                msg = _("Items must be selected in order to perform "
                        "actions on them. No items have been changed.")
                self.message_user(request, msg, messages.WARNING)
                action_failed = True
        # Actions with confirmation
        if (actions and request.method == 'POST' and
                helpers.ACTION_CHECKBOX_NAME in request.POST and
                'index' not in request.POST and '_save' not in request.POST):
            if selected:
                response = self.response_action(request, queryset=cl.get_queryset(request))
                if response:
                    return response
                else:
                    action_failed = True
        # If we're allowing changelist editing, we need to construct a formset
        # for the changelist given all the fields to be edited. Then we'll
        # use the formset to validate/process POSTed data.
        formset = cl.formset = None
        # Handle POSTed bulk-edit data.
        if (request.method == "POST" and cl.list_editable and
                '_save' in request.POST and not action_failed):
            FormSet = self.get_changelist_formset(request)
            formset = cl.formset = FormSet(request.POST, request.FILES, queryset=cl.result_list)
            if formset.is_valid():
                changecount = 0
                for form in formset.forms:
                    if form.has_changed():
                        obj = self.save_form(request, form, change=True)
                        self.save_model(request, obj, form, change=True)
                        self.save_related(request, form, formsets=[], change=True)
                        change_msg = self.construct_change_message(request, form, None)
                        self.log_change(request, obj, change_msg)
                        changecount += 1
                if changecount:
                    if changecount == 1:
                        name = force_text(opts.verbose_name)
                    else:
                        name = force_text(opts.verbose_name_plural)
                    msg = ungettext("%(count)s %(name)s was changed successfully.",
                                    "%(count)s %(name)s were changed successfully.",
                                    changecount) % {'count': changecount,
                                                    'name': name,
                                                    'obj': force_text(obj)}
                    self.message_user(request, msg, messages.SUCCESS)
                return HttpResponseRedirect(request.get_full_path())
        # Handle GET -- construct a formset for display.
        elif cl.list_editable:
            FormSet = self.get_changelist_formset(request)
            formset = cl.formset = FormSet(queryset=cl.result_list)
        # Build the list of media to be used by the formset.
        if formset:
            media = self.media + formset.media
        else:
            media = self.media
        # Build the action form and populate it with available actions.
        if actions:
            action_form = self.action_form(auto_id=None)
            action_form.fields['action'].choices = self.get_action_choices(request)
        else:
            action_form = None
        selection_note_all = ungettext('%(total_count)s selected',
            'All %(total_count)s selected', cl.result_count)
        context = dict(
            self.admin_site.each_context(),
            module_name=force_text(opts.verbose_name_plural),
            selection_note=_('0 of %(cnt)s selected') % {'cnt': len(cl.result_list)},
            selection_note_all=selection_note_all % {'total_count': cl.result_count},
            title=cl.title,
            is_popup=cl.is_popup,
            to_field=cl.to_field,
            cl=cl,
            media=media,
            has_add_permission=self.has_add_permission(request),
            opts=cl.opts,
            action_form=action_form,
            actions_on_top=self.actions_on_top,
            actions_on_bottom=self.actions_on_bottom,
            actions_selection_counter=self.actions_selection_counter,
            preserved_filters=self.get_preserved_filters(request),
        )
        context.update(extra_context or {})
        return TemplateResponse(request, self.change_list_template or [
            'admin/%s/%s/change_list.html' % (app_label, opts.model_name),
            'admin/%s/change_list.html' % app_label,
            'admin/change_list.html'
        ], context, current_app=self.admin_site.name)
    @csrf_protect_m
    @transaction.atomic
    def delete_view(self, request, object_id, extra_context=None):
        """The "delete" admin view: show a confirmation page on GET, delete
        the object (and its cascade) on a confirmed POST."""
        opts = self.model._meta
        app_label = opts.app_label
        obj = self.get_object(request, unquote(object_id))
        if not self.has_delete_permission(request, obj):
            raise PermissionDenied
        if obj is None:
            raise Http404(
                _('%(name)s object with primary key %(key)r does not exist.') %
                {'name': force_text(opts.verbose_name), 'key': escape(object_id)}
            )
        using = router.db_for_write(self.model)
        # Populate deleted_objects, a data structure of all related objects that
        # will also be deleted, plus the permissions required and any objects
        # protected against deletion.
        (deleted_objects, model_count, perms_needed, protected) = get_deleted_objects(
            [obj], opts, request.user, self.admin_site, using)
        if request.POST:  # The user has already confirmed the deletion.
            if perms_needed:
                raise PermissionDenied
            obj_display = force_text(obj)
            self.log_deletion(request, obj, obj_display)
            self.delete_model(request, obj)
            return self.response_delete(request, obj_display)
        object_name = force_text(opts.verbose_name)
        if perms_needed or protected:
            title = _("Cannot delete %(name)s") % {"name": object_name}
        else:
            title = _("Are you sure?")
        context = dict(
            self.admin_site.each_context(),
            title=title,
            object_name=object_name,
            object=obj,
            deleted_objects=deleted_objects,
            model_count=dict(model_count),
            perms_lacking=perms_needed,
            protected=protected,
            opts=opts,
            app_label=app_label,
            preserved_filters=self.get_preserved_filters(request),
        )
        context.update(extra_context or {})
        return self.render_delete_form(request, context)
    def history_view(self, request, object_id, extra_context=None):
        """The "history" admin view: list the LogEntry rows recorded for a
        single object, oldest first."""
        from django.contrib.admin.models import LogEntry
        # First check if the user can see this history.
        model = self.model
        obj = get_object_or_404(self.get_queryset(request), pk=unquote(object_id))
        if not self.has_change_permission(request, obj):
            raise PermissionDenied
        # Then get the history for this object.
        opts = model._meta
        app_label = opts.app_label
        action_list = LogEntry.objects.filter(
            object_id=unquote(object_id),
            content_type=get_content_type_for_model(model)
        ).select_related().order_by('action_time')
        context = dict(self.admin_site.each_context(),
            title=_('Change history: %s') % force_text(obj),
            action_list=action_list,
            module_name=capfirst(force_text(opts.verbose_name_plural)),
            object=obj,
            opts=opts,
            preserved_filters=self.get_preserved_filters(request),
        )
        context.update(extra_context or {})
        return TemplateResponse(request, self.object_history_template or [
            "admin/%s/%s/object_history.html" % (app_label, opts.model_name),
            "admin/%s/object_history.html" % app_label,
            "admin/object_history.html"
        ], context, current_app=self.admin_site.name)
def _create_formsets(self, request, obj):
formsets = []
inline_instances = []
prefixes = {}
get_formsets_args = [request]
if obj.pk:
get_formsets_args.append(obj)
for FormSet, inline in self.get_formsets_with_inlines(*get_formsets_args):
prefix = FormSet.get_default_prefix()
prefixes[prefix] = prefixes.get(prefix, 0) + 1
if prefixes[prefix] != 1 or not prefix:
prefix = "%s-%s" % (prefix, prefixes[prefix])
formset_params = {
'instance': obj,
'prefix': prefix,
'queryset': inline.get_queryset(request),
}
if request.method == 'POST':
formset_params.update({
'data': request.POST,
'files': request.FILES,
'save_as_new': '_saveasnew' in request.POST
})
formsets.append(FormSet(**formset_params))
inline_instances.append(inline)
return formsets, inline_instances
class InlineModelAdmin(BaseModelAdmin):
    """
    Options for inline editing of ``model`` instances.

    Provide ``fk_name`` to specify the attribute name of the ``ForeignKey``
    from ``model`` to its parent. This is required if ``model`` has more than
    one ``ForeignKey`` to its parent.
    """
    model = None  # The inlined model class (required).
    fk_name = None  # FK back to the parent; needed only when ambiguous.
    formset = BaseInlineFormSet
    extra = 3  # Number of extra blank forms displayed.
    min_num = None
    max_num = None
    template = None  # Set by concrete subclasses (stacked/tabular).
    verbose_name = None
    verbose_name_plural = None
    can_delete = True
    show_change_link = False
    checks_class = InlineModelAdminChecks
    def __init__(self, parent_model, admin_site):
        self.admin_site = admin_site
        self.parent_model = parent_model
        self.opts = self.model._meta
        self.has_registered_model = admin_site.is_registered(self.model)
        super(InlineModelAdmin, self).__init__()
        # Fall back to the model's own verbose names when none are given.
        if self.verbose_name is None:
            self.verbose_name = self.model._meta.verbose_name
        if self.verbose_name_plural is None:
            self.verbose_name_plural = self.model._meta.verbose_name_plural
    @property
    def media(self):
        """JS assets needed by the inline widgets (minified unless DEBUG)."""
        extra = '' if settings.DEBUG else '.min'
        js = ['jquery%s.js' % extra, 'jquery.init.js', 'inlines%s.js' % extra]
        if self.prepopulated_fields:
            js.extend(['urlify.js', 'prepopulate%s.js' % extra])
        if self.filter_vertical or self.filter_horizontal:
            js.extend(['SelectBox.js', 'SelectFilter2.js'])
        return forms.Media(js=[static('admin/js/%s' % url) for url in js])
    def get_extra(self, request, obj=None, **kwargs):
        """Hook for customizing the number of extra inline forms."""
        return self.extra
    def get_min_num(self, request, obj=None, **kwargs):
        """Hook for customizing the minimum number of inline forms."""
        return self.min_num
    def get_max_num(self, request, obj=None, **kwargs):
        """Hook for customizing the maximum number of inline forms."""
        return self.max_num
    def get_formset(self, request, obj=None, **kwargs):
        """Return a BaseInlineFormSet class for use in admin add/change views."""
        if 'fields' in kwargs:
            fields = kwargs.pop('fields')
        else:
            fields = flatten_fieldsets(self.get_fieldsets(request, obj))
        if self.exclude is None:
            exclude = []
        else:
            exclude = list(self.exclude)
        exclude.extend(self.get_readonly_fields(request, obj))
        if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude:
            # Take the custom ModelForm's Meta.exclude into account only if the
            # InlineModelAdmin doesn't define its own exclude.
            exclude.extend(self.form._meta.exclude)
        # If exclude is an empty list we use None, since that's the actual
        # default expected by the formset factory.
        exclude = exclude or None
        can_delete = self.can_delete and self.has_delete_permission(request, obj)
        defaults = {
            "form": self.form,
            "formset": self.formset,
            "fk_name": self.fk_name,
            "fields": fields,
            "exclude": exclude,
            "formfield_callback": partial(self.formfield_for_dbfield, request=request),
            "extra": self.get_extra(request, obj, **kwargs),
            "min_num": self.get_min_num(request, obj, **kwargs),
            "max_num": self.get_max_num(request, obj, **kwargs),
            "can_delete": can_delete,
        }
        defaults.update(kwargs)
        base_model_form = defaults['form']
        class DeleteProtectedModelForm(base_model_form):
            def hand_clean_DELETE(self):
                """Reject deletion when protected related objects exist.

                The 'DELETE' field is validated by hand because templates
                render it via the InlineModelAdmin's generic deletion field,
                not via this form's field machinery.
                """
                if self.cleaned_data.get(DELETION_FIELD_NAME, False):
                    using = router.db_for_write(self._meta.model)
                    collector = NestedObjects(using=using)
                    collector.collect([self.instance])
                    if collector.protected:
                        objs = []
                        for p in collector.protected:
                            objs.append(
                                _('%(class_name)s %(instance)s') % {
                                    'class_name': p._meta.verbose_name,
                                    'instance': p}
                            )
                        params = {'class_name': self._meta.model._meta.verbose_name,
                                  'instance': self.instance,
                                  'related_objects': get_text_list(objs, _('and'))}
                        msg = _("Deleting %(class_name)s %(instance)s would require "
                                "deleting the following protected related objects: "
                                "%(related_objects)s")
                        raise ValidationError(msg, code='deleting_protected', params=params)
            def is_valid(self):
                result = super(DeleteProtectedModelForm, self).is_valid()
                self.hand_clean_DELETE()
                return result
        defaults['form'] = DeleteProtectedModelForm
        if defaults['fields'] is None and not modelform_defines_fields(defaults['form']):
            defaults['fields'] = forms.ALL_FIELDS
        return inlineformset_factory(self.parent_model, self.model, **defaults)
    def get_fields(self, request, obj=None):
        if self.fields:
            return self.fields
        # Derive the field list from the generated form plus read-only fields.
        form = self.get_formset(request, obj, fields=None).form
        return list(form.base_fields) + list(self.get_readonly_fields(request, obj))
    def get_queryset(self, request):
        # Hide all rows from users who lack change permission.
        queryset = super(InlineModelAdmin, self).get_queryset(request)
        if not self.has_change_permission(request):
            queryset = queryset.none()
        return queryset
    def has_add_permission(self, request):
        if self.opts.auto_created:
            # Auto-created intermediate model (e.g. an M2M through table),
            # which doesn't have its own individual permissions. The user needs
            # change permission on the related model to work with it.
            return self.has_change_permission(request)
        return super(InlineModelAdmin, self).has_add_permission(request)
    def has_change_permission(self, request, obj=None):
        opts = self.opts
        if opts.auto_created:
            # The model was auto-created as an M2M intermediary: check the
            # permission against the target model's opts instead.
            # NOTE(review): field.rel/.to is the pre-Django-1.8 relation API.
            for field in opts.fields:
                if field.rel and field.rel.to != self.parent_model:
                    opts = field.rel.to._meta
                    break
        codename = get_permission_codename('change', opts)
        return request.user.has_perm("%s.%s" % (opts.app_label, codename))
    def has_delete_permission(self, request, obj=None):
        if self.opts.auto_created:
            # Auto-created intermediate model (e.g. an M2M through table),
            # which doesn't have its own individual permissions. The user needs
            # change permission on the related model to work with it.
            return self.has_change_permission(request, obj)
        return super(InlineModelAdmin, self).has_delete_permission(request, obj)
class StackedInline(InlineModelAdmin):
    """Inline editor rendering each related object as a stacked fieldset."""
    template = 'admin/edit_inline/stacked.html'
class TabularInline(InlineModelAdmin):
    """Inline editor rendering related objects as rows of a table."""
    template = 'admin/edit_inline/tabular.html'
| true | true |
f7fce0c8d36f9c28a3bc3fff390f8112165c8f69 | 7,508 | py | Python | tools/format/path_generator_test.py | ChrisCummins/format | d42b4dafcd7c4b187311473f1b446e0ca1988b12 | [
"Apache-2.0"
] | null | null | null | tools/format/path_generator_test.py | ChrisCummins/format | d42b4dafcd7c4b187311473f1b446e0ca1988b12 | [
"Apache-2.0"
] | 12 | 2020-01-12T11:55:03.000Z | 2020-01-17T01:00:23.000Z | tools/format/path_generator_test.py | ChrisCummins/format | d42b4dafcd7c4b187311473f1b446e0ca1988b12 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Chris Cummins <chrisc.101@gmail.com>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for //tools/format:path_generator."""
import os
import pathlib
from labm8.py import fs
from labm8.py import test
from tools.format import path_generator as path_generator_lib
FLAGS = test.FLAGS
@test.Fixture(scope="function")
def path_generator():
  """Provide a fresh PathGenerator that honors `.formatignore` files."""
  return path_generator_lib.PathGenerator(".formatignore")
def MakeFiles(relpaths):
  """Create an empty file at each given path, making parent directories.

  Relative paths are resolved against the current working directory;
  absolute paths are created as-is.
  """
  for relpath in relpaths:
    target = pathlib.Path(relpath)
    target.parent.mkdir(exist_ok=True, parents=True)
    target.touch()
def test_GeneratePaths_non_existent_path(
  path_generator: path_generator_lib.PathGenerator, tempdir: pathlib.Path
):
  """A path that does not exist yields no results."""
  paths = list(path_generator.GeneratePaths([str(tempdir / "not_a_path")]))
  assert paths == []
def test_GeneratePaths_single_abspath(
  path_generator: path_generator_lib.PathGenerator, tempdir: pathlib.Path
):
  """An absolute path to a file yields exactly that file."""
  MakeFiles(
    [tempdir / "hello.txt",]
  )
  paths = list(path_generator.GeneratePaths([str(tempdir / "hello.txt")]))
  assert paths == [tempdir / "hello.txt"]
def test_GeneratePaths_single_relpath(
  path_generator: path_generator_lib.PathGenerator, tempdir: pathlib.Path
):
  """A path relative to the working directory resolves to an absolute path."""
  os.chdir(tempdir)
  MakeFiles(
    [tempdir / "hello.txt",]
  )
  paths = list(path_generator.GeneratePaths(["hello.txt"]))
  assert paths == [
    tempdir / "hello.txt",
  ]
def test_GeneratePaths_empty_directory(
  path_generator: path_generator_lib.PathGenerator, tempdir: pathlib.Path
):
  """An empty directory yields no results."""
  paths = list(path_generator.GeneratePaths([str(tempdir)]))
  assert paths == []
def test_GeneratePaths_directory_with_file(
  path_generator: path_generator_lib.PathGenerator, tempdir: pathlib.Path
):
  """A directory argument yields the files contained inside it."""
  MakeFiles(
    [tempdir / "a",]
  )
  paths = list(path_generator.GeneratePaths([str(tempdir)]))
  assert paths == [
    tempdir / "a",
  ]
def test_GeneratePaths_file_in_ignore_list(
  path_generator: path_generator_lib.PathGenerator, tempdir: pathlib.Path
):
  """A file named in .formatignore is excluded; the ignore file is kept."""
  MakeFiles(
    [tempdir / ".formatignore", tempdir / "a",]
  )
  fs.Write(tempdir / ".formatignore", "a".encode("utf-8"))
  paths = list(path_generator.GeneratePaths([str(tempdir)]))
  assert paths == [tempdir / ".formatignore"]
def test_GeneratePaths_ignore_list_glob(
  path_generator: path_generator_lib.PathGenerator, tempdir: pathlib.Path
):
  """A '*' ignore pattern excludes everything but the dotfile ignore list."""
  MakeFiles(
    [tempdir / ".formatignore", tempdir / "a",]
  )
  fs.Write(tempdir / ".formatignore", "*".encode("utf-8"))
  paths = list(path_generator.GeneratePaths([str(tempdir)]))
  assert paths == [tempdir / ".formatignore"]
def test_GeneratePaths_ignore_list_glob_dot_files(
  path_generator: path_generator_lib.PathGenerator, tempdir: pathlib.Path
):
  """A '.*' pattern ignores dotfiles, including .formatignore itself."""
  MakeFiles(
    [tempdir / ".formatignore", tempdir / "a",]
  )
  fs.Write(tempdir / ".formatignore", ".*".encode("utf-8"))
  paths = list(path_generator.GeneratePaths([str(tempdir)]))
  assert paths == [
    tempdir / "a",
  ]
def test_GeneratePaths_ignore_list_glob_unignored(
  path_generator: path_generator_lib.PathGenerator, tempdir: pathlib.Path
):
  """A '!' pattern re-includes a file that an earlier pattern ignored."""
  os.chdir(tempdir)
  MakeFiles(
    [".formatignore", "a", "b", "c",]
  )
  fs.Write(".formatignore", "*\n!a".encode("utf-8"))
  paths = list(path_generator.GeneratePaths(["."]))
  assert paths == [
    tempdir / ".formatignore",
    tempdir / "a",
  ]
def test_GeneratePaths_ignore_list_parent_directory(
  path_generator: path_generator_lib.PathGenerator, tempdir: pathlib.Path
):
  """Ignoring a directory name excludes every file beneath it."""
  os.chdir(tempdir)
  MakeFiles(
    [".formatignore", "src/a", "src/b",]
  )
  fs.Write(".formatignore", "src".encode("utf-8"))
  paths = list(path_generator.GeneratePaths(["."]))
  assert paths == [tempdir / ".formatignore"]
def test_GeneratePaths_ignore_list_glob_parent_directory(
  path_generator: path_generator_lib.PathGenerator, tempdir: pathlib.Path
):
  """A '*' pattern also excludes files nested inside subdirectories.

  File layout:
    /.formatignore -> "*"
    /src
    /src/a
    /src/b
  """
  os.chdir(tempdir)
  MakeFiles(
    [".formatignore", "src/a", "src/b",]
  )
  fs.Write(".formatignore", "*".encode("utf-8"))
  paths = list(path_generator.GeneratePaths(["."]))
  assert paths == [tempdir / ".formatignore"]
def test_GeneratePaths_ignore_list_recurisve_glob(
  path_generator: path_generator_lib.PathGenerator, tempdir: pathlib.Path
):
  """Test ignoring files matched by a recursive (``**``) glob pattern."""
  # NOTE(review): "recurisve" in the test name is a typo for "recursive".
  # Renaming is deferred: pytest discovers tests by the test_ prefix only,
  # but keeping the name avoids churn in test-selection expressions.
  os.chdir(tempdir)
  MakeFiles(
    [".formatignore", "src/a", "src/b", "src/c/a/a", "src/c/a/b",]
  )
  fs.Write(".formatignore", "**/a".encode("utf-8"))
  paths = list(path_generator.GeneratePaths(["."]))
  # The stray debugging print(paths) was removed; the assertion suffices.
  assert paths == [
    tempdir / ".formatignore",
    tempdir / "src/b",
  ]
def test_GeneratePaths_ignore_git_submodule(
  path_generator: path_generator_lib.PathGenerator, tempdir: pathlib.Path
):
  """Git submodules (directories containing a .git file) are not visited."""
  os.chdir(tempdir)
  MakeFiles(
    [
      ".git/config",  # Fake repo root, should be ignored
      "README",
      "src/a",
      "src/b",
      "src/c/d",
      "src/submod/.git",  # Fake submodule, should be ignored
      "src/submod/a",  # should be ignored
      "src/submod/b",  # should be ignored
      "src/submod/c/c",  # should be ignored
    ]
  )
  paths = set(path_generator.GeneratePaths(["."]))
  assert paths == {
    tempdir / "README",
    tempdir / "src/a",
    tempdir / "src/b",
    tempdir / "src/c/d",
  }
def test_GeneratePaths_explicitly_requested_submodule(
  path_generator: path_generator_lib.PathGenerator, tempdir: pathlib.Path
):
  """A git submodule IS visited when it is explicitly asked for by path."""
  os.chdir(tempdir)
  MakeFiles(
    [
      ".git/config",  # Fake repo root, should be ignored
      "README",
      "src/a",
      "src/b",
      "src/c/d",
      "src/submod/.git",  # Fake submodule marker
      "src/submod/a",  # visited: submodule requested directly
      "src/submod/b",  # visited: submodule requested directly
      "src/submod/c/c",  # visited: submodule requested directly
    ]
  )
  paths = set(path_generator.GeneratePaths(["src/submod"]))
  assert paths == {
    tempdir / "src/submod/a",
    tempdir / "src/submod/b",
    tempdir / "src/submod/c/c",
  }
def test_GeneratePaths_ignored_in_glob_expansion(
  path_generator: path_generator_lib.PathGenerator, tempdir: pathlib.Path
):
  """A git submodule is NOT visited if it would only be reached through a
  shell glob expansion rather than an explicit path argument.
  """
  os.chdir(tempdir)
  MakeFiles(
    [
      ".git/config",  # Fake repo root, should be ignored
      "README",
      "src/a",
      "src/b",
      "src/c/d",
      "src/submod/.git",  # Fake submodule, should be ignored
      "src/submod/a",  # should be ignored
      "src/submod/b",  # should be ignored
      "src/submod/c/c",  # should be ignored
    ]
  )
  paths = list(path_generator.GeneratePaths(["src/*"]))
  assert paths == [
    tempdir / "src/a",
    tempdir / "src/b",
    tempdir / "src/c/d",
  ]
# Delegate to labm8's pytest wrapper when this file is run as a script.
if __name__ == "__main__":
  test.Main()
| 25.537415 | 76 | 0.681273 |
import os
import pathlib
from labm8.py import fs
from labm8.py import test
from tools.format import path_generator as path_generator_lib
FLAGS = test.FLAGS
@test.Fixture(scope="function")
def path_generator():
  """Provide a fresh PathGenerator that honors `.formatignore` files."""
  return path_generator_lib.PathGenerator(".formatignore")
def MakeFiles(relpaths):
  """Create an empty file at each path, making parent directories first."""
  for path in relpaths:
    path = pathlib.Path(path)
    path.parent.mkdir(exist_ok=True, parents=True)
    path.touch()
def test_GeneratePaths_non_existent_path(
  path_generator: path_generator_lib.PathGenerator, tempdir: pathlib.Path
):
  """A path that does not exist yields no results."""
  paths = list(path_generator.GeneratePaths([str(tempdir / "not_a_path")]))
  assert paths == []
def test_GeneratePaths_single_abspath(
  path_generator: path_generator_lib.PathGenerator, tempdir: pathlib.Path
):
  """An absolute path to a file yields exactly that file."""
  MakeFiles(
    [tempdir / "hello.txt",]
  )
  paths = list(path_generator.GeneratePaths([str(tempdir / "hello.txt")]))
  assert paths == [tempdir / "hello.txt"]
def test_GeneratePaths_single_relpath(
  path_generator: path_generator_lib.PathGenerator, tempdir: pathlib.Path
):
  """A path relative to the working directory resolves to an absolute path."""
  os.chdir(tempdir)
  MakeFiles(
    [tempdir / "hello.txt",]
  )
  paths = list(path_generator.GeneratePaths(["hello.txt"]))
  assert paths == [
    tempdir / "hello.txt",
  ]
def test_GeneratePaths_empty_directory(
  path_generator: path_generator_lib.PathGenerator, tempdir: pathlib.Path
):
  """An empty directory yields no results."""
  paths = list(path_generator.GeneratePaths([str(tempdir)]))
  assert paths == []
def test_GeneratePaths_directory_with_file(
  path_generator: path_generator_lib.PathGenerator, tempdir: pathlib.Path
):
  """A directory argument yields the files contained inside it."""
  MakeFiles(
    [tempdir / "a",]
  )
  paths = list(path_generator.GeneratePaths([str(tempdir)]))
  assert paths == [
    tempdir / "a",
  ]
def test_GeneratePaths_file_in_ignore_list(
  path_generator: path_generator_lib.PathGenerator, tempdir: pathlib.Path
):
  """A file named in .formatignore is excluded; the ignore file is kept."""
  MakeFiles(
    [tempdir / ".formatignore", tempdir / "a",]
  )
  fs.Write(tempdir / ".formatignore", "a".encode("utf-8"))
  paths = list(path_generator.GeneratePaths([str(tempdir)]))
  assert paths == [tempdir / ".formatignore"]
def test_GeneratePaths_ignore_list_glob(
  path_generator: path_generator_lib.PathGenerator, tempdir: pathlib.Path
):
  """A '*' ignore pattern excludes everything but the dotfile ignore list."""
  MakeFiles(
    [tempdir / ".formatignore", tempdir / "a",]
  )
  fs.Write(tempdir / ".formatignore", "*".encode("utf-8"))
  paths = list(path_generator.GeneratePaths([str(tempdir)]))
  assert paths == [tempdir / ".formatignore"]
def test_GeneratePaths_ignore_list_glob_dot_files(
  path_generator: path_generator_lib.PathGenerator, tempdir: pathlib.Path
):
  """A '.*' pattern ignores dotfiles, including .formatignore itself."""
  MakeFiles(
    [tempdir / ".formatignore", tempdir / "a",]
  )
  fs.Write(tempdir / ".formatignore", ".*".encode("utf-8"))
  paths = list(path_generator.GeneratePaths([str(tempdir)]))
  assert paths == [
    tempdir / "a",
  ]
def test_GeneratePaths_ignore_list_glob_unignored(
  path_generator: path_generator_lib.PathGenerator, tempdir: pathlib.Path
):
  """A '!' pattern re-includes a file that an earlier pattern ignored."""
  os.chdir(tempdir)
  MakeFiles(
    [".formatignore", "a", "b", "c",]
  )
  fs.Write(".formatignore", "*\n!a".encode("utf-8"))
  paths = list(path_generator.GeneratePaths(["."]))
  assert paths == [
    tempdir / ".formatignore",
    tempdir / "a",
  ]
def test_GeneratePaths_ignore_list_parent_directory(
  path_generator: path_generator_lib.PathGenerator, tempdir: pathlib.Path
):
  """Ignoring a directory name excludes every file beneath it."""
  os.chdir(tempdir)
  MakeFiles(
    [".formatignore", "src/a", "src/b",]
  )
  fs.Write(".formatignore", "src".encode("utf-8"))
  paths = list(path_generator.GeneratePaths(["."]))
  assert paths == [tempdir / ".formatignore"]
def test_GeneratePaths_ignore_list_glob_parent_directory(
  path_generator: path_generator_lib.PathGenerator, tempdir: pathlib.Path
):
  """A '*' pattern also excludes files nested inside subdirectories."""
  os.chdir(tempdir)
  MakeFiles(
    [".formatignore", "src/a", "src/b",]
  )
  fs.Write(".formatignore", "*".encode("utf-8"))
  paths = list(path_generator.GeneratePaths(["."]))
  assert paths == [tempdir / ".formatignore"]
def test_GeneratePaths_ignore_list_recurisve_glob(
  path_generator: path_generator_lib.PathGenerator, tempdir: pathlib.Path
):
  """Test ignoring files matched by a recursive (``**``) glob pattern."""
  # NOTE(review): "recurisve" in the test name is a typo for "recursive";
  # the name is kept since pytest discovers tests by the test_ prefix.
  os.chdir(tempdir)
  MakeFiles(
    [".formatignore", "src/a", "src/b", "src/c/a/a", "src/c/a/b",]
  )
  fs.Write(".formatignore", "**/a".encode("utf-8"))
  paths = list(path_generator.GeneratePaths(["."]))
  # The stray debugging print(paths) was removed; the assertion suffices.
  assert paths == [
    tempdir / ".formatignore",
    tempdir / "src/b",
  ]
def test_GeneratePaths_ignore_git_submodule(
  path_generator: path_generator_lib.PathGenerator, tempdir: pathlib.Path
):
  """Git submodules (directories containing a .git file) are not visited."""
  os.chdir(tempdir)
  MakeFiles(
    [
      ".git/config",  # fake repo root, ignored
      "README",
      "src/a",
      "src/b",
      "src/c/d",
      "src/submod/.git",  # fake submodule marker, ignored
      "src/submod/a",  # ignored: inside submodule
      "src/submod/b",  # ignored: inside submodule
      "src/submod/c/c",  # ignored: inside submodule
    ]
  )
  paths = set(path_generator.GeneratePaths(["."]))
  assert paths == {
    tempdir / "README",
    tempdir / "src/a",
    tempdir / "src/b",
    tempdir / "src/c/d",
  }
def test_GeneratePaths_explicitly_requested_submodule(
  path_generator: path_generator_lib.PathGenerator, tempdir: pathlib.Path
):
  """A git submodule IS visited when it is explicitly asked for by path."""
  os.chdir(tempdir)
  MakeFiles(
    [
      ".git/config",  # fake repo root, ignored
      "README",
      "src/a",
      "src/b",
      "src/c/d",
      "src/submod/.git",  # fake submodule marker
      "src/submod/a",  # visited: submodule requested directly
      "src/submod/b",  # visited: submodule requested directly
      "src/submod/c/c",  # visited: submodule requested directly
    ]
  )
  paths = set(path_generator.GeneratePaths(["src/submod"]))
  assert paths == {
    tempdir / "src/submod/a",
    tempdir / "src/submod/b",
    tempdir / "src/submod/c/c",
  }
def test_GeneratePaths_ignored_in_glob_expansion(
  path_generator: path_generator_lib.PathGenerator, tempdir: pathlib.Path
):
  """A git submodule is NOT visited when reached only via glob expansion."""
  os.chdir(tempdir)
  MakeFiles(
    [
      ".git/config",  # fake repo root, ignored
      "README",
      "src/a",
      "src/b",
      "src/c/d",
      "src/submod/.git",  # fake submodule marker, ignored
      "src/submod/a",  # ignored: only matched by the glob
      "src/submod/b",  # ignored: only matched by the glob
      "src/submod/c/c",  # ignored: only matched by the glob
    ]
  )
  paths = list(path_generator.GeneratePaths(["src/*"]))
  assert paths == [
    tempdir / "src/a",
    tempdir / "src/b",
    tempdir / "src/c/d",
  ]
# Delegate to labm8's pytest wrapper when this file is run as a script.
if __name__ == "__main__":
  test.Main()
| true | true |
f7fce1ad405e292cbbfdc980a20a50c380185185 | 13,883 | py | Python | airflow/providers/salesforce/hooks/salesforce.py | troywinter/airflow | ba66ba0d97941c55d9f00f66329a9d3c7ad673e7 | [
"Apache-2.0"
] | 3 | 2015-08-25T13:56:44.000Z | 2020-03-21T10:26:58.000Z | airflow/providers/salesforce/hooks/salesforce.py | troywinter/airflow | ba66ba0d97941c55d9f00f66329a9d3c7ad673e7 | [
"Apache-2.0"
] | 37 | 2020-07-21T07:50:02.000Z | 2022-03-29T22:31:28.000Z | airflow/providers/salesforce/hooks/salesforce.py | troywinter/airflow | ba66ba0d97941c55d9f00f66329a9d3c7ad673e7 | [
"Apache-2.0"
] | 4 | 2020-07-17T14:02:28.000Z | 2022-02-23T04:29:58.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
This module contains a Salesforce Hook which allows you to connect to your Salesforce instance,
retrieve data from it, and write that data to a file for other uses.
.. note:: this hook also relies on the simple_salesforce package:
https://github.com/simple-salesforce/simple-salesforce
"""
import logging
import time
import pandas as pd
from simple_salesforce import Salesforce
from airflow.hooks.base_hook import BaseHook
log = logging.getLogger(__name__)
class SalesforceHook(BaseHook):
    """
    Create new connection to Salesforce and allows you to pull data out of SFDC and save it to a file.

    You can then use that file with other Airflow operators to move the data into another data source.

    :param conn_id: the name of the connection that has the parameters we need to connect to Salesforce.
        The connection should be type `http` and include a user's security token in the `Extras` field.
    :type conn_id: str

    .. note::
        For the HTTP connection type, you can include a
        JSON structure in the `Extras` field.
        We need a user's security token to connect to Salesforce.
        So we define it in the `Extras` field as `{"security_token":"YOUR_SECURITY_TOKEN"}`
        For sandbox mode, add `{"domain":"test"}` in the `Extras` field
    """

    def __init__(self, conn_id):
        super().__init__()
        self.conn_id = conn_id
        self.conn = None  # lazily initialized by get_conn()

    def get_conn(self):
        """
        Sign into Salesforce, only if we are not already signed in.

        :return: the cached Salesforce API client.
        """
        if not self.conn:
            connection = self.get_connection(self.conn_id)
            extras = connection.extra_dejson
            self.conn = Salesforce(
                username=connection.login,
                password=connection.password,
                security_token=extras['security_token'],
                instance_url=connection.host,
                domain=extras.get('domain', None)
            )
        return self.conn

    def make_query(self, query, include_deleted=False, query_params=None):
        """
        Make a query to Salesforce.

        :param query: The query to make to Salesforce.
        :type query: str
        :param include_deleted: True if the query should include deleted records.
        :type include_deleted: bool
        :param query_params: Additional optional arguments
        :type query_params: dict
        :return: The query result.
        :rtype: dict
        """
        conn = self.get_conn()
        self.log.info("Querying for all objects")
        query_params = query_params or {}
        query_results = conn.query_all(query, include_deleted=include_deleted, **query_params)
        self.log.info("Received results: Total size: %s; Done: %s",
                      query_results['totalSize'], query_results['done'])
        return query_results

    def describe_object(self, obj):
        """
        Get the description of an object from Salesforce.

        This description is the object's schema and
        some extra metadata that Salesforce stores for each object.

        :param obj: The name of the Salesforce object that we are getting a description of.
        :type obj: str
        :return: the description of the Salesforce object.
        :rtype: dict
        """
        conn = self.get_conn()
        # getattr() is the idiomatic spelling; calling conn.__getattr__() directly
        # bypasses the normal attribute lookup protocol.
        return getattr(conn, obj).describe()

    def get_available_fields(self, obj):
        """
        Get a list of all available fields for an object.

        :param obj: The name of the Salesforce object that we are getting a description of.
        :type obj: str
        :return: the names of the fields.
        :rtype: list(str)
        """
        # describe_object() signs in on demand, so no separate get_conn() call is needed.
        obj_description = self.describe_object(obj)
        return [field['name'] for field in obj_description['fields']]

    def get_object_from_salesforce(self, obj, fields):
        """
        Get all instances of the `object` from Salesforce.

        For each model, only get the fields specified in fields.

        All we really do underneath the hood is run:
            SELECT <fields> FROM <obj>;

        :param obj: The object name to get from Salesforce.
        :type obj: str
        :param fields: The fields to get from the object.
        :type fields: iterable
        :return: all instances of the object from Salesforce.
        :rtype: dict
        """
        query = "SELECT {} FROM {}".format(",".join(fields), obj)
        # Long queries are abbreviated so the log line stays readable.
        self.log.info("Making query to Salesforce: %s",
                      query if len(query) < 30 else " ... ".join([query[:15], query[-15:]]))
        return self.make_query(query)

    @classmethod
    def _to_timestamp(cls, column):
        """
        Convert a column of a dataframe to UNIX timestamps if applicable.

        :param column: A Series object representing a column of a dataframe.
        :type column: pandas.Series
        :return: a new series that maintains the same index as the original
        :rtype: pandas.Series
        """
        # Try to convert the column to datetimes.
        # Letting pandas guess without a format would also convert plain numeric
        # columns (e.g. integers between 0 and 10 become timestamps), so if the
        # conversion fails we return the original column untouched.
        try:
            column = pd.to_datetime(column)
        except ValueError:
            log.error("Could not convert field to timestamps: %s", column.name)
            return column
        # Convert the datetimes into UNIX timestamps.
        # NaT cannot be converted to a timestamp, so it becomes NaN.
        converted = []
        for value in column:
            try:
                converted.append(value.timestamp())
            except (ValueError, AttributeError):
                # Bug fix: the `pandas.np` alias was deprecated in pandas 1.0
                # and removed in 2.0; use a plain float NaN instead.
                converted.append(float("nan"))
        return pd.Series(converted, index=column.index)

    def write_object_to_file(self,
                             query_results,
                             filename,
                             fmt="csv",
                             coerce_to_timestamp=False,
                             record_time_added=False):
        """
        Write query results to file.

        Acceptable formats are:
            - csv:
                comma-separated-values file. This is the default format.
            - json:
                JSON array. Each element in the array is a different row.
            - ndjson:
                JSON array but each element is new-line delimited instead of comma delimited like in `json`

        This requires a significant amount of cleanup.
        Pandas doesn't handle output to CSV and json in a uniform way.
        This is especially painful for datetime types.
        Pandas wants to write them as strings in CSV, but as millisecond Unix timestamps.

        By default, this function will try and leave all values as they are represented in Salesforce.
        You use the `coerce_to_timestamp` flag to force all datetimes to become Unix timestamps (UTC).
        This is can be greatly beneficial as it will make all of your datetime fields look the same,
        and makes it easier to work with in other database environments

        :param query_results: the results from a SQL query
        :type query_results: list of dict
        :param filename: the name of the file where the data should be dumped to
        :type filename: str
        :param fmt: the format you want the output in. Default: 'csv'
        :type fmt: str
        :param coerce_to_timestamp: True if you want all datetime fields to be converted into Unix timestamps.
            False if you want them to be left in the same format as they were in Salesforce.
            Leaving the value as False will result in datetimes being strings. Default: False
        :type coerce_to_timestamp: bool
        :param record_time_added: True if you want to add a Unix timestamp field
            to the resulting data that marks when the data was fetched from Salesforce. Default: False
        :type record_time_added: bool
        :return: the dataframe that gets written to the file.
        :rtype: pandas.Dataframe
        """
        fmt = fmt.lower()
        if fmt not in ['csv', 'json', 'ndjson']:
            raise ValueError("Format value is not recognized: {}".format(fmt))
        df = self.object_to_df(query_results=query_results, coerce_to_timestamp=coerce_to_timestamp,
                               record_time_added=record_time_added)
        # Datetimes are handled differently per format: for JSON we emit epoch
        # timestamps in seconds (fairly standard for JavaScript); for CSV, strings.
        if fmt == "csv":
            # Embedded newlines break the CSV format, so strip them from every
            # string-typed column before writing.
            self.log.info("Cleaning data and writing to CSV")
            possible_strings = df.columns[df.dtypes == "object"]
            df[possible_strings] = df[possible_strings].astype(str).apply(
                lambda x: x.str.replace("\r\n", "").str.replace("\n", "")
            )
            df.to_csv(filename, index=False)
        elif fmt == "json":
            # Keyword the second positional argument (`orient`) for clarity and
            # forward compatibility with pandas' keyword-only deprecations.
            df.to_json(filename, orient="records", date_unit="s")
        elif fmt == "ndjson":
            df.to_json(filename, orient="records", lines=True, date_unit="s")
        return df

    def object_to_df(self, query_results, coerce_to_timestamp=False,
                     record_time_added=False):
        """
        Export query results to dataframe.

        By default, this function will try and leave all values as they are represented in Salesforce.
        You use the `coerce_to_timestamp` flag to force all datetimes to become Unix timestamps (UTC).
        This is can be greatly beneficial as it will make all of your datetime fields look the same,
        and makes it easier to work with in other database environments

        :param query_results: the results from a SQL query
        :type query_results: list of dict
        :param coerce_to_timestamp: True if you want all datetime fields to be converted into Unix timestamps.
            False if you want them to be left in the same format as they were in Salesforce.
            Leaving the value as False will result in datetimes being strings. Default: False
        :type coerce_to_timestamp: bool
        :param record_time_added: True if you want to add a Unix timestamp field
            to the resulting data that marks when the data was fetched from Salesforce. Default: False
        :type record_time_added: bool
        :return: the dataframe.
        :rtype: pandas.Dataframe
        """
        # Note: from_records converts integer columns containing None/NaN to
        # floats (NaN cannot exist in an integer column), which is why all
        # timestamps are written as floats in the final schema.
        df = pd.DataFrame.from_records(query_results, exclude=["attributes"])
        df.columns = [column.lower() for column in df.columns]
        # Convert columns with datetime strings to datetimes. We consult the
        # object's schema and only consider DATE or DATETIME fields - strings
        # are too general and risk unintentional conversion.
        if coerce_to_timestamp and df.shape[0] > 0:
            # The object name is stored in the "attributes" dict of each record.
            object_name = query_results[0]['attributes']['type']
            self.log.info("Coercing timestamps for: %s", object_name)
            schema = self.describe_object(object_name)
            possible_timestamp_cols = [
                field['name'].lower()
                for field in schema['fields']
                if field['type'] in ["date", "datetime"] and field['name'].lower() in df.columns
            ]
            df[possible_timestamp_cols] = df[possible_timestamp_cols].apply(self._to_timestamp)
        if record_time_added:
            fetched_time = time.time()
            df["time_fetched_from_salesforce"] = fetched_time
        return df
| 42.32622 | 110 | 0.64064 |
import logging
import time
import pandas as pd
from simple_salesforce import Salesforce
from airflow.hooks.base_hook import BaseHook
log = logging.getLogger(__name__)
class SalesforceHook(BaseHook):
def __init__(self, conn_id):
super().__init__()
self.conn_id = conn_id
self.conn = None
def get_conn(self):
if not self.conn:
connection = self.get_connection(self.conn_id)
extras = connection.extra_dejson
self.conn = Salesforce(
username=connection.login,
password=connection.password,
security_token=extras['security_token'],
instance_url=connection.host,
domain=extras.get('domain', None)
)
return self.conn
def make_query(self, query, include_deleted=False, query_params=None):
conn = self.get_conn()
self.log.info("Querying for all objects")
query_params = query_params or {}
query_results = conn.query_all(query, include_deleted=include_deleted, **query_params)
self.log.info("Received results: Total size: %s; Done: %s",
query_results['totalSize'], query_results['done'])
return query_results
def describe_object(self, obj):
conn = self.get_conn()
return conn.__getattr__(obj).describe()
def get_available_fields(self, obj):
self.get_conn()
obj_description = self.describe_object(obj)
return [field['name'] for field in obj_description['fields']]
def get_object_from_salesforce(self, obj, fields):
query = "SELECT {} FROM {}".format(",".join(fields), obj)
self.log.info("Making query to Salesforce: %s",
query if len(query) < 30 else " ... ".join([query[:15], query[-15:]]))
return self.make_query(query)
@classmethod
def _to_timestamp(cls, column):
try:
column = pd.to_datetime(column)
except ValueError:
log.error("Could not convert field to timestamps: %s", column.name)
return column
converted = []
for value in column:
try:
converted.append(value.timestamp())
except (ValueError, AttributeError):
converted.append(pd.np.NaN)
return pd.Series(converted, index=column.index)
def write_object_to_file(self,
query_results,
filename,
fmt="csv",
coerce_to_timestamp=False,
record_time_added=False):
fmt = fmt.lower()
if fmt not in ['csv', 'json', 'ndjson']:
raise ValueError("Format value is not recognized: {}".format(fmt))
df = self.object_to_df(query_results=query_results, coerce_to_timestamp=coerce_to_timestamp,
record_time_added=record_time_added)
if fmt == "csv":
self.log.info("Cleaning data and writing to CSV")
possible_strings = df.columns[df.dtypes == "object"]
df[possible_strings] = df[possible_strings].astype(str).apply(
lambda x: x.str.replace("\r\n", "").str.replace("\n", "")
)
df.to_csv(filename, index=False)
elif fmt == "json":
df.to_json(filename, "records", date_unit="s")
elif fmt == "ndjson":
df.to_json(filename, "records", lines=True, date_unit="s")
return df
def object_to_df(self, query_results, coerce_to_timestamp=False,
record_time_added=False):
# we should write all of our timestamps as FLOATS in our final schema
df = pd.DataFrame.from_records(query_results, exclude=["attributes"])
df.columns = [column.lower() for column in df.columns]
# convert columns with datetime strings to datetimes
# not all strings will be datetimes, so we ignore any errors that occur
# we get the object's definition at this point and only consider
if coerce_to_timestamp and df.shape[0] > 0:
# for each returned record
object_name = query_results[0]['attributes']['type']
self.log.info("Coercing timestamps for: %s", object_name)
schema = self.describe_object(object_name)
# possible columns that can be converted to timestamps
# are the ones that are either date or datetime types
# strings are too general and we risk unintentional conversion
possible_timestamp_cols = [
field['name'].lower()
for field in schema['fields']
if field['type'] in ["date", "datetime"] and field['name'].lower() in df.columns
]
df[possible_timestamp_cols] = df[possible_timestamp_cols].apply(self._to_timestamp)
if record_time_added:
fetched_time = time.time()
df["time_fetched_from_salesforce"] = fetched_time
return df
| true | true |
f7fce2460b5ee2d246230f74c5f22cca15697c08 | 3,467 | py | Python | st2reactor/st2reactor/sensor/base.py | kkkanil/st2 | 07cd195d7a6e177a37dd019e5c9ab8329259d0fa | [
"Apache-2.0"
] | null | null | null | st2reactor/st2reactor/sensor/base.py | kkkanil/st2 | 07cd195d7a6e177a37dd019e5c9ab8329259d0fa | [
"Apache-2.0"
] | 15 | 2021-02-11T22:58:54.000Z | 2021-08-06T18:03:47.000Z | st2reactor/st2reactor/sensor/base.py | kkkanil/st2 | 07cd195d7a6e177a37dd019e5c9ab8329259d0fa | [
"Apache-2.0"
] | 1 | 2021-07-10T15:02:29.000Z | 2021-07-10T15:02:29.000Z | # Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import abc
import six
from st2common.util import concurrency
__all__ = [
'Sensor',
'PollingSensor'
]
@six.add_metaclass(abc.ABCMeta)
class BaseSensor(object):
    """
    Common abstract base class for all sensors - not to be instantiated directly.
    """

    def __init__(self, sensor_service, config=None):
        """
        :param sensor_service: Sensor Service instance.
        :type sensor_service: :class:``st2reactor.container.sensor_wrapper.SensorService``

        :keyword config: Sensor config.
        :type config: ``dict`` or None
        """
        # The public attributes are the supported API; the underscore-prefixed
        # aliases are kept for backward compatibility (to be deprecated).
        self.sensor_service = sensor_service
        self._sensor_service = sensor_service
        self.config = config if config else {}
        self._config = self.config

    @abc.abstractmethod
    def setup(self):
        """
        Perform any one-time sensor initialization / setup work.
        """
        pass

    @abc.abstractmethod
    def run(self):
        """
        Start the sensor.
        """
        pass

    @abc.abstractmethod
    def cleanup(self):
        """
        Release any resources held by the sensor (if any).
        """
        pass

    @abc.abstractmethod
    def add_trigger(self, trigger):
        """
        Invoked when a trigger is created.
        """
        pass

    @abc.abstractmethod
    def update_trigger(self, trigger):
        """
        Invoked when a trigger is updated.
        """
        pass

    @abc.abstractmethod
    def remove_trigger(self, trigger):
        """
        Invoked when a trigger is deleted.
        """
        pass
class Sensor(BaseSensor):
    """
    Abstract base class which the passive sensors inherit from.
    """

    @abc.abstractmethod
    def run(self):
        pass
class PollingSensor(BaseSensor):
    """
    Abstract base class which the active sensors inherit from.

    Active sensors periodically poll a 3rd party system for new information.
    """

    def __init__(self, sensor_service, config=None, poll_interval=5):
        super(PollingSensor, self).__init__(sensor_service, config)
        self._poll_interval = poll_interval

    @abc.abstractmethod
    def poll(self):
        """
        Query the 3rd party system once for new information.
        """
        pass

    def run(self):
        # Poll forever, pausing for the configured interval between polls.
        while True:
            self.poll()
            concurrency.sleep(self._poll_interval)

    def get_poll_interval(self):
        """
        Return the currently configured poll interval.

        :return: Current poll interval.
        :rtype: ``float``
        """
        return self._poll_interval

    def set_poll_interval(self, poll_interval):
        """
        Change the poll interval.

        :param poll_interval: Poll interval to use.
        :type poll_interval: ``float``
        """
        self._poll_interval = poll_interval
| 24.415493 | 90 | 0.628786 |
from __future__ import absolute_import
import abc
import six
from st2common.util import concurrency
__all__ = [
'Sensor',
'PollingSensor'
]
@six.add_metaclass(abc.ABCMeta)
class BaseSensor(object):
def __init__(self, sensor_service, config=None):
self._sensor_service = sensor_service
self.sensor_service = sensor_service
self._config = config or {}
self.config = self._config
@abc.abstractmethod
def setup(self):
pass
@abc.abstractmethod
def run(self):
pass
@abc.abstractmethod
def cleanup(self):
pass
@abc.abstractmethod
def add_trigger(self, trigger):
pass
@abc.abstractmethod
def update_trigger(self, trigger):
pass
@abc.abstractmethod
def remove_trigger(self, trigger):
pass
class Sensor(BaseSensor):
@abc.abstractmethod
def run(self):
pass
class PollingSensor(BaseSensor):
def __init__(self, sensor_service, config=None, poll_interval=5):
super(PollingSensor, self).__init__(sensor_service=sensor_service, config=config)
self._poll_interval = poll_interval
@abc.abstractmethod
def poll(self):
pass
def run(self):
while True:
self.poll()
concurrency.sleep(self._poll_interval)
def get_poll_interval(self):
return self._poll_interval
def set_poll_interval(self, poll_interval):
self._poll_interval = poll_interval
| true | true |
f7fce3397fc4c51059e6c3820d6a3149533c5571 | 1,727 | py | Python | load-events.py | Pivotal-Data-Engineering/gemfire-samples | 141024d5505f1d8bcab15d5abb5bd8dbbf3944f3 | [
"BSD-3-Clause"
] | 1 | 2017-07-19T20:13:33.000Z | 2017-07-19T20:13:33.000Z | load-events.py | Pivotal-Data-Engineering/gemfire-samples | 141024d5505f1d8bcab15d5abb5bd8dbbf3944f3 | [
"BSD-3-Clause"
] | null | null | null | load-events.py | Pivotal-Data-Engineering/gemfire-samples | 141024d5505f1d8bcab15d5abb5bd8dbbf3944f3 | [
"BSD-3-Clause"
] | null | null | null | import httplib
import faker
import json
import os
import random
import time
# REST endpoint the events are written to (GemFire REST API, Event region).
REST_API_HOST='localhost'
REST_API_PORT=10183
REST_API_URL='/gemfire-api/v1/Event'
# Value written to each event's "@type" field.
EVENT_TYPE = 'io.pivotal.pde.sample.Event'
# Number of events to generate and pause (seconds) between PUTs.
count = 200
sleep = 1.0
# All requests send JSON bodies.
headers = dict()
headers['Content-Type'] = 'application/json'
if __name__ == '__main__':
    # NOTE(review): this script is Python 2 only (httplib module, print statements).
    fake = faker.Factory.create()
    conn = httplib.HTTPConnection(REST_API_HOST,REST_API_PORT)
    try:
        # Disabled: clearing the Event region with a DELETE before loading.
        #conn.request('DELETE',REST_API_URL, None, headers )
        #resp = conn.getresponse()
        #if resp.status != 200:
        #    raise Exception('An error occurred while clearing the event region - REST API returned {0} {1}'.format(resp.status, resp.reason))
        #resp.read()
        #print 'cleared event region'
        # Generate `count` fake events and PUT each under a zero-padded key.
        for i in range(count):
            json_event = dict()
            # Random event date within the past year.
            dt = fake.date_time_between(start_date="-1y", end_date="now")
            json_event['@type'] = EVENT_TYPE
            json_event['date'] = '{0:4d}-{1:02d}-{2:02d}'.format(dt.year, dt.month, dt.day)
            json_event['type'] = random.choice(['A','B','C','D','E'])
            json_event['state'] = fake.state_abbr()
            json_event['count'] = random.randint(0,99)
            jsonStr = json.dumps(json_event, indent=3)
            print 'PUTTING' + os.linesep + jsonStr
            conn.request('PUT',REST_API_URL + '/{0:06d}'.format(i), jsonStr, headers)
            resp = conn.getresponse()
            if resp.status != 200:
                raise Exception('An error occurred while putting event - REST API returned {0} {1}'.format(resp.status, resp.reason))
            # Read out the response body so the connection can issue the next request.
            resp.read()
            if sleep > 0:
                time.sleep(sleep)
    finally:
        conn.close()
| 30.839286 | 139 | 0.604517 | import httplib
import faker
import json
import os
import random
import time
REST_API_HOST='localhost'
REST_API_PORT=10183
REST_API_URL='/gemfire-api/v1/Event'
EVENT_TYPE = 'io.pivotal.pde.sample.Event'
count = 200
sleep = 1.0
headers = dict()
headers['Content-Type'] = 'application/json'
if __name__ == '__main__':
fake = faker.Factory.create()
conn = httplib.HTTPConnection(REST_API_HOST,REST_API_PORT)
try:
for i in range(count):
json_event = dict()
dt = fake.date_time_between(start_date="-1y", end_date="now")
json_event['@type'] = EVENT_TYPE
json_event['date'] = '{0:4d}-{1:02d}-{2:02d}'.format(dt.year, dt.month, dt.day)
json_event['type'] = random.choice(['A','B','C','D','E'])
json_event['state'] = fake.state_abbr()
json_event['count'] = random.randint(0,99)
jsonStr = json.dumps(json_event, indent=3)
print 'PUTTING' + os.linesep + jsonStr
conn.request('PUT',REST_API_URL + '/{0:06d}'.format(i), jsonStr, headers)
resp = conn.getresponse()
if resp.status != 200:
raise Exception('An error occurred while putting event - REST API returned {0} {1}'.format(resp.status, resp.reason))
resp.read()
if sleep > 0:
time.sleep(sleep)
finally:
conn.close()
| false | true |
f7fce37f8095709be46b3922bfc6eec9c08e922b | 2,362 | py | Python | sdc/sdc/filters.py | yandex-research/shifts | 12c8ca805ff4d18bdc1300611c318b264d79fdec | [
"Apache-2.0"
] | 156 | 2021-07-16T08:54:39.000Z | 2022-03-24T11:49:36.000Z | sdc/sdc/filters.py | yandex-research/shifts | 12c8ca805ff4d18bdc1300611c318b264d79fdec | [
"Apache-2.0"
] | 18 | 2021-07-21T14:02:46.000Z | 2022-02-26T04:07:12.000Z | sdc/sdc/filters.py | yandex-research/shifts | 12c8ca805ff4d18bdc1300611c318b264d79fdec | [
"Apache-2.0"
] | 41 | 2021-07-21T05:38:07.000Z | 2022-01-13T15:25:51.000Z | """
** Filtering Info **
To filter scenes by tags one should specify a filter function
Scene tags dict has following structure:
{
'day_time': one of {'kNight', 'kMorning', 'kAfternoon', 'kEvening'}
'season': one of {'kWinter', 'kSpring', 'kSummer', 'kAutumn'}
'track': one of {
'Moscow' , 'Skolkovo', 'Innopolis', 'AnnArbor', 'Modiin', 'TelAviv'}
'sun_phase': one of {'kAstronomicalNight', 'kTwilight', 'kDaylight'}
'precipitation': one of {'kNoPrecipitation', 'kRain', 'kSleet', 'kSnow'}
}
Full description of protobuf message is available at
tags.proto file in sources
** Split Configuration **
Training Data ('train')
'moscow__train': Moscow intersected with NO precipitation
Development Data ('development')
'moscow__development': Moscow intersected with NO precipitation
'ood__development': Skolkovo, Modiin, and Innopolis intersected with
(No precipitation, Rain and Snow)
Evaluation Data ('evaluation')
    'moscow__evaluation': Moscow intersected with NO precipitation
    'ood__evaluation': Ann-Arbor + Tel Aviv intersected with
        (No precipitation, rain and snow)
"""
def filter_moscow_no_precipitation_data(scene_tags_dict):
    """Select Moscow scenes recorded with no precipitation.

    This will need to be further divided into train/validation/test splits.
    """
    is_moscow = scene_tags_dict['track'] == 'Moscow'
    is_dry = scene_tags_dict['precipitation'] == 'kNoPrecipitation'
    return is_moscow and is_dry
def filter_ood_development_data(scene_tags_dict):
    """Select OOD development scenes.

    Scenes from Skolkovo, Modiin or Innopolis recorded with no
    precipitation, rain or snow.
    """
    in_ood_track = scene_tags_dict['track'] in ('Skolkovo', 'Modiin', 'Innopolis')
    in_allowed_weather = scene_tags_dict['precipitation'] in (
        'kNoPrecipitation', 'kRain', 'kSnow')
    return in_ood_track and in_allowed_weather
def filter_ood_evaluation_data(scene_tags_dict):
    """Select OOD evaluation scenes.

    Scenes from Ann Arbor or Tel Aviv recorded with no precipitation,
    rain or snow.
    """
    in_ood_track = scene_tags_dict['track'] in ('AnnArbor', 'TelAviv')
    in_allowed_weather = scene_tags_dict['precipitation'] in (
        'kNoPrecipitation', 'kRain', 'kSnow')
    return in_ood_track and in_allowed_weather
# Maps dataset split name -> {subset name -> scene-tag filter predicate}.
# The split configuration is described in the module docstring above.
DATASETS_TO_FILTERS = {
    'train': {
        'moscow__train': filter_moscow_no_precipitation_data
    },
    'development': {
        'moscow__development': filter_moscow_no_precipitation_data,
        'ood__development': filter_ood_development_data
    },
    'evaluation': {
        'moscow__evaluation': filter_moscow_no_precipitation_data,
        'ood__evaluation': filter_ood_evaluation_data
    }
}
| 31.078947 | 76 | 0.685859 |
def filter_moscow_no_precipitation_data(scene_tags_dict):
if (scene_tags_dict['track'] == 'Moscow' and
scene_tags_dict['precipitation'] == 'kNoPrecipitation'):
return True
else:
return False
def filter_ood_development_data(scene_tags_dict):
if (scene_tags_dict['track'] in ['Skolkovo', 'Modiin', 'Innopolis'] and
scene_tags_dict[
'precipitation'] in ['kNoPrecipitation', 'kRain', 'kSnow']):
return True
else:
return False
def filter_ood_evaluation_data(scene_tags_dict):
if (scene_tags_dict['track'] in ['AnnArbor', 'TelAviv'] and
scene_tags_dict[
'precipitation'] in ['kNoPrecipitation', 'kRain', 'kSnow']):
return True
else:
return False
DATASETS_TO_FILTERS = {
'train': {
'moscow__train': filter_moscow_no_precipitation_data
},
'development': {
'moscow__development': filter_moscow_no_precipitation_data,
'ood__development': filter_ood_development_data
},
'evaluation': {
'moscow__evaluation': filter_moscow_no_precipitation_data,
'ood__evaluation': filter_ood_evaluation_data
}
}
| true | true |
f7fce549598dd86125163ad1467449cd8372b5e0 | 13,112 | py | Python | vega/algorithms/nas/esr_ea/esr_search.py | Lzc06/vega | 852d2f57e21caed11473ddc96397124561eacf8a | [
"MIT"
] | 12 | 2020-12-13T08:34:24.000Z | 2022-03-20T15:17:17.000Z | vega/algorithms/nas/esr_ea/esr_search.py | Lzc06/vega | 852d2f57e21caed11473ddc96397124561eacf8a | [
"MIT"
] | 3 | 2021-03-31T20:15:40.000Z | 2022-02-09T23:50:46.000Z | built-in/TensorFlow/Research/cv/image_classification/Darts_for_TensorFlow/automl/vega/algorithms/nas/esr_ea/esr_search.py | Huawei-Ascend/modelzoo | df51ed9c1d6dbde1deef63f2a037a369f8554406 | [
"Apache-2.0"
] | 2 | 2021-07-10T12:40:46.000Z | 2021-12-17T07:55:15.000Z | # -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""search algorithm for ESR_EA."""
import csv
import logging
import os
from bisect import bisect_right
from random import random, sample
import numpy as np
import pandas as pd
from vega.core.common.general import General
from .conf import ESRConfig
from vega.core.common import FileOps
from vega.core.common.class_factory import ClassFactory, ClassType
from vega.search_space.search_algs.search_algorithm import SearchAlgorithm
from .esr_ea_individual import ESRIndividual
@ClassFactory.register(ClassType.SEARCH_ALGORITHM)
class ESRSearch(SearchAlgorithm):
"""Evolutionary search algorithm of the efficient super-resolution."""
config = ESRConfig()
def __init__(self, search_space=None, **kwargs):
"""Construct the ESR EA search class.
:param search_space: config of the search space
:type search_space: dictionary
"""
super(ESRSearch, self).__init__(search_space, **kwargs)
self.individual_num = self.config.policy.num_individual
self.generation_num = self.config.policy.num_generation
self.elitism_num = self.config.policy.num_elitism
self.mutation_rate = self.config.policy.mutation_rate
self.min_active = self.config.range.min_active
self.max_params = self.config.range.max_params
self.min_params = self.config.range.min_params
self.indiv_count = 0
self.evolution_count = 0
self.initialize_pop()
self.elitism = [ESRIndividual(self.codec) for _ in range(self.elitism_num)]
self.elit_fitness = [0] * self.elitism_num
self.fitness_pop = [0] * self.individual_num
self.fit_state = [0] * self.individual_num
@property
def is_completed(self):
"""Tell whether the search process is completed.
:return: True is completed, or False otherwise
:rtype: bool
"""
return self.indiv_count > self.generation_num * self.individual_num
def update_fitness(self, evals):
"""Update the fitness of each individual.
:param evals: the evalution
:type evals: list
"""
for i in range(self.individual_num):
self.pop[i].update_fitness(evals[i])
    def update_elitism(self, evaluations):
        """Update the elitism and its fitness.

        :param evaluations: evaluations result
        :type evaluations: list
        """
        # Pool the current elitism archive and the population together (copies,
        # so the originals are never aliased).
        popu_all = [ESRIndividual(self.codec) for _ in range(self.elitism_num + self.individual_num)]
        for i in range(self.elitism_num + self.individual_num):
            if i < self.elitism_num:
                popu_all[i].copy(self.elitism[i])
            else:
                popu_all[i].copy(self.pop[i - self.elitism_num])
        fitness_all = self.elit_fitness + evaluations
        # Indices of the pool sorted by fitness in ascending order.
        sorted_ind = sorted(range(len(fitness_all)), key=lambda k: fitness_all[k])
        # Keep the `elitism_num` fittest individuals, taken from the tail of the
        # ascending order (highest fitness first).
        for i in range(self.elitism_num):
            self.elitism[i].copy(popu_all[sorted_ind[len(fitness_all) - 1 - i]])
            self.elit_fitness[i] = fitness_all[sorted_ind[len(fitness_all) - 1 - i]]
        logging.info('Generation: {}, updated elitism fitness: {}'.format(self.evolution_count, self.elit_fitness))
def _log_data(self, net_info_type='active_only', pop=None, value=0):
"""Get the evolution and network information of children.
:param net_info_type: defaults to 'active_only'
:type net_info_type: str
:param pop: defaults to None
:type pop: list
:param value: defaults to 0
:type value: int
:return: log_list
:rtype: list
"""
log_list = [value, pop.parameter, pop.flops]
if net_info_type == 'active_only':
log_list.append(pop.active_net_list())
elif net_info_type == 'full':
log_list += pop.gene.flatten().tolist()
else:
pass
return log_list
    def save_results(self):
        """Save the results of evolution contains the information of pupulation and elitism."""
        _path = FileOps.join_path(self.local_output_path, General.step_name)
        FileOps.make_dir(_path)
        # arch.txt accumulates the elitism of each generation, arch_child.txt
        # every evaluated child, and selected_arch.npy the stacked elitism genes.
        arch_file = FileOps.join_path(_path, 'arch.txt')
        arch_child = FileOps.join_path(_path, 'arch_child.txt')
        sel_arch_file = FileOps.join_path(_path, 'selected_arch.npy')
        sel_arch = []
        # Open in append mode so each generation adds a new section.
        with open(arch_file, 'a') as fw_a, open(arch_child, 'a') as fw_ac:
            writer_a = csv.writer(fw_a, lineterminator='\n')
            writer_ac = csv.writer(fw_ac, lineterminator='\n')
            writer_ac.writerow(['Population Iteration: ' + str(self.evolution_count + 1)])
            for c in range(self.individual_num):
                writer_ac.writerow(
                    self._log_data(net_info_type='active_only', pop=self.pop[c],
                                   value=self.pop[c].fitness))
            writer_a.writerow(['Population Iteration: ' + str(self.evolution_count + 1)])
            for c in range(self.elitism_num):
                writer_a.writerow(self._log_data(net_info_type='active_only',
                                                 pop=self.elitism[c],
                                                 value=self.elit_fitness[c]))
                sel_arch.append(self.elitism[c].gene)
        sel_arch = np.stack(sel_arch)
        np.save(sel_arch_file, sel_arch)
        # Mirror the output folder to the backup location, if one is configured.
        if self.backup_base_path is not None:
            FileOps.copy_folder(self.local_output_path, self.backup_base_path)
def parent_select(self, parent_num=2, select_type='Tournament'):
"""Select parent from a population with Tournament or Roulette.
:param parent_num: number of parents
:type parent_num: int
:param select_type: select_type, defaults to 'Tournament'
:type select_type: str
:return: the selected parent individuals
:rtype: list
"""
popu_all = [ESRIndividual(self.codec) for _ in range(self.elitism_num + self.individual_num)]
parent = [ESRIndividual(self.codec) for _ in range(parent_num)]
fitness_all = self.elit_fitness
for i in range(self.elitism_num + self.individual_num):
if i < self.elitism_num:
popu_all[i].copy(self.elitism[i])
else:
popu_all[i].copy(self.pop[i - self.elitism_num])
fitness_all = fitness_all + [popu_all[i].fitness]
fitness_all = np.asarray(fitness_all)
if select_type == 'Tournament':
for i in range(parent_num):
tourn = sample(range(len(popu_all)), 2)
if fitness_all[tourn[0]] >= fitness_all[tourn[1]]:
parent[i].copy(popu_all[tourn[0]])
fitness_all[tourn[0]] = 0
else:
parent[i] = popu_all[tourn[1]]
fitness_all[tourn[1]] = 0
elif select_type == 'Roulette':
eval_submean = fitness_all - np.min(fitness_all)
eval_norm = eval_submean / sum(eval_submean)
eva_threshold = np.cumsum(eval_norm)
for i in range(parent_num):
ran = random()
selec_id = bisect_right(eva_threshold, ran)
parent[i].copy(popu_all[selec_id])
eval_submean[selec_id] = 0
eval_norm = eval_submean / sum(eval_submean)
eva_threshold = np.cumsum(eval_norm)
else:
logging.info('Wrong selection type')
return parent
def initialize_pop(self):
"""Initialize the population of first generation."""
self.pop = [ESRIndividual(self.codec) for _ in range(self.individual_num)]
for i in range(self.individual_num):
while self.pop[i].active_num < self.min_active:
self.pop[i].mutation_using(self.mutation_rate)
while self.pop[i].parameter > self.max_params or self.pop[i].parameter < self.min_params:
self.pop[i].mutation_node(self.mutation_rate)
def get_mutate_child(self, muta_num):
"""Generate the mutated children of the next offspring with mutation operation.
:param muta_num: number of mutated children
:type muta_num: int
"""
for i in range(muta_num):
if int(self.individual_num / 2) == len(self.elitism):
self.pop[i].copy(self.elitism[i])
else:
self.pop[i].copy(sample(self.elitism, 1)[0])
self.pop[i].mutation_using(self.mutation_rate)
while self.pop[i].active_num < self.min_active:
self.pop[i].mutation_using(self.mutation_rate)
self.pop[i].mutation_node(self.mutation_rate)
while self.pop[i].parameter > self.max_params or self.pop[i].parameter < self.min_params:
self.pop[i].mutation_node(self.mutation_rate)
    def get_cross_child(self, muta_num):
        """Generate the children of the next offspring with crossover operation.
        :param muta_num: offset of the first crossover slot (number of mutated children)
        :type muta_num: int
        """
        # Each iteration produces two children, filling slots after the mutated ones.
        for i in range(int(self.individual_num / 4)):
            pop_id = muta_num + i * 2
            father, mother = self.parent_select(2, 'Roulette')
            # Swap a random contiguous gene segment between the two parents.
            length = np.random.randint(4, int(father.gene.shape[0] / 2))
            location = np.random.randint(0, father.gene.shape[0] - length)
            gene_1 = father.gene.copy()
            gene_2 = mother.gene.copy()
            gene_1[location:(location + length), :] = gene_2[location:(location + length), :]
            gene_2[location:(location + length), :] = father.gene[location:(location + length), :]
            self.pop[pop_id].update_gene(gene_1)
            self.pop[pop_id + 1].update_gene(gene_2)
            # Repair both children until they satisfy the active-node and
            # parameter-count constraints.
            while self.pop[pop_id].active_num < self.min_active:
                self.pop[pop_id].mutation_using(self.mutation_rate)
            param = self.pop[pop_id].parameter
            while param > self.max_params or param < self.min_params:
                self.pop[pop_id].mutation_node(self.mutation_rate)
                param = self.pop[pop_id].parameter
            while self.pop[pop_id + 1].active_num < self.min_active:
                self.pop[pop_id + 1].mutation_using(self.mutation_rate)
            param = self.pop[pop_id + 1].parameter
            while param > self.max_params or param < self.min_params:
                self.pop[pop_id + 1].mutation_node(self.mutation_rate)
                param = self.pop[pop_id + 1].parameter
def reproduction(self):
"""Generate the new offsprings."""
muta_num = self.individual_num - (self.individual_num // 4) * 2
self.get_mutate_child(muta_num)
self.get_cross_child(muta_num)
def update(self, record):
"""Update function.
:param local_worker_path: the local path that saved `performance.txt`.
:type local_worker_path: str
"""
worker_id = record.get("worker_id")
performance = record.get("rewards")
self.fitness_pop[(worker_id - 1) % self.individual_num] = performance
self.fit_state[(worker_id - 1) % self.individual_num] = 1
def get_fitness(self):
"""Get the evalutation of each individual.
:return: a list of evaluations
:rtype: list
"""
pd_path = os.path.join(self.local_output_path, 'population_fitness.csv')
with open(pd_path, "r") as file:
df = pd.read_csv(file)
fitness_all = df['PSNR'].values
fitness = fitness_all[fitness_all.size - self.individual_num:]
return list(fitness)
    def search(self):
        """Search one random model.
        :return: current number of samples, and the model config
        :rtype: int and class
        """
        # At a generation boundary, finish the generation before sampling again.
        if self.indiv_count > 0 and self.indiv_count % self.individual_num == 0:
            # Not every individual is evaluated yet: report nothing (returns None).
            if np.sum(np.asarray(self.fit_state)) < self.individual_num:
                return
            else:
                # All results are in: promote elites, log, and breed the next
                # generation, then reset the per-generation buffers.
                self.update_fitness(self.fitness_pop)
                self.update_elitism(self.fitness_pop)
                self.save_results()
                self.reproduction()
                self.evolution_count += 1
                self.fitness_pop = [0] * self.individual_num
                self.fit_state = [0] * self.individual_num
        current_indiv = self.pop[self.indiv_count % self.individual_num]
        indiv_cfg = self.codec.decode(current_indiv)
        self.indiv_count += 1
        logging.info('model parameters:{}, model flops:{}'.format(current_indiv.parameter, current_indiv.flops))
        logging.info('model arch:{}'.format(current_indiv.active_net_list()))
        return self.indiv_count, indiv_cfg
| 44.297297 | 115 | 0.624085 |
import csv
import logging
import os
from bisect import bisect_right
from random import random, sample
import numpy as np
import pandas as pd
from vega.core.common.general import General
from .conf import ESRConfig
from vega.core.common import FileOps
from vega.core.common.class_factory import ClassFactory, ClassType
from vega.search_space.search_algs.search_algorithm import SearchAlgorithm
from .esr_ea_individual import ESRIndividual
@ClassFactory.register(ClassType.SEARCH_ALGORITHM)
class ESRSearch(SearchAlgorithm):
    """Evolutionary architecture search for efficient super-resolution models."""
    config = ESRConfig()
    def __init__(self, search_space=None, **kwargs):
        """Read evolution hyper-parameters from config and build the initial population."""
        super(ESRSearch, self).__init__(search_space, **kwargs)
        self.individual_num = self.config.policy.num_individual
        self.generation_num = self.config.policy.num_generation
        self.elitism_num = self.config.policy.num_elitism
        self.mutation_rate = self.config.policy.mutation_rate
        self.min_active = self.config.range.min_active
        self.max_params = self.config.range.max_params
        self.min_params = self.config.range.min_params
        self.indiv_count = 0
        self.evolution_count = 0
        self.initialize_pop()
        self.elitism = [ESRIndividual(self.codec) for _ in range(self.elitism_num)]
        self.elit_fitness = [0] * self.elitism_num
        self.fitness_pop = [0] * self.individual_num
        self.fit_state = [0] * self.individual_num
    @property
    def is_completed(self):
        """Return True once the total evaluation budget is exhausted."""
        return self.indiv_count > self.generation_num * self.individual_num
    def update_fitness(self, evals):
        """Copy the evaluation results onto the current population."""
        for i in range(self.individual_num):
            self.pop[i].update_fitness(evals[i])
    def update_elitism(self, evaluations):
        """Refresh the elitism pool with the best of elites plus the current population."""
        popu_all = [ESRIndividual(self.codec) for _ in range(self.elitism_num + self.individual_num)]
        for i in range(self.elitism_num + self.individual_num):
            if i < self.elitism_num:
                popu_all[i].copy(self.elitism[i])
            else:
                popu_all[i].copy(self.pop[i - self.elitism_num])
        fitness_all = self.elit_fitness + evaluations
        # Ascending sort; the best individuals are taken from the end.
        sorted_ind = sorted(range(len(fitness_all)), key=lambda k: fitness_all[k])
        for i in range(self.elitism_num):
            self.elitism[i].copy(popu_all[sorted_ind[len(fitness_all) - 1 - i]])
            self.elit_fitness[i] = fitness_all[sorted_ind[len(fitness_all) - 1 - i]]
        logging.info('Generation: {}, updated elitism fitness: {}'.format(self.evolution_count, self.elit_fitness))
    def _log_data(self, net_info_type='active_only', pop=None, value=0):
        """Build one log row [value, parameters, flops, <net info>] for an individual."""
        log_list = [value, pop.parameter, pop.flops]
        if net_info_type == 'active_only':
            log_list.append(pop.active_net_list())
        elif net_info_type == 'full':
            log_list += pop.gene.flatten().tolist()
        else:
            pass
        return log_list
    def save_results(self):
        """Append population/elitism logs and save the selected elite genes to disk."""
        _path = FileOps.join_path(self.local_output_path, General.step_name)
        FileOps.make_dir(_path)
        arch_file = FileOps.join_path(_path, 'arch.txt')
        arch_child = FileOps.join_path(_path, 'arch_child.txt')
        sel_arch_file = FileOps.join_path(_path, 'selected_arch.npy')
        sel_arch = []
        with open(arch_file, 'a') as fw_a, open(arch_child, 'a') as fw_ac:
            writer_a = csv.writer(fw_a, lineterminator='\n')
            writer_ac = csv.writer(fw_ac, lineterminator='\n')
            writer_ac.writerow(['Population Iteration: ' + str(self.evolution_count + 1)])
            for c in range(self.individual_num):
                writer_ac.writerow(
                    self._log_data(net_info_type='active_only', pop=self.pop[c],
                                   value=self.pop[c].fitness))
            writer_a.writerow(['Population Iteration: ' + str(self.evolution_count + 1)])
            for c in range(self.elitism_num):
                writer_a.writerow(self._log_data(net_info_type='active_only',
                                                 pop=self.elitism[c],
                                                 value=self.elit_fitness[c]))
                sel_arch.append(self.elitism[c].gene)
        sel_arch = np.stack(sel_arch)
        np.save(sel_arch_file, sel_arch)
        if self.backup_base_path is not None:
            FileOps.copy_folder(self.local_output_path, self.backup_base_path)
    def parent_select(self, parent_num=2, select_type='Tournament'):
        """Select parent_num parents via 'Tournament' or 'Roulette' selection."""
        popu_all = [ESRIndividual(self.codec) for _ in range(self.elitism_num + self.individual_num)]
        parent = [ESRIndividual(self.codec) for _ in range(parent_num)]
        fitness_all = self.elit_fitness
        for i in range(self.elitism_num + self.individual_num):
            if i < self.elitism_num:
                popu_all[i].copy(self.elitism[i])
            else:
                popu_all[i].copy(self.pop[i - self.elitism_num])
            fitness_all = fitness_all + [popu_all[i].fitness]
        # NOTE(review): fitness_all is seeded with elit_fitness and then extended
        # with every copied individual's fitness, so the elitism entries appear
        # twice and len(fitness_all) > len(popu_all); verify that the Roulette
        # branch cannot select an index beyond the population.
        fitness_all = np.asarray(fitness_all)
        if select_type == 'Tournament':
            for i in range(parent_num):
                tourn = sample(range(len(popu_all)), 2)
                if fitness_all[tourn[0]] >= fitness_all[tourn[1]]:
                    parent[i].copy(popu_all[tourn[0]])
                    fitness_all[tourn[0]] = 0
                else:
                    parent[i] = popu_all[tourn[1]]
                    fitness_all[tourn[1]] = 0
        elif select_type == 'Roulette':
            eval_submean = fitness_all - np.min(fitness_all)
            eval_norm = eval_submean / sum(eval_submean)
            eva_threshold = np.cumsum(eval_norm)
            for i in range(parent_num):
                ran = random()
                selec_id = bisect_right(eva_threshold, ran)
                parent[i].copy(popu_all[selec_id])
                # Zero out the chosen individual's mass and renormalise.
                eval_submean[selec_id] = 0
                eval_norm = eval_submean / sum(eval_submean)
                eva_threshold = np.cumsum(eval_norm)
        else:
            logging.info('Wrong selection type')
        return parent
    def initialize_pop(self):
        """Create the first generation, mutating each individual into the valid range."""
        self.pop = [ESRIndividual(self.codec) for _ in range(self.individual_num)]
        for i in range(self.individual_num):
            while self.pop[i].active_num < self.min_active:
                self.pop[i].mutation_using(self.mutation_rate)
            while self.pop[i].parameter > self.max_params or self.pop[i].parameter < self.min_params:
                self.pop[i].mutation_node(self.mutation_rate)
    def get_mutate_child(self, muta_num):
        """Fill the first muta_num population slots with mutated copies of elites."""
        for i in range(muta_num):
            if int(self.individual_num / 2) == len(self.elitism):
                self.pop[i].copy(self.elitism[i])
            else:
                self.pop[i].copy(sample(self.elitism, 1)[0])
            self.pop[i].mutation_using(self.mutation_rate)
            while self.pop[i].active_num < self.min_active:
                self.pop[i].mutation_using(self.mutation_rate)
            self.pop[i].mutation_node(self.mutation_rate)
            while self.pop[i].parameter > self.max_params or self.pop[i].parameter < self.min_params:
                self.pop[i].mutation_node(self.mutation_rate)
    def get_cross_child(self, muta_num):
        """Fill the remaining slots with child pairs produced by segment crossover."""
        for i in range(int(self.individual_num / 4)):
            pop_id = muta_num + i * 2
            father, mother = self.parent_select(2, 'Roulette')
            # Swap a random contiguous gene segment between the two parents.
            length = np.random.randint(4, int(father.gene.shape[0] / 2))
            location = np.random.randint(0, father.gene.shape[0] - length)
            gene_1 = father.gene.copy()
            gene_2 = mother.gene.copy()
            gene_1[location:(location + length), :] = gene_2[location:(location + length), :]
            gene_2[location:(location + length), :] = father.gene[location:(location + length), :]
            self.pop[pop_id].update_gene(gene_1)
            self.pop[pop_id + 1].update_gene(gene_2)
            while self.pop[pop_id].active_num < self.min_active:
                self.pop[pop_id].mutation_using(self.mutation_rate)
            param = self.pop[pop_id].parameter
            while param > self.max_params or param < self.min_params:
                self.pop[pop_id].mutation_node(self.mutation_rate)
                param = self.pop[pop_id].parameter
            while self.pop[pop_id + 1].active_num < self.min_active:
                self.pop[pop_id + 1].mutation_using(self.mutation_rate)
            param = self.pop[pop_id + 1].parameter
            while param > self.max_params or param < self.min_params:
                self.pop[pop_id + 1].mutation_node(self.mutation_rate)
                param = self.pop[pop_id + 1].parameter
    def reproduction(self):
        """Produce the next generation from mutation and crossover children."""
        muta_num = self.individual_num - (self.individual_num // 4) * 2
        self.get_mutate_child(muta_num)
        self.get_cross_child(muta_num)
    def update(self, record):
        """Store the reward reported for one worker into the fitness buffers."""
        worker_id = record.get("worker_id")
        performance = record.get("rewards")
        self.fitness_pop[(worker_id - 1) % self.individual_num] = performance
        self.fit_state[(worker_id - 1) % self.individual_num] = 1
    def get_fitness(self):
        """Read the PSNR fitness of the current generation back from disk."""
        pd_path = os.path.join(self.local_output_path, 'population_fitness.csv')
        with open(pd_path, "r") as file:
            df = pd.read_csv(file)
            fitness_all = df['PSNR'].values
            fitness = fitness_all[fitness_all.size - self.individual_num:]
        return list(fitness)
    def search(self):
        """Return (sample count, decoded config) for the next individual to evaluate."""
        if self.indiv_count > 0 and self.indiv_count % self.individual_num == 0:
            # Generation boundary: wait until every individual is evaluated.
            if np.sum(np.asarray(self.fit_state)) < self.individual_num:
                return
            else:
                self.update_fitness(self.fitness_pop)
                self.update_elitism(self.fitness_pop)
                self.save_results()
                self.reproduction()
                self.evolution_count += 1
                self.fitness_pop = [0] * self.individual_num
                self.fit_state = [0] * self.individual_num
        current_indiv = self.pop[self.indiv_count % self.individual_num]
        indiv_cfg = self.codec.decode(current_indiv)
        self.indiv_count += 1
        logging.info('model parameters:{}, model flops:{}'.format(current_indiv.parameter, current_indiv.flops))
        logging.info('model arch:{}'.format(current_indiv.active_net_list()))
        return self.indiv_count, indiv_cfg
| true | true |
f7fce6a167660fd64eb86c6b55209849d93a2a6b | 7,526 | py | Python | cmdmenu.py | arkocal/pycmdmenu | 0cb3f55e90f9f64f50ab6ca784a5329439cca2c2 | [
"MIT"
] | 1 | 2021-04-27T19:36:16.000Z | 2021-04-27T19:36:16.000Z | cmdmenu.py | arkocal/pycmdmenu | 0cb3f55e90f9f64f50ab6ca784a5329439cca2c2 | [
"MIT"
] | 4 | 2018-10-04T11:28:05.000Z | 2018-10-04T12:39:24.000Z | cmdmenu.py | arkocal/pycmdmenu | 0cb3f55e90f9f64f50ab6ca784a5329439cca2c2 | [
"MIT"
null | null | null | """Automatically create command line menus with arg hierarchies."""
import argparse
import copy
import inspect
import pkgutil
from types import ModuleType
FUNC_NAME_ARG = "_cmdmenu_func_name"
def cmdmenu_function(param, description=None):
    """Decorator marking a function for inclusion in a cmdmenu.

    Can be used bare (``@cmdmenu_function``), which only marks the function
    (necessary for add_module), or with string arguments
    (``@cmdmenu_function(help, description)``) where ``help`` is the menu
    help message and ``description`` (optional, defaults to ``help``) is the
    longer description.
    """
    def decorator(func):
        func.cmdmenu_help = param
        func.cmdmenu_description = description if description is not None else param
        func.cmdmenu_is_marked = True
        return func
    if isinstance(param, str):
        # Used with arguments: param is the help text, return the decorator.
        return decorator
    if callable(param):
        # Used bare: param is the decorated function itself.
        param.cmdmenu_is_marked = True
        return param
    raise ValueError("Invalid parameters")
def add_command(subparsers, command_function):
    """Add a command function to argparse subparser.
    params:
    --------------------
    subparsers: The argparse._SubParsersAction object to add command to.
    command_function: A python function.
    Using the cmdmenu_function decorator, a menu description can be added
    to the command.
    Using parameter annotations menu options can be configured. For each
    parameter the annotation is either a string containing a help message
    or a dictionary containing parameters for the 'add_argument' method from
    argparse.ArgumentParser. 'dest' argument is not allowed.
    "name": name or flags (positional in add_argument), either a string
    or a list of strings. Defaults to parameter_name for parameters with no
    default value, and --parameter_name for others.
    "default": defaults to parameter default value, overrides if provided.
    All other parameters are passed directly.
    """
    subparser = subparsers.add_parser(command_function.__name__,
                                      help=getattr(command_function,
                                                   "cmdmenu_help", ""),
                                      description=getattr(command_function,
                                                          "cmdmenu_description",
                                                          ""))
    sig = inspect.signature(command_function)
    # The reserved dest used to smuggle the target function through argparse
    # must not collide with a real parameter name.
    if FUNC_NAME_ARG in sig.parameters.keys():
        raise ValueError("Parameter name {} not allowed for cmdmenu "
                         "function {}.".format(FUNC_NAME_ARG,
                                               command_function.__name__))
    for param_name, param in sig.parameters.items():
        # Do not change the actual annotation
        meta = copy.deepcopy(param.annotation)
        if isinstance(meta, str):
            meta = {"help": meta}
        if not isinstance(meta, dict):
            meta = {}
        # dest is set automatically as it has to match parameter name
        if "dest" in meta:
            raise ValueError("'dest' not allowed in annotation dict of cmdmenu"
                             " function {}.".format(command_function.__name__))
        # Annotated default overrides python argument default
        if "default" not in meta.keys() and param.default is not inspect._empty:
            meta["default"] = param.default
        name_and_flags = meta.pop("name", None)
        if name_and_flags is None:
            if param.default is inspect._empty:
                name_and_flags = param_name
            # Assume parameter is optional if default value is provided
            else:
                name_and_flags = "--{}".format(param_name)
        if isinstance(name_and_flags, str):
            name_and_flags = [name_and_flags]
        # If optional parameter, make sure dest is the parameter name
        if name_and_flags[0].startswith("-"):
            meta["dest"] = param_name
        subparser.add_argument(*name_and_flags, **meta)
    # Stash the function itself so parse_and_run_with can dispatch to it.
    subparser.set_defaults(**{FUNC_NAME_ARG:command_function})
def add_module(subparsers, module, recursive=True, toplevel=None):
    """Add a module's cmdmenu-marked functions to an argparse subparser.

    Functions marked with the cmdmenu_function decorator are added as
    commands; if recursive is True, submodules that set
    ``IS_MENU_MODULE = True`` are added as nested menus.

    params:
    --------------------
    subparsers: The argparse._SubParsersAction object to add commands to.
    module: Module to be added. An optional CMDMENU_META dict in the module
        configures the generated parser: its items are passed as keyword
        arguments to subparsers.add_parser, except "name" (passed as the
        first positional argument) and "toplevel" (used as the default for
        the toplevel parameter of this module).
    recursive (boolean): Whether to add submodules recursively.
    toplevel (boolean): If True, the module's commands are added directly
        at the current subparser level; otherwise a new sub-menu is created.
        An explicit argument overrides CMDMENU_META; defaults to False.
    """
    # Do not change the original module variables.
    meta = copy.deepcopy(getattr(module, "CMDMENU_META", {}))
    if toplevel is None:
        toplevel = meta.pop("toplevel", False)
    if toplevel:
        add_to = subparsers
    else:
        name = meta.pop("name", module.__name__.split(".")[-1])
        add_to = subparsers.add_parser(name, **meta).add_subparsers()
    # Only add functions marked with the cmdmenu_function decorator.
    commands = [(n, v) for n, v in inspect.getmembers(module)
                if getattr(v, "cmdmenu_is_marked", False) is True]
    for name, func in commands:
        add_command(add_to, func)
    if recursive and hasattr(module, "__path__"):
        for _, name, ispkg in pkgutil.iter_modules(module.__path__):
            # Bug fix: fromlist must be a list; the previous "(name)" is just
            # a parenthesized string and only worked by accident because
            # __import__ treats any truthy fromlist as "return the submodule".
            submodule = __import__(module.__name__ + "." + name,
                                   fromlist=[name])
            if vars(submodule).get("IS_MENU_MODULE", False):
                add_module(add_to, submodule, recursive=ispkg)
def parse_and_run_with(argument_parser):
    """Parse CLI arguments from a populated parser and invoke the selected command."""
    parsed = vars(argument_parser.parse_args())
    # The target function was stashed under FUNC_NAME_ARG by add_command.
    command = parsed.pop(FUNC_NAME_ARG, None)
    assert command is not None, "No function for given args"
    return command(**parsed)
def run(toplevel=None, module=None, *args, **kwargs):
    """Create a parser, register modules, then parse args and dispatch.

    params:
    ----------
    toplevel: module or list of modules, added as toplevels (see add_module)
    module: module or list of modules, added under their own menu names
    *args, **kwargs: Passed directly to the ArgumentParser constructor
    """
    argument_parser = argparse.ArgumentParser(*args, **kwargs)
    subparsers = argument_parser.add_subparsers()
    def normalize(value):
        # Accept a single module, a list of modules, or None.
        if value is None:
            return []
        if isinstance(value, ModuleType):
            return [value]
        return value
    for mod in normalize(toplevel):
        add_module(subparsers, mod, recursive=True, toplevel=True)
    for mod in normalize(module):
        add_module(subparsers, mod, recursive=True, toplevel=False)
    parse_and_run_with(argument_parser)
| 39.820106 | 80 | 0.657056 | import argparse
import copy
import inspect
import pkgutil
from types import ModuleType
FUNC_NAME_ARG = "_cmdmenu_func_name"
def cmdmenu_function(param, description=None):
    """Mark a function for cmdmenu.

    Bare use (``@cmdmenu_function``) only marks the function; string use
    (``@cmdmenu_function(help[, description])``) also records the help and
    description (description defaults to help).
    """
    def decorator(func):
        func.cmdmenu_help = param
        if description is not None:
            func.cmdmenu_description = description
        else:
            func.cmdmenu_description = param
        func.cmdmenu_is_marked = True
        return func
    # Used with arguments: param is the help text, return the real decorator.
    if isinstance(param, str):
        return decorator
    # Used bare: param is the decorated function itself.
    if callable(param):
        param.cmdmenu_is_marked = True
        return param
    raise ValueError("Invalid parameters")
def add_command(subparsers, command_function):
    """Register command_function as an argparse sub-command.

    Parameter annotations configure the generated arguments: a string is
    used as the help message, a dict is passed to add_argument ('dest' not
    allowed; 'name' and 'default' are handled specially).
    """
    subparser = subparsers.add_parser(command_function.__name__,
                                      help=getattr(command_function,
                                                   "cmdmenu_help", ""),
                                      description=getattr(command_function,
                                                          "cmdmenu_description",
                                                          ""))
    sig = inspect.signature(command_function)
    # The reserved dest used to carry the target function must not collide
    # with a real parameter name.
    if FUNC_NAME_ARG in sig.parameters.keys():
        raise ValueError("Parameter name {} not allowed for cmdmenu "
                         "function {}.".format(FUNC_NAME_ARG,
                                               command_function.__name__))
    for param_name, param in sig.parameters.items():
        # Deep-copy so the function's actual annotation is never mutated.
        meta = copy.deepcopy(param.annotation)
        if isinstance(meta, str):
            meta = {"help": meta}
        if not isinstance(meta, dict):
            meta = {}
        # dest is set automatically as it has to match the parameter name.
        if "dest" in meta:
            raise ValueError("'dest' not allowed in annotation dict of cmdmenu"
                             " function {}.".format(command_function.__name__))
        # An annotated default overrides the python argument default.
        if "default" not in meta.keys() and param.default is not inspect._empty:
            meta["default"] = param.default
        name_and_flags = meta.pop("name", None)
        if name_and_flags is None:
            if param.default is inspect._empty:
                name_and_flags = param_name
            # A parameter with a default value becomes an optional --flag.
            else:
                name_and_flags = "--{}".format(param_name)
        if isinstance(name_and_flags, str):
            name_and_flags = [name_and_flags]
        # For optional parameters, make sure dest is the parameter name.
        if name_and_flags[0].startswith("-"):
            meta["dest"] = param_name
        subparser.add_argument(*name_and_flags, **meta)
    # Stash the function itself so parse_and_run_with can dispatch to it.
    subparser.set_defaults(**{FUNC_NAME_ARG:command_function})
def add_module(subparsers, module, recursive=True, toplevel=None):
    """Add a module's cmdmenu-marked functions (and, recursively, submodules
    that set IS_MENU_MODULE) to an argparse subparser.

    CMDMENU_META in the module configures add_parser; "name" is passed
    positionally and "toplevel" sets the default for the toplevel parameter.
    """
    # Deep-copy so the module's CMDMENU_META is never mutated.
    meta = copy.deepcopy(getattr(module, "CMDMENU_META", {}))
    if toplevel is None:
        toplevel = meta.pop("toplevel", False)
    if toplevel:
        add_to = subparsers
    else:
        name = meta.pop("name", module.__name__.split(".")[-1])
        add_to = subparsers.add_parser(name, **meta).add_subparsers()
    # Only functions marked by the cmdmenu_function decorator become commands.
    commands = [(n, v) for n, v in inspect.getmembers(module)
                if getattr(v, "cmdmenu_is_marked", False) is True]
    for name, func in commands:
        add_command(add_to, func)
    if recursive and hasattr(module, "__path__"):
        for _, name, ispkg in pkgutil.iter_modules(module.__path__):
            # NOTE(review): fromlist=(name) is a bare string, not a tuple;
            # it works only because __import__ treats any truthy fromlist
            # as "return the submodule" — should be fromlist=[name].
            submodule = __import__(module.__name__+"."+name, fromlist=(name))
            if vars(submodule).get("IS_MENU_MODULE", False):
                add_module(add_to, submodule, recursive=ispkg)
def parse_and_run_with(argument_parser):
    """Parse CLI args from a populated parser and invoke the selected command."""
    args = vars(argument_parser.parse_args())
    # add_command stashed the target function under FUNC_NAME_ARG.
    func = args.pop(FUNC_NAME_ARG, None)
    assert func is not None, "No function for given args"
    return func(**args)
def run(toplevel=None, module=None, *args, **kwargs):
    """Create a parser, register the given modules, then parse args and dispatch.

    toplevel/module each accept a single module, a list of modules, or None;
    *args and **kwargs are passed to the ArgumentParser constructor.
    """
    argument_parser = argparse.ArgumentParser(*args, **kwargs)
    subparsers = argument_parser.add_subparsers()
    # Normalise toplevel to a list of modules.
    if toplevel is None:
        toplevel = []
    elif isinstance(toplevel, ModuleType):
        toplevel = [toplevel]
    for m in toplevel:
        add_module(subparsers, m, recursive=True, toplevel=True)
    # Normalise module to a list of modules.
    if module is None:
        module = []
    elif isinstance(module, ModuleType):
        module = [module]
    for m in module:
        add_module(subparsers, m, recursive=True, toplevel=False)
    parse_and_run_with(argument_parser)
| true | true |
f7fce7c5ac7c35de29f219370a9c2598b5a8f090 | 17,819 | py | Python | scripts/data_processing/process_climis_unicef_ieconomics_data.py | mwdchang/delphi | c6177f2d614118883eaaa7f5300f3e46f10ddc7e | [
"Apache-2.0"
] | null | null | null | scripts/data_processing/process_climis_unicef_ieconomics_data.py | mwdchang/delphi | c6177f2d614118883eaaa7f5300f3e46f10ddc7e | [
"Apache-2.0"
] | null | null | null | scripts/data_processing/process_climis_unicef_ieconomics_data.py | mwdchang/delphi | c6177f2d614118883eaaa7f5300f3e46f10ddc7e | [
"Apache-2.0"
] | 1 | 2019-07-18T19:13:13.000Z | 2019-07-18T19:13:13.000Z | """ Script for cleaning data for 12 month evaluation. """
import os
import re
import sys
import numpy as np
import pandas as pd
from pprint import pprint
from glob import glob
from typing import List, Dict
from delphi.utils.shell import cd
from delphi.paths import data_dir, south_sudan_data
from delphi.utils.fp import grouper
from functools import partial
from itertools import groupby
def get_state_from_filename(filename, get_state_func):
    """Extract a CamelCase state token from *filename* and split it into words.

    ``get_state_func`` maps the filename to a token such as "UpperNile",
    which becomes "Upper Nile".
    """
    camel_case_token = get_state_func(filename)
    return " ".join(re.findall("[A-Z][^A-Z]*", camel_case_token))
def process_file_with_single_table(
    filename, variable_name_func, get_state_func, country="South Sudan"
):
    """Turn a single-table monthly CSV into a list of tidy records.

    Each cell (row label x month column) becomes one record whose variable
    name is produced by ``variable_name_func(row_label)``; shared CLiMIS
    metadata is stamped on via ``set_defaults``.
    """
    table = pd.read_csv(
        filename, index_col=0, names=range(12), header=0, skipinitialspace=True
    )
    records = []
    for row_label in table.index:
        for month_col in table.columns:
            entry = {
                "Variable": variable_name_func(row_label),
                # Columns are 0-based; months are 1-based.
                "Month": month_col + 1,
                "Value": table.loc[row_label][month_col],
                "State": get_state_from_filename(filename, get_state_func),
                "Country": country,
            }
            set_defaults(entry)
            records.append(entry)
    return records
def set_climis_south_sudan_default_params(
    filename, df, get_state_func=lambda x: x.split("_")[-2]
):
    """Attach the standard CLiMIS South Sudan columns to *df* and return it.

    Year is parsed from the filename (``..._<year>.csv``) and State from the
    CamelCase token selected by ``get_state_func``.
    """
    year_token = filename.split(".")[0].split("_")[-1]
    df["Country"] = "South Sudan"
    df["Source"] = "CLiMIS"
    df["Year"] = int(year_token)
    df["State"] = get_state_from_filename(filename, get_state_func)
    return df
def make_livestock_prices_table(filename):
    """Build a tidy average-livestock-price table from a County x Market CSV.

    Prices are averaged over markets per (County, Month); the animal name
    is taken from the filename (``..._<animal>_<state>_<year>.csv``).
    """
    df = pd.read_csv(
        filename,
        index_col=[0, 1],
        header=0,
        names=["County", "Market"] + list(range(1, 13)),
        skipinitialspace=True,
        thousands=",",
    )
    # Long format: one row per (County, Market, Month).
    df = df.stack().reset_index(name="Value")
    df.columns = ["County", "Market", "Month", "Value"]
    # pivot_table with the default aggfunc averages prices across markets.
    df = df.pivot_table(values="Value", index=["County", "Month"])
    df = set_climis_south_sudan_default_params(filename, df)
    df["Unit"] = "SSP"
    df["Variable"] = f"Average price of {filename.split('_')[-3].lower()}"
    df = df.reset_index()
    return df
def set_defaults(record: Dict):
    """Stamp the shared CLiMIS 2017 South Sudan metadata onto *record* in place.

    Note: existing keys are overwritten, not treated as defaults.
    """
    shared_metadata = {
        "Year": 2017,
        "Country": "South Sudan",
        "Unit": "%",
        "Source": "CLiMIS",
        "County": None,
    }
    record.update(shared_metadata)
def make_group_dict(groups):
    """Pair up consecutive (header chunk, data chunk) entries into a dict.

    The header chunk's first cell becomes the key; the chunk that follows it
    is the value.
    """
    result = {}
    for header_chunk, data_chunk in grouper(groups, 2):
        result[header_chunk[0][0]] = data_chunk
    return result
def make_df_from_group(k, v, index_func):
    """Build a tidy (Variable, Month, Value) frame from one table chunk.

    *v* is a list of rows whose first element is the row label; labels are
    transformed with ``index_func(k, label)`` and the remaining month
    columns are stacked into long format.
    """
    frame = pd.DataFrame(v).set_index(0)
    frame.index = [index_func(k, label) for label in frame.index]
    tidy = frame.stack().reset_index(name="Value")
    tidy.columns = ["Variable", "Month", "Value"]
    tidy["Month"] = tidy["Month"].astype(int)
    return tidy
def process_file_with_multiple_tables(filename, header_dict):
    """Split a CSV containing several stacked tables into one tidy DataFrame.

    Header rows (rows whose non-label cells are all NaN) delimit the tables;
    *header_dict* maps each table's header text to a function that builds a
    variable name from a row label.  Returns None when no table survives.
    """
    dfs = []
    df = pd.read_csv(filename, index_col=0, names=range(12), header=0)
    # Define a grouping key function to split the CSV by the header rows
    grouping_key_function = lambda _tuple: _tuple[1][1:].isna().all()
    # Skip rows whose first data cell is empty.
    iterrows = filter(lambda r: r[1][0] != "", df.iterrows())
    key_group_tuples = groupby(iterrows, grouping_key_function)
    # Each group becomes a list of [label, cell, cell, ...] rows.
    groups = [
        [
            [x[0].strip()] + x[1].values.tolist()
            for x in list(g)
            if isinstance(x[0], str)
        ]
        for k, g in key_group_tuples
    ]
    for k, v in make_group_dict(groups).items():
        if v is not None:
            # Unknown headers fall back to using the header text itself.
            df = make_df_from_group(
                k, v, lambda k, i: header_dict.get(k.strip(), lambda x: k)(i)
            )
            df["Value"] = df["Value"].replace(" ", np.nan)
            df = df.dropna()
            df["County"] = None
            df = set_climis_south_sudan_default_params(filename, df)
            # Percentage tables: strip the % sign and record the unit.
            if len(df.Value.values) > 0 and any(
                map(lambda v: "%" in v, df["Value"].values)
            ):
                df.Value = df.Value.str.replace("%", "")
                df["Unit"] = "%"
            else:
                df["Unit"] = None
            # Price tables carry "(SSP)" in the variable name.
            if len(df["Variable"].values) > 0:
                if "SSP" in df["Variable"].values[0]:
                    df["Variable"] = (
                        df["Variable"].str.replace("\(SSP\)", "").str.strip()
                    )
                    df["Unit"] = "SSP"
            if len(df.Value.values) > 0 and "-" in df.Value.values[0]:
                # For percentage ranges, take the mean value
                df.Value = (
                    df.Value.str.strip()
                    .str.split("-")
                    .map(lambda x: list(map(float, x)))
                    .map(lambda x: np.mean(x))
                )
            dfs.append(df)
    if len(dfs) > 0:
        return pd.concat(dfs)
    else:
        return None
def process_climis_crop_production_data(data_dir: str):
    """Process CLiMIS crop production data.

    Reads the Crops_EstimatedProductionConsumptionBalance*.csv files under
    *data_dir* and returns a tidy DataFrame of net cereal production records.
    """
    # Bug fix: the glob pattern was missing the f-string prefix, so the
    # literal text "{data_dir}/..." was searched and no files were ever found.
    climis_crop_production_csvs = glob(
        f"{data_dir}/Climis South Sudan Crop Production Data/"
        "Crops_EstimatedProductionConsumptionBalance*.csv"
    )
    # NOTE(review): this .tsv is read with pandas' default comma separator;
    # confirm the file really is comma-separated despite its extension.
    state_county_df = pd.read_csv(
        "data/south_sudan_data_fewsnet.tsv", skipinitialspace=True
    )
    combined_records = []
    for f in climis_crop_production_csvs:
        # The year is encoded in the filename: ..._<year>.csv
        year = int(f.split("/")[-1].split("_")[2].split(".")[0])
        df = pd.read_csv(f).dropna()
        for i, r in df.iterrows():
            record = {
                "Year": year,
                "Month": None,
                "Source": "CLiMIS",
                "Country": "South Sudan",
            }
            region = r["State/County"].strip()
            # The region column mixes states and counties; resolve which one
            # this row refers to against the FEWSNET state/county table.
            if region.lower() in state_county_df["State"].str.lower().values:
                record["State"] = region
                record["County"] = None
            else:
                potential_states = state_county_df.loc[
                    state_county_df["County"] == region
                ]["State"]
                record["State"] = (
                    potential_states.iloc[0]
                    if len(potential_states) != 0
                    else None
                )
                record["County"] = region
            for field in r.index:
                if field != "State/County":
                    if "Net Cereal production" in field:
                        record["Variable"] = "Net Cereal Production"
                        record["Value"] = r[field]
                        # The unit appears in parentheses at the end of the header.
                        if field.split()[-1].startswith("("):
                            record["Unit"] = field.split()[-1][1:-1].lower()
                        else:
                            record["Unit"] = None
            combined_records.append(record)
    df = pd.DataFrame(combined_records)
    return df
def process_climis_livestock_data(data_dir: str):
    """Process CLiMIS livestock data.

    Walks the per-topic subdirectories of the CLiMIS livestock folder
    (body condition, production, diseases, ownership, migration, pasture,
    water sources, losses, market prices) and returns one combined tidy
    DataFrame of indicator records for 2017.
    """
    records = []
    livestock_data_dir = f"{data_dir}/Climis South Sudan Livestock Data"
    # Single-table files: one row per body-condition level per animal type.
    for filename in glob(
        f"{livestock_data_dir}/Livestock Body Condition/*2017.csv"
    ):
        records += process_file_with_single_table(
            filename,
            lambda ind: f"Percentage of {filename.split('_')[-3].lower()} with body condition {ind.lower()}",
            lambda f: f.split("_")[-2],
        )
    for filename in glob(
        f"{livestock_data_dir}/Livestock Production/*2017.csv"
    ):
        records += process_file_with_single_table(
            filename,
            lambda ind: "Percentage of householding at least milking one of their livestocks",
            lambda f: f.split("_")[1],
        )
    # Maps the acronyms used as table headers in the disease CSVs to
    # full disease names for the generated indicator strings.
    disease_acronym_dict = {
        "FMD": "Foot and Mouth Disease (FMD)",
        "LSD": "Lumpy Skin Disease (LSD)",
        "CBPP": "Contagious Bovine Pleuropneumonia (CBPP)",
        "CCPP": "Contagious Caprine Pleuropneumonia (CCPP)",
        "NC": "NC",
        "PPR": "Peste des Petits Ruminants (PPR)",
        "Others": "Other diseases",
    }
    func = (
        lambda k, i: f"Percentage of livestock with {disease_acronym_dict[k]} that are {i.lower().strip()}"
    )
    # partial(func, k) freezes the acronym so each header gets its own namer.
    livestock_disease_header_dict = {
        k: partial(func, k) for k in disease_acronym_dict
    }
    livestock_migration_header_dict = {
        "Livestock migration": lambda i: f"Percentage of livestock migrating {i.split()[-1].lower()}",
        "Distance covered": lambda i: "Distance covered by migrating livestock",
        "Proportion of livestock that migrated": lambda i: "Percentage of livestock that migrated",
        "Migration normal at this time of the year": lambda i: f"Migration normal at this time of year, {i}",
        "Duration in months when the migrated animals are expected to be back after": lambda i: "Duration in months when the migrated animals are expected to be back after",
        "Reasons for livestock migration": lambda i: f"Percentage of livestock migrating due to {i.lower()}",
    }
    def process_directory(dirname, header_dict):
        # Parse every 2017 CSV in the topic directory, dropping files for
        # which process_file_with_multiple_tables yields no tables (None).
        return pd.concat(
            [
                df
                for df in [
                    process_file_with_multiple_tables(f, header_dict)
                    for f in glob(f"{livestock_data_dir}/{dirname}/*2017.csv")
                ]
                if df is not None
            ]
        )
    func2 = (
        lambda k, i: f"{k.replace('animals', i.lower()).replace('stock', 'stock of '+i.lower()).replace('animal', i.lower())}"
    )
    livestock_ownership_headers = [
        "Average current stock per household",
        "Average number of animals born per household during last 4 weeks",
        "Average number of animals acquired per household during last 4 weeks (dowry, purchase, gift)",
        "Average number of animals given out as bride price/gift per household during last 4 weeks per household",
        "Average number of animals sold per household during last 4 weeks household",
        "Average price of animal sold (SSP)",
        "Average number of animals exchanged for grain per household during last 4 weeks",
        "Average number of animals died/slaughtered/lost per household during last 4 weeks",
    ]
    livestock_ownership_header_dict = {
        k: partial(func2, k) for k in livestock_ownership_headers
    }
    ownership_df = process_directory(
        "Livestock Ownership", livestock_ownership_header_dict
    )
    disease_df = process_directory(
        "Livestock Diseases", livestock_disease_header_dict
    )
    livestock_migration_df = process_directory(
        "Livestock Migration", livestock_migration_header_dict
    )
    livestock_pasture_header_dict = {
        "Pasture condtion": lambda i: f"Percentage of livestock pasture in {i.lower()} condition",
        "Pasture condition compared to similar time in a normal year": lambda i: f"Percentage of livestock pasture in {i.lower()} condition compared to a similar time in a normal year",
        "Browse condition": lambda i: f"Percentage of livestock pasture in {i.lower()} browse condition",
        "Browse condition compared to similar time in a normal year": lambda i: f"Percentage of livestock pasture in {i.lower()} browse condition compared to a similar time in a normal year",
        "Presence of constraints in accessing forage": lambda i: f"Percentage reporting the {('presence' if i=='Yes' else 'absence')} of constraints in accessing forage",
        "Main forage constraints": lambda i: f"Percentage reporting {i.lower()} as the main forage constraint",
    }
    livestock_pasture_df = process_directory(
        "Livestock Pasture", livestock_pasture_header_dict
    )
    livestock_water_sources_header_dict = {
        "Main water sources": lambda i: f"Percentage of livestock whose main water source is {i.lower()}",
        "Number of days livestock have been watered in the last 7 days": lambda i: f"Number of days {i.lower()} have been watered in the last 7 days",
    }
    livestock_water_sources_df = process_directory(
        "Livestock Water Sources", livestock_water_sources_header_dict
    )
    for filename in glob(f"{livestock_data_dir}/Livestock Loss/*2017.csv"):
        records += process_file_with_single_table(
            filename,
            lambda ind: f"Percentage of {filename.split('_')[-3].lower()} loss accounted for by {ind.lower()}",
            lambda f: f.split("_")[-2],
        )
    # Strip literal "%" signs so Value columns can be treated numerically.
    for record in records:
        if isinstance(record["Value"], str):
            record["Value"] = record["Value"].replace("%", "")
    livestock_prices_df = pd.concat(
        [
            make_livestock_prices_table(f)
            for f in glob(
                f"{livestock_data_dir}/Livestock Market Prices/*2017.csv"
            )
        ]
    )
    # sort=True aligns the differing column sets across the sub-frames.
    climis_livestock_data_df = pd.concat(
        [
            pd.DataFrame(records),
            disease_df,
            ownership_df,
            livestock_prices_df,
            livestock_migration_df,
            livestock_pasture_df,
            livestock_water_sources_df,
        ],
        sort=True
    )
    return climis_livestock_data_df
def process_climis_import_data(data_dir: str) -> pd.DataFrame:
    """Build a tidy table of monthly cereal-grain imports.

    Each CLiMIS import CSV has years as rows and months 1-12 as columns;
    values are summed per (Year, Month) across all files.
    """
    monthly_frames = []
    for csv_path in glob(f"{data_dir}/CLiMIS Import Data/*.csv"):
        wide = pd.read_csv(csv_path, names=range(1, 13), header=0, thousands=",")
        tidy = wide.stack().reset_index(name="Value")
        tidy.columns = ["Year", "Month", "Value"]
        tidy = tidy.astype({"Month": int, "Year": int})
        monthly_frames.append(tidy)
    # Aggregate across files: one row per (Year, Month), values summed.
    combined = pd.concat(monthly_frames)
    df = combined.pivot_table(
        values="Value", index=["Year", "Month"], aggfunc=np.sum
    ).reset_index()
    df.columns = ["Year", "Month", "Value"]
    df["Variable"] = "Total amount of cereal grains imported"
    df["Unit"] = "metric tonne"
    df["Country"] = "South Sudan"
    df["County"] = None
    df["State"] = None
    return df
def process_climis_rainfall_data(data_dir: str) -> pd.DataFrame:
    """Combine per-state rainfall CSVs with the 6-month-summary XLSX into
    one tidy Variable/Year/Month/Value frame (unit: millimeters)."""
    dfs = []
    # Read CSV files first: one file per state, named "<State> <Year> Rainfall...".
    for f in glob(f"{data_dir}/CLiMIS South Sudan Rainfall Data in"
                  " Millimeters/*.csv"):
        # Get the name of the table without path and extension
        table_name = os.path.basename(f)[:-4]
        # Get state and year from the filename pattern groups
        pattern = r'^(.*) ([0-9]+) Rainfall'
        state, year = re.match(pattern, table_name).groups()
        df = pd.read_csv(f, header=0, thousands=",")
        cols = ['Variable', 'Year', 'Month', 'Value', 'Unit', 'Source',
                'State', 'County', 'Country']
        df_new = pd.DataFrame(columns=cols)
        df_new['Month'] = range(1, 13)
        df_new['Year'] = int(year)
        # NOTE: the source CSV column name really has a trailing space.
        df_new['Value'] = df['monthly rainfall data ']
        df_new['Variable'] = 'Rainfall'
        df_new['Unit'] = 'millimeters'
        df_new['County'] = None
        df_new['State'] = state
        df_new['Source'] = 'CLiMIS'
        df_new['Country'] = 'South Sudan'
        dfs.append(df_new)
    df1 = pd.concat(dfs)
    # Read XLSX file next: county-level rows with 12 monthly columns.
    fname = f'{data_dir}/CLiMIS South Sudan Rainfall Data in Millimeters/' + \
            'Rainfall-Early_Warning_6month_Summary-2017-data_table.xlsx'
    df = pd.read_excel(fname, sheet_name='Rainfall Data', header=1)
    cols = ['Variable', 'Year', 'Month', 'Value', 'Unit', 'Source',
            'State', 'County', 'Country']
    df_new = pd.DataFrame(columns=cols)
    states = []
    counties = []
    years = []
    months = []
    values = []
    for row in df.itertuples():
        # Columns 1-3 are state/county/year; months follow at offsets 4..15.
        state, county, year = row[1:4]
        for month in range(1,13):
            value = row[3 + month]
            if pd.isnull(value):
                continue
            states.append(state)
            counties.append(county)
            years.append(year)
            months.append(month)
            values.append(value)
    df_new['Year'] = years
    df_new['Month'] = months
    df_new['Value'] = values
    df_new['County'] = counties
    df_new['State'] = states
    df_new['Variable'] = 'Rainfall'
    df_new['Unit'] = 'millimeters'
    df_new['Source'] = 'CLiMIS'
    df_new['Country'] = 'South Sudan'
    df = pd.concat([df1, df_new])
    return df
def process_UNHCR_data(data_dir: str):
    """Load UNHCR monthly refugee counts into the common indicator format.

    Reads ``RefugeeData.tsv`` (date-indexed, with an ``individuals`` count
    column) and returns a DataFrame with the shared Variable/Value/metadata
    columns used by ``create_combined_table``.
    """
    # FIX: `infer_datetime_format` was deprecated in pandas 1.5 and removed
    # in pandas 2.2; `parse_dates=True` alone parses the date index.
    df = pd.read_table(f"{data_dir}/UNHCR Refugee Data/RefugeeData.tsv",
                       index_col=0, parse_dates=True)
    df["Year"] = df.index.year
    df["Month"] = df.index.month
    df.rename(columns={"individuals": "Value"}, inplace=True)
    df["Country"] = "South Sudan"
    df["State"] = None
    df["County"] = None
    df["Source"] = "UNHCR"
    df["Unit"] = None
    df["Variable"] = "Number of refugees"
    # The raw epoch column duplicates the parsed date index.
    del df["unix_timestamp"]
    return df
def create_combined_table(data_dir: str, columns: List[str]) -> pd.DataFrame:
    """Assemble every indicator source under *data_dir* into one table,
    restricted (and ordered) to *columns*."""
    climis_crop_production_df = process_climis_crop_production_data(data_dir)
    climis_livestock_data_df = process_climis_livestock_data(data_dir)
    climis_import_data_df = process_climis_import_data(data_dir)
    climis_rainfall_data_df = process_climis_rainfall_data(data_dir)
    UNHCR_data_df = process_UNHCR_data(data_dir)
    # Severe acute malnutrition and inflation rate indicators from PDFs
    pdf_indicators_df = pd.read_table(f"{data_dir}/indicator_data_from_pdfs.tsv")
    # sort=True aligns differing column sets across the source frames.
    df = pd.concat(
        [
            climis_crop_production_df,
            climis_livestock_data_df,
            climis_import_data_df,
            climis_rainfall_data_df,
            pdf_indicators_df,
            UNHCR_data_df,
        ],
        sort=True,
    )
    return df[columns]
if __name__ == "__main__":
    # Output column order for the combined indicator TSV.
    columns = [
        "Variable",
        "Year",
        "Month",
        "Value",
        "Unit",
        "Source",
        "State",
        "County",
        "Country",
    ]
    # NOTE: rebinding `data_dir` shadows the Path imported from delphi.paths.
    data_dir = str(data_dir / "raw" / "wm_12_month_evaluation")
    df = create_combined_table(data_dir, columns)
    df["Year"] = df["Year"].astype(int)
    # First CLI argument is the destination TSV path.
    df.to_csv(sys.argv[1], index=False, sep="\t")
| 35.496016 | 191 | 0.589483 |
import os
import re
import sys
import numpy as np
import pandas as pd
from pprint import pprint
from glob import glob
from typing import List, Dict
from delphi.utils.shell import cd
from delphi.paths import data_dir, south_sudan_data
from delphi.utils.fp import grouper
from functools import partial
from itertools import groupby
def get_state_from_filename(filename, get_state_func):
    """Pull the state token out of *filename* via *get_state_func* and split
    its CamelCase into words, e.g. "CentralEquatoria" -> "Central Equatoria".
    """
    camelcase_state = get_state_func(filename)
    words = re.findall(r"[A-Z][^A-Z]*", camelcase_state)
    return " ".join(words)
def process_file_with_single_table(
    filename, variable_name_func, get_state_func, country="South Sudan"
):
    """Read a one-table CLiMIS CSV (rows = indicators, 12 month columns)
    and return a list of record dicts, one per (indicator, month) cell.

    *variable_name_func* maps a row label to the indicator name;
    *get_state_func* extracts the state token from the filename.
    """
    records = []
    df = pd.read_csv(
        filename, index_col=0, names=range(12), header=0, skipinitialspace=True
    )
    for ind in df.index:
        for column in df.columns:
            record = {
                "Variable": variable_name_func(ind),
                "Month": column + 1,  # columns are 0-based month offsets
                "Value": df.loc[ind][column],
                "State": get_state_from_filename(filename, get_state_func),
                "Country": country,
            }
            # NOTE(review): set_defaults overwrites "Country" (and sets
            # "Unit"/"Year"/etc.), so the `country` parameter is effectively
            # ignored — confirm this is intended.
            set_defaults(record)
            records.append(record)
    return records
def set_climis_south_sudan_default_params(
    filename, df, get_state_func=lambda x: x.split("_")[-2]
):
    """Attach the standard CLiMIS metadata columns to *df* in place.

    The year is taken from the last underscore-separated token of the
    filename stem; the state from the token selected by *get_state_func*.
    Returns the same DataFrame for chaining.
    """
    df["Country"] = "South Sudan"
    df["Source"] = "CLiMIS"
    # e.g. "..._CentralEquatoria_2017.csv" -> 2017
    df["Year"] = int(filename.split(".")[0].split("_")[-1])
    df["State"] = get_state_from_filename(filename, get_state_func)
    return df
def make_livestock_prices_table(filename):
    """Turn a CLiMIS market-prices CSV (County, Market, 12 month columns)
    into a tidy table of average prices per (County, Month) in SSP."""
    df = pd.read_csv(
        filename,
        index_col=[0, 1],
        header=0,
        names=["County", "Market"] + list(range(1, 13)),
        skipinitialspace=True,
        thousands=",",
    )
    df = df.stack().reset_index(name="Value")
    df.columns = ["County", "Market", "Month", "Value"]
    # Average over markets within each county.
    df = df.pivot_table(values="Value", index=["County", "Month"])
    df = set_climis_south_sudan_default_params(filename, df)
    df["Unit"] = "SSP"
    # Animal type comes from the filename, e.g. "..._Cattle_<state>_2017.csv".
    df["Variable"] = f"Average price of {filename.split('_')[-3].lower()}"
    df = df.reset_index()
    return df
def set_defaults(record: Dict):
    """Fill *record* in place with the default CLiMIS 2017 metadata fields
    (overwriting any existing values for these keys)."""
    record["Year"] = 2017
    record["Country"] = "South Sudan"
    record["Unit"] = "%"
    record["Source"] = "CLiMIS"
    record["County"] = None
def make_group_dict(groups):
    """Pair consecutive (header, table-rows) groups into a dict keyed by the
    first cell of each header group."""
    group_dict = {}
    for header_group, table_rows in grouper(groups, 2):
        group_dict[header_group[0][0]] = table_rows
    return group_dict
def make_df_from_group(k, v, index_func):
    """Convert one (key *k*, rows *v*) table group into a tidy frame with
    Variable/Month/Value columns; *index_func* names each variable."""
    table = pd.DataFrame(v).set_index(0)
    # Map each raw row label to a full indicator name.
    table.index = [index_func(k, row_label) for row_label in table.index]
    tidy = table.stack().reset_index(name="Value")
    tidy.columns = ["Variable", "Month", "Value"]
    tidy["Month"] = tidy["Month"].astype(int)
    return tidy
def process_file_with_multiple_tables(filename, header_dict):
    """Parse a CLiMIS CSV that stacks several small tables in one file.

    Rows are grouped into alternating (header, data) sections; each data
    section becomes a tidy Variable/Month/Value frame whose variable names
    come from the matching entry in *header_dict*.

    Returns the concatenated frame, or None when the file yields no tables.
    """
    dfs = []
    df = pd.read_csv(filename, index_col=0, names=range(12), header=0)
    # A row whose non-index cells are all NaN marks a section header.
    grouping_key_function = lambda _tuple: _tuple[1][1:].isna().all()
    iterrows = filter(lambda r: r[1][0] != "", df.iterrows())
    key_group_tuples = groupby(iterrows, grouping_key_function)
    groups = [
        [
            [x[0].strip()] + x[1].values.tolist()
            for x in list(g)
            if isinstance(x[0], str)
        ]
        for k, g in key_group_tuples
    ]
    for k, v in make_group_dict(groups).items():
        if v is not None:
            df = make_df_from_group(
                k, v, lambda k, i: header_dict.get(k.strip(), lambda x: k)(i)
            )
            df["Value"] = df["Value"].replace(" ", np.nan)
            df = df.dropna()
            df["County"] = None
            df = set_climis_south_sudan_default_params(filename, df)
            # Percentage tables carry "%" in their value cells.
            if len(df.Value.values) > 0 and any(
                map(lambda v: "%" in v, df["Value"].values)
            ):
                df.Value = df.Value.str.replace("%", "")
                df["Unit"] = "%"
            else:
                df["Unit"] = None
            if len(df["Variable"].values) > 0:
                if "SSP" in df["Variable"].values[0]:
                    # FIX: the old pattern "\(SSP\)" was a non-raw regex;
                    # besides the invalid-escape warning, str.replace defaults
                    # to regex=False in pandas >= 2.0, making it a no-op.
                    # Replace the literal "(SSP)" marker explicitly.
                    df["Variable"] = (
                        df["Variable"]
                        .str.replace("(SSP)", "", regex=False)
                        .str.strip()
                    )
                    df["Unit"] = "SSP"
            # Range values like "10 - 20" are collapsed to their midpoint.
            if len(df.Value.values) > 0 and "-" in df.Value.values[0]:
                df.Value = (
                    df.Value.str.strip()
                    .str.split("-")
                    .map(lambda x: list(map(float, x)))
                    .map(lambda x: np.mean(x))
                )
            dfs.append(df)
    if len(dfs) > 0:
        return pd.concat(dfs)
    else:
        return None
def process_climis_crop_production_data(data_dir: str):
    """Process CLiMIS crop production CSVs into tidy indicator records.

    Returns a DataFrame with one row per region/year observation, resolving
    each region to a state/county pair via the FEWSNET lookup table.
    """
    # FIX: the glob pattern was missing its f-prefix, so the literal string
    # "{data_dir}/..." was searched and no files were ever found.
    climis_crop_production_csvs = glob(
        f"{data_dir}/Climis South Sudan Crop Production Data/"
        "Crops_EstimatedProductionConsumptionBalance*.csv"
    )
    # Lookup table mapping counties to states (read with the default comma
    # separator, matching the file's actual format despite its .tsv name).
    state_county_df = pd.read_csv(
        "data/south_sudan_data_fewsnet.tsv", skipinitialspace=True
    )
    combined_records = []
    for f in climis_crop_production_csvs:
        # Filenames look like Crops_..._<year>.csv; pull the year out.
        year = int(f.split("/")[-1].split("_")[2].split(".")[0])
        df = pd.read_csv(f).dropna()
        for i, r in df.iterrows():
            record = {
                "Year": year,
                "Month": None,
                "Source": "CLiMIS",
                "Country": "South Sudan",
            }
            region = r["State/County"].strip()
            # The region column mixes states and counties; try states first,
            # otherwise look the county up to recover its state.
            if region.lower() in state_county_df["State"].str.lower().values:
                record["State"] = region
                record["County"] = None
            else:
                potential_states = state_county_df.loc[
                    state_county_df["County"] == region
                ]["State"]
                record["State"] = (
                    potential_states.iloc[0]
                    if len(potential_states) != 0
                    else None
                )
                record["County"] = region
            for field in r.index:
                if field != "State/County":
                    if "Net Cereal production" in field:
                        record["Variable"] = "Net Cereal Production"
                    record["Value"] = r[field]
                    # A trailing "(unit)" token in the header gives the unit.
                    if field.split()[-1].startswith("("):
                        record["Unit"] = field.split()[-1][1:-1].lower()
                    else:
                        record["Unit"] = None
            combined_records.append(record)
    df = pd.DataFrame(combined_records)
    return df
def process_climis_livestock_data(data_dir: str):
    """Process CLiMIS livestock data.

    Walks the per-topic subdirectories of the CLiMIS livestock folder
    (body condition, production, diseases, ownership, migration, pasture,
    water sources, losses, market prices) and returns one combined tidy
    DataFrame of indicator records for 2017.
    """
    records = []
    livestock_data_dir = f"{data_dir}/Climis South Sudan Livestock Data"
    # Single-table files: one row per body-condition level per animal type.
    for filename in glob(
        f"{livestock_data_dir}/Livestock Body Condition/*2017.csv"
    ):
        records += process_file_with_single_table(
            filename,
            lambda ind: f"Percentage of {filename.split('_')[-3].lower()} with body condition {ind.lower()}",
            lambda f: f.split("_")[-2],
        )
    for filename in glob(
        f"{livestock_data_dir}/Livestock Production/*2017.csv"
    ):
        records += process_file_with_single_table(
            filename,
            lambda ind: "Percentage of householding at least milking one of their livestocks",
            lambda f: f.split("_")[1],
        )
    # Maps the acronyms used as table headers in the disease CSVs to
    # full disease names for the generated indicator strings.
    disease_acronym_dict = {
        "FMD": "Foot and Mouth Disease (FMD)",
        "LSD": "Lumpy Skin Disease (LSD)",
        "CBPP": "Contagious Bovine Pleuropneumonia (CBPP)",
        "CCPP": "Contagious Caprine Pleuropneumonia (CCPP)",
        "NC": "NC",
        "PPR": "Peste des Petits Ruminants (PPR)",
        "Others": "Other diseases",
    }
    func = (
        lambda k, i: f"Percentage of livestock with {disease_acronym_dict[k]} that are {i.lower().strip()}"
    )
    # partial(func, k) freezes the acronym so each header gets its own namer.
    livestock_disease_header_dict = {
        k: partial(func, k) for k in disease_acronym_dict
    }
    livestock_migration_header_dict = {
        "Livestock migration": lambda i: f"Percentage of livestock migrating {i.split()[-1].lower()}",
        "Distance covered": lambda i: "Distance covered by migrating livestock",
        "Proportion of livestock that migrated": lambda i: "Percentage of livestock that migrated",
        "Migration normal at this time of the year": lambda i: f"Migration normal at this time of year, {i}",
        "Duration in months when the migrated animals are expected to be back after": lambda i: "Duration in months when the migrated animals are expected to be back after",
        "Reasons for livestock migration": lambda i: f"Percentage of livestock migrating due to {i.lower()}",
    }
    def process_directory(dirname, header_dict):
        # Parse every 2017 CSV in the topic directory, dropping files for
        # which process_file_with_multiple_tables yields no tables (None).
        return pd.concat(
            [
                df
                for df in [
                    process_file_with_multiple_tables(f, header_dict)
                    for f in glob(f"{livestock_data_dir}/{dirname}/*2017.csv")
                ]
                if df is not None
            ]
        )
    func2 = (
        lambda k, i: f"{k.replace('animals', i.lower()).replace('stock', 'stock of '+i.lower()).replace('animal', i.lower())}"
    )
    livestock_ownership_headers = [
        "Average current stock per household",
        "Average number of animals born per household during last 4 weeks",
        "Average number of animals acquired per household during last 4 weeks (dowry, purchase, gift)",
        "Average number of animals given out as bride price/gift per household during last 4 weeks per household",
        "Average number of animals sold per household during last 4 weeks household",
        "Average price of animal sold (SSP)",
        "Average number of animals exchanged for grain per household during last 4 weeks",
        "Average number of animals died/slaughtered/lost per household during last 4 weeks",
    ]
    livestock_ownership_header_dict = {
        k: partial(func2, k) for k in livestock_ownership_headers
    }
    ownership_df = process_directory(
        "Livestock Ownership", livestock_ownership_header_dict
    )
    disease_df = process_directory(
        "Livestock Diseases", livestock_disease_header_dict
    )
    livestock_migration_df = process_directory(
        "Livestock Migration", livestock_migration_header_dict
    )
    livestock_pasture_header_dict = {
        "Pasture condtion": lambda i: f"Percentage of livestock pasture in {i.lower()} condition",
        "Pasture condition compared to similar time in a normal year": lambda i: f"Percentage of livestock pasture in {i.lower()} condition compared to a similar time in a normal year",
        "Browse condition": lambda i: f"Percentage of livestock pasture in {i.lower()} browse condition",
        "Browse condition compared to similar time in a normal year": lambda i: f"Percentage of livestock pasture in {i.lower()} browse condition compared to a similar time in a normal year",
        "Presence of constraints in accessing forage": lambda i: f"Percentage reporting the {('presence' if i=='Yes' else 'absence')} of constraints in accessing forage",
        "Main forage constraints": lambda i: f"Percentage reporting {i.lower()} as the main forage constraint",
    }
    livestock_pasture_df = process_directory(
        "Livestock Pasture", livestock_pasture_header_dict
    )
    livestock_water_sources_header_dict = {
        "Main water sources": lambda i: f"Percentage of livestock whose main water source is {i.lower()}",
        "Number of days livestock have been watered in the last 7 days": lambda i: f"Number of days {i.lower()} have been watered in the last 7 days",
    }
    livestock_water_sources_df = process_directory(
        "Livestock Water Sources", livestock_water_sources_header_dict
    )
    for filename in glob(f"{livestock_data_dir}/Livestock Loss/*2017.csv"):
        records += process_file_with_single_table(
            filename,
            lambda ind: f"Percentage of {filename.split('_')[-3].lower()} loss accounted for by {ind.lower()}",
            lambda f: f.split("_")[-2],
        )
    # Strip literal "%" signs so Value columns can be treated numerically.
    for record in records:
        if isinstance(record["Value"], str):
            record["Value"] = record["Value"].replace("%", "")
    livestock_prices_df = pd.concat(
        [
            make_livestock_prices_table(f)
            for f in glob(
                f"{livestock_data_dir}/Livestock Market Prices/*2017.csv"
            )
        ]
    )
    # sort=True aligns the differing column sets across the sub-frames.
    climis_livestock_data_df = pd.concat(
        [
            pd.DataFrame(records),
            disease_df,
            ownership_df,
            livestock_prices_df,
            livestock_migration_df,
            livestock_pasture_df,
            livestock_water_sources_df,
        ],
        sort=True
    )
    return climis_livestock_data_df
def process_climis_import_data(data_dir: str) -> pd.DataFrame:
    """Build a tidy table of monthly cereal-grain imports.

    Each CLiMIS import CSV has years as rows and months 1-12 as columns;
    values are summed per (Year, Month) across all files.
    """
    dfs = []
    for f in glob(f"{data_dir}/CLiMIS Import Data/*.csv"):
        df = pd.read_csv(f, names=range(1, 13), header=0, thousands=",")
        df = df.stack().reset_index(name="Value")
        df.columns = ["Year", "Month", "Value"]
        df["Month"] = df["Month"].astype(int)
        df["Year"] = df["Year"].astype(int)
        dfs.append(df)
    # Aggregate across files: one row per (Year, Month), values summed.
    df = (
        pd.concat(dfs)
        .pivot_table(values="Value", index=["Year", "Month"], aggfunc=np.sum)
        .reset_index()
    )
    df.columns = ["Year", "Month", "Value"]
    df["Variable"] = "Total amount of cereal grains imported"
    df["Unit"] = "metric tonne"
    df["Country"] = "South Sudan"
    df["County"] = None
    df["State"] = None
    return df
def process_climis_rainfall_data(data_dir: str) -> pd.DataFrame:
    """Combine per-state rainfall CSVs with the 6-month-summary XLSX into
    one tidy Variable/Year/Month/Value frame (unit: millimeters)."""
    dfs = []
    # CSVs: one file per state, named "<State> <Year> Rainfall...".
    for f in glob(f"{data_dir}/CLiMIS South Sudan Rainfall Data in"
                  " Millimeters/*.csv"):
        # Table name without path and extension.
        table_name = os.path.basename(f)[:-4]
        # Extract state and year from the filename pattern groups.
        pattern = r'^(.*) ([0-9]+) Rainfall'
        state, year = re.match(pattern, table_name).groups()
        df = pd.read_csv(f, header=0, thousands=",")
        cols = ['Variable', 'Year', 'Month', 'Value', 'Unit', 'Source',
                'State', 'County', 'Country']
        df_new = pd.DataFrame(columns=cols)
        df_new['Month'] = range(1, 13)
        df_new['Year'] = int(year)
        # NOTE: the source CSV column name really has a trailing space.
        df_new['Value'] = df['monthly rainfall data ']
        df_new['Variable'] = 'Rainfall'
        df_new['Unit'] = 'millimeters'
        df_new['County'] = None
        df_new['State'] = state
        df_new['Source'] = 'CLiMIS'
        df_new['Country'] = 'South Sudan'
        dfs.append(df_new)
    df1 = pd.concat(dfs)
    # XLSX: county-level rows with 12 monthly columns.
    fname = f'{data_dir}/CLiMIS South Sudan Rainfall Data in Millimeters/' + \
            'Rainfall-Early_Warning_6month_Summary-2017-data_table.xlsx'
    df = pd.read_excel(fname, sheet_name='Rainfall Data', header=1)
    cols = ['Variable', 'Year', 'Month', 'Value', 'Unit', 'Source',
            'State', 'County', 'Country']
    df_new = pd.DataFrame(columns=cols)
    states = []
    counties = []
    years = []
    months = []
    values = []
    for row in df.itertuples():
        # Columns 1-3 are state/county/year; months follow at offsets 4..15.
        state, county, year = row[1:4]
        for month in range(1,13):
            value = row[3 + month]
            if pd.isnull(value):
                continue
            states.append(state)
            counties.append(county)
            years.append(year)
            months.append(month)
            values.append(value)
    df_new['Year'] = years
    df_new['Month'] = months
    df_new['Value'] = values
    df_new['County'] = counties
    df_new['State'] = states
    df_new['Variable'] = 'Rainfall'
    df_new['Unit'] = 'millimeters'
    df_new['Source'] = 'CLiMIS'
    df_new['Country'] = 'South Sudan'
    df = pd.concat([df1, df_new])
    return df
def process_UNHCR_data(data_dir: str):
    """Load UNHCR monthly refugee counts into the common indicator format.

    Reads ``RefugeeData.tsv`` (date-indexed, with an ``individuals`` count
    column) and returns a DataFrame with the shared metadata columns.
    """
    # NOTE(review): `infer_datetime_format` was deprecated in pandas 1.5 and
    # removed in pandas 2.2 — this call fails on modern pandas; confirm the
    # pinned pandas version or drop the argument.
    df = pd.read_table(f"{data_dir}/UNHCR Refugee Data/RefugeeData.tsv",
            index_col=0,
            parse_dates=True, infer_datetime_format=True)
    df["Year"] = df.index.year
    df["Month"] = df.index.month
    df.rename(columns = {"individuals":"Value"}, inplace=True)
    df["Country"] = "South Sudan"
    df["State"] = None
    df["County"] = None
    df["Source"] = "UNHCR"
    df["Unit"] = None
    df["Variable"] = "Number of refugees"
    # The raw epoch column duplicates the parsed date index.
    del df["unix_timestamp"]
    return df
def create_combined_table(data_dir: str, columns: List[str]) -> pd.DataFrame:
    """Assemble every indicator source under *data_dir* into one table,
    restricted (and ordered) to *columns*."""
    climis_crop_production_df = process_climis_crop_production_data(data_dir)
    climis_livestock_data_df = process_climis_livestock_data(data_dir)
    climis_import_data_df = process_climis_import_data(data_dir)
    climis_rainfall_data_df = process_climis_rainfall_data(data_dir)
    UNHCR_data_df = process_UNHCR_data(data_dir)
    # Indicators extracted from PDF reports (pre-built TSV).
    pdf_indicators_df = pd.read_table(f"{data_dir}/indicator_data_from_pdfs.tsv")
    # sort=True aligns differing column sets across the source frames.
    df = pd.concat(
        [
            climis_crop_production_df,
            climis_livestock_data_df,
            climis_import_data_df,
            climis_rainfall_data_df,
            pdf_indicators_df,
            UNHCR_data_df,
        ],
        sort=True,
    )
    return df[columns]
if __name__ == "__main__":
    # Output column order for the combined indicator TSV.
    columns = [
        "Variable",
        "Year",
        "Month",
        "Value",
        "Unit",
        "Source",
        "State",
        "County",
        "Country",
    ]
    # NOTE: rebinding `data_dir` shadows the Path imported from delphi.paths.
    data_dir = str(data_dir / "raw" / "wm_12_month_evaluation")
    df = create_combined_table(data_dir, columns)
    df["Year"] = df["Year"].astype(int)
    # First CLI argument is the destination TSV path.
    df.to_csv(sys.argv[1], index=False, sep="\t")
| true | true |
f7fce80c40b6f8a65c0cd284f2acc0ef0b071d44 | 11,794 | py | Python | gooddata-metadata-client/gooddata_metadata_client/model/json_api_metric_in_document.py | hkad98/gooddata-python-sdk | 64942080ecb44c2d8e914e57f7a591daa6cca205 | [
"MIT"
] | null | null | null | gooddata-metadata-client/gooddata_metadata_client/model/json_api_metric_in_document.py | hkad98/gooddata-python-sdk | 64942080ecb44c2d8e914e57f7a591daa6cca205 | [
"MIT"
] | null | null | null | gooddata-metadata-client/gooddata_metadata_client/model/json_api_metric_in_document.py | hkad98/gooddata-python-sdk | 64942080ecb44c2d8e914e57f7a591daa6cca205 | [
"MIT"
] | null | null | null | """
OpenAPI definition
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v0
Contact: support@gooddata.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from gooddata_metadata_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from gooddata_metadata_client.exceptions import ApiAttributeError
def lazy_import():
    """Import JsonApiMetricIn lazily (at first use) and expose it in this
    module's globals, avoiding a circular import between generated models."""
    from gooddata_metadata_client.model.json_api_metric_in import JsonApiMetricIn
    globals()['JsonApiMetricIn'] = JsonApiMetricIn
class JsonApiMetricInDocument(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    # No enum-constrained properties on this model.
    allowed_values = {
    }
    # No length/range/regex validations on this model.
    validations = {
    }
    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501
    # null is not an accepted value for this model.
    _nullable = False
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            'data': (JsonApiMetricIn,),  # noqa: E501
        }
    @cached_property
    def discriminator():
        return None
    attribute_map = {
        'data': 'data',  # noqa: E501
    }
    read_only_vars = {
    }
    # Not a composed (allOf/oneOf/anyOf) schema.
    _composed_schemas = {}
    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, data, *args, **kwargs):  # noqa: E501
        """JsonApiMetricInDocument - a model defined in OpenAPI
        Args:
            data (JsonApiMetricIn):
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', True)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            for arg in args:
                if isinstance(arg, dict):
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.data = data
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self
    # Internal bookkeeping attributes that are always set on an instance.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])
    @convert_js_args_to_python_args
    def __init__(self, data, *args, **kwargs):  # noqa: E501
        """JsonApiMetricInDocument - a model defined in OpenAPI
        Args:
            data (JsonApiMetricIn):
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            for arg in args:
                if isinstance(arg, dict):
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.data = data
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                     f"class with read only attributes.")
import re
import sys
from gooddata_metadata_client.model_utils import (
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from gooddata_metadata_client.exceptions import ApiAttributeError
def lazy_import():
    """Import JsonApiMetricIn lazily (at first use) and expose it in this
    module's globals, avoiding a circular import between generated models."""
    from gooddata_metadata_client.model.json_api_metric_in import JsonApiMetricIn
    globals()['JsonApiMetricIn'] = JsonApiMetricIn
class JsonApiMetricInDocument(ModelNormal):
    """Auto-generated OpenAPI model: JSON:API document wrapping a single
    JsonApiMetricIn resource under its ``data`` key. Do not edit manually."""
    # No enum-constrained properties on this model.
    allowed_values = {
    }
    # No length/range/regex validations on this model.
    validations = {
    }
    @cached_property
    def additional_properties_type():
        # Must be a property: JsonApiMetricIn is imported lazily after load.
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)
    # null is not an accepted value for this model.
    _nullable = False
    @cached_property
    def openapi_types():
        # Maps attribute name -> accepted type tuple.
        lazy_import()
        return {
            'data': (JsonApiMetricIn,),
        }
    @cached_property
    def discriminator():
        return None
    # Maps python attribute name -> JSON key.
    attribute_map = {
        'data': 'data',
    }
    read_only_vars = {
    }
    # Not a composed (allOf/oneOf/anyOf) schema.
    _composed_schemas = {}
    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, data, *args, **kwargs):
        """Deserialization constructor: build an instance from received API
        data (serialized property names), bypassing read-only restrictions.

        Args:
            data (JsonApiMetricIn): the wrapped resource.
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', True)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            # Positional args are only accepted if they are dicts of kwargs.
            for arg in args:
                if isinstance(arg, dict):
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.data = data
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # Unknown key and configuration says to discard it.
                continue
            setattr(self, var_name, var_value)
        return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, data, *args, **kwargs):
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.data = data
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| true | true |
f7fce83c4594f2a8a90f733a733619e15b315e1a | 4,367 | py | Python | venv/Lib/site-packages/engineio/async_drivers/sanic.py | asanka9/Quession-Discussion-App-Socket.Io-NLP | 95a49a8afa572dc3908a0bade45e424c3751f191 | [
"Apache-2.0"
] | 3 | 2021-09-26T17:30:31.000Z | 2022-03-05T13:37:46.000Z | venv/Lib/site-packages/engineio/async_drivers/sanic.py | asanka9/Quession-Discussion-App-Socket.Io-NLP | 95a49a8afa572dc3908a0bade45e424c3751f191 | [
"Apache-2.0"
] | 1 | 2020-11-29T20:27:13.000Z | 2020-11-29T20:27:13.000Z | venv/Lib/site-packages/engineio/async_drivers/sanic.py | asanka9/Quession-Discussion-App-Socket.Io-NLP | 95a49a8afa572dc3908a0bade45e424c3751f191 | [
"Apache-2.0"
] | 2 | 2020-11-28T19:41:07.000Z | 2020-11-29T19:42:22.000Z | import sys
from urllib.parse import urlsplit
try: # pragma: no cover
from sanic.response import HTTPResponse
from sanic.websocket import WebSocketProtocol
except ImportError:
HTTPResponse = None
WebSocketProtocol = None
import six
def create_route(app, engineio_server, engineio_endpoint): # pragma: no cover
"""This function sets up the engine.io endpoint as a route for the
application.
Note that both GET and POST requests must be hooked up on the engine.io
endpoint.
"""
app.add_route(engineio_server.handle_request, engineio_endpoint,
methods=['GET', 'POST', 'OPTIONS'])
try:
app.enable_websocket()
except AttributeError:
# ignore, this version does not support websocket
pass
def translate_request(request): # pragma: no cover
"""This function takes the arguments passed to the request handler and
uses them to generate a WSGI compatible environ dictionary.
"""
class AwaitablePayload(object):
def __init__(self, payload):
self.payload = payload or b''
async def read(self, length=None):
if length is None:
r = self.payload
self.payload = b''
else:
r = self.payload[:length]
self.payload = self.payload[length:]
return r
uri_parts = urlsplit(request.url)
environ = {
'wsgi.input': AwaitablePayload(request.body),
'wsgi.errors': sys.stderr,
'wsgi.version': (1, 0),
'wsgi.async': True,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False,
'SERVER_SOFTWARE': 'sanic',
'REQUEST_METHOD': request.method,
'QUERY_STRING': uri_parts.query or '',
'RAW_URI': request.url,
'SERVER_PROTOCOL': 'HTTP/' + request.version,
'REMOTE_ADDR': '127.0.0.1',
'REMOTE_PORT': '0',
'SERVER_NAME': 'sanic',
'SERVER_PORT': '0',
'sanic.request': request
}
for hdr_name, hdr_value in request.headers.items():
hdr_name = hdr_name.upper()
if hdr_name == 'CONTENT-TYPE':
environ['CONTENT_TYPE'] = hdr_value
continue
elif hdr_name == 'CONTENT-LENGTH':
environ['CONTENT_LENGTH'] = hdr_value
continue
key = 'HTTP_%s' % hdr_name.replace('-', '_')
if key in environ:
hdr_value = '%s,%s' % (environ[key], hdr_value)
environ[key] = hdr_value
environ['wsgi.url_scheme'] = environ.get('HTTP_X_FORWARDED_PROTO', 'http')
path_info = uri_parts.path
environ['PATH_INFO'] = path_info
environ['SCRIPT_NAME'] = ''
return environ
def make_response(status, headers, payload, environ): # pragma: no cover
"""This function generates an appropriate response object for this async
mode.
"""
headers_dict = {}
content_type = None
for h in headers:
if h[0].lower() == 'content-type':
content_type = h[1]
else:
headers_dict[h[0]] = h[1]
return HTTPResponse(body_bytes=payload, content_type=content_type,
status=int(status.split()[0]), headers=headers_dict)
class WebSocket(object): # pragma: no cover
"""
This wrapper class provides a sanic WebSocket interface that is
somewhat compatible with eventlet's implementation.
"""
def __init__(self, handler):
self.handler = handler
self._sock = None
async def __call__(self, environ):
request = environ['sanic.request']
protocol = request.transport.get_protocol()
self._sock = await protocol.websocket_handshake(request)
self.environ = environ
await self.handler(self)
async def close(self):
await self._sock.close()
async def send(self, message):
await self._sock.send(message)
async def wait(self):
data = await self._sock.recv()
if not isinstance(data, six.binary_type) and \
not isinstance(data, six.text_type):
raise IOError()
return data
_async = {
'asyncio': True,
'create_route': create_route,
'translate_request': translate_request,
'make_response': make_response,
'websocket': WebSocket if WebSocketProtocol else None,
}
| 30.117241 | 78 | 0.616899 | import sys
from urllib.parse import urlsplit
try:
from sanic.response import HTTPResponse
from sanic.websocket import WebSocketProtocol
except ImportError:
HTTPResponse = None
WebSocketProtocol = None
import six
def create_route(app, engineio_server, engineio_endpoint):
app.add_route(engineio_server.handle_request, engineio_endpoint,
methods=['GET', 'POST', 'OPTIONS'])
try:
app.enable_websocket()
except AttributeError:
pass
def translate_request(request):
class AwaitablePayload(object):
def __init__(self, payload):
self.payload = payload or b''
async def read(self, length=None):
if length is None:
r = self.payload
self.payload = b''
else:
r = self.payload[:length]
self.payload = self.payload[length:]
return r
uri_parts = urlsplit(request.url)
environ = {
'wsgi.input': AwaitablePayload(request.body),
'wsgi.errors': sys.stderr,
'wsgi.version': (1, 0),
'wsgi.async': True,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False,
'SERVER_SOFTWARE': 'sanic',
'REQUEST_METHOD': request.method,
'QUERY_STRING': uri_parts.query or '',
'RAW_URI': request.url,
'SERVER_PROTOCOL': 'HTTP/' + request.version,
'REMOTE_ADDR': '127.0.0.1',
'REMOTE_PORT': '0',
'SERVER_NAME': 'sanic',
'SERVER_PORT': '0',
'sanic.request': request
}
for hdr_name, hdr_value in request.headers.items():
hdr_name = hdr_name.upper()
if hdr_name == 'CONTENT-TYPE':
environ['CONTENT_TYPE'] = hdr_value
continue
elif hdr_name == 'CONTENT-LENGTH':
environ['CONTENT_LENGTH'] = hdr_value
continue
key = 'HTTP_%s' % hdr_name.replace('-', '_')
if key in environ:
hdr_value = '%s,%s' % (environ[key], hdr_value)
environ[key] = hdr_value
environ['wsgi.url_scheme'] = environ.get('HTTP_X_FORWARDED_PROTO', 'http')
path_info = uri_parts.path
environ['PATH_INFO'] = path_info
environ['SCRIPT_NAME'] = ''
return environ
def make_response(status, headers, payload, environ):
headers_dict = {}
content_type = None
for h in headers:
if h[0].lower() == 'content-type':
content_type = h[1]
else:
headers_dict[h[0]] = h[1]
return HTTPResponse(body_bytes=payload, content_type=content_type,
status=int(status.split()[0]), headers=headers_dict)
class WebSocket(object):
def __init__(self, handler):
self.handler = handler
self._sock = None
async def __call__(self, environ):
request = environ['sanic.request']
protocol = request.transport.get_protocol()
self._sock = await protocol.websocket_handshake(request)
self.environ = environ
await self.handler(self)
async def close(self):
await self._sock.close()
async def send(self, message):
await self._sock.send(message)
async def wait(self):
data = await self._sock.recv()
if not isinstance(data, six.binary_type) and \
not isinstance(data, six.text_type):
raise IOError()
return data
_async = {
'asyncio': True,
'create_route': create_route,
'translate_request': translate_request,
'make_response': make_response,
'websocket': WebSocket if WebSocketProtocol else None,
}
| true | true |
f7fce857c40b16858e8a511db03496ced1bbfdd7 | 5,678 | py | Python | apps/utils/corsheaders/checks.py | Freen247/dj_blog | f7df1a7b101d41835a334b78cddf3570968799e4 | [
"Apache-2.0"
] | 51 | 2020-09-28T09:41:03.000Z | 2022-03-19T08:25:19.000Z | apps/utils/corsheaders/checks.py | Freen247/dj_blog | f7df1a7b101d41835a334b78cddf3570968799e4 | [
"Apache-2.0"
] | 17 | 2020-09-24T10:26:40.000Z | 2022-03-12T00:49:05.000Z | apps/utils/corsheaders/checks.py | Freen247/django_blogback | f7df1a7b101d41835a334b78cddf3570968799e4 | [
"Apache-2.0"
] | 11 | 2020-10-10T01:23:09.000Z | 2022-02-08T17:06:09.000Z | import re
from collections.abc import Sequence
from numbers import Integral
from urllib.parse import urlparse
from django.conf import settings
from django.core import checks
from apps.utils.corsheaders.conf import conf
re_type = type(re.compile(""))
@checks.register
def check_settings(app_configs, **kwargs):
errors = []
if not is_sequence(conf.CORS_ALLOW_HEADERS, str):
errors.append(
checks.Error(
"CORS_ALLOW_HEADERS should be a sequence of strings.",
id="apps.utils.corsheaders.E001",
)
)
if not is_sequence(conf.CORS_ALLOW_METHODS, str):
errors.append(
checks.Error(
"CORS_ALLOW_METHODS should be a sequence of strings.",
id="apps.utils.corsheaders.E002",
)
)
if not isinstance(conf.CORS_ALLOW_CREDENTIALS, bool):
errors.append(
checks.Error(
"CORS_ALLOW_CREDENTIALS should be a bool.", id="apps.utils.corsheaders.E003"
)
)
if (
not isinstance(conf.CORS_PREFLIGHT_MAX_AGE, Integral)
or conf.CORS_PREFLIGHT_MAX_AGE < 0
):
errors.append(
checks.Error(
(
"CORS_PREFLIGHT_MAX_AGE should be an integer greater than "
+ "or equal to zero."
),
id="apps.utils.corsheaders.E004",
)
)
if not isinstance(conf.CORS_ALLOW_ALL_ORIGINS, bool):
if hasattr(settings, "CORS_ALLOW_ALL_ORIGINS"):
allow_all_alias = "CORS_ALLOW_ALL_ORIGINS"
else:
allow_all_alias = "CORS_ORIGIN_ALLOW_ALL"
errors.append(
checks.Error(
"{} should be a bool.".format(allow_all_alias), id="apps.utils.corsheaders.E005",
)
)
if hasattr(settings, "CORS_ALLOWED_ORIGINS"):
allowed_origins_alias = "CORS_ALLOWED_ORIGINS"
else:
allowed_origins_alias = "CORS_ORIGIN_WHITELIST"
if not is_sequence(conf.CORS_ALLOWED_ORIGINS, str):
errors.append(
checks.Error(
"{} should be a sequence of strings.".format(allowed_origins_alias),
id="apps.utils.corsheaders.E006",
)
)
else:
special_origin_values = (
# From 'security sensitive' contexts
"null",
# From files on Chrome on Android
# https://bugs.chromium.org/p/chromium/issues/detail?id=991107
"file://",
)
for origin in conf.CORS_ALLOWED_ORIGINS:
if origin in special_origin_values:
continue
parsed = urlparse(origin)
if parsed.scheme == "" or parsed.netloc == "":
errors.append(
checks.Error(
"Origin {} in {} is missing scheme or netloc".format(
repr(origin), allowed_origins_alias
),
id="apps.utils.corsheaders.E013",
hint=(
"Add a scheme (e.g. https://) or netloc (e.g. "
+ "example.com)."
),
)
)
else:
# Only do this check in this case because if the scheme is not
# provided, netloc ends up in path
for part in ("path", "params", "query", "fragment"):
if getattr(parsed, part) != "":
errors.append(
checks.Error(
"Origin {} in {} should not have {}".format(
repr(origin), allowed_origins_alias, part
),
id="apps.utils.corsheaders.E014",
)
)
if hasattr(settings, "CORS_ALLOWED_ORIGIN_REGEXES"):
allowed_regexes_alias = "CORS_ALLOWED_ORIGIN_REGEXES"
else:
allowed_regexes_alias = "CORS_ORIGIN_REGEX_WHITELIST"
if not is_sequence(conf.CORS_ALLOWED_ORIGIN_REGEXES, (str, re_type)):
errors.append(
checks.Error(
"{} should be a sequence of strings and/or compiled regexes.".format(
allowed_regexes_alias
),
id="apps.utils.corsheaders.E007",
)
)
if not is_sequence(conf.CORS_EXPOSE_HEADERS, str):
errors.append(
checks.Error(
"CORS_EXPOSE_HEADERS should be a sequence.", id="apps.utils.corsheaders.E008"
)
)
if not isinstance(conf.CORS_URLS_REGEX, (str, re_type)):
errors.append(
checks.Error(
"CORS_URLS_REGEX should be a string or regex.", id="apps.utils.corsheaders.E009"
)
)
if not isinstance(conf.CORS_REPLACE_HTTPS_REFERER, bool):
errors.append(
checks.Error(
"CORS_REPLACE_HTTPS_REFERER should be a bool.", id="apps.utils.corsheaders.E011"
)
)
if hasattr(settings, "CORS_MODEL"):
errors.append(
checks.Error(
(
"The CORS_MODEL setting has been removed - see "
+ "django-cors-headers' HISTORY."
),
id="apps.utils.corsheaders.E012",
)
)
return errors
def is_sequence(thing, type_or_types):
return isinstance(thing, Sequence) and all(
isinstance(x, type_or_types) for x in thing
)
| 33.4 | 97 | 0.525713 | import re
from collections.abc import Sequence
from numbers import Integral
from urllib.parse import urlparse
from django.conf import settings
from django.core import checks
from apps.utils.corsheaders.conf import conf
re_type = type(re.compile(""))
@checks.register
def check_settings(app_configs, **kwargs):
errors = []
if not is_sequence(conf.CORS_ALLOW_HEADERS, str):
errors.append(
checks.Error(
"CORS_ALLOW_HEADERS should be a sequence of strings.",
id="apps.utils.corsheaders.E001",
)
)
if not is_sequence(conf.CORS_ALLOW_METHODS, str):
errors.append(
checks.Error(
"CORS_ALLOW_METHODS should be a sequence of strings.",
id="apps.utils.corsheaders.E002",
)
)
if not isinstance(conf.CORS_ALLOW_CREDENTIALS, bool):
errors.append(
checks.Error(
"CORS_ALLOW_CREDENTIALS should be a bool.", id="apps.utils.corsheaders.E003"
)
)
if (
not isinstance(conf.CORS_PREFLIGHT_MAX_AGE, Integral)
or conf.CORS_PREFLIGHT_MAX_AGE < 0
):
errors.append(
checks.Error(
(
"CORS_PREFLIGHT_MAX_AGE should be an integer greater than "
+ "or equal to zero."
),
id="apps.utils.corsheaders.E004",
)
)
if not isinstance(conf.CORS_ALLOW_ALL_ORIGINS, bool):
if hasattr(settings, "CORS_ALLOW_ALL_ORIGINS"):
allow_all_alias = "CORS_ALLOW_ALL_ORIGINS"
else:
allow_all_alias = "CORS_ORIGIN_ALLOW_ALL"
errors.append(
checks.Error(
"{} should be a bool.".format(allow_all_alias), id="apps.utils.corsheaders.E005",
)
)
if hasattr(settings, "CORS_ALLOWED_ORIGINS"):
allowed_origins_alias = "CORS_ALLOWED_ORIGINS"
else:
allowed_origins_alias = "CORS_ORIGIN_WHITELIST"
if not is_sequence(conf.CORS_ALLOWED_ORIGINS, str):
errors.append(
checks.Error(
"{} should be a sequence of strings.".format(allowed_origins_alias),
id="apps.utils.corsheaders.E006",
)
)
else:
special_origin_values = (
"null",
"file://",
)
for origin in conf.CORS_ALLOWED_ORIGINS:
if origin in special_origin_values:
continue
parsed = urlparse(origin)
if parsed.scheme == "" or parsed.netloc == "":
errors.append(
checks.Error(
"Origin {} in {} is missing scheme or netloc".format(
repr(origin), allowed_origins_alias
),
id="apps.utils.corsheaders.E013",
hint=(
"Add a scheme (e.g. https://) or netloc (e.g. "
+ "example.com)."
),
)
)
else:
for part in ("path", "params", "query", "fragment"):
if getattr(parsed, part) != "":
errors.append(
checks.Error(
"Origin {} in {} should not have {}".format(
repr(origin), allowed_origins_alias, part
),
id="apps.utils.corsheaders.E014",
)
)
if hasattr(settings, "CORS_ALLOWED_ORIGIN_REGEXES"):
allowed_regexes_alias = "CORS_ALLOWED_ORIGIN_REGEXES"
else:
allowed_regexes_alias = "CORS_ORIGIN_REGEX_WHITELIST"
if not is_sequence(conf.CORS_ALLOWED_ORIGIN_REGEXES, (str, re_type)):
errors.append(
checks.Error(
"{} should be a sequence of strings and/or compiled regexes.".format(
allowed_regexes_alias
),
id="apps.utils.corsheaders.E007",
)
)
if not is_sequence(conf.CORS_EXPOSE_HEADERS, str):
errors.append(
checks.Error(
"CORS_EXPOSE_HEADERS should be a sequence.", id="apps.utils.corsheaders.E008"
)
)
if not isinstance(conf.CORS_URLS_REGEX, (str, re_type)):
errors.append(
checks.Error(
"CORS_URLS_REGEX should be a string or regex.", id="apps.utils.corsheaders.E009"
)
)
if not isinstance(conf.CORS_REPLACE_HTTPS_REFERER, bool):
errors.append(
checks.Error(
"CORS_REPLACE_HTTPS_REFERER should be a bool.", id="apps.utils.corsheaders.E011"
)
)
if hasattr(settings, "CORS_MODEL"):
errors.append(
checks.Error(
(
"The CORS_MODEL setting has been removed - see "
+ "django-cors-headers' HISTORY."
),
id="apps.utils.corsheaders.E012",
)
)
return errors
def is_sequence(thing, type_or_types):
return isinstance(thing, Sequence) and all(
isinstance(x, type_or_types) for x in thing
)
| true | true |
f7fce873014ea090979cfd86e57e34a48122a40d | 3,546 | py | Python | eth/vm/forks/frontier/headers.py | dbfreem/py-evm | 02a1f6f38884b1f7a89640c2095ea5b0f20687c3 | [
"MIT"
] | 1,641 | 2017-11-24T04:24:22.000Z | 2022-03-31T14:59:30.000Z | eth/vm/forks/frontier/headers.py | dbfreem/py-evm | 02a1f6f38884b1f7a89640c2095ea5b0f20687c3 | [
"MIT"
] | 1,347 | 2017-11-23T10:37:36.000Z | 2022-03-20T16:31:44.000Z | eth/vm/forks/frontier/headers.py | dbfreem/py-evm | 02a1f6f38884b1f7a89640c2095ea5b0f20687c3 | [
"MIT"
] | 567 | 2017-11-22T18:03:27.000Z | 2022-03-28T17:49:08.000Z | from typing import (
Any,
TYPE_CHECKING,
)
from eth.abc import BlockHeaderAPI
from eth.validation import (
validate_gt,
validate_header_params_for_configuration,
)
from eth.constants import (
GENESIS_GAS_LIMIT,
DIFFICULTY_ADJUSTMENT_DENOMINATOR,
DIFFICULTY_MINIMUM,
BOMB_EXPONENTIAL_PERIOD,
BOMB_EXPONENTIAL_FREE_PERIODS,
)
from eth._utils.db import (
get_parent_header,
)
from eth._utils.headers import (
compute_gas_limit,
fill_header_params_from_parent,
new_timestamp_from_parent,
)
from eth.rlp.headers import BlockHeader
from .constants import (
FRONTIER_DIFFICULTY_ADJUSTMENT_CUTOFF
)
if TYPE_CHECKING:
from eth.vm.forks.frontier import FrontierVM # noqa: F401
def compute_frontier_difficulty(parent_header: BlockHeaderAPI, timestamp: int) -> int:
"""
Computes the difficulty for a frontier block based on the parent block.
"""
validate_gt(timestamp, parent_header.timestamp, title="Header timestamp")
offset = parent_header.difficulty // DIFFICULTY_ADJUSTMENT_DENOMINATOR
# We set the minimum to the lowest of the protocol minimum and the parent
# minimum to allow for the initial frontier *warming* period during which
# the difficulty begins lower than the protocol minimum.
difficulty_minimum = min(parent_header.difficulty, DIFFICULTY_MINIMUM)
if timestamp - parent_header.timestamp < FRONTIER_DIFFICULTY_ADJUSTMENT_CUTOFF:
base_difficulty = max(
parent_header.difficulty + offset,
difficulty_minimum,
)
else:
base_difficulty = max(
parent_header.difficulty - offset,
difficulty_minimum,
)
# Adjust for difficulty bomb.
num_bomb_periods = (
(parent_header.block_number + 1) // BOMB_EXPONENTIAL_PERIOD
) - BOMB_EXPONENTIAL_FREE_PERIODS
if num_bomb_periods >= 0:
difficulty = max(
base_difficulty + 2**num_bomb_periods,
DIFFICULTY_MINIMUM,
)
else:
difficulty = base_difficulty
return difficulty
def create_frontier_header_from_parent(parent_header: BlockHeaderAPI,
**header_params: Any) -> BlockHeader:
if 'timestamp' not in header_params:
header_params['timestamp'] = new_timestamp_from_parent(parent_header)
if 'difficulty' not in header_params:
# Use setdefault to ensure the new header has the same timestamp we use to calculate its
# difficulty.
header_params['difficulty'] = compute_frontier_difficulty(
parent_header,
header_params['timestamp'],
)
if 'gas_limit' not in header_params:
header_params['gas_limit'] = compute_gas_limit(
parent_header,
genesis_gas_limit=GENESIS_GAS_LIMIT,
)
all_fields = fill_header_params_from_parent(parent_header, **header_params)
return BlockHeader(**all_fields)
def configure_frontier_header(vm: "FrontierVM", **header_params: Any) -> BlockHeader:
validate_header_params_for_configuration(header_params)
with vm.get_header().build_changeset(**header_params) as changeset:
if 'timestamp' in header_params and vm.get_header().block_number > 0:
parent_header = get_parent_header(changeset.build_rlp(), vm.chaindb)
changeset.difficulty = compute_frontier_difficulty(
parent_header,
header_params['timestamp'],
)
header = changeset.commit()
return header
| 31.660714 | 96 | 0.698534 | from typing import (
Any,
TYPE_CHECKING,
)
from eth.abc import BlockHeaderAPI
from eth.validation import (
validate_gt,
validate_header_params_for_configuration,
)
from eth.constants import (
GENESIS_GAS_LIMIT,
DIFFICULTY_ADJUSTMENT_DENOMINATOR,
DIFFICULTY_MINIMUM,
BOMB_EXPONENTIAL_PERIOD,
BOMB_EXPONENTIAL_FREE_PERIODS,
)
from eth._utils.db import (
get_parent_header,
)
from eth._utils.headers import (
compute_gas_limit,
fill_header_params_from_parent,
new_timestamp_from_parent,
)
from eth.rlp.headers import BlockHeader
from .constants import (
FRONTIER_DIFFICULTY_ADJUSTMENT_CUTOFF
)
if TYPE_CHECKING:
from eth.vm.forks.frontier import FrontierVM
def compute_frontier_difficulty(parent_header: BlockHeaderAPI, timestamp: int) -> int:
validate_gt(timestamp, parent_header.timestamp, title="Header timestamp")
offset = parent_header.difficulty // DIFFICULTY_ADJUSTMENT_DENOMINATOR
difficulty_minimum = min(parent_header.difficulty, DIFFICULTY_MINIMUM)
if timestamp - parent_header.timestamp < FRONTIER_DIFFICULTY_ADJUSTMENT_CUTOFF:
base_difficulty = max(
parent_header.difficulty + offset,
difficulty_minimum,
)
else:
base_difficulty = max(
parent_header.difficulty - offset,
difficulty_minimum,
)
num_bomb_periods = (
(parent_header.block_number + 1) // BOMB_EXPONENTIAL_PERIOD
) - BOMB_EXPONENTIAL_FREE_PERIODS
if num_bomb_periods >= 0:
difficulty = max(
base_difficulty + 2**num_bomb_periods,
DIFFICULTY_MINIMUM,
)
else:
difficulty = base_difficulty
return difficulty
def create_frontier_header_from_parent(parent_header: BlockHeaderAPI,
**header_params: Any) -> BlockHeader:
if 'timestamp' not in header_params:
header_params['timestamp'] = new_timestamp_from_parent(parent_header)
if 'difficulty' not in header_params:
header_params['difficulty'] = compute_frontier_difficulty(
parent_header,
header_params['timestamp'],
)
if 'gas_limit' not in header_params:
header_params['gas_limit'] = compute_gas_limit(
parent_header,
genesis_gas_limit=GENESIS_GAS_LIMIT,
)
all_fields = fill_header_params_from_parent(parent_header, **header_params)
return BlockHeader(**all_fields)
def configure_frontier_header(vm: "FrontierVM", **header_params: Any) -> BlockHeader:
validate_header_params_for_configuration(header_params)
with vm.get_header().build_changeset(**header_params) as changeset:
if 'timestamp' in header_params and vm.get_header().block_number > 0:
parent_header = get_parent_header(changeset.build_rlp(), vm.chaindb)
changeset.difficulty = compute_frontier_difficulty(
parent_header,
header_params['timestamp'],
)
header = changeset.commit()
return header
| true | true |
f7fce8751c7cf5519b1dcbeff315939f47ea406f | 105 | py | Python | setup.py | MarcelloPerathoner/sphinxcontrib-autojsdoc | fce55cbed369258bcf388a94fee5a35df6f1a0ac | [
"BSD-2-Clause"
] | 21 | 2019-05-05T21:05:32.000Z | 2022-02-03T14:45:01.000Z | setup.py | MarcelloPerathoner/sphinxcontrib-autojsdoc | fce55cbed369258bcf388a94fee5a35df6f1a0ac | [
"BSD-2-Clause"
] | 12 | 2019-04-06T06:54:52.000Z | 2021-11-30T22:23:56.000Z | setup.py | MarcelloPerathoner/sphinxcontrib-autojsdoc | fce55cbed369258bcf388a94fee5a35df6f1a0ac | [
"BSD-2-Clause"
] | 15 | 2019-07-08T03:08:16.000Z | 2022-01-05T13:27:20.000Z | #!/usr/bin/env python
import setuptools
setuptools.setup(
setup_requires=['pbr'],
pbr=True,
)
| 10.5 | 27 | 0.666667 |
import setuptools
setuptools.setup(
setup_requires=['pbr'],
pbr=True,
)
| true | true |
f7fce897ee2f59d420de72670135bc610f8f54ac | 5,095 | py | Python | rally/common/db/schema.py | DeanHwd/rally | d284aa0746c54f1c375470e76dd206d19877a7fd | [
"Apache-2.0"
] | null | null | null | rally/common/db/schema.py | DeanHwd/rally | d284aa0746c54f1c375470e76dd206d19877a7fd | [
"Apache-2.0"
] | null | null | null | rally/common/db/schema.py | DeanHwd/rally | d284aa0746c54f1c375470e76dd206d19877a7fd | [
"Apache-2.0"
] | 1 | 2015-02-07T09:44:47.000Z | 2015-02-07T09:44:47.000Z | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import alembic
import alembic.config
import alembic.migration
import alembic.script
import sqlalchemy as sa
import sqlalchemy.schema # noqa
from rally.common.db import api
from rally.common.db import models
from rally import exceptions
INITIAL_REVISION_UUID = "ca3626f62937"
def _alembic_config():
path = os.path.join(os.path.dirname(__file__), "alembic.ini")
config = alembic.config.Config(path)
return config
def schema_cleanup():
"""Drop all database objects.
Drops all database objects remaining on the default schema of the given
engine. Per-db implementations will also need to drop items specific to
those systems, such as sequences, custom types (e.g. pg ENUM), etc.
"""
engine = api.get_engine()
with engine.begin() as conn:
inspector = sa.inspect(engine)
metadata = sa.schema.MetaData()
tbs = []
all_fks = []
for table_name in inspector.get_table_names():
fks = []
for fk in inspector.get_foreign_keys(table_name):
if not fk["name"]:
continue
fks.append(
sa.schema.ForeignKeyConstraint((), (), name=fk["name"]))
table = sa.schema.Table(table_name, metadata, *fks)
tbs.append(table)
all_fks.extend(fks)
if engine.name != "sqlite":
for fkc in all_fks:
conn.execute(sa.schema.DropConstraint(fkc))
for table in tbs:
conn.execute(sa.schema.DropTable(table))
if engine.name == "postgresql":
sqla_100 = int(sa.__version__.split(".")[0]) >= 1
if sqla_100:
enums = [e["name"] for e in sa.inspect(conn).get_enums()]
else:
enums = conn.dialect._load_enums(conn).keys()
for e in enums:
conn.execute("DROP TYPE %s" % e)
def schema_revision(config=None, engine=None, detailed=False):
"""Current database revision.
:param config: Instance of alembic config
:param engine: Instance of DB engine
:param detailed: whether to return a dict with detailed data
:rtype detailed: bool
:returns: Database revision
:rtype: string
:rtype: dict
"""
engine = engine or api.get_engine()
with engine.connect() as conn:
context = alembic.migration.MigrationContext.configure(conn)
revision = context.get_current_revision()
if detailed:
config = config or _alembic_config()
sc_dir = alembic.script.ScriptDirectory.from_config(config)
return {"revision": revision,
"current_head": sc_dir.get_current_head()}
return revision
def schema_upgrade(revision=None, config=None, engine=None):
"""Used for upgrading database.
:param revision: Desired database version
:type revision: string
:param config: Instance of alembic config
:param engine: Instance of DB engine
"""
revision = revision or "head"
config = config or _alembic_config()
engine = engine or api.get_engine()
if schema_revision() is None:
schema_stamp(INITIAL_REVISION_UUID, config=config)
alembic.command.upgrade(config, revision or "head")
def schema_create(config=None, engine=None):
"""Create database schema from models description.
Can be used for initial installation instead of upgrade('head').
:param config: Instance of alembic config
:param engine: Instance of DB engine
"""
engine = engine or api.get_engine()
# NOTE(viktors): If we will use metadata.create_all() for non empty db
# schema, it will only add the new tables, but leave
# existing as is. So we should avoid of this situation.
if schema_revision(engine=engine) is not None:
raise exceptions.DBMigrationError("DB schema is already under version"
" control. Use upgrade() instead")
models.BASE.metadata.create_all(engine)
schema_stamp("head", config=config)
def schema_stamp(revision, config=None):
"""Stamps database with provided revision.
Don't run any migrations.
:param revision: Should match one from repository or head - to stamp
database with most recent revision
:type revision: string
:param config: Instance of alembic config
"""
config = config or _alembic_config()
return alembic.command.stamp(config, revision=revision)
| 33.300654 | 78 | 0.655937 |
import os
import alembic
import alembic.config
import alembic.migration
import alembic.script
import sqlalchemy as sa
import sqlalchemy.schema
from rally.common.db import api
from rally.common.db import models
from rally import exceptions
INITIAL_REVISION_UUID = "ca3626f62937"
def _alembic_config():
path = os.path.join(os.path.dirname(__file__), "alembic.ini")
config = alembic.config.Config(path)
return config
def schema_cleanup():
engine = api.get_engine()
with engine.begin() as conn:
inspector = sa.inspect(engine)
metadata = sa.schema.MetaData()
tbs = []
all_fks = []
for table_name in inspector.get_table_names():
fks = []
for fk in inspector.get_foreign_keys(table_name):
if not fk["name"]:
continue
fks.append(
sa.schema.ForeignKeyConstraint((), (), name=fk["name"]))
table = sa.schema.Table(table_name, metadata, *fks)
tbs.append(table)
all_fks.extend(fks)
if engine.name != "sqlite":
for fkc in all_fks:
conn.execute(sa.schema.DropConstraint(fkc))
for table in tbs:
conn.execute(sa.schema.DropTable(table))
if engine.name == "postgresql":
sqla_100 = int(sa.__version__.split(".")[0]) >= 1
if sqla_100:
enums = [e["name"] for e in sa.inspect(conn).get_enums()]
else:
enums = conn.dialect._load_enums(conn).keys()
for e in enums:
conn.execute("DROP TYPE %s" % e)
def schema_revision(config=None, engine=None, detailed=False):
engine = engine or api.get_engine()
with engine.connect() as conn:
context = alembic.migration.MigrationContext.configure(conn)
revision = context.get_current_revision()
if detailed:
config = config or _alembic_config()
sc_dir = alembic.script.ScriptDirectory.from_config(config)
return {"revision": revision,
"current_head": sc_dir.get_current_head()}
return revision
def schema_upgrade(revision=None, config=None, engine=None):
revision = revision or "head"
config = config or _alembic_config()
engine = engine or api.get_engine()
if schema_revision() is None:
schema_stamp(INITIAL_REVISION_UUID, config=config)
alembic.command.upgrade(config, revision or "head")
def schema_create(config=None, engine=None):
    """Create all model tables and stamp the schema as the current head.

    Raises:
        exceptions.DBMigrationError: if the schema is already versioned.
    """
    engine = engine or api.get_engine()
    if schema_revision(engine=engine) is not None:
        raise exceptions.DBMigrationError("DB schema is already under version"
                                          " control. Use upgrade() instead")
    models.BASE.metadata.create_all(engine)
    schema_stamp("head", config=config)
def schema_stamp(revision, config=None):
    """Mark the database as being at ``revision`` without running migrations."""
    cfg = config or _alembic_config()
    return alembic.command.stamp(cfg, revision=revision)
| true | true |
f7fce8c6002bacf99442a27ae532d11e4cf9a454 | 2,832 | py | Python | adminmgr/media/code/A2/python/task/BD_2384_2390_2412.py | IamMayankThakur/test-bigdata | cef633eb394419b955bdce479699d0115d8f99c3 | [
"Apache-2.0"
] | 9 | 2019-11-08T02:05:27.000Z | 2021-12-13T12:06:35.000Z | adminmgr/media/code/A2/python/task/BD_2384_2390_2412.py | IamMayankThakur/test-bigdata | cef633eb394419b955bdce479699d0115d8f99c3 | [
"Apache-2.0"
] | 6 | 2019-11-27T03:23:16.000Z | 2021-06-10T19:15:13.000Z | adminmgr/media/code/A2/python/task/BD_2384_2390_2412.py | IamMayankThakur/test-bigdata | cef633eb394419b955bdce479699d0115d8f99c3 | [
"Apache-2.0"
] | 4 | 2019-11-26T17:04:27.000Z | 2021-12-13T11:57:03.000Z |
from __future__ import print_function
from operator import add
from pyspark.sql import SparkSession
import sys
import findspark
findspark.init()
def computeContribs(merged_tuple):
    """Yield each batsman's equal share of a bowler's current rank.

    ``merged_tuple`` is ``(key, (batsmen, rank))`` as produced by
    ``links.join(ranks)``; every batsman in the group receives
    ``rank / len(batsmen)``. Yields nothing for an empty group.
    """
    batsmen, rank = merged_tuple[1]
    count = len(batsmen)
    for batsman in batsmen:
        yield (batsman, rank / count)
if __name__ == "__main__":
    spark = SparkSession.builder.appName("batsmanRank").getOrCreate()
    # Input lines from HDFS: "col1,col2,runs,balls" — one delivery summary
    # per line.
    lines = spark.read.text("hdfs://localhost:9000/input/BatsmanRankTestData.txt").rdd.map(lambda r: r[0])
    # Links keyed by column 1 (x[0]) with the grouped column-2 values (x[1]).
    # NOTE(review): ranks below are keyed by column 2, so join(ranks) only
    # matches if both columns name the same kind of entity — confirm the
    # input column order (the in-code comments disagree on which column is
    # the batsman and which the bowler).
    links = lines.map(lambda x: x.split(',')).map(lambda x: (x[0],x[1])).groupByKey().cache()
    # Initial rank of each column-2 entity: sum of runs/balls ratios.
    #initial_ranks = lines.map(lambda x: x.split(',')).map(lambda x: (x[1],1) if ((int(x[2])/int(x[3])) < 1) else (x[1],int(x[2])/int(x[3])) ).reduceByKey(add)
    initial_ranks = lines.map(lambda x: x.split(',')).map(lambda x: (x[1],int(x[2])/int(x[3]))).reduceByKey(add)
    # Floor every summed average at 1 so no rank starts below 1.
    ranks = initial_ranks.map(lambda x: (x[0],1) if (x[1]<1) else (x[0],x[1]))
    old_ranks = ranks.sortByKey().collect()
    total = initial_ranks.count()
    # Print the initial rank list and its length.
    print("\n")
    for (bowler,avg) in old_ranks:
        print(bowler,avg)
    print("\n")
    print("\n Ranks Count:",total,"\n\n")
    links_output = links.collect()
    # Print the initial link list and its length.
    print("\n")
    for (bowler,l) in links_output:
        print(bowler,list(l),"\n")
    print("\n")
    print("\n Link Count:",links.count(),"\n\n")
    # Compute new ranks. argv[1] is required: > 0 runs that many fixed
    # iterations, otherwise iterate until convergence.
    convergence = False
    j = 0
    if(int(sys.argv[1]) > 0):
        for i in range(int(sys.argv[1])):
            # Share each rank among linked entities, then apply 0.80 damping
            # with a 0.20 base (cf. the classic 0.85/0.15 PageRank split).
            contribs = links.join(ranks).flatMap(lambda x: computeContribs(x))
            ranks = contribs.reduceByKey(add).mapValues(lambda rank: rank*0.80 + 0.20)
    else:
        # Iterate until no entity's rank changes by 0.0001 or more.
        while( not convergence):
            j = j + 1
            print("\nIN WHILE LOOP\n")
            print("ITERATION ",j,"\n")
            contribs = links.join(ranks).flatMap(lambda x: computeContribs(x))
            ranks = contribs.reduceByKey(add).mapValues(lambda rank: rank*0.80 + 0.20)
            new_ranks = ranks.sortByKey().collect()
            # Converged only if every pairwise diff is < 0.0001: the loop
            # breaks (leaving convergence False) on the first offender.
            # NOTE(review): assumes old_ranks and new_ranks always hold the
            # same keys in the same sorted order — join() drops unlinked
            # keys, which would misalign (or shorten) the lists; verify.
            for s in range(total):
                diff = abs(old_ranks[s][1] - new_ranks[s][1])
                name = old_ranks[s][0]
                print(old_ranks[s][0]," ",new_ranks[s][0]," ",old_ranks[s][1]," ",new_ranks[s][1]," ",diff)
                if(diff < 0.0001):
                    convergence = True
                else:
                    convergence = False
                    print("\nDid not converge - Iteration ",j,". Failed at - ",name," Difference - ",diff,"\n")
                    break
            old_ranks = new_ranks
    new_ranks_output = ranks.collect()
final_count = ranks.count() | 41.647059 | 157 | 0.660664 |
from __future__ import print_function
from operator import add
from pyspark.sql import SparkSession
import sys
import findspark
findspark.init()
def computeContribs(merged_tuple):
batsman_list = merged_tuple[1][0]
current_rank = merged_tuple[1][1]
batsman_num = len(batsman_list)
for n in batsman_list:
yield(n, current_rank/batsman_num)
if __name__ == "__main__":
spark = SparkSession.builder.appName("batsmanRank").getOrCreate()
lines = spark.read.text("hdfs://localhost:9000/input/BatsmanRankTestData.txt").rdd.map(lambda r: r[0])
links = lines.map(lambda x: x.split(',')).map(lambda x: (x[0],x[1])).groupByKey().cache()
#initial_ranks = lines.map(lambda x: x.split(',')).map(lambda x: (x[1],1) if ((int(x[2])/int(x[3])) < 1) else (x[1],int(x[2])/int(x[3])) ).reduceByKey(add)
initial_ranks = lines.map(lambda x: x.split(',')).map(lambda x: (x[1],int(x[2])/int(x[3]))).reduceByKey(add)
ranks = initial_ranks.map(lambda x: (x[0],1) if (x[1]<1) else (x[0],x[1]))
old_ranks = ranks.sortByKey().collect()
total = initial_ranks.count()
# Print INITIAL RANK LIST and length
print("\n")
for (bowler,avg) in old_ranks:
print(bowler,avg)
print("\n")
print("\n Ranks Count:",total,"\n\n")
links_output = links.collect()
# Print INITIAL LINK LIST and length
print("\n")
for (bowler,l) in links_output:
print(bowler,list(l),"\n")
print("\n")
print("\n Link Count:",links.count(),"\n\n")
# Compute new ranks
convergence = False
j = 0
# For Iterative computation
if(int(sys.argv[1]) > 0):
for i in range(int(sys.argv[1])):
contribs = links.join(ranks).flatMap(lambda x: computeContribs(x))
ranks = contribs.reduceByKey(add).mapValues(lambda rank: rank*0.80 + 0.20)
# For computation until convergence
else:
while( not convergence):
j = j + 1
print("\nIN WHILE LOOP\n")
print("ITERATION ",j,"\n")
contribs = links.join(ranks).flatMap(lambda x: computeContribs(x))
ranks = contribs.reduceByKey(add).mapValues(lambda rank: rank*0.80 + 0.20)
new_ranks = ranks.sortByKey().collect()
for s in range(total):
diff = abs(old_ranks[s][1] - new_ranks[s][1])
name = old_ranks[s][0]
print(old_ranks[s][0]," ",new_ranks[s][0]," ",old_ranks[s][1]," ",new_ranks[s][1]," ",diff)
if(diff < 0.0001):
convergence = True
else:
convergence = False
print("\nDid not converge - Iteration ",j,". Failed at - ",name," Difference - ",diff,"\n")
break
old_ranks = new_ranks
new_ranks_output = ranks.collect()
final_count = ranks.count() | true | true |
f7fce8f1ea54a5556123684709c0e2bea6bd5faa | 5,717 | py | Python | plaso/parsers/sqlite_plugins/firefox_cookies.py | cvandeplas/plaso | b625a2c267ed09505cfac84c9593d8c0922852b1 | [
"Apache-2.0"
] | 3 | 2016-03-11T02:47:08.000Z | 2016-12-24T03:19:27.000Z | plaso/parsers/sqlite_plugins/firefox_cookies.py | cvandeplas/plaso | b625a2c267ed09505cfac84c9593d8c0922852b1 | [
"Apache-2.0"
] | null | null | null | plaso/parsers/sqlite_plugins/firefox_cookies.py | cvandeplas/plaso | b625a2c267ed09505cfac84c9593d8c0922852b1 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parser for the Firefox Cookie database."""
from plaso.events import time_events
from plaso.lib import errors
from plaso.lib import eventdata
from plaso.lib import timelib
# Register the cookie plugins.
from plaso.parsers import cookie_plugins # pylint: disable=unused-import
from plaso.parsers import sqlite
from plaso.parsers.cookie_plugins import interface as cookie_interface
from plaso.parsers.sqlite_plugins import interface
class FirefoxCookieEvent(time_events.TimestampEvent):
  """Convenience class for a Firefox Cookie event."""

  DATA_TYPE = 'firefox:cookie:entry'

  def __init__(
      self, timestamp, usage, identifier, hostname, cookie_name, value, path,
      secure, httponly):
    """Initializes the event.

    Args:
      timestamp: the timestamp value.
      usage: a string describing what the timestamp represents.
      identifier: the database row identifier, stored as the event offset.
      hostname: the host that set the cookie; a leading dot is stripped.
      cookie_name: the name field of the cookie.
      value: the value of the cookie.
      path: an URI of the page that set the cookie.
      secure: whether the cookie may only travel over a secure channel.
      httponly: whether the cookie is hidden from client side scripts.
    """
    super(FirefoxCookieEvent, self).__init__(timestamp, usage)
    # Domain cookies are stored with a leading dot (".example.com").
    if hostname.startswith('.'):
      hostname = hostname[1:]
    self.offset = identifier
    self.host = hostname
    self.cookie_name = cookie_name
    self.data = value
    self.path = path
    self.secure = bool(secure)
    self.httponly = bool(httponly)
    scheme = u'https' if self.secure else u'http'
    self.url = u'{0:s}://{1:s}{2:s}'.format(scheme, hostname, path)
class FirefoxCookiePlugin(interface.SQLitePlugin):
  """Parse Firefox Cookies file."""

  NAME = 'firefox_cookies'
  DESCRIPTION = u'Parser for Firefox cookies SQLite database files.'

  # Define the needed queries.
  QUERIES = [
      (('SELECT id, baseDomain, name, value, host, path, expiry, lastAccessed, '
        'creationTime, isSecure, isHttpOnly FROM moz_cookies'),
       'ParseCookieRow')]

  # The required tables common to Archived History and History.
  REQUIRED_TABLES = frozenset(['moz_cookies'])

  # Background reading on the Firefox cookie schema.
  URLS = [
      (u'https://hg.mozilla.org/mozilla-central/file/349a2f003529/netwerk/'
       u'cookie/nsCookie.h')]

  def __init__(self):
    """Initializes a plugin object."""
    super(FirefoxCookiePlugin, self).__init__()
    self._cookie_plugins = cookie_interface.GetPlugins()

  def ParseCookieRow(self, parser_context, row, query=None, **unused_kwargs):
    """Parses a cookie row.

    Args:
      parser_context: A parser context object (instance of ParserContext).
      row: The row resulting from the query.
      query: Optional query string. The default is None.
    """
    # Emit one event per populated timestamp column.
    for timestamp, usage in (
        (row['creationTime'], eventdata.EventTimestamp.CREATION_TIME),
        (row['lastAccessed'], eventdata.EventTimestamp.ACCESS_TIME)):
      if not timestamp:
        continue
      event_object = FirefoxCookieEvent(
          timestamp, usage, row['id'], row['host'], row['name'], row['value'],
          row['path'], row['isSecure'], row['isHttpOnly'])
      parser_context.ProduceEvent(
          event_object, plugin_name=self.NAME, query=query)

    if row['expiry']:
      # The expiry column holds a POSIX timestamp, localized on the client
      # (see nsCookieService::GetExpiry in netwerk/cookie/nsCookieService.cpp).
      timestamp = timelib.Timestamp.FromPosixTime(row['expiry'])
      event_object = FirefoxCookieEvent(
          timestamp, u'Cookie Expires', row['id'], row['host'], row['name'],
          row['value'], row['path'], row['isSecure'], row['isHttpOnly'])
      parser_context.ProduceEvent(
          event_object, plugin_name=self.NAME, query=query)

    # Let every registered cookie plugin try to extract more from the value.
    hostname = row['host']
    if hostname.startswith('.'):
      hostname = hostname[1:]
    scheme = u'https' if row['isSecure'] else u'http'
    url = u'{0:s}://{1:s}{2:s}'.format(scheme, hostname, row['path'])
    for cookie_plugin in self._cookie_plugins:
      try:
        cookie_plugin.Process(
            parser_context, cookie_name=row['name'], cookie_data=row['value'],
            url=url)
      except errors.WrongPlugin:
        pass
sqlite.SQLiteParser.RegisterPlugin(FirefoxCookiePlugin)
| 36.883871 | 80 | 0.687948 |
from plaso.events import time_events
from plaso.lib import errors
from plaso.lib import eventdata
from plaso.lib import timelib
from plaso.parsers import cookie_plugins
from plaso.parsers import sqlite
from plaso.parsers.cookie_plugins import interface as cookie_interface
from plaso.parsers.sqlite_plugins import interface
class FirefoxCookieEvent(time_events.TimestampEvent):
DATA_TYPE = 'firefox:cookie:entry'
def __init__(
self, timestamp, usage, identifier, hostname, cookie_name, value, path,
secure, httponly):
super(FirefoxCookieEvent, self).__init__(timestamp, usage)
if hostname.startswith('.'):
hostname = hostname[1:]
self.offset = identifier
self.host = hostname
self.cookie_name = cookie_name
self.data = value
self.path = path
self.secure = True if secure else False
self.httponly = True if httponly else False
if self.secure:
scheme = u'https'
else:
scheme = u'http'
self.url = u'{0:s}://{1:s}{2:s}'.format(scheme, hostname, path)
class FirefoxCookiePlugin(interface.SQLitePlugin):
NAME = 'firefox_cookies'
DESCRIPTION = u'Parser for Firefox cookies SQLite database files.'
QUERIES = [
(('SELECT id, baseDomain, name, value, host, path, expiry, lastAccessed, '
'creationTime, isSecure, isHttpOnly FROM moz_cookies'),
'ParseCookieRow')]
REQUIRED_TABLES = frozenset(['moz_cookies'])
URLS = [
(u'https://hg.mozilla.org/mozilla-central/file/349a2f003529/netwerk/'
u'cookie/nsCookie.h')]
def __init__(self):
super(FirefoxCookiePlugin, self).__init__()
self._cookie_plugins = cookie_interface.GetPlugins()
def ParseCookieRow(self, parser_context, row, query=None, **unused_kwargs):
if row['creationTime']:
event_object = FirefoxCookieEvent(
row['creationTime'], eventdata.EventTimestamp.CREATION_TIME,
row['id'], row['host'], row['name'], row['value'], row['path'],
row['isSecure'], row['isHttpOnly'])
parser_context.ProduceEvent(
event_object, plugin_name=self.NAME, query=query)
if row['lastAccessed']:
event_object = FirefoxCookieEvent(
row['lastAccessed'], eventdata.EventTimestamp.ACCESS_TIME, row['id'],
row['host'], row['name'], row['value'], row['path'], row['isSecure'],
row['isHttpOnly'])
parser_context.ProduceEvent(
event_object, plugin_name=self.NAME, query=query)
if row['expiry']:
# the server wants the cookie to expire and adding that difference to the
# client time. This localizes the client time regardless of whether or not
# the TZ environment variable was set on the client.
timestamp = timelib.Timestamp.FromPosixTime(row['expiry'])
event_object = FirefoxCookieEvent(
timestamp, u'Cookie Expires', row['id'], row['host'], row['name'],
row['value'], row['path'], row['isSecure'], row['isHttpOnly'])
parser_context.ProduceEvent(
event_object, plugin_name=self.NAME, query=query)
# Go through all cookie plugins to see if there are is any specific parsing
# needed.
hostname = row['host']
if hostname.startswith('.'):
hostname = hostname[1:]
url = u'http{0:s}://{1:s}{2:s}'.format(
u's' if row['isSecure'] else u'', hostname, row['path'])
for cookie_plugin in self._cookie_plugins:
try:
cookie_plugin.Process(
parser_context, cookie_name=row['name'], cookie_data=row['value'],
url=url)
except errors.WrongPlugin:
pass
sqlite.SQLiteParser.RegisterPlugin(FirefoxCookiePlugin)
| true | true |
f7fcea3b57813b46d8d47059d275ed86aed8544b | 7,922 | py | Python | zubhub_backend/zubhub/projects/models.py | NdibeRaymond/zubhub | 23907202af4f4f4f85a108ed15e811abb3d22407 | [
"MIT"
] | 1 | 2022-01-21T14:15:24.000Z | 2022-01-21T14:15:24.000Z | zubhub_backend/zubhub/projects/models.py | NdibeRaymond/zubhub | 23907202af4f4f4f85a108ed15e811abb3d22407 | [
"MIT"
] | null | null | null | zubhub_backend/zubhub/projects/models.py | NdibeRaymond/zubhub | 23907202af4f4f4f85a108ed15e811abb3d22407 | [
"MIT"
] | null | null | null | import uuid
from math import floor
from treebeard.mp_tree import MP_Node
from django.db import models
from django.contrib.auth import get_user_model
from django.utils.text import slugify
from django.utils import timezone
from django.contrib.postgres.search import SearchVectorField
from django.contrib.postgres.indexes import GinIndex
from projects.utils import clean_comment_text, clean_project_desc
Creator = get_user_model()
class PublishingRule(models.Model):
    """Visibility rule attached to a Project or Comment (via their
    ``publish`` one-to-one fields)."""

    # Visibility levels, least to most open.
    DRAFT = 1
    PREVIEW = 2
    AUTHENTICATED_VIEWERS = 3
    PUBLIC = 4

    PUBLISHING_CHOICES = (
        (DRAFT, 'DRAFT'),
        (PREVIEW, 'PREVIEW'),
        (AUTHENTICATED_VIEWERS, 'AUTHENTICATED_VIEWERS'),
        (PUBLIC, 'PUBLIC')
    )

    type = models.PositiveSmallIntegerField(
        choices=PUBLISHING_CHOICES, blank=False, null=False, default=DRAFT)
    # Stored as text rather than a foreign key.
    publisher_id = models.CharField(max_length=100, blank=True, null=True)
    # Specific users allowed to view the target.
    visible_to = models.ManyToManyField(Creator, blank=True)

    def __str__(self):
        # Look the label up in the choices tuple instead of a four-branch
        # if-chain; still yields None for an unknown type value.
        return dict(self.PUBLISHING_CHOICES).get(self.type)
class Category(MP_Node):
    """Tree of project categories (treebeard materialized-path node)."""

    name = models.CharField(max_length=100, unique=True)
    description = models.CharField(max_length=1000, blank=True, null=True)
    slug = models.SlugField(unique=True, max_length=1000)
    search_vector = SearchVectorField(null=True)

    # treebeard: keep sibling categories sorted by name.
    node_order_by = ['name']

    class Meta:
        verbose_name_plural = "categories"
        indexes = (GinIndex(fields=["search_vector"]),)

    def __str__(self):
        return self.name

    def save(self, *args, **kwargs):
        # First save only: derive the slug from the name plus a short
        # random suffix for uniqueness.
        if not self.slug:
            suffix = str(uuid.uuid4())
            suffix = suffix[:floor(len(suffix) / 6)]
            self.slug = slugify(self.name) + "-" + suffix
        super().save(*args, **kwargs)
class Project(models.Model):
    """A maker project with media, denormalized counters and a publishing rule."""

    id = models.UUIDField(
        primary_key=True, default=uuid.uuid4, editable=False, unique=True)
    creator = models.ForeignKey(
        Creator, on_delete=models.CASCADE, related_name="projects")
    title = models.CharField(max_length=1000)
    description = models.CharField(max_length=10000, blank=True, null=True)
    video = models.URLField(max_length=1000, blank=True, null=True)
    materials_used = models.CharField(max_length=5000)
    category = models.ForeignKey(
        Category, on_delete=models.SET_NULL, null=True, blank=True, related_name="projects")
    views = models.ManyToManyField(
        Creator, blank=True, related_name="projects_viewed")
    views_count = models.IntegerField(blank=True, default=0)
    likes = models.ManyToManyField(
        Creator, blank=True, related_name="projects_liked")
    likes_count = models.IntegerField(blank=True, default=0)
    comments_count = models.IntegerField(blank=True, default=0)
    saved_by = models.ManyToManyField(
        Creator, blank=True, related_name="saved_for_future")
    slug = models.SlugField(unique=True, max_length=1000)
    created_on = models.DateTimeField(default=timezone.now)
    publish = models.OneToOneField(PublishingRule, null=True, on_delete=models.RESTRICT, related_name='project_target')
    search_vector = SearchVectorField(null=True)

    class Meta:
        indexes = (GinIndex(fields=["search_vector"]),)

    def save(self, *args, **kwargs):
        """Normalize description and video URL, refresh counters/slug, save.

        Fixes: the description was previously cleaned twice per save
        (double-processing if the cleaner is not idempotent), and an
        unreachable duplicate "m.youtube.com" elif branch has been removed.
        """
        self.description = clean_project_desc(self.description)

        if isinstance(self.video, str):
            # Rewrite common share URLs into their embeddable equivalents.
            # m.youtube.com must be tested before the plain youtube.com match.
            if "m.youtube.com" in self.video:
                self.video = "youtube.com/embed/".join(
                    self.video.split("m.youtube.com/watch?v="))
            elif "youtube.com" in self.video:
                self.video = "embed/".join(self.video.split("watch?v="))
            elif "youtu.be" in self.video:
                self.video = "youtube.com/embed".join(
                    self.video.split("youtu.be"))
            elif "https://vimeo.com" in self.video:
                self.video = "player.vimeo.com/video".join(
                    self.video.split("vimeo.com"))
            elif "drive.google.com" in self.video and "/view" in self.video:
                self.video = self.video.split("/view")[0] + "/preview"

        if self.id:
            # Existing rows: refresh the denormalized counters.
            self.likes_count = self.likes.count()
            self.comments_count = self.comments.all().count()

        if not self.slug:
            # First save only: slugified title plus a short random suffix.
            suffix = str(uuid.uuid4())
            suffix = suffix[:floor(len(suffix) / 6)]
            self.slug = slugify(self.title) + "-" + suffix

        super().save(*args, **kwargs)

    def __str__(self):
        return self.title
class Image(models.Model):
    """A single uploaded image attached to a project."""

    project = models.ForeignKey(
        Project, on_delete=models.CASCADE, null=True, related_name="images", blank=True)
    image_url = models.URLField(max_length=1000)
    # NOTE(review): looks like the media host's public identifier — confirm.
    public_id = models.CharField(max_length=1000, null=True, blank=True)

    def __str__(self):
        # Tolerate a missing image_url attribute, as the original did.
        image = getattr(self, 'image_url', '')
        return "Photo <%s:%s>" % (self.public_id, image)
class Comment(MP_Node):
    """Threaded comment (treebeard MP tree) on a project and/or a profile."""

    project = models.ForeignKey(
        Project, on_delete=models.CASCADE, related_name="comments", blank=True, null=True)
    profile = models.ForeignKey(Creator, on_delete=models.CASCADE,
                                related_name="profile_comments", blank=True, null=True)
    creator = models.ForeignKey(
        Creator, on_delete=models.CASCADE, related_name="comments")
    text = models.CharField(max_length=10000)
    created_on = models.DateTimeField(default=timezone.now)
    publish = models.OneToOneField(PublishingRule, null=True, on_delete=models.RESTRICT, related_name='comment_target')

    # treebeard: order sibling comments chronologically.
    node_order_by = ['created_on']

    def __str__(self):
        return self.text

    def save(self, *args, **kwargs):
        """Sanitize the text, persist, then refresh the parent project.

        Fix: the project is now re-saved *after* this comment is persisted,
        so Project.save()'s recomputed comments_count includes it (saving
        the project first left the count one behind for new comments).
        """
        self.text = clean_comment_text(self.text)
        super().save(*args, **kwargs)
        if self.project:
            self.project.save()
class Tag(models.Model):
    """A free-form label attachable to many projects."""

    projects = models.ManyToManyField(
        Project, blank=True, related_name="tags")
    name = models.CharField(unique=True, max_length=100)
    slug = models.SlugField(unique=True, max_length=150)
    search_vector = SearchVectorField(null=True)

    class Meta:
        indexes = (GinIndex(fields=["search_vector"]),)

    def __str__(self):
        return self.name

    def save(self, *args, **kwargs):
        # First save only: derive the slug from the name plus a short
        # random suffix for uniqueness.
        if not self.slug:
            suffix = str(uuid.uuid4())
            suffix = suffix[:floor(len(suffix) / 6)]
            self.slug = slugify(self.name) + "-" + suffix
        super().save(*args, **kwargs)
class StaffPick(models.Model):
    """A curated, titled collection of projects highlighted by staff."""

    id = models.UUIDField(
        primary_key=True, default=uuid.uuid4, editable=False, unique=True)
    title = models.CharField(max_length=1000)
    description = models.CharField(max_length=1000)
    projects = models.ManyToManyField(
        Project, related_name="staff_picks")
    slug = models.SlugField(unique=True, max_length=1000)
    created_on = models.DateTimeField(default=timezone.now)
    is_active = models.BooleanField(default=True)

    def __str__(self):
        return self.title

    def save(self, *args, **kwargs):
        # First save only: derive the slug from the title plus a short
        # random suffix for uniqueness.
        if not self.slug:
            suffix = str(uuid.uuid4())
            suffix = suffix[:floor(len(suffix) / 6)]
            self.slug = slugify(self.title) + "-" + suffix
        super().save(*args, **kwargs)
| 35.684685 | 119 | 0.639485 | import uuid
from math import floor
from treebeard.mp_tree import MP_Node
from django.db import models
from django.contrib.auth import get_user_model
from django.utils.text import slugify
from django.utils import timezone
from django.contrib.postgres.search import SearchVectorField
from django.contrib.postgres.indexes import GinIndex
from projects.utils import clean_comment_text, clean_project_desc
Creator = get_user_model()
class PublishingRule(models.Model):
DRAFT = 1
PREVIEW = 2
AUTHENTICATED_VIEWERS = 3
PUBLIC = 4
PUBLISHING_CHOICES = (
(DRAFT, 'DRAFT'),
(PREVIEW, 'PREVIEW'),
(AUTHENTICATED_VIEWERS, 'AUTHENTICATED_VIEWERS'),
(PUBLIC, 'PUBLIC')
)
type = models.PositiveSmallIntegerField(
choices=PUBLISHING_CHOICES, blank=False, null=False, default=DRAFT)
publisher_id = models.CharField(max_length=100, blank=True, null=True)
visible_to = models.ManyToManyField(Creator, blank=True)
def __str__(self):
if self.type == PublishingRule.DRAFT:
return "DRAFT"
if self.type == PublishingRule.PREVIEW:
return "PREVIEW"
if self.type == PublishingRule.AUTHENTICATED_VIEWERS:
return "AUTHENTICATED_VIEWERS"
if self.type == PublishingRule.PUBLIC:
return "PUBLIC"
class Category(MP_Node):
name = models.CharField(max_length=100, unique=True)
description = models.CharField(max_length=1000, blank=True, null=True)
slug = models.SlugField(unique=True, max_length=1000)
search_vector = SearchVectorField(null=True)
node_order_by = ['name']
class Meta:
verbose_name_plural = "categories"
indexes = (GinIndex(fields=["search_vector"]),)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
if self.slug:
pass
else:
uid = str(uuid.uuid4())
uid = uid[0: floor(len(uid)/6)]
self.slug = slugify(self.name) + "-" + uid
super().save(*args, **kwargs)
class Project(models.Model):
id = models.UUIDField(
primary_key=True, default=uuid.uuid4, editable=False, unique=True)
creator = models.ForeignKey(
Creator, on_delete=models.CASCADE, related_name="projects")
title = models.CharField(max_length=1000)
description = models.CharField(max_length=10000, blank=True, null=True)
video = models.URLField(max_length=1000, blank=True, null=True)
materials_used = models.CharField(max_length=5000)
category = models.ForeignKey(
Category, on_delete=models.SET_NULL, null=True, blank=True, related_name="projects")
views = models.ManyToManyField(
Creator, blank=True, related_name="projects_viewed")
views_count = models.IntegerField(blank=True, default=0)
likes = models.ManyToManyField(
Creator, blank=True, related_name="projects_liked")
likes_count = models.IntegerField(blank=True, default=0)
comments_count = models.IntegerField(blank=True, default=0)
saved_by = models.ManyToManyField(
Creator, blank=True, related_name="saved_for_future")
slug = models.SlugField(unique=True, max_length=1000)
created_on = models.DateTimeField(default=timezone.now)
publish = models.OneToOneField(PublishingRule, null=True, on_delete=models.RESTRICT, related_name='project_target')
search_vector = SearchVectorField(null=True)
class Meta:
indexes = (GinIndex(fields=["search_vector"]),)
def save(self, *args, **kwargs):
self.description = clean_project_desc(self.description)
if isinstance(self.video, str):
if self.video.find("m.youtube.com") != -1:
self.video = "youtube.com/embed/".join(
self.video.split("m.youtube.com/watch?v="))
elif self.video.find("youtube.com") != -1:
self.video = "embed/".join(self.video.split("watch?v="))
elif self.video.find("youtu.be") != -1:
self.video = "youtube.com/embed".join(
self.video.split("youtu.be"))
elif self.video.find("m.youtube.com") != -1:
self.video = "youtube.com/embed/".join(
self.video.split("m.youtube.com/watch?v="))
elif self.video.find("https://vimeo.com") != -1:
self.video = "player.vimeo.com/video".join(
self.video.split("vimeo.com"))
elif self.video.find("drive.google.com") != -1 and self.video.find("/view") != -1:
self.video = self.video.split("/view")[0] + "/preview"
if self.id:
self.likes_count = self.likes.count()
self.comments_count = self.comments.all().count()
if self.slug:
pass
else:
uid = str(uuid.uuid4())
uid = uid[0: floor(len(uid)/6)]
self.slug = slugify(self.title) + "-" + uid
self.description = clean_project_desc(self.description)
super().save(*args, **kwargs)
def __str__(self):
return self.title
class Image(models.Model):
project = models.ForeignKey(
Project, on_delete=models.CASCADE, null=True, related_name="images", blank=True)
image_url = models.URLField(max_length=1000)
public_id = models.CharField(max_length=1000, null=True, blank=True)
def __str__(self):
try:
image = self.image_url
except AttributeError:
image = ''
return "Photo <%s:%s>" % (self.public_id, image)
class Comment(MP_Node):
project = models.ForeignKey(
Project, on_delete=models.CASCADE, related_name="comments", blank=True, null=True)
profile = models.ForeignKey(Creator, on_delete=models.CASCADE,
related_name="profile_comments", blank=True, null=True)
creator = models.ForeignKey(
Creator, on_delete=models.CASCADE, related_name="comments")
text = models.CharField(max_length=10000)
created_on = models.DateTimeField(default=timezone.now)
publish = models.OneToOneField(PublishingRule, null=True, on_delete=models.RESTRICT, related_name='comment_target')
node_order_by = ['created_on']
def __str__(self):
return self.text
def save(self, *args, **kwargs):
if self.project:
self.project.save()
self.text = clean_comment_text(self.text)
super().save(*args, **kwargs)
class Tag(models.Model):
projects = models.ManyToManyField(
Project, blank=True, related_name="tags")
name = models.CharField(unique=True, max_length=100)
slug = models.SlugField(unique=True, max_length=150)
search_vector = SearchVectorField(null=True)
class Meta:
indexes = (GinIndex(fields=["search_vector"]),)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
if self.slug:
pass
else:
uid = str(uuid.uuid4())
uid = uid[0: floor(len(uid)/6)]
self.slug = slugify(self.name) + "-" + uid
super().save(*args, **kwargs)
class StaffPick(models.Model):
id = models.UUIDField(
primary_key=True, default=uuid.uuid4, editable=False, unique=True)
title = models.CharField(max_length=1000)
description = models.CharField(max_length=1000)
projects = models.ManyToManyField(
Project, related_name="staff_picks")
slug = models.SlugField(unique=True, max_length=1000)
created_on = models.DateTimeField(default=timezone.now)
is_active = models.BooleanField(default=True)
def __str__(self):
return self.title
def save(self, *args, **kwargs):
if self.slug:
pass
else:
uid = str(uuid.uuid4())
uid = uid[0: floor(len(uid)/6)]
self.slug = slugify(self.title) + "-" + uid
super().save(*args, **kwargs)
| true | true |
f7fceb355ad6e06f7c2f6e6e56fe9c61ba1475bf | 2,192 | py | Python | train.py | rndtestnt/riskAnalysis | 36601374b60efd3f7680adee84e9e54e2f2b52a2 | [
"Apache-2.0"
] | null | null | null | train.py | rndtestnt/riskAnalysis | 36601374b60efd3f7680adee84e9e54e2f2b52a2 | [
"Apache-2.0"
] | null | null | null | train.py | rndtestnt/riskAnalysis | 36601374b60efd3f7680adee84e9e54e2f2b52a2 | [
"Apache-2.0"
] | null | null | null | import os
import warnings
import sys
import pandas as pd
import numpy as np
from itertools import cycle
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.model_selection import train_test_split
from sklearn.linear_model import ElasticNet
from sklearn.linear_model import lasso_path, enet_path
# Training data: one row per commercial loan; target column "PD"
# (presumably probability of default — verify with the data owner).
filePath = "PD_Commercial_Train.csv"
# Point the MLflow client at the local tracking server and silence
# GitPython's warning when no git executable is available.
os.environ['MLFLOW_TRACKING_URI'] = 'http://localhost:5000'
os.environ['GIT_PYTHON_REFRESH'] = 'quiet'
df_credit = pd.read_csv(filePath)
cols = df_credit.columns
# Coerce every column to numeric (unparsable values become NaN), then
# replace the NaNs with 0.
data = df_credit[cols].apply(pd.to_numeric, errors='coerce')
data = data.fillna(0)
# NOTE(review): X/y are defined here but __main__ re-splits from ``data``
# directly; confirm these module-level X/y are still needed.
X = data.drop(["PD"], axis=1)
y = data[["PD"]]
# Import mlflow
import mlflow
import mlflow.sklearn
# Evaluate metrics
def eval_metrics(actual, pred):
    """Return the (rmse, mae, r2) regression metrics for pred vs. actual."""
    mse = mean_squared_error(actual, pred)
    return np.sqrt(mse), mean_absolute_error(actual, pred), r2_score(actual, pred)
#new
if __name__ == "__main__":
    warnings.filterwarnings("ignore")
    np.random.seed(40)
    mlflow.set_experiment('riskAnalysis')
    # Split the data into training and test sets (sklearn default 0.75/0.25).
    train, test = train_test_split(data)
    # Predict PD: features are every other column, target is "PD".
    train_x = train.drop(["PD"], axis=1)
    test_x = test.drop(["PD"], axis=1)
    train_y = train[["PD"]]
    test_y = test[["PD"]]
    # Hyper-parameters from the command line, defaulting to 0.05 each.
    alpha = float(sys.argv[1]) if len(sys.argv) > 1 else 0.05
    l1_ratio = float(sys.argv[2]) if len(sys.argv) > 2 else 0.05
    # Fit an ElasticNet regressor and evaluate it on the held-out split.
    lr = ElasticNet(alpha=alpha, l1_ratio=l1_ratio, random_state=42)
    lr.fit(train_x, train_y)
    predicted_qualities = lr.predict(test_x)
    (rmse, mae, r2) = eval_metrics(test_y, predicted_qualities)
    # Print out ElasticNet model metrics.
    print("Elasticnet model (alpha=%f, l1_ratio=%f):" % (alpha, l1_ratio))
    print(" RMSE: %s" % rmse)
    print(" MAE: %s" % mae)
    print(" R2: %s" % r2)
    # Log params, metrics and the fitted model to MLflow for the UI.
    mlflow.log_param("alpha", alpha)
    mlflow.log_param("l1_ratio", l1_ratio)
    mlflow.log_metric("rmse", rmse)
    mlflow.log_metric("r2", r2)
    mlflow.log_metric("mae", mae)
    mlflow.sklearn.log_model(lr, "model")
| 29.621622 | 77 | 0.697993 | import os
import warnings
import sys
import pandas as pd
import numpy as np
from itertools import cycle
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.model_selection import train_test_split
from sklearn.linear_model import ElasticNet
from sklearn.linear_model import lasso_path, enet_path
filePath = "PD_Commercial_Train.csv"
os.environ['MLFLOW_TRACKING_URI'] = 'http://localhost:5000'
os.environ['GIT_PYTHON_REFRESH'] = 'quiet'
df_credit = pd.read_csv(filePath)
cols = df_credit.columns
data = df_credit[cols].apply(pd.to_numeric, errors='coerce')
data = data.fillna(0)
X = data.drop(["PD"], axis=1)
y = data[["PD"]]
import mlflow
import mlflow.sklearn
def eval_metrics(actual, pred):
rmse = np.sqrt(mean_squared_error(actual, pred))
mae = mean_absolute_error(actual, pred)
r2 = r2_score(actual, pred)
return rmse, mae, r2
if __name__ == "__main__":
warnings.filterwarnings("ignore")
np.random.seed(40)
mlflow.set_experiment('riskAnalysis')
train, test = train_test_split(data)
train_x = train.drop(["PD"], axis=1)
test_x = test.drop(["PD"], axis=1)
train_y = train[["PD"]]
test_y = test[["PD"]]
alpha = float(sys.argv[1]) if len(sys.argv) > 1 else 0.05
l1_ratio = float(sys.argv[2]) if len(sys.argv) > 2 else 0.05
lr = ElasticNet(alpha=alpha, l1_ratio=l1_ratio, random_state=42)
lr.fit(train_x, train_y)
predicted_qualities = lr.predict(test_x)
(rmse, mae, r2) = eval_metrics(test_y, predicted_qualities)
print("Elasticnet model (alpha=%f, l1_ratio=%f):" % (alpha, l1_ratio))
print(" RMSE: %s" % rmse)
print(" MAE: %s" % mae)
print(" R2: %s" % r2)
mlflow.log_param("alpha", alpha)
mlflow.log_param("l1_ratio", l1_ratio)
mlflow.log_metric("rmse", rmse)
mlflow.log_metric("r2", r2)
mlflow.log_metric("mae", mae)
mlflow.sklearn.log_model(lr, "model")
| true | true |
f7fcec19a413a64c81802ce6524bb5662bb070fb | 517 | py | Python | hashcode/10052.py | noritersand/python-lab | 9238d62cb2d8b574a8d3697ac7d8b51f1251bc4b | [
"Unlicense"
] | null | null | null | hashcode/10052.py | noritersand/python-lab | 9238d62cb2d8b574a8d3697ac7d8b51f1251bc4b | [
"Unlicense"
] | null | null | null | hashcode/10052.py | noritersand/python-lab | 9238d62cb2d8b574a8d3697ac7d8b51f1251bc4b | [
"Unlicense"
] | null | null | null | n=4
m=3
k = {}
a=1
b=0
c=0
while c< m*n:
for i in range(abs(m)):
b=b+1
c=c+1
k[(a,b)] = c
if c==m*n:
break
for i in range(abs(n-1)):
a=a+1
c=c+1
k[(a,b)] = c
if c==m*n:
break
for i in range(abs(m-1)):
b=b-1
c=c+1
k[(a,b)] = c
if c==m*n:
break
for i in range(abs(n-2)):
a= a-1
c=c+1
k[(a,b)] = c
if c==m*n:
break
print(k) | 13.972973 | 29 | 0.324952 | n=4
m=3
k = {}
a=1
b=0
c=0
while c< m*n:
for i in range(abs(m)):
b=b+1
c=c+1
k[(a,b)] = c
if c==m*n:
break
for i in range(abs(n-1)):
a=a+1
c=c+1
k[(a,b)] = c
if c==m*n:
break
for i in range(abs(m-1)):
b=b-1
c=c+1
k[(a,b)] = c
if c==m*n:
break
for i in range(abs(n-2)):
a= a-1
c=c+1
k[(a,b)] = c
if c==m*n:
break
print(k) | true | true |
f7fceee9e379c3f537a25770001b3fbabff0277d | 717,715 | py | Python | hydrus/client/db/ClientDB.py | Asday/hydrus | a09ab839633661f446612a92b680bb8118a46b39 | [
"WTFPL"
] | null | null | null | hydrus/client/db/ClientDB.py | Asday/hydrus | a09ab839633661f446612a92b680bb8118a46b39 | [
"WTFPL"
] | null | null | null | hydrus/client/db/ClientDB.py | Asday/hydrus | a09ab839633661f446612a92b680bb8118a46b39 | [
"WTFPL"
] | null | null | null | import collections
import hashlib
import itertools
import os
import random
import re
import sqlite3
import time
import traceback
import typing
from qtpy import QtCore as QC
from qtpy import QtWidgets as QW
from hydrus.core import HydrusConstants as HC
from hydrus.core import HydrusData
from hydrus.core import HydrusDB
from hydrus.core import HydrusDBBase
from hydrus.core import HydrusExceptions
from hydrus.core import HydrusGlobals as HG
from hydrus.core import HydrusPaths
from hydrus.core import HydrusSerialisable
from hydrus.core import HydrusTags
from hydrus.core.networking import HydrusNetwork
from hydrus.client import ClientAPI
from hydrus.client import ClientApplicationCommand as CAC
from hydrus.client import ClientConstants as CC
from hydrus.client import ClientData
from hydrus.client import ClientDefaults
from hydrus.client import ClientFiles
from hydrus.client import ClientLocation
from hydrus.client import ClientOptions
from hydrus.client import ClientSearch
from hydrus.client import ClientServices
from hydrus.client import ClientThreading
from hydrus.client.db import ClientDBDefinitionsCache
from hydrus.client.db import ClientDBFilesDuplicates
from hydrus.client.db import ClientDBFilesMaintenance
from hydrus.client.db import ClientDBFilesMaintenanceQueue
from hydrus.client.db import ClientDBFilesMetadataBasic
from hydrus.client.db import ClientDBFilesStorage
from hydrus.client.db import ClientDBMaintenance
from hydrus.client.db import ClientDBMappingsCacheSpecificDisplay
from hydrus.client.db import ClientDBMappingsCounts
from hydrus.client.db import ClientDBMappingsCountsUpdate
from hydrus.client.db import ClientDBMappingsStorage
from hydrus.client.db import ClientDBMaster
from hydrus.client.db import ClientDBRepositories
from hydrus.client.db import ClientDBSerialisable
from hydrus.client.db import ClientDBServices
from hydrus.client.db import ClientDBSimilarFiles
from hydrus.client.db import ClientDBTagDisplay
from hydrus.client.db import ClientDBTagParents
from hydrus.client.db import ClientDBTagSearch
from hydrus.client.db import ClientDBTagSiblings
from hydrus.client.importing import ClientImportFiles
from hydrus.client.media import ClientMedia
from hydrus.client.media import ClientMediaManagers
from hydrus.client.media import ClientMediaResult
from hydrus.client.media import ClientMediaResultCache
from hydrus.client.metadata import ClientTags
from hydrus.client.metadata import ClientTagsHandling
from hydrus.client.networking import ClientNetworkingBandwidth
from hydrus.client.networking import ClientNetworkingDomain
from hydrus.client.networking import ClientNetworkingFunctions
from hydrus.client.networking import ClientNetworkingLogin
from hydrus.client.networking import ClientNetworkingSessions
from hydrus.client.importing import ClientImportSubscriptionLegacy
from hydrus.client.networking import ClientNetworkingSessionsLegacy
from hydrus.client.networking import ClientNetworkingBandwidthLegacy
#
# 𝓑𝓵𝓮𝓼𝓼𝓲𝓷𝓰𝓼 𝓸𝓯 𝓽𝓱𝓮 𝓢𝓱𝓻𝓲𝓷𝓮 𝓸𝓷 𝓽𝓱𝓲𝓼 𝓗𝓮𝓵𝓵 𝓒𝓸𝓭𝓮
# RESOLVE INCIDENT
#
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒█▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓█▓██▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓█▓▒█▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓█▓▓▓▓▓█▓▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓██ █▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒░▒▓▓▓░ █▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓█▓▓█▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▓▒ ░▓▓▓ ▒█▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓████▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒▒▓▒ ▓▓▓▓ ▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓█▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▓▓▓█▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▓▓▒▒▒▒▒▓ ▓▓▓▓ ▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓█▒█▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓ ░▓░ ▓▓▓▓▒▒▒▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▓▓▓█▒ ▓▓▓█ ▓█▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓█▓█▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓░ ▓░ ▓▒▒▒▒▒▒▒▒▒▒▒▓▓▓▓▒▓▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓ ▓▓▓░ ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓█▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓ ▒▓▓▓▓▓▓▓▓▓▓▓▓▓█▓▒▒▒▓▓▓▓▒▒▒▒▒▒▒▒▒▒▓ ▓▓▓ ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓█▓█▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓█ ▒█▓░▒▓▒▒▒▒▓▓▓█▓████████████▓▓▓▓▓▒▒▒▓ ▒▓▓▓ ▓█▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓██▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓ ░█▓ ░▓▓█████████▓███▓█▓███████▓▓▓▓▓ ░▓▓█ █▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓█▓█▓▓▓▓▓▓▓▓▓▓▓▓▓█▒▒█▓▓▓▓▓▓▓▓▓▓ ▓▓ ░██████▓███▓█████▓▓▓▓▓█████▓▓▓▒ ▓▓▓▒ ▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒███▓▓▓▓▓▓▓▓▓▓▓████▓█▓▓▓▓▓▓▓▓▓▓█░▓▓███▓▓▓█▓█▓▓▓█▓█▓███▓▓▓▓▓▓██████▓ ▓▓▓ ▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓█▓▒▓▓▓█▒▓▓▒▓▓▓██▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒██████▓▓▓▓▓████▓▓█▓▓██▓▓▓▓▓▓██▓███ ▓█ ██▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓█▓ ▒███▒█▒▓█▓▓███▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓██▓█▓▓██▓▓▓▓▓▓▓▓██▓▓▓▓█▓░▒▒▒▓▓█████ ▒█ ▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓█▓░▓██▓▒█▓████▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓███▓▓▓█▓▓██▓▓▓▓▓▓▓▓▓█▓▓▓▓█░ ▓▓▓▓█████▓▓▓░ █▓▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓█▓▒▓██▓▒█▓▓█▓▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓██▓▓▓▓▓▓▓▓▓██▓▓▓▓▓▒▒▒▓▒ ▒▓▓░▓▓▓▓▓█████▓▓▒ ▓▓▓▒▓▓ ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓███▓███▓▓▓▒█▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓█▓▓█▓█▓▓█▓▓▓▓███▓▒▒▒▒░░▓▓▓▓█▓▓▓▓▓███████▓▓░██▓▓▓▓▒ ▒▓ ▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓█▓▒▓█▓▓█▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓█▓▓▓▓▓▓▓█▓▓▓▓▒▒▓██▓▓▒▓▓▓▓████▓▓▓▓▓██▓▓███▒ ▒█▓▒░░ ▓▓ ▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒██▒▓▓█▓█▓▓█▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓██▓▓█▓█▓▒▓█▓▓▓▓▓▓▓▓██████▓▓███▓▓▓▓█████▓█▓ ▓ ░▒▓▓▒ ▒█▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓█▓▓▓█▓▓█▓█▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓██▓▓█▓▓█▓▓▓▓▓▓██▓██████████████▓▓▓███▓▓▓█░░█░▒▓▓░▒▒ ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓██▒▓██▓█▓█▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒██▓▓█▓▓▓██▓▓▓▓░▓█▓▒▓███████████▓▓▓███▓▓▓█▓▒▒▓▒ ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▓█▒▓██▓▓█▓█▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓█▓▓███ ▓███░▒▒ ▓▓▒ ░░▒░░▓█▓▓██▓▓▓▓█▓▓▒ ▒█▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓█▓▓██▓▓███▓▒▓▓▓▓▓▓▓▓▓▓▓▓▓██▓███ ███ ▒ ▒▒░░▓▓▒██ ██████▓▓▓█░▒▓▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓██▓▓██▓█▓▓█▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓██▓█▒ ░██▓ ░▒▒▓█████▓ █▓█▓██▓▓▓█▓██▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒██▓▓██▓▒█▒█▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓██▓▒▓ ░ ▒▒ ▒ ░█▓▒ ▒ ░░▒█▓▓█████▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▓███▓███▒█▓▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▓██▒ ▒▓▓▒ ░▓▒▒██▓▓███▓█▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓█▓░▓▓█░▓█▒▓█▓███▓▓▒▓▓▓▓▓▓▓▒▓██▒▓████ ▒▓░▒█▓▓█▓██▓█▓▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓██▓▓██▓░█▓█▓▒▒▒▓▓██▓▓▒▓▓▓▓▓▒▓██▒ ▓░ ▒▓▒▓█▓███▓▓▓█▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▓▒▓▓█████▓▓▓██▒▓█▓█▓▓▓▓▒▒██▓▓▓▓▓▓▓▓▒▓█▓ ▒▓▒▓█▓▓█▓█▓▓█▓▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓█▒░▒▓▓███▓▓██▓▓▓▓█▓▓█▓██▓█▓▓▒▓█▓▓▓▓▓▓▓▓▓▓▓▓▒ ░ ▓▓▒▓█▒██▓▓▓▓█▓▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▓▓█████▓▒▓▓▓█▓▓▓▓██▒█▓▓███▓▓▓▒██▓▓▓▓▓▓▓▓▓▓▓▓░ ▓█▒░▒▒▓██▓█▓▓█▓▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓█████▓▓ ▓▓██▓▓▓██▒▓█▓█▓▒▓▓▓▓▓█▓▓▓▓▓▓▓▓▓▓▓▓▓░ ░░ ░▒█▒▒▒░▒▓█▓▓▓▓▓█▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▓█▓▓▓ ▒██▓▓▓▓█▓▒██▓▓▒▓▓▓▓▒██▓▓▓▓▓▓▓▓▓▓▓▓█▓ ░▓░░ ░███▓██▓▓▓▓▓█▓▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒██▓▓▓░▓██▓▓▓▓██░▓█▓▓▓▓▓▓▓▒▓██▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒ ░▓▒ ░ ▓███▓██▓█▓▓▓█▓▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▓█▓█▓▒▓██▓▓▓██▓▒█▓▓▓▓▓▓▓▓▒██▓▒▓▓▓▓▓▓▓▓▓▓█▓▓▓▓▓░ ▓█▓ █▓▓█▓█▓▓█▓▓▓██▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓██ ░██▓▓▓▓█▓▒▓█▓▓▒▓▓▓▓▒▓█▓▓▓▓▓▓▓▓▓▒███▓▒▓▓▓▓███▓░ █▓▓█▓█▓▓█▓▓▓██▓▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓█▓ █▓░ ░█▓▓▓▓██▓▓██▓▓▒▓▓▓▓▒██▓▓▓▓▓▓▓▓▒▓█▓▓▓▒▓▓▓▓▓░ ░█▒▓█▓█▓▓▓█▓▓▓▓▓▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓█░ ░ ███ ██▓▓▓██▓▒██▓▓▒▓▓▓▓▒▓██▓▓▓▓▓▓▓▒▓█▓▓▓▒▒▓▓█▓ █▓██▒█▓▓▓▓█▓▓▓█▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒ ░ ███ ▓█▓▓▓▓██▒▓█▓▓▓▓▓▓▓▓▒██▓▒▓▓▓▓▓▓▓██▓█▒▓▓█▓░ █▓██▒▓██████▓▓▓▓▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓█ ▓ ▓█ ░█▓▓▓▓██▓▓██▓▓▒▓▓▓▓▒▓█▓▓▒▓▓▓▓▓░▓███▓▓█░ █▓█▓▓▓▓▓█▓░███▓▓▓▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓█▓▓█ ▓▒ ██▒ ▒████▓███▒▓█▓▓▓▒▓▓▓▒▓██▓▓▓▓▓▓▓▒▒███▓▓▒ ▒ ▓███▓▓▓▓▓ ░░▓▓██▓▓▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓██▓▓█▓██ ▓█▓▓▓▓▓██▓▓██▓▓▒▒▓▒▒▒▓██▓▒ ▓█▓██ ░ ▓▒▓██▓▓▓▒ ░ ██▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▒▓██▓█████▓ ▓██▓█████▓▓▓█▓▓▓▓▓▓▓▓█▒██ █░▒▓▓▓█ ▓▒▓██▓▒░ ▒▒ █▓▓▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▓████▓ ▓█████████▓▓██▓▓▓▓▓█▓▓▓██▒ █▓ ▓▒▓▒ ▓▓▓█▓ ▒▓ ▒█▓▓▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒██▓▒▓░ ▒███▓█████▓▓███▓▓▓▓▓█████▓ ▓▓▓░ ▓▒▓▒ ▒▓▓▓▒ ▓▓▓█▒ ▓█▓▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▓▓▓▒ ███▓▓█████▓▓████▓▓▓███▓░ ▓▓▓█▓ ▓▓▓ ▓█▒░ ▒▒▓▓▓█ ██▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▓▒▓▒▓▓▓▓▓▓▓▓▓▓█▓ ▒███▓█████▓▓▓█▓▓▓███▓ ▓▓▓▓▓ ▒▓▓ ▓▓▒ ▒▓▒█▓▓▒▓▓ ▓█▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▓▒▒█▓▒▓▒▓▓▓▓▓▓▓▓█▒ ███▓▓█▒██▓▓█▓███▓▓▓░ ▓▓▒▓▒▓▓█ ▓▒ ░▓▓░ ▒█▓▓▓▒▒▓▓▓ ▒█▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▓▒▒▓▓▓▓▓▒▓▓▒▒▒▓▓▓▓▓ ▒██▓█▒▒▓██▒████▓▒▒▓ ▓▓▒▓▒▒▒▓▓▓ ▒▒ ▒▓░▓▒█▓▓▒▒▒▒▒▒▒▓▒ █▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▓▓▓▓▓▒▒▓█▓▓▓▓▓▓▓▓▓▓▒▓▒▒▓▒ ▓███▓▓▓██░▓▓██▓▒▓▒ ▓▓▒▒▒▒▒▒▓▓▓█▓░ ▒█▓▓▓▓▓▒▒▒▒▒▒▒▒▓▒ ░░ ▓▓▒▒▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▓▒▓▒▓▒▒▓█▓██▓▓▓▓▓▓▓▓▓▓▓▓▓▓░ ▒█▓▓█▓▒██░░ ▒██▒▓ ░▓▒▒▒▒▒▒▒▒▓▓▓▓▓█▓▓█▓▓▒▒▒▒▒▒▒▒▒▒▒▓░ ░▒▓▓ █▓▓▓▓▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓
# ▒▓▓▓▒▒▓█▓▓▓█▓▓▓▓▓▓▒▓▓▓▓▓▓▓██████████▓██▒▓█▓▓ ██▓▓ ▓▒▒▒▒▒▒▒▒▒▒▓▓▓░▒▓▒▓▓▒▒▒▒▒▒▒▒▒▒▒▒▓░░▒▒░▒▓ ▒███▒▓▓▓▒▓▓▓▓▓▓▓▒▓▓▓
# ▓▓▓▒▒▓█▓▓████▓▓▓▓▓▓▓▓▓▓▓▓█▓▓███████▓▒▓█▓▓██▒▒ ▓██ ▒▓▒▒▒▒▒▒▒▒▒▒▓▓░ ▒▒░▓▓▒▒▒▒▒▒▒▒▒▒▒▒▓▒ ░░░░█▓ ▓█▒▒▓▓▓▓▒▓▓▓▓▓▓▓▓▓▓
# ▒▓▒▒▓███████████▓▓▓▓▓▓▓▓▓▓▓█▓▓▓▓▓▓██▒▓█▓▒██▒▓░▒██ ▓▓▒▒▒▒▒▒▒▒▒▒▓▒ ▒▒░▓▓▒▒▒▒▒▒▒▒▒▒▒▒▓▓ ░░▒▓▓░ ▒░▒ ▒▓▒▓▒▓▒▓▒▓▒▓▒▓▒▓
# ▓▒▓▒▓▓▓▓███████▓█▓██▓▓█▓▓▓▓▓▓▓▓▓█▓██▓▓██▒▓█▓▒▓▓██░ ▒▓▒▓▒▓▓▓▒▒▓▓▓ ░░▒░ ▒▓▒▒▒▒▒▒▒▒▒▒▒▒▓▓ ░ ▓▓▓▓ ▒ ▒ ▒▓▓▒▓▒▓▒▓▓▓▒▓▒▓▒
# ▒▓▒▓▒▒▒▒▒▓▓██████████▓▓▓▓▓▓█▓█▓█▓███▓▓▓█▓▒██▒▓█▒██ ░█▓▓▓▓▓▓▓▓▓▓ ▒▒▒░ ▒▓▒▒▒▒▒▒▒▒▒▒▒▓▓▓ ░░▒▓▒▓█▒ ░ ██▒▓▒▓▒▓▒▓▒▓▒▓▒▓
# ▓▒▓▒▓▒▓▒▒▒▒▒▒▒▓▓█████████▓▓██████████▓▓█▓▒▓██▓█▒▓█░ ▓▓▓▓▓▓▓▓▓█▒ ▒▒▓░▒ ░█▓▓▓▒▓▓▓▓▓▓▓▒▓▓▒ ▒▓▓▒▓▒░ ░▒█▒ ▓▒▓▓▓▒▓▒▓▒▓▒▓▒▓▒
# ▒▓▒▓▒▓▒▓▒▓▒▒▒▒▒▒▒▒▒▓▓▓▓▓▓▓███████▓▓██▓▓▓█▒▒██▓▓▓▓█▓ ░█▓▓▓▓▓▓▓▓ ▒▒▒ ▒ █▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓ ▒▓▒▓▒▓▓▓▓▓░▓█▓▒ ▒▓▒▓▒▓▒▓▒▓▒▓▒▓
# ▓▒▓▒▓▒▓▒▓▒▒▒▓▒▓▒▒▒▒▒▒▒▒▒▒▒▒▒▓ ░▓▒██▓▓▓▓▒▓█▓▓█▓▓█ ░▓▓▓▓▓▓▓█░ ░▒▒▒ ▒ █▓▓▓▓▓▓▓▓▓▓▓▓▓▓█▒▒▓▒▒▒▒▓▓▓▒░ ░ ▓▓▒▓▒▓▒▓▒▓▒▒▒
# ▒▓▒▓▒▒▒▒▒▒▒▒▒▒▒▓▒▒▒▒▒▒▒▒▒▒▒▒▓▒ ▒░ ██▓██▒▓██▓█▓▒█▒░▓▓▓▓▓▓▓▓ ░▓▓▒ ▒ ▓▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒▓▒ ▓▓▒▒▒▓▒▒▒▓▒▒
# ▓▒▓▒▓▒▓▒▒▒▒▒▒▒▓▒▒▒▒▒▓▒▒▒▓▒▒▒▓▓░░░ ▓██▓█▓▓██▓▓█▒█▓▒▓▓▓▓▓▓▓░ ░▓▒░ ▒ ▒▒▒█▓▓▓▓▓▓▓▓▓▓▓▓▓█▓▒▒▒▒▒▒▓▓▒ ░▓▓▒▒▒▒▒▒▒▓▒
# ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▒ ░ ██▓▓█▒▓█▓▒█▓▓█▓▓▓▓▓▓▓▓░ ░▓▓ ▒░ ▒▒ ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▓▒▓▒▓▓▒ ░░░ ░▓▓▒▒▒▓▒▒▒▒
# ▓▒▒▒▒▒▒▒▒▒▒▒▒▒▓▒▒▒▓▒▒▒▒▒▒▒▒▒▒▒▓ ░░ ▓██▓█▒▓██▓▓▓▓█▓▓▓▓▓▓▓▓██▒░▒▒ ▓▒ ░▓ ░█▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▓▒▒▒▓▒▓▓ ░░░░ ▒▓▓▒▒▒▒▒▓▒
# ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▓ ░░ ░██▓▓▓▒██▓▓█▓▓▓▓▓▓▓▓▓▓▓▓▓█▓▒ ▒▒ ▓░ ▓▓▓▓▓▓▓▓▓▓▓█▒▒▒▒▒▒▒▒▒▒▒▒▓▓▒ ░ ▒▓▓▒▒▒▒▒▒
# ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▒▒▒▒▓▓ ░░ ██▓▓█▒▓██▓██▓▓▓▓▓▓▓▓▓▓▓▓▓█▒ ▒▓ ░▒ ▓▓▓▓▓▓▓▓▓█▒▒▒▒▒▒▒▒▒▒▒▒▒▓▓ ░ ░░░ ▒▓▒▒▒▒▒▒
# ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▓░ ▓██▓▓▓▒██▓▓▓▓▓▓▓▓▒▒▒▓▓▓▓▓▓░ ░▓▒▓▓▓░▒▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒▒▒▒▒▒▒▒▓▒ ░░ ░░ ▒▓▒▒▒▒▒
# ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▓▒ ▓█▓█▓▓█▒███▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▓▓▓▒▒▓█▓█▓▓█▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒▒▒▒▒▒▒▒▓▓ ░░░ ░░ ▓▒▒▒▒▒
# ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▓▓▓▓▒▓█▓█▓▓██▓▒▓▓▓▓▓▓▓▓▓▓▓▒▒▒▓▓▓▓▓▓▒▒▒▒▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒▒▒▒▒▒▒▓▓ ░ ░░░░░ ▓▓▒▒▒▒
# ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓██▒▒▓▓█▓▓▒▓██▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒▒▒▒▒▒▓▒ ▒░ ░ ░░░░░ ▓▒▒▒▒▒
# ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒██▓▒▓▓▓▒▓▓▓█▓▒▒▓▓▓▒▓▒▒▒▓▒▒▒▒▓▓▒▒▒▓▓▓▓▓▓▓▒▒▒▓▓▓▓▓▓▒▒▒▒▒▒▒▒▒▒▓░ ██▓ ░ ░░░░ ▒▓▒▒▒▒▒
# ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒░▓██▒▓▓▓█▓▓██▓▒▓▓▓▓▓▒▒▒▒▒▒▒▒▒▓▒▓▓▓▓▓▒▓▒▓▓▒▓▓▓▓▓▒▓▓▒▒▒▒▒▒▒▒▒▓░▓█▒▒░▒ ░ ░░░░ ▒▓▒▒▒▒▒
# ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒██▓▓▓▓▓███▓▒▓▒▓▒▓▒▓▒▒▒▒▒▒▒▒▓▓▒▓▒▓▓▓▓▓▒▒▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒▒▓▓██▒ ▒░ ░░░ ▓▒▒▒▒▒▒
# ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒███▓▒▓██▓█▓▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▒▓▓▓▓▓▓▓▓▒▒▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒▒▒▒█░ ▒ ░░ ░▓▒▒▒▒▒▒
# ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▓██▓▓▓▒▒▓▓▓▓▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒▒▓▓ ▒░▓░ ░░ ▒▓▒▒▒▒▒▒
# ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒░░░░▒▒░░▓▓▒▓▓▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▓▓▓▓ ░▒▒▒▒ ▓████▒ ▒▒▒▒▒▒▒▒
def BlockingSafeShowMessage( message ):
    """Print the message to the debug log, then raise a blocking Qt 'Warning' dialog for it."""
    
    HydrusData.DebugPrint( message )
    
    controller = HG.client_controller
    
    controller.CallBlockingToQt( controller.app, QW.QMessageBox.warning, None, 'Warning', message )
    
def report_content_speed_to_job_key( job_key, rows_done, total_rows, precise_timestamp, num_rows, row_name ):
    """Push a 'content row x/y: processing ... at n rows/s' status to the splash screen and the job popup."""
    
    elapsed = HydrusData.GetNowPrecise() - precise_timestamp
    
    rows_per_second = HydrusData.ToHumanInt( int( num_rows / elapsed ) )
    
    value_range = HydrusData.ConvertValueRangeToPrettyString( rows_done, total_rows )
    
    popup_message = 'content row {}: processing {} at {} rows/s'.format( value_range, row_name, rows_per_second )
    
    HG.client_controller.frame_splash_status.SetText( popup_message, print_to_log = False )
    
    job_key.SetVariable( 'popup_text_2', popup_message )
    
def report_speed_to_job_key( job_key, precise_timestamp, num_rows, row_name ):
    """Push a 'processing ... at n rows/s' status to the splash screen and the job popup."""
    
    elapsed = HydrusData.GetNowPrecise() - precise_timestamp
    
    rows_per_second = HydrusData.ToHumanInt( int( num_rows / elapsed ) )
    
    popup_message = 'processing {} at {} rows/s'.format( row_name, rows_per_second )
    
    HG.client_controller.frame_splash_status.SetText( popup_message, print_to_log = False )
    
    job_key.SetVariable( 'popup_text_2', popup_message )
    
def report_speed_to_log( precise_timestamp, num_rows, row_name ):
    """Print a 'processed n things at m rows/s' summary to the log; no-op when nothing was processed."""
    
    if num_rows == 0:
        
        return
        
    elapsed = HydrusData.GetNowPrecise() - precise_timestamp
    
    rows_per_second = HydrusData.ToHumanInt( int( num_rows / elapsed ) )
    
    summary = 'processed {} {} at {} rows/s'.format( HydrusData.ToHumanInt( num_rows ), row_name, rows_per_second )
    
    HydrusData.Print( summary )
    
class FilteredHashesGenerator( object ):
    """Holds, per file service id, the set of hash_ids considered valid, and
    filters arbitrary hash_id collections against those sets."""
    
    def __init__( self, file_service_ids_to_valid_hash_ids ):
        
        self._file_service_ids_to_valid_hash_ids = file_service_ids_to_valid_hash_ids
        
    
    def GetHashes( self, file_service_id, hash_ids ):
        """Return the subset of hash_ids that are valid on the given file service."""
        
        valid_hash_ids = self._file_service_ids_to_valid_hash_ids[ file_service_id ]
        
        return valid_hash_ids.intersection( hash_ids )
        
    
    def IterateHashes( self, hash_ids ):
        """Yield ( file_service_id, filtered_hash_ids ) for every service with a
        non-empty valid set and a non-empty intersection with hash_ids."""
        
        for ( file_service_id, valid_hash_ids ) in self._file_service_ids_to_valid_hash_ids.items():
            
            if len( valid_hash_ids ) == 0 or valid_hash_ids.isdisjoint( hash_ids ):
                
                continue
                
            yield ( file_service_id, valid_hash_ids.intersection( hash_ids ) )
            
        
    
class FilteredMappingsGenerator( object ):
    """Filters ( tag_id, hash_ids ) mapping rows down to the hash_ids that are
    valid on a particular file service."""
    
    def __init__( self, file_service_ids_to_valid_hash_ids, mappings_ids ):
        
        self._file_service_ids_to_valid_hash_ids = file_service_ids_to_valid_hash_ids
        self._mappings_ids = mappings_ids
        
    
    def IterateMappings( self, file_service_id ):
        """Yield ( tag_id, filtered_hash_ids ) rows for this file service, skipping empties."""
        
        valid_hash_ids = self._file_service_ids_to_valid_hash_ids[ file_service_id ]
        
        if len( valid_hash_ids ) == 0:
            
            return
            
        for ( tag_id, hash_ids ) in self._mappings_ids:
            
            filtered_hash_ids = valid_hash_ids.intersection( hash_ids )
            
            if len( filtered_hash_ids ) > 0:
                
                yield ( tag_id, filtered_hash_ids )
                
            
        
    
class JobDatabaseClient( HydrusData.JobDatabase ):
    """Client-side db job that can pump the Qt event loop while waiting on a result."""
    
    def _DoDelayedResultRelief( self ):
        """When ui-hang relief mode is on and we are on the main Qt thread,
        process pending Qt events so the UI does not appear frozen."""
        
        if not HG.db_ui_hang_relief_mode:
            
            return
            
        if QC.QThread.currentThread() != HG.client_controller.main_qt_thread:
            
            return
            
        HydrusData.Print( 'ui-hang event processing: begin' )
        
        QW.QApplication.instance().processEvents()
        
        HydrusData.Print( 'ui-hang event processing: end' )
        
    
class DB( HydrusDB.HydrusDB ):
    """The main client database object."""
    
    # NOTE(review): presumed from the name to mark read commands that may also
    # write during processing -- confirm against HydrusDB's command dispatch
    READ_WRITE_ACTIONS = [ 'service_info', 'system_predicates', 'missing_thumbnail_hashes' ]
def __init__( self, controller, db_dir, db_name ):
    """Set up client-side db state, then run the HydrusDB base initialisation."""
    
    # messages queued for display after boot
    self._initial_messages = []
    
    self._have_printed_a_cannot_vacuum_message = False
    
    # shared cache of recently fetched media results
    self._weakref_media_result_cache = ClientMediaResultCache.MediaResultCache()
    
    # content update callbacks to run once the current job is done
    self._after_job_content_update_jobs = []
    
    # hash_ids/tag_ids whose cached tags managers are pending regeneration
    self._regen_tags_managers_hash_ids = set()
    self._regen_tags_managers_tag_ids = set()
    
    # NOTE(review): these attributes are deliberately assigned before the base
    # __init__, which presumably runs the db boot sequence -- keep this order.
    HydrusDB.HydrusDB.__init__( self, controller, db_dir, db_name )
    
def _AddFiles( self, service_id, rows ):
    """Add ( hash_id, timestamp ) rows to a file service's current files,
    updating every dependent cache and the service_info counts.
    
    Rows whose hash_id is already current on the service are silently skipped.
    """
    
    hash_ids = { row[0] for row in rows }
    
    # only act on hash_ids not already current here
    existing_hash_ids = self.modules_files_storage.FilterHashIdsToStatus( service_id, hash_ids, HC.CONTENT_STATUS_CURRENT )
    
    new_hash_ids = hash_ids.difference( existing_hash_ids )
    
    if len( new_hash_ids ) > 0:
        
        service = self.modules_services.GetService( service_id )
        
        service_type = service.GetServiceType()
        
        valid_rows = [ ( hash_id, timestamp ) for ( hash_id, timestamp ) in rows if hash_id in new_hash_ids ]
        
        # if we are adding to a local file domain, either an import or an undelete, remove any from the trash and add to combined local file service if needed
        
        if service_type == HC.LOCAL_FILE_DOMAIN:
            
            self._DeleteFiles( self.modules_services.trash_service_id, new_hash_ids )
            
            self._AddFiles( self.modules_services.combined_local_file_service_id, valid_rows )
            
        # insert the files
        
        pending_changed = self.modules_files_storage.AddFiles( service_id, valid_rows )
        
        if pending_changed:
            
            self._cursor_transaction_wrapper.pub_after_job( 'notify_new_pending' )
            
        # gather the service_info count deltas for these new files
        
        delta_size = self.modules_files_metadata_basic.GetTotalSize( new_hash_ids )
        num_viewable_files = self.modules_files_metadata_basic.GetNumViewable( new_hash_ids )
        num_files = len( new_hash_ids )
        num_inbox = len( new_hash_ids.intersection( self.modules_files_metadata_basic.inbox_hash_ids ) )
        
        service_info_updates = []
        
        service_info_updates.append( ( delta_size, service_id, HC.SERVICE_INFO_TOTAL_SIZE ) )
        service_info_updates.append( ( num_viewable_files, service_id, HC.SERVICE_INFO_NUM_VIEWABLE_FILES ) )
        service_info_updates.append( ( num_files, service_id, HC.SERVICE_INFO_NUM_FILES ) )
        service_info_updates.append( ( num_inbox, service_id, HC.SERVICE_INFO_NUM_INBOX ) )
        
        # remove any records of previous deletion
        
        if service_id != self.modules_services.trash_service_id:
            
            num_deleted = self.modules_files_storage.ClearDeleteRecord( service_id, new_hash_ids )
            
            service_info_updates.append( ( -num_deleted, service_id, HC.SERVICE_INFO_NUM_DELETED_FILES ) )
            
        # if entering the combined local domain, update the hash cache
        
        if service_id == self.modules_services.combined_local_file_service_id:
            
            self.modules_hashes_local_cache.AddHashIdsToCache( new_hash_ids )
            
        # if adding an update file, repo manager wants to know
        
        if service_id == self.modules_services.local_update_service_id:
            
            self.modules_repositories.NotifyUpdatesImported( new_hash_ids )
            
        # if we track tags for this service, update the a/c cache
        
        if service_type in HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES:
            
            tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
            
            with self._MakeTemporaryIntegerTable( new_hash_ids, 'hash_id' ) as temp_hash_id_table_name:
                
                for tag_service_id in tag_service_ids:
                    
                    self._CacheSpecificMappingsAddFiles( service_id, tag_service_id, new_hash_ids, temp_hash_id_table_name )
                    
                    self.modules_mappings_cache_specific_display.AddFiles( service_id, tag_service_id, new_hash_ids, temp_hash_id_table_name )
                    
        # now update the combined deleted files service
        
        if service_type in HC.FILE_SERVICES_COVERED_BY_COMBINED_DELETED_FILE:
            
            # files no longer deleted anywhere leave the combined deleted domain
            
            location_context = self.modules_files_storage.GetLocationContextForAllServicesDeletedFiles()
            
            still_deleted_hash_ids = self.modules_files_storage.FilterHashIds( location_context, new_hash_ids )
            
            no_longer_deleted_hash_ids = new_hash_ids.difference( still_deleted_hash_ids )
            
            self._DeleteFiles( self.modules_services.combined_deleted_file_service_id, no_longer_deleted_hash_ids )
            
        # push the service updates, done
        
        self._ExecuteMany( 'UPDATE service_info SET info = info + ? WHERE service_id = ? AND info_type = ?;', service_info_updates )
        
def _AddService( self, service_key, service_type, name, dictionary ):
    """Register a new service and build every table/cache structure it needs."""
    
    # de-dupe the display name before registration
    name = self.modules_services.GetNonDupeName( name )
    
    service_id = self.modules_services.AddService( service_key, service_type, name, dictionary )
    
    self._AddServiceCreateFiles( service_id, service_type )
    
    if service_type in HC.REPOSITORIES:
        
        self.modules_repositories.GenerateRepositoryTables( service_id )
        
    if service_type in HC.REAL_TAG_SERVICES:
        
        # tag lookup caches: the combined file domain plus every specific file domain
        self.modules_tag_search.Generate( self.modules_services.combined_file_service_id, service_id )
        
        file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_TAG_LOOKUP_CACHES )
        
        for file_service_id in file_service_ids:
            
            self.modules_tag_search.Generate( file_service_id, service_id )
            
        self.modules_tag_parents.Generate( service_id )
        self.modules_tag_siblings.Generate( service_id )
        
    self._AddServiceCreateMappings( service_id, service_type )
    
    if service_type in HC.FILE_SERVICES_WITH_SPECIFIC_TAG_LOOKUP_CACHES:
        
        # a new file domain needs a tag lookup cache for every existing tag service
        tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
        
        for tag_service_id in tag_service_ids:
            
            self.modules_tag_search.Generate( service_id, tag_service_id )
            
def _AddServiceCreateFiles( self, service_id, service_type ):
    """For a new file service that tracks mapping caches, create its file
    tables and a specific mappings cache per real tag service."""
    
    if service_type not in HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES:
        
        return
        
    self.modules_files_storage.GenerateFilesTables( service_id )
    
    for tag_service_id in self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES ):
        
        self._CacheSpecificMappingsGenerate( service_id, tag_service_id )
        
    
def _AddServiceCreateMappings( self, service_id, service_type ):
    """For a new real tag service, create its mappings tables, the combined
    files count cache, and a specific cache per relevant file service."""
    
    if service_type not in HC.REAL_TAG_SERVICES:
        
        return
        
    self.modules_mappings_storage.GenerateMappingsTables( service_id )
    
    self._CacheCombinedFilesMappingsGenerate( service_id )
    
    for file_service_id in self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES ):
        
        self._CacheSpecificMappingsGenerate( file_service_id, service_id )
        
    
def _ArchiveFiles( self, hash_ids ):
    """Archive the given files and decrement each affected service's inbox count."""
    
    archived_hash_ids = self.modules_files_metadata_basic.ArchiveFiles( hash_ids )
    
    if len( archived_hash_ids ) == 0:
        
        return
        
    service_ids_to_counts = self.modules_files_storage.GetServiceIdCounts( archived_hash_ids )
    
    update_rows = [ ( count, service_id, HC.SERVICE_INFO_NUM_INBOX ) for ( service_id, count ) in service_ids_to_counts.items() ]
    
    self._ExecuteMany( 'UPDATE service_info SET info = info - ? WHERE service_id = ? AND info_type = ?;', update_rows )
    
def _Backup( self, path ):
    """Copy the db files (and the default-location client_files tree) to path.
    
    The db connection is closed for the duration so the .db files on disk are
    consistent, and it is reopened in the finally block even on error/cancel.
    """
    
    self._CloseDBConnection()
    
    job_key = ClientThreading.JobKey( cancellable = True )
    
    try:
        
        job_key.SetStatusTitle( 'backing up db' )
        
        self._controller.pub( 'modal_message', job_key )
        
        job_key.SetVariable( 'popup_text_1', 'closing db' )
        
        HydrusPaths.MakeSureDirectoryExists( path )
        
        for filename in self._db_filenames.values():
            
            if job_key.IsCancelled():
                
                break
                
            job_key.SetVariable( 'popup_text_1', 'copying ' + filename )
            
            source = os.path.join( self._db_dir, filename )
            dest = os.path.join( path, filename )
            
            HydrusPaths.MirrorFile( source, dest )
            
        additional_filenames = self._GetPossibleAdditionalDBFilenames()
        
        for additional_filename in additional_filenames:
            
            source = os.path.join( self._db_dir, additional_filename )
            dest = os.path.join( path, additional_filename )
            
            # these extra files may not exist; copy only those present
            if os.path.exists( source ):
                
                HydrusPaths.MirrorFile( source, dest )
                
        def is_cancelled_hook():
            
            return job_key.IsCancelled()
            
        def text_update_hook( text ):
            
            job_key.SetVariable( 'popup_text_1', text )
            
        # only a default-location client_files structure is backed up this way
        client_files_default = os.path.join( self._db_dir, 'client_files' )
        
        if os.path.exists( client_files_default ):
            
            HydrusPaths.MirrorTree( client_files_default, os.path.join( path, 'client_files' ), text_update_hook = text_update_hook, is_cancelled_hook = is_cancelled_hook )
            
    finally:
        
        self._InitDBConnection()
        
        # NOTE(review): this reports 'backup complete!' even when the job was
        # cancelled partway through
        job_key.SetVariable( 'popup_text_1', 'backup complete!' )
        
        job_key.Finish()
        
def _CacheCombinedFilesDisplayMappingsAddImplications( self, tag_service_id, implication_tag_ids, tag_id, status_hook = None ):
    """Update display-tier a/c counts on the combined file domain when
    implication_tag_ids newly imply tag_id.
    
    The delta is the count of files that have one of the new implications but
    none of the implications tag_id already had. status_hook is accepted for
    interface parity but unused here.
    """
    
    if len( implication_tag_ids ) == 0:
        
        return
        
    # implications tag_id keeps regardless of this change
    remaining_implication_tag_ids = set( self.modules_tag_display.GetImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, tag_id ) ).difference( implication_tag_ids )
    
    # count files with a new implication and no remaining implication
    ( current_delta, pending_delta ) = self._GetWithAndWithoutTagsFileCountCombined( tag_service_id, implication_tag_ids, remaining_implication_tag_ids )
    
    if current_delta > 0 or pending_delta > 0:
        
        counts_cache_changes = ( ( tag_id, current_delta, pending_delta ), )
        
        self.modules_mappings_counts_update.AddCounts( ClientTags.TAG_DISPLAY_ACTUAL, self.modules_services.combined_file_service_id, tag_service_id, counts_cache_changes )
        
def _CacheCombinedFilesDisplayMappingsAddMappingsForChained( self, tag_service_id, storage_tag_id, hash_ids ):
    """Update combined-files display a/c counts after storage mappings
    ( storage_tag_id, hash_ids ) were added for a tag that sits in a
    sibling/parent chain (i.e. it implies one or more display tags).
    """
    
    ac_current_counts = collections.Counter()
    ac_pending_counts = collections.Counter()
    
    with self._MakeTemporaryIntegerTable( hash_ids, 'hash_id' ) as temp_hash_ids_table_name:
        
        # every display tag this storage tag rolls up into
        display_tag_ids = self.modules_tag_display.GetImplies( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, storage_tag_id )
        
        display_tag_ids_to_implied_by_tag_ids = self.modules_tag_display.GetTagsToImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, display_tag_ids, tags_are_ideal = True )
        
        file_service_ids_to_hash_ids = self._GroupHashIdsByTagCachedFileServiceId( hash_ids, temp_hash_ids_table_name )
        
        for ( display_tag_id, implied_by_tag_ids ) in display_tag_ids_to_implied_by_tag_ids.items():
            
            other_implied_by_tag_ids = set( implied_by_tag_ids )
            other_implied_by_tag_ids.discard( storage_tag_id )
            
            # get the count of pending that are tagged by storage_tag_id but not tagged by any of the other implied_by
            
            num_pending_to_be_rescinded = self._GetWithAndWithoutTagsForFilesFileCount( HC.CONTENT_STATUS_PENDING, tag_service_id, ( storage_tag_id, ), other_implied_by_tag_ids, hash_ids, temp_hash_ids_table_name, file_service_ids_to_hash_ids )
            
            # get the count of current that already have any implication
            
            num_non_addable = self._GetWithAndWithoutTagsForFilesFileCount( HC.CONTENT_STATUS_CURRENT, tag_service_id, implied_by_tag_ids, set(), hash_ids, temp_hash_ids_table_name, file_service_ids_to_hash_ids )
            
            num_addable = len( hash_ids ) - num_non_addable
            
            if num_addable > 0:
                
                ac_current_counts[ display_tag_id ] += num_addable
                
            if num_pending_to_be_rescinded > 0:
                
                ac_pending_counts[ display_tag_id ] += num_pending_to_be_rescinded
                
    if len( ac_current_counts ) > 0:
        
        counts_cache_changes = [ ( tag_id, current_delta, 0 ) for ( tag_id, current_delta ) in ac_current_counts.items() ]
        
        self.modules_mappings_counts_update.AddCounts( ClientTags.TAG_DISPLAY_ACTUAL, self.modules_services.combined_file_service_id, tag_service_id, counts_cache_changes )
        
    if len( ac_pending_counts ) > 0:
        
        counts_cache_changes = [ ( tag_id, 0, pending_delta ) for ( tag_id, pending_delta ) in ac_pending_counts.items() ]
        
        self.modules_mappings_counts_update.ReduceCounts( ClientTags.TAG_DISPLAY_ACTUAL, self.modules_services.combined_file_service_id, tag_service_id, counts_cache_changes )
        
def _CacheCombinedFilesDisplayMappingsDeleteImplications( self, tag_service_id, implication_tag_ids, tag_id, status_hook = None ):
    """Update display-tier a/c counts on the combined file domain when
    implication_tag_ids stop implying tag_id.
    
    Mirror of _CacheCombinedFilesDisplayMappingsAddImplications: the same
    with/without file count is computed, but the delta is subtracted.
    status_hook is accepted for interface parity but unused here.
    """
    
    if len( implication_tag_ids ) == 0:
        
        return
        
    # implications tag_id keeps after the removal
    # NOTE(review): the explicit difference suggests GetImpliedBy may still
    # include the removed implications here -- confirm call order
    remaining_implication_tag_ids = set( self.modules_tag_display.GetImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, tag_id ) ).difference( implication_tag_ids )
    
    # count files that only reached tag_id via the removed implications
    ( current_delta, pending_delta ) = self._GetWithAndWithoutTagsFileCountCombined( tag_service_id, implication_tag_ids, remaining_implication_tag_ids )
    
    if current_delta > 0 or pending_delta > 0:
        
        counts_cache_changes = ( ( tag_id, current_delta, pending_delta ), )
        
        self.modules_mappings_counts_update.ReduceCounts( ClientTags.TAG_DISPLAY_ACTUAL, self.modules_services.combined_file_service_id, tag_service_id, counts_cache_changes )
        
def _CacheCombinedFilesDisplayMappingsDeleteMappingsForChained( self, tag_service_id, storage_tag_id, hash_ids ):
    """Reduce combined-files display a/c current counts after storage mappings
    ( storage_tag_id, hash_ids ) were deleted for a tag in a sibling/parent chain.
    """
    
    ac_counts = collections.Counter()
    
    with self._MakeTemporaryIntegerTable( hash_ids, 'hash_id' ) as temp_hash_ids_table_name:
        
        # every display tag this storage tag rolls up into
        display_tag_ids = self.modules_tag_display.GetImplies( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, storage_tag_id )
        
        display_tag_ids_to_implied_by_tag_ids = self.modules_tag_display.GetTagsToImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, display_tag_ids, tags_are_ideal = True )
        
        file_service_ids_to_hash_ids = self._GroupHashIdsByTagCachedFileServiceId( hash_ids, temp_hash_ids_table_name )
        
        for ( display_tag_id, implied_by_tag_ids ) in display_tag_ids_to_implied_by_tag_ids.items():
            
            other_implied_by_tag_ids = set( implied_by_tag_ids )
            other_implied_by_tag_ids.discard( storage_tag_id )
            
            # get the count of current that are tagged by storage_tag_id but not tagged by any of the other implied_by
            
            num_deletable = self._GetWithAndWithoutTagsForFilesFileCount( HC.CONTENT_STATUS_CURRENT, tag_service_id, ( storage_tag_id, ), other_implied_by_tag_ids, hash_ids, temp_hash_ids_table_name, file_service_ids_to_hash_ids )
            
            if num_deletable > 0:
                
                ac_counts[ display_tag_id ] += num_deletable
                
    if len( ac_counts ) > 0:
        
        counts_cache_changes = [ ( tag_id, current_delta, 0 ) for ( tag_id, current_delta ) in ac_counts.items() ]
        
        self.modules_mappings_counts_update.ReduceCounts( ClientTags.TAG_DISPLAY_ACTUAL, self.modules_services.combined_file_service_id, tag_service_id, counts_cache_changes )
        
def _CacheCombinedFilesDisplayMappingsClear( self, tag_service_id, keep_pending = False ):
    """Zero the display-tier a/c counts for this tag service on the combined file domain."""
    
    combined_file_service_id = self.modules_services.combined_file_service_id
    
    self.modules_mappings_counts.ClearCounts( ClientTags.TAG_DISPLAY_ACTUAL, combined_file_service_id, tag_service_id, keep_pending = keep_pending )
    
def _CacheCombinedFilesDisplayMappingsDrop( self, tag_service_id ):
    """Drop the display-tier a/c count tables for this tag service on the combined file domain."""
    
    combined_file_service_id = self.modules_services.combined_file_service_id
    
    self.modules_mappings_counts.DropTables( ClientTags.TAG_DISPLAY_ACTUAL, combined_file_service_id, tag_service_id )
    
def _CacheCombinedFilesDisplayMappingsGenerate( self, tag_service_id, status_hook = None ):
    """Create the combined-files display a/c count tables, seeded from the storage counts."""
    
    if status_hook is not None:
        
        status_hook( 'copying storage counts' )
        
    
    counts_module = self.modules_mappings_counts
    
    counts_module.CreateTables( ClientTags.TAG_DISPLAY_ACTUAL, self.modules_services.combined_file_service_id, tag_service_id, populate_from_storage = True )
    
def _CacheCombinedFilesDisplayMappingsPendMappingsForChained( self, tag_service_id, storage_tag_id, hash_ids ):
    """Add combined-files display a/c pending counts after storage_tag_id was pended to hash_ids.
    
    storage_tag_id is part of a sibling/parent chain, so each display tag it implies
    only gains a pending count for files that do not already carry a pending mapping
    via some implying storage tag.
    """
    
    ac_counts = collections.Counter()
    
    with self._MakeTemporaryIntegerTable( hash_ids, 'hash_id' ) as temp_hash_ids_table_name:
        
        display_tag_ids = self.modules_tag_display.GetImplies( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, storage_tag_id )
        display_tag_ids_to_implied_by_tag_ids = self.modules_tag_display.GetTagsToImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, display_tag_ids, tags_are_ideal = True )
        
        file_service_ids_to_hash_ids = self._GroupHashIdsByTagCachedFileServiceId( hash_ids, temp_hash_ids_table_name )
        
        for ( display_tag_id, implied_by_tag_ids ) in display_tag_ids_to_implied_by_tag_ids.items():
            
            # count files that already have a pending mapping through any implying tag; those cannot gain another pending display count
            num_non_pendable = self._GetWithAndWithoutTagsForFilesFileCount( HC.CONTENT_STATUS_PENDING, tag_service_id, implied_by_tag_ids, set(), hash_ids, temp_hash_ids_table_name, file_service_ids_to_hash_ids )
            
            num_pendable = len( hash_ids ) - num_non_pendable
            
            if num_pendable > 0:
                
                ac_counts[ display_tag_id ] += num_pendable
                
            
        
    
    if len( ac_counts ) > 0:
        
        # counts rows are ( tag_id, current_delta, pending_delta ); this is a pending-count addition
        counts_cache_changes = [ ( tag_id, 0, pending_delta ) for ( tag_id, pending_delta ) in ac_counts.items() ]
        
        self.modules_mappings_counts_update.AddCounts( ClientTags.TAG_DISPLAY_ACTUAL, self.modules_services.combined_file_service_id, tag_service_id, counts_cache_changes )
        
    
def _CacheCombinedFilesDisplayMappingsRegeneratePending( self, tag_service_id, status_hook = None ):
    """Rebuild the pending side of the combined-files display a/c counts from the raw
    pending mappings table, keeping the current-count side untouched.
    
    This is a recovery routine, so it counts from the raw storage tables rather than
    trusting other caches.
    """
    
    ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = ClientDBMappingsStorage.GenerateMappingsTableNames( tag_service_id )
    
    if status_hook is not None:
        
        message = 'clearing old combined display data'
        
        status_hook( message )
        
    
    # map every pending storage tag to the display tags it implies, then flatten
    all_pending_storage_tag_ids = self._STS( self._Execute( 'SELECT DISTINCT tag_id FROM {};'.format( pending_mappings_table_name ) ) )
    
    storage_tag_ids_to_display_tag_ids = self.modules_tag_display.GetTagsToImplies( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, all_pending_storage_tag_ids )
    
    all_pending_display_tag_ids = set( itertools.chain.from_iterable( storage_tag_ids_to_display_tag_ids.values() ) )
    
    # these can be large; free them before the recount loop
    del all_pending_storage_tag_ids
    del storage_tag_ids_to_display_tag_ids
    
    self.modules_mappings_counts.ClearCounts( ClientTags.TAG_DISPLAY_ACTUAL, self.modules_services.combined_file_service_id, tag_service_id, keep_current = True )
    
    all_pending_display_tag_ids_to_implied_by_storage_tag_ids = self.modules_tag_display.GetTagsToImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, all_pending_display_tag_ids, tags_are_ideal = True )
    
    counts_cache_changes = []
    
    num_to_do = len( all_pending_display_tag_ids_to_implied_by_storage_tag_ids )
    
    for ( i, ( display_tag_id, storage_tag_ids ) ) in enumerate( all_pending_display_tag_ids_to_implied_by_storage_tag_ids.items() ):
        
        if i % 100 == 0 and status_hook is not None:
            
            message = 'regenerating pending tags {}'.format( HydrusData.ConvertValueRangeToPrettyString( i + 1, num_to_do ) )
            
            status_hook( message )
            
        
        # we'll do these counts from raw tables, not 'get withandwithout count' cleverness, since this is a recovery function and other caches may be dodgy atm
        
        if len( storage_tag_ids ) == 1:
            
            # single implying tag: a simple filtered count suffices
            ( storage_tag_id, ) = storage_tag_ids
            
            ( pending_delta, ) = self._Execute( 'SELECT COUNT( DISTINCT hash_id ) FROM {} WHERE tag_id = ?;'.format( pending_mappings_table_name ), ( storage_tag_id, ) ).fetchone()
            
        else:
            
            with self._MakeTemporaryIntegerTable( storage_tag_ids, 'tag_id' ) as temp_tag_ids_table_name:
                
                # temp tags to mappings merged
                # DISTINCT hash_id merges files pended under several implying tags into one count
                ( pending_delta, ) = self._Execute( 'SELECT COUNT( DISTINCT hash_id ) FROM {} CROSS JOIN {} USING ( tag_id );'.format( temp_tag_ids_table_name, pending_mappings_table_name ) ).fetchone()
                
            
        
        counts_cache_changes.append( ( display_tag_id, 0, pending_delta ) )
        
    
    self.modules_mappings_counts_update.AddCounts( ClientTags.TAG_DISPLAY_ACTUAL, self.modules_services.combined_file_service_id, tag_service_id, counts_cache_changes )
    
def _CacheCombinedFilesDisplayMappingsRescindPendingMappingsForChained( self, tag_service_id, storage_tag_id, hash_ids ):
    """Reduce combined-files display a/c pending counts after pending storage_tag_id was
    rescinded from hash_ids.
    
    storage_tag_id is part of a sibling/parent chain, so each display tag it implies only
    loses a pending count for files where no other implying storage tag is still pending.
    """
    
    ac_counts = collections.Counter()
    
    with self._MakeTemporaryIntegerTable( hash_ids, 'hash_id' ) as temp_hash_ids_table_name:
        
        display_tag_ids = self.modules_tag_display.GetImplies( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, storage_tag_id )
        display_tag_ids_to_implied_by_tag_ids = self.modules_tag_display.GetTagsToImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, display_tag_ids, tags_are_ideal = True )
        
        file_service_ids_to_hash_ids = self._GroupHashIdsByTagCachedFileServiceId( hash_ids, temp_hash_ids_table_name )
        
        for ( display_tag_id, implied_by_tag_ids ) in display_tag_ids_to_implied_by_tag_ids.items():
            
            other_implied_by_tag_ids = set( implied_by_tag_ids )
            other_implied_by_tag_ids.discard( storage_tag_id )
            
            # get the count of current that are tagged by storage_tag_id but not tagged by any of the other implications
            num_rescindable = self._GetWithAndWithoutTagsForFilesFileCount( HC.CONTENT_STATUS_PENDING, tag_service_id, ( storage_tag_id, ), other_implied_by_tag_ids, hash_ids, temp_hash_ids_table_name, file_service_ids_to_hash_ids )
            
            if num_rescindable > 0:
                
                ac_counts[ display_tag_id ] += num_rescindable
                
            
        
    
    if len( ac_counts ) > 0:
        
        # counts rows are ( tag_id, current_delta, pending_delta ); this is a pending-count reduction
        counts_cache_changes = [ ( tag_id, 0, pending_delta ) for ( tag_id, pending_delta ) in ac_counts.items() ]
        
        self.modules_mappings_counts_update.ReduceCounts( ClientTags.TAG_DISPLAY_ACTUAL, self.modules_services.combined_file_service_id, tag_service_id, counts_cache_changes )
        
    
def _CacheCombinedFilesMappingsClear( self, tag_service_id, keep_pending = False ):
    """Zero the combined-files storage a/c counts, then the display counts layered on top of them."""
    
    combined_file_service_id = self.modules_services.combined_file_service_id
    
    self.modules_mappings_counts.ClearCounts( ClientTags.TAG_DISPLAY_STORAGE, combined_file_service_id, tag_service_id, keep_pending = keep_pending )
    
    self._CacheCombinedFilesDisplayMappingsClear( tag_service_id, keep_pending = keep_pending )
    
def _CacheCombinedFilesMappingsDrop( self, tag_service_id ):
    """Drop the combined-files storage a/c count tables, then the display tables layered on top."""
    
    combined_file_service_id = self.modules_services.combined_file_service_id
    
    self.modules_mappings_counts.DropTables( ClientTags.TAG_DISPLAY_STORAGE, combined_file_service_id, tag_service_id )
    
    self._CacheCombinedFilesDisplayMappingsDrop( tag_service_id )
    
def _CacheCombinedFilesMappingsGenerate( self, tag_service_id ):
    """Build the combined-files storage a/c counts from the raw mappings tables, counting
    current and pending rows per tag in 10k-tag chunks, then generate the display cache
    on top of them.
    """
    
    self.modules_mappings_counts.CreateTables( ClientTags.TAG_DISPLAY_STORAGE, self.modules_services.combined_file_service_id, tag_service_id )
    
    #
    
    ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = ClientDBMappingsStorage.GenerateMappingsTableNames( tag_service_id )
    
    current_mappings_exist = self._Execute( 'SELECT 1 FROM ' + current_mappings_table_name + ' LIMIT 1;' ).fetchone() is not None
    pending_mappings_exist = self._Execute( 'SELECT 1 FROM ' + pending_mappings_table_name + ' LIMIT 1;' ).fetchone() is not None
    
    if current_mappings_exist or pending_mappings_exist: # not worth iterating through all known tags for an empty service
        
        for ( group_of_ids, num_done, num_to_do ) in HydrusDB.ReadLargeIdQueryInSeparateChunks( self._c, 'SELECT tag_id FROM tags;', 10000 ): # must be a cleverer way of doing this
            
            with self._MakeTemporaryIntegerTable( group_of_ids, 'tag_id' ) as temp_table_name:
                
                current_counter = collections.Counter()
                
                # temp tags to mappings
                for ( tag_id, count ) in self._Execute( 'SELECT tag_id, COUNT( * ) FROM {} CROSS JOIN {} USING ( tag_id ) GROUP BY ( tag_id );'.format( temp_table_name, current_mappings_table_name ) ):
                    
                    current_counter[ tag_id ] = count
                    
                
                pending_counter = collections.Counter()
                
                # temp tags to mappings
                for ( tag_id, count ) in self._Execute( 'SELECT tag_id, COUNT( * ) FROM {} CROSS JOIN {} USING ( tag_id ) GROUP BY ( tag_id );'.format( temp_table_name, pending_mappings_table_name ) ):
                    
                    pending_counter[ tag_id ] = count
                    
                
            
            # one counts row per tag that has any current or pending mappings; Counter defaults fill zeroes
            all_ids_seen = set( current_counter.keys() )
            all_ids_seen.update( pending_counter.keys() )
            
            counts_cache_changes = [ ( tag_id, current_counter[ tag_id ], pending_counter[ tag_id ] ) for tag_id in all_ids_seen ]
            
            if len( counts_cache_changes ) > 0:
                
                self.modules_mappings_counts_update.AddCounts( ClientTags.TAG_DISPLAY_STORAGE, self.modules_services.combined_file_service_id, tag_service_id, counts_cache_changes )
                
            
        
    
    self._CacheCombinedFilesDisplayMappingsGenerate( tag_service_id )
    
def _CacheCombinedFilesMappingsRegeneratePending( self, tag_service_id, status_hook = None ):
    """Rebuild the pending side of the combined-files storage a/c counts from the raw
    pending mappings table, keeping current counts, then do the same for the display layer.
    """
    
    ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = ClientDBMappingsStorage.GenerateMappingsTableNames( tag_service_id )
    
    if status_hook is not None:
        
        message = 'clearing old combined display data'
        
        status_hook( message )
        
    
    all_pending_storage_tag_ids = self._STS( self._Execute( 'SELECT DISTINCT tag_id FROM {};'.format( pending_mappings_table_name ) ) )
    
    self.modules_mappings_counts.ClearCounts( ClientTags.TAG_DISPLAY_STORAGE, self.modules_services.combined_file_service_id, tag_service_id, keep_current = True )
    
    counts_cache_changes = []
    
    num_to_do = len( all_pending_storage_tag_ids )
    
    for ( i, storage_tag_id ) in enumerate( all_pending_storage_tag_ids ):
        
        if i % 100 == 0 and status_hook is not None:
            
            message = 'regenerating pending tags {}'.format( HydrusData.ConvertValueRangeToPrettyString( i + 1, num_to_do ) )
            
            status_hook( message )
            
        
        # recount straight from the raw pending table; DISTINCT collapses duplicate rows per file
        ( pending_delta, ) = self._Execute( 'SELECT COUNT( DISTINCT hash_id ) FROM {} WHERE tag_id = ?;'.format( pending_mappings_table_name ), ( storage_tag_id, ) ).fetchone()
        
        counts_cache_changes.append( ( storage_tag_id, 0, pending_delta ) )
        
    
    self.modules_mappings_counts_update.AddCounts( ClientTags.TAG_DISPLAY_STORAGE, self.modules_services.combined_file_service_id, tag_service_id, counts_cache_changes )
    
    self._CacheCombinedFilesDisplayMappingsRegeneratePending( tag_service_id, status_hook = status_hook )
    
def _CacheSpecificMappingsAddFiles( self, file_service_id, tag_service_id, hash_ids, hash_ids_table_name ):
    """Copy the given files' deleted/current/pending mappings into the specific cache for
    ( file_service_id, tag_service_id ) and add the resulting a/c count deltas.
    
    hash_ids_table_name is a temp integer table already holding hash_ids.
    """
    
    ( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
    
    ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = ClientDBMappingsStorage.GenerateMappingsTableNames( tag_service_id )
    
    # deleted don't have a/c counts to update, so we can do it all in one go here
    # BUGFIX: the SELECT previously yielded ( tag_id, hash_id ), which wrote tag ids into the
    # hash_id column; the column order must match the INSERT target ( hash_id, tag_id )
    self._Execute( 'INSERT OR IGNORE INTO {} ( hash_id, tag_id ) SELECT hash_id, tag_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( cache_deleted_mappings_table_name, hash_ids_table_name, deleted_mappings_table_name ) )
    
    # temp hashes to mappings
    current_mapping_ids_raw = self._Execute( 'SELECT tag_id, hash_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( hash_ids_table_name, current_mappings_table_name ) ).fetchall()
    
    current_mapping_ids_dict = HydrusData.BuildKeyToSetDict( current_mapping_ids_raw )
    
    # temp hashes to mappings
    pending_mapping_ids_raw = self._Execute( 'SELECT tag_id, hash_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( hash_ids_table_name, pending_mappings_table_name ) ).fetchall()
    
    pending_mapping_ids_dict = HydrusData.BuildKeyToSetDict( pending_mapping_ids_raw )
    
    all_ids_seen = set( current_mapping_ids_dict.keys() )
    all_ids_seen.update( pending_mapping_ids_dict.keys() )
    
    counts_cache_changes = []
    
    for tag_id in all_ids_seen:
        
        current_hash_ids = current_mapping_ids_dict[ tag_id ]
        
        current_delta = len( current_hash_ids )
        
        if current_delta > 0:
            
            self._ExecuteMany( 'INSERT OR IGNORE INTO ' + cache_current_mappings_table_name + ' ( hash_id, tag_id ) VALUES ( ?, ? );', ( ( hash_id, tag_id ) for hash_id in current_hash_ids ) )
            
            # rows may already exist, so trust the actual insert count over len()
            current_delta = self._GetRowCount()
            
        
        #
        
        pending_hash_ids = pending_mapping_ids_dict[ tag_id ]
        
        pending_delta = len( pending_hash_ids )
        
        if pending_delta > 0:
            
            self._ExecuteMany( 'INSERT OR IGNORE INTO ' + cache_pending_mappings_table_name + ' ( hash_id, tag_id ) VALUES ( ?, ? );', ( ( hash_id, tag_id ) for hash_id in pending_hash_ids ) )
            
            pending_delta = self._GetRowCount()
            
        
        #
        
        if current_delta > 0 or pending_delta > 0:
            
            counts_cache_changes.append( ( tag_id, current_delta, pending_delta ) )
            
        
    
    if len( counts_cache_changes ) > 0:
        
        self.modules_mappings_counts_update.AddCounts( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, counts_cache_changes )
        
    
def _CacheSpecificMappingsAddMappings( self, tag_service_id, tag_id, hash_ids, filtered_hashes_generator: FilteredHashesGenerator ):
    """Commit tag_id as current on the given hashes in every relevant specific cache:
    rescind any pending row, insert the current row, clear any deleted row, and update counts.
    """
    
    for ( file_service_id, filtered_hash_ids ) in filtered_hashes_generator.IterateHashes( hash_ids ):
        
        ( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
        
        # we have to interleave this into the iterator so that if two siblings with the same ideal are pend->currented at once, we remain logic consistent for soletag lookups!
        self.modules_mappings_cache_specific_display.RescindPendingMappings( file_service_id, tag_service_id, tag_id, filtered_hash_ids )
        
        self._ExecuteMany( 'DELETE FROM ' + cache_pending_mappings_table_name + ' WHERE hash_id = ? AND tag_id = ?;', ( ( hash_id, tag_id ) for hash_id in filtered_hash_ids ) )
        
        num_pending_rescinded = self._GetRowCount()
        
        #
        
        self._ExecuteMany( 'INSERT OR IGNORE INTO ' + cache_current_mappings_table_name + ' ( hash_id, tag_id ) VALUES ( ?, ? );', ( ( hash_id, tag_id ) for hash_id in filtered_hash_ids ) )
        
        num_current_inserted = self._GetRowCount()
        
        #
        
        # a now-current mapping can no longer be deleted
        self._ExecuteMany( 'DELETE FROM ' + cache_deleted_mappings_table_name + ' WHERE hash_id = ? AND tag_id = ?;', ( ( hash_id, tag_id ) for hash_id in filtered_hash_ids ) )
        
        if num_current_inserted > 0:
            
            counts_cache_changes = [ ( tag_id, num_current_inserted, 0 ) ]
            
            self.modules_mappings_counts_update.AddCounts( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, counts_cache_changes )
            
        
        if num_pending_rescinded > 0:
            
            counts_cache_changes = [ ( tag_id, 0, num_pending_rescinded ) ]
            
            self.modules_mappings_counts_update.ReduceCounts( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, counts_cache_changes )
            
        
        self.modules_mappings_cache_specific_display.AddMappings( file_service_id, tag_service_id, tag_id, filtered_hash_ids )
        
    
def _CacheSpecificMappingsClear( self, file_service_id, tag_service_id, keep_pending = False ):
    """Empty this ( file service x tag service ) mappings cache, its counts, and the display layer."""
    
    ( current_table_name, deleted_table_name, pending_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
    
    tables_to_empty = [ current_table_name, deleted_table_name ]
    
    if not keep_pending:
        
        tables_to_empty.append( pending_table_name )
        
    
    for table_name in tables_to_empty:
        
        self._Execute( 'DELETE FROM {};'.format( table_name ) )
        
    
    self.modules_mappings_counts.ClearCounts( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, keep_pending = keep_pending )
    
    self.modules_mappings_cache_specific_display.Clear( file_service_id, tag_service_id, keep_pending = keep_pending )
    
def _CacheSpecificMappingsCreateTables( self, file_service_id, tag_service_id ):
    """Create the three specific cache tables (current/deleted/pending), their reverse
    ( tag_id, hash_id ) indices, and the storage counts tables.
    """
    
    cache_table_names = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
    
    for cache_table_name in cache_table_names:
        
        self._Execute( 'CREATE TABLE IF NOT EXISTS ' + cache_table_name + ' ( hash_id INTEGER, tag_id INTEGER, PRIMARY KEY ( hash_id, tag_id ) ) WITHOUT ROWID;' )
        
    
    for cache_table_name in cache_table_names:
        
        self._CreateIndex( cache_table_name, [ 'tag_id', 'hash_id' ], unique = True )
        
    
    self.modules_mappings_counts.CreateTables( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id )
    
def _CacheSpecificMappingsDrop( self, file_service_id, tag_service_id ):
    """Drop the specific cache tables, their counts tables, and the display cache on top."""
    
    for cache_table_name in ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id ):
        
        self._Execute( 'DROP TABLE IF EXISTS {};'.format( cache_table_name ) )
        
    
    self.modules_mappings_counts.DropTables( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id )
    
    self.modules_mappings_cache_specific_display.Drop( file_service_id, tag_service_id )
    
def _CacheSpecificMappingsDeleteFiles( self, file_service_id, tag_service_id, hash_ids, hash_id_table_name ):
    """Remove the given files from this specific cache: purge their deleted/current/pending
    rows and reduce the a/c counts by what was actually cached.
    
    hash_id_table_name is a temp integer table already holding hash_ids.
    """
    
    self.modules_mappings_cache_specific_display.DeleteFiles( file_service_id, tag_service_id, hash_ids, hash_id_table_name )
    
    ( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
    
    # temp hashes to mappings
    deleted_mapping_ids_raw = self._Execute( 'SELECT tag_id, hash_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( hash_id_table_name, cache_deleted_mappings_table_name ) ).fetchall()
    
    if len( deleted_mapping_ids_raw ) > 0:
        
        # deleted rows carry no a/c counts, so a straight purge is fine
        self._ExecuteMany( 'DELETE FROM {} WHERE tag_id = ? AND hash_id = ?;'.format( cache_deleted_mappings_table_name ), deleted_mapping_ids_raw )
        
    
    # temp hashes to mappings
    current_mapping_ids_raw = self._Execute( 'SELECT tag_id, hash_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( hash_id_table_name, cache_current_mappings_table_name ) ).fetchall()
    
    current_mapping_ids_dict = HydrusData.BuildKeyToSetDict( current_mapping_ids_raw )
    
    # temp hashes to mappings
    pending_mapping_ids_raw = self._Execute( 'SELECT tag_id, hash_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( hash_id_table_name, cache_pending_mappings_table_name ) ).fetchall()
    
    pending_mapping_ids_dict = HydrusData.BuildKeyToSetDict( pending_mapping_ids_raw )
    
    all_ids_seen = set( current_mapping_ids_dict.keys() )
    all_ids_seen.update( pending_mapping_ids_dict.keys() )
    
    counts_cache_changes = []
    
    for tag_id in all_ids_seen:
        
        # the cached row sets tell us exactly how much each tag's counts must drop
        current_hash_ids = current_mapping_ids_dict[ tag_id ]
        
        num_current = len( current_hash_ids )
        
        #
        
        pending_hash_ids = pending_mapping_ids_dict[ tag_id ]
        
        num_pending = len( pending_hash_ids )
        
        counts_cache_changes.append( ( tag_id, num_current, num_pending ) )
        
    
    self._ExecuteMany( 'DELETE FROM ' + cache_current_mappings_table_name + ' WHERE hash_id = ?;', ( ( hash_id, ) for hash_id in hash_ids ) )
    self._ExecuteMany( 'DELETE FROM ' + cache_pending_mappings_table_name + ' WHERE hash_id = ?;', ( ( hash_id, ) for hash_id in hash_ids ) )
    
    if len( counts_cache_changes ) > 0:
        
        self.modules_mappings_counts_update.ReduceCounts( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, counts_cache_changes )
        
    
def _CacheSpecificMappingsDeleteMappings( self, tag_service_id, tag_id, hash_ids, filtered_hashes_generator: FilteredHashesGenerator ):
    """Delete current tag_id from the given hashes in every relevant specific cache:
    move the row from current to deleted and reduce the current count accordingly.
    """
    
    for ( file_service_id, filtered_hash_ids ) in filtered_hashes_generator.IterateHashes( hash_ids ):
        
        ( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
        
        self.modules_mappings_cache_specific_display.DeleteMappings( file_service_id, tag_service_id, tag_id, filtered_hash_ids )
        
        self._ExecuteMany( 'DELETE FROM ' + cache_current_mappings_table_name + ' WHERE hash_id = ? AND tag_id = ?;', ( ( hash_id, tag_id ) for hash_id in filtered_hash_ids ) )
        
        num_deleted = self._GetRowCount()
        
        #
        
        self._ExecuteMany( 'INSERT OR IGNORE INTO ' + cache_deleted_mappings_table_name + ' ( hash_id, tag_id ) VALUES ( ?, ? );', ( ( hash_id, tag_id ) for hash_id in filtered_hash_ids ) )
        
        if num_deleted > 0:
            
            # only the rows that were actually current reduce the count
            counts_cache_changes = [ ( tag_id, num_deleted, 0 ) ]
            
            self.modules_mappings_counts_update.ReduceCounts( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, counts_cache_changes )
            
        
    
def _CacheSpecificMappingsGenerate( self, file_service_id, tag_service_id ):
    """Build the specific mappings cache for this ( file service x tag service ) pair from
    scratch: create the tables, populate them from the service's current files in 10k
    chunks, then build the display cache on top.
    """
    
    self._CacheSpecificMappingsCreateTables( file_service_id, tag_service_id )
    
    #
    
    hash_ids = self.modules_files_storage.GetCurrentHashIdsList( file_service_id )
    
    BLOCK_SIZE = 10000
    
    # idiom fix: the original wrapped this in enumerate() but never used the index
    for block_of_hash_ids in HydrusData.SplitListIntoChunks( hash_ids, BLOCK_SIZE ):
        
        with self._MakeTemporaryIntegerTable( block_of_hash_ids, 'hash_id' ) as temp_hash_id_table_name:
            
            self._CacheSpecificMappingsAddFiles( file_service_id, tag_service_id, block_of_hash_ids, temp_hash_id_table_name )
            
        
    
    self.modules_db_maintenance.TouchAnalyzeNewTables()
    
    self.modules_mappings_cache_specific_display.Generate( file_service_id, tag_service_id, populate_from_storage = True )
    
def _CacheSpecificMappingsGetFilteredHashesGenerator( self, file_service_ids, tag_service_id, hash_ids ):
    """Build a FilteredHashesGenerator mapping each file service to the subset of
    hash_ids that are current in that file domain.
    """
    
    valid_hash_ids_lookup = collections.defaultdict( set )
    
    with self._MakeTemporaryIntegerTable( hash_ids, 'hash_id' ) as temp_table_name:
        
        for file_service_id in file_service_ids:
            
            table_join = self.modules_files_storage.GetTableJoinLimitedByFileDomain( file_service_id, temp_table_name, HC.CONTENT_STATUS_CURRENT )
            
            valid_hash_ids_lookup[ file_service_id ] = self._STS( self._Execute( 'SELECT hash_id FROM {};'.format( table_join ) ) )
            
        
    
    return FilteredHashesGenerator( valid_hash_ids_lookup )
    
def _CacheSpecificMappingsGetFilteredMappingsGenerator( self, file_service_ids, tag_service_id, mappings_ids ):
    """Build a FilteredMappingsGenerator for mappings_ids, pre-computing for each file
    service which of the involved hashes are current in that file domain.
    """
    
    all_hash_ids = set()
    
    for ( tag_id, hash_ids ) in mappings_ids:
        
        all_hash_ids.update( hash_ids )
        
    
    valid_hash_ids_lookup = collections.defaultdict( set )
    
    with self._MakeTemporaryIntegerTable( all_hash_ids, 'hash_id' ) as temp_table_name:
        
        for file_service_id in file_service_ids:
            
            table_join = self.modules_files_storage.GetTableJoinLimitedByFileDomain( file_service_id, temp_table_name, HC.CONTENT_STATUS_CURRENT )
            
            valid_hash_ids_lookup[ file_service_id ] = self._STS( self._Execute( 'SELECT hash_id FROM {};'.format( table_join ) ) )
            
        
    
    return FilteredMappingsGenerator( valid_hash_ids_lookup, mappings_ids )
    
def _CacheSpecificMappingsPendMappings( self, tag_service_id, tag_id, hash_ids, filtered_hashes_generator: FilteredHashesGenerator ):
    """Pend tag_id to the given hashes in every relevant specific cache and bump the
    pending a/c counts by the number of rows actually inserted.
    """
    
    for ( file_service_id, filtered_hash_ids ) in filtered_hashes_generator.IterateHashes( hash_ids ):
        
        ( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
        
        insert_rows = ( ( hash_id, tag_id ) for hash_id in filtered_hash_ids )
        
        self._ExecuteMany( 'INSERT OR IGNORE INTO ' + cache_pending_mappings_table_name + ' ( hash_id, tag_id ) VALUES ( ?, ? );', insert_rows )
        
        num_added = self._GetRowCount()
        
        if num_added > 0:
            
            self.modules_mappings_counts_update.AddCounts( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, [ ( tag_id, 0, num_added ) ] )
            
        
        self.modules_mappings_cache_specific_display.PendMappings( file_service_id, tag_service_id, tag_id, filtered_hash_ids )
        
    
def _CacheSpecificMappingsRegeneratePending( self, file_service_id, tag_service_id, status_hook = None ):
    """Rebuild the pending rows and pending counts of this specific cache from the raw
    pending mappings table, restricted to files current in this file domain. Current
    data is kept. Also regenerates the display layer's pending data.
    """
    
    ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = ClientDBMappingsStorage.GenerateMappingsTableNames( tag_service_id )
    ( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
    
    if status_hook is not None:
        
        message = 'clearing old specific data'
        
        status_hook( message )
        
    
    all_pending_storage_tag_ids = self._STS( self._Execute( 'SELECT DISTINCT tag_id FROM {};'.format( pending_mappings_table_name ) ) )
    
    self.modules_mappings_counts.ClearCounts( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, keep_current = True )
    
    self._Execute( 'DELETE FROM {};'.format( cache_pending_mappings_table_name ) )
    
    counts_cache_changes = []
    
    num_to_do = len( all_pending_storage_tag_ids )
    
    # joins raw pending mappings against files current in this file domain
    select_table_join = self.modules_files_storage.GetTableJoinLimitedByFileDomain( file_service_id, pending_mappings_table_name, HC.CONTENT_STATUS_CURRENT )
    
    for ( i, storage_tag_id ) in enumerate( all_pending_storage_tag_ids ):
        
        if i % 100 == 0 and status_hook is not None:
            
            message = 'regenerating pending tags {}'.format( HydrusData.ConvertValueRangeToPrettyString( i + 1, num_to_do ) )
            
            status_hook( message )
            
        
        self._Execute( 'INSERT OR IGNORE INTO {} ( tag_id, hash_id ) SELECT tag_id, hash_id FROM {} WHERE tag_id = ?;'.format( cache_pending_mappings_table_name, select_table_join ), ( storage_tag_id, ) )
        
        # rowcount of the insert is the new pending count for this tag
        pending_delta = self._GetRowCount()
        
        counts_cache_changes.append( ( storage_tag_id, 0, pending_delta ) )
        
    
    self.modules_mappings_counts_update.AddCounts( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, counts_cache_changes )
    
    self.modules_mappings_cache_specific_display.RegeneratePending( file_service_id, tag_service_id, status_hook = status_hook )
    
def _CacheSpecificMappingsRescindPendingMappings( self, tag_service_id, tag_id, hash_ids, filtered_hashes_generator: FilteredHashesGenerator ):
    """Rescind pending tag_id from the given hashes in every relevant specific cache and
    reduce the pending a/c counts by the number of rows actually deleted.
    
    Cleanup: removed an ac_counts Counter that was allocated per loop iteration but
    never read or written.
    """
    
    for ( file_service_id, filtered_hash_ids ) in filtered_hashes_generator.IterateHashes( hash_ids ):
        
        ( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
        
        # the display cache rescinds first, as in _CacheSpecificMappingsAddMappings
        self.modules_mappings_cache_specific_display.RescindPendingMappings( file_service_id, tag_service_id, tag_id, filtered_hash_ids )
        
        self._ExecuteMany( 'DELETE FROM ' + cache_pending_mappings_table_name + ' WHERE hash_id = ? AND tag_id = ?;', ( ( hash_id, tag_id ) for hash_id in filtered_hash_ids ) )
        
        num_deleted = self._GetRowCount()
        
        if num_deleted > 0:
            
            counts_cache_changes = [ ( tag_id, 0, num_deleted ) ]
            
            self.modules_mappings_counts_update.ReduceCounts( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, counts_cache_changes )
            
        
    
def _CacheTagDisplayForceFullSyncTagsOnSpecifics( self, tag_service_id, file_service_ids ):
    """Quickly repopulate the specific display caches for every tag involved in a
    sibling or parent relationship, across the given file services.
    """
    
    # this assumes the caches are empty. it is a 'quick' force repopulation for emergency fill-in maintenance
    
    # every tag that appears in an actual sibling or parent row
    tag_ids_in_dispute = set()
    
    tag_ids_in_dispute.update( self.modules_tag_siblings.GetAllTagIds( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id ) )
    tag_ids_in_dispute.update( self.modules_tag_parents.GetAllTagIds( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id ) )
    
    for tag_id in tag_ids_in_dispute:
        
        # in storage terms a tag only implies itself; anything beyond that comes from chains
        storage_implication_tag_ids = { tag_id }
        
        actual_implication_tag_ids = self.modules_tag_display.GetImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, tag_id )
        
        add_implication_tag_ids = actual_implication_tag_ids.difference( storage_implication_tag_ids )
        
        if len( add_implication_tag_ids ) > 0:
            
            for file_service_id in file_service_ids:
                
                self.modules_mappings_cache_specific_display.AddImplications( file_service_id, tag_service_id, add_implication_tag_ids, tag_id )
                
            
        
        delete_implication_tag_ids = storage_implication_tag_ids.difference( actual_implication_tag_ids )
        
        if len( delete_implication_tag_ids ) > 0:
            
            for file_service_id in file_service_ids:
                
                self.modules_mappings_cache_specific_display.DeleteImplications( file_service_id, tag_service_id, delete_implication_tag_ids, tag_id )
                
            
        
    
    for block_of_tag_ids in HydrusData.SplitIteratorIntoChunks( tag_ids_in_dispute, 1024 ):
        
        self._CacheTagsSyncTags( tag_service_id, block_of_tag_ids, just_these_file_service_ids = file_service_ids )
        
    
def _CacheTagDisplayGetApplicationStatusNumbers( self, service_key ):
    """Summarise sibling/parent application status for a service as a dict of row counts,
    plus messages for any tag repositories with lots of outstanding processing.
    """
    
    service_id = self.modules_services.GetServiceId( service_key )
    
    ( sibling_rows_to_add, sibling_rows_to_remove, parent_rows_to_add, parent_rows_to_remove, num_actual_rows, num_ideal_rows ) = self.modules_tag_display.GetApplicationStatus( service_id )
    
    # preserve evaluation order: parents' applicable services are fetched before siblings'
    content_types_to_applicable_service_ids = {
        HC.CONTENT_TYPE_TAG_PARENTS : self.modules_tag_parents.GetApplicableServiceIds( service_id ),
        HC.CONTENT_TYPE_TAG_SIBLINGS : self.modules_tag_siblings.GetApplicableServiceIds( service_id )
    }
    
    waiting_messages = []
    
    for ( content_type, applicable_service_ids ) in content_types_to_applicable_service_ids.items():
        
        for applicable_service_id in applicable_service_ids:
            
            service = self.modules_services.GetService( applicable_service_id )
            
            if service.GetServiceType() != HC.TAG_REPOSITORY:
                
                continue
                
            
            if self.modules_repositories.HasLotsOfOutstandingLocalProcessing( applicable_service_id, ( content_type, ) ):
                
                waiting_messages.append( 'waiting on {} for {} processing'.format( service.GetName(), HC.content_type_string_lookup[ content_type ] ) )
                
            
        
    
    status = {
        'num_siblings_to_sync' : len( sibling_rows_to_add ) + len( sibling_rows_to_remove ),
        'num_parents_to_sync' : len( parent_rows_to_add ) + len( parent_rows_to_remove ),
        'num_actual_rows' : num_actual_rows,
        'num_ideal_rows' : num_ideal_rows,
        'waiting_on_tag_repos' : waiting_messages
    }
    
    return status
    
def _CacheTagDisplaySync( self, service_key: bytes, work_time: float = 0.5 ) -> bool:
    """Incrementally migrate this tag service's 'actual' sibling/parent display cache towards its 'ideal'.
    
    Works for roughly work_time seconds, moving at most one lookup-table row per loop iteration and
    propagating the implication changes into the specific and combined display mapping caches.
    Returns True if there is still work to do afterwards.
    """
    
    # ok, this is the big maintenance lad
    # basically, we fetch what is in actual, what should be in ideal, and migrate
    # the important change here as compared to the old system is that if you have a bunch of parents like 'character name' -> 'female', which might be a 10k-to-1 relationship, adding a new link to the chain does need much work
    # we compare the current structure, the ideal structure, and just make the needed changes
    
    time_started = HydrusData.GetNowFloat()
    
    tag_service_id = self.modules_services.GetServiceId( service_key )
    
    # every tag whose implied-by set changes here; used at the end to regen tags managers and sync the tag search cache
    all_tag_ids_altered = set()
    
    ( sibling_rows_to_add, sibling_rows_to_remove, parent_rows_to_add, parent_rows_to_remove, num_actual_rows, num_ideal_rows ) = self.modules_tag_display.GetApplicationStatus( tag_service_id )
    
    while len( sibling_rows_to_add ) + len( sibling_rows_to_remove ) + len( parent_rows_to_add ) + len( parent_rows_to_remove ) > 0 and not HydrusData.TimeHasPassedFloat( time_started + work_time ):
        
        # ok, so it turns out that migrating entire chains at once was sometimes laggy for certain large parent chains like 'azur lane'
        # imagine the instance where we simply want to parent a hundred As to a single B--we obviously don't have to do all that in one go
        # therefore, we are now going to break the migration into smaller pieces
        
        # I spent a large amount of time trying to figure out a way to _completely_ sync subsets of a chain's tags. this was a gigantic logical pain and complete sync couldn't get neat subsets in certain situations
        
        # █▓█▓███▓█▓███████████████████████████████▓▓▓███▓████████████████
        # █▓▓█▓▓▓▓▓███████████████████▓▓▓▓▓▓▓▓▓██████▓▓███▓███████████████
        # █▓███▓████████████████▓▒░        ░▒▓██████████████████████
        # █▓▓▓▓██████████████▒   ░░░░░░░░░░░░  ▒▓███████████████████
        # █▓█▓████████████▓░  ░░░░░░░░░░░░░░░░░ ░░░ ░▓█████████████████
        # ██████████████▓ ░░▒▒▒▒▒░░ ░░░ ░░ ░ ░░░░  ░████████████████
        # █████████████▒ ░░░▒▒▒▒░░░░░░░░ ░ ░░░░   ████▓▓█████████
        # ▓▓██████████▒ ░░░░▒▓▒░▒▒░░ ░░░ ░ ░ ░░░░░  ███▓▓▓████████
        # ███▓███████▒  ▒▒▒░░▒▒▒▒░░░ ░ ░░░  ░░░  ███▓▓▓███████
        # ██████████▓ ▒▒░▒░▒░▒▒▒▒▒░▒░ ░░ ░░░░░ ░  ██▓▓▓███████
        # █████▓▓▓█▒ ▒▒░▒░░░░▒▒░░░░░▒░ ░ ░  ▒▒▒  ██▓▓███████
        # ▓▓▓▓▓▓▓█░ ▒▓░░▒░░▒▒▒▒▓░░░░░▒░░ ░ ░░▒▒▒▒░  ▒██▓█▓▓▓▓▓▓
        # ▓▓▓▓███▓ ▒▒▒░░░▒▒░░▒░▒▒░░ ░░░░░ ░░░▒░ ▒░▒  ███▓▓▓▓▓▓▓
        # ███████▓░▒▒▒▒▒▒░░░▒▒▒░░░░ ░ ░░░ ░░░▒▒░  ░██▓████▓▓
        # ▓▓█▓███▒▒▒▓▒▒▓░░▒░▒▒▒▒░░░░░ ░ ░ ░ ░░░░░░▒░░░  ██▓█████▓
        # ▒▒▓▓▓▓▓▓▒▓▓░░▓▒ ▒▒░▒▒▒▒▒░░ ░░  ░░░▒░▒▓▓██████
        # ▒▒▓▓▓▓▓▓▒▒▒░▒▒▓░░░▒▒▒▒▒▒░  ░░░░▒▒░▒▓▓▓▓▓▓▓▓
        # ▓▒▓▓▓▓▓▓▒▓░ ▒▒▒▓▒▒░░▒▒▒▒▒▒░▒▒▒▒▒▒▒▒▒▒▒░░░░░▒░▒░░░▒░▒▒▒░▓█▓▓▓▓▓▓▓
        # ▓▒▒▓▓▓▓▓▓▓▓░ ▒▒▒▓▒▓▒▒▒▒▒▒▒▒▒▓▓▓▓▓▓▓▓▓▒▒▓▓▓▓▓▓▓▓▒▒▒▒▒▒▒▒▓▓▓▓▓▓▓▓▓
        # ▓▓▓▓▓▓▓▓▓▓▓▓▒░▒▒▒░▒▒▓▒▒▒░░▒▓▓▓██▓▓▓░░░░░▒▒▒▓▓▒ ░▒▒▒▒▒▒▓▓▓▓▒▒▒▓▓▓
        # █▓█▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒▓▓▓▒▒▒▓▓▓▓▒▒▒▓█▓ ░▓▓▒▒▓█▓▒░▒▒▒▒▓█▓█▓▓▓▓▓▓▓
        # █████▓▒▓▓▓▓▓▒▓▓▒▒▒▒▒▒▒▒▒▒▓▒░▒▓▒░░ ░▒▒ ░░░ ▓█▓▓▓▒▒▒▒█▓▒▒▒▓▓▓▓▓▒
        # █████▓▓▓█▓▓▓▓▒▓▓▓▒▒▒▒▒▒░▒▒░░░░ ░░░▒░ ▒ ░ ░ ░▒░░▒▓▓▓▒▒▒▒▒▒▒▒░
        # ████▓▓▓███▓▓▓▓▓▓▓▒▒▒▒░░ ▒▒░ ░░░░▒▒ ░▒░▒░ ░░ ░▓█▓▓▒▒▒▒░░▒▒▒
        # ███▓▓▓█████▓▓▓▒▒▓▒▒▒▒▒░░ ░ ░░▒░ ░▒▒▒ ▒░░▒░░ ▒▓▒▒▒░▒▒▒▒▓▓▓▒
        # ████▓███████▓▒▒▒░▒▒▓▓▓▒▒░░ ░ ▒▒▓██▒▒▓▓░ ░░░░▒▒░▒▒▒▒▒▓▒▓▒▓▒▒
        # ████████████▒░▒██░▒▓▓▓▓▓▒▒▒░░░░ ▒▓▒▓▓▓▒░▒▒░ ▒▒▒▓▒▒▒▒▓▒▒▓▓▓▒▒▒▒
        # ████▓▓▓▓▓▓▒▓▒ ▓▓ ▒▓▓▓▓▓▓▒▒▒░░░░░ ░ ░░░▒░░▒▒▒▒▒▒ ▒▓▒▒▒▒▒▒▒▒▒
        # ▓░░░░░░░▒▒▓▓▓ ▒█▒ ▒▒▓▒▒▒▒▒▒░░░░ ░░░ ░ ░ ▒░▒▒▒▒▒░░▒▓▒▒▒▒▒▒▒▓▒
        # ▒▒░░░▒▒▒▒▓▒▒▓▒░ ░▒▒▒▒▓▒▒▒▒▒▒▒▒▒▒▒▓▓▓▓▒▒▓▓▓▓▒░▒▒▒▒▒░░▒▓▒▒▒▒▒▒▒▓▒▒
        # ▓▒▒▒▓▓▓▓▓▒▒▒▒▒▓▓▒▓██▓▓▓▒▒▒▒▒░░▒▒▒▒░░░▒▒░░▒▒▓▒░░▒▓▓▓▒▓▓▒▒▒▒▒▒▒▒▒▒
        # ▓▒▓▓▓▓▒▒▒▒▒▒▒▒▒▒▓▓▒▓▓▓▓▓▒▒▒▒░░░░░░▒▒▒▒▒▒░░ ░▒░░▒▒▒▒▒▒▒▒▒▒▓▒▓▓▓▓▒
        # ▓▒▒▒▒▒▓▓▓▒▓▓▓▓▓▓▓▒▒▒▓▓▓▓▓▒▒▒░░░░░░░ ░░░░░▒▒▓▒▒▒▒▒▒▒▒▓▓▓▓▓▓▓▓
        # ▓▓▓▓▓▓▓▓▒▒▒▒▒▓▓▓▒▓▒▒▓▓▓▓▓▓▓▒▒▒░░░░░░ ░░▒▒▒▒▓▒▒▒▒▒▒▒▓▒▒▓▓▓▓▓▓
        # ▓▓▓▓▓▓▓▒▒▒▒▓▓▓▓▒▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒░░▒▒░░░▒▒▓▓▓▒▒█▓▒▓▒▒▒▓▓▒▒▓▓▓▓▓▓
        # █▓▓▓▓▒▒▓▓▓▓▓▓▓▓▓▒▓▓▓▓▓▓██▓▓▓▓▓▓▓▓▓▓▓▓▓▓█▓▓▓▓▒▒░█▓▓▓▓▓▒▒▒▒▒▒▓▓▓▓▓
        # ▓▓▓▒▒▒▒▒▓▓▓▓▓▒▓▓▓▒▒▒▒▒ ░▓▓▓▓▓▓▓▓▓██▓█▓▓▓▒▓▒░░░ ▓▓▒▓▒▒▒▒▒▒▒▒▒▓▓▓▒
        #
        # IN MEMORIAM
        # tag_ids_to_trunkward_additional_implication_work_weight
        #
        
        # I am now moving to table row addition/subtraction. we'll try to move one row at a time and do the smallest amount of work
        
        # There are potential multi-row optimisations here to reduce total work amount. Stuff like reordering existing chains, reassigning siblings.
        # e.g. if sibling A->B moves to A->C, we now go:
        # rescind A->B sibling: remove A->B, add A->A implications
        # add A->C sibling: remove A->A, add A->C implications
        # However, multi-row tech requires mixing removes and adds, which means we again stray into Hell Logic Zone 3000. We'll put the thought off.
        
        # I can always remove a sibling row from actual and stay valid. this does not invalidate ideals in parents table
        # I can always remove a parent row from actual and stay valid
        # I know I can copy a parent to actual if the tags aren't in any pending removes
        # I know I can copy a sibling to actual if the tags aren't in any pending removes (I would if there were pending removes indicating merges or something, but there won't be!)
        
        # we will remove surplus rows from actual and then add needed rows
        
        # There may be multi-row optimisations here to reduce total work amount, I am not sure. Probably for stuff like reordering existing chains. It probably requires mixing removes and adds, which means we stray into hell logic mode, so we'll put the thought off.
        
        # If we need to remove 1,000 mappings and then add 500 to be correct, we'll be doing 1,500 total no matter the order we do them in. This 1,000/500 is not the sum of all the current rows' individual current estimated work.
        # When removing, the sum overestimates, when adding, the sum underestimates. The number of sibling/parent rows to change is obviously also the same.
        
        # When you remove a row, the other row estimates may stay as weighty, or they may get less. (e.g. removing sibling A->B makes the parent B->C easier to remove later)
        # When you add a row, the other row estimates may stay as weighty, or they may get more. (e.g. adding parent A->B makes adding the sibling b->B more difficult later on)
        
        # The main priority of this function is to reduce each piece of work time.
        # When removing, we can break down the large jobs by doing small jobs. So, by doing small jobs first, we reduce max job time.
        # However, if we try that strategy when adding, we actually increase max job time, as those delayed big jobs only have the option of staying the same or getting bigger! We get zoom speed and then clunk mode.
        # Therefore, when adding, to limit max work time for the whole migration, we want to actually choose the largest jobs first! That work has to be done, and it doesn't get easier!
        
        ( cache_ideal_tag_siblings_lookup_table_name, cache_actual_tag_siblings_lookup_table_name ) = ClientDBTagSiblings.GenerateTagSiblingsLookupCacheTableNames( tag_service_id )
        ( cache_ideal_tag_parents_lookup_table_name, cache_actual_tag_parents_lookup_table_name ) = ClientDBTagParents.GenerateTagParentsLookupCacheTableNames( tag_service_id )
        
        def GetWeightedSiblingRow( sibling_rows, index ):
            # Return ( weight, row ) for the row at the given sorted-weight index (0 = lightest, -1 = heaviest).
            
            # when you change the sibling A->B in the _lookup table_:
            # you need to add/remove about A number of mappings for B and all it implies. the weight is: A * count( all the B->X implications )
            
            ideal_tag_ids = { ideal_tag_id for ( bad_tag_id, ideal_tag_id ) in sibling_rows }
            
            ideal_tag_ids_to_implies = self.modules_tag_display.GetTagsToImplies( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, ideal_tag_ids )
            
            bad_tag_ids = { bad_tag_id for ( bad_tag_id, ideal_tag ) in sibling_rows }
            
            bad_tag_ids_to_count = self.modules_mappings_counts.GetCountsEstimate( ClientTags.TAG_DISPLAY_STORAGE, tag_service_id, self.modules_services.combined_file_service_id, bad_tag_ids, True, True )
            
            # the + 1 keeps zero-count rows distinguishable and non-zero
            weight_and_rows = [ ( bad_tag_ids_to_count[ b ] * len( ideal_tag_ids_to_implies[ i ] ) + 1, ( b, i ) ) for ( b, i ) in sibling_rows ]
            
            weight_and_rows.sort()
            
            return weight_and_rows[ index ]
            
        
        def GetWeightedParentRow( parent_rows, index ):
            # Return ( weight, row ) for the row at the given sorted-weight index (0 = lightest, -1 = heaviest).
            
            # when you change the parent A->B in the _lookup table_:
            # you need to add/remove mappings (of B) for all instances of A and all that implies it. the weight is: sum( all the X->A implications )
            
            child_tag_ids = { c for ( c, a ) in parent_rows }
            
            child_tag_ids_to_implied_by = self.modules_tag_display.GetTagsToImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, child_tag_ids )
            
            all_child_tags = set( child_tag_ids )
            all_child_tags.update( itertools.chain.from_iterable( child_tag_ids_to_implied_by.values() ) )
            
            child_tag_ids_to_count = self.modules_mappings_counts.GetCountsEstimate( ClientTags.TAG_DISPLAY_STORAGE, tag_service_id, self.modules_services.combined_file_service_id, all_child_tags, True, True )
            
            weight_and_rows = [ ( sum( ( child_tag_ids_to_count[ implied_by ] for implied_by in child_tag_ids_to_implied_by[ c ] ) ), ( c, p ) ) for ( c, p ) in parent_rows ]
            
            weight_and_rows.sort()
            
            return weight_and_rows[ index ]
            
        
        # first up, the removees. what is in actual but not ideal
        
        some_removee_sibling_rows = HydrusData.SampleSetByGettingFirst( sibling_rows_to_remove, 20 )
        some_removee_parent_rows = HydrusData.SampleSetByGettingFirst( parent_rows_to_remove, 20 )
        
        if len( some_removee_sibling_rows ) + len( some_removee_parent_rows ) > 0:
            
            smallest_sibling_weight = None
            smallest_sibling_row = None
            smallest_parent_weight = None
            smallest_parent_row = None
            
            if len( some_removee_sibling_rows ) > 0:
                
                ( smallest_sibling_weight, smallest_sibling_row ) = GetWeightedSiblingRow( some_removee_sibling_rows, 0 )
                
            
            if len( some_removee_parent_rows ) > 0:
                
                ( smallest_parent_weight, smallest_parent_row ) = GetWeightedParentRow( some_removee_parent_rows, 0 )
                
            
            # if both kinds are available, keep only the lighter one; exactly one row is migrated per loop iteration
            if smallest_sibling_weight is not None and smallest_parent_weight is not None:
                
                if smallest_sibling_weight < smallest_parent_weight:
                    
                    smallest_parent_weight = None
                    smallest_parent_row = None
                    
                else:
                    
                    smallest_sibling_weight = None
                    smallest_sibling_row = None
                    
                
            
            if smallest_sibling_row is not None:
                
                # the only things changed here are those implied by or that imply one of these values
                
                ( a, b ) = smallest_sibling_row
                
                possibly_affected_tag_ids = { a, b }
                
                # when you delete a sibling, impliesA and impliedbyA should be subsets of impliesB and impliedbyB
                # but let's do everything anyway, just in case of invalid cache or something
                
                possibly_affected_tag_ids.update( self.modules_tag_display.GetImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, a ) )
                possibly_affected_tag_ids.update( self.modules_tag_display.GetImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, b ) )
                possibly_affected_tag_ids.update( self.modules_tag_display.GetImplies( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, a ) )
                possibly_affected_tag_ids.update( self.modules_tag_display.GetImplies( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, b ) )
                
                previous_chain_tag_ids_to_implied_by = self.modules_tag_display.GetTagsToImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, possibly_affected_tag_ids )
                
                self._Execute( 'DELETE FROM {} WHERE bad_tag_id = ? AND ideal_tag_id = ?;'.format( cache_actual_tag_siblings_lookup_table_name ), smallest_sibling_row )
                
                after_chain_tag_ids_to_implied_by = self.modules_tag_display.GetTagsToImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, possibly_affected_tag_ids )
                
                self.modules_tag_siblings.NotifySiblingDeleteRowSynced( tag_service_id, smallest_sibling_row )
                
            
            if smallest_parent_row is not None:
                
                # the only things changed here are those implied by or that imply one of these values
                
                ( a, b ) = smallest_parent_row
                
                possibly_affected_tag_ids = { a, b }
                
                possibly_affected_tag_ids.update( self.modules_tag_display.GetImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, a ) )
                possibly_affected_tag_ids.update( self.modules_tag_display.GetImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, b ) )
                possibly_affected_tag_ids.update( self.modules_tag_display.GetImplies( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, a ) )
                possibly_affected_tag_ids.update( self.modules_tag_display.GetImplies( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, b ) )
                
                previous_chain_tag_ids_to_implied_by = self.modules_tag_display.GetTagsToImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, possibly_affected_tag_ids )
                
                self._Execute( 'DELETE FROM {} WHERE child_tag_id = ? AND ancestor_tag_id = ?;'.format( cache_actual_tag_parents_lookup_table_name ), smallest_parent_row )
                
                after_chain_tag_ids_to_implied_by = self.modules_tag_display.GetTagsToImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, possibly_affected_tag_ids )
                
                self.modules_tag_parents.NotifyParentDeleteRowSynced( tag_service_id, smallest_parent_row )
                
            
        else:
            
            # there is nothing to remove, so we'll now go for what is in ideal but not actual
            
            some_addee_sibling_rows = HydrusData.SampleSetByGettingFirst( sibling_rows_to_add, 20 )
            some_addee_parent_rows = HydrusData.SampleSetByGettingFirst( parent_rows_to_add, 20 )
            
            if len( some_addee_sibling_rows ) + len( some_addee_parent_rows ) > 0:
                
                largest_sibling_weight = None
                largest_sibling_row = None
                largest_parent_weight = None
                largest_parent_row = None
                
                if len( some_addee_sibling_rows ) > 0:
                    
                    ( largest_sibling_weight, largest_sibling_row ) = GetWeightedSiblingRow( some_addee_sibling_rows, -1 )
                    
                
                if len( some_addee_parent_rows ) > 0:
                    
                    ( largest_parent_weight, largest_parent_row ) = GetWeightedParentRow( some_addee_parent_rows, -1 )
                    
                
                # when adding, prefer the heaviest job first (see strategy notes above)
                if largest_sibling_weight is not None and largest_parent_weight is not None:
                    
                    if largest_sibling_weight > largest_parent_weight:
                        
                        largest_parent_weight = None
                        largest_parent_row = None
                        
                    else:
                        
                        largest_sibling_weight = None
                        largest_sibling_row = None
                        
                    
                
                if largest_sibling_row is not None:
                    
                    # the only things changed here are those implied by or that imply one of these values
                    
                    ( a, b ) = largest_sibling_row
                    
                    possibly_affected_tag_ids = { a, b }
                    
                    possibly_affected_tag_ids.update( self.modules_tag_display.GetImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, a ) )
                    possibly_affected_tag_ids.update( self.modules_tag_display.GetImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, b ) )
                    possibly_affected_tag_ids.update( self.modules_tag_display.GetImplies( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, a ) )
                    possibly_affected_tag_ids.update( self.modules_tag_display.GetImplies( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, b ) )
                    
                    previous_chain_tag_ids_to_implied_by = self.modules_tag_display.GetTagsToImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, possibly_affected_tag_ids )
                    
                    self._Execute( 'INSERT OR IGNORE INTO {} ( bad_tag_id, ideal_tag_id ) VALUES ( ?, ? );'.format( cache_actual_tag_siblings_lookup_table_name ), largest_sibling_row )
                    
                    after_chain_tag_ids_to_implied_by = self.modules_tag_display.GetTagsToImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, possibly_affected_tag_ids )
                    
                    self.modules_tag_siblings.NotifySiblingAddRowSynced( tag_service_id, largest_sibling_row )
                    
                
                if largest_parent_row is not None:
                    
                    # the only things changed here are those implied by or that imply one of these values
                    
                    ( a, b ) = largest_parent_row
                    
                    possibly_affected_tag_ids = { a, b }
                    
                    possibly_affected_tag_ids.update( self.modules_tag_display.GetImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, a ) )
                    possibly_affected_tag_ids.update( self.modules_tag_display.GetImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, b ) )
                    possibly_affected_tag_ids.update( self.modules_tag_display.GetImplies( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, a ) )
                    possibly_affected_tag_ids.update( self.modules_tag_display.GetImplies( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, b ) )
                    
                    previous_chain_tag_ids_to_implied_by = self.modules_tag_display.GetTagsToImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, possibly_affected_tag_ids )
                    
                    self._Execute( 'INSERT OR IGNORE INTO {} ( child_tag_id, ancestor_tag_id ) VALUES ( ?, ? );'.format( cache_actual_tag_parents_lookup_table_name ), largest_parent_row )
                    
                    after_chain_tag_ids_to_implied_by = self.modules_tag_display.GetTagsToImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, possibly_affected_tag_ids )
                    
                    self.modules_tag_parents.NotifyParentAddRowSynced( tag_service_id, largest_parent_row )
                    
                
            else:
                
                break
                
            
        
        #
        
        # NOTE: exactly one of the four row branches above ran, so possibly_affected_tag_ids and the
        # previous/after implied-by maps are defined here
        
        tag_ids_to_delete_implied_by = collections.defaultdict( set )
        tag_ids_to_add_implied_by = collections.defaultdict( set )
        
        for tag_id in possibly_affected_tag_ids:
            
            previous_implied_by = previous_chain_tag_ids_to_implied_by[ tag_id ]
            after_implied_by = after_chain_tag_ids_to_implied_by[ tag_id ]
            
            to_delete = previous_implied_by.difference( after_implied_by )
            to_add = after_implied_by.difference( previous_implied_by )
            
            if len( to_delete ) > 0:
                
                tag_ids_to_delete_implied_by[ tag_id ] = to_delete
                
                all_tag_ids_altered.add( tag_id )
                all_tag_ids_altered.update( to_delete )
                
            
            if len( to_add ) > 0:
                
                tag_ids_to_add_implied_by[ tag_id ] = to_add
                
                all_tag_ids_altered.add( tag_id )
                all_tag_ids_altered.update( to_add )
                
            
        
        # now do the implications
        
        # if I am feeling very clever, I could potentially add tag_ids_to_migrate_implied_by, which would be an UPDATE
        # this would only work for tag_ids that have the same current implied by in actual and ideal (e.g. moving a tag sibling from A->B to B->A)
        # may be better to do this in a merged add/deleteimplication function that would be able to well detect this with 'same current implied' of count > 0 for that domain
        
        file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES )
        
        for file_service_id in file_service_ids:
            
            for ( tag_id, implication_tag_ids ) in tag_ids_to_delete_implied_by.items():
                
                self.modules_mappings_cache_specific_display.DeleteImplications( file_service_id, tag_service_id, implication_tag_ids, tag_id )
                
            
            for ( tag_id, implication_tag_ids ) in tag_ids_to_add_implied_by.items():
                
                self.modules_mappings_cache_specific_display.AddImplications( file_service_id, tag_service_id, implication_tag_ids, tag_id )
                
            
        
        for ( tag_id, implication_tag_ids ) in tag_ids_to_delete_implied_by.items():
            
            self._CacheCombinedFilesDisplayMappingsDeleteImplications( tag_service_id, implication_tag_ids, tag_id )
            
        
        for ( tag_id, implication_tag_ids ) in tag_ids_to_add_implied_by.items():
            
            self._CacheCombinedFilesDisplayMappingsAddImplications( tag_service_id, implication_tag_ids, tag_id )
            
        
        # refresh the worklist for the next loop iteration's while test
        ( sibling_rows_to_add, sibling_rows_to_remove, parent_rows_to_add, parent_rows_to_remove, num_actual_rows, num_ideal_rows ) = self.modules_tag_display.GetApplicationStatus( tag_service_id )
        
    
    if len( all_tag_ids_altered ) > 0:
        
        self._regen_tags_managers_tag_ids.update( all_tag_ids_altered )
        
        self._CacheTagsSyncTags( tag_service_id, all_tag_ids_altered )
        
    
    self._cursor_transaction_wrapper.pub_after_job( 'notify_new_tag_display_sync_status', service_key )
    
    still_needs_work = len( sibling_rows_to_add ) + len( sibling_rows_to_remove ) + len( parent_rows_to_add ) + len( parent_rows_to_remove ) > 0
    
    return still_needs_work
def _CacheTagsPopulate( self, file_service_id, tag_service_id, status_hook = None ):
    """Fill the fast tag-search cache for this (file domain, tag service) pair, reporting progress as it goes."""
    
    actual_siblings_table = ClientDBTagSiblings.GenerateTagSiblingsLookupCacheTableName( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id )
    actual_parents_table = ClientDBTagParents.GenerateTagParentsLookupCacheTableName( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id )
    
    # every tag that has current mappings here or participates in a sibling/parent chain belongs in the cache
    tag_id_queries = [
        self.modules_mappings_counts.GetQueryPhraseForCurrentTagIds( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id ),
        'SELECT DISTINCT bad_tag_id FROM {}'.format( actual_siblings_table ),
        'SELECT ideal_tag_id FROM {}'.format( actual_siblings_table ),
        'SELECT DISTINCT child_tag_id FROM {}'.format( actual_parents_table ),
        'SELECT DISTINCT ancestor_tag_id FROM {}'.format( actual_parents_table )
    ]
    
    union_query = ' UNION '.join( tag_id_queries ) + ';'
    
    CHUNK_SIZE = 10000
    
    for ( chunk_of_tag_ids, num_done, num_to_do ) in HydrusDB.ReadLargeIdQueryInSeparateChunks( self._c, union_query, CHUNK_SIZE ):
        
        self.modules_tag_search.AddTags( file_service_id, tag_service_id, chunk_of_tag_ids )
        
        progress_message = HydrusData.ConvertValueRangeToPrettyString( num_done, num_to_do )
        
        self._controller.frame_splash_status.SetSubtext( progress_message )
        
        if status_hook is not None:
            
            status_hook( progress_message )
            
        
    
    self.modules_db_maintenance.TouchAnalyzeNewTables()
def _CacheTagsSyncTags( self, tag_service_id, tag_ids, just_these_file_service_ids = None ):
    """Bring the fast tag-search cache into agreement with the counts cache and sibling/parent chains for the given tags."""
    
    if len( tag_ids ) == 0:
        
        return
        
    
    if just_these_file_service_ids is None:
        
        target_file_service_ids = list( self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_TAG_LOOKUP_CACHES ) )
        
        target_file_service_ids.append( self.modules_services.combined_file_service_id )
        
    else:
        
        target_file_service_ids = just_these_file_service_ids
        
    
    chained_tag_ids = self.modules_tag_display.FilterChained( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, tag_ids )
    unchained_tag_ids = set( tag_ids ).difference( chained_tag_ids )
    
    with self._MakeTemporaryIntegerTable( tag_ids, 'tag_id' ) as temp_tag_ids_table_name:
        
        with self._MakeTemporaryIntegerTable( unchained_tag_ids, 'tag_id' ) as temp_unchained_tag_ids_table_name:
            
            for file_service_id in target_file_service_ids:
                
                currently_in_search_cache = self.modules_tag_search.FilterExistingTagIds( file_service_id, tag_service_id, temp_tag_ids_table_name )
                unchained_with_counts = self.modules_mappings_counts.FilterExistingTagIds( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, temp_unchained_tag_ids_table_name )
                
                # chained tags always belong in the cache; unchained tags belong only if they have counts in this domain
                should_have = chained_tag_ids.union( unchained_with_counts )
                should_not_have = unchained_tag_ids.difference( unchained_with_counts )
                
                self.modules_tag_search.AddTags( file_service_id, tag_service_id, should_have.difference( currently_in_search_cache ) )
                self.modules_tag_search.DeleteTags( file_service_id, tag_service_id, currently_in_search_cache.intersection( should_not_have ) )
def _CheckDBIntegrity( self ):
    """Run PRAGMA integrity_check over every attached database, reporting progress and any errors via a modal job key."""
    
    title_prefix = 'checking db integrity: '
    
    job_key = ClientThreading.JobKey( cancellable = True )
    
    try:
        
        job_key.SetStatusTitle( title_prefix + 'preparing' )
        
        self._controller.pub( 'modal_message', job_key )
        
        num_errors = 0
        
        job_key.SetStatusTitle( title_prefix + 'running' )
        job_key.SetVariable( 'popup_text_1', 'errors found so far: ' + HydrusData.ToHumanInt( num_errors ) )
        
        db_names = [ name for ( index, name, path ) in self._Execute( 'PRAGMA database_list;' ) if name not in ( 'mem', 'temp', 'durable_temp' ) ]
        
        for db_name in db_names:
            
            for ( text, ) in self._Execute( 'PRAGMA ' + db_name + '.integrity_check;' ):
                
                ( i_paused, should_quit ) = job_key.WaitIfNeeded()
                
                if should_quit:
                    
                    job_key.SetStatusTitle( title_prefix + 'cancelled' )
                    job_key.SetVariable( 'popup_text_1', 'errors found: ' + HydrusData.ToHumanInt( num_errors ) )
                    
                    return
                    
                
                # integrity_check yields a single 'ok' row on a clean database; anything else is an error report
                if text == 'ok':
                    
                    continue
                    
                
                if num_errors == 0:
                    
                    HydrusData.Print( 'During a db integrity check, these errors were discovered:' )
                    
                
                HydrusData.Print( text )
                
                num_errors += 1
                
                job_key.SetVariable( 'popup_text_1', 'errors found so far: ' + HydrusData.ToHumanInt( num_errors ) )
                
            
        
    finally:
        
        job_key.SetStatusTitle( title_prefix + 'completed' )
        job_key.SetVariable( 'popup_text_1', 'errors found: ' + HydrusData.ToHumanInt( num_errors ) )
        
        HydrusData.Print( job_key.ToString() )
        
        job_key.Finish()
def _CleanAfterJobWork( self ):
    """Reset per-job scratch state, then defer to the base class cleanup."""
    
    # these accumulators are only meaningful for the duration of a single job
    self._regen_tags_managers_hash_ids = set()
    self._regen_tags_managers_tag_ids = set()
    self._after_job_content_update_jobs = []
    
    HydrusDB.HydrusDB._CleanAfterJobWork( self )
def _ClearOrphanFileRecords( self ):
    """Find and delete file records that exist in the specific local file domains but not the combined
    umbrella service, or vice versa.
    
    Both orphan types are fixed by the same 'delete from combined local files' call, which flushes the
    specific domains as well. Progress is reported through a cancellable modal job key.
    """
    
    job_key = ClientThreading.JobKey( cancellable = True )
    
    job_key.SetStatusTitle( 'clear orphan file records' )
    
    self._controller.pub( 'modal_message', job_key )
    
    try:
        
        job_key.SetVariable( 'popup_text_1', 'looking for orphans' )
        
        local_file_service_ids = self.modules_services.GetServiceIds( ( HC.LOCAL_FILE_DOMAIN, HC.LOCAL_FILE_TRASH_DOMAIN ) )
        
        local_hash_ids = set()
        
        for local_file_service_id in local_file_service_ids:
            
            some_hash_ids = self.modules_files_storage.GetCurrentHashIdsList( local_file_service_id )
            
            local_hash_ids.update( some_hash_ids )
            
        
        combined_local_hash_ids = set( self.modules_files_storage.GetCurrentHashIdsList( self.modules_services.combined_local_file_service_id ) )
        
        in_local_not_in_combined = local_hash_ids.difference( combined_local_hash_ids )
        in_combined_not_in_local = combined_local_hash_ids.difference( local_hash_ids )
        
        if job_key.IsCancelled():
            
            return
            
        
        job_key.SetVariable( 'popup_text_1', 'deleting orphans' )
        
        if len( in_local_not_in_combined ) > 0:
            
            # these files were deleted from the umbrella service without being cleared from a specific file domain
            # they are most likely deleted from disk
            
            self._ClearOrphanFileRecordsDeleteBatch( in_local_not_in_combined, 'local domain' )
            
        
        if job_key.IsCancelled():
            
            return
            
        
        if len( in_combined_not_in_local ) > 0:
            
            # these files were deleted from all specific services but not from the combined service
            # I have only ever seen one example of this and am not sure how it happened
            # in any case, the same 'delete combined' call will do the job
            
            self._ClearOrphanFileRecordsDeleteBatch( in_combined_not_in_local, 'combined domain' )
            
        
        if len( in_local_not_in_combined ) == 0 and len( in_combined_not_in_local ) == 0:
            
            HydrusData.ShowText( 'No orphan file records found!' )
            
        
    finally:
        
        job_key.SetVariable( 'popup_text_1', 'done!' )
        
        job_key.Finish()
        
    

def _ClearOrphanFileRecordsDeleteBatch( self, orphan_hash_ids, domain_description ):
    """Delete one batch of orphan file records via the combined local file service and announce the result.
    
    domain_description is the human-readable label ('local domain' or 'combined domain') used in the
    notification text.
    """
    
    # pushing the 'delete combined' call will flush from the local services as well
    self._DeleteFiles( self.modules_services.combined_local_file_service_id, orphan_hash_ids )
    
    for hash_id in orphan_hash_ids:
        
        self.modules_similar_files.StopSearchingFile( hash_id )
        
    
    HydrusData.ShowText( 'Found and deleted ' + HydrusData.ToHumanInt( len( orphan_hash_ids ) ) + ' ' + domain_description + ' orphan file records.' )
def _ClearOrphanTables( self ):
    """Find and drop database tables that no longer belong to any module's known service tables.
    
    Bug fix: the per-module surplus sets were accumulated into all_surplus_table_names but the report
    and DROP loop then read the loop variable, so only the LAST module's orphans were ever dropped
    (and the 'no orphans' message fired whenever the last module alone happened to be clean). Both
    now use the aggregated set.
    """
    
    # collect every table name across all attached databases, qualified as 'dbname.table' for non-main
    all_table_names = set()
    
    db_names = [ name for ( index, name, path ) in self._Execute( 'PRAGMA database_list;' ) if name not in ( 'mem', 'temp', 'durable_temp' ) ]
    
    for db_name in db_names:
        
        table_names = self._STS( self._Execute( 'SELECT name FROM {}.sqlite_master WHERE type = ?;'.format( db_name ), ( 'table', ) ) )
        
        if db_name != 'main':
            
            table_names = { '{}.{}'.format( db_name, table_name ) for table_name in table_names }
            
        
        all_table_names.update( table_names )
        
    
    # ask every module which of these tables it considers surplus
    all_surplus_table_names = set()
    
    for module in self._modules:
        
        surplus_table_names = module.GetSurplusServiceTableNames( all_table_names )
        
        all_surplus_table_names.update( surplus_table_names )
        
    
    if len( all_surplus_table_names ) == 0:
        
        HydrusData.ShowText( 'No orphan tables!' )
        
    
    for table_name in all_surplus_table_names:
        
        HydrusData.ShowText( 'Dropping ' + table_name )
        
        self._Execute( 'DROP table ' + table_name + ';' )
def _CreateDB( self ):
client_files_default = os.path.join( self._db_dir, 'client_files' )
HydrusPaths.MakeSureDirectoryExists( client_files_default )
# main
for module in self._modules:
module.CreateInitialTables()
module.CreateInitialIndices()
# intentionally not IF NOT EXISTS here, to catch double-creation accidents early and on a good table
self._Execute( 'CREATE TABLE version ( version INTEGER );' )
#
self._Execute( 'CREATE TABLE IF NOT EXISTS client_files_locations ( prefix TEXT, location TEXT );' )
self._Execute( 'CREATE TABLE IF NOT EXISTS ideal_client_files_locations ( location TEXT, weight INTEGER );' )
self._Execute( 'CREATE TABLE IF NOT EXISTS ideal_thumbnail_override_location ( location TEXT );' )
self._Execute( 'CREATE TABLE IF NOT EXISTS file_notes ( hash_id INTEGER, name_id INTEGER, note_id INTEGER, PRIMARY KEY ( hash_id, name_id ) );' )
self._CreateIndex( 'file_notes', [ 'note_id' ] )
self._CreateIndex( 'file_notes', [ 'name_id' ] )
self._Execute( 'CREATE TABLE IF NOT EXISTS local_ratings ( service_id INTEGER, hash_id INTEGER, rating REAL, PRIMARY KEY ( service_id, hash_id ) );' )
self._CreateIndex( 'local_ratings', [ 'hash_id' ] )
self._CreateIndex( 'local_ratings', [ 'rating' ] )
self._Execute( 'CREATE TABLE IF NOT EXISTS file_modified_timestamps ( hash_id INTEGER PRIMARY KEY, file_modified_timestamp INTEGER );' )
self._CreateIndex( 'file_modified_timestamps', [ 'file_modified_timestamp' ] )
self._Execute( 'CREATE TABLE IF NOT EXISTS options ( options TEXT_YAML );', )
self._Execute( 'CREATE TABLE IF NOT EXISTS recent_tags ( service_id INTEGER, tag_id INTEGER, timestamp INTEGER, PRIMARY KEY ( service_id, tag_id ) );' )
self._Execute( 'CREATE TABLE IF NOT EXISTS remote_thumbnails ( service_id INTEGER, hash_id INTEGER, PRIMARY KEY( service_id, hash_id ) );' )
self._Execute( 'CREATE TABLE IF NOT EXISTS service_filenames ( service_id INTEGER, hash_id INTEGER, filename TEXT, PRIMARY KEY ( service_id, hash_id ) );' )
self._CreateIndex( 'service_filenames', [ 'hash_id' ] )
self._Execute( 'CREATE TABLE IF NOT EXISTS service_directories ( service_id INTEGER, directory_id INTEGER, num_files INTEGER, total_size INTEGER, note TEXT, PRIMARY KEY ( service_id, directory_id ) );' )
self._CreateIndex( 'service_directories', [ 'directory_id' ] )
self._Execute( 'CREATE TABLE IF NOT EXISTS service_directory_file_map ( service_id INTEGER, directory_id INTEGER, hash_id INTEGER, PRIMARY KEY ( service_id, directory_id, hash_id ) );' )
self._CreateIndex( 'service_directory_file_map', [ 'directory_id' ] )
self._CreateIndex( 'service_directory_file_map', [ 'hash_id' ] )
self._Execute( 'CREATE TABLE IF NOT EXISTS service_info ( service_id INTEGER, info_type INTEGER, info INTEGER, PRIMARY KEY ( service_id, info_type ) );' )
self._Execute( 'CREATE TABLE IF NOT EXISTS statuses ( status_id INTEGER PRIMARY KEY, status TEXT UNIQUE );' )
self._Execute( 'CREATE TABLE IF NOT EXISTS url_map ( hash_id INTEGER, url_id INTEGER, PRIMARY KEY ( hash_id, url_id ) );' )
self._CreateIndex( 'url_map', [ 'url_id' ] )
self._Execute( 'CREATE TABLE IF NOT EXISTS file_viewing_stats ( hash_id INTEGER, canvas_type INTEGER, last_viewed_timestamp INTEGER, views INTEGER, viewtime INTEGER, PRIMARY KEY ( hash_id, canvas_type ) );' )
self._CreateIndex( 'file_viewing_stats', [ 'last_viewed_timestamp' ] )
self._CreateIndex( 'file_viewing_stats', [ 'views' ] )
self._CreateIndex( 'file_viewing_stats', [ 'viewtime' ] )
# inserts
location = HydrusPaths.ConvertAbsPathToPortablePath( client_files_default )
for prefix in HydrusData.IterateHexPrefixes():
self._Execute( 'INSERT INTO client_files_locations ( prefix, location ) VALUES ( ?, ? );', ( 'f' + prefix, location ) )
self._Execute( 'INSERT INTO client_files_locations ( prefix, location ) VALUES ( ?, ? );', ( 't' + prefix, location ) )
self._Execute( 'INSERT INTO ideal_client_files_locations ( location, weight ) VALUES ( ?, ? );', ( location, 1 ) )
init_service_info = [
( CC.COMBINED_TAG_SERVICE_KEY, HC.COMBINED_TAG, 'all known tags' ),
( CC.COMBINED_FILE_SERVICE_KEY, HC.COMBINED_FILE, 'all known files' ),
( CC.COMBINED_DELETED_FILE_SERVICE_KEY, HC.COMBINED_DELETED_FILE, 'all deleted files' ),
( CC.COMBINED_LOCAL_FILE_SERVICE_KEY, HC.COMBINED_LOCAL_FILE, 'all local files' ),
( CC.LOCAL_FILE_SERVICE_KEY, HC.LOCAL_FILE_DOMAIN, 'my files' ),
( CC.TRASH_SERVICE_KEY, HC.LOCAL_FILE_TRASH_DOMAIN, 'trash' ),
( CC.LOCAL_UPDATE_SERVICE_KEY, HC.LOCAL_FILE_DOMAIN, 'repository updates' ),
( CC.DEFAULT_LOCAL_TAG_SERVICE_KEY, HC.LOCAL_TAG, 'my tags' ),
( CC.DEFAULT_LOCAL_DOWNLOADER_TAG_SERVICE_KEY, HC.LOCAL_TAG, 'downloader tags' ),
( CC.LOCAL_BOORU_SERVICE_KEY, HC.LOCAL_BOORU, 'local booru' ),
( CC.LOCAL_NOTES_SERVICE_KEY, HC.LOCAL_NOTES, 'local notes' ),
( CC.DEFAULT_FAVOURITES_RATING_SERVICE_KEY, HC.LOCAL_RATING_LIKE, 'favourites' ),
( CC.CLIENT_API_SERVICE_KEY, HC.CLIENT_API_SERVICE, 'client api' )
]
for ( service_key, service_type, name ) in init_service_info:
dictionary = ClientServices.GenerateDefaultServiceDictionary( service_type )
if service_key == CC.DEFAULT_FAVOURITES_RATING_SERVICE_KEY:
from hydrus.client.metadata import ClientRatings
dictionary[ 'shape' ] = ClientRatings.STAR
like_colours = {}
like_colours[ ClientRatings.LIKE ] = ( ( 0, 0, 0 ), ( 240, 240, 65 ) )
like_colours[ ClientRatings.DISLIKE ] = ( ( 0, 0, 0 ), ( 200, 80, 120 ) )
like_colours[ ClientRatings.NULL ] = ( ( 0, 0, 0 ), ( 191, 191, 191 ) )
like_colours[ ClientRatings.MIXED ] = ( ( 0, 0, 0 ), ( 95, 95, 95 ) )
dictionary[ 'colours' ] = list( like_colours.items() )
self._AddService( service_key, service_type, name, dictionary )
self._ExecuteMany( 'INSERT INTO yaml_dumps VALUES ( ?, ?, ? );', ( ( ClientDBSerialisable.YAML_DUMP_ID_IMAGEBOARD, name, imageboards ) for ( name, imageboards ) in ClientDefaults.GetDefaultImageboards() ) )
new_options = ClientOptions.ClientOptions()
new_options.SetSimpleDownloaderFormulae( ClientDefaults.GetDefaultSimpleDownloaderFormulae() )
names_to_tag_filters = {}
tag_filter = HydrusTags.TagFilter()
tag_filter.SetRule( 'diaper', HC.FILTER_BLACKLIST )
tag_filter.SetRule( 'gore', HC.FILTER_BLACKLIST )
tag_filter.SetRule( 'guro', HC.FILTER_BLACKLIST )
tag_filter.SetRule( 'scat', HC.FILTER_BLACKLIST )
tag_filter.SetRule( 'vore', HC.FILTER_BLACKLIST )
names_to_tag_filters[ 'example blacklist' ] = tag_filter
tag_filter = HydrusTags.TagFilter()
tag_filter.SetRule( '', HC.FILTER_BLACKLIST )
tag_filter.SetRule( ':', HC.FILTER_BLACKLIST )
tag_filter.SetRule( 'series:', HC.FILTER_WHITELIST )
tag_filter.SetRule( 'creator:', HC.FILTER_WHITELIST )
tag_filter.SetRule( 'studio:', HC.FILTER_WHITELIST )
tag_filter.SetRule( 'character:', HC.FILTER_WHITELIST )
names_to_tag_filters[ 'basic namespaces only' ] = tag_filter
tag_filter = HydrusTags.TagFilter()
tag_filter.SetRule( ':', HC.FILTER_BLACKLIST )
tag_filter.SetRule( 'series:', HC.FILTER_WHITELIST )
tag_filter.SetRule( 'creator:', HC.FILTER_WHITELIST )
tag_filter.SetRule( 'studio:', HC.FILTER_WHITELIST )
tag_filter.SetRule( 'character:', HC.FILTER_WHITELIST )
names_to_tag_filters[ 'basic booru tags only' ] = tag_filter
tag_filter = HydrusTags.TagFilter()
tag_filter.SetRule( 'title:', HC.FILTER_BLACKLIST )
tag_filter.SetRule( 'filename:', HC.FILTER_BLACKLIST )
tag_filter.SetRule( 'source:', HC.FILTER_BLACKLIST )
tag_filter.SetRule( 'booru:', HC.FILTER_BLACKLIST )
tag_filter.SetRule( 'url:', HC.FILTER_BLACKLIST )
names_to_tag_filters[ 'exclude long/spammy namespaces' ] = tag_filter
new_options.SetFavouriteTagFilters( names_to_tag_filters )
self.modules_serialisable.SetJSONDump( new_options )
list_of_shortcuts = ClientDefaults.GetDefaultShortcuts()
for shortcuts in list_of_shortcuts:
self.modules_serialisable.SetJSONDump( shortcuts )
client_api_manager = ClientAPI.APIManager()
self.modules_serialisable.SetJSONDump( client_api_manager )
bandwidth_manager = ClientNetworkingBandwidth.NetworkBandwidthManager()
bandwidth_manager.SetDirty()
ClientDefaults.SetDefaultBandwidthManagerRules( bandwidth_manager )
self.modules_serialisable.SetJSONDump( bandwidth_manager )
domain_manager = ClientNetworkingDomain.NetworkDomainManager()
ClientDefaults.SetDefaultDomainManagerData( domain_manager )
self.modules_serialisable.SetJSONDump( domain_manager )
session_manager = ClientNetworkingSessions.NetworkSessionManager()
session_manager.SetDirty()
self.modules_serialisable.SetJSONDump( session_manager )
login_manager = ClientNetworkingLogin.NetworkLoginManager()
ClientDefaults.SetDefaultLoginManagerScripts( login_manager )
self.modules_serialisable.SetJSONDump( login_manager )
favourite_search_manager = ClientSearch.FavouriteSearchManager()
ClientDefaults.SetDefaultFavouriteSearchManagerData( favourite_search_manager )
self.modules_serialisable.SetJSONDump( favourite_search_manager )
tag_display_manager = ClientTagsHandling.TagDisplayManager()
self.modules_serialisable.SetJSONDump( tag_display_manager )
from hydrus.client.gui.lists import ClientGUIListManager
column_list_manager = ClientGUIListManager.ColumnListManager()
self.modules_serialisable.SetJSONDump( column_list_manager )
self._Execute( 'INSERT INTO namespaces ( namespace_id, namespace ) VALUES ( ?, ? );', ( 1, '' ) )
self._Execute( 'INSERT INTO version ( version ) VALUES ( ? );', ( HC.SOFTWARE_VERSION, ) )
self._ExecuteMany( 'INSERT INTO json_dumps_named VALUES ( ?, ?, ?, ?, ? );', ClientDefaults.GetDefaultScriptRows() )
def _CullFileViewingStatistics( self ):
media_min = self._controller.new_options.GetNoneableInteger( 'file_viewing_statistics_media_min_time' )
media_max = self._controller.new_options.GetNoneableInteger( 'file_viewing_statistics_media_max_time' )
preview_min = self._controller.new_options.GetNoneableInteger( 'file_viewing_statistics_preview_min_time' )
preview_max = self._controller.new_options.GetNoneableInteger( 'file_viewing_statistics_preview_max_time' )
if media_min is not None and media_max is not None and media_min > media_max:
raise Exception( 'Media min was greater than media max! Abandoning cull now!' )
if preview_min is not None and preview_max is not None and preview_min > preview_max:
raise Exception( 'Preview min was greater than preview max! Abandoning cull now!' )
if media_min is not None:
self._Execute( 'UPDATE file_viewing_stats SET views = CAST( viewtime / ? AS INTEGER ) WHERE views * ? > viewtime AND canvas_type = ?;', ( media_min, media_min, CC.CANVAS_MEDIA_VIEWER ) )
if media_max is not None:
self._Execute( 'UPDATE file_viewing_stats SET viewtime = views * ? WHERE viewtime > views * ? AND canvas_type = ?;', ( media_max, media_max, CC.CANVAS_MEDIA_VIEWER ) )
if preview_min is not None:
self._Execute( 'UPDATE file_viewing_stats SET views = CAST( viewtime / ? AS INTEGER ) WHERE views * ? > viewtime AND canvas_type = ?;', ( preview_min, preview_min, CC.CANVAS_PREVIEW ) )
if preview_max is not None:
self._Execute( 'UPDATE file_viewing_stats SET viewtime = views * ? WHERE viewtime > views * ? AND canvas_type = ?;', ( preview_max, preview_max, CC.CANVAS_PREVIEW ) )
def _DeleteFiles( self, service_id, hash_ids, only_if_current = False ):
# the gui sometimes gets out of sync and sends a DELETE FROM TRASH call before the SEND TO TRASH call
# in this case, let's make sure the local file domains are clear before deleting from the umbrella domain
local_file_service_ids = self.modules_services.GetServiceIds( ( HC.LOCAL_FILE_DOMAIN, ) )
if service_id == self.modules_services.combined_local_file_service_id:
for local_file_service_id in local_file_service_ids:
self._DeleteFiles( local_file_service_id, hash_ids, only_if_current = True )
self._DeleteFiles( self.modules_services.trash_service_id, hash_ids )
service = self.modules_services.GetService( service_id )
service_type = service.GetServiceType()
existing_hash_ids_to_timestamps = self.modules_files_storage.GetCurrentHashIdsToTimestamps( service_id, hash_ids )
existing_hash_ids = set( existing_hash_ids_to_timestamps.keys() )
service_info_updates = []
# do delete outside, file repos and perhaps some other bananas situation can delete without ever having added
now = HydrusData.GetNow()
if service_type not in HC.FILE_SERVICES_WITH_NO_DELETE_RECORD:
if only_if_current:
deletee_hash_ids = existing_hash_ids
else:
deletee_hash_ids = hash_ids
if len( deletee_hash_ids ) > 0:
insert_rows = [ ( hash_id, existing_hash_ids_to_timestamps[ hash_id ] if hash_id in existing_hash_ids_to_timestamps else None ) for hash_id in deletee_hash_ids ]
num_new_deleted_files = self.modules_files_storage.RecordDeleteFiles( service_id, insert_rows )
service_info_updates.append( ( num_new_deleted_files, service_id, HC.SERVICE_INFO_NUM_DELETED_FILES ) )
if len( existing_hash_ids_to_timestamps ) > 0:
# remove them from the service
pending_changed = self.modules_files_storage.RemoveFiles( service_id, existing_hash_ids )
if pending_changed:
self._cursor_transaction_wrapper.pub_after_job( 'notify_new_pending' )
delta_size = self.modules_files_metadata_basic.GetTotalSize( existing_hash_ids )
num_viewable_files = self.modules_files_metadata_basic.GetNumViewable( existing_hash_ids )
num_existing_files_removed = len( existing_hash_ids )
num_inbox = len( existing_hash_ids.intersection( self.modules_files_metadata_basic.inbox_hash_ids ) )
service_info_updates.append( ( -delta_size, service_id, HC.SERVICE_INFO_TOTAL_SIZE ) )
service_info_updates.append( ( -num_viewable_files, service_id, HC.SERVICE_INFO_NUM_VIEWABLE_FILES ) )
service_info_updates.append( ( -num_existing_files_removed, service_id, HC.SERVICE_INFO_NUM_FILES ) )
service_info_updates.append( ( -num_inbox, service_id, HC.SERVICE_INFO_NUM_INBOX ) )
# now do special stuff
# if we maintain tag counts for this service, update
if service_type in HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES:
tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
with self._MakeTemporaryIntegerTable( existing_hash_ids, 'hash_id' ) as temp_hash_id_table_name:
for tag_service_id in tag_service_ids:
self._CacheSpecificMappingsDeleteFiles( service_id, tag_service_id, existing_hash_ids, temp_hash_id_table_name )
# update the combined deleted file service
if service_type in HC.FILE_SERVICES_COVERED_BY_COMBINED_DELETED_FILE:
now = HydrusData.GetNow()
rows = [ ( hash_id, now ) for hash_id in existing_hash_ids ]
self._AddFiles( self.modules_services.combined_deleted_file_service_id, rows )
# if any files are no longer in any local file services, send them to the trash
if service_id in local_file_service_ids:
hash_ids_still_in_another_service = set()
other_local_file_service_ids = set( local_file_service_ids )
other_local_file_service_ids.discard( service_id )
hash_ids_still_in_another_service = self.modules_files_storage.FilterAllCurrentHashIds( existing_hash_ids, just_these_service_ids = other_local_file_service_ids )
trashed_hash_ids = existing_hash_ids.difference( hash_ids_still_in_another_service )
if len( trashed_hash_ids ) > 0:
now = HydrusData.GetNow()
delete_rows = [ ( hash_id, now ) for hash_id in trashed_hash_ids ]
self._AddFiles( self.modules_services.trash_service_id, delete_rows )
# if the files are being fully deleted, then physically delete them
if service_id == self.modules_services.combined_local_file_service_id:
self._ArchiveFiles( hash_ids )
for hash_id in hash_ids:
self.modules_similar_files.StopSearchingFile( hash_id )
self.modules_files_maintenance_queue.CancelFiles( hash_ids )
self.modules_hashes_local_cache.DropHashIdsFromCache( existing_hash_ids )
# push the info updates, notify
self._ExecuteMany( 'UPDATE service_info SET info = info + ? WHERE service_id = ? AND info_type = ?;', service_info_updates )
    def _DeletePending( self, service_key ):
        """Rescind all pending and petitioned content for a service.
        
        For tag repositories this rescinds pending/petitioned mappings and
        clears sibling/parent petitions; for file repositories and IPFS it
        clears pending file rows. Relevant gui notifications and a
        SERVICE_UPDATE_DELETE_PENDING are published after the job commits.
        """
        
        service_id = self.modules_services.GetServiceId( service_key )
        
        service = self.modules_services.GetService( service_id )
        
        if service.GetServiceType() == HC.TAG_REPOSITORY:
            
            ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = ClientDBMappingsStorage.GenerateMappingsTableNames( service_id )
            
            # gather every ( tag_id, [ hash_ids ] ) currently pending/petitioned so _UpdateMappings can rescind them in one go
            pending_rescinded_mappings_ids = list( HydrusData.BuildKeyToListDict( self._Execute( 'SELECT tag_id, hash_id FROM ' + pending_mappings_table_name + ';' ) ).items() )
            
            petitioned_rescinded_mappings_ids = list( HydrusData.BuildKeyToListDict( self._Execute( 'SELECT tag_id, hash_id FROM ' + petitioned_mappings_table_name + ';' ) ).items() )
            
            self._UpdateMappings( service_id, pending_rescinded_mappings_ids = pending_rescinded_mappings_ids, petitioned_rescinded_mappings_ids = petitioned_rescinded_mappings_ids )
            
            self._Execute( 'DELETE FROM tag_sibling_petitions WHERE service_id = ?;', ( service_id, ) )
            self._Execute( 'DELETE FROM tag_parent_petitions WHERE service_id = ?;', ( service_id, ) )
            
        elif service.GetServiceType() in ( HC.FILE_REPOSITORY, HC.IPFS ):
            
            self.modules_files_storage.DeletePending( service_id )
            
        
        self._cursor_transaction_wrapper.pub_after_job( 'notify_new_pending' )
        self._cursor_transaction_wrapper.pub_after_job( 'notify_new_tag_display_application' )
        self._cursor_transaction_wrapper.pub_after_job( 'notify_new_force_refresh_tags_data' )
        
        self.pub_service_updates_after_commit( { service_key : [ HydrusData.ServiceUpdate( HC.SERVICE_UPDATE_DELETE_PENDING ) ] } )
        
    def _DeleteService( self, service_id ):
        """Completely delete a service and every row it owns across the db.
        
        Drops ratings, recent tags and service info rows, file storage,
        repository tables, mappings and all associated caches, then deletes
        the service row itself and publishes a SERVICE_UPDATE_RESET for the
        service key after commit.
        """
        
        service = self.modules_services.GetService( service_id )
        
        service_key = service.GetServiceKey()
        service_type = service.GetServiceType()
        
        # for a long time, much of this was done with foreign keys, which had to be turned on especially for this operation
        # however, this seemed to cause some immense temp drive space bloat when dropping the mapping tables, as there seems to be a trigger/foreign reference check for every row to be deleted
        # so now we just blat all tables and trust in the Lord that we don't forget to add any new ones in future
        
        self._Execute( 'DELETE FROM local_ratings WHERE service_id = ?;', ( service_id, ) )
        self._Execute( 'DELETE FROM recent_tags WHERE service_id = ?;', ( service_id, ) )
        self._Execute( 'DELETE FROM service_info WHERE service_id = ?;', ( service_id, ) )
        
        self._DeleteServiceDropFiles( service_id, service_type )
        
        if service_type in HC.REPOSITORIES:
            
            self.modules_repositories.DropRepositoryTables( service_id )
            
        
        self._DeleteServiceDropMappings( service_id, service_type )
        
        if service_type in HC.REAL_TAG_SERVICES:
            
            # other tag services may sync siblings/parents from this one; their caches need a regen once it is gone
            interested_service_ids = set( self.modules_tag_display.GetInterestedServiceIds( service_id ) )
            
            interested_service_ids.discard( service_id ) # lmao, not any more!
            
            self.modules_tag_parents.Drop( service_id )
            
            self.modules_tag_siblings.Drop( service_id )
            
            if len( interested_service_ids ) > 0:
                
                self.modules_tag_display.RegenerateTagSiblingsAndParentsCache( only_these_service_ids = interested_service_ids )
                
            
            self.modules_tag_search.Drop( self.modules_services.combined_file_service_id, service_id )
            
            file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_TAG_LOOKUP_CACHES )
            
            for file_service_id in file_service_ids:
                
                self.modules_tag_search.Drop( file_service_id, service_id )
                
            
        
        if service_type in HC.FILE_SERVICES_WITH_SPECIFIC_TAG_LOOKUP_CACHES:
            
            # this was a file service: drop its per-tag-service search caches
            tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
            
            for tag_service_id in tag_service_ids:
                
                self.modules_tag_search.Drop( service_id, tag_service_id )
                
            
        
        self.modules_services.DeleteService( service_id )
        
        service_update = HydrusData.ServiceUpdate( HC.SERVICE_UPDATE_RESET )
        
        service_keys_to_service_updates = { service_key : [ service_update ] }
        
        self.pub_service_updates_after_commit( service_keys_to_service_updates )
        
def _DeleteServiceDirectory( self, service_id, dirname ):
directory_id = self.modules_texts.GetTextId( dirname )
self._Execute( 'DELETE FROM service_directories WHERE service_id = ? AND directory_id = ?;', ( service_id, directory_id ) )
self._Execute( 'DELETE FROM service_directory_file_map WHERE service_id = ? AND directory_id = ?;', ( service_id, directory_id ) )
def _DeleteServiceDropFiles( self, service_id, service_type ):
if service_type == HC.FILE_REPOSITORY:
self._Execute( 'DELETE FROM remote_thumbnails WHERE service_id = ?;', ( service_id, ) )
if service_type == HC.IPFS:
self._Execute( 'DELETE FROM service_filenames WHERE service_id = ?;', ( service_id, ) )
self._Execute( 'DELETE FROM service_directories WHERE service_id = ?;', ( service_id, ) )
self._Execute( 'DELETE FROM service_directory_file_map WHERE service_id = ?;', ( service_id, ) )
if service_type in HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES:
self.modules_files_storage.DropFilesTables( service_id )
if service_type in HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES:
tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
for tag_service_id in tag_service_ids:
self._CacheSpecificMappingsDrop( service_id, tag_service_id )
def _DeleteServiceDropMappings( self, service_id, service_type ):
if service_type in HC.REAL_TAG_SERVICES:
self.modules_mappings_storage.DropMappingsTables( service_id )
self._CacheCombinedFilesMappingsDrop( service_id )
file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES )
for file_service_id in file_service_ids:
self._CacheSpecificMappingsDrop( file_service_id, service_id )
def _DeleteServiceInfo( self, service_key = None, types_to_delete = None ):
predicates = []
if service_key is not None:
service_id = self.modules_services.GetServiceId( service_key )
predicates.append( 'service_id = {}'.format( service_id ) )
if types_to_delete is not None:
predicates.append( 'info_type IN {}'.format( HydrusData.SplayListForDB( types_to_delete ) ) )
if len( predicates ) > 0:
predicates_string = ' WHERE {}'.format( ' AND '.join( predicates ) )
else:
predicates_string = ''
self._Execute( 'DELETE FROM service_info{};'.format( predicates_string ) )
self._cursor_transaction_wrapper.pub_after_job( 'notify_new_pending' )
def _DisplayCatastrophicError( self, text ):
message = 'The db encountered a serious error! This is going to be written to the log as well, but here it is for a screenshot:'
message += os.linesep * 2
message += text
HydrusData.DebugPrint( message )
self._controller.SafeShowCriticalMessage( 'hydrus db failed', message )
    def _DoAfterJobWork( self ):
        """Flush work queued during the job now that it is done.
        
        Pushes queued content updates to the media result cache and gui, and
        regenerates tags managers for any files or tags flagged as stale,
        before handing off to the base class.
        """
        
        for service_keys_to_content_updates in self._after_job_content_update_jobs:
            
            self._weakref_media_result_cache.ProcessContentUpdates( service_keys_to_content_updates )
            
            self._cursor_transaction_wrapper.pub_after_job( 'content_updates_gui', service_keys_to_content_updates )
            
        
        if len( self._regen_tags_managers_hash_ids ) > 0:
            
            # only regenerate for files the ui is actually still holding
            hash_ids_to_do = self._weakref_media_result_cache.FilterFiles( self._regen_tags_managers_hash_ids )
            
            if len( hash_ids_to_do ) > 0:
                
                hash_ids_to_tags_managers = self._GetForceRefreshTagsManagers( hash_ids_to_do )
                
                self._weakref_media_result_cache.SilentlyTakeNewTagsManagers( hash_ids_to_tags_managers )
                
            
        
        if len( self._regen_tags_managers_tag_ids ) > 0:
            
            tag_ids_to_tags = self.modules_tags_local_cache.GetTagIdsToTags( tag_ids = self._regen_tags_managers_tag_ids )
            
            tags = { tag_ids_to_tags[ tag_id ] for tag_id in self._regen_tags_managers_tag_ids }
            
            # any cached file carrying one of these tags needs a fresh tags manager
            hash_ids_to_do = self._weakref_media_result_cache.FilterFilesWithTags( tags )
            
            if len( hash_ids_to_do ) > 0:
                
                hash_ids_to_tags_managers = self._GetForceRefreshTagsManagers( hash_ids_to_do )
                
                self._weakref_media_result_cache.SilentlyTakeNewTagsManagers( hash_ids_to_tags_managers )
                
                self._cursor_transaction_wrapper.pub_after_job( 'refresh_all_tag_presentation_gui' )
                
            
        
        HydrusDB.HydrusDB._DoAfterJobWork( self )
        
    def _DuplicatesGetRandomPotentialDuplicateHashes( self, file_search_context: ClientSearch.FileSearchContext, both_files_match, pixel_dupes_preference, max_hamming_distance ):
        """Pick a random file that has potential duplicate pairs under the given
        search context and return the hashes of it plus all its potential pairs.
        
        Returns an empty list if no valid group can be found.
        """
        
        db_location_context = self.modules_files_storage.GetDBLocationContext( file_search_context.GetLocationContext() )
        
        is_complicated_search = False
        
        with self._MakeTemporaryIntegerTable( [], 'hash_id' ) as temp_table_name:
            
            # first we get a sample of current potential pairs in the db, given our limiting search context
            
            allowed_hash_ids = None
            preferred_hash_ids = None
            
            if file_search_context.IsJustSystemEverything() or file_search_context.HasNoPredicates():
                
                table_join = self.modules_files_duplicates.DuplicatesGetPotentialDuplicatePairsTableJoinOnEverythingSearchResults( db_location_context, pixel_dupes_preference, max_hamming_distance )
                
            else:
                
                is_complicated_search = True
                
                query_hash_ids = self._GetHashIdsFromQuery( file_search_context, apply_implicit_limit = False )
                
                # both_files_match restricts both sides of a pair to the search; otherwise only one side is preferred
                if both_files_match:
                    
                    allowed_hash_ids = query_hash_ids
                    
                else:
                    
                    preferred_hash_ids = query_hash_ids
                    
                
                self._ExecuteMany( 'INSERT OR IGNORE INTO {} ( hash_id ) VALUES ( ? );'.format( temp_table_name ), ( ( hash_id, ) for hash_id in query_hash_ids ) )
                
                self._AnalyzeTempTable( temp_table_name )
                
                table_join = self.modules_files_duplicates.DuplicatesGetPotentialDuplicatePairsTableJoinOnSearchResults( db_location_context, temp_table_name, both_files_match, pixel_dupes_preference, max_hamming_distance )
                
            
            potential_media_ids = set()
            
            # distinct important here for the search results table join
            for ( smaller_media_id, larger_media_id ) in self._Execute( 'SELECT DISTINCT smaller_media_id, larger_media_id FROM {};'.format( table_join ) ):
                
                potential_media_ids.add( smaller_media_id )
                potential_media_ids.add( larger_media_id )
                
                # cap the sample so this stays fast on clients with many potentials
                if len( potential_media_ids ) >= 1000:
                    
                    break
                    
                
            
            # now let's randomly select a file in these medias
            
            potential_media_ids = list( potential_media_ids )
            
            random.shuffle( potential_media_ids )
            
            chosen_hash_id = None
            
            for potential_media_id in potential_media_ids:
                
                best_king_hash_id = self.modules_files_duplicates.DuplicatesGetBestKingId( potential_media_id, db_location_context, allowed_hash_ids = allowed_hash_ids, preferred_hash_ids = preferred_hash_ids )
                
                if best_king_hash_id is not None:
                    
                    chosen_hash_id = best_king_hash_id
                    
                    break
                    
                
            
        
        if chosen_hash_id is None:
            
            return []
            
        
        hash = self.modules_hashes_local_cache.GetHash( chosen_hash_id )
        
        # query_hash_ids is only bound when is_complicated_search is True, so this guard is load-bearing
        if is_complicated_search and both_files_match:
            
            allowed_hash_ids = query_hash_ids
            
        else:
            
            allowed_hash_ids = None
            
        
        location_context = file_search_context.GetLocationContext()
        
        return self.modules_files_duplicates.DuplicatesGetFileHashesByDuplicateType( location_context, hash, HC.DUPLICATE_POTENTIAL, allowed_hash_ids = allowed_hash_ids, preferred_hash_ids = preferred_hash_ids )
        
    def _DuplicatesGetPotentialDuplicatePairsForFiltering( self, file_search_context: ClientSearch.FileSearchContext, both_files_match, pixel_dupes_preference, max_hamming_distance ):
        """Build a batch of potential duplicate pairs (as hash pairs) for the
        duplicate filter, sized by the 'duplicate_filter_max_batch_size' option.
        
        Pairs are grouped so decisions in one batch do not overlap, and smaller-
        distance (more similar) pairs are served first.
        """
        
        # we need to batch non-intersecting decisions here to keep it simple at the gui-level
        # we also want to maximise per-decision value
        
        # now we will fetch some unknown pairs
        
        db_location_context = self.modules_files_storage.GetDBLocationContext( file_search_context.GetLocationContext() )
        
        with self._MakeTemporaryIntegerTable( [], 'hash_id' ) as temp_table_name:
            
            allowed_hash_ids = None
            preferred_hash_ids = None
            
            if file_search_context.IsJustSystemEverything() or file_search_context.HasNoPredicates():
                
                table_join = self.modules_files_duplicates.DuplicatesGetPotentialDuplicatePairsTableJoinOnEverythingSearchResults( db_location_context, pixel_dupes_preference, max_hamming_distance )
                
            else:
                
                query_hash_ids = self._GetHashIdsFromQuery( file_search_context, apply_implicit_limit = False )
                
                if both_files_match:
                    
                    allowed_hash_ids = query_hash_ids
                    
                else:
                    
                    preferred_hash_ids = query_hash_ids
                    
                
                self._ExecuteMany( 'INSERT OR IGNORE INTO {} ( hash_id ) VALUES ( ? );'.format( temp_table_name ), ( ( hash_id, ) for hash_id in query_hash_ids ) )
                
                self._AnalyzeTempTable( temp_table_name )
                
                table_join = self.modules_files_duplicates.DuplicatesGetPotentialDuplicatePairsTableJoinOnSearchResults( db_location_context, temp_table_name, both_files_match, pixel_dupes_preference, max_hamming_distance )
                
            
            # distinct important here for the search results table join
            result = self._Execute( 'SELECT DISTINCT smaller_media_id, larger_media_id, distance FROM {} LIMIT 2500;'.format( table_join ) ).fetchall()
            
        
        MAX_BATCH_SIZE = HG.client_controller.new_options.GetInteger( 'duplicate_filter_max_batch_size' )
        
        batch_of_pairs_of_media_ids = []
        seen_media_ids = set()
        
        distances_to_pairs = HydrusData.BuildKeyToListDict( ( ( distance, ( smaller_media_id, larger_media_id ) ) for ( smaller_media_id, larger_media_id, distance ) in result ) )
        
        distances = sorted( distances_to_pairs.keys() )
        
        # we want to preference pairs that have the smallest distance between them. deciding on more similar files first helps merge dupes before dealing with alts so reduces potentials more quickly
        
        for distance in distances:
            
            result_pairs_for_this_distance = distances_to_pairs[ distance ]
            
            # convert them into possible groups per each possible 'master hash_id', and value them
            
            master_media_ids_to_groups = collections.defaultdict( list )
            
            for pair in result_pairs_for_this_distance:
                
                ( smaller_media_id, larger_media_id ) = pair
                
                master_media_ids_to_groups[ smaller_media_id ].append( pair )
                master_media_ids_to_groups[ larger_media_id ].append( pair )
                
            
            master_hash_ids_to_values = collections.Counter()
            
            for ( media_id, pairs ) in master_media_ids_to_groups.items():
                
                # negative so we later serve up smallest groups first
                # we shall say for now that smaller groups are more useful to front-load because it lets us solve simple problems first
                master_hash_ids_to_values[ media_id ] = - len( pairs )
                
            
            # now let's add decision groups to our batch
            # we exclude hashes we have seen before in each batch so we aren't treading over ground that was implicitly solved by a previous decision in the batch
            
            for ( master_media_id, count ) in master_hash_ids_to_values.most_common():
                
                if master_media_id in seen_media_ids:
                    
                    continue
                    
                
                seen_media_ids_for_this_master_media_id = set()
                
                for pair in master_media_ids_to_groups[ master_media_id ]:
                    
                    ( smaller_media_id, larger_media_id ) = pair
                    
                    if smaller_media_id in seen_media_ids or larger_media_id in seen_media_ids:
                        
                        continue
                        
                    
                    seen_media_ids_for_this_master_media_id.add( smaller_media_id )
                    seen_media_ids_for_this_master_media_id.add( larger_media_id )
                    
                    batch_of_pairs_of_media_ids.append( pair )
                    
                    if len( batch_of_pairs_of_media_ids ) >= MAX_BATCH_SIZE:
                        
                        break
                        
                    
                
                seen_media_ids.update( seen_media_ids_for_this_master_media_id )
                
                if len( batch_of_pairs_of_media_ids ) >= MAX_BATCH_SIZE:
                    
                    break
                    
                
            
            if len( batch_of_pairs_of_media_ids ) >= MAX_BATCH_SIZE:
                
                break
                
            
        
        seen_hash_ids = set()
        
        media_ids_to_best_king_ids = {}
        
        for media_id in seen_media_ids:
            
            best_king_hash_id = self.modules_files_duplicates.DuplicatesGetBestKingId( media_id, db_location_context, allowed_hash_ids = allowed_hash_ids, preferred_hash_ids = preferred_hash_ids )
            
            if best_king_hash_id is not None:
                
                seen_hash_ids.add( best_king_hash_id )
                
                media_ids_to_best_king_ids[ media_id ] = best_king_hash_id
                
            
        
        # pairs whose media lost their king under the current search context are silently dropped here
        batch_of_pairs_of_hash_ids = [ ( media_ids_to_best_king_ids[ smaller_media_id ], media_ids_to_best_king_ids[ larger_media_id ] ) for ( smaller_media_id, larger_media_id ) in batch_of_pairs_of_media_ids if smaller_media_id in media_ids_to_best_king_ids and larger_media_id in media_ids_to_best_king_ids ]
        
        hash_ids_to_hashes = self.modules_hashes_local_cache.GetHashIdsToHashes( hash_ids = seen_hash_ids )
        
        batch_of_pairs_of_hashes = [ ( hash_ids_to_hashes[ hash_id_a ], hash_ids_to_hashes[ hash_id_b ] ) for ( hash_id_a, hash_id_b ) in batch_of_pairs_of_hash_ids ]
        
        return batch_of_pairs_of_hashes
        
    def _DuplicatesGetPotentialDuplicatesCount( self, file_search_context, both_files_match, pixel_dupes_preference, max_hamming_distance ):
        """Count distinct potential duplicate pairs matching the given search context."""
        
        db_location_context = self.modules_files_storage.GetDBLocationContext( file_search_context.GetLocationContext() )
        
        with self._MakeTemporaryIntegerTable( [], 'hash_id' ) as temp_table_name:
            
            if file_search_context.IsJustSystemEverything() or file_search_context.HasNoPredicates():
                
                table_join = self.modules_files_duplicates.DuplicatesGetPotentialDuplicatePairsTableJoinOnEverythingSearchResults( db_location_context, pixel_dupes_preference, max_hamming_distance )
                
            else:
                
                # load the search results into the temp table so the join can filter on them
                query_hash_ids = self._GetHashIdsFromQuery( file_search_context, apply_implicit_limit = False )
                
                self._ExecuteMany( 'INSERT OR IGNORE INTO {} ( hash_id ) VALUES ( ? );'.format( temp_table_name ), ( ( hash_id, ) for hash_id in query_hash_ids ) )
                
                self._AnalyzeTempTable( temp_table_name )
                
                table_join = self.modules_files_duplicates.DuplicatesGetPotentialDuplicatePairsTableJoinOnSearchResults( db_location_context, temp_table_name, both_files_match, pixel_dupes_preference, max_hamming_distance )
                
            
            # distinct important here for the search results table join
            ( potential_duplicates_count, ) = self._Execute( 'SELECT COUNT( * ) FROM ( SELECT DISTINCT smaller_media_id, larger_media_id FROM {} );'.format( table_join ) ).fetchone()
            
        
        return potential_duplicates_count
        
    def _DuplicatesSetDuplicatePairStatus( self, pair_info ):
        """Apply the user's duplicate filter decisions.
        
        :param pair_info: iterable of
            ( duplicate_type, hash_a, hash_b, service_keys_to_content_updates )
            tuples; content updates are applied first, then the duplicate
            relationship is recorded (king setting, group merges, alternates,
            false positives, or re-queueing as potential).
        """
        
        for ( duplicate_type, hash_a, hash_b, service_keys_to_content_updates ) in pair_info:
            
            if len( service_keys_to_content_updates ) > 0:
                
                self._ProcessContentUpdates( service_keys_to_content_updates )
                
            
            hash_id_a = self.modules_hashes_local_cache.GetHashId( hash_a )
            hash_id_b = self.modules_hashes_local_cache.GetHashId( hash_b )
            
            media_id_a = self.modules_files_duplicates.DuplicatesGetMediaId( hash_id_a )
            media_id_b = self.modules_files_duplicates.DuplicatesGetMediaId( hash_id_b )
            
            smaller_media_id = min( media_id_a, media_id_b )
            larger_media_id = max( media_id_a, media_id_b )
            
            # this shouldn't be strictly needed, but lets do it here anyway to catch unforeseen problems
            # it is ok to remove this even if we are just about to add it back in--this clears out invalid pairs and increases priority with distance 0
            self._Execute( 'DELETE FROM potential_duplicate_pairs WHERE smaller_media_id = ? AND larger_media_id = ?;', ( smaller_media_id, larger_media_id ) )
            
            if hash_id_a == hash_id_b:
                
                continue
                
            
            if duplicate_type in ( HC.DUPLICATE_FALSE_POSITIVE, HC.DUPLICATE_ALTERNATE ):
                
                if duplicate_type == HC.DUPLICATE_FALSE_POSITIVE:
                    
                    alternates_group_id_a = self.modules_files_duplicates.DuplicatesGetAlternatesGroupId( media_id_a )
                    alternates_group_id_b = self.modules_files_duplicates.DuplicatesGetAlternatesGroupId( media_id_b )
                    
                    self.modules_files_duplicates.DuplicatesSetFalsePositive( alternates_group_id_a, alternates_group_id_b )
                    
                elif duplicate_type == HC.DUPLICATE_ALTERNATE:
                    
                    if media_id_a == media_id_b:
                        
                        # files within one duplicate group cannot be alternates: split the non-king file out into its own group first
                        king_hash_id = self.modules_files_duplicates.DuplicatesGetKingHashId( media_id_a )
                        
                        hash_id_to_remove = hash_id_b if king_hash_id == hash_id_a else hash_id_a
                        
                        self.modules_files_duplicates.DuplicatesRemoveMediaIdMember( hash_id_to_remove )
                        
                        # refresh ids after the split
                        media_id_a = self.modules_files_duplicates.DuplicatesGetMediaId( hash_id_a )
                        media_id_b = self.modules_files_duplicates.DuplicatesGetMediaId( hash_id_b )
                        
                        smaller_media_id = min( media_id_a, media_id_b )
                        larger_media_id = max( media_id_a, media_id_b )
                        
                    
                    self.modules_files_duplicates.DuplicatesSetAlternates( media_id_a, media_id_b )
                    
                
            elif duplicate_type in ( HC.DUPLICATE_BETTER, HC.DUPLICATE_WORSE, HC.DUPLICATE_SAME_QUALITY ):
                
                if duplicate_type == HC.DUPLICATE_WORSE:
                    
                    # normalise 'b is worse' to 'a is better' by swapping the pair
                    ( hash_id_a, hash_id_b ) = ( hash_id_b, hash_id_a )
                    ( media_id_a, media_id_b ) = ( media_id_b, media_id_a )
                    
                    duplicate_type = HC.DUPLICATE_BETTER
                    
                
                king_hash_id_a = self.modules_files_duplicates.DuplicatesGetKingHashId( media_id_a )
                king_hash_id_b = self.modules_files_duplicates.DuplicatesGetKingHashId( media_id_b )
                
                if duplicate_type == HC.DUPLICATE_BETTER:
                    
                    if media_id_a == media_id_b:
                        
                        if hash_id_b == king_hash_id_b:
                            
                            # user manually set that a > King A, hence we are setting a new king within a group
                            self.modules_files_duplicates.DuplicatesSetKing( hash_id_a, media_id_a )
                            
                        
                    else:
                        
                        if hash_id_b != king_hash_id_b:
                            
                            # user manually set that a member of A is better than a non-King of B. remove b from B and merge it into A
                            self.modules_files_duplicates.DuplicatesRemoveMediaIdMember( hash_id_b )
                            
                            media_id_b = self.modules_files_duplicates.DuplicatesGetMediaId( hash_id_b )
                            
                            # b is now the King of its new group
                            
                        
                        # a member of A is better than King B, hence B can merge into A
                        self.modules_files_duplicates.DuplicatesMergeMedias( media_id_a, media_id_b )
                        
                    
                elif duplicate_type == HC.DUPLICATE_SAME_QUALITY:
                    
                    if media_id_a != media_id_b:
                        
                        a_is_king = hash_id_a == king_hash_id_a
                        b_is_king = hash_id_b == king_hash_id_b
                        
                        if not ( a_is_king or b_is_king ):
                            
                            # if neither file is the king, remove B from B and merge it into A
                            self.modules_files_duplicates.DuplicatesRemoveMediaIdMember( hash_id_b )
                            
                            media_id_b = self.modules_files_duplicates.DuplicatesGetMediaId( hash_id_b )
                            
                            superior_media_id = media_id_a
                            mergee_media_id = media_id_b
                            
                        elif not a_is_king:
                            
                            # if one of our files is not the king, merge into that group, as the king of that is better than all of the other
                            superior_media_id = media_id_a
                            mergee_media_id = media_id_b
                            
                        elif not b_is_king:
                            
                            superior_media_id = media_id_b
                            mergee_media_id = media_id_a
                            
                        else:
                            
                            # if both are king, merge into A
                            superior_media_id = media_id_a
                            mergee_media_id = media_id_b
                            
                        
                        self.modules_files_duplicates.DuplicatesMergeMedias( superior_media_id, mergee_media_id )
                        
                    
                
            elif duplicate_type == HC.DUPLICATE_POTENTIAL:
                
                # re-queue the pair for the filter at distance 0 (top priority)
                potential_duplicate_media_ids_and_distances = [ ( media_id_b, 0 ) ]
                
                self.modules_files_duplicates.DuplicatesAddPotentialDuplicates( media_id_a, potential_duplicate_media_ids_and_distances )
                
            
        
def _FilterExistingTags( self, service_key, tags ):
    """Return the subset of the given tags that have at least one current mapping on the given tag service."""
    
    tag_service_id = self.modules_services.GetServiceId( service_key )
    
    # map tag ids back to the original tag strings so we can return strings at the end
    ids_to_tags = {}
    
    for tag in tags:
        
        ids_to_tags[ self.modules_tags.GetTagId( tag ) ] = tag
        
    
    with self._MakeTemporaryIntegerTable( set( ids_to_tags.keys() ), 'tag_id' ) as temp_tag_id_table_name:
        
        counts = self.modules_mappings_counts.GetCountsForTags( ClientTags.TAG_DISPLAY_STORAGE, self.modules_services.combined_file_service_id, tag_service_id, temp_tag_id_table_name )
        
    
    # a tag 'exists' here if it has a non-zero current count anywhere
    return { ids_to_tags[ tag_id ] for ( tag_id, current_count, pending_count ) in counts if current_count > 0 }
def _FilterExistingUpdateMappings( self, tag_service_id, mappings_ids, action ):
    """Cull ( tag_id, hash_ids ) mapping rows down to those the given content action would actually change.
    
    For ADD/DELETE/PEND, rows already present in the corresponding mappings table are dropped;
    for RESCIND_PEND, only rows that are actually pending survive. Single-hash rows use a point
    lookup; multi-hash rows use a temp-table join. Returns a new list of ( tag_id, valid_hash_ids ).
    """
    
    if len( mappings_ids ) == 0:
        
        return mappings_ids
        
    
    ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = ClientDBMappingsStorage.GenerateMappingsTableNames( tag_service_id )
    
    culled_mappings_ids = []
    
    for ( tag_id, hash_ids ) in mappings_ids:
        
        if len( hash_ids ) == 0:
            
            continue
            
        elif len( hash_ids ) == 1:
            
            # fast path: one point lookup beats setting up a temp table
            ( hash_id, ) = hash_ids
            
            if action == HC.CONTENT_UPDATE_ADD:
                
                result = self._Execute( 'SELECT 1 FROM {} WHERE tag_id = ? AND hash_id = ?;'.format( current_mappings_table_name ), ( tag_id, hash_id ) ).fetchone()
                
                if result is None:
                    
                    valid_hash_ids = hash_ids
                    
                else:
                    
                    continue
                    
                
            elif action == HC.CONTENT_UPDATE_DELETE:
                
                result = self._Execute( 'SELECT 1 FROM {} WHERE tag_id = ? AND hash_id = ?;'.format( deleted_mappings_table_name ), ( tag_id, hash_id ) ).fetchone()
                
                if result is None:
                    
                    valid_hash_ids = hash_ids
                    
                else:
                    
                    continue
                    
                
            elif action == HC.CONTENT_UPDATE_PEND:
                
                # a pend is redundant if the mapping is already current _or_ already pending
                result = self._Execute( 'SELECT 1 FROM {} WHERE tag_id = ? AND hash_id = ?;'.format( current_mappings_table_name ), ( tag_id, hash_id ) ).fetchone()
                
                if result is None:
                    
                    result = self._Execute( 'SELECT 1 FROM {} WHERE tag_id = ? AND hash_id = ?;'.format( pending_mappings_table_name ), ( tag_id, hash_id ) ).fetchone()
                    
                    if result is None:
                        
                        valid_hash_ids = hash_ids
                        
                    else:
                        
                        continue
                        
                    
                else:
                    
                    continue
                    
                
            elif action == HC.CONTENT_UPDATE_RESCIND_PEND:
                
                # inverted logic: here an _existing_ pending row is exactly what we want to keep
                result = self._Execute( 'SELECT 1 FROM {} WHERE tag_id = ? AND hash_id = ?;'.format( pending_mappings_table_name ), ( tag_id, hash_id ) ).fetchone()
                
                if result is None:
                    
                    continue
                    
                else:
                    
                    valid_hash_ids = hash_ids
                    
                
            
        else:
            
            with self._MakeTemporaryIntegerTable( hash_ids, 'hash_id' ) as temp_hash_ids_table_name:
                
                if action == HC.CONTENT_UPDATE_ADD:
                    
                    existing_hash_ids = self._STS( self._Execute( 'SELECT hash_id FROM {} CROSS JOIN {} USING ( hash_id ) WHERE tag_id = ?;'.format( temp_hash_ids_table_name, current_mappings_table_name ), ( tag_id, ) ) )
                    
                    valid_hash_ids = set( hash_ids ).difference( existing_hash_ids )
                    
                elif action == HC.CONTENT_UPDATE_DELETE:
                    
                    existing_hash_ids = self._STS( self._Execute( 'SELECT hash_id FROM {} CROSS JOIN {} USING ( hash_id ) WHERE tag_id = ?;'.format( temp_hash_ids_table_name, deleted_mappings_table_name ), ( tag_id, ) ) )
                    
                    valid_hash_ids = set( hash_ids ).difference( existing_hash_ids )
                    
                elif action == HC.CONTENT_UPDATE_PEND:
                    
                    # the union of current and pending counts as 'already there' for a pend
                    existing_hash_ids = self._STS( self._Execute( 'SELECT hash_id FROM {} CROSS JOIN {} USING ( hash_id ) WHERE tag_id = ?;'.format( temp_hash_ids_table_name, current_mappings_table_name ), ( tag_id, ) ) )
                    
                    existing_hash_ids.update( self._STI( self._Execute( 'SELECT hash_id FROM {} CROSS JOIN {} USING ( hash_id ) WHERE tag_id = ?;'.format( temp_hash_ids_table_name, pending_mappings_table_name ), ( tag_id, ) ) ) )
                    
                    valid_hash_ids = set( hash_ids ).difference( existing_hash_ids )
                    
                elif action == HC.CONTENT_UPDATE_RESCIND_PEND:
                    
                    # only rows that are actually pending can have the pend rescinded
                    valid_hash_ids = self._STS( self._Execute( 'SELECT hash_id FROM {} CROSS JOIN {} USING ( hash_id ) WHERE tag_id = ?;'.format( temp_hash_ids_table_name, pending_mappings_table_name ), ( tag_id, ) ) )
                    
                
            
        
        if len( valid_hash_ids ) > 0:
            
            culled_mappings_ids.append( ( tag_id, valid_hash_ids ) )
            
        
    
    return culled_mappings_ids
def _FilterForFileDeleteLock( self, service_id, hash_ids ):
    """If the archived-file delete lock is on and this is a local file service, reduce hash_ids to inboxed files only."""
    
    # eventually extend this to the metadata conditional object
    if not HG.client_controller.new_options.GetBoolean( 'delete_lock_for_archived_files' ):
        
        return hash_ids
        
    
    service = self.modules_services.GetService( service_id )
    
    if service.GetServiceType() not in HC.LOCAL_FILE_SERVICES:
        
        return hash_ids
        
    
    # archived (non-inbox) files are delete-locked, so only inboxed files survive the filter
    return set( hash_ids ).intersection( self.modules_files_metadata_basic.inbox_hash_ids )
def _FilterHashesByService( self, file_service_key: bytes, hashes: typing.Sequence[ bytes ] ) -> typing.List[ bytes ]:
    """Return, preserving caller order (nice for UI), those hashes that are current on the given file service."""
    
    # 'all known files' passes everything through
    if file_service_key == CC.COMBINED_FILE_SERVICE_KEY:
        
        return list( hashes )
        
    
    service_id = self.modules_services.GetServiceId( file_service_key )
    
    known_hashes_to_hash_ids = {}
    
    for hash in hashes:
        
        if self.modules_hashes.HasHash( hash ):
            
            known_hashes_to_hash_ids[ hash ] = self.modules_hashes_local_cache.GetHashId( hash )
            
        
    
    current_hash_ids = self.modules_files_storage.FilterHashIdsToStatus( service_id, set( known_hashes_to_hash_ids.values() ), HC.CONTENT_STATUS_CURRENT )
    
    # iterate the original sequence so output order matches input order
    return [ hash for hash in hashes if hash in known_hashes_to_hash_ids and known_hashes_to_hash_ids[ hash ] in current_hash_ids ]
def _FixLogicallyInconsistentMappings( self, tag_service_key = None ):
    """Find and fix mappings that are in contradictory states: both current and pending
    (the pend is rescinded), or both deleted and petitioned (the petition is rescinded).
    
    Runs over all real tag services, or just the one given. Shows progress via a modal
    popup job and reports the result to the user when done.
    """
    
    job_key = ClientThreading.JobKey( cancellable = True )
    
    total_fixed = 0
    
    try:
        
        job_key.SetStatusTitle( 'fixing logically inconsistent mappings' )
        
        self._controller.pub( 'modal_message', job_key )
        
        if tag_service_key is None:
            
            tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
            
        else:
            
            tag_service_ids = ( self.modules_services.GetServiceId( tag_service_key ), )
            
        
        for tag_service_id in tag_service_ids:
            
            if job_key.IsCancelled():
                
                break
                
            
            message = 'fixing {}'.format( tag_service_id )
            
            job_key.SetVariable( 'popup_text_1', message )
            
            # brief pause so the popup text has a chance to update
            time.sleep( 0.01 )
            
            ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = ClientDBMappingsStorage.GenerateMappingsTableNames( tag_service_id )
            
            #
            
            # a mapping should never be both current and pending--rescind the pend
            both_current_and_pending_mappings = list(
                HydrusData.BuildKeyToSetDict(
                    self._Execute( 'SELECT tag_id, hash_id FROM {} CROSS JOIN {} USING ( tag_id, hash_id );'.format( pending_mappings_table_name, current_mappings_table_name ) )
                ).items()
            )
            
            total_fixed += sum( ( len( hash_ids ) for ( tag_id, hash_ids ) in both_current_and_pending_mappings ) )
            
            self._UpdateMappings( tag_service_id, pending_rescinded_mappings_ids = both_current_and_pending_mappings )
            
            #
            
            # likewise a mapping should never be both deleted and petitioned--rescind the petition
            both_deleted_and_petitioned_mappings = list(
                HydrusData.BuildKeyToSetDict(
                    self._Execute( 'SELECT tag_id, hash_id FROM {} CROSS JOIN {} USING ( tag_id, hash_id );'.format( petitioned_mappings_table_name, deleted_mappings_table_name ) )
                ).items()
            )
            
            total_fixed += sum( ( len( hash_ids ) for ( tag_id, hash_ids ) in both_deleted_and_petitioned_mappings ) )
            
            self._UpdateMappings( tag_service_id, petitioned_rescinded_mappings_ids = both_deleted_and_petitioned_mappings )
            
        
    finally:
        
        if total_fixed == 0:
            
            HydrusData.ShowText( 'No inconsistent mappings found!' )
            
        else:
            
            # the cached pending/petitioned counts are now stale--invalidate and notify the UI
            self._Execute( 'DELETE FROM service_info where info_type IN ( ?, ? );', ( HC.SERVICE_INFO_NUM_PENDING_MAPPINGS, HC.SERVICE_INFO_NUM_PETITIONED_MAPPINGS ) )
            
            self._controller.pub( 'notify_new_pending' )
            
            HydrusData.ShowText( 'Found {} bad mappings! They _should_ be deleted, and your pending counts should be updated.'.format( HydrusData.ToHumanInt( total_fixed ) ) )
            
        
        job_key.DeleteVariable( 'popup_text_2' )
        
        job_key.SetVariable( 'popup_text_1', 'done!' )
        
        job_key.Finish()
        
        job_key.Delete( 5 )
def _GenerateDBJob( self, job_type, synchronous, action, *args, **kwargs ):
    """Factory hook: wrap a db request in the client's job object so it can be queued and waited on."""
    
    return JobDatabaseClient( job_type, synchronous, action, *args, **kwargs )
def _GeneratePredicatesFromTagIdsAndCounts( self, tag_display_type: int, display_tag_service_id: int, tag_ids_to_full_counts, inclusive, job_key = None ):
    """Convert tag ids + count ranges into UI-ready tag Predicates, decorated with sibling/parent info.
    
    tag_ids_to_full_counts maps tag_id -> ( min_current, max_current, min_pending, max_pending ).
    For STORAGE display, predicates get ideal-sibling, known-sibling-chain, and known-parent data;
    for ACTUAL display, only known sibling chains are attached. Returns [] early on job cancel.
    """
    
    tag_ids = set( tag_ids_to_full_counts.keys() )
    
    predicates = []
    
    if tag_display_type == ClientTags.TAG_DISPLAY_STORAGE:
        
        if display_tag_service_id != self.modules_services.combined_tag_service_id:
            
            # sibling data: which of our tags are in a sibling chain, and their full chains
            tag_ids_to_ideal_tag_ids = self.modules_tag_siblings.GetTagsToIdeals( ClientTags.TAG_DISPLAY_ACTUAL, display_tag_service_id, tag_ids )
            
            tag_ids_that_are_sibling_chained = self.modules_tag_siblings.FilterChained( ClientTags.TAG_DISPLAY_ACTUAL, display_tag_service_id, tag_ids )
            
            tag_ids_to_ideal_tag_ids_for_siblings = { tag_id : ideal_tag_id for ( tag_id, ideal_tag_id ) in tag_ids_to_ideal_tag_ids.items() if tag_id in tag_ids_that_are_sibling_chained }
            
            ideal_tag_ids_to_sibling_chain_tag_ids = self.modules_tag_siblings.GetIdealsToChains( ClientTags.TAG_DISPLAY_ACTUAL, display_tag_service_id, set( tag_ids_to_ideal_tag_ids_for_siblings.values() ) )
            
            #
            
            # parent data: ancestors are looked up on the _ideal_ tag of each of our tags
            ideal_tag_ids = set( tag_ids_to_ideal_tag_ids.values() )
            
            ideal_tag_ids_that_are_parent_chained = self.modules_tag_parents.FilterChained( ClientTags.TAG_DISPLAY_ACTUAL, display_tag_service_id, ideal_tag_ids )
            
            tag_ids_to_ideal_tag_ids_for_parents = { tag_id : ideal_tag_id for ( tag_id, ideal_tag_id ) in tag_ids_to_ideal_tag_ids.items() if ideal_tag_id in ideal_tag_ids_that_are_parent_chained }
            
            ideal_tag_ids_to_ancestor_tag_ids = self.modules_tag_parents.GetTagsToAncestors( ClientTags.TAG_DISPLAY_ACTUAL, display_tag_service_id, set( tag_ids_to_ideal_tag_ids_for_parents.values() ) )
            
        else:
            
            # shouldn't ever happen with storage display
            tag_ids_to_ideal_tag_ids_for_siblings = {}
            tag_ids_to_ideal_tag_ids_for_parents = {}
            
            ideal_tag_ids_to_sibling_chain_tag_ids = {}
            ideal_tag_ids_to_ancestor_tag_ids = {}
            
        
        # we need display strings for our tags plus every chain member and ancestor we will attach
        tag_ids_we_want_to_look_up = set( tag_ids )
        tag_ids_we_want_to_look_up.update( itertools.chain.from_iterable( ideal_tag_ids_to_sibling_chain_tag_ids.values() ) )
        tag_ids_we_want_to_look_up.update( itertools.chain.from_iterable( ideal_tag_ids_to_ancestor_tag_ids.values() ) )
        
        if job_key is not None and job_key.IsCancelled():
            
            return []
            
        
        tag_ids_to_tags = self.modules_tags_local_cache.GetTagIdsToTags( tag_ids = tag_ids_we_want_to_look_up )
        
        if job_key is not None and job_key.IsCancelled():
            
            return []
            
        
        ideal_tag_ids_to_chain_tags = { ideal_tag_id : { tag_ids_to_tags[ chain_tag_id ] for chain_tag_id in chain_tag_ids } for ( ideal_tag_id, chain_tag_ids ) in ideal_tag_ids_to_sibling_chain_tag_ids.items() }
        
        ideal_tag_ids_to_ancestor_tags = { ideal_tag_id : { tag_ids_to_tags[ ancestor_tag_id ] for ancestor_tag_id in ancestor_tag_ids } for ( ideal_tag_id, ancestor_tag_ids ) in ideal_tag_ids_to_ancestor_tag_ids.items() }
        
        for ( tag_id, ( min_current_count, max_current_count, min_pending_count, max_pending_count ) ) in tag_ids_to_full_counts.items():
            
            tag = tag_ids_to_tags[ tag_id ]
            
            predicate = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, value = tag, inclusive = inclusive, count = ClientSearch.PredicateCount( min_current_count, min_pending_count, max_current_count, max_pending_count ) )
            
            if tag_id in tag_ids_to_ideal_tag_ids_for_siblings:
                
                ideal_tag_id = tag_ids_to_ideal_tag_ids_for_siblings[ tag_id ]
                
                # only advertise an ideal sibling if this tag is not already the ideal
                if ideal_tag_id != tag_id:
                    
                    predicate.SetIdealSibling( tag_ids_to_tags[ ideal_tag_id ] )
                    
                
                predicate.SetKnownSiblings( ideal_tag_ids_to_chain_tags[ ideal_tag_id ] )
                
            
            if tag_id in tag_ids_to_ideal_tag_ids_for_parents:
                
                ideal_tag_id = tag_ids_to_ideal_tag_ids_for_parents[ tag_id ]
                
                parents = ideal_tag_ids_to_ancestor_tags[ ideal_tag_id ]
                
                if len( parents ) > 0:
                    
                    predicate.SetKnownParents( parents )
                    
                
            
            predicates.append( predicate )
            
        
    elif tag_display_type == ClientTags.TAG_DISPLAY_ACTUAL:
        
        tag_ids_to_known_chain_tag_ids = collections.defaultdict( set )
        
        if display_tag_service_id == self.modules_services.combined_tag_service_id:
            
            search_tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
            
        else:
            
            search_tag_service_ids = ( display_tag_service_id, )
            
        
        # union the sibling chains across every service we are searching
        for search_tag_service_id in search_tag_service_ids:
            
            tag_ids_that_are_sibling_chained = self.modules_tag_siblings.FilterChained( ClientTags.TAG_DISPLAY_ACTUAL, search_tag_service_id, tag_ids )
            
            tag_ids_to_ideal_tag_ids_for_siblings = self.modules_tag_siblings.GetTagsToIdeals( ClientTags.TAG_DISPLAY_ACTUAL, search_tag_service_id, tag_ids_that_are_sibling_chained )
            
            ideal_tag_ids = set( tag_ids_to_ideal_tag_ids_for_siblings.values() )
            
            ideal_tag_ids_to_sibling_chain_tag_ids = self.modules_tag_siblings.GetIdealsToChains( ClientTags.TAG_DISPLAY_ACTUAL, search_tag_service_id, ideal_tag_ids )
            
            for ( tag_id, ideal_tag_id ) in tag_ids_to_ideal_tag_ids_for_siblings.items():
                
                tag_ids_to_known_chain_tag_ids[ tag_id ].update( ideal_tag_ids_to_sibling_chain_tag_ids[ ideal_tag_id ] )
                
            
        
        tag_ids_we_want_to_look_up = set( tag_ids ).union( itertools.chain.from_iterable( tag_ids_to_known_chain_tag_ids.values() ) )
        
        if job_key is not None and job_key.IsCancelled():
            
            return []
            
        
        tag_ids_to_tags = self.modules_tags_local_cache.GetTagIdsToTags( tag_ids = tag_ids_we_want_to_look_up )
        
        if job_key is not None and job_key.IsCancelled():
            
            return []
            
        
        for ( tag_id, ( min_current_count, max_current_count, min_pending_count, max_pending_count ) ) in tag_ids_to_full_counts.items():
            
            tag = tag_ids_to_tags[ tag_id ]
            
            predicate = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, value = tag, inclusive = inclusive, count = ClientSearch.PredicateCount( min_current_count, min_pending_count, max_current_count, max_pending_count ) )
            
            if tag_id in tag_ids_to_known_chain_tag_ids:
                
                chain_tags = { tag_ids_to_tags[ chain_tag_id ] for chain_tag_id in tag_ids_to_known_chain_tag_ids[ tag_id ] }
                
                predicate.SetKnownSiblings( chain_tags )
                
            
            predicates.append( predicate )
            
        
    
    return predicates
def _GetAllTagIds( self, leaf: ClientDBServices.FileSearchContextLeaf, job_key = None ):
    """Fetch every tag id in the given search-domain leaf. Returns an empty set if the job is cancelled mid-read."""
    
    cancelled_hook = None if job_key is None else job_key.IsCancelled
    
    query = '{};'.format( self.modules_tag_search.GetQueryPhraseForTagIds( leaf.file_service_id, leaf.tag_service_id ) )
    
    cursor = self._Execute( query )
    
    # read in batches so a cancel is noticed promptly on a big domain
    found_tag_ids = self._STS( HydrusDB.ReadFromCancellableCursor( cursor, 1024, cancelled_hook = cancelled_hook ) )
    
    if job_key is not None and job_key.IsCancelled():
        
        return set()
        
    
    return set( found_tag_ids )
def _GetAutocompleteCountEstimate( self, tag_display_type: int, tag_service_id: int, file_service_id: int, tag_ids: typing.Collection[ int ], include_current_tags: bool, include_pending_tags: bool ):
count = 0
if not include_current_tags and not include_pending_tags:
return count
( current_count, pending_count ) = self._GetAutocompleteCountEstimateStatuses( tag_display_type, tag_service_id, file_service_id, tag_ids )
if include_current_tags:
count += current_count
if include_current_tags:
count += pending_count
return count
def _GetAutocompleteCountEstimateStatuses( self, tag_display_type: int, tag_service_id: int, file_service_id: int, tag_ids: typing.Collection[ int ] ):
include_current_tags = True
include_pending_tags = True
ids_to_count = self.modules_mappings_counts.GetCounts( tag_display_type, tag_service_id, file_service_id, tag_ids, include_current_tags, include_pending_tags )
current_count = 0
pending_count = 0
for ( current_min, current_max, pending_min, pending_max ) in ids_to_count.values():
current_count += current_min
pending_count += pending_min
return ( current_count, pending_count )
def _GetAutocompleteTagIdsLeaf( self, tag_display_type: int, leaf: ClientDBServices.FileSearchContextLeaf, search_text, exact_match, job_key = None ):
    """Resolve autocomplete search text to matching tag ids in one search-domain leaf, then expand via sibling chains.
    
    Handles namespace wildcards, the '*' hellmode search, and subtag wildcards via temp tables.
    Returns a set of tag ids; [] on a disallowed exact-match wildcard; empty set on cancel.
    """
    
    if search_text == '':
        
        return set()
        
    
    ( namespace, half_complete_searchable_subtag ) = HydrusTags.SplitTag( search_text )
    
    if half_complete_searchable_subtag == '':
        
        return set()
        
    
    # 'anything:subtag' is the same as an unnamespaced subtag search
    if namespace == '*':
        
        namespace = ''
        
    
    if exact_match:
        
        # wildcards are meaningless in an exact-match lookup
        # NOTE(review): this returns a list where every other path returns a set--callers appear to tolerate both
        if '*' in namespace or '*' in half_complete_searchable_subtag:
            
            return []
            
        
    
    if namespace == '':
        
        namespace_ids = []
        
    elif '*' in namespace:
        
        namespace_ids = self.modules_tag_search.GetNamespaceIdsFromWildcard( namespace )
        
    else:
        
        if not self.modules_tags.NamespaceExists( namespace ):
            
            return set()
            
        
        namespace_ids = ( self.modules_tags.GetNamespaceId( namespace ), )
        
    
    if half_complete_searchable_subtag == '*':
        
        if namespace == '':
            
            # hellmode 'get all tags' search
            tag_ids = self._GetAllTagIds( leaf, job_key = job_key )
            
        else:
            
            # 'namespace:*' = every tag in the namespace
            tag_ids = self._GetTagIdsFromNamespaceIds( leaf, namespace_ids, job_key = job_key )
            
        
    else:
        
        tag_ids = set()
        
        with self._MakeTemporaryIntegerTable( [], 'subtag_id' ) as temp_subtag_ids_table_name:
            
            self.modules_tag_search.GetSubtagIdsFromWildcardIntoTable( leaf.file_service_id, leaf.tag_service_id, half_complete_searchable_subtag, temp_subtag_ids_table_name, job_key = job_key )
            
            if namespace == '':
                
                loop_of_tag_ids = self._GetTagIdsFromSubtagIdsTable( leaf.file_service_id, leaf.tag_service_id, temp_subtag_ids_table_name, job_key = job_key )
                
            else:
                
                with self._MakeTemporaryIntegerTable( namespace_ids, 'namespace_id' ) as temp_namespace_ids_table_name:
                    
                    loop_of_tag_ids = self._GetTagIdsFromNamespaceIdsSubtagIdsTables( leaf.file_service_id, leaf.tag_service_id, temp_namespace_ids_table_name, temp_subtag_ids_table_name, job_key = job_key )
                    
                
            
            tag_ids.update( loop_of_tag_ids )
            
        
    
    # now fetch siblings, add to set
    if not isinstance( tag_ids, set ):
        
        tag_ids = set( tag_ids )
        
    
    tag_ids_without_siblings = list( tag_ids )
    
    # NOTE(review): seen_ideal_tag_ids appears to be unused in this method
    seen_ideal_tag_ids = collections.defaultdict( set )
    
    for batch_of_tag_ids in HydrusData.SplitListIntoChunks( tag_ids_without_siblings, 10240 ):
        
        with self._MakeTemporaryIntegerTable( batch_of_tag_ids, 'tag_id' ) as temp_tag_ids_table_name:
            
            if job_key is not None and job_key.IsCancelled():
                
                return set()
                
            
            with self._MakeTemporaryIntegerTable( [], 'ideal_tag_id' ) as temp_ideal_tag_ids_table_name:
                
                # map this batch to its ideal tags, then expand ideals to their full chain members
                self.modules_tag_siblings.FilterChainedIdealsIntoTable( ClientTags.TAG_DISPLAY_ACTUAL, leaf.tag_service_id, temp_tag_ids_table_name, temp_ideal_tag_ids_table_name )
                
                with self._MakeTemporaryIntegerTable( [], 'tag_id' ) as temp_chained_tag_ids_table_name:
                    
                    self.modules_tag_siblings.GetChainsMembersFromIdealsTables( ClientTags.TAG_DISPLAY_ACTUAL, leaf.tag_service_id, temp_ideal_tag_ids_table_name, temp_chained_tag_ids_table_name )
                    
                    tag_ids.update( self._STI( self._Execute( 'SELECT tag_id FROM {};'.format( temp_chained_tag_ids_table_name ) ) ) )
                    
                
            
        
    
    return tag_ids
def _GetAutocompletePredicates(
    self,
    tag_display_type: int,
    file_search_context: ClientSearch.FileSearchContext,
    search_text: str = '',
    exact_match = False,
    inclusive = True,
    add_namespaceless = False,
    search_namespaces_into_full_tags = False,
    zero_count_ok = False,
    job_key = None
):
    """Top-level autocomplete: turn search text into counted, merged tag Predicates across all search-domain leaves.
    
    Returns [] for the degenerate all-known-tags/all-known-files combination and on job cancel.
    """
    
    location_context = file_search_context.GetLocationContext()
    tag_search_context = file_search_context.GetTagSearchContext()
    
    display_tag_service_id = self.modules_services.GetServiceId( tag_search_context.display_service_key )
    
    # combined on both axes would mean counting everything everywhere--not supported
    if tag_search_context.IsAllKnownTags() and location_context.IsAllKnownFiles():
        
        return []
        
    
    include_current = tag_search_context.include_current_tags
    include_pending = tag_search_context.include_pending_tags
    
    all_predicates = []
    
    file_search_context_branch = self._GetFileSearchContextBranch( file_search_context )
    
    for leaf in file_search_context_branch.IterateLeaves():
        
        tag_ids = self._GetAutocompleteTagIdsLeaf( tag_display_type, leaf, search_text, exact_match, job_key = job_key )
        
        if ':' not in search_text and search_namespaces_into_full_tags and not exact_match:
            
            # 'char' -> 'character:samus aran'
            special_search_text = '{}*:*'.format( search_text )
            
            tag_ids.update( self._GetAutocompleteTagIdsLeaf( tag_display_type, leaf, special_search_text, exact_match, job_key = job_key ) )
            
        
        if job_key is not None and job_key.IsCancelled():
            
            return []
            
        
        # the deleted-files domain cannot be cross-referenced against mappings counts
        domain_is_cross_referenced = leaf.file_service_id != self.modules_services.combined_deleted_file_service_id
        
        # chunk the count fetches so a cancel is responsive on huge result sets
        for group_of_tag_ids in HydrusData.SplitIteratorIntoChunks( tag_ids, 1000 ):
            
            if job_key is not None and job_key.IsCancelled():
                
                return []
                
            
            ids_to_count = self.modules_mappings_counts.GetCounts( tag_display_type, leaf.tag_service_id, leaf.file_service_id, group_of_tag_ids, include_current, include_pending, domain_is_cross_referenced = domain_is_cross_referenced, zero_count_ok = zero_count_ok, job_key = job_key )
            
            if len( ids_to_count ) == 0:
                
                continue
                
            
            #
            
            predicates = self._GeneratePredicatesFromTagIdsAndCounts( tag_display_type, display_tag_service_id, ids_to_count, inclusive, job_key = job_key )
            
            all_predicates.extend( predicates )
            
        
    
    if job_key is not None and job_key.IsCancelled():
        
        return []
        
    
    # the same tag may appear from several leaves--merge counts into one predicate per tag
    predicates = ClientSearch.MergePredicates( all_predicates, add_namespaceless = add_namespaceless )
    
    return predicates
def _GetBonedStats( self ):
    """Collect the 'review your fate' statistics: file/inbox/archive/deleted counts and sizes,
    total viewtime, earliest import time, and duplicate/alternate/potential-pair totals.
    
    Returns a plain dict of stat name -> value.
    """
    
    boned_stats = {}
    
    # current local media: all combined-local files minus trash and repository updates
    with self._MakeTemporaryIntegerTable( [], 'hash_id' ) as temp_hash_id_table_name:
        
        current_files_table_name = ClientDBFilesStorage.GenerateFilesTableName( self.modules_services.combined_local_file_service_id, HC.CONTENT_STATUS_CURRENT )
        
        self._Execute( 'INSERT INTO {} ( hash_id ) SELECT hash_id FROM {};'.format( temp_hash_id_table_name, current_files_table_name ) )
        
        for service_id in ( self.modules_services.trash_service_id, self.modules_services.local_update_service_id ):
            
            current_files_table_name = ClientDBFilesStorage.GenerateFilesTableName( service_id, HC.CONTENT_STATUS_CURRENT )
            
            self._Execute( 'DELETE FROM {} WHERE hash_id IN ( SELECT hash_id FROM {} );'.format( temp_hash_id_table_name, current_files_table_name ) )
            
        
        ( num_total, size_total ) = self._Execute( 'SELECT COUNT( hash_id ), SUM( size ) FROM {} CROSS JOIN files_info USING ( hash_id );'.format( temp_hash_id_table_name ) ).fetchone()
        ( num_inbox, size_inbox ) = self._Execute( 'SELECT COUNT( hash_id ), SUM( size ) FROM files_info NATURAL JOIN {} NATURAL JOIN file_inbox;'.format( temp_hash_id_table_name ) ).fetchone()
        
    
    # SUM() over zero rows is NULL, not 0
    if size_total is None:
        
        size_total = 0
        
    
    if size_inbox is None:
        
        size_inbox = 0
        
    
    # deleted: combined-local deleted records plus anything currently in the trash
    with self._MakeTemporaryIntegerTable( [], 'hash_id' ) as temp_hash_id_table_name:
        
        deleted_files_table_name = ClientDBFilesStorage.GenerateFilesTableName( self.modules_services.combined_local_file_service_id, HC.CONTENT_STATUS_DELETED )
        
        self._Execute( 'INSERT INTO {} ( hash_id ) SELECT hash_id FROM {};'.format( temp_hash_id_table_name, deleted_files_table_name ) )
        
        current_files_table_name = ClientDBFilesStorage.GenerateFilesTableName( self.modules_services.trash_service_id, HC.CONTENT_STATUS_CURRENT )
        
        self._Execute( 'INSERT OR IGNORE INTO {} ( hash_id ) SELECT hash_id FROM {};'.format( temp_hash_id_table_name, current_files_table_name ) )
        
        ( num_deleted, size_deleted ) = self._Execute( 'SELECT COUNT( hash_id ), SUM( size ) FROM {} CROSS JOIN files_info USING ( hash_id );'.format( temp_hash_id_table_name ) ).fetchone()
        
    
    if size_deleted is None:
        
        size_deleted = 0
        
    
    num_archive = num_total - num_inbox
    size_archive = size_total - size_inbox
    
    boned_stats[ 'num_inbox' ] = num_inbox
    boned_stats[ 'num_archive' ] = num_archive
    boned_stats[ 'num_deleted' ] = num_deleted
    boned_stats[ 'size_inbox' ] = size_inbox
    boned_stats[ 'size_archive' ] = size_archive
    boned_stats[ 'size_deleted' ] = size_deleted
    
    canvas_types_to_total_viewtimes = { canvas_type : ( views, viewtime ) for ( canvas_type, views, viewtime ) in self._Execute( 'SELECT canvas_type, SUM( views ), SUM( viewtime ) FROM file_viewing_stats GROUP BY canvas_type;' ) }
    
    if CC.CANVAS_PREVIEW not in canvas_types_to_total_viewtimes:
        
        canvas_types_to_total_viewtimes[ CC.CANVAS_PREVIEW ] = ( 0, 0 )
        
    
    if CC.CANVAS_MEDIA_VIEWER not in canvas_types_to_total_viewtimes:
        
        canvas_types_to_total_viewtimes[ CC.CANVAS_MEDIA_VIEWER ] = ( 0, 0 )
        
    
    # deliberate tuple concatenation, giving ( media views, media viewtime, preview views, preview viewtime )
    total_viewtime = canvas_types_to_total_viewtimes[ CC.CANVAS_MEDIA_VIEWER ] + canvas_types_to_total_viewtimes[ CC.CANVAS_PREVIEW ]
    
    #
    
    earliest_import_time = 0
    
    current_files_table_name = ClientDBFilesStorage.GenerateFilesTableName( self.modules_services.combined_local_file_service_id, HC.CONTENT_STATUS_CURRENT )
    
    result = self._Execute( 'SELECT MIN( timestamp ) FROM {};'.format( current_files_table_name ) ).fetchone()
    
    if result is not None and result[0] is not None:
        
        earliest_import_time = result[0]
        
    
    # a deleted record's original import may predate every current file
    deleted_files_table_name = ClientDBFilesStorage.GenerateFilesTableName( self.modules_services.combined_local_file_service_id, HC.CONTENT_STATUS_DELETED )
    
    result = self._Execute( 'SELECT MIN( original_timestamp ) FROM {};'.format( deleted_files_table_name ) ).fetchone()
    
    if result is not None and result[0] is not None:
        
        if earliest_import_time == 0:
            
            earliest_import_time = result[0]
            
        else:
            
            earliest_import_time = min( earliest_import_time, result[0] )
            
        
    
    if earliest_import_time > 0:
        
        boned_stats[ 'earliest_import_time' ] = earliest_import_time
        
    
    #
    
    boned_stats[ 'total_viewtime' ] = total_viewtime
    
    # only groups with more than one member count towards these totals
    total_alternate_files = sum( ( count for ( alternates_group_id, count ) in self._Execute( 'SELECT alternates_group_id, COUNT( * ) FROM alternate_file_group_members GROUP BY alternates_group_id;' ) if count > 1 ) )
    total_duplicate_files = sum( ( count for ( media_id, count ) in self._Execute( 'SELECT media_id, COUNT( * ) FROM duplicate_file_members GROUP BY media_id;' ) if count > 1 ) )
    
    location_context = ClientLocation.GetLocationContextForAllLocalMedia()
    
    db_location_context = self.modules_files_storage.GetDBLocationContext( location_context )
    
    table_join = self.modules_files_duplicates.DuplicatesGetPotentialDuplicatePairsTableJoinOnFileService( db_location_context )
    
    ( total_potential_pairs, ) = self._Execute( 'SELECT COUNT( * ) FROM ( SELECT DISTINCT smaller_media_id, larger_media_id FROM {} );'.format( table_join ) ).fetchone()
    
    boned_stats[ 'total_alternate_files' ] = total_alternate_files
    boned_stats[ 'total_duplicate_files' ] = total_duplicate_files
    boned_stats[ 'total_potential_pairs' ] = total_potential_pairs
    
    return boned_stats
def _GetClientFilesLocations( self ):
    """Return a dict of storage prefix ('f00'..'tff') -> absolute directory path for client files/thumbnails.
    
    If the table is missing rows (there should be 512: 256 'f' file prefixes + 256 't' thumbnail
    prefixes), this is treated as a catastrophic event: the user is warned and default rows
    pointing at the db's 'client_files' directory are inserted.
    """
    
    result = { prefix : HydrusPaths.ConvertPortablePathToAbsPath( location ) for ( prefix, location ) in self._Execute( 'SELECT prefix, location FROM client_files_locations;' ) }
    
    if len( result ) < 512:
        
        message = 'When fetching the directories where your files are stored, the database discovered some entries were missing!'
        message += os.linesep * 2
        message += 'Default values will now be inserted. If you have previously migrated your files or thumbnails, and assuming this is occuring on boot, you will next be presented with a dialog to remap them to the correct location.'
        message += os.linesep * 2
        message += 'If this is not happening on client boot, you should kill the hydrus process right now, as a serious hard drive fault has likely recently occurred.'
        
        self._DisplayCatastrophicError( message )
        
        client_files_default = os.path.join( self._db_dir, 'client_files' )
        
        HydrusPaths.MakeSureDirectoryExists( client_files_default )
        
        location = HydrusPaths.ConvertAbsPathToPortablePath( client_files_default )
        
        # repopulate every possible prefix; OR IGNORE keeps any rows that did survive
        for prefix in HydrusData.IterateHexPrefixes():
            
            self._Execute( 'INSERT OR IGNORE INTO client_files_locations ( prefix, location ) VALUES ( ?, ? );', ( 'f' + prefix, location ) )
            self._Execute( 'INSERT OR IGNORE INTO client_files_locations ( prefix, location ) VALUES ( ?, ? );', ( 't' + prefix, location ) )
            
        
    
    # NOTE(review): after the repair path, the returned dict still reflects the original (short)
    # read, not the newly inserted defaults--presumably the boot remap dialog handles the rest
    return result
def _GetFileHistory( self, num_steps: int ):
    # get all sorts of stats and present them in ( timestamp, cumulative_num ) tuple pairs
    """Build ( timestamp, cumulative_count ) series for the file history chart.
    
    Returns a dict with 'current', 'deleted', and 'inbox' series, each downsampled to
    roughly num_steps points by only emitting a point when at least step_gap seconds
    have passed since the last one.
    """
    
    file_history = {}
    
    # first let's do current files. we increment when added, decrement when we know removed
    current_files_table_name = ClientDBFilesStorage.GenerateFilesTableName( self.modules_services.combined_local_file_service_id, HC.CONTENT_STATUS_CURRENT )
    
    current_timestamps = self._STL( self._Execute( 'SELECT timestamp FROM {};'.format( current_files_table_name ) ) )
    
    deleted_files_table_name = ClientDBFilesStorage.GenerateFilesTableName( self.modules_services.combined_local_file_service_id, HC.CONTENT_STATUS_DELETED )
    
    # deleted records keep the original import time, so they contribute an 'added' event too
    since_deleted = self._STL( self._Execute( 'SELECT original_timestamp FROM {} WHERE original_timestamp IS NOT NULL;'.format( deleted_files_table_name ) ) )
    
    current_timestamps.extend( since_deleted )
    
    current_timestamps.sort()
    
    deleted_timestamps = self._STL( self._Execute( 'SELECT timestamp FROM {} WHERE timestamp IS NOT NULL ORDER BY timestamp ASC;'.format( deleted_files_table_name ) ) )
    
    # merge adds (+1) and deletes (-1) into one chronological event stream
    combined_timestamps_with_delta = [ ( timestamp, 1 ) for timestamp in current_timestamps ]
    
    combined_timestamps_with_delta.extend( ( ( timestamp, -1 ) for timestamp in deleted_timestamps ) )
    
    combined_timestamps_with_delta.sort()
    
    current_file_history = []
    
    if len( combined_timestamps_with_delta ) > 0:
        
        if len( combined_timestamps_with_delta ) < 2:
            
            step_gap = 1
            
        else:
            
            # max( ..., 1 ) guards against a zero gap when the whole span is tiny
            step_gap = max( ( combined_timestamps_with_delta[-1][0] - combined_timestamps_with_delta[0][0] ) // num_steps, 1 )
            
        
        total_current_files = 0
        step_timestamp = combined_timestamps_with_delta[0][0]
        
        for ( timestamp, delta ) in combined_timestamps_with_delta:
            
            # emit a point once we have moved at least one step past the last emitted point
            if timestamp > step_timestamp + step_gap:
                
                current_file_history.append( ( step_timestamp, total_current_files ) )
                
                step_timestamp = timestamp
                
            
            total_current_files += delta
            
        
    
    file_history[ 'current' ] = current_file_history
    
    # now deleted times. we will pre-populate total_num_files with non-timestamped records
    ( total_deleted_files, ) = self._Execute( 'SELECT COUNT( * ) FROM {} WHERE timestamp IS NULL;'.format( deleted_files_table_name ) ).fetchone()
    
    deleted_file_history = []
    
    if len( deleted_timestamps ) > 0:
        
        if len( deleted_timestamps ) < 2:
            
            step_gap = 1
            
        else:
            
            step_gap = max( ( deleted_timestamps[-1] - deleted_timestamps[0] ) // num_steps, 1 )
            
        
        step_timestamp = deleted_timestamps[0]
        
        for deleted_timestamp in deleted_timestamps:
            
            if deleted_timestamp > step_timestamp + step_gap:
                
                deleted_file_history.append( ( step_timestamp, total_deleted_files ) )
                
                step_timestamp = deleted_timestamp
                
            
            total_deleted_files += 1
            
        
    
    file_history[ 'deleted' ] = deleted_file_history
    
    # and inbox, which will work backwards since we have numbers for archiving. several subtle differences here
    ( total_inbox_files, ) = self._Execute( 'SELECT COUNT( * ) FROM file_inbox;' ).fetchone()
    
    archive_timestamps = self._STL( self._Execute( 'SELECT archived_timestamp FROM archive_timestamps ORDER BY archived_timestamp ASC;' ) )
    
    inbox_file_history = []
    
    if len( archive_timestamps ) > 0:
        
        if len( archive_timestamps ) < 2:
            
            step_gap = 1
            
        else:
            
            step_gap = max( ( archive_timestamps[-1] - archive_timestamps[0] ) // num_steps, 1 )
            
        
        # walk newest->oldest: starting from today's inbox count, each archive event in the
        # past means the inbox was one file larger at that time
        archive_timestamps.reverse()
        
        step_timestamp = archive_timestamps[0]
        
        for archived_timestamp in archive_timestamps:
            
            if archived_timestamp < step_timestamp - step_gap:
                
                inbox_file_history.append( ( archived_timestamp, total_inbox_files ) )
                
                step_timestamp = archived_timestamp
                
            
            total_inbox_files += 1
            
        
        # restore chronological order for the chart
        inbox_file_history.reverse()
        
    
    file_history[ 'inbox' ] = inbox_file_history
    
    return file_history
def _GetFileNotes( self, hash ):
hash_id = self.modules_hashes_local_cache.GetHashId( hash )
names_to_notes = { name : note for ( name, note ) in self._Execute( 'SELECT label, note FROM file_notes, labels, notes ON ( file_notes.name_id = labels.label_id AND file_notes.note_id = notes.note_id ) WHERE hash_id = ?;', ( hash_id, ) ) }
return names_to_notes
def _GetFileSearchContextBranch( self, file_search_context: ClientSearch.FileSearchContext ) -> ClientDBServices.FileSearchContextBranch:
    """Resolve a file search context into concrete file- and tag-service id lists for searching.
    
    Service keys that no longer exist are skipped with a user-facing warning rather than
    failing the whole search.
    """
    
    location_context = file_search_context.GetLocationContext()
    tag_search_context = file_search_context.GetTagSearchContext()
    
    # a location context may expand to several current-file domains; the flag reports whether
    # results remain exactly cross-referenced to the requested location
    ( file_service_keys, file_location_is_cross_referenced ) = location_context.GetCoveringCurrentFileServiceKeys()
    
    search_file_service_ids = []
    
    for file_service_key in file_service_keys:
        
        try:
            
            search_file_service_id = self.modules_services.GetServiceId( file_service_key )
            
        except HydrusExceptions.DataMissing:
            
            # stale service key, e.g. a service the user just deleted--search what we can
            HydrusData.ShowText( 'A query was run for a file service that does not exist! If you just removed a service, you might want to try checking the search and/or restarting the client.' )
            
            continue
            
        
        search_file_service_ids.append( search_file_service_id )
        
    
    if tag_search_context.IsAllKnownTags():
        
        search_tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
        
    else:
        
        try:
            
            search_tag_service_ids = ( self.modules_services.GetServiceId( tag_search_context.service_key ), )
            
        except HydrusExceptions.DataMissing:
            
            HydrusData.ShowText( 'A query was run for a tag service that does not exist! If you just removed a service, you might want to try checking the search and/or restarting the client.' )
            
            search_tag_service_ids = []
            
        
    
    return ClientDBServices.FileSearchContextBranch( file_search_context, search_file_service_ids, search_tag_service_ids, file_location_is_cross_referenced )
def _GetFileSystemPredicates( self, file_search_context: ClientSearch.FileSearchContext, force_system_everything = False ):
    """Build the list of 'system:' predicates to offer for this search context.
    
    Counts (system:everything/inbox/archive/local/not local) are filled in from
    cached service info where available. force_system_everything shows
    system:everything even when its count exceeds the usual display limit.
    Returns a sorted list of ClientSearch.Predicate.
    """
    
    location_context = file_search_context.GetLocationContext()
    
    # system:everything is hidden above this count unless forced or always-shown
    system_everything_limit = 10000
    
    system_everything_suffix = ''
    
    predicates = []
    
    system_everythings = [ ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_EVERYTHING ) ]
    
    # predicate types that are always offered, without counts
    blank_pred_types = {
        ClientSearch.PREDICATE_TYPE_SYSTEM_NUM_TAGS,
        ClientSearch.PREDICATE_TYPE_SYSTEM_LIMIT,
        ClientSearch.PREDICATE_TYPE_SYSTEM_KNOWN_URLS,
        ClientSearch.PREDICATE_TYPE_SYSTEM_HASH,
        ClientSearch.PREDICATE_TYPE_SYSTEM_FILE_SERVICE,
        ClientSearch.PREDICATE_TYPE_SYSTEM_FILE_RELATIONSHIPS,
        ClientSearch.PREDICATE_TYPE_SYSTEM_TAG_AS_NUMBER,
        ClientSearch.PREDICATE_TYPE_SYSTEM_FILE_VIEWING_STATS
    }
    
    # only offer system:rating when there is at least one rating service
    if len( self.modules_services.GetServiceIds( HC.RATINGS_SERVICES ) ) > 0:
        
        blank_pred_types.add( ClientSearch.PREDICATE_TYPE_SYSTEM_RATING )
        
    
    if location_context.IsAllKnownFiles():
        
        # 'all known files': file counts come from the tag service side
        tag_service_key = file_search_context.GetTagSearchContext().service_key
        
        if tag_service_key == CC.COMBINED_TAG_SERVICE_KEY:
            
            # this shouldn't happen, combined on both sides, but let's do our best anyway
            if force_system_everything or self._controller.new_options.GetBoolean( 'always_show_system_everything' ):
                
                predicates.append( ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_EVERYTHING ) )
                
            
        else:
            
            service_id = self.modules_services.GetServiceId( tag_service_key )
            service_type = self.modules_services.GetServiceType( service_id )
            
            # calculate_missing = False: only use this count if it is already cached
            service_info = self._GetServiceInfoSpecific( service_id, service_type, { HC.SERVICE_INFO_NUM_FILES }, calculate_missing = False )
            
            if HC.SERVICE_INFO_NUM_FILES in service_info:
                
                num_everything = service_info[ HC.SERVICE_INFO_NUM_FILES ]
                
                system_everythings.append( ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_EVERYTHING, count = ClientSearch.PredicateCount.STATICCreateCurrentCount( num_everything ) ) )
                
            
        
    else:
        
        # specific file service(s)
        
        jobs = []
        
        jobs.extend( ( ( file_service_key, HC.CONTENT_STATUS_CURRENT ) for file_service_key in location_context.current_service_keys ) )
        jobs.extend( ( ( file_service_key, HC.CONTENT_STATUS_DELETED ) for file_service_key in location_context.deleted_service_keys ) )
        
        file_repo_preds = []
        inbox_archive_preds = []
        
        we_saw_a_file_repo = False
        
        for ( file_service_key, status ) in jobs:
            
            service_id = self.modules_services.GetServiceId( file_service_key )
            service_type = self.modules_services.GetServiceType( service_id )
            
            if service_type not in HC.FILE_SERVICES:
                
                continue
                
            
            if status == HC.CONTENT_STATUS_CURRENT:
                
                service_info = self._GetServiceInfoSpecific( service_id, service_type, { HC.SERVICE_INFO_NUM_VIEWABLE_FILES, HC.SERVICE_INFO_NUM_INBOX } )
                
                num_everything = service_info[ HC.SERVICE_INFO_NUM_VIEWABLE_FILES ]
                
                system_everythings.append( ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_EVERYTHING, count = ClientSearch.PredicateCount.STATICCreateCurrentCount( num_everything ) ) )
                
                if location_context.IncludesDeleted():
                    
                    # inbox/archive and local/remote are too difficult to get good numbers for and merge for deleted, so we'll exclude if this is a mix
                    
                    continue
                    
                
                num_inbox = service_info[ HC.SERVICE_INFO_NUM_INBOX ]
                num_archive = num_everything - num_inbox
                
                if service_type == HC.FILE_REPOSITORY:
                    
                    we_saw_a_file_repo = True
                    
                    num_local = self.modules_files_storage.GetNumLocal( service_id )
                    
                    num_not_local = num_everything - num_local
                    
                else:
                    
                    num_local = num_everything
                    num_not_local = 0
                    
                
                file_repo_preds.append( ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_LOCAL, count = ClientSearch.PredicateCount.STATICCreateCurrentCount( num_local ) ) )
                file_repo_preds.append( ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_NOT_LOCAL, count = ClientSearch.PredicateCount.STATICCreateCurrentCount( num_not_local ) ) )
                
                # NOTE(review): this overwrites the earlier num_archive, re-basing archive count on local files only
                num_archive = num_local - num_inbox
                
                inbox_archive_preds.append( ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_INBOX, count = ClientSearch.PredicateCount.STATICCreateCurrentCount( num_inbox ) ) )
                inbox_archive_preds.append( ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_ARCHIVE, count = ClientSearch.PredicateCount.STATICCreateCurrentCount( num_archive ) ) )
                
            elif status == HC.CONTENT_STATUS_DELETED:
                
                service_info = self._GetServiceInfoSpecific( service_id, service_type, { HC.SERVICE_INFO_NUM_DELETED_FILES } )
                
                num_everything = service_info[ HC.SERVICE_INFO_NUM_DELETED_FILES ]
                
                system_everythings.append( ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_EVERYTHING, count = ClientSearch.PredicateCount.STATICCreateCurrentCount( num_everything ) ) )
                
            
        
        # local/not local preds only mean anything when a file repository was in the mix
        if we_saw_a_file_repo:
            
            predicates.extend( file_repo_preds )
            
        
        if len( inbox_archive_preds ) > 0:
            
            inbox_archive_preds = ClientSearch.MergePredicates( inbox_archive_preds )
            
            zero_counts = [ pred.GetCount().HasZeroCount() for pred in inbox_archive_preds ]
            
            if True in zero_counts and self._controller.new_options.GetBoolean( 'filter_inbox_and_archive_predicates' ):
                
                if False in zero_counts and location_context.IsOneDomain():
                    
                    # something is in here, but we are hiding, so let's inform system everything
                    
                    useful_pred = list( ( pred for pred in inbox_archive_preds if pred.GetCount().HasNonZeroCount() ) )[0]
                    
                    if useful_pred.GetType() == ClientSearch.PREDICATE_TYPE_SYSTEM_INBOX:
                        
                        system_everything_suffix = 'all in inbox'
                        
                    else:
                        
                        system_everything_suffix = 'all in archive'
                        
                    
                
            else:
                
                predicates.extend( inbox_archive_preds )
                
            
        
        # these predicate types only make sense over actual file domains
        blank_pred_types.update( [
            ClientSearch.PREDICATE_TYPE_SYSTEM_SIZE,
            ClientSearch.PREDICATE_TYPE_SYSTEM_TIME,
            ClientSearch.PREDICATE_TYPE_SYSTEM_DIMENSIONS,
            ClientSearch.PREDICATE_TYPE_SYSTEM_DURATION,
            ClientSearch.PREDICATE_TYPE_SYSTEM_HAS_AUDIO,
            ClientSearch.PREDICATE_TYPE_SYSTEM_HAS_ICC_PROFILE,
            ClientSearch.PREDICATE_TYPE_SYSTEM_NOTES,
            ClientSearch.PREDICATE_TYPE_SYSTEM_NUM_WORDS,
            ClientSearch.PREDICATE_TYPE_SYSTEM_MIME,
            ClientSearch.PREDICATE_TYPE_SYSTEM_SIMILAR_TO
        ] )
        
    
    if len( system_everythings ) > 0:
        
        # fold the per-service system:everything counts into one predicate
        system_everythings = ClientSearch.MergePredicates( system_everythings )
        
        system_everything = list( system_everythings )[0]
        
        system_everything.SetCountTextSuffix( system_everything_suffix )
        
        num_everything = system_everything.GetCount().GetMinCount()
        
        if force_system_everything or ( num_everything <= system_everything_limit or self._controller.new_options.GetBoolean( 'always_show_system_everything' ) ):
            
            predicates.append( system_everything )
            
        
    
    predicates.extend( [ ClientSearch.Predicate( predicate_type ) for predicate_type in blank_pred_types ] )
    
    predicates = ClientSearch.MergePredicates( predicates )
    
    def sys_preds_key( s ):
        # fixed ordering for the special file-status preds, then alphabetical by label
        
        t = s.GetType()
        
        if t == ClientSearch.PREDICATE_TYPE_SYSTEM_EVERYTHING:
            
            return ( 0, 0 )
            
        elif t == ClientSearch.PREDICATE_TYPE_SYSTEM_INBOX:
            
            return ( 1, 0 )
            
        elif t == ClientSearch.PREDICATE_TYPE_SYSTEM_ARCHIVE:
            
            return ( 2, 0 )
            
        elif t == ClientSearch.PREDICATE_TYPE_SYSTEM_LOCAL:
            
            return ( 3, 0 )
            
        elif t == ClientSearch.PREDICATE_TYPE_SYSTEM_NOT_LOCAL:
            
            return ( 4, 0 )
            
        else:
            
            return ( 5, s.ToString() )
            
        
    
    predicates.sort( key = sys_preds_key )
    
    return predicates
    
def _GetForceRefreshTagsManagers( self, hash_ids, hash_ids_to_current_file_service_ids = None ):
    """Stage the hash_ids in a temp table and delegate to the table-based fetch.
    
    Returns a dict of hash_id -> TagsManager.
    """
    
    with self._MakeTemporaryIntegerTable( hash_ids, 'hash_id' ) as temp_hash_ids_table_name:
        
        # give the planner stats on the fresh temp table before the heavy joins
        self._AnalyzeTempTable( temp_hash_ids_table_name )
        
        hash_ids_to_tags_managers = self._GetForceRefreshTagsManagersWithTableHashIds( hash_ids, temp_hash_ids_table_name, hash_ids_to_current_file_service_ids = hash_ids_to_current_file_service_ids )
        
        return hash_ids_to_tags_managers
        
    
def _GetForceRefreshTagsManagersWithTableHashIds( self, hash_ids, hash_ids_table_name, hash_ids_to_current_file_service_ids = None ):
    """Build fresh TagsManagers for the given hash_ids straight from the mapping tables/caches.
    
    hash_ids_table_name is an existing temp integer table holding the same hash_ids.
    Returns a dict of hash_id -> ClientMediaManagers.TagsManager.
    """
    
    if hash_ids_to_current_file_service_ids is None:
        
        hash_ids_to_current_file_service_ids = self.modules_files_storage.GetHashIdsToCurrentServiceIds( hash_ids_table_name )
        
    
    # group files by a file service they share, so each batch can hit that service's tag cache
    common_file_service_ids_to_hash_ids = self._GroupHashIdsByTagCachedFileServiceId( hash_ids, hash_ids_table_name, hash_ids_to_current_file_service_ids = hash_ids_to_current_file_service_ids )
    
    #
    
    tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
    
    storage_tag_data = []
    display_tag_data = []
    
    for ( common_file_service_id, batch_of_hash_ids ) in common_file_service_ids_to_hash_ids.items():
        
        if len( batch_of_hash_ids ) == len( hash_ids ):
            
            # the batch is everything, so reuse the table we were given
            ( batch_of_storage_tag_data, batch_of_display_tag_data ) = self._GetForceRefreshTagsManagersWithTableHashIdsTagData( common_file_service_id, tag_service_ids, hash_ids_table_name )
            
        else:
            
            with self._MakeTemporaryIntegerTable( batch_of_hash_ids, 'hash_id' ) as temp_batch_hash_ids_table_name:
                
                ( batch_of_storage_tag_data, batch_of_display_tag_data ) = self._GetForceRefreshTagsManagersWithTableHashIdsTagData( common_file_service_id, tag_service_ids, temp_batch_hash_ids_table_name )
                
            
        
        storage_tag_data.extend( batch_of_storage_tag_data )
        display_tag_data.extend( batch_of_display_tag_data )
        
    
    # resolve every tag_id we saw to its text in one go
    seen_tag_ids = { tag_id for ( hash_id, ( tag_service_id, status, tag_id ) ) in storage_tag_data }
    
    seen_tag_ids.update( ( tag_id for ( hash_id, ( tag_service_id, status, tag_id ) ) in display_tag_data ) )
    
    tag_ids_to_tags = self.modules_tags_local_cache.GetTagIdsToTags( tag_ids = seen_tag_ids )
    
    service_ids_to_service_keys = self.modules_services.GetServiceIdsToServiceKeys()
    
    hash_ids_to_raw_storage_tag_data = HydrusData.BuildKeyToListDict( storage_tag_data )
    hash_ids_to_raw_display_tag_data = HydrusData.BuildKeyToListDict( display_tag_data )
    
    hash_ids_to_tag_managers = {}
    
    for hash_id in hash_ids:
        
        # service_id, status, tag_id
        raw_storage_tag_data = hash_ids_to_raw_storage_tag_data[ hash_id ]
        
        # service_id -> ( status, tag )
        service_ids_to_storage_tag_data = HydrusData.BuildKeyToListDict( ( ( tag_service_id, ( status, tag_ids_to_tags[ tag_id ] ) ) for ( tag_service_id, status, tag_id ) in raw_storage_tag_data ) )
        
        service_keys_to_statuses_to_storage_tags = collections.defaultdict(
            HydrusData.default_dict_set,
            { service_ids_to_service_keys[ tag_service_id ] : HydrusData.BuildKeyToSetDict( status_and_tag ) for ( tag_service_id, status_and_tag ) in service_ids_to_storage_tag_data.items() }
        )
        
        # service_id, status, tag_id
        raw_display_tag_data = hash_ids_to_raw_display_tag_data[ hash_id ]
        
        # service_id -> ( status, tag )
        service_ids_to_display_tag_data = HydrusData.BuildKeyToListDict( ( ( tag_service_id, ( status, tag_ids_to_tags[ tag_id ] ) ) for ( tag_service_id, status, tag_id ) in raw_display_tag_data ) )
        
        service_keys_to_statuses_to_display_tags = collections.defaultdict(
            HydrusData.default_dict_set,
            { service_ids_to_service_keys[ tag_service_id ] : HydrusData.BuildKeyToSetDict( status_and_tag ) for ( tag_service_id, status_and_tag ) in service_ids_to_display_tag_data.items() }
        )
        
        tags_manager = ClientMediaManagers.TagsManager( service_keys_to_statuses_to_storage_tags, service_keys_to_statuses_to_display_tags )
        
        hash_ids_to_tag_managers[ hash_id ] = tags_manager
        
    
    return hash_ids_to_tag_managers
    
def _GetForceRefreshTagsManagersWithTableHashIdsTagData( self, common_file_service_id, tag_service_ids, hash_ids_table_name ):
    """Fetch raw storage and display tag rows for the hashes in the given temp table.
    
    Returns ( storage_tag_data, display_tag_data ), each a list of
    ( hash_id, ( tag_service_id, status, tag_id ) ) tuples.
    """
    
    storage_tag_data = []
    display_tag_data = []
    
    for tag_service_id in tag_service_ids:
        
        statuses_to_table_names = self.modules_mappings_storage.GetFastestStorageMappingTableNames( common_file_service_id, tag_service_id )
        
        for ( status, mappings_table_name ) in statuses_to_table_names.items():
            
            # temp hashes to mappings
            storage_tag_data.extend( ( hash_id, ( tag_service_id, status, tag_id ) ) for ( hash_id, tag_id ) in self._Execute( 'SELECT hash_id, tag_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( hash_ids_table_name, mappings_table_name ) ) )
            
        
        if common_file_service_id != self.modules_services.combined_file_service_id:
            
            # a specific file domain has specific-display cache tables we can read directly
            ( cache_current_display_mappings_table_name, cache_pending_display_mappings_table_name ) = ClientDBMappingsCacheSpecificDisplay.GenerateSpecificDisplayMappingsCacheTableNames( common_file_service_id, tag_service_id )
            
            # temp hashes to mappings
            display_tag_data.extend( ( hash_id, ( tag_service_id, HC.CONTENT_STATUS_CURRENT, tag_id ) ) for ( hash_id, tag_id ) in self._Execute( 'SELECT hash_id, tag_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( hash_ids_table_name, cache_current_display_mappings_table_name ) ) )
            display_tag_data.extend( ( hash_id, ( tag_service_id, HC.CONTENT_STATUS_PENDING, tag_id ) ) for ( hash_id, tag_id ) in self._Execute( 'SELECT hash_id, tag_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( hash_ids_table_name, cache_pending_display_mappings_table_name ) ) )
            
        
    
    if common_file_service_id == self.modules_services.combined_file_service_id:
        
        # this is likely a 'all known files' query, which means we are in deep water without a cache
        # time to compute manually, which is semi hell mode, but not dreadful
        
        current_and_pending_storage_tag_data = [ ( hash_id, ( tag_service_id, status, tag_id ) ) for ( hash_id, ( tag_service_id, status, tag_id ) ) in storage_tag_data if status in ( HC.CONTENT_STATUS_CURRENT, HC.CONTENT_STATUS_PENDING ) ]
        
        seen_service_ids_to_seen_tag_ids = HydrusData.BuildKeyToSetDict( ( ( tag_service_id, tag_id ) for ( hash_id, ( tag_service_id, status, tag_id ) ) in current_and_pending_storage_tag_data ) )
        
        # per service, expand each storage tag to everything it implies for display
        seen_service_ids_to_tag_ids_to_implied_tag_ids = { tag_service_id : self.modules_tag_display.GetTagsToImplies( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, tag_ids ) for ( tag_service_id, tag_ids ) in seen_service_ids_to_seen_tag_ids.items() }
        
        display_tag_data = []
        
        for ( hash_id, ( tag_service_id, status, tag_id ) ) in current_and_pending_storage_tag_data:
            
            display_tag_data.extend( ( ( hash_id, ( tag_service_id, status, implied_tag_id ) ) for implied_tag_id in seen_service_ids_to_tag_ids_to_implied_tag_ids[ tag_service_id ][ tag_id ] ) )
            
        
    
    return ( storage_tag_data, display_tag_data )
    
def _GetHashIdsAndNonZeroTagCounts( self, tag_display_type: int, location_context: ClientLocation.LocationContext, tag_search_context: ClientSearch.TagSearchContext, hash_ids, namespace_wildcard = None, job_key = None ):
    """For each of the given hash_ids that has at least one matching tag, return ( hash_id, tag count ).
    
    namespace_wildcard optionally restricts counted tags to matching namespaces;
    '*' is normalised to 'no restriction'. Files with zero matching tags are
    simply absent from the result. Cancelling job_key returns partial results.
    """
    
    if namespace_wildcard == '*':
        
        namespace_wildcard = None
        
    
    if namespace_wildcard is None:
        
        namespace_ids = []
        
    else:
        
        namespace_ids = self.modules_tag_search.GetNamespaceIdsFromWildcard( namespace_wildcard )
        
    
    with self._MakeTemporaryIntegerTable( namespace_ids, 'namespace_id' ) as temp_namespace_ids_table_name:
        
        ( file_service_keys, file_location_is_cross_referenced ) = location_context.GetCoveringCurrentFileServiceKeys()
        
        mapping_and_tag_table_names = set()
        
        for file_service_key in file_service_keys:
            
            mapping_and_tag_table_names.update( self._GetMappingAndTagTables( tag_display_type, file_service_key, tag_search_context ) )
            
        
        # reason why I (JOIN each table) rather than (join the UNION) is based on previous hell with having query planner figure out a "( a UNION b UNION c ) NATURAL JOIN stuff" situation
        # although the following sometimes makes certifiable 2KB ( 6 UNION * 4-table ) queries, it actually works fast
        
        # OK, a new problem is mass UNION leads to terrible cancelability because the first row cannot be fetched until the first n - 1 union queries are done
        # I tried some gubbins to try to do a pseudo table-union rather than query union and do 'get files->distinct tag count for this union of tables, and fetch hash_ids first on the union', but did not have luck
        
        # so NOW we are just going to do it in bits of files mate. this also reduces memory use from the distinct-making UNION with large numbers of hash_ids
        
        results = []
        
        BLOCK_SIZE = max( 64, int( len( hash_ids ) ** 0.5 ) ) # go for square root for now
        
        for group_of_hash_ids in HydrusData.SplitIteratorIntoChunks( hash_ids, BLOCK_SIZE ):
            
            with self._MakeTemporaryIntegerTable( group_of_hash_ids, 'hash_id' ) as hash_ids_table_name:
                
                if namespace_wildcard is None:
                    
                    # temp hashes to mappings
                    select_statements = [ 'SELECT hash_id, tag_id FROM {} CROSS JOIN {} USING ( hash_id )'.format( hash_ids_table_name, mappings_table_name ) for ( mappings_table_name, tags_table_name ) in mapping_and_tag_table_names ]
                    
                else:
                    
                    # temp hashes to mappings to tags to namespaces
                    select_statements = [ 'SELECT hash_id, tag_id FROM {} CROSS JOIN {} USING ( hash_id ) CROSS JOIN {} USING ( tag_id ) CROSS JOIN {} USING ( namespace_id )'.format( hash_ids_table_name, mappings_table_name, tags_table_name, temp_namespace_ids_table_name ) for ( mappings_table_name, tags_table_name ) in mapping_and_tag_table_names ]
                    
                
                unions = '( {} )'.format( ' UNION '.join( select_statements ) )
                
                query = 'SELECT hash_id, COUNT( tag_id ) FROM {} GROUP BY hash_id;'.format( unions )
                
                cursor = self._Execute( query )
                
                cancelled_hook = None
                
                if job_key is not None:
                    
                    cancelled_hook = job_key.IsCancelled
                    
                
                # read in small cancellable chunks so a big union can be interrupted
                loop_of_results = HydrusDB.ReadFromCancellableCursor( cursor, 64, cancelled_hook = cancelled_hook )
                
                if job_key is not None and job_key.IsCancelled():
                    
                    return results
                    
                
                results.extend( loop_of_results )
                
            
        
        return results
        
    
def _GetHashIdsFromFileViewingStatistics( self, view_type, viewing_locations, operator, viewing_value ):
    """Return the set of hash_ids whose viewing stats satisfy 'views/viewtime <operator> <value>'.
    
    view_type: 'views' or 'viewtime' (these match the column names in file_viewing_stats).
    viewing_locations: container that may hold 'media' and/or 'preview'; when both are
    present the two canvases' rows are summed per file.
    operator: SQL comparison text, or CC.UNICODE_ALMOST_EQUAL_TO for +/- 20%.
    
    Raises NotImplementedError on an unrecognised view_type.
    """
    
    # only works for positive values like '> 5'. won't work for '= 0' or '< 1' since those are absent from the table
    
    include_media = 'media' in viewing_locations
    include_preview = 'preview' in viewing_locations
    
    if not ( include_media or include_preview ):
        
        # previously returned a list here while every other path returns a set; keep the type consistent
        return set()
        
    
    if view_type not in ( 'views', 'viewtime' ):
        
        # previously an unknown view_type left content_phrase unbound, raising UnboundLocalError later; fail fast instead
        raise NotImplementedError( 'Unknown view type: {}'.format( view_type ) )
        
    
    canvas_type_predicate = '1=1'
    group_by_phrase = ''
    
    # column names match the view_type strings exactly
    content_phrase = view_type
    
    if include_media and include_preview:
        
        # both canvases wanted: sum the per-canvas rows for each file
        group_by_phrase = ' GROUP BY hash_id'
        
        content_phrase = 'SUM( {} )'.format( view_type )
        
    elif include_media:
        
        canvas_type_predicate = 'canvas_type = {}'.format( CC.CANVAS_MEDIA_VIEWER )
        
    elif include_preview:
        
        canvas_type_predicate = 'canvas_type = {}'.format( CC.CANVAS_PREVIEW )
        
    
    if operator == CC.UNICODE_ALMOST_EQUAL_TO:
        
        # 'about' means within 20% either side
        lower_bound = int( 0.8 * viewing_value )
        upper_bound = int( 1.2 * viewing_value )
        
        test_phrase = '{} BETWEEN {} AND {}'.format( content_phrase, str( lower_bound ), str( upper_bound ) )
        
    else:
        
        test_phrase = '{} {} {}'.format( content_phrase, operator, str( viewing_value ) )
        
    
    if include_media and include_preview:
        
        # aggregate test goes in HAVING, after the GROUP BY
        select_statement = 'SELECT hash_id FROM file_viewing_stats {} HAVING {};'.format( group_by_phrase, test_phrase )
        
    else:
        
        select_statement = 'SELECT hash_id FROM file_viewing_stats WHERE {} AND {}{};'.format( test_phrase, canvas_type_predicate, group_by_phrase )
        
    
    hash_ids = self._STS( self._Execute( select_statement ) )
    
    return hash_ids
    
def _GetHashIdsFromNamespaceIdsSubtagIds( self, tag_display_type: int, file_service_key, tag_search_context: ClientSearch.TagSearchContext, namespace_ids, subtag_ids, hash_ids = None, hash_ids_table_name = None, job_key = None ):
    """Resolve ( namespace_id, subtag_id ) search material to tag_ids and then to matching hash_ids."""
    
    file_service_id = self.modules_services.GetServiceId( file_service_key )
    tag_service_id = self.modules_services.GetServiceId( tag_search_context.service_key )
    
    matching_tag_ids = self._GetTagIdsFromNamespaceIdsSubtagIds( file_service_id, tag_service_id, namespace_ids, subtag_ids, job_key = job_key )
    
    return self._GetHashIdsFromTagIds( tag_display_type, file_service_key, tag_search_context, matching_tag_ids, hash_ids = hash_ids, hash_ids_table_name = hash_ids_table_name, job_key = job_key )
    
def _GetHashIdsFromNamespaceIdsSubtagIdsTables( self, tag_display_type: int, file_service_key, tag_search_context: ClientSearch.TagSearchContext, namespace_ids_table_name, subtag_ids_table_name, hash_ids = None, hash_ids_table_name = None, job_key = None ):
    """Table-based variant: resolve namespace/subtag id temp tables to tag_ids, then to matching hash_ids."""
    
    file_service_id = self.modules_services.GetServiceId( file_service_key )
    tag_service_id = self.modules_services.GetServiceId( tag_search_context.service_key )
    
    matching_tag_ids = self._GetTagIdsFromNamespaceIdsSubtagIdsTables( file_service_id, tag_service_id, namespace_ids_table_name, subtag_ids_table_name, job_key = job_key )
    
    return self._GetHashIdsFromTagIds( tag_display_type, file_service_key, tag_search_context, matching_tag_ids, hash_ids = hash_ids, hash_ids_table_name = hash_ids_table_name, job_key = job_key )
    
def _GetHashIdsFromNoteName( self, name: str, hash_ids_table_name: str ):
    """Return the set of hash_ids in the given temp table that have a note with this exact name."""
    
    label_id = self.modules_texts.GetLabelId( name )
    
    # as note name is rare, we force this to run opposite to typical: notes to temp hashes
    query = 'SELECT hash_id FROM file_notes CROSS JOIN {} USING ( hash_id ) WHERE name_id = ?;'.format( hash_ids_table_name )
    
    return self._STS( self._Execute( query, ( label_id, ) ) )
    
def _GetHashIdsFromNumNotes( self, min_num_notes: typing.Optional[ int ], max_num_notes: typing.Optional[ int ], hash_ids_table_name: str ):
    """Filter the hash_ids in the given temp table by how many notes each file has.
    
    None bounds are open-ended. 'exactly has notes' (min 1, no max) and
    'exactly no notes' (max 0) are special-cased to a cheap existence test.
    """
    
    wants_any_notes = max_num_notes is None and min_num_notes == 1
    wants_no_notes = ( min_num_notes is None or min_num_notes == 0 ) and max_num_notes is not None and max_num_notes == 0
    
    if wants_any_notes or wants_no_notes:
        
        # one existence query covers both special cases
        with_notes_hash_ids = self._STS( self._Execute( 'SELECT hash_id FROM {} WHERE EXISTS ( SELECT 1 FROM file_notes WHERE file_notes.hash_id = {}.hash_id );'.format( hash_ids_table_name, hash_ids_table_name ) ) )
        
        if wants_any_notes:
            
            return with_notes_hash_ids
            
        
        every_hash_id = self._STS( self._Execute( 'SELECT hash_id FROM {};'.format( hash_ids_table_name ) ) )
        
        return every_hash_id.difference( with_notes_hash_ids )
        
    
    if min_num_notes is None:
        
        def count_is_wanted( count ):
            
            return count <= max_num_notes
            
        
    elif max_num_notes is None:
        
        def count_is_wanted( count ):
            
            return min_num_notes <= count
            
        
    else:
        
        def count_is_wanted( count ):
            
            return min_num_notes <= count <= max_num_notes
            
        
    
    # temp hashes to notes
    query = 'SELECT hash_id, COUNT( * ) FROM {} CROSS JOIN file_notes USING ( hash_id ) GROUP BY hash_id;'.format( hash_ids_table_name )
    
    return { hash_id for ( hash_id, note_count ) in self._Execute( query ) if count_is_wanted( note_count ) }
    
def _GetHashIdsFromQuery( self, file_search_context: ClientSearch.FileSearchContext, job_key = None, query_hash_ids: typing.Optional[ set ] = None, apply_implicit_limit = True, sort_by = None, limit_sort_by = None ):
if job_key is None:
job_key = ClientThreading.JobKey( cancellable = True )
if query_hash_ids is not None:
query_hash_ids = set( query_hash_ids )
have_cross_referenced_file_locations = False
self._controller.ResetIdleTimer()
system_predicates = file_search_context.GetSystemPredicates()
location_context = file_search_context.GetLocationContext()
tag_search_context = file_search_context.GetTagSearchContext()
tag_service_key = tag_search_context.service_key
include_current_tags = tag_search_context.include_current_tags
include_pending_tags = tag_search_context.include_pending_tags
if not location_context.SearchesAnything():
return set()
current_file_service_ids = set()
for current_service_key in location_context.current_service_keys:
try:
current_file_service_id = self.modules_services.GetServiceId( current_service_key )
except HydrusExceptions.DataMissing:
HydrusData.ShowText( 'A file search query was run for a file service that does not exist! If you just removed a service, you might want to try checking the search and/or restarting the client.' )
return set()
current_file_service_ids.add( current_file_service_id )
deleted_file_service_ids = set()
for deleted_service_key in location_context.deleted_service_keys:
try:
deleted_file_service_id = self.modules_services.GetServiceId( deleted_service_key )
except HydrusExceptions.DataMissing:
HydrusData.ShowText( 'A file search query was run for a file service that does not exist! If you just removed a service, you might want to try checking the search and/or restarting the client.' )
return set()
deleted_file_service_ids.add( deleted_file_service_id )
db_location_context = self.modules_files_storage.GetDBLocationContext( location_context )
try:
tag_service_id = self.modules_services.GetServiceId( tag_service_key )
except HydrusExceptions.DataMissing:
HydrusData.ShowText( 'A file search query was run for a tag service that does not exist! If you just removed a service, you might want to try checking the search and/or restarting the client.' )
return set()
tags_to_include = file_search_context.GetTagsToInclude()
tags_to_exclude = file_search_context.GetTagsToExclude()
namespaces_to_include = file_search_context.GetNamespacesToInclude()
namespaces_to_exclude = file_search_context.GetNamespacesToExclude()
wildcards_to_include = file_search_context.GetWildcardsToInclude()
wildcards_to_exclude = file_search_context.GetWildcardsToExclude()
simple_preds = system_predicates.GetSimpleInfo()
king_filter = system_predicates.GetKingFilter()
or_predicates = file_search_context.GetORPredicates()
need_file_domain_cross_reference = not location_context.IsAllKnownFiles()
there_are_tags_to_search = len( tags_to_include ) > 0 or len( namespaces_to_include ) > 0 or len( wildcards_to_include ) > 0
# ok, let's set up the big list of simple search preds
files_info_predicates = []
if 'min_size' in simple_preds:
files_info_predicates.append( 'size > ' + str( simple_preds[ 'min_size' ] ) )
if 'size' in simple_preds:
files_info_predicates.append( 'size = ' + str( simple_preds[ 'size' ] ) )
if 'not_size' in simple_preds:
files_info_predicates.append( 'size != ' + str( simple_preds[ 'not_size' ] ) )
if 'max_size' in simple_preds:
files_info_predicates.append( 'size < ' + str( simple_preds[ 'max_size' ] ) )
if 'mimes' in simple_preds:
mimes = simple_preds[ 'mimes' ]
if len( mimes ) == 1:
( mime, ) = mimes
files_info_predicates.append( 'mime = ' + str( mime ) )
else:
files_info_predicates.append( 'mime IN ' + HydrusData.SplayListForDB( mimes ) )
if 'has_audio' in simple_preds:
has_audio = simple_preds[ 'has_audio' ]
files_info_predicates.append( 'has_audio = {}'.format( int( has_audio ) ) )
if 'min_width' in simple_preds:
files_info_predicates.append( 'width > ' + str( simple_preds[ 'min_width' ] ) )
if 'width' in simple_preds:
files_info_predicates.append( 'width = ' + str( simple_preds[ 'width' ] ) )
if 'not_width' in simple_preds:
files_info_predicates.append( 'width != ' + str( simple_preds[ 'not_width' ] ) )
if 'max_width' in simple_preds:
files_info_predicates.append( 'width < ' + str( simple_preds[ 'max_width' ] ) )
if 'min_height' in simple_preds:
files_info_predicates.append( 'height > ' + str( simple_preds[ 'min_height' ] ) )
if 'height' in simple_preds:
files_info_predicates.append( 'height = ' + str( simple_preds[ 'height' ] ) )
if 'not_height' in simple_preds:
files_info_predicates.append( 'height != ' + str( simple_preds[ 'not_height' ] ) )
if 'max_height' in simple_preds:
files_info_predicates.append( 'height < ' + str( simple_preds[ 'max_height' ] ) )
if 'min_num_pixels' in simple_preds:
files_info_predicates.append( 'width * height > ' + str( simple_preds[ 'min_num_pixels' ] ) )
if 'num_pixels' in simple_preds:
files_info_predicates.append( 'width * height = ' + str( simple_preds[ 'num_pixels' ] ) )
if 'not_num_pixels' in simple_preds:
files_info_predicates.append( 'width * height != ' + str( simple_preds[ 'not_num_pixels' ] ) )
if 'max_num_pixels' in simple_preds:
files_info_predicates.append( 'width * height < ' + str( simple_preds[ 'max_num_pixels' ] ) )
if 'min_ratio' in simple_preds:
( ratio_width, ratio_height ) = simple_preds[ 'min_ratio' ]
files_info_predicates.append( '( width * 1.0 ) / height > ' + str( float( ratio_width ) ) + ' / ' + str( ratio_height ) )
if 'ratio' in simple_preds:
( ratio_width, ratio_height ) = simple_preds[ 'ratio' ]
files_info_predicates.append( '( width * 1.0 ) / height = ' + str( float( ratio_width ) ) + ' / ' + str( ratio_height ) )
if 'not_ratio' in simple_preds:
( ratio_width, ratio_height ) = simple_preds[ 'not_ratio' ]
files_info_predicates.append( '( width * 1.0 ) / height != ' + str( float( ratio_width ) ) + ' / ' + str( ratio_height ) )
if 'max_ratio' in simple_preds:
( ratio_width, ratio_height ) = simple_preds[ 'max_ratio' ]
files_info_predicates.append( '( width * 1.0 ) / height < ' + str( float( ratio_width ) ) + ' / ' + str( ratio_height ) )
if 'min_num_words' in simple_preds: files_info_predicates.append( 'num_words > ' + str( simple_preds[ 'min_num_words' ] ) )
if 'num_words' in simple_preds:
num_words = simple_preds[ 'num_words' ]
if num_words == 0: files_info_predicates.append( '( num_words IS NULL OR num_words = 0 )' )
else: files_info_predicates.append( 'num_words = ' + str( num_words ) )
if 'not_num_words' in simple_preds:
num_words = simple_preds[ 'not_num_words' ]
files_info_predicates.append( '( num_words IS NULL OR num_words != {} )'.format( num_words ) )
if 'max_num_words' in simple_preds:
max_num_words = simple_preds[ 'max_num_words' ]
if max_num_words == 0: files_info_predicates.append( 'num_words < ' + str( max_num_words ) )
else: files_info_predicates.append( '( num_words < ' + str( max_num_words ) + ' OR num_words IS NULL )' )
if 'min_duration' in simple_preds: files_info_predicates.append( 'duration > ' + str( simple_preds[ 'min_duration' ] ) )
if 'duration' in simple_preds:
duration = simple_preds[ 'duration' ]
if duration == 0:
files_info_predicates.append( '( duration = 0 OR duration IS NULL )' )
else:
files_info_predicates.append( 'duration = ' + str( duration ) )
if 'not_duration' in simple_preds:
duration = simple_preds[ 'not_duration' ]
files_info_predicates.append( '( duration IS NULL OR duration != {} )'.format( duration ) )
if 'max_duration' in simple_preds:
max_duration = simple_preds[ 'max_duration' ]
if max_duration == 0: files_info_predicates.append( 'duration < ' + str( max_duration ) )
else: files_info_predicates.append( '( duration < ' + str( max_duration ) + ' OR duration IS NULL )' )
if 'min_framerate' in simple_preds or 'framerate' in simple_preds or 'max_framerate' in simple_preds or 'not_framerate' in simple_preds:
if 'not_framerate' in simple_preds:
pred = '( duration IS NULL OR num_frames = 0 OR ( duration IS NOT NULL AND duration != 0 AND num_frames != 0 AND num_frames IS NOT NULL AND {} ) )'
min_framerate_sql = simple_preds[ 'not_framerate' ] * 0.95
max_framerate_sql = simple_preds[ 'not_framerate' ] * 1.05
pred = pred.format( '( num_frames * 1.0 ) / ( duration / 1000.0 ) NOT BETWEEN {} AND {}'.format( min_framerate_sql, max_framerate_sql ) )
else:
min_framerate_sql = None
max_framerate_sql = None
pred = '( duration IS NOT NULL AND duration != 0 AND num_frames != 0 AND num_frames IS NOT NULL AND {} )'
if 'min_framerate' in simple_preds:
min_framerate_sql = simple_preds[ 'min_framerate' ] * 1.05
if 'framerate' in simple_preds:
min_framerate_sql = simple_preds[ 'framerate' ] * 0.95
max_framerate_sql = simple_preds[ 'framerate' ] * 1.05
if 'max_framerate' in simple_preds:
max_framerate_sql = simple_preds[ 'max_framerate' ] * 0.95
if min_framerate_sql is None:
pred = pred.format( '( num_frames * 1.0 ) / ( duration / 1000.0 ) < {}'.format( max_framerate_sql ) )
elif max_framerate_sql is None:
pred = pred.format( '( num_frames * 1.0 ) / ( duration / 1000.0 ) > {}'.format( min_framerate_sql ) )
else:
pred = pred.format( '( num_frames * 1.0 ) / ( duration / 1000.0 ) BETWEEN {} AND {}'.format( min_framerate_sql, max_framerate_sql ) )
files_info_predicates.append( pred )
if 'min_num_frames' in simple_preds: files_info_predicates.append( 'num_frames > ' + str( simple_preds[ 'min_num_frames' ] ) )
if 'num_frames' in simple_preds:
num_frames = simple_preds[ 'num_frames' ]
if num_frames == 0: files_info_predicates.append( '( num_frames IS NULL OR num_frames = 0 )' )
else: files_info_predicates.append( 'num_frames = ' + str( num_frames ) )
if 'not_num_frames' in simple_preds:
num_frames = simple_preds[ 'not_num_frames' ]
files_info_predicates.append( '( num_frames IS NULL OR num_frames != {} )'.format( num_frames ) )
if 'max_num_frames' in simple_preds:
max_num_frames = simple_preds[ 'max_num_frames' ]
if max_num_frames == 0: files_info_predicates.append( 'num_frames < ' + str( max_num_frames ) )
else: files_info_predicates.append( '( num_frames < ' + str( max_num_frames ) + ' OR num_frames IS NULL )' )
there_are_simple_files_info_preds_to_search_for = len( files_info_predicates ) > 0
# start with some quick ways to populate query_hash_ids
def intersection_update_qhi( query_hash_ids, some_hash_ids, force_create_new_set = False ) -> set:
if query_hash_ids is None:
if not isinstance( some_hash_ids, set ) or force_create_new_set:
some_hash_ids = set( some_hash_ids )
return some_hash_ids
else:
query_hash_ids.intersection_update( some_hash_ids )
return query_hash_ids
#
def do_or_preds( or_predicates, query_hash_ids ) -> set:
    """Resolve each OR predicate via recursive sub-searches and intersect the results in.
    
    Returns the narrowed query_hash_ids, or an empty set if the job is cancelled mid-search.
    """
    # run the OR chains with the fewest branches first, so query_hash_ids is
    # already narrow by the time the longer chains come around
    ordered_or_predicates = sorted( or_predicates, key = lambda p: len( p.GetValue() ) )
    for or_predicate in ordered_or_predicates:
        # e.g. blue eyes OR green eyes
        or_query_hash_ids = set()
        for or_subpredicate in or_predicate.GetValue():
            # e.g. blue eyes
            branch_search_context = file_search_context.Duplicate()
            branch_search_context.SetPredicates( [ or_subpredicate ] )
            # handing the current query_hash_ids down lets inefficient sub-searches (like -tag) trim their work
            or_query_hash_ids.update( self._GetHashIdsFromQuery( branch_search_context, job_key, query_hash_ids = query_hash_ids, apply_implicit_limit = False, sort_by = None, limit_sort_by = None ) )
            if job_key.IsCancelled():
                return set()
        query_hash_ids = intersection_update_qhi( query_hash_ids, or_query_hash_ids )
    return query_hash_ids
#
done_or_predicates = len( or_predicates ) == 0
# OR round one--if nothing else will be fast, let's prep query_hash_ids now
if not done_or_predicates and not ( there_are_tags_to_search or there_are_simple_files_info_preds_to_search_for ):
query_hash_ids = do_or_preds( or_predicates, query_hash_ids )
have_cross_referenced_file_locations = True
done_or_predicates = True
#
if 'hash' in simple_preds:
( search_hashes, search_hash_type, inclusive ) = simple_preds[ 'hash' ]
if inclusive:
if search_hash_type == 'sha256':
matching_sha256_hashes = [ search_hash for search_hash in search_hashes if self.modules_hashes.HasHash( search_hash ) ]
else:
matching_sha256_hashes = self.modules_hashes.GetFileHashes( search_hashes, search_hash_type, 'sha256' )
specific_hash_ids = self.modules_hashes_local_cache.GetHashIds( matching_sha256_hashes )
query_hash_ids = intersection_update_qhi( query_hash_ids, specific_hash_ids )
#
if need_file_domain_cross_reference:
# in future we will hang an explicit service off this predicate and specify import/deleted time
# for now we'll wangle a compromise and just check all, and if domain is deleted, then search deletion time
import_timestamp_predicates = []
if 'min_import_timestamp' in simple_preds: import_timestamp_predicates.append( 'timestamp >= ' + str( simple_preds[ 'min_import_timestamp' ] ) )
if 'max_import_timestamp' in simple_preds: import_timestamp_predicates.append( 'timestamp <= ' + str( simple_preds[ 'max_import_timestamp' ] ) )
if len( import_timestamp_predicates ) > 0:
pred_string = ' AND '.join( import_timestamp_predicates )
table_names = []
table_names.extend( ( ClientDBFilesStorage.GenerateFilesTableName( self.modules_services.GetServiceId( service_key ), HC.CONTENT_STATUS_CURRENT ) for service_key in location_context.current_service_keys ) )
table_names.extend( ( ClientDBFilesStorage.GenerateFilesTableName( self.modules_services.GetServiceId( service_key ), HC.CONTENT_STATUS_DELETED ) for service_key in location_context.deleted_service_keys ) )
import_timestamp_hash_ids = set()
for table_name in table_names:
import_timestamp_hash_ids.update( self._STS( self._Execute( 'SELECT hash_id FROM {} WHERE {};'.format( table_name, pred_string ) ) ) )
query_hash_ids = intersection_update_qhi( query_hash_ids, import_timestamp_hash_ids )
have_cross_referenced_file_locations = True
modified_timestamp_predicates = []
if 'min_modified_timestamp' in simple_preds: modified_timestamp_predicates.append( 'MIN( file_modified_timestamp ) >= ' + str( simple_preds[ 'min_modified_timestamp' ] ) )
if 'max_modified_timestamp' in simple_preds: modified_timestamp_predicates.append( 'MIN( file_modified_timestamp ) <= ' + str( simple_preds[ 'max_modified_timestamp' ] ) )
if len( modified_timestamp_predicates ) > 0:
pred_string = ' AND '.join( modified_timestamp_predicates )
q1 = 'SELECT hash_id, file_modified_timestamp FROM file_modified_timestamps'
q2 = 'SELECT hash_id, file_modified_timestamp FROM file_domain_modified_timestamps'
query = 'SELECT hash_id FROM ( {} UNION {} ) GROUP BY hash_id HAVING {};'.format( q1, q2, pred_string )
modified_timestamp_hash_ids = self._STS( self._Execute( query ) )
query_hash_ids = intersection_update_qhi( query_hash_ids, modified_timestamp_hash_ids )
last_viewed_timestamp_predicates = []
if 'min_last_viewed_timestamp' in simple_preds: last_viewed_timestamp_predicates.append( 'last_viewed_timestamp >= ' + str( simple_preds[ 'min_last_viewed_timestamp' ] ) )
if 'max_last_viewed_timestamp' in simple_preds: last_viewed_timestamp_predicates.append( 'last_viewed_timestamp <= ' + str( simple_preds[ 'max_last_viewed_timestamp' ] ) )
if len( last_viewed_timestamp_predicates ) > 0:
pred_string = ' AND '.join( last_viewed_timestamp_predicates )
last_viewed_timestamp_hash_ids = self._STS( self._Execute( 'SELECT hash_id FROM file_viewing_stats WHERE canvas_type = ? AND {};'.format( pred_string ), ( CC.CANVAS_MEDIA_VIEWER, ) ) )
query_hash_ids = intersection_update_qhi( query_hash_ids, last_viewed_timestamp_hash_ids )
#
if system_predicates.HasSimilarTo():
( similar_to_hashes, max_hamming ) = system_predicates.GetSimilarTo()
all_similar_hash_ids = set()
for similar_to_hash in similar_to_hashes:
hash_id = self.modules_hashes_local_cache.GetHashId( similar_to_hash )
similar_hash_ids_and_distances = self.modules_similar_files.Search( hash_id, max_hamming )
similar_hash_ids = [ similar_hash_id for ( similar_hash_id, distance ) in similar_hash_ids_and_distances ]
all_similar_hash_ids.update( similar_hash_ids )
query_hash_ids = intersection_update_qhi( query_hash_ids, all_similar_hash_ids )
for ( operator, value, rating_service_key ) in system_predicates.GetRatingsPredicates():
service_id = self.modules_services.GetServiceId( rating_service_key )
if value == 'not rated':
continue
if value == 'rated':
rating_hash_ids = self._STI( self._Execute( 'SELECT hash_id FROM local_ratings WHERE service_id = ?;', ( service_id, ) ) )
query_hash_ids = intersection_update_qhi( query_hash_ids, rating_hash_ids )
else:
service = HG.client_controller.services_manager.GetService( rating_service_key )
if service.GetServiceType() == HC.LOCAL_RATING_LIKE:
half_a_star_value = 0.5
else:
one_star_value = service.GetOneStarValue()
half_a_star_value = one_star_value / 2
if isinstance( value, str ):
value = float( value )
# floats are a pain! as is storing rating as 0.0-1.0 and then allowing number of stars to change!
if operator == CC.UNICODE_ALMOST_EQUAL_TO:
predicate = str( ( value - half_a_star_value ) * 0.8 ) + ' < rating AND rating < ' + str( ( value + half_a_star_value ) * 1.2 )
elif operator == '<':
predicate = 'rating <= ' + str( value - half_a_star_value )
elif operator == '>':
predicate = 'rating > ' + str( value + half_a_star_value )
elif operator == '=':
predicate = str( value - half_a_star_value ) + ' < rating AND rating <= ' + str( value + half_a_star_value )
rating_hash_ids = self._STI( self._Execute( 'SELECT hash_id FROM local_ratings WHERE service_id = ? AND ' + predicate + ';', ( service_id, ) ) )
query_hash_ids = intersection_update_qhi( query_hash_ids, rating_hash_ids )
is_inbox = system_predicates.MustBeInbox()
if is_inbox:
query_hash_ids = intersection_update_qhi( query_hash_ids, self.modules_files_metadata_basic.inbox_hash_ids, force_create_new_set = True )
for ( operator, num_relationships, dupe_type ) in system_predicates.GetDuplicateRelationshipCountPredicates():
only_do_zero = ( operator in ( '=', CC.UNICODE_ALMOST_EQUAL_TO ) and num_relationships == 0 ) or ( operator == '<' and num_relationships == 1 )
include_zero = operator == '<'
if only_do_zero:
continue
elif include_zero:
continue
else:
dupe_hash_ids = self.modules_files_duplicates.DuplicatesGetHashIdsFromDuplicateCountPredicate( db_location_context, operator, num_relationships, dupe_type )
query_hash_ids = intersection_update_qhi( query_hash_ids, dupe_hash_ids )
have_cross_referenced_file_locations = True
for ( view_type, viewing_locations, operator, viewing_value ) in system_predicates.GetFileViewingStatsPredicates():
only_do_zero = ( operator in ( '=', CC.UNICODE_ALMOST_EQUAL_TO ) and viewing_value == 0 ) or ( operator == '<' and viewing_value == 1 )
include_zero = operator == '<'
if only_do_zero:
continue
elif include_zero:
continue
else:
viewing_hash_ids = self._GetHashIdsFromFileViewingStatistics( view_type, viewing_locations, operator, viewing_value )
query_hash_ids = intersection_update_qhi( query_hash_ids, viewing_hash_ids )
# first tags
if there_are_tags_to_search:
def sort_longest_tag_first_key( s ):
    # namespaced tags sort ahead of unnamespaced ones; longest tag first within each group
    is_unnamespaced = HydrusTags.IsUnnamespaced( s )
    return ( 1 if is_unnamespaced else 0, -len( s ) )
tags_to_include = list( tags_to_include )
tags_to_include.sort( key = sort_longest_tag_first_key )
for tag in tags_to_include:
if query_hash_ids is None:
tag_query_hash_ids = self._GetHashIdsFromTag( ClientTags.TAG_DISPLAY_ACTUAL, location_context, tag_search_context, tag, job_key = job_key )
elif is_inbox and len( query_hash_ids ) == len( self.modules_files_metadata_basic.inbox_hash_ids ):
tag_query_hash_ids = self._GetHashIdsFromTag( ClientTags.TAG_DISPLAY_ACTUAL, location_context, tag_search_context, tag, hash_ids = self.modules_files_metadata_basic.inbox_hash_ids, hash_ids_table_name = 'file_inbox', job_key = job_key )
else:
with self._MakeTemporaryIntegerTable( query_hash_ids, 'hash_id' ) as temp_table_name:
tag_query_hash_ids = self._GetHashIdsFromTag( ClientTags.TAG_DISPLAY_ACTUAL, location_context, tag_search_context, tag, hash_ids = query_hash_ids, hash_ids_table_name = temp_table_name, job_key = job_key )
query_hash_ids = intersection_update_qhi( query_hash_ids, tag_query_hash_ids )
have_cross_referenced_file_locations = True
if len( query_hash_ids ) == 0:
return query_hash_ids
namespaces_to_include = list( namespaces_to_include )
namespaces_to_include.sort( key = lambda n: -len( n ) )
for namespace in namespaces_to_include:
if query_hash_ids is None or ( is_inbox and len( query_hash_ids ) == len( self.modules_files_metadata_basic.inbox_hash_ids ) ):
namespace_query_hash_ids = self._GetHashIdsThatHaveTagsComplexLocation( ClientTags.TAG_DISPLAY_ACTUAL, location_context, tag_search_context, namespace_wildcard = namespace, job_key = job_key )
else:
with self._MakeTemporaryIntegerTable( query_hash_ids, 'hash_id' ) as temp_table_name:
self._AnalyzeTempTable( temp_table_name )
namespace_query_hash_ids = self._GetHashIdsThatHaveTagsComplexLocation( ClientTags.TAG_DISPLAY_ACTUAL, location_context, tag_search_context, namespace_wildcard = namespace, hash_ids_table_name = temp_table_name, job_key = job_key )
query_hash_ids = intersection_update_qhi( query_hash_ids, namespace_query_hash_ids )
have_cross_referenced_file_locations = True
if len( query_hash_ids ) == 0:
return query_hash_ids
wildcards_to_include = list( wildcards_to_include )
wildcards_to_include.sort( key = lambda w: -len( w ) )
for wildcard in wildcards_to_include:
if query_hash_ids is None:
wildcard_query_hash_ids = self._GetHashIdsFromWildcardComplexLocation( ClientTags.TAG_DISPLAY_ACTUAL, location_context, tag_search_context, wildcard, job_key = job_key )
else:
with self._MakeTemporaryIntegerTable( query_hash_ids, 'hash_id' ) as temp_table_name:
self._AnalyzeTempTable( temp_table_name )
wildcard_query_hash_ids = self._GetHashIdsFromWildcardComplexLocation( ClientTags.TAG_DISPLAY_ACTUAL, location_context, tag_search_context, wildcard, hash_ids = query_hash_ids, hash_ids_table_name = temp_table_name, job_key = job_key )
query_hash_ids = intersection_update_qhi( query_hash_ids, wildcard_query_hash_ids )
have_cross_referenced_file_locations = True
if len( query_hash_ids ) == 0:
return query_hash_ids
#
# OR round two--if file preds will not be fast, let's step in to reduce the file domain search space
if not done_or_predicates and not there_are_simple_files_info_preds_to_search_for:
query_hash_ids = do_or_preds( or_predicates, query_hash_ids )
have_cross_referenced_file_locations = True
done_or_predicates = True
# now the simple preds and desperate last shot to populate query_hash_ids
done_files_info_predicates = False
we_need_some_results = query_hash_ids is None
we_need_to_cross_reference = need_file_domain_cross_reference and not have_cross_referenced_file_locations
if we_need_some_results or we_need_to_cross_reference:
if location_context.IsAllKnownFiles():
query_hash_ids = intersection_update_qhi( query_hash_ids, self._GetHashIdsThatHaveTagsComplexLocation( ClientTags.TAG_DISPLAY_ACTUAL, location_context, tag_search_context, job_key = job_key ) )
else:
files_table_name = db_location_context.files_table_name
if len( files_info_predicates ) == 0:
files_info_predicates.insert( 0, '1=1' )
else:
# if a file is missing a files_info row, we can't search it with a file system pred. it is just unknown
files_table_name = '{} NATURAL JOIN files_info'.format( files_table_name )
if query_hash_ids is None:
query_hash_ids = intersection_update_qhi( query_hash_ids, self._STS( self._Execute( 'SELECT hash_id FROM {} WHERE {};'.format( files_table_name, ' AND '.join( files_info_predicates ) ) ) ) )
else:
if is_inbox and len( query_hash_ids ) == len( self.modules_files_metadata_basic.inbox_hash_ids ):
query_hash_ids = intersection_update_qhi( query_hash_ids, self._STS( self._Execute( 'SELECT hash_id FROM {} NATURAL JOIN {} WHERE {};'.format( 'file_inbox', files_table_name, ' AND '.join( files_info_predicates ) ) ) ) )
else:
with self._MakeTemporaryIntegerTable( query_hash_ids, 'hash_id' ) as temp_table_name:
self._AnalyzeTempTable( temp_table_name )
query_hash_ids = intersection_update_qhi( query_hash_ids, self._STS( self._Execute( 'SELECT hash_id FROM {} NATURAL JOIN {} WHERE {};'.format( temp_table_name, files_table_name, ' AND '.join( files_info_predicates ) ) ) ) )
have_cross_referenced_file_locations = True
done_files_info_predicates = True
# at this point, query_hash_ids has something in it
if 'hash' in simple_preds:
( search_hashes, search_hash_type, inclusive ) = simple_preds[ 'hash' ]
if not inclusive:
if search_hash_type == 'sha256':
matching_sha256_hashes = [ search_hash for search_hash in search_hashes if self.modules_hashes.HasHash( search_hash ) ]
else:
matching_sha256_hashes = self.modules_hashes.GetFileHashes( search_hashes, search_hash_type, 'sha256' )
specific_hash_ids = self.modules_hashes_local_cache.GetHashIds( matching_sha256_hashes )
query_hash_ids.difference_update( specific_hash_ids )
if 'has_icc_profile' in simple_preds:
has_icc_profile = simple_preds[ 'has_icc_profile' ]
has_icc_profile_has_ids = self.modules_files_metadata_basic.GetHasICCProfileHashIds( query_hash_ids )
if has_icc_profile:
query_hash_ids.intersection_update( has_icc_profile_has_ids )
else:
query_hash_ids.difference_update( has_icc_profile_has_ids )
if system_predicates.MustBeArchive():
query_hash_ids.difference_update( self.modules_files_metadata_basic.inbox_hash_ids )
if king_filter is not None and king_filter:
king_hash_ids = self.modules_files_duplicates.DuplicatesFilterKingHashIds( query_hash_ids )
query_hash_ids = intersection_update_qhi( query_hash_ids, king_hash_ids )
if there_are_simple_files_info_preds_to_search_for and not done_files_info_predicates:
with self._MakeTemporaryIntegerTable( query_hash_ids, 'hash_id' ) as temp_table_name:
self._AnalyzeTempTable( temp_table_name )
predicate_string = ' AND '.join( files_info_predicates )
select = 'SELECT hash_id FROM {} NATURAL JOIN files_info WHERE {};'.format( temp_table_name, predicate_string )
files_info_hash_ids = self._STI( self._Execute( select ) )
query_hash_ids = intersection_update_qhi( query_hash_ids, files_info_hash_ids )
done_files_info_predicates = True
if job_key.IsCancelled():
return set()
#
# OR round three--final chance to kick in, and the preferred one. query_hash_ids is now set, so this shouldn't be super slow for most scenarios
if not done_or_predicates:
query_hash_ids = do_or_preds( or_predicates, query_hash_ids )
done_or_predicates = True
# hide update files
if location_context.IsAllLocalFiles():
repo_update_hash_ids = set( self.modules_files_storage.GetCurrentHashIdsList( self.modules_services.local_update_service_id ) )
query_hash_ids.difference_update( repo_update_hash_ids )
# now subtract bad results
if len( tags_to_exclude ) + len( namespaces_to_exclude ) + len( wildcards_to_exclude ) > 0:
with self._MakeTemporaryIntegerTable( query_hash_ids, 'hash_id' ) as temp_table_name:
self._AnalyzeTempTable( temp_table_name )
for tag in tags_to_exclude:
unwanted_hash_ids = self._GetHashIdsFromTag( ClientTags.TAG_DISPLAY_ACTUAL, location_context, tag_search_context, tag, hash_ids = query_hash_ids, hash_ids_table_name = temp_table_name, job_key = job_key )
query_hash_ids.difference_update( unwanted_hash_ids )
if len( query_hash_ids ) == 0:
return query_hash_ids
self._ExecuteMany( 'DELETE FROM {} WHERE hash_id = ?;'.format( temp_table_name ), ( ( hash_id, ) for hash_id in unwanted_hash_ids ) )
for namespace in namespaces_to_exclude:
unwanted_hash_ids = self._GetHashIdsThatHaveTagsComplexLocation( ClientTags.TAG_DISPLAY_ACTUAL, location_context, tag_search_context, namespace_wildcard = namespace, hash_ids_table_name = temp_table_name, job_key = job_key )
query_hash_ids.difference_update( unwanted_hash_ids )
if len( query_hash_ids ) == 0:
return query_hash_ids
self._ExecuteMany( 'DELETE FROM {} WHERE hash_id = ?;'.format( temp_table_name ), ( ( hash_id, ) for hash_id in unwanted_hash_ids ) )
for wildcard in wildcards_to_exclude:
unwanted_hash_ids = self._GetHashIdsFromWildcardComplexLocation( ClientTags.TAG_DISPLAY_ACTUAL, location_context, tag_search_context, wildcard, hash_ids = query_hash_ids, hash_ids_table_name = temp_table_name, job_key = job_key )
query_hash_ids.difference_update( unwanted_hash_ids )
if len( query_hash_ids ) == 0:
return query_hash_ids
self._ExecuteMany( 'DELETE FROM {} WHERE hash_id = ?;'.format( temp_table_name ), ( ( hash_id, ) for hash_id in unwanted_hash_ids ) )
if job_key.IsCancelled():
return set()
#
( required_file_service_statuses, excluded_file_service_statuses ) = system_predicates.GetFileServiceStatuses()
for ( service_key, statuses ) in required_file_service_statuses.items():
service_id = self.modules_services.GetServiceId( service_key )
for status in statuses:
required_hash_ids = self.modules_files_storage.FilterHashIdsToStatus( service_id, query_hash_ids, status )
query_hash_ids = intersection_update_qhi( query_hash_ids, required_hash_ids )
for ( service_key, statuses ) in excluded_file_service_statuses.items():
service_id = self.modules_services.GetServiceId( service_key )
for status in statuses:
excluded_hash_ids = self.modules_files_storage.FilterHashIdsToStatus( service_id, query_hash_ids, status )
query_hash_ids.difference_update( excluded_hash_ids )
#
for ( operator, value, service_key ) in system_predicates.GetRatingsPredicates():
service_id = self.modules_services.GetServiceId( service_key )
if value == 'not rated':
query_hash_ids.difference_update( self._STI( self._Execute( 'SELECT hash_id FROM local_ratings WHERE service_id = ?;', ( service_id, ) ) ) )
if king_filter is not None and not king_filter:
king_hash_ids = self.modules_files_duplicates.DuplicatesFilterKingHashIds( query_hash_ids )
query_hash_ids.difference_update( king_hash_ids )
for ( operator, num_relationships, dupe_type ) in system_predicates.GetDuplicateRelationshipCountPredicates():
only_do_zero = ( operator in ( '=', CC.UNICODE_ALMOST_EQUAL_TO ) and num_relationships == 0 ) or ( operator == '<' and num_relationships == 1 )
include_zero = operator == '<'
if only_do_zero:
nonzero_hash_ids = self.modules_files_duplicates.DuplicatesGetHashIdsFromDuplicateCountPredicate( db_location_context, '>', 0, dupe_type )
query_hash_ids.difference_update( nonzero_hash_ids )
elif include_zero:
nonzero_hash_ids = self.modules_files_duplicates.DuplicatesGetHashIdsFromDuplicateCountPredicate( db_location_context, '>', 0, dupe_type )
zero_hash_ids = query_hash_ids.difference( nonzero_hash_ids )
accurate_except_zero_hash_ids = self.modules_files_duplicates.DuplicatesGetHashIdsFromDuplicateCountPredicate( db_location_context, operator, num_relationships, dupe_type )
hash_ids = zero_hash_ids.union( accurate_except_zero_hash_ids )
query_hash_ids = intersection_update_qhi( query_hash_ids, hash_ids )
min_num_notes = None
max_num_notes = None
if 'num_notes' in simple_preds:
min_num_notes = simple_preds[ 'num_notes' ]
max_num_notes = min_num_notes
else:
if 'min_num_notes' in simple_preds:
min_num_notes = simple_preds[ 'min_num_notes' ] + 1
if 'max_num_notes' in simple_preds:
max_num_notes = simple_preds[ 'max_num_notes' ] - 1
if min_num_notes is not None or max_num_notes is not None:
with self._MakeTemporaryIntegerTable( query_hash_ids, 'hash_id' ) as temp_table_name:
self._AnalyzeTempTable( temp_table_name )
num_notes_hash_ids = self._GetHashIdsFromNumNotes( min_num_notes, max_num_notes, temp_table_name )
query_hash_ids = intersection_update_qhi( query_hash_ids, num_notes_hash_ids )
if 'has_note_names' in simple_preds:
inclusive_note_names = simple_preds[ 'has_note_names' ]
for note_name in inclusive_note_names:
with self._MakeTemporaryIntegerTable( query_hash_ids, 'hash_id' ) as temp_table_name:
self._AnalyzeTempTable( temp_table_name )
notes_hash_ids = self._GetHashIdsFromNoteName( note_name, temp_table_name )
query_hash_ids = intersection_update_qhi( query_hash_ids, notes_hash_ids )
if 'not_has_note_names' in simple_preds:
exclusive_note_names = simple_preds[ 'not_has_note_names' ]
for note_name in exclusive_note_names:
with self._MakeTemporaryIntegerTable( query_hash_ids, 'hash_id' ) as temp_table_name:
self._AnalyzeTempTable( temp_table_name )
notes_hash_ids = self._GetHashIdsFromNoteName( note_name, temp_table_name )
query_hash_ids.difference_update( notes_hash_ids )
for ( view_type, viewing_locations, operator, viewing_value ) in system_predicates.GetFileViewingStatsPredicates():
only_do_zero = ( operator in ( '=', CC.UNICODE_ALMOST_EQUAL_TO ) and viewing_value == 0 ) or ( operator == '<' and viewing_value == 1 )
include_zero = operator == '<'
if only_do_zero:
nonzero_hash_ids = self._GetHashIdsFromFileViewingStatistics( view_type, viewing_locations, '>', 0 )
query_hash_ids.difference_update( nonzero_hash_ids )
elif include_zero:
nonzero_hash_ids = self._GetHashIdsFromFileViewingStatistics( view_type, viewing_locations, '>', 0 )
zero_hash_ids = query_hash_ids.difference( nonzero_hash_ids )
accurate_except_zero_hash_ids = self._GetHashIdsFromFileViewingStatistics( view_type, viewing_locations, operator, viewing_value )
hash_ids = zero_hash_ids.union( accurate_except_zero_hash_ids )
query_hash_ids = intersection_update_qhi( query_hash_ids, hash_ids )
if job_key.IsCancelled():
return set()
#
file_location_is_all_local = self.modules_services.LocationContextIsCoveredByCombinedLocalFiles( location_context )
file_location_is_all_combined_local_files_deleted = location_context.IsOneDomain() and CC.COMBINED_LOCAL_FILE_SERVICE_KEY in location_context.deleted_service_keys
must_be_local = system_predicates.MustBeLocal() or system_predicates.MustBeArchive()
must_not_be_local = system_predicates.MustNotBeLocal()
if file_location_is_all_local:
# if must be all local, we are great already
if must_not_be_local:
query_hash_ids = set()
elif file_location_is_all_combined_local_files_deleted:
if must_be_local:
query_hash_ids = set()
elif must_be_local or must_not_be_local:
if must_be_local:
query_hash_ids = self.modules_files_storage.FilterHashIdsToStatus( self.modules_services.combined_local_file_service_id, query_hash_ids, HC.CONTENT_STATUS_CURRENT )
elif must_not_be_local:
local_hash_ids = self.modules_files_storage.GetCurrentHashIdsList( self.modules_services.combined_local_file_service_id )
query_hash_ids.difference_update( local_hash_ids )
#
if 'known_url_rules' in simple_preds:
for ( operator, rule_type, rule ) in simple_preds[ 'known_url_rules' ]:
if rule_type == 'exact_match' or ( is_inbox and len( query_hash_ids ) == len( self.modules_files_metadata_basic.inbox_hash_ids ) ):
url_hash_ids = self._GetHashIdsFromURLRule( rule_type, rule )
else:
with self._MakeTemporaryIntegerTable( query_hash_ids, 'hash_id' ) as temp_table_name:
self._AnalyzeTempTable( temp_table_name )
url_hash_ids = self._GetHashIdsFromURLRule( rule_type, rule, hash_ids = query_hash_ids, hash_ids_table_name = temp_table_name )
if operator: # inclusive
query_hash_ids = intersection_update_qhi( query_hash_ids, url_hash_ids )
else:
query_hash_ids.difference_update( url_hash_ids )
#
namespaces_to_tests = system_predicates.GetNumTagsNumberTests()
for ( namespace, number_tests ) in namespaces_to_tests.items():
is_zero = True in ( number_test.IsZero() for number_test in number_tests )
is_anything_but_zero = True in ( number_test.IsAnythingButZero() for number_test in number_tests )
specific_number_tests = [ number_test for number_test in number_tests if not ( number_test.IsZero() or number_test.IsAnythingButZero() ) ]
lambdas = [ number_test.GetLambda() for number_test in specific_number_tests ]
megalambda = lambda x: False not in ( l( x ) for l in lambdas )
with self._MakeTemporaryIntegerTable( query_hash_ids, 'hash_id' ) as temp_table_name:
self._AnalyzeTempTable( temp_table_name )
nonzero_tag_query_hash_ids = set()
nonzero_tag_query_hash_ids_populated = False
if is_zero or is_anything_but_zero:
nonzero_tag_query_hash_ids = self._GetHashIdsThatHaveTagsComplexLocation( ClientTags.TAG_DISPLAY_ACTUAL, location_context, tag_search_context, hash_ids_table_name = temp_table_name, namespace_wildcard = namespace, job_key = job_key )
nonzero_tag_query_hash_ids_populated = True
if is_zero:
query_hash_ids.difference_update( nonzero_tag_query_hash_ids )
if is_anything_but_zero:
query_hash_ids = intersection_update_qhi( query_hash_ids, nonzero_tag_query_hash_ids )
if len( specific_number_tests ) > 0:
hash_id_tag_counts = self._GetHashIdsAndNonZeroTagCounts( ClientTags.TAG_DISPLAY_ACTUAL, location_context, tag_search_context, query_hash_ids, namespace_wildcard = namespace, job_key = job_key )
good_tag_count_hash_ids = { hash_id for ( hash_id, count ) in hash_id_tag_counts if megalambda( count ) }
if megalambda( 0 ): # files with zero count are needed
if not nonzero_tag_query_hash_ids_populated:
nonzero_tag_query_hash_ids = { hash_id for ( hash_id, count ) in hash_id_tag_counts }
zero_hash_ids = query_hash_ids.difference( nonzero_tag_query_hash_ids )
good_tag_count_hash_ids.update( zero_hash_ids )
query_hash_ids = intersection_update_qhi( query_hash_ids, good_tag_count_hash_ids )
if job_key.IsCancelled():
return set()
#
if 'min_tag_as_number' in simple_preds:
( namespace, num ) = simple_preds[ 'min_tag_as_number' ]
with self._MakeTemporaryIntegerTable( query_hash_ids, 'hash_id' ) as temp_table_name:
self._AnalyzeTempTable( temp_table_name )
good_hash_ids = self._GetHashIdsThatHaveTagAsNumComplexLocation( ClientTags.TAG_DISPLAY_ACTUAL, location_context, tag_search_context, namespace, num, '>', hash_ids = query_hash_ids, hash_ids_table_name = temp_table_name, job_key = job_key )
query_hash_ids = intersection_update_qhi( query_hash_ids, good_hash_ids )
if 'max_tag_as_number' in simple_preds:
( namespace, num ) = simple_preds[ 'max_tag_as_number' ]
with self._MakeTemporaryIntegerTable( query_hash_ids, 'hash_id' ) as temp_table_name:
self._AnalyzeTempTable( temp_table_name )
good_hash_ids = self._GetHashIdsThatHaveTagAsNumComplexLocation( ClientTags.TAG_DISPLAY_ACTUAL, location_context, tag_search_context, namespace, num, '<', hash_ids = query_hash_ids, hash_ids_table_name = temp_table_name, job_key = job_key )
query_hash_ids = intersection_update_qhi( query_hash_ids, good_hash_ids )
if job_key.IsCancelled():
return set()
#
query_hash_ids = list( query_hash_ids )
#
limit = system_predicates.GetLimit( apply_implicit_limit = apply_implicit_limit )
we_are_applying_limit = limit is not None and limit < len( query_hash_ids )
if we_are_applying_limit and limit_sort_by is not None and sort_by is None:
sort_by = limit_sort_by
did_sort = False
if sort_by is not None and not location_context.IsAllKnownFiles():
( did_sort, query_hash_ids ) = self._TryToSortHashIds( location_context, query_hash_ids, sort_by )
#
if we_are_applying_limit:
if not did_sort:
query_hash_ids = random.sample( query_hash_ids, limit )
else:
query_hash_ids = query_hash_ids[:limit]
return query_hash_ids
def _GetHashIdsFromSubtagIds( self, tag_display_type: int, file_service_key, tag_search_context: ClientSearch.TagSearchContext, subtag_ids, hash_ids = None, hash_ids_table_name = None, job_key = None ):
    """Map subtag ids to full tag ids within this (file, tag) service pair, then fetch the matching hash ids.
    
    Thin wrapper: resolves service keys to ids, expands subtag_ids via
    _GetTagIdsFromSubtagIds, and delegates to _GetHashIdsFromTagIds,
    returning whatever that returns.
    """
    tag_service_id = self.modules_services.GetServiceId( tag_search_context.service_key )
    file_service_id = self.modules_services.GetServiceId( file_service_key )
    matching_tag_ids = self._GetTagIdsFromSubtagIds( file_service_id, tag_service_id, subtag_ids, job_key = job_key )
    return self._GetHashIdsFromTagIds( tag_display_type, file_service_key, tag_search_context, matching_tag_ids, hash_ids = hash_ids, hash_ids_table_name = hash_ids_table_name, job_key = job_key )
def _GetHashIdsFromSubtagIdsTable( self, tag_display_type: int, file_service_key, tag_search_context: ClientSearch.TagSearchContext, subtag_ids_table_name, hash_ids = None, hash_ids_table_name = None, job_key = None ):
    """As _GetHashIdsFromSubtagIds, but reads the subtag ids from an existing table.
    
    Resolves service keys to ids, expands the table of subtag ids to full tag ids
    via _GetTagIdsFromSubtagIdsTable, and delegates to _GetHashIdsFromTagIds.
    """
    tag_service_id = self.modules_services.GetServiceId( tag_search_context.service_key )
    file_service_id = self.modules_services.GetServiceId( file_service_key )
    matching_tag_ids = self._GetTagIdsFromSubtagIdsTable( file_service_id, tag_service_id, subtag_ids_table_name, job_key = job_key )
    return self._GetHashIdsFromTagIds( tag_display_type, file_service_key, tag_search_context, matching_tag_ids, hash_ids = hash_ids, hash_ids_table_name = hash_ids_table_name, job_key = job_key )
def _GetHashIdsFromTag( self, tag_display_type: int, location_context: ClientLocation.LocationContext, tag_search_context: ClientSearch.TagSearchContext, tag, hash_ids = None, hash_ids_table_name = None, allow_unnamespaced_to_fetch_namespaced = True, job_key = None ):
    """Return the set of hash_ids that have the given tag in the given file/tag domain.
    
    An unnamespaced search tag also matches namespaced tags with the same subtag
    (unless allow_unnamespaced_to_fetch_namespaced is False). If the per-service
    searches do not fully cross-reference the location, results are filtered
    against it afterwards.
    """
    ( file_service_keys, file_location_is_cross_referenced ) = location_context.GetCoveringCurrentFileServiceKeys()
    # a caller-provided hash table has already been cross-referenced upstream
    if not file_location_is_cross_referenced and hash_ids_table_name is not None:
        file_location_is_cross_referenced = True
    ( namespace, subtag ) = HydrusTags.SplitTag( tag )
    # check existence _before_ fetching the id: an unknown subtag means no results,
    # and asking for the id first is at best wasted work--and, if GetSubtagId is
    # get-or-create (the convention for these Get*Id calls), it would register the
    # subtag and defeat this early-out entirely
    if not self.modules_tags.SubtagExists( subtag ):
        return set()
    subtag_id = self.modules_tags.GetSubtagId( subtag )
    tag_service_id = self.modules_services.GetServiceId( tag_search_context.service_key )
    results = set()
    for file_service_key in file_service_keys:
        if namespace == '' and allow_unnamespaced_to_fetch_namespaced:
            # e.g. 'samus' should also find 'character:samus'--search by subtag across all namespaces
            file_service_id = self.modules_services.GetServiceId( file_service_key )
            tag_ids = self._GetTagIdsFromSubtagIds( file_service_id, tag_service_id, ( subtag_id, ) )
        else:
            if not self.modules_tags.TagExists( tag ):
                return set()
            tag_id = self.modules_tags.GetTagId( tag )
            tag_ids = ( tag_id, )
        some_results = self._GetHashIdsFromTagIds( tag_display_type, file_service_key, tag_search_context, tag_ids, hash_ids = hash_ids, hash_ids_table_name = hash_ids_table_name, job_key = job_key )
        if len( results ) == 0:
            # adopt the first batch wholesale rather than copying it
            results = some_results
        else:
            results.update( some_results )
    if not file_location_is_cross_referenced:
        results = self.modules_files_storage.FilterHashIds( location_context, results )
    return results
def _GetHashIdsFromTagIds( self, tag_display_type: int, file_service_key: bytes, tag_search_context: ClientSearch.TagSearchContext, tag_ids: typing.Collection[ int ], hash_ids = None, hash_ids_table_name = None, job_key = None ):
    
    # Resolve a collection of tag_ids to the hash_ids that carry any of those tags
    # in the given file/tag domain. When the caller supplies a table of known
    # hash_ids, a cost estimate decides whether to drive the join from the file
    # side instead of the tag side.
    
    do_hash_table_join = False
    
    if hash_ids_table_name is not None and hash_ids is not None:
        
        tag_service_id = self.modules_services.GetServiceId( tag_search_context.service_key )
        file_service_id = self.modules_services.GetServiceId( file_service_key )
        
        estimated_count = self._GetAutocompleteCountEstimate( tag_display_type, tag_service_id, file_service_id, tag_ids, tag_search_context.include_current_tags, tag_search_context.include_pending_tags )
        
        # experimentally, file lookups are about 2.5x as slow as tag lookups
        if ClientDBMappingsStorage.DoingAFileJoinTagSearchIsFaster( len( hash_ids ), estimated_count ):
            
            do_hash_table_join = True
            
        
    
    result_hash_ids = set()
    
    # one mappings table per (tag service, current/pending) combination in play
    table_names = self._GetMappingTables( tag_display_type, file_service_key, tag_search_context )
    
    cancelled_hook = None
    
    if job_key is not None:
        
        cancelled_hook = job_key.IsCancelled
        
    
    if len( tag_ids ) == 1:
        
        # single tag: no temp table needed, just bind the tag_id as a parameter
        ( tag_id, ) = tag_ids
        
        if do_hash_table_join:
            
            # temp hashes to mappings
            queries = [ 'SELECT hash_id FROM {} CROSS JOIN {} USING ( hash_id ) WHERE tag_id = ?'.format( hash_ids_table_name, table_name ) for table_name in table_names ]
            
        else:
            
            queries = [ 'SELECT hash_id FROM {} WHERE tag_id = ?;'.format( table_name ) for table_name in table_names ]
            
        
        for query in queries:
            
            cursor = self._Execute( query, ( tag_id, ) )
            
            # read in cancellable batches so a cancelled job bails out promptly
            result_hash_ids.update( self._STI( HydrusDB.ReadFromCancellableCursor( cursor, 1024, cancelled_hook ) ) )
            
        
    else:
        
        with self._MakeTemporaryIntegerTable( tag_ids, 'tag_id' ) as temp_tag_ids_table_name:
            
            if do_hash_table_join:
                
                # temp hashes to mappings to temp tags
                # old method, does not do EXISTS efficiently, it makes a list instead and checks that
                # queries = [ 'SELECT hash_id FROM {} WHERE EXISTS ( SELECT 1 FROM {} CROSS JOIN {} USING ( tag_id ) WHERE {}.hash_id = {}.hash_id );'.format( hash_ids_table_name, table_name, temp_tag_ids_table_name, table_name, hash_ids_table_name ) for table_name in table_names ]
                # new method, this seems to actually do the correlated scalar subquery, although it does seem to be sqlite voodoo
                queries = [ 'SELECT hash_id FROM {} WHERE EXISTS ( SELECT 1 FROM {} WHERE {}.hash_id = {}.hash_id AND EXISTS ( SELECT 1 FROM {} WHERE {}.tag_id = {}.tag_id ) );'.format( hash_ids_table_name, table_name, table_name, hash_ids_table_name, temp_tag_ids_table_name, table_name, temp_tag_ids_table_name ) for table_name in table_names ]
                
            else:
                
                # temp tags to mappings
                queries = [ 'SELECT hash_id FROM {} CROSS JOIN {} USING ( tag_id );'.format( temp_tag_ids_table_name, table_name ) for table_name in table_names ]
                
            
            for query in queries:
                
                cursor = self._Execute( query )
                
                result_hash_ids.update( self._STI( HydrusDB.ReadFromCancellableCursor( cursor, 1024, cancelled_hook ) ) )
                
            
        
    
    return result_hash_ids
def _GetHashIdsFromURLRule( self, rule_type, rule, hash_ids = None, hash_ids_table_name = None ):
    """
    Fetch hash_ids of files with a known URL matching the given rule.
    
    rule_type selects the matching strategy:
      'exact_match'             - rule is a URL string; simple equality
      'url_class'/'url_match'   - rule is a URL Class object; domain prefilter, then class match
      'domain'                  - rule is a domain; matches it and its subdomains
      anything else             - rule is treated as a regex over the full URL
    
    When a reasonably small hash_ids set/table is supplied (< 50000), queries join
    from it to shrink the url scan.
    """
    
    if rule_type == 'exact_match':
        
        url = rule
        
        table_name = 'url_map NATURAL JOIN urls'
        
        if hash_ids_table_name is not None and hash_ids is not None and len( hash_ids ) < 50000:
            
            table_name += ' NATURAL JOIN {}'.format( hash_ids_table_name )
            
        
        select = 'SELECT hash_id FROM {} WHERE url = ?;'.format( table_name )
        
        result_hash_ids = self._STS( self._Execute( select, ( url, ) ) )
        
        return result_hash_ids
        
    elif rule_type in ( 'url_class', 'url_match' ):
        
        url_class = rule
        
        domain = url_class.GetDomain()
        
        if url_class.MatchesSubdomains():
            
            domain_ids = self.modules_urls.GetURLDomainAndSubdomainIds( domain )
            
        else:
            
            domain_ids = self.modules_urls.GetURLDomainAndSubdomainIds( domain, only_www_subdomains = True )
            
        
        result_hash_ids = set()
        
        with self._MakeTemporaryIntegerTable( domain_ids, 'domain_id' ) as temp_domain_table_name:
            
            if hash_ids_table_name is not None and hash_ids is not None and len( hash_ids ) < 50000:
                
                # if we aren't gonk mode with the number of files, temp hashes to url map to urls to domains
                # next step here is irl profiling and a domain->url_count cache so I can decide whether to do this or not based on url domain count
                select = 'SELECT hash_id, url FROM {} CROSS JOIN url_map USING ( hash_id ) CROSS JOIN urls USING ( url_id ) CROSS JOIN {} USING ( domain_id );'.format( hash_ids_table_name, temp_domain_table_name )
                
            else:
                
                # domains to urls to url map
                select = 'SELECT hash_id, url FROM {} CROSS JOIN urls USING ( domain_id ) CROSS JOIN url_map USING ( url_id );'.format( temp_domain_table_name )
                
            
            for ( hash_id, url ) in self._Execute( select ):
                
                # this is actually insufficient, as more detailed url classes may match
                if hash_id not in result_hash_ids and url_class.Matches( url ):
                    
                    result_hash_ids.add( hash_id )
                    
                
            
        
        return result_hash_ids
        
    elif rule_type == 'domain':
        
        # BUGFIX: this was `rule_type in 'domain'`, a substring test that also
        # matched e.g. 'dom', 'main', and '' -- equality is what is meant here.
        
        domain = rule
        
        # if we search for site.com, we also want artist.site.com or www.site.com or cdn2.site.com
        domain_ids = self.modules_urls.GetURLDomainAndSubdomainIds( domain )
        
        result_hash_ids = set()
        
        with self._MakeTemporaryIntegerTable( domain_ids, 'domain_id' ) as temp_domain_table_name:
            
            if hash_ids_table_name is not None and hash_ids is not None and len( hash_ids ) < 50000:
                
                # if we aren't gonk mode with the number of files, temp hashes to url map to urls to domains
                # next step here is irl profiling and a domain->url_count cache so I can decide whether to do this or not based on url domain count
                select = 'SELECT hash_id FROM {} CROSS JOIN url_map USING ( hash_id ) CROSS JOIN urls USING ( url_id ) CROSS JOIN {} USING ( domain_id )'.format( hash_ids_table_name, temp_domain_table_name )
                
            else:
                
                # domains to urls to url map
                select = 'SELECT hash_id FROM {} CROSS JOIN urls USING ( domain_id ) CROSS JOIN url_map USING ( url_id );'.format( temp_domain_table_name )
                
            
            result_hash_ids = self._STS( self._Execute( select ) )
            
        
        return result_hash_ids
        
    else:
        
        regex = rule
        
        if hash_ids_table_name is not None and hash_ids is not None and len( hash_ids ) < 50000:
            
            # if we aren't gonk mode with the number of files, temp hashes to url map to urls
            # next step here is irl profiling and a domain->url_count cache so I can decide whether to do this or not based on _TOTAL_ url count
            select = 'SELECT hash_id, url FROM {} CROSS JOIN url_map USING ( hash_id ) CROSS JOIN urls USING ( url_id );'.format( hash_ids_table_name )
            
        else:
            
            select = 'SELECT hash_id, url FROM url_map NATURAL JOIN urls;'
            
        
        result_hash_ids = set()
        
        for ( hash_id, url ) in self._Execute( select ):
            
            # skip the (potentially expensive) regex once a hash is already in
            if hash_id not in result_hash_ids and re.search( regex, url ) is not None:
                
                result_hash_ids.add( hash_id )
                
            
        
        return result_hash_ids
def _GetHashIdsFromWildcardComplexLocation( self, tag_display_type: int, location_context: ClientLocation.LocationContext, tag_search_context: ClientSearch.TagSearchContext, wildcard, hash_ids = None, hash_ids_table_name = None, job_key = None ):
    
    # Resolve a (possibly namespaced) wildcard tag search across every file
    # service covering the location, unioning the per-service results.
    
    ( namespace_wildcard, subtag_wildcard ) = HydrusTags.SplitTag( wildcard )
    
    if namespace_wildcard in ( '*', '' ):
        
        # 'any namespace' -- drop the namespace constraint entirely
        namespace_wildcard = None
        
    
    if subtag_wildcard == '*':
        
        # subtag is unconstrained, so this degenerates to 'has (namespaced) tags'
        return self._GetHashIdsThatHaveTagsComplexLocation( tag_display_type, location_context, tag_search_context, namespace_wildcard = namespace_wildcard, hash_ids_table_name = hash_ids_table_name, job_key = job_key )
        
    
    results = set()
    
    ( file_service_keys, file_location_is_cross_referenced ) = location_context.GetCoveringCurrentFileServiceKeys()
    
    # a caller-supplied hash_ids table already pins the file domain
    if not file_location_is_cross_referenced and hash_ids_table_name is not None:
        
        file_location_is_cross_referenced = True
        
    
    if namespace_wildcard is None:
        
        possible_namespace_ids = []
        
    else:
        
        possible_namespace_ids = self.modules_tag_search.GetNamespaceIdsFromWildcard( namespace_wildcard )
        
        if len( possible_namespace_ids ) == 0:
            
            # a namespace was requested but none match it; nothing can match
            return set()
            
        
    
    with self._MakeTemporaryIntegerTable( possible_namespace_ids, 'namespace_id' ) as temp_namespace_ids_table_name:
        
        # only pass the namespace table down when a namespace is actually constrained
        if namespace_wildcard is None:
            
            namespace_ids_table_name = None
            
        else:
            
            namespace_ids_table_name = temp_namespace_ids_table_name
            
        
        for file_service_key in file_service_keys:
            
            some_results = self._GetHashIdsFromWildcardSimpleLocation( tag_display_type, file_service_key, tag_search_context, subtag_wildcard, namespace_ids_table_name = namespace_ids_table_name, hash_ids = hash_ids, hash_ids_table_name = hash_ids_table_name, job_key = job_key )
            
            # adopt the first result set without copying, then union the rest
            if len( results ) == 0:
                
                results = some_results
                
            else:
                
                results.update( some_results )
                
            
        
    
    if not file_location_is_cross_referenced:
        
        results = self.modules_files_storage.FilterHashIds( location_context, results )
        
    
    return results
def _GetHashIdsFromWildcardSimpleLocation( self, tag_display_type: int, file_service_key: bytes, tag_search_context: ClientSearch.TagSearchContext, subtag_wildcard, namespace_ids_table_name = None, hash_ids = None, hash_ids_table_name = None, job_key = None ):
    
    # Materialise every subtag_id matching the wildcard into a temp table, then
    # resolve that table (optionally intersected with a namespace id table) down
    # to hash_ids for this single file service.
    
    with self._MakeTemporaryIntegerTable( [], 'subtag_id' ) as matching_subtag_ids_table_name:
        
        file_service_id = self.modules_services.GetServiceId( file_service_key )
        tag_service_id = self.modules_services.GetServiceId( tag_search_context.service_key )
        
        self.modules_tag_search.GetSubtagIdsFromWildcardIntoTable( file_service_id, tag_service_id, subtag_wildcard, matching_subtag_ids_table_name, job_key = job_key )
        
        if namespace_ids_table_name is not None:
            
            return self._GetHashIdsFromNamespaceIdsSubtagIdsTables( tag_display_type, file_service_key, tag_search_context, namespace_ids_table_name, matching_subtag_ids_table_name, hash_ids = hash_ids, hash_ids_table_name = hash_ids_table_name, job_key = job_key )
            
        
        return self._GetHashIdsFromSubtagIdsTable( tag_display_type, file_service_key, tag_search_context, matching_subtag_ids_table_name, hash_ids = hash_ids, hash_ids_table_name = hash_ids_table_name, job_key = job_key )
def _GetHashIdsThatHaveTagsComplexLocation( self, tag_display_type: int, location_context: ClientLocation.LocationContext, tag_search_context: ClientSearch.TagSearchContext, namespace_wildcard = None, hash_ids_table_name = None, job_key = None ):
    
    # 'has tags' (optionally 'has namespace:anything') search, unioned across all
    # file services covering the location.
    
    if not location_context.SearchesAnything():
        
        return set()
        
    
    if namespace_wildcard == '*':
        
        # 'any namespace' is no namespace constraint at all
        namespace_wildcard = None
        
    
    if namespace_wildcard is None:
        
        possible_namespace_ids = []
        
    else:
        
        possible_namespace_ids = self.modules_tag_search.GetNamespaceIdsFromWildcard( namespace_wildcard )
        
        if len( possible_namespace_ids ) == 0:
            
            # a namespace was requested but none match it; nothing can match
            return set()
            
        
    
    results = set()
    
    with self._MakeTemporaryIntegerTable( possible_namespace_ids, 'namespace_id' ) as temp_namespace_ids_table_name:
        
        # only pass the namespace table down when a namespace is actually constrained
        if namespace_wildcard is None:
            
            namespace_ids_table_name = None
            
        else:
            
            namespace_ids_table_name = temp_namespace_ids_table_name
            
        
        ( file_service_keys, file_location_is_cross_referenced ) = location_context.GetCoveringCurrentFileServiceKeys()
        
        # a caller-supplied hash_ids table already pins the file domain
        if not file_location_is_cross_referenced and hash_ids_table_name is not None:
            
            file_location_is_cross_referenced = True
            
        
        for file_service_key in file_service_keys:
            
            some_results = self._GetHashIdsThatHaveTagsSimpleLocation( tag_display_type, file_service_key, tag_search_context, namespace_ids_table_name = namespace_ids_table_name, hash_ids_table_name = hash_ids_table_name, job_key = job_key )
            
            # adopt the first result set without copying, then union the rest
            if len( results ) == 0:
                
                results = some_results
                
            else:
                
                results.update( some_results )
                
            
        
    
    if not file_location_is_cross_referenced:
        
        results = self.modules_files_storage.FilterHashIds( location_context, results )
        
    
    return results
def _GetHashIdsThatHaveTagsSimpleLocation( self, tag_display_type: int, file_service_key: bytes, tag_search_context: ClientSearch.TagSearchContext, namespace_ids_table_name = None, hash_ids_table_name = None, job_key = None ):
    
    # hash_ids in one file domain that have at least one (optionally namespaced)
    # tag. The query shape depends on which filter tables are available.
    
    mapping_and_tag_table_names = self._GetMappingAndTagTables( tag_display_type, file_service_key, tag_search_context )
    
    if hash_ids_table_name is None:
        
        if namespace_ids_table_name is None:
            
            # hellmode
            queries = [ 'SELECT DISTINCT hash_id FROM {};'.format( mappings_table_name ) for ( mappings_table_name, tags_table_name ) in mapping_and_tag_table_names ]
            
        else:
            
            # temp namespaces to tags to mappings
            queries = [ 'SELECT DISTINCT hash_id FROM {} CROSS JOIN {} USING ( namespace_id ) CROSS JOIN {} USING ( tag_id );'.format( namespace_ids_table_name, tags_table_name, mappings_table_name ) for ( mappings_table_name, tags_table_name ) in mapping_and_tag_table_names ]
            
        
    else:
        
        if namespace_ids_table_name is None:
            
            # correlated EXISTS: one probe per candidate hash
            queries = [ 'SELECT hash_id FROM {} WHERE EXISTS ( SELECT 1 FROM {} WHERE {}.hash_id = {}.hash_id );'.format( hash_ids_table_name, mappings_table_name, mappings_table_name, hash_ids_table_name ) for ( mappings_table_name, tags_table_name ) in mapping_and_tag_table_names ]
            
        else:
            
            # temp hashes to mappings to tags to temp namespaces
            # this was originally a 'WHERE EXISTS' thing, but doing that on a three way cross join is too complex for that to work well
            # let's hope DISTINCT can save time too
            queries = [ 'SELECT DISTINCT hash_id FROM {} CROSS JOIN {} USING ( hash_id ) CROSS JOIN {} USING ( tag_id ) CROSS JOIN {} USING ( namespace_id );'.format( hash_ids_table_name, mappings_table_name, tags_table_name, namespace_ids_table_name ) for ( mappings_table_name, tags_table_name ) in mapping_and_tag_table_names ]
            
        
    
    cancelled_hook = None
    
    if job_key is not None:
        
        cancelled_hook = job_key.IsCancelled
        
    
    nonzero_tag_hash_ids = set()
    
    for query in queries:
        
        cursor = self._Execute( query )
        
        # read in cancellable batches; bail out with an empty set on cancellation
        nonzero_tag_hash_ids.update( self._STI( HydrusDB.ReadFromCancellableCursor( cursor, 10240, cancelled_hook ) ) )
        
        if job_key is not None and job_key.IsCancelled():
            
            return set()
            
        
    
    return nonzero_tag_hash_ids
def _GetHashIdsThatHaveTagAsNumComplexLocation( self, tag_display_type: int, location_context: ClientLocation.LocationContext, tag_search_context: ClientSearch.TagSearchContext, namespace, num, operator, hash_ids = None, hash_ids_table_name = None, job_key = None ):
    
    # Union the per-file-service results of a 'tag as number' search, then filter
    # back down to the location if it could not be cross-referenced up front.
    
    if not location_context.SearchesAnything():
        
        return set()
        
    
    ( file_service_keys, file_location_is_cross_referenced ) = location_context.GetCoveringCurrentFileServiceKeys()
    
    # an explicit hash_ids table already constrains the file domain
    file_location_is_cross_referenced = file_location_is_cross_referenced or hash_ids_table_name is not None
    
    results = set()
    
    for file_service_key in file_service_keys:
        
        some_results = self._GetHashIdsThatHaveTagAsNumSimpleLocation( tag_display_type, file_service_key, tag_search_context, namespace, num, operator, hash_ids = hash_ids, hash_ids_table_name = hash_ids_table_name, job_key = job_key )
        
        # first set is adopted as-is (no copy); later ones are unioned in
        if results:
            
            results.update( some_results )
            
        else:
            
            results = some_results
            
        
    
    if not file_location_is_cross_referenced:
        
        results = self.modules_files_storage.FilterHashIds( location_context, results )
        
    
    return results
def _GetHashIdsThatHaveTagAsNumSimpleLocation( self, tag_display_type: int, file_service_key: bytes, tag_search_context: ClientSearch.TagSearchContext, namespace, num, operator, hash_ids = None, hash_ids_table_name = None, job_key = None ):
    
    # Gather the subtag_ids that satisfy the numeric (operator, num) test on each
    # tag service in play, then resolve them to hash_ids, optionally restricted
    # to a namespace.
    
    file_service_id = self.modules_services.GetServiceId( file_service_key )
    tag_service_id = self.modules_services.GetServiceId( tag_search_context.service_key )
    
    # 'all known tags' expands to every real tag service
    if tag_service_id == self.modules_services.combined_tag_service_id:
        
        search_tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
        
    else:
        
        search_tag_service_ids = ( tag_service_id, )
        
    
    possible_subtag_ids = set()
    
    for search_tag_service_id in search_tag_service_ids:
        
        possible_subtag_ids.update( self.modules_tag_search.GetTagAsNumSubtagIds( file_service_id, search_tag_service_id, operator, num ) )
        
    
    if namespace != '':
        
        possible_namespace_ids = { self.modules_tags.GetNamespaceId( namespace ) }
        
        return self._GetHashIdsFromNamespaceIdsSubtagIds( tag_display_type, file_service_key, tag_search_context, possible_namespace_ids, possible_subtag_ids, hash_ids = hash_ids, hash_ids_table_name = hash_ids_table_name, job_key = job_key )
        
    
    return self._GetHashIdsFromSubtagIds( tag_display_type, file_service_key, tag_search_context, possible_subtag_ids, hash_ids = hash_ids, hash_ids_table_name = hash_ids_table_name, job_key = job_key )
def _GetHashIdStatus( self, hash_id, prefix = '' ) -> ClientImportFiles.FileImportStatus:
    
    # Work out the import status of a single known hash_id, in priority order:
    # previously deleted, currently trashed, already imported, otherwise unknown.
    # 'prefix' is prepended to the human-readable note.
    
    if prefix != '':
        
        prefix += ': '
        
    
    hash = self.modules_hashes_local_cache.GetHash( hash_id )
    
    ( is_deleted, timestamp, file_deletion_reason ) = self.modules_files_storage.GetDeletionStatus( self.modules_services.combined_local_file_service_id, hash_id )
    
    if is_deleted:
        
        # timestamp can be None for files deleted before delete times were recorded
        if timestamp is None:
            
            note = 'Deleted from the client before delete times were tracked ({}).'.format( file_deletion_reason )
            
        else:
            
            note = 'Deleted from the client {} ({}), which was {} before this check.'.format( HydrusData.ConvertTimestampToPrettyTime( timestamp ), file_deletion_reason, HydrusData.BaseTimestampToPrettyTimeDelta( timestamp ) )
            
        
        return ClientImportFiles.FileImportStatus( CC.STATUS_DELETED, hash, note = prefix + note )
        
    
    # not deleted from the combined local domain; is it sitting in the trash?
    result = self.modules_files_storage.GetCurrentTimestamp( self.modules_services.trash_service_id, hash_id )
    
    if result is not None:
        
        timestamp = result
        
        # file_deletion_reason still comes from the GetDeletionStatus call above
        note = 'Currently in trash ({}). Sent there at {}, which was {} before this check.'.format( file_deletion_reason, HydrusData.ConvertTimestampToPrettyTime( timestamp ), HydrusData.BaseTimestampToPrettyTimeDelta( timestamp, just_now_threshold = 0 ) )
        
        return ClientImportFiles.FileImportStatus( CC.STATUS_DELETED, hash, note = prefix + note )
        
    
    # still a current local file?
    result = self.modules_files_storage.GetCurrentTimestamp( self.modules_services.combined_local_file_service_id, hash_id )
    
    if result is not None:
        
        timestamp = result
        
        mime = self.modules_files_metadata_basic.GetMime( hash_id )
        
        note = 'Imported at {}, which was {} before this check.'.format( HydrusData.ConvertTimestampToPrettyTime( timestamp ), HydrusData.BaseTimestampToPrettyTimeDelta( timestamp, just_now_threshold = 0 ) )
        
        return ClientImportFiles.FileImportStatus( CC.STATUS_SUCCESSFUL_BUT_REDUNDANT, hash, mime = mime, note = prefix + note )
        
    
    return ClientImportFiles.FileImportStatus( CC.STATUS_UNKNOWN, hash )
def _GetHashStatus( self, hash_type, hash, prefix = None ) -> ClientImportFiles.FileImportStatus:
    
    # Resolve a hash of any supported type to a FileImportStatus. Hashes the
    # client has never seen get an 'unknown' status; known hashes defer to
    # _GetHashIdStatus for the full deleted/trashed/imported breakdown.
    
    if prefix is None:
        
        prefix = hash_type + ' recognised'
        
    
    if hash_type == 'sha256':
        
        if not self.modules_hashes.HasHash( hash ):
            
            # we can still attach the sha256 itself to the unknown status
            unknown_status = ClientImportFiles.FileImportStatus.STATICGetUnknownStatus()
            
            unknown_status.hash = hash
            
            return unknown_status
            
        
        hash_id = self.modules_hashes_local_cache.GetHashId( hash )
        
    else:
        
        try:
            
            hash_id = self.modules_hashes.GetHashIdFromExtraHash( hash_type, hash )
            
        except HydrusExceptions.DataMissing:
            
            # no mapping for this md5/sha1/sha512, so we know nothing about it
            return ClientImportFiles.FileImportStatus.STATICGetUnknownStatus()
            
        
    
    return self._GetHashIdStatus( hash_id, prefix = prefix )
def _GetIdealClientFilesLocations( self ):
    
    # Read the configured ideal file-storage weights and the optional thumbnail
    # override location, converting stored portable paths to absolute paths.
    
    locations_to_ideal_weights = {
        HydrusPaths.ConvertPortablePathToAbsPath( portable_location ) : weight
        for ( portable_location, weight ) in self._Execute( 'SELECT location, weight FROM ideal_client_files_locations;' )
    }
    
    result = self._Execute( 'SELECT location FROM ideal_thumbnail_override_location;' ).fetchone()
    
    if result is None:
        
        abs_ideal_thumbnail_override_location = None
        
    else:
        
        ( portable_override_location, ) = result
        
        abs_ideal_thumbnail_override_location = HydrusPaths.ConvertPortablePathToAbsPath( portable_override_location )
        
    
    return ( locations_to_ideal_weights, abs_ideal_thumbnail_override_location )
def _GetMaintenanceDue( self, stop_time ):
    
    # Summarise maintenance work currently due as human-readable strings.
    # stop_time is part of the established signature; this summary pass does not
    # consult it.
    
    jobs_to_do = []
    
    # analyze
    names_to_analyze = self.modules_db_maintenance.GetTableNamesDueAnalysis()
    
    if names_to_analyze:
        
        jobs_to_do.append( 'analyze {} table_names'.format( HydrusData.ToHumanInt( len( names_to_analyze ) ) ) )
        
    
    if self.modules_similar_files.MaintenanceDue():
        
        jobs_to_do.append( 'similar files work' )
        
    
    return jobs_to_do
def _GetMappingTables( self, tag_display_type, file_service_key: bytes, tag_search_context: ClientSearch.TagSearchContext ):
    
    # Assemble the mappings table names to search for this (file domain, tag
    # domain, display type). The combined tag domain expands to every real tag
    # service. The combined file domain uses the master mappings tables; a
    # specific file domain uses its cache tables (and, for 'actual' display, the
    # display cache tables). Note the combined-file branch does not vary with
    # tag_display_type.
    
    file_service_id = self.modules_services.GetServiceId( file_service_key )
    
    tag_service_key = tag_search_context.service_key
    
    if tag_service_key == CC.COMBINED_TAG_SERVICE_KEY:
        
        tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
        
    else:
        
        tag_service_ids = [ self.modules_services.GetServiceId( tag_service_key ) ]
        
    
    current_tables = []
    pending_tables = []
    
    for tag_service_id in tag_service_ids:
        
        if file_service_id == self.modules_services.combined_file_service_id:
            
            ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = ClientDBMappingsStorage.GenerateMappingsTableNames( tag_service_id )
            
            current_tables.append( current_mappings_table_name )
            pending_tables.append( pending_mappings_table_name )
            
        else:
            
            if tag_display_type == ClientTags.TAG_DISPLAY_STORAGE:
                
                ( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
                
                current_tables.append( cache_current_mappings_table_name )
                pending_tables.append( cache_pending_mappings_table_name )
                
            elif tag_display_type == ClientTags.TAG_DISPLAY_ACTUAL:
                
                ( cache_current_display_mappings_table_name, cache_pending_display_mappings_table_name ) = ClientDBMappingsCacheSpecificDisplay.GenerateSpecificDisplayMappingsCacheTableNames( file_service_id, tag_service_id )
                
                current_tables.append( cache_current_display_mappings_table_name )
                pending_tables.append( cache_pending_display_mappings_table_name )
                
            
        
    
    table_names = []
    
    # honour the context's current/pending inclusion flags
    if tag_search_context.include_current_tags:
        
        table_names.extend( current_tables )
        
    
    if tag_search_context.include_pending_tags:
        
        table_names.extend( pending_tables )
        
    
    return table_names
def _GetMappingAndTagTables( self, tag_display_type, file_service_key: bytes, tag_search_context: ClientSearch.TagSearchContext ):
    
    # As _GetMappingTables, but each entry is a ( mappings_table, tags_table )
    # pair so callers can join mappings against the domain's tag-definition table.
    
    file_service_id = self.modules_services.GetServiceId( file_service_key )
    
    tag_service_key = tag_search_context.service_key
    
    if tag_service_key == CC.COMBINED_TAG_SERVICE_KEY:
        
        tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
        
    else:
        
        tag_service_ids = [ self.modules_services.GetServiceId( tag_service_key ) ]
        
    
    current_tables = []
    pending_tables = []
    
    for tag_service_id in tag_service_ids:
        
        tags_table_name = self.modules_tag_search.GetTagsTableName( file_service_id, tag_service_id )
        
        if file_service_id == self.modules_services.combined_file_service_id:
            
            # yo this does not support tag_display_actual--big tricky problem
            ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = ClientDBMappingsStorage.GenerateMappingsTableNames( tag_service_id )
            
            current_tables.append( ( current_mappings_table_name, tags_table_name ) )
            pending_tables.append( ( pending_mappings_table_name, tags_table_name ) )
            
        else:
            
            if tag_display_type == ClientTags.TAG_DISPLAY_STORAGE:
                
                ( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
                
                current_tables.append( ( cache_current_mappings_table_name, tags_table_name ) )
                pending_tables.append( ( cache_pending_mappings_table_name, tags_table_name ) )
                
            elif tag_display_type == ClientTags.TAG_DISPLAY_ACTUAL:
                
                ( cache_current_display_mappings_table_name, cache_pending_display_mappings_table_name ) = ClientDBMappingsCacheSpecificDisplay.GenerateSpecificDisplayMappingsCacheTableNames( file_service_id, tag_service_id )
                
                current_tables.append( ( cache_current_display_mappings_table_name, tags_table_name ) )
                pending_tables.append( ( cache_pending_display_mappings_table_name, tags_table_name ) )
                
            
        
    
    table_names = []
    
    # honour the context's current/pending inclusion flags
    if tag_search_context.include_current_tags:
        
        table_names.extend( current_tables )
        
    
    if tag_search_context.include_pending_tags:
        
        table_names.extend( pending_tables )
        
    
    return table_names
def _GetMediaPredicates( self, tag_search_context: ClientSearch.TagSearchContext, tags_to_counts, inclusive, job_key = None ):
    
    # Convert a { tag : ( current_count, pending_count ) } mapping into display
    # predicates. Oversized/invalid tags are skipped with a one-time user warning
    # rather than aborting the whole request.
    
    display_tag_service_id = self.modules_services.GetServiceId( tag_search_context.display_service_key )
    
    # no max counts are known in this context
    max_current_count = None
    max_pending_count = None
    
    tag_ids_to_full_counts = {}
    
    showed_bad_tag_error = False
    
    for ( i, ( tag, ( current_count, pending_count ) ) ) in enumerate( tags_to_counts.items() ):
        
        try:
            
            tag_id = self.modules_tags.GetTagId( tag )
            
        except HydrusExceptions.TagSizeException:
            
            if not showed_bad_tag_error:
                
                showed_bad_tag_error = True
                
                HydrusData.ShowText( 'Hey, you seem to have an invalid tag in view right now! Please run the \'repair invalid tags\' routine under the \'database\' menu asap!' )
                
            
            continue
            
        
        tag_ids_to_full_counts[ tag_id ] = ( current_count, max_current_count, pending_count, max_pending_count )
        
        # poll for cancellation every hundred tags
        if i % 100 == 0:
            
            if job_key is not None and job_key.IsCancelled():
                
                return []
                
            
        
    
    if job_key is not None and job_key.IsCancelled():
        
        return []
        
    
    predicates = self._GeneratePredicatesFromTagIdsAndCounts( ClientTags.TAG_DISPLAY_ACTUAL, display_tag_service_id, tag_ids_to_full_counts, inclusive, job_key = job_key )
    
    return predicates
def _GetMediaResults( self, hash_ids: typing.Iterable[ int ], sorted = False ):
    
    # Fetch full MediaResult objects for the given hash_ids, serving from the
    # weakref cache where possible and assembling the rest from the database in
    # one batched pass. With sorted = True, the output follows the input order.
    
    ( cached_media_results, missing_hash_ids ) = self._weakref_media_result_cache.GetMediaResultsAndMissing( hash_ids )
    
    if len( missing_hash_ids ) > 0:
        
        # get first detailed results
        
        missing_hash_ids_to_hashes = self.modules_hashes_local_cache.GetHashIdsToHashes( hash_ids = missing_hash_ids )
        
        with self._MakeTemporaryIntegerTable( missing_hash_ids, 'hash_id' ) as temp_table_name:
            
            # everything here is temp hashes to metadata
            
            hash_ids_to_info = { hash_id : ClientMediaManagers.FileInfoManager( hash_id, missing_hash_ids_to_hashes[ hash_id ], size, mime, width, height, duration, num_frames, has_audio, num_words ) for ( hash_id, size, mime, width, height, duration, num_frames, has_audio, num_words ) in self._Execute( 'SELECT * FROM {} CROSS JOIN files_info USING ( hash_id );'.format( temp_table_name ) ) }
            
            (
                hash_ids_to_current_file_service_ids_and_timestamps,
                hash_ids_to_deleted_file_service_ids_and_timestamps,
                hash_ids_to_pending_file_service_ids,
                hash_ids_to_petitioned_file_service_ids
            ) = self.modules_files_storage.GetHashIdsToServiceInfoDicts( temp_table_name )
            
            hash_ids_to_urls = HydrusData.BuildKeyToSetDict( self._Execute( 'SELECT hash_id, url FROM {} CROSS JOIN url_map USING ( hash_id ) CROSS JOIN urls USING ( url_id );'.format( temp_table_name ) ) )
            
            hash_ids_to_service_ids_and_filenames = HydrusData.BuildKeyToListDict( ( ( hash_id, ( service_id, filename ) ) for ( hash_id, service_id, filename ) in self._Execute( 'SELECT hash_id, service_id, filename FROM {} CROSS JOIN service_filenames USING ( hash_id );'.format( temp_table_name ) ) ) )
            
            hash_ids_to_local_ratings = HydrusData.BuildKeyToListDict( ( ( hash_id, ( service_id, rating ) ) for ( service_id, hash_id, rating ) in self._Execute( 'SELECT service_id, hash_id, rating FROM {} CROSS JOIN local_ratings USING ( hash_id );'.format( temp_table_name ) ) ) )
            
            hash_ids_to_names_and_notes = HydrusData.BuildKeyToListDict( ( ( hash_id, ( name, note ) ) for ( hash_id, name, note ) in self._Execute( 'SELECT file_notes.hash_id, label, note FROM {} CROSS JOIN file_notes USING ( hash_id ), labels, notes ON ( file_notes.name_id = labels.label_id AND file_notes.note_id = notes.note_id );'.format( temp_table_name ) ) ) )
            
            hash_ids_to_file_viewing_stats = HydrusData.BuildKeyToListDict( ( ( hash_id, ( canvas_type, last_viewed_timestamp, views, viewtime ) ) for ( hash_id, canvas_type, last_viewed_timestamp, views, viewtime ) in self._Execute( 'SELECT hash_id, canvas_type, last_viewed_timestamp, views, viewtime FROM {} CROSS JOIN file_viewing_stats USING ( hash_id );'.format( temp_table_name ) ) ) )
            
            hash_ids_to_file_viewing_stats_managers = { hash_id : ClientMediaManagers.FileViewingStatsManager( file_viewing_stats ) for ( hash_id, file_viewing_stats ) in hash_ids_to_file_viewing_stats.items() }
            
            hash_ids_to_file_modified_timestamps = dict( self._Execute( 'SELECT hash_id, file_modified_timestamp FROM {} CROSS JOIN file_modified_timestamps USING ( hash_id );'.format( temp_table_name ) ) )
            
            hash_ids_to_domain_modified_timestamps = HydrusData.BuildKeyToListDict( ( ( hash_id, ( domain, timestamp ) ) for ( hash_id, domain, timestamp ) in self._Execute( 'SELECT hash_id, domain, file_modified_timestamp FROM {} CROSS JOIN file_domain_modified_timestamps USING ( hash_id ) CROSS JOIN url_domains USING ( domain_id );'.format( temp_table_name ) ) ) )
            
            hash_ids_to_archive_timestamps = dict( self._Execute( 'SELECT hash_id, archived_timestamp FROM {} CROSS JOIN archive_timestamps USING ( hash_id );'.format( temp_table_name ) ) )
            
            hash_ids_to_local_file_deletion_reasons = self.modules_files_storage.GetHashIdsToFileDeletionReasons( temp_table_name )
            
            hash_ids_to_current_file_service_ids = { hash_id : [ file_service_id for ( file_service_id, timestamp ) in file_service_ids_and_timestamps ] for ( hash_id, file_service_ids_and_timestamps ) in hash_ids_to_current_file_service_ids_and_timestamps.items() }
            
            hash_ids_to_tags_managers = self._GetForceRefreshTagsManagersWithTableHashIds( missing_hash_ids, temp_table_name, hash_ids_to_current_file_service_ids = hash_ids_to_current_file_service_ids )
            
        
        # build it
        
        service_ids_to_service_keys = self.modules_services.GetServiceIdsToServiceKeys()
        
        missing_media_results = []
        
        for hash_id in missing_hash_ids:
            
            tags_manager = hash_ids_to_tags_managers[ hash_id ]
            
            #
            
            current_file_service_keys_to_timestamps = { service_ids_to_service_keys[ service_id ] : timestamp for ( service_id, timestamp ) in hash_ids_to_current_file_service_ids_and_timestamps[ hash_id ] }
            
            deleted_file_service_keys_to_timestamps = { service_ids_to_service_keys[ service_id ] : ( timestamp, original_timestamp ) for ( service_id, timestamp, original_timestamp ) in hash_ids_to_deleted_file_service_ids_and_timestamps[ hash_id ] }
            
            pending_file_service_keys = { service_ids_to_service_keys[ service_id ] for service_id in hash_ids_to_pending_file_service_ids[ hash_id ] }
            
            petitioned_file_service_keys = { service_ids_to_service_keys[ service_id ] for service_id in hash_ids_to_petitioned_file_service_ids[ hash_id ] }
            
            inbox = hash_id in self.modules_files_metadata_basic.inbox_hash_ids
            
            # NOTE(review): direct [ hash_id ] lookups here presume the Build*Dict
            # helpers return default-populating mappings -- confirm in HydrusData
            urls = hash_ids_to_urls[ hash_id ]
            
            service_ids_to_filenames = HydrusData.BuildKeyToListDict( hash_ids_to_service_ids_and_filenames[ hash_id ] )
            
            service_keys_to_filenames = { service_ids_to_service_keys[ service_id ] : filenames for ( service_id, filenames ) in list(service_ids_to_filenames.items()) }
            
            timestamp_manager = ClientMediaManagers.TimestampManager()
            
            if hash_id in hash_ids_to_file_modified_timestamps:
                
                timestamp_manager.SetFileModifiedTimestamp( hash_ids_to_file_modified_timestamps[ hash_id ] )
                
            
            if hash_id in hash_ids_to_domain_modified_timestamps:
                
                for ( domain, modified_timestamp ) in hash_ids_to_domain_modified_timestamps[ hash_id ]:
                    
                    timestamp_manager.SetDomainModifiedTimestamp( domain, modified_timestamp )
                    
                
            
            if hash_id in hash_ids_to_archive_timestamps:
                
                timestamp_manager.SetArchivedTimestamp( hash_ids_to_archive_timestamps[ hash_id ] )
                
            
            if hash_id in hash_ids_to_local_file_deletion_reasons:
                
                local_file_deletion_reason = hash_ids_to_local_file_deletion_reasons[ hash_id ]
                
            else:
                
                local_file_deletion_reason = None
                
            
            locations_manager = ClientMediaManagers.LocationsManager(
                current_file_service_keys_to_timestamps,
                deleted_file_service_keys_to_timestamps,
                pending_file_service_keys,
                petitioned_file_service_keys,
                inbox = inbox,
                urls = urls,
                service_keys_to_filenames = service_keys_to_filenames,
                timestamp_manager = timestamp_manager,
                local_file_deletion_reason = local_file_deletion_reason
            )
            
            #
            
            local_ratings = { service_ids_to_service_keys[ service_id ] : rating for ( service_id, rating ) in hash_ids_to_local_ratings[ hash_id ] }
            
            ratings_manager = ClientMediaManagers.RatingsManager( local_ratings )
            
            #
            
            if hash_id in hash_ids_to_names_and_notes:
                
                names_to_notes = dict( hash_ids_to_names_and_notes[ hash_id ] )
                
            else:
                
                names_to_notes = dict()
                
            
            notes_manager = ClientMediaManagers.NotesManager( names_to_notes )
            
            #
            
            if hash_id in hash_ids_to_file_viewing_stats_managers:
                
                file_viewing_stats_manager = hash_ids_to_file_viewing_stats_managers[ hash_id ]
                
            else:
                
                file_viewing_stats_manager = ClientMediaManagers.FileViewingStatsManager.STATICGenerateEmptyManager()
                
            
            #
            
            # files with no files_info row (no size/mime metadata) get a bare info manager
            if hash_id in hash_ids_to_info:
                
                file_info_manager = hash_ids_to_info[ hash_id ]
                
            else:
                
                hash = missing_hash_ids_to_hashes[ hash_id ]
                
                file_info_manager = ClientMediaManagers.FileInfoManager( hash_id, hash )
                
            
            missing_media_results.append( ClientMediaResult.MediaResult( file_info_manager, tags_manager, locations_manager, ratings_manager, notes_manager, file_viewing_stats_manager ) )
            
        
        self._weakref_media_result_cache.AddMediaResults( missing_media_results )
        
        cached_media_results.extend( missing_media_results )
        
    
    media_results = cached_media_results
    
    if sorted:
        
        # reorder to follow the input hash_ids; unknown ids are dropped
        hash_ids_to_media_results = { media_result.GetHashId() : media_result for media_result in media_results }
        
        media_results = [ hash_ids_to_media_results[ hash_id ] for hash_id in hash_ids if hash_id in hash_ids_to_media_results ]
        
    
    return media_results
def _GetMediaResultFromHash( self, hash ) -> ClientMediaResult.MediaResult:
    
    # Single-hash convenience wrapper around the batch fetch.
    return self._GetMediaResultsFromHashes( [ hash ] )[0]
    
def _GetMediaResultsFromHashes( self, hashes: typing.Collection[ bytes ], sorted: bool = False ) -> typing.List[ ClientMediaResult.MediaResult ]:
    
    # Resolve the given hashes to ids and fetch their media results.
    query_hash_ids = set( self.modules_hashes_local_cache.GetHashIds( hashes ) )
    
    results = self._GetMediaResults( query_hash_ids )
    
    if not sorted:
        
        return results
        
    
    # the caller wants results back in the order the hashes were given
    if len( query_hash_ids ) < len( hashes ):
        
        hashes = HydrusData.DedupeList( hashes )
        
    
    lookup = { result.GetHash() : result for result in results }
    
    return [ lookup[ hash ] for hash in hashes if hash in lookup ]
    
def _GetNumsPending( self ):
    
    # For each repository/IPFS service, collect the counts of content waiting
    # to be uploaded (pending) or removed (petitioned).
    pendings = {}
    
    for service in self.modules_services.GetServices( ( HC.TAG_REPOSITORY, HC.FILE_REPOSITORY, HC.IPFS ) ):
        
        service_key = service.GetServiceKey()
        service_type = service.GetServiceType()
        
        service_id = self.modules_services.GetServiceId( service_key )
        
        if service_type == HC.TAG_REPOSITORY:
            
            info_types = {
                HC.SERVICE_INFO_NUM_PENDING_MAPPINGS,
                HC.SERVICE_INFO_NUM_PETITIONED_MAPPINGS,
                HC.SERVICE_INFO_NUM_PENDING_TAG_SIBLINGS,
                HC.SERVICE_INFO_NUM_PETITIONED_TAG_SIBLINGS,
                HC.SERVICE_INFO_NUM_PENDING_TAG_PARENTS,
                HC.SERVICE_INFO_NUM_PETITIONED_TAG_PARENTS
            }
            
        elif service_type in ( HC.FILE_REPOSITORY, HC.IPFS ):
            
            info_types = { HC.SERVICE_INFO_NUM_PENDING_FILES, HC.SERVICE_INFO_NUM_PETITIONED_FILES }
            
        else:
            
            info_types = set()
            
        
        pendings[ service_key ] = self._GetServiceInfoSpecific( service_id, service_type, info_types )
        
    
    return pendings
    
def _GetOptions( self ):
    
    # Load the stored options object, creating a default row on first run and
    # backfilling any option keys added since the row was last saved.
    result = self._Execute( 'SELECT options FROM options;' ).fetchone()
    
    if result is not None:
        
        ( options, ) = result
        
        defaults = ClientDefaults.GetClientDefaultOptions()
        
        for ( key, value ) in defaults.items():
            
            if key not in options:
                
                options[ key ] = value
                
            
        
    else:
        
        options = ClientDefaults.GetClientDefaultOptions()
        
        self._Execute( 'INSERT INTO options ( options ) VALUES ( ? );', ( options, ) )
        
    
    return options
    
def _GetPending( self, service_key, content_types ):
    """
    Fetch the next batch of pending/petitioned content to upload for the
    given service, filtered to content_types.
    
    For repositories, returns a HydrusNetwork.ClientToServerUpdate with
    content in it, or a media result when a file repository has a file to
    upload. For IPFS, returns a media result to pin or a
    ( hash, multihash ) pair to unpin. Returns None when nothing is left.
    """
    service_id = self.modules_services.GetServiceId( service_key )
    
    service = self.modules_services.GetService( service_id )
    
    service_type = service.GetServiceType()
    
    if service_type in HC.REPOSITORIES:
        
        account = service.GetAccount()
        
        client_to_server_update = HydrusNetwork.ClientToServerUpdate()
        
        if service_type == HC.TAG_REPOSITORY:
            
            if HC.CONTENT_TYPE_MAPPINGS in content_types:
                
                if account.HasPermission( HC.CONTENT_TYPE_MAPPINGS, HC.PERMISSION_ACTION_CREATE ):
                    
                    ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = ClientDBMappingsStorage.GenerateMappingsTableNames( service_id )
                    
                    # grab up to 100 tags' worth of pending mappings
                    pending_dict = HydrusData.BuildKeyToListDict( self._Execute( 'SELECT tag_id, hash_id FROM ' + pending_mappings_table_name + ' ORDER BY tag_id LIMIT 100;' ) )
                    
                    pending_mapping_ids = list( pending_dict.items() )
                    
                    # dealing with a scary situation when (due to some bug) mappings are current and pending. they get uploaded, but the content update makes no changes, so we cycle infitely!
                    addable_pending_mapping_ids = self._FilterExistingUpdateMappings( service_id, pending_mapping_ids, HC.CONTENT_UPDATE_ADD )
                    
                    pending_mapping_weight = sum( ( len( hash_ids ) for ( tag_id, hash_ids ) in pending_mapping_ids ) )
                    addable_pending_mapping_weight = sum( ( len( hash_ids ) for ( tag_id, hash_ids ) in addable_pending_mapping_ids ) )
                    
                    if pending_mapping_weight != addable_pending_mapping_weight:
                        
                        # refuse to upload rather than loop forever; point the user at the repair routine
                        message = 'Hey, while going through the pending tags to upload, it seemed some were simultaneously already in the \'current\' state. This looks like a bug.'
                        message += os.linesep * 2
                        message += 'Please run _database->check and repair->fix logically inconsistent mappings_. If everything seems good after that and you do not get this message again, you should be all fixed. If not, you may need to regenerate your mappings storage cache under the \'database\' menu. If that does not work, hydev would like to know about it!'
                        
                        HydrusData.ShowText( message )
                        
                        raise HydrusExceptions.VetoException( 'Logically inconsistent mappings detected!' )
                        
                    
                    for ( tag_id, hash_ids ) in pending_mapping_ids:
                        
                        tag = self.modules_tags_local_cache.GetTag( tag_id )
                        hashes = self.modules_hashes_local_cache.GetHashes( hash_ids )
                        
                        content = HydrusNetwork.Content( HC.CONTENT_TYPE_MAPPINGS, ( tag, hashes ) )
                        
                        client_to_server_update.AddContent( HC.CONTENT_UPDATE_PEND, content )
                        
                    
                
                if account.HasPermission( HC.CONTENT_TYPE_MAPPINGS, HC.PERMISSION_ACTION_PETITION ):
                    
                    # NOTE(review): petitioned_mappings_table_name is only bound in the CREATE branch above;
                    # an account with petition-but-not-create permission would hit a NameError here -- confirm
                    # that mapping permissions always come paired in practice.
                    petitioned_dict = HydrusData.BuildKeyToListDict( [ ( ( tag_id, reason_id ), hash_id ) for ( tag_id, hash_id, reason_id ) in self._Execute( 'SELECT tag_id, hash_id, reason_id FROM ' + petitioned_mappings_table_name + ' ORDER BY reason_id LIMIT 100;' ) ] )
                    
                    petitioned_mapping_ids = list( petitioned_dict.items() )
                    
                    # dealing with a scary situation when (due to some bug) mappings are deleted and petitioned. they get uploaded, but the content update makes no changes, so we cycle infitely!
                    deletable_and_petitioned_mappings = self._FilterExistingUpdateMappings(
                        service_id,
                        [ ( tag_id, hash_ids ) for ( ( tag_id, reason_id ), hash_ids ) in petitioned_mapping_ids ],
                        HC.CONTENT_UPDATE_DELETE
                    )
                    
                    petitioned_mapping_weight = sum( ( len( hash_ids ) for ( tag_id, hash_ids ) in petitioned_mapping_ids ) )
                    deletable_petitioned_mapping_weight = sum( ( len( hash_ids ) for ( tag_id, hash_ids ) in deletable_and_petitioned_mappings ) )
                    
                    if petitioned_mapping_weight != deletable_petitioned_mapping_weight:
                        
                        message = 'Hey, while going through the petitioned tags to upload, it seemed some were simultaneously already in the \'deleted\' state. This looks like a bug.'
                        message += os.linesep * 2
                        message += 'Please run _database->check and repair->fix logically inconsistent mappings_. If everything seems good after that and you do not get this message again, you should be all fixed. If not, you may need to regenerate your mappings storage cache under the \'database\' menu. If that does not work, hydev would like to know about it!'
                        
                        HydrusData.ShowText( message )
                        
                        raise HydrusExceptions.VetoException( 'Logically inconsistent mappings detected!' )
                        
                    
                    for ( ( tag_id, reason_id ), hash_ids ) in petitioned_mapping_ids:
                        
                        tag = self.modules_tags_local_cache.GetTag( tag_id )
                        hashes = self.modules_hashes_local_cache.GetHashes( hash_ids )
                        
                        reason = self.modules_texts.GetText( reason_id )
                        
                        content = HydrusNetwork.Content( HC.CONTENT_TYPE_MAPPINGS, ( tag, hashes ) )
                        
                        client_to_server_update.AddContent( HC.CONTENT_UPDATE_PETITION, content, reason )
                        
                    
                
            
            if HC.CONTENT_TYPE_TAG_PARENTS in content_types:
                
                if account.HasPermission( HC.CONTENT_TYPE_TAG_PARENTS, HC.PERMISSION_ACTION_PETITION ):
                    
                    # NOTE(review): pending parents use LIMIT 1 while siblings and petitioned parents use
                    # LIMIT 100 -- confirm this asymmetry is intended.
                    pending = self._Execute( 'SELECT child_tag_id, parent_tag_id, reason_id FROM tag_parent_petitions WHERE service_id = ? AND status = ? ORDER BY reason_id LIMIT 1;', ( service_id, HC.CONTENT_STATUS_PENDING ) ).fetchall()
                    
                    for ( child_tag_id, parent_tag_id, reason_id ) in pending:
                        
                        child_tag = self.modules_tags_local_cache.GetTag( child_tag_id )
                        parent_tag = self.modules_tags_local_cache.GetTag( parent_tag_id )
                        
                        reason = self.modules_texts.GetText( reason_id )
                        
                        content = HydrusNetwork.Content( HC.CONTENT_TYPE_TAG_PARENTS, ( child_tag, parent_tag ) )
                        
                        client_to_server_update.AddContent( HC.CONTENT_UPDATE_PEND, content, reason )
                        
                    
                    petitioned = self._Execute( 'SELECT child_tag_id, parent_tag_id, reason_id FROM tag_parent_petitions WHERE service_id = ? AND status = ? ORDER BY reason_id LIMIT 100;', ( service_id, HC.CONTENT_STATUS_PETITIONED ) ).fetchall()
                    
                    for ( child_tag_id, parent_tag_id, reason_id ) in petitioned:
                        
                        child_tag = self.modules_tags_local_cache.GetTag( child_tag_id )
                        parent_tag = self.modules_tags_local_cache.GetTag( parent_tag_id )
                        
                        reason = self.modules_texts.GetText( reason_id )
                        
                        content = HydrusNetwork.Content( HC.CONTENT_TYPE_TAG_PARENTS, ( child_tag, parent_tag ) )
                        
                        client_to_server_update.AddContent( HC.CONTENT_UPDATE_PETITION, content, reason )
                        
                    
                
            
            if HC.CONTENT_TYPE_TAG_SIBLINGS in content_types:
                
                if account.HasPermission( HC.CONTENT_TYPE_TAG_SIBLINGS, HC.PERMISSION_ACTION_PETITION ):
                    
                    pending = self._Execute( 'SELECT bad_tag_id, good_tag_id, reason_id FROM tag_sibling_petitions WHERE service_id = ? AND status = ? ORDER BY reason_id LIMIT 100;', ( service_id, HC.CONTENT_STATUS_PENDING ) ).fetchall()
                    
                    for ( bad_tag_id, good_tag_id, reason_id ) in pending:
                        
                        bad_tag = self.modules_tags_local_cache.GetTag( bad_tag_id )
                        good_tag = self.modules_tags_local_cache.GetTag( good_tag_id )
                        
                        reason = self.modules_texts.GetText( reason_id )
                        
                        content = HydrusNetwork.Content( HC.CONTENT_TYPE_TAG_SIBLINGS, ( bad_tag, good_tag ) )
                        
                        client_to_server_update.AddContent( HC.CONTENT_UPDATE_PEND, content, reason )
                        
                    
                    petitioned = self._Execute( 'SELECT bad_tag_id, good_tag_id, reason_id FROM tag_sibling_petitions WHERE service_id = ? AND status = ? ORDER BY reason_id LIMIT 100;', ( service_id, HC.CONTENT_STATUS_PETITIONED ) ).fetchall()
                    
                    for ( bad_tag_id, good_tag_id, reason_id ) in petitioned:
                        
                        bad_tag = self.modules_tags_local_cache.GetTag( bad_tag_id )
                        good_tag = self.modules_tags_local_cache.GetTag( good_tag_id )
                        
                        reason = self.modules_texts.GetText( reason_id )
                        
                        content = HydrusNetwork.Content( HC.CONTENT_TYPE_TAG_SIBLINGS, ( bad_tag, good_tag ) )
                        
                        client_to_server_update.AddContent( HC.CONTENT_UPDATE_PETITION, content, reason )
                        
                    
                
            
        elif service_type == HC.FILE_REPOSITORY:
            
            if HC.CONTENT_TYPE_FILES in content_types:
                
                if account.HasPermission( HC.CONTENT_TYPE_FILES, HC.PERMISSION_ACTION_CREATE ):
                    
                    result = self.modules_files_storage.GetAPendingHashId( service_id )
                    
                    if result is not None:
                        
                        hash_id = result
                        
                        # a pending file upload takes precedence: hand back its media result
                        media_result = self._GetMediaResults( ( hash_id, ) )[ 0 ]
                        
                        return media_result
                        
                    
                
                if account.HasPermission( HC.CONTENT_TYPE_FILES, HC.PERMISSION_ACTION_PETITION ):
                    
                    petitioned_rows = self.modules_files_storage.GetSomePetitionedRows( service_id )
                    
                    for ( reason_id, hash_ids ) in petitioned_rows:
                        
                        hashes = self.modules_hashes_local_cache.GetHashes( hash_ids )
                        
                        reason = self.modules_texts.GetText( reason_id )
                        
                        content = HydrusNetwork.Content( HC.CONTENT_TYPE_FILES, hashes )
                        
                        client_to_server_update.AddContent( HC.CONTENT_UPDATE_PETITION, content, reason )
                        
                    
                
            
        
        if client_to_server_update.HasContent():
            
            return client_to_server_update
            
        
    elif service_type == HC.IPFS:
        
        result = self.modules_files_storage.GetAPendingHashId( service_id )
        
        if result is not None:
            
            hash_id = result
            
            # a pending pin: hand back the media result so the caller can pin it
            media_result = self._GetMediaResults( ( hash_id, ) )[ 0 ]
            
            return media_result
            
        
        while True:
            
            result = self.modules_files_storage.GetAPetitionedHashId( service_id )
            
            if result is None:
                
                break
                
            else:
                
                hash_id = result
                
                hash = self.modules_hashes_local_cache.GetHash( hash_id )
                
                try:
                    
                    multihash = self._GetServiceFilename( service_id, hash_id )
                    
                except HydrusExceptions.DataMissing:
                    
                    # somehow this file exists in ipfs (or at least is petitioned), but there is no multihash.
                    # this is probably due to a legacy sync issue
                    # so lets just process that now and continue
                    # in future we'll have ipfs service sync to repopulate missing filenames
                    
                    content_update = HydrusData.ContentUpdate( HC.CONTENT_TYPE_FILES, HC.CONTENT_UPDATE_DELETE, ( hash, ) )
                    
                    service_keys_to_content_updates = { service_key : [ content_update ] }
                    
                    self._ProcessContentUpdates( service_keys_to_content_updates )
                    
                    continue
                    
                
                return ( hash, multihash )
                
            
        
    
    return None
    
def _GetPossibleAdditionalDBFilenames( self ):
    
    # Extend the base class's list of optional db-directory files with the mpv config.
    return HydrusDB.HydrusDB._GetPossibleAdditionalDBFilenames( self ) + [ 'mpv.conf' ]
    
def _GetRecentTags( self, service_key ):
    """
    Fetch the most recently used tags for a service, newest first, pruning
    any stored rows beyond the configured 'num_recent_tags' maximum.
    
    Returns a list of tag strings.
    """
    service_id = self.modules_services.GetServiceId( service_key )
    
    # we could be clever and do LIMIT and ORDER BY in the delete, but not all compilations of SQLite have that turned on, so let's KISS
    
    tag_ids_to_timestamp = { tag_id : timestamp for ( tag_id, timestamp ) in self._Execute( 'SELECT tag_id, timestamp FROM recent_tags WHERE service_id = ?;', ( service_id, ) ) }
    
    newest_first = sorted( tag_ids_to_timestamp.keys(), key = tag_ids_to_timestamp.__getitem__, reverse = True )
    
    num_we_want = HG.client_controller.new_options.GetNoneableInteger( 'num_recent_tags' )
    
    if num_we_want is None: # fixed: was '== None'; identity test is correct for the None singleton
        
        num_we_want = 20
        
    
    # anything beyond the retention limit decays out of the db
    decayed = newest_first[ num_we_want : ]
    
    if len( decayed ) > 0:
        
        self._ExecuteMany( 'DELETE FROM recent_tags WHERE service_id = ? AND tag_id = ?;', ( ( service_id, tag_id ) for tag_id in decayed ) )
        
    
    sorted_recent_tag_ids = newest_first[ : num_we_want ]
    
    tag_ids_to_tags = self.modules_tags_local_cache.GetTagIdsToTags( tag_ids = sorted_recent_tag_ids )
    
    return [ tag_ids_to_tags[ tag_id ] for tag_id in sorted_recent_tag_ids ]
    
def _GetRelatedTags( self, service_key, skip_hash, search_tags, max_results, max_time_to_take ):
    """
    Suggest tags related to search_tags by sampling files that share them
    and counting co-occurring tags. The work is time-boxed: half the
    budget finds candidate files, half counts their tags. Returns a list
    of predicates for up to max_results suggested tags.
    """
    stop_time_for_finding_files = HydrusData.GetNowPrecise() + ( max_time_to_take / 2 )
    stop_time_for_finding_tags = HydrusData.GetNowPrecise() + ( max_time_to_take / 2 )
    
    service_id = self.modules_services.GetServiceId( service_key )
    
    skip_hash_id = self.modules_hashes_local_cache.GetHashId( skip_hash )
    
    ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = ClientDBMappingsStorage.GenerateMappingsTableNames( service_id )
    
    tag_ids = [ self.modules_tags.GetTagId( tag ) for tag in search_tags ]
    
    # shuffle so the time-boxed scan is not biased towards any particular tag
    random.shuffle( tag_ids )
    
    hash_ids_counter = collections.Counter()
    
    with self._MakeTemporaryIntegerTable( tag_ids, 'tag_id' ) as temp_table_name:
        
        # temp tags to mappings
        cursor = self._Execute( 'SELECT hash_id FROM {} CROSS JOIN {} USING ( tag_id );'.format( temp_table_name, current_mappings_table_name ) )
        
        cancelled_hook = lambda: HydrusData.TimeHasPassedPrecise( stop_time_for_finding_files )
        
        for ( hash_id, ) in HydrusDB.ReadFromCancellableCursor( cursor, 128, cancelled_hook = cancelled_hook ):
            
            hash_ids_counter[ hash_id ] += 1
            
        
    
    # do not base suggestions on the file we are looking at itself
    if skip_hash_id in hash_ids_counter:
        
        del hash_ids_counter[ skip_hash_id ]
        
    
    #
    
    if len( hash_ids_counter ) == 0:
        
        return []
        
    
    # this stuff is often 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1.....
    # the 1 stuff often produces large quantities of the same very popular tag, so your search for [ 'eva', 'female' ] will produce 'touhou' because so many 2hu images have 'female'
    # so we want to do a 'soft' intersect, only picking the files that have the greatest number of shared search_tags
    # this filters to only the '2' results, which gives us eva females and their hair colour and a few choice other popular tags for that particular domain
    
    [ ( gumpf, largest_count ) ] = hash_ids_counter.most_common( 1 )
    
    hash_ids = [ hash_id for ( hash_id, current_count ) in hash_ids_counter.items() if current_count > largest_count * 0.8 ]
    
    counter = collections.Counter()
    
    # shuffle again so the tag-counting time box samples files fairly
    random.shuffle( hash_ids )
    
    for hash_id in hash_ids:
        
        for tag_id in self._STI( self._Execute( 'SELECT tag_id FROM ' + current_mappings_table_name + ' WHERE hash_id = ?;', ( hash_id, ) ) ):
            
            counter[ tag_id ] += 1
            
        
        if HydrusData.TimeHasPassedPrecise( stop_time_for_finding_tags ):
            
            break
            
        
    
    #
    
    # the search tags themselves are not useful suggestions
    for tag_id in tag_ids:
        
        if tag_id in counter:
            
            del counter[ tag_id ]
            
        
    
    results = counter.most_common( max_results )
    
    inclusive = True
    pending_count = 0
    
    tag_ids_to_full_counts = { tag_id : ( current_count, None, pending_count, None ) for ( tag_id, current_count ) in results }
    
    predicates = self._GeneratePredicatesFromTagIdsAndCounts( ClientTags.TAG_DISPLAY_STORAGE, service_id, tag_ids_to_full_counts, inclusive )
    
    return predicates
    
def _GetRepositoryThumbnailHashesIDoNotHave( self, service_key ):
    """
    Find hashes of thumbnail-capable files on a repository for which no
    local thumbnail is held yet, in batches of up to 10,000.
    """
    service_id = self.modules_services.GetServiceId( service_key )
    
    current_files_table_name = ClientDBFilesStorage.GenerateFilesTableName( service_id, HC.CONTENT_STATUS_CURRENT )
    
    # candidates: current files with thumbnail-capable mimes that have no remote_thumbnails record yet
    needed_hash_ids = self._STL( self._Execute( 'SELECT hash_id FROM {} NATURAL JOIN files_info WHERE mime IN {} EXCEPT SELECT hash_id FROM remote_thumbnails WHERE service_id = ?;'.format( current_files_table_name, HydrusData.SplayListForDB( HC.MIMES_WITH_THUMBNAILS ) ), ( service_id, ) ) )
    
    needed_hashes = []
    
    client_files_manager = HG.client_controller.client_files_manager
    
    for hash_id in needed_hash_ids:
        
        hash = self.modules_hashes_local_cache.GetHash( hash_id )
        
        if client_files_manager.LocklessHasThumbnail( hash ):
            
            # thumbnail already on disk; record that so this hash is skipped next time
            self._Execute( 'INSERT OR IGNORE INTO remote_thumbnails ( service_id, hash_id ) VALUES ( ?, ? );', ( service_id, hash_id ) )
            
        else:
            
            needed_hashes.append( hash )
            
            # cap the batch so a huge backlog is fetched in chunks
            if len( needed_hashes ) == 10000:
                
                return needed_hashes
                
            
        
    
    return needed_hashes
    
def _GetServiceDirectoryHashes( self, service_key, dirname ):
    
    # Map ( service, directory name ) to the hashes of the files registered under it.
    service_id = self.modules_services.GetServiceId( service_key )
    dirname_id = self.modules_texts.GetTextId( dirname )
    
    file_hash_ids = self._STL( self._Execute( 'SELECT hash_id FROM service_directory_file_map WHERE service_id = ? AND directory_id = ?;', ( service_id, dirname_id ) ) )
    
    return self.modules_hashes_local_cache.GetHashes( file_hash_ids )
    
def _GetServiceDirectoriesInfo( self, service_key ):
    
    # Fetch ( dirname, num_files, total_size, note ) rows for every directory on this service.
    service_id = self.modules_services.GetServiceId( service_key )
    
    info = []
    
    for ( directory_id, num_files, total_size, note ) in self._Execute( 'SELECT directory_id, num_files, total_size, note FROM service_directories WHERE service_id = ?;', ( service_id, ) ).fetchall():
        
        dirname = self.modules_texts.GetText( directory_id )
        
        info.append( ( dirname, num_files, total_size, note ) )
        
    
    return info
    
def _GetServiceFilename( self, service_id, hash_id ):
    
    # Look up the service-side filename (e.g. an IPFS multihash) for this file.
    row = self._Execute( 'SELECT filename FROM service_filenames WHERE service_id = ? AND hash_id = ?;', ( service_id, hash_id ) ).fetchone()
    
    if row is None:
        
        raise HydrusExceptions.DataMissing( 'Service filename not found!' )
        
    
    return row[0]
    
def _GetServiceFilenames( self, service_key, hashes ):
    
    # Fetch all known service filenames for the given hashes, sorted.
    service_id = self.modules_services.GetServiceId( service_key )
    hash_ids = self.modules_hashes_local_cache.GetHashIds( hashes )
    
    rows = self._Execute( 'SELECT filename FROM service_filenames WHERE service_id = ? AND hash_id IN ' + HydrusData.SplayListForDB( hash_ids ) + ';', ( service_id, ) )
    
    filenames = [ filename for ( filename, ) in rows ]
    
    filenames.sort()
    
    return filenames
    
def _GetServiceInfo( self, service_key ):
    
    # Decide which summary stats apply to this service's type, then compute them.
    service_id = self.modules_services.GetServiceId( service_key )
    
    service = self.modules_services.GetService( service_id )
    
    service_type = service.GetServiceType()
    
    if service_type in ( HC.COMBINED_LOCAL_FILE, HC.LOCAL_FILE_DOMAIN, HC.FILE_REPOSITORY ):
        
        info_types = { HC.SERVICE_INFO_NUM_FILES, HC.SERVICE_INFO_NUM_VIEWABLE_FILES, HC.SERVICE_INFO_TOTAL_SIZE, HC.SERVICE_INFO_NUM_DELETED_FILES }
        
    elif service_type in ( HC.LOCAL_FILE_TRASH_DOMAIN, HC.IPFS ):
        
        # trash and ipfs report the same stats
        info_types = { HC.SERVICE_INFO_NUM_FILES, HC.SERVICE_INFO_NUM_VIEWABLE_FILES, HC.SERVICE_INFO_TOTAL_SIZE }
        
    elif service_type == HC.LOCAL_TAG:
        
        info_types = { HC.SERVICE_INFO_NUM_FILES, HC.SERVICE_INFO_NUM_TAGS, HC.SERVICE_INFO_NUM_MAPPINGS }
        
    elif service_type == HC.TAG_REPOSITORY:
        
        info_types = { HC.SERVICE_INFO_NUM_FILES, HC.SERVICE_INFO_NUM_TAGS, HC.SERVICE_INFO_NUM_MAPPINGS, HC.SERVICE_INFO_NUM_DELETED_MAPPINGS }
        
    elif service_type in ( HC.LOCAL_RATING_LIKE, HC.LOCAL_RATING_NUMERICAL ):
        
        info_types = { HC.SERVICE_INFO_NUM_FILES }
        
    elif service_type == HC.LOCAL_BOORU:
        
        info_types = { HC.SERVICE_INFO_NUM_SHARES }
        
    else:
        
        info_types = set()
        
    
    return self._GetServiceInfoSpecific( service_id, service_type, info_types )
    
def _GetServiceInfoSpecific( self, service_id, service_type, info_types, calculate_missing = True ):
    """
    Fetch the requested info_type counts for a service from the
    service_info cache table, computing (and, for stable stats, caching)
    any that are missing. Returns a dict mapping info_type -> count.
    """
    info_types = set( info_types )
    
    # pull whatever is already cached
    results = { info_type : info for ( info_type, info ) in self._Execute( 'SELECT info_type, info FROM service_info WHERE service_id = ? AND info_type IN ' + HydrusData.SplayListForDB( info_types ) + ';', ( service_id, ) ) }
    
    if len( results ) != len( info_types ) and calculate_missing:
        
        info_types_hit = list( results.keys() )
        
        info_types_missed = info_types.difference( info_types_hit )
        
        for info_type in info_types_missed:
            
            info = None
            
            # NOTE(review): 'result' appears unused in this method -- probably vestigial.
            result = None
            
            # most stats are cached; volatile pending/petitioned counts are not
            save_it = True
            
            if service_type in HC.FILE_SERVICES:
                
                if info_type in ( HC.SERVICE_INFO_NUM_PENDING_FILES, HC.SERVICE_INFO_NUM_PETITIONED_FILES ):
                    
                    save_it = False
                    
                
                if info_type == HC.SERVICE_INFO_NUM_FILES:
                    
                    info = self.modules_files_storage.GetCurrentFilesCount( service_id )
                    
                elif info_type == HC.SERVICE_INFO_NUM_VIEWABLE_FILES:
                    
                    info = self.modules_files_storage.GetCurrentFilesCount( service_id, only_viewable = True )
                    
                elif info_type == HC.SERVICE_INFO_TOTAL_SIZE:
                    
                    info = self.modules_files_storage.GetCurrentFilesTotalSize( service_id )
                    
                elif info_type == HC.SERVICE_INFO_NUM_DELETED_FILES:
                    
                    info = self.modules_files_storage.GetDeletedFilesCount( service_id )
                    
                elif info_type == HC.SERVICE_INFO_NUM_PENDING_FILES:
                    
                    info = self.modules_files_storage.GetPendingFilesCount( service_id )
                    
                elif info_type == HC.SERVICE_INFO_NUM_PETITIONED_FILES:
                    
                    info = self.modules_files_storage.GetPetitionedFilesCount( service_id )
                    
                elif info_type == HC.SERVICE_INFO_NUM_INBOX:
                    
                    info = self.modules_files_storage.GetCurrentFilesInboxCount( service_id )
                    
                
            elif service_type in HC.REAL_TAG_SERVICES:
                
                if info_type in ( HC.SERVICE_INFO_NUM_PENDING_TAG_SIBLINGS, HC.SERVICE_INFO_NUM_PETITIONED_TAG_SIBLINGS, HC.SERVICE_INFO_NUM_PENDING_TAG_PARENTS, HC.SERVICE_INFO_NUM_PETITIONED_TAG_PARENTS ):
                    
                    save_it = False
                    
                
                if info_type == HC.SERVICE_INFO_NUM_FILES:
                    
                    info = self.modules_mappings_storage.GetCurrentFilesCount( service_id )
                    
                elif info_type == HC.SERVICE_INFO_NUM_TAGS:
                    
                    info = self.modules_tag_search.GetTagCount( self.modules_services.combined_file_service_id, service_id )
                    
                elif info_type == HC.SERVICE_INFO_NUM_MAPPINGS:
                    
                    info = self.modules_mappings_counts.GetTotalCurrentCount( ClientTags.TAG_DISPLAY_STORAGE, self.modules_services.combined_file_service_id, service_id )
                    
                elif info_type == HC.SERVICE_INFO_NUM_PENDING_MAPPINGS:
                    
                    # since pending is nearly always far smaller rowcount than current, if I pull this from a/c table, it is a HUGE waste of time and not faster than counting the raw table rows!
                    info = self.modules_mappings_storage.GetPendingMappingsCount( service_id )
                    
                elif info_type == HC.SERVICE_INFO_NUM_DELETED_MAPPINGS:
                    
                    # since pending is nearly always far smaller rowcount than current, if I pull this from a/c table, it is a HUGE waste of time and not faster than counting the raw table rows!
                    info = self.modules_mappings_storage.GetDeletedMappingsCount( service_id )
                    
                elif info_type == HC.SERVICE_INFO_NUM_PETITIONED_MAPPINGS:
                    
                    # since pending is nearly always far smaller rowcount than current, if I pull this from a/c table, it is a HUGE waste of time and not faster than counting the raw table rows!
                    info = self.modules_mappings_storage.GetPetitionedMappingsCount( service_id )
                    
                elif info_type == HC.SERVICE_INFO_NUM_PENDING_TAG_SIBLINGS:
                    
                    ( info, ) = self._Execute( 'SELECT COUNT( * ) FROM tag_sibling_petitions WHERE service_id = ? AND status = ?;', ( service_id, HC.CONTENT_STATUS_PENDING ) ).fetchone()
                    
                elif info_type == HC.SERVICE_INFO_NUM_PETITIONED_TAG_SIBLINGS:
                    
                    ( info, ) = self._Execute( 'SELECT COUNT( * ) FROM tag_sibling_petitions WHERE service_id = ? AND status = ?;', ( service_id, HC.CONTENT_STATUS_PETITIONED ) ).fetchone()
                    
                elif info_type == HC.SERVICE_INFO_NUM_PENDING_TAG_PARENTS:
                    
                    ( info, ) = self._Execute( 'SELECT COUNT( * ) FROM tag_parent_petitions WHERE service_id = ? AND status = ?;', ( service_id, HC.CONTENT_STATUS_PENDING ) ).fetchone()
                    
                elif info_type == HC.SERVICE_INFO_NUM_PETITIONED_TAG_PARENTS:
                    
                    ( info, ) = self._Execute( 'SELECT COUNT( * ) FROM tag_parent_petitions WHERE service_id = ? AND status = ?;', ( service_id, HC.CONTENT_STATUS_PETITIONED ) ).fetchone()
                    
                
            elif service_type in ( HC.LOCAL_RATING_LIKE, HC.LOCAL_RATING_NUMERICAL ):
                
                if info_type == HC.SERVICE_INFO_NUM_FILES:
                    
                    ( info, ) = self._Execute( 'SELECT COUNT( * ) FROM local_ratings WHERE service_id = ?;', ( service_id, ) ).fetchone()
                    
                
            elif service_type == HC.LOCAL_BOORU:
                
                if info_type == HC.SERVICE_INFO_NUM_SHARES:
                    
                    ( info, ) = self._Execute( 'SELECT COUNT( * ) FROM yaml_dumps WHERE dump_type = ?;', ( ClientDBSerialisable.YAML_DUMP_ID_LOCAL_BOORU, ) ).fetchone()
                    
                
            
            # unrecognised combinations fall through to a count of zero
            if info is None:
                
                info = 0
                
            
            if save_it:
                
                self._Execute( 'INSERT INTO service_info ( service_id, info_type, info ) VALUES ( ?, ?, ? );', ( service_id, info_type, info ) )
                
            
            results[ info_type ] = info
            
        
    
    return results
    
def _GetSiteId( self, name ):
    
    # Get-or-create the id row for an imageboard site name.
    result = self._Execute( 'SELECT site_id FROM imageboard_sites WHERE name = ?;', ( name, ) ).fetchone()
    
    if result is not None:
        
        ( site_id, ) = result
        
        return site_id
        
    
    self._Execute( 'INSERT INTO imageboard_sites ( name ) VALUES ( ? );', ( name, ) )
    
    return self._GetLastRowId()
    
def _GetTagIdsFromNamespaceIds( self, leaf: ClientDBServices.FileSearchContextLeaf, namespace_ids: typing.Collection[ int ], job_key = None ):
    """
    Resolve namespace ids to the tag ids carrying those namespaces in the
    leaf's ( file service, tag service ) search domain. Returns an empty
    set immediately if no namespaces are given or the job is cancelled.
    """
    if len( namespace_ids ) == 0:
        
        return set()
        
    
    final_result_tag_ids = set()
    
    with self._MakeTemporaryIntegerTable( namespace_ids, 'namespace_id' ) as temp_namespace_ids_table_name:
        
        tags_table_name = self.modules_tag_search.GetTagsTableName( leaf.file_service_id, leaf.tag_service_id )
        
        if len( namespace_ids ) == 1:
            
            # single namespace: a direct WHERE is cheaper than joining the temp table
            ( namespace_id, ) = namespace_ids
            
            cursor = self._Execute( 'SELECT tag_id FROM {} WHERE namespace_id = ?;'.format( tags_table_name ), ( namespace_id, ) )
            
        else:
            
            # temp namespaces to tags
            cursor = self._Execute( 'SELECT tag_id FROM {} CROSS JOIN {} USING ( namespace_id );'.format( temp_namespace_ids_table_name, tags_table_name ) )
            
        
        cancelled_hook = None
        
        if job_key is not None:
            
            cancelled_hook = job_key.IsCancelled
            
        
        result_tag_ids = self._STS( HydrusDB.ReadFromCancellableCursor( cursor, 128, cancelled_hook = cancelled_hook ) )
        
        if job_key is not None:
            
            if job_key.IsCancelled():
                
                return set()
                
            
        
        final_result_tag_ids.update( result_tag_ids )
        
    
    return final_result_tag_ids
    
def _GetTagIdsFromNamespaceIdsSubtagIds( self, file_service_id: int, tag_service_id: int, namespace_ids: typing.Collection[ int ], subtag_ids: typing.Collection[ int ], job_key = None ):
    
    # An empty namespace or subtag selection can match nothing.
    if len( namespace_ids ) == 0 or len( subtag_ids ) == 0:
        
        return set()
        
    
    # Materialise both id collections as temp tables and delegate to the table-based worker.
    with self._MakeTemporaryIntegerTable( subtag_ids, 'subtag_id' ) as subtag_ids_table_name, self._MakeTemporaryIntegerTable( namespace_ids, 'namespace_id' ) as namespace_ids_table_name:
        
        return self._GetTagIdsFromNamespaceIdsSubtagIdsTables( file_service_id, tag_service_id, namespace_ids_table_name, subtag_ids_table_name, job_key = job_key )
        
    
def _GetTagIdsFromNamespaceIdsSubtagIdsTables( self, file_service_id: int, tag_service_id: int, namespace_ids_table_name: str, subtag_ids_table_name: str, job_key = None ):
    """
    Resolve ( namespace id, subtag id ) pairs, provided as two temp
    integer tables, to matching tag ids. For the combined tag service
    every real tag service is searched and the results are unioned.
    """
    final_result_tag_ids = set()
    
    if tag_service_id == self.modules_services.combined_tag_service_id:
        
        search_tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
        
    else:
        
        search_tag_service_ids = ( tag_service_id, )
        
    
    for search_tag_service_id in search_tag_service_ids:
        
        tags_table_name = self.modules_tag_search.GetTagsTableName( file_service_id, search_tag_service_id )
        
        # temp subtags to tags to temp namespaces
        cursor = self._Execute( 'SELECT tag_id FROM {} CROSS JOIN {} USING ( subtag_id ) CROSS JOIN {} USING ( namespace_id );'.format( subtag_ids_table_name, tags_table_name, namespace_ids_table_name ) )
        
        cancelled_hook = None
        
        if job_key is not None:
            
            cancelled_hook = job_key.IsCancelled
            
        
        result_tag_ids = self._STS( HydrusDB.ReadFromCancellableCursor( cursor, 128, cancelled_hook = cancelled_hook ) )
        
        if job_key is not None:
            
            if job_key.IsCancelled():
                
                return set()
                
            
        
        final_result_tag_ids.update( result_tag_ids )
        
    
    return final_result_tag_ids
    
def _GetTagIdsFromSubtagIds( self, file_service_id: int, tag_service_id: int, subtag_ids: typing.Collection[ int ], job_key = None ):
    
    # Nothing to search means nothing to find.
    if len( subtag_ids ) == 0:
        
        return set()
        
    
    # Put the ids in a temp table and hand off to the table-based worker.
    with self._MakeTemporaryIntegerTable( subtag_ids, 'subtag_id' ) as subtag_ids_table_name:
        
        return self._GetTagIdsFromSubtagIdsTable( file_service_id, tag_service_id, subtag_ids_table_name, job_key = job_key )
        
    
def _GetTagIdsFromSubtagIdsTable( self, file_service_id: int, tag_service_id: int, subtag_ids_table_name: str, job_key = None ):
    """
    Resolve subtag ids, provided as a temp integer table, to matching tag
    ids. For the combined tag service every real tag service is searched
    and the results are unioned.
    """
    final_result_tag_ids = set()
    
    if tag_service_id == self.modules_services.combined_tag_service_id:
        
        search_tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
        
    else:
        
        search_tag_service_ids = ( tag_service_id, )
        
    
    for search_tag_service_id in search_tag_service_ids:
        
        tags_table_name = self.modules_tag_search.GetTagsTableName( file_service_id, search_tag_service_id )
        
        # temp subtags to tags
        cursor = self._Execute( 'SELECT tag_id FROM {} CROSS JOIN {} USING ( subtag_id );'.format( subtag_ids_table_name, tags_table_name ) )
        
        cancelled_hook = None
        
        if job_key is not None:
            
            cancelled_hook = job_key.IsCancelled
            
        
        result_tag_ids = self._STS( HydrusDB.ReadFromCancellableCursor( cursor, 128, cancelled_hook = cancelled_hook ) )
        
        if job_key is not None:
            
            if job_key.IsCancelled():
                
                return set()
                
            
        
        final_result_tag_ids.update( result_tag_ids )
        
    
    return final_result_tag_ids
    
def _GetTrashHashes( self, limit = None, minimum_age = None ):
    """
    Fetch hashes of files currently in the trash, optionally capped at
    'limit' rows and filtered to files trashed at least 'minimum_age'
    seconds ago. Files protected by the file delete lock are excluded.
    """
    if limit is None:
        
        limit_phrase = ''
        
    else:
        
        limit_phrase = ' LIMIT ' + str( limit )
        
    
    if minimum_age is None:
        
        age_phrase = ' ORDER BY timestamp ASC' # when deleting until trash is small enough, let's delete oldest first
        
    else:
        
        timestamp_cutoff = HydrusData.GetNow() - minimum_age
        
        age_phrase = ' WHERE timestamp < ' + str( timestamp_cutoff )
        
    
    current_files_table_name = ClientDBFilesStorage.GenerateFilesTableName( self.modules_services.trash_service_id, HC.CONTENT_STATUS_CURRENT )
    
    hash_ids = self._STS( self._Execute( 'SELECT hash_id FROM {}{}{};'.format( current_files_table_name, age_phrase, limit_phrase ) ) )
    
    # drop anything the file delete lock currently protects
    hash_ids = self._FilterForFileDeleteLock( self.modules_services.trash_service_id, hash_ids )
    
    if HG.db_report_mode:
        
        # verbose reporting for debugging trash maintenance
        message = 'When asked for '
        
        if limit is None:
            
            message += 'all the'
            
        else:
            
            message += 'at most ' + HydrusData.ToHumanInt( limit )
            
        
        message += ' trash files,'
        
        if minimum_age is not None:
            
            message += ' with minimum age ' + ClientData.TimestampToPrettyTimeDelta( timestamp_cutoff, just_now_threshold = 0 ) + ','
            
        
        message += ' I found ' + HydrusData.ToHumanInt( len( hash_ids ) ) + '.'
        
        HydrusData.ShowText( message )
        
    
    return self.modules_hashes_local_cache.GetHashes( hash_ids )
    
def _GetURLStatuses( self, url ) -> typing.List[ ClientImportFiles.FileImportStatus ]:
    """
    Return the import status for every file hash already associated with
    this URL or any of its normalised search variants. Returns an empty
    list when nothing is known (or when status lookup fails).
    """
    search_urls = ClientNetworkingFunctions.GetSearchURLs( url )
    
    hash_ids = set()
    
    for search_url in search_urls:
        
        results = self._STS( self._Execute( 'SELECT hash_id FROM url_map NATURAL JOIN urls WHERE url = ?;', ( search_url, ) ) )
        
        hash_ids.update( results )
        
    
    try:
        
        results = [ self._GetHashIdStatus( hash_id, prefix = 'url recognised' ) for hash_id in hash_ids ]
        
    except Exception:
        
        # best effort: if status lookup fails, report 'nothing known'.
        # fixed: was a bare 'except:', which also swallowed SystemExit/KeyboardInterrupt.
        return []
        
    
    return results
    
def _GetWithAndWithoutTagsForFilesFileCount( self, status, tag_service_id, with_these_tag_ids, without_these_tag_ids, hash_ids, hash_ids_table_name, file_service_ids_to_hash_ids ):
    
    # ok, given this selection of files, how many of them on current/pending have any of these tags but not any these, real fast?
    
    count = 0
    
    with self._MakeTemporaryIntegerTable( with_these_tag_ids, 'tag_id' ) as temp_with_these_tag_ids_table_name:
        
        with self._MakeTemporaryIntegerTable( without_these_tag_ids, 'tag_id' ) as temp_without_these_tag_ids_table_name:
            
            # count per file service, since mappings tables are per-service
            for ( file_service_id, batch_of_hash_ids ) in file_service_ids_to_hash_ids.items():
                
                if len( batch_of_hash_ids ) == len( hash_ids ):
                    
                    # the whole selection lives on this file service, so reuse the caller's hash table
                    subcount = self._GetWithAndWithoutTagsForFilesFileCountFileService( status, file_service_id, tag_service_id, with_these_tag_ids, temp_with_these_tag_ids_table_name, without_these_tag_ids, temp_without_these_tag_ids_table_name, hash_ids, hash_ids_table_name )
                    
                else:
                    
                    # otherwise spin up a temp table for just this service's slice of hashes
                    with self._MakeTemporaryIntegerTable( batch_of_hash_ids, 'hash_id' ) as temp_batch_hash_ids_table_name:
                        
                        subcount = self._GetWithAndWithoutTagsForFilesFileCountFileService( status, file_service_id, tag_service_id, with_these_tag_ids, temp_with_these_tag_ids_table_name, without_these_tag_ids, temp_without_these_tag_ids_table_name, batch_of_hash_ids, temp_batch_hash_ids_table_name )
                        
                    
                
                count += subcount
                
            
        
    
    return count
    
def _GetWithAndWithoutTagsForFilesFileCountFileService( self, status, file_service_id, tag_service_id, with_these_tag_ids, with_these_tag_ids_table_name, without_these_tag_ids, without_these_tag_ids_table_name, hash_ids, hash_ids_table_name ):
    """Count the files in hash_ids that have at least one 'with' tag and none of the 'without' tags.

    Counts against a single mapping status (current or pending) within one
    specific file service's fastest mapping tables. The query shape is picked
    dynamically from estimated row weights to keep the join cheap.
    """
    # by the power of the ancients, join in on this cross job
    # (comment above translated from Odia; it appears to be a joke about the CROSS JOINs below)
    # ok, given this selection of files, how many of them on current/pending have any of these tags but not any these, real fast?
    statuses_to_table_names = self.modules_mappings_storage.GetFastestStorageMappingTableNames( file_service_id, tag_service_id )
    # weights are rough row counts for each tag group, used below for query planning
    ( current_with_tag_ids, current_with_tag_ids_weight, pending_with_tag_ids, pending_with_tag_ids_weight ) = self.modules_mappings_counts.GetCurrentPendingPositiveCountsAndWeights( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, with_these_tag_ids, tag_ids_table_name = with_these_tag_ids_table_name )
    ( current_without_tag_ids, current_without_tag_ids_weight, pending_without_tag_ids, pending_without_tag_ids_weight ) = self.modules_mappings_counts.GetCurrentPendingPositiveCountsAndWeights( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, without_these_tag_ids, tag_ids_table_name = without_these_tag_ids_table_name )
    mappings_table_name = statuses_to_table_names[ status ]
    # NOTE(review): a status other than CURRENT/PENDING would leave the locals below unbound -- presumably callers only pass these two; confirm
    if status == HC.CONTENT_STATUS_CURRENT:
        with_tag_ids = current_with_tag_ids
        with_tag_ids_weight = current_with_tag_ids_weight
        without_tag_ids = current_without_tag_ids
        without_tag_ids_weight = current_without_tag_ids_weight
    elif status == HC.CONTENT_STATUS_PENDING:
        with_tag_ids = pending_with_tag_ids
        with_tag_ids_weight = pending_with_tag_ids_weight
        without_tag_ids = pending_without_tag_ids
        without_tag_ids_weight = pending_without_tag_ids_weight
    if with_tag_ids_weight == 0:
        # nothing there, so nothing to do!
        return 0
    hash_ids_weight = len( hash_ids )
    # in order to reduce overhead, we go full meme and do a bunch of different situations
    with self._MakeTemporaryIntegerTable( [], 'tag_id' ) as temp_with_tag_ids_table_name:
        with self._MakeTemporaryIntegerTable( [], 'tag_id' ) as temp_without_tag_ids_table_name:
            # the weight of whichever side drives the 'with' select, used to plan the 'without' subquery
            if ClientDBMappingsStorage.DoingAFileJoinTagSearchIsFaster( hash_ids_weight, with_tag_ids_weight ):
                select_with_weight = hash_ids_weight
            else:
                select_with_weight = with_tag_ids_weight
            if len( with_tag_ids ) == 1:
                ( with_tag_id, ) = with_tag_ids
                if ClientDBMappingsStorage.DoingAFileJoinTagSearchIsFaster( hash_ids_weight, with_tag_ids_weight ):
                    # temp files to mappings
                    select_with_hash_ids_on_storage = 'SELECT hash_id FROM {} CROSS JOIN {} USING ( hash_id ) WHERE tag_id = {}'.format( hash_ids_table_name, mappings_table_name, with_tag_id )
                else:
                    # mappings to temp files
                    select_with_hash_ids_on_storage = 'SELECT hash_id FROM {} CROSS JOIN {} USING ( hash_id ) WHERE tag_id = {}'.format( mappings_table_name, hash_ids_table_name, with_tag_id )
            else:
                # distinct as with many tags hashes can appear twice (e.g. two siblings on the same file)
                self._ExecuteMany( 'INSERT INTO {} ( tag_id ) VALUES ( ? );'.format( temp_with_tag_ids_table_name ), ( ( with_tag_id, ) for with_tag_id in with_tag_ids ) )
                if ClientDBMappingsStorage.DoingAFileJoinTagSearchIsFaster( hash_ids_weight, with_tag_ids_weight ):
                    # temp files to mappings to temp tags
                    select_with_hash_ids_on_storage = 'SELECT DISTINCT hash_id FROM {} CROSS JOIN {} USING ( hash_id ) CROSS JOIN {} USING ( tag_id )'.format( hash_ids_table_name, mappings_table_name, temp_with_tag_ids_table_name )
                else:
                    # temp tags to mappings to temp files
                    select_with_hash_ids_on_storage = 'SELECT DISTINCT hash_id FROM {} CROSS JOIN {} USING ( tag_id ) CROSS JOIN {} USING ( hash_id )'.format( temp_with_tag_ids_table_name, mappings_table_name, hash_ids_table_name )
            if without_tag_ids_weight == 0:
                table_phrase = '({})'.format( select_with_hash_ids_on_storage )
            else:
                # WARNING, WARNING: Big Brain Query, potentially great/awful
                # note that in the 'clever/file join' situation, the number of total mappings is many, but we are dealing with a few files
                # in that situation, we want to say 'for every file in this list, check if it exists'. this is the 'NOT EXISTS' thing
                # when we have lots of files, tag lookups are generally faster, so easier just to search by that tag in one go and check each file against that subquery result. this is 'hash_id NOT IN'
                if len( without_tag_ids ) == 1:
                    ( without_tag_id, ) = without_tag_ids
                    if ClientDBMappingsStorage.DoingAFileJoinTagSearchIsFaster( select_with_weight, without_tag_ids_weight ):
                        # (files to) mappings
                        hash_id_not_in_storage_without = 'NOT EXISTS ( SELECT 1 FROM {} as mt2 WHERE mt1.hash_id = mt2.hash_id and tag_id = {} )'.format( mappings_table_name, without_tag_id )
                    else:
                        hash_id_not_in_storage_without = 'hash_id NOT IN ( SELECT hash_id FROM {} WHERE tag_id = {} )'.format( mappings_table_name, without_tag_id )
                else:
                    self._ExecuteMany( 'INSERT INTO {} ( tag_id ) VALUES ( ? );'.format( temp_without_tag_ids_table_name ), ( ( without_tag_id, ) for without_tag_id in without_tag_ids ) )
                    if ClientDBMappingsStorage.DoingAFileJoinTagSearchIsFaster( select_with_weight, without_tag_ids_weight ):
                        # (files to) mappings to temp tags
                        hash_id_not_in_storage_without = 'NOT EXISTS ( SELECT 1 FROM {} as mt2 CROSS JOIN {} USING ( tag_id ) WHERE mt1.hash_id = mt2.hash_id )'.format( mappings_table_name, temp_without_tag_ids_table_name )
                    else:
                        # temp tags to mappings to temp files
                        hash_id_not_in_storage_without = 'hash_id NOT IN ( SELECT DISTINCT hash_id FROM {} CROSS JOIN {} USING ( tag_id ) )'.format( temp_without_tag_ids_table_name, mappings_table_name )
                table_phrase = '({}) as mt1 WHERE {}'.format( select_with_hash_ids_on_storage, hash_id_not_in_storage_without )
            query = 'SELECT COUNT ( * ) FROM {};'.format( table_phrase )
            ( count, ) = self._Execute( query ).fetchone()
    return count
def _GetWithAndWithoutTagsFileCountCombined( self, tag_service_id, with_these_tag_ids, without_these_tag_ids ):
    """Count files (across the combined file domain) with any 'with' tag and no 'without' tag.

    Returns ( current_count, pending_count ), one count per mapping status.
    The query shape is picked dynamically from cached count weights.
    """
    ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = ClientDBMappingsStorage.GenerateMappingsTableNames( tag_service_id )
    statuses_to_count = collections.Counter()
    ( current_with_tag_ids, current_with_tag_ids_weight, pending_with_tag_ids, pending_with_tag_ids_weight ) = self.modules_mappings_counts.GetCurrentPendingPositiveCountsAndWeights( ClientTags.TAG_DISPLAY_STORAGE, self.modules_services.combined_file_service_id, tag_service_id, with_these_tag_ids )
    ( current_without_tag_ids, current_without_tag_ids_weight, pending_without_tag_ids, pending_without_tag_ids_weight ) = self.modules_mappings_counts.GetCurrentPendingPositiveCountsAndWeights( ClientTags.TAG_DISPLAY_STORAGE, self.modules_services.combined_file_service_id, tag_service_id, without_these_tag_ids )
    # one job per mapping status, each with its own table and weights
    jobs = []
    jobs.append( ( HC.CONTENT_STATUS_CURRENT, current_mappings_table_name, current_with_tag_ids, current_with_tag_ids_weight, current_without_tag_ids, current_without_tag_ids_weight ) )
    jobs.append( ( HC.CONTENT_STATUS_PENDING, pending_mappings_table_name, pending_with_tag_ids, pending_with_tag_ids_weight, pending_without_tag_ids, pending_without_tag_ids_weight ) )
    for ( status, mappings_table_name, with_tag_ids, with_tag_ids_weight, without_tag_ids, without_tag_ids_weight ) in jobs:
        if with_tag_ids_weight == 0:
            # nothing there, so nothing to do!
            continue
        if without_tag_ids_weight == 0 and len( with_tag_ids ) == 1:
            # one 'with' tag and nothing to subtract: the cached weight is already the answer
            statuses_to_count[ status ] = with_tag_ids_weight
            continue
        if len( with_tag_ids ) > 1:
            # ok, when we are using with_tag_ids_weight as a 'this is how long the hash_ids list is' in later weight calculations, it does not account for overlap
            # in real world data, bad siblings tend to have a count of anywhere from 8% to 600% of the ideal (30-50% is common), but the overlap is significant, often 98%
            # so just to fudge this number a bit better, let's multiply it by 0.75
            with_tag_ids_weight = int( with_tag_ids_weight * 0.75 )
        # ultimately here, we are doing "delete all display mappings with hash_ids that have a storage mapping for a removee tag and no storage mappings for a keep tag
        # in order to reduce overhead, we go full meme and do a bunch of different situations
        with self._MakeTemporaryIntegerTable( [], 'tag_id' ) as temp_with_tag_ids_table_name:
            with self._MakeTemporaryIntegerTable( [], 'tag_id' ) as temp_without_tag_ids_table_name:
                if len( with_tag_ids ) == 1:
                    ( with_tag_id, ) = with_tag_ids
                    select_with_hash_ids_on_storage = 'SELECT hash_id FROM {} WHERE tag_id = {}'.format( mappings_table_name, with_tag_id )
                else:
                    self._ExecuteMany( 'INSERT INTO {} ( tag_id ) VALUES ( ? );'.format( temp_with_tag_ids_table_name ), ( ( with_tag_id, ) for with_tag_id in with_tag_ids ) )
                    # temp tags to mappings
                    select_with_hash_ids_on_storage = 'SELECT DISTINCT hash_id FROM {} CROSS JOIN {} USING ( tag_id )'.format( temp_with_tag_ids_table_name, mappings_table_name )
                if without_tag_ids_weight == 0:
                    table_phrase = '({})'.format( select_with_hash_ids_on_storage )
                else:
                    # WARNING, WARNING: Big Brain Query, potentially great/awful
                    # note that in the 'clever/file join' situation, the number of total mappings is many, but we are deleting a few
                    # we want to precisely scan the status of the potential hashes to delete, not scan through them all to see what not to do
                    # therefore, we do NOT EXISTS, which just scans the parts, rather than NOT IN, which does the whole query and then checks against all results
                    if len( without_tag_ids ) == 1:
                        ( without_tag_id, ) = without_tag_ids
                        if ClientDBMappingsStorage.DoingAFileJoinTagSearchIsFaster( with_tag_ids_weight, without_tag_ids_weight ):
                            hash_id_not_in_storage_without = 'NOT EXISTS ( SELECT 1 FROM {} as mt2 WHERE mt1.hash_id = mt2.hash_id and tag_id = {} )'.format( mappings_table_name, without_tag_id )
                        else:
                            hash_id_not_in_storage_without = 'hash_id NOT IN ( SELECT hash_id FROM {} WHERE tag_id = {} )'.format( mappings_table_name, without_tag_id )
                    else:
                        self._ExecuteMany( 'INSERT INTO {} ( tag_id ) VALUES ( ? );'.format( temp_without_tag_ids_table_name ), ( ( without_tag_id, ) for without_tag_id in without_tag_ids ) )
                        if ClientDBMappingsStorage.DoingAFileJoinTagSearchIsFaster( with_tag_ids_weight, without_tag_ids_weight ):
                            # (files to) mappings to temp tags
                            hash_id_not_in_storage_without = 'NOT EXISTS ( SELECT 1 FROM {} as mt2 CROSS JOIN {} USING ( tag_id ) WHERE mt1.hash_id = mt2.hash_id )'.format( mappings_table_name, temp_without_tag_ids_table_name )
                        else:
                            # temp tags to mappings
                            hash_id_not_in_storage_without = 'hash_id NOT IN ( SELECT DISTINCT hash_id FROM {} CROSS JOIN {} USING ( tag_id ) )'.format( temp_without_tag_ids_table_name, mappings_table_name )
                    table_phrase = '({}) as mt1 WHERE {}'.format( select_with_hash_ids_on_storage, hash_id_not_in_storage_without )
                query = 'SELECT COUNT ( * ) FROM {};'.format( table_phrase )
                ( count, ) = self._Execute( query ).fetchone()
                statuses_to_count[ status ] = count
    current_count = statuses_to_count[ HC.CONTENT_STATUS_CURRENT ]
    pending_count = statuses_to_count[ HC.CONTENT_STATUS_PENDING ]
    return ( current_count, pending_count )
def _GroupHashIdsByTagCachedFileServiceId( self, hash_ids, hash_ids_table_name, hash_ids_to_current_file_service_ids = None ):
    """Partition hash_ids into non-overlapping groups keyed by file service id.

    Only services with specific mapping caches get a group; anything left over
    goes under the combined file service id. Bigger groups claim hashes first,
    so fast cache lookups cover as many files as possible.
    """
    if hash_ids_to_current_file_service_ids is None:
        hash_ids_to_current_file_service_ids = self.modules_files_storage.GetHashIdsToCurrentServiceIds( hash_ids_table_name )
    cacheable_service_ids = set( self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES ) )
    grouped = collections.defaultdict( set )
    for ( hash_id, current_service_ids ) in hash_ids_to_current_file_service_ids.items():
        for service_id in current_service_ids:
            if service_id in cacheable_service_ids:
                grouped[ service_id ].add( hash_id )
    # most comprehensive services claim their hashes first, so every hash lands in exactly one group
    service_ids_biggest_first = sorted( grouped.keys(), key = lambda service_id: len( grouped[ service_id ] ), reverse = True )
    claimed = set()
    for service_id in service_ids_biggest_first:
        members = grouped[ service_id ]
        members.difference_update( claimed )
        if len( members ) == 0:
            del grouped[ service_id ]
        else:
            claimed.update( members )
    leftovers = set( hash_ids ).difference( claimed )
    if len( leftovers ) > 0:
        grouped[ self.modules_services.combined_file_service_id ] = leftovers
    return grouped
def _ImportFile( self, file_import_job: ClientImportFiles.FileImportJob ):
    """Do the db half of a file import and return its final FileImportStatus.

    If the file is already in the db, the existing status is returned
    untouched. Otherwise all metadata rows are written, the file is added to
    its destination local file service(s), and it is archived or inboxed
    according to the job's import options.
    """
    if HG.file_import_report_mode:
        HydrusData.ShowText( 'File import job starting db job' )
    hash = file_import_job.GetHash()
    hash_id = self.modules_hashes_local_cache.GetHashId( hash )
    file_import_status = self._GetHashIdStatus( hash_id, prefix = 'file recognised by database' )
    if not file_import_status.AlreadyInDB():
        if HG.file_import_report_mode:
            HydrusData.ShowText( 'File import job adding new file' )
        ( size, mime, width, height, duration, num_frames, has_audio, num_words ) = file_import_job.GetFileInfo()
        if HG.file_import_report_mode:
            HydrusData.ShowText( 'File import job adding file info row' )
        self.modules_files_metadata_basic.AddFilesInfo( [ ( hash_id, size, mime, width, height, duration, num_frames, has_audio, num_words ) ], overwrite = True )
        #
        perceptual_hashes = file_import_job.GetPerceptualHashes()
        if perceptual_hashes is not None:
            if HG.file_import_report_mode:
                HydrusData.ShowText( 'File import job associating perceptual_hashes' )
            self.modules_similar_files.AssociatePerceptualHashes( hash_id, perceptual_hashes )
        if HG.file_import_report_mode:
            HydrusData.ShowText( 'File import job adding file to local file service' )
        #
        ( md5, sha1, sha512 ) = file_import_job.GetExtraHashes()
        self.modules_hashes.SetExtraHashes( hash_id, md5, sha1, sha512 )
        #
        self.modules_files_metadata_basic.SetHasICCProfile( hash_id, file_import_job.HasICCProfile() )
        #
        pixel_hash = file_import_job.GetPixelHash()
        if pixel_hash is None:
            self.modules_similar_files.ClearPixelHash( hash_id )
        else:
            pixel_hash_id = self.modules_hashes.GetHashId( pixel_hash )
            self.modules_similar_files.SetPixelHash( hash_id, pixel_hash_id )
        #
        file_modified_timestamp = file_import_job.GetFileModifiedTimestamp()
        self._Execute( 'REPLACE INTO file_modified_timestamps ( hash_id, file_modified_timestamp ) VALUES ( ?, ? );', ( hash_id, file_modified_timestamp ) )
        #
        file_import_options = file_import_job.GetFileImportOptions()
        file_info_manager = ClientMediaManagers.FileInfoManager( hash_id, hash, size, mime, width, height, duration, num_frames, has_audio, num_words )
        now = HydrusData.GetNow()
        destination_location_context = file_import_options.GetDestinationLocationContext()
        destination_location_context.FixMissingServices( ClientLocation.ValidLocalDomainsFilter )
        # failsafe: if the configured destination has no current service, fall back to a local file domain
        if not destination_location_context.IncludesCurrent():
            service_ids = self.modules_services.GetServiceIds( ( HC.LOCAL_FILE_DOMAIN, ) )
            # lowest service id is used as the deterministic fallback choice
            service_id = min( service_ids )
            service_key = self.modules_services.GetService( service_id ).GetServiceKey()
            destination_location_context = ClientLocation.LocationContext( current_service_keys = ( service_key, ) )
        for destination_file_service_key in destination_location_context.current_service_keys:
            destination_service_id = self.modules_services.GetServiceId( destination_file_service_key )
            self._AddFiles( destination_service_id, [ ( hash_id, now ) ] )
            content_update = HydrusData.ContentUpdate( HC.CONTENT_TYPE_FILES, HC.CONTENT_UPDATE_ADD, ( file_info_manager, now ) )
            self.pub_content_updates_after_commit( { destination_file_service_key : [ content_update ] } )
        #
        if file_import_options.AutomaticallyArchives():
            if HG.file_import_report_mode:
                HydrusData.ShowText( 'File import job archiving new file' )
            self._ArchiveFiles( ( hash_id, ) )
            content_update = HydrusData.ContentUpdate( HC.CONTENT_TYPE_FILES, HC.CONTENT_UPDATE_ARCHIVE, ( hash, ) )
            self.pub_content_updates_after_commit( { CC.COMBINED_LOCAL_FILE_SERVICE_KEY : [ content_update ] } )
        else:
            if HG.file_import_report_mode:
                HydrusData.ShowText( 'File import job inboxing new file' )
            self._InboxFiles( ( hash_id, ) )
        #
        # any cached media result for this hash is now stale, so drop it and announce the fresh info
        if self._weakref_media_result_cache.HasFile( hash_id ):
            self._weakref_media_result_cache.DropMediaResult( hash_id, hash )
            self._controller.pub( 'new_file_info', { hash } )
        #
        file_import_status = ClientImportFiles.FileImportStatus( CC.STATUS_SUCCESSFUL_AND_NEW, hash, mime = mime )
    if HG.file_import_report_mode:
        HydrusData.ShowText( 'File import job done at db level, final status: {}'.format( file_import_status.ToString() ) )
    return file_import_status
def _ImportUpdate( self, update_network_bytes, update_hash, mime ):
    """Store a repository update blob on disk and register it with the local update service.

    Raises whatever the serialisation parse raises if the bytes are not a
    valid serialised object.
    """
    try:
        # parse purely as validation; the parsed object is discarded
        HydrusSerialisable.CreateFromNetworkBytes( update_network_bytes )
    except Exception:
        # narrowed from a bare 'except:' so shutdown signals do not trigger the scary message; the error still propagates
        HydrusData.ShowText( 'Was unable to parse an incoming update!' )
        raise
    hash_id = self.modules_hashes_local_cache.GetHashId( update_hash )
    size = len( update_network_bytes )
    # update files are serialised blobs, so none of the media metadata applies
    width = None
    height = None
    duration = None
    num_frames = None
    has_audio = None
    num_words = None
    client_files_manager = self._controller.client_files_manager
    client_files_manager.LocklessAddFileFromBytes( update_hash, mime, update_network_bytes )
    self.modules_files_metadata_basic.AddFilesInfo( [ ( hash_id, size, mime, width, height, duration, num_frames, has_audio, num_words ) ], overwrite = True )
    now = HydrusData.GetNow()
    self._AddFiles( self.modules_services.local_update_service_id, [ ( hash_id, now ) ] )
def _InboxFiles( self, hash_ids ):
    """Move files into the inbox and bump the cached per-service inbox counts."""
    newly_inboxed = self.modules_files_metadata_basic.InboxFiles( hash_ids )
    if len( newly_inboxed ) == 0:
        return
    counts_by_service = self.modules_files_storage.GetServiceIdCounts( newly_inboxed )
    if len( counts_by_service ) == 0:
        return
    updates = [ ( count, service_id, HC.SERVICE_INFO_NUM_INBOX ) for ( service_id, count ) in counts_by_service.items() ]
    self._ExecuteMany( 'UPDATE service_info SET info = info + ? WHERE service_id = ? AND info_type = ?;', updates )
def _InitCaches( self ):
    """Warm up post-update caches.

    This runs after db update, so the updated schema is safe to touch here,
    but db update code must not rely on anything this prepares.
    """
    splash = HG.client_controller.frame_splash_status
    splash.SetText( 'preparing db caches' )
    splash.SetSubtext( 'inbox' )
def _InitExternalDatabases( self ):
    """Register the filenames of the attached external database files."""
    external_filenames = {
        'external_caches' : 'client.caches.db',
        'external_mappings' : 'client.mappings.db',
        'external_master' : 'client.master.db'
    }
    self._db_filenames.update( external_filenames )
def _FilterInboxHashes( self, hashes: typing.Collection[ bytes ] ):
    """Return the subset of the given hashes whose files are currently in the inbox."""
    inbox_hash_ids = self.modules_files_metadata_basic.inbox_hash_ids
    result = set()
    for ( hash_id, hash ) in self.modules_hashes_local_cache.GetHashIdsToHashes( hashes = hashes ).items():
        if hash_id in inbox_hash_ids:
            result.add( hash )
    return result
def _IsAnOrphan( self, test_type, possible_hash ):
    """Say whether this hash's file or thumbnail on disk has no home in the db.

    test_type is 'file' or 'thumbnail'. An entirely unknown hash counts as an
    orphan.
    """
    if not self.modules_hashes.HasHash( possible_hash ):
        # we have never heard of it, so as far as the db is concerned it is an orphan
        return True
    hash_id = self.modules_hashes_local_cache.GetHashId( possible_hash )
    if test_type == 'file':
        return len( self.modules_files_storage.FilterOrphanFileHashIds( ( hash_id, ) ) ) == 1
    elif test_type == 'thumbnail':
        return len( self.modules_files_storage.FilterOrphanThumbnailHashIds( ( hash_id, ) ) ) == 1
def _LoadModules( self ):
    """Construct the client db modules in dependency order and register them.

    Order matters: later modules take earlier ones as constructor arguments.
    Every module must also be appended to self._modules so the shared module
    machinery can see it.
    """
    self.modules_db_maintenance = ClientDBMaintenance.ClientDBMaintenance( self._c, self._db_dir, self._db_filenames )
    self._modules.append( self.modules_db_maintenance )
    self.modules_services = ClientDBServices.ClientDBMasterServices( self._c )
    self._modules.append( self.modules_services )
    self.modules_hashes = ClientDBMaster.ClientDBMasterHashes( self._c )
    self._modules.append( self.modules_hashes )
    self.modules_tags = ClientDBMaster.ClientDBMasterTags( self._c )
    self._modules.append( self.modules_tags )
    self.modules_urls = ClientDBMaster.ClientDBMasterURLs( self._c )
    self._modules.append( self.modules_urls )
    self.modules_texts = ClientDBMaster.ClientDBMasterTexts( self._c )
    self._modules.append( self.modules_texts )
    self.modules_serialisable = ClientDBSerialisable.ClientDBSerialisable( self._c, self._db_dir, self._cursor_transaction_wrapper, self.modules_services )
    self._modules.append( self.modules_serialisable )
    #
    self.modules_files_metadata_basic = ClientDBFilesMetadataBasic.ClientDBFilesMetadataBasic( self._c )
    self._modules.append( self.modules_files_metadata_basic )
    #
    self.modules_files_storage = ClientDBFilesStorage.ClientDBFilesStorage( self._c, self._cursor_transaction_wrapper, self.modules_services, self.modules_hashes, self.modules_texts )
    self._modules.append( self.modules_files_storage )
    #
    self.modules_mappings_counts = ClientDBMappingsCounts.ClientDBMappingsCounts( self._c, self.modules_services )
    self._modules.append( self.modules_mappings_counts )
    #
    self.modules_tags_local_cache = ClientDBDefinitionsCache.ClientDBCacheLocalTags( self._c, self.modules_tags, self.modules_services, self.modules_mappings_counts )
    self._modules.append( self.modules_tags_local_cache )
    self.modules_hashes_local_cache = ClientDBDefinitionsCache.ClientDBCacheLocalHashes( self._c, self.modules_hashes, self.modules_services, self.modules_files_storage )
    self._modules.append( self.modules_hashes_local_cache )
    #
    self.modules_mappings_storage = ClientDBMappingsStorage.ClientDBMappingsStorage( self._c, self.modules_services )
    self._modules.append( self.modules_mappings_storage )
    #
    self.modules_tag_siblings = ClientDBTagSiblings.ClientDBTagSiblings( self._c, self.modules_services, self.modules_tags, self.modules_tags_local_cache )
    self._modules.append( self.modules_tag_siblings )
    self.modules_tag_parents = ClientDBTagParents.ClientDBTagParents( self._c, self.modules_services, self.modules_tags_local_cache, self.modules_tag_siblings )
    self._modules.append( self.modules_tag_parents )
    self.modules_tag_display = ClientDBTagDisplay.ClientDBTagDisplay( self._c, self._cursor_transaction_wrapper, self.modules_services, self.modules_tags, self.modules_tags_local_cache, self.modules_tag_siblings, self.modules_tag_parents )
    self._modules.append( self.modules_tag_display )
    # when you do the mappings caches, storage and display, consider carefully how you want them slotting in here
    # don't rush into it
    self.modules_tag_search = ClientDBTagSearch.ClientDBTagSearch( self._c, self.modules_services, self.modules_tags, self.modules_tag_display )
    self._modules.append( self.modules_tag_search )
    self.modules_mappings_counts_update = ClientDBMappingsCountsUpdate.ClientDBMappingsCountsUpdate( self._c, self.modules_services, self.modules_mappings_counts, self.modules_tags_local_cache, self.modules_tag_display, self.modules_tag_search )
    self._modules.append( self.modules_mappings_counts_update )
    #
    self.modules_mappings_cache_specific_display = ClientDBMappingsCacheSpecificDisplay.ClientDBMappingsCacheSpecificDisplay( self._c, self.modules_services, self.modules_mappings_counts, self.modules_mappings_counts_update, self.modules_mappings_storage, self.modules_tag_display )
    # BUGFIX: this module was constructed but, unlike every other module here, never registered
    self._modules.append( self.modules_mappings_cache_specific_display )
    #
    self.modules_similar_files = ClientDBSimilarFiles.ClientDBSimilarFiles( self._c, self.modules_services, self.modules_files_storage )
    self._modules.append( self.modules_similar_files )
    self.modules_files_duplicates = ClientDBFilesDuplicates.ClientDBFilesDuplicates( self._c, self.modules_files_storage, self.modules_hashes_local_cache, self.modules_similar_files )
    self._modules.append( self.modules_files_duplicates )
    #
    self.modules_files_maintenance_queue = ClientDBFilesMaintenanceQueue.ClientDBFilesMaintenanceQueue( self._c, self.modules_hashes_local_cache )
    self._modules.append( self.modules_files_maintenance_queue )
    #
    self.modules_repositories = ClientDBRepositories.ClientDBRepositories( self._c, self._cursor_transaction_wrapper, self.modules_services, self.modules_files_storage, self.modules_files_metadata_basic, self.modules_hashes_local_cache, self.modules_tags_local_cache, self.modules_files_maintenance_queue )
    self._modules.append( self.modules_repositories )
    #
    self.modules_files_maintenance = ClientDBFilesMaintenance.ClientDBFilesMaintenance( self._c, self.modules_files_maintenance_queue, self.modules_hashes, self.modules_hashes_local_cache, self.modules_files_metadata_basic, self.modules_similar_files, self.modules_repositories, self._weakref_media_result_cache )
    self._modules.append( self.modules_files_maintenance )
def _ManageDBError( self, job, e ):
    """Report a db-level exception to the user and/or hand it back to the waiting job."""
    if isinstance( e, MemoryError ):
        HydrusData.ShowText( 'The client is running out of memory! Restart it ASAP!' )
    tb = traceback.format_exc()
    if 'malformed' in tb:
        HydrusData.ShowText( 'A database exception looked like it could be a very serious \'database image is malformed\' error! Unless you know otherwise, please shut down the client immediately and check the \'help my db is broke.txt\' under install_dir/db.' )
    if not job.IsSynchronous():
        # no caller is waiting on a result, so just surface the error
        HydrusData.ShowException( e )
        return
    # a synchronous caller is blocked on this job--wrap the error up and deliver it as the result
    summary = str( type( e ).__name__ ) + ': ' + str( e )
    wrapped = HydrusExceptions.DBException( e, summary, 'Database ' + tb )
    job.PutResult( wrapped )
def _MigrationClearJob( self, database_temp_job_name ):
    """Drop the temp table that tracked a migration job's remaining work."""
    drop_statement = 'DROP TABLE {};'.format( database_temp_job_name )
    self._Execute( drop_statement )
def _MigrationGetMappings( self, database_temp_job_name, file_service_key, tag_service_key, hash_type, tag_filter, content_statuses ):
    """Pop a batch of work from the migration table and return ( hash, tags ) pairs.

    Each popped hash_id is deleted from the work table, its tags are gathered
    from the requested mapping statuses and filtered, and the batch stops at
    256 results or after roughly a second so the caller stays responsive.
    """
    time_started_precise = HydrusData.GetNowPrecise()
    data = []
    file_service_id = self.modules_services.GetServiceId( file_service_key )
    tag_service_id = self.modules_services.GetServiceId( tag_service_key )
    statuses_to_table_names = self.modules_mappings_storage.GetFastestStorageMappingTableNames( file_service_id, tag_service_id )
    # one per-hash tag query per requested mapping status
    select_queries = []
    for content_status in content_statuses:
        table_name = statuses_to_table_names[ content_status ]
        select_query = 'SELECT tag_id FROM {} WHERE hash_id = ?;'.format( table_name )
        select_queries.append( select_query )
    we_should_stop = False
    while not we_should_stop:
        result = self._Execute( 'SELECT hash_id FROM {};'.format( database_temp_job_name ) ).fetchone()
        if result is None:
            # work table is empty--job finished
            break
        ( hash_id, ) = result
        # consume the work row whether or not it yields data
        self._Execute( 'DELETE FROM {} WHERE hash_id = ?;'.format( database_temp_job_name ), ( hash_id, ) )
        if hash_type == 'sha256':
            desired_hash = self.modules_hashes_local_cache.GetHash( hash_id )
        else:
            try:
                desired_hash = self.modules_hashes.GetExtraHash( hash_type, hash_id )
            except HydrusExceptions.DataMissing:
                # no md5/sha1/sha512 recorded for this file, so it cannot be exported under this hash type
                continue
        tags = set()
        for select_query in select_queries:
            tag_ids = self._STL( self._Execute( select_query, ( hash_id, ) ) )
            tag_ids_to_tags = self.modules_tags_local_cache.GetTagIdsToTags( tag_ids = tag_ids )
            tags.update( tag_ids_to_tags.values() )
        if not tag_filter.AllowsEverything():
            tags = tag_filter.Filter( tags )
        if len( tags ) > 0:
            data.append( ( desired_hash, tags ) )
        # batch cap: 256 results, or ~1s of work once we have at least one result
        we_should_stop = len( data ) >= 256 or ( len( data ) > 0 and HydrusData.TimeHasPassedPrecise( time_started_precise + 1.0 ) )
    return data
def _MigrationGetPairs( self, database_temp_job_name, left_tag_filter, right_tag_filter ):
    """Pop a batch of ( left_tag, right_tag ) pairs from the migration work table.

    Each popped pair is deleted from the work table. Stops at 256 pairs or
    after roughly a second of work so the caller stays responsive.
    """
    started = HydrusData.GetNowPrecise()
    batch = []
    while True:
        row = self._Execute( 'SELECT left_tag_id, right_tag_id FROM {};'.format( database_temp_job_name ) ).fetchone()
        if row is None:
            # work table exhausted
            break
        ( left_tag_id, right_tag_id ) = row
        # consume the work row whether or not the filters accept it
        self._Execute( 'DELETE FROM {} WHERE left_tag_id = ? AND right_tag_id = ?;'.format( database_temp_job_name ), ( left_tag_id, right_tag_id ) )
        left_tag = self.modules_tags_local_cache.GetTag( left_tag_id )
        if not left_tag_filter.TagOK( left_tag ):
            continue
        right_tag = self.modules_tags_local_cache.GetTag( right_tag_id )
        if not right_tag_filter.TagOK( right_tag ):
            continue
        batch.append( ( left_tag, right_tag ) )
        if len( batch ) >= 256 or HydrusData.TimeHasPassedPrecise( started + 1.0 ):
            break
    return batch
def _MigrationStartMappingsJob( self, database_temp_job_name, file_service_key, tag_service_key, hashes, content_statuses ):
    """Create and fill the durable temp work table for a mappings migration job.

    If explicit hashes are given, only those are queued. Otherwise the table
    is seeded from the tag service's mapping tables -- or, when this service
    looks like it covers nearly everything, from the master hashes table.
    """
    file_service_id = self.modules_services.GetServiceId( file_service_key )
    self._Execute( 'CREATE TABLE IF NOT EXISTS durable_temp.{} ( hash_id INTEGER PRIMARY KEY );'.format( database_temp_job_name ) )
    if hashes is not None:
        hash_ids = self.modules_hashes_local_cache.GetHashIds( hashes )
        self._ExecuteMany( 'INSERT INTO {} ( hash_id ) VALUES ( ? );'.format( database_temp_job_name ), ( ( hash_id, ) for hash_id in hash_ids ) )
    else:
        tag_service_id = self.modules_services.GetServiceId( tag_service_key )
        statuses_to_table_names = {}
        use_hashes_table = False
        if file_service_id == self.modules_services.combined_file_service_id:
            # if our tag service is the biggest, and if it basically accounts for all the hashes we know about, it is much faster to just use the hashes table
            our_results = self._GetServiceInfo( tag_service_key )
            our_num_files = our_results[ HC.SERVICE_INFO_NUM_FILES ]
            other_services = [ service for service in self.modules_services.GetServices( HC.REAL_TAG_SERVICES ) if service.GetServiceKey() != tag_service_key ]
            other_num_files = []
            for other_service in other_services:
                other_results = self._GetServiceInfo( other_service.GetServiceKey() )
                other_num_files.append( other_results[ HC.SERVICE_INFO_NUM_FILES ] )
            if len( other_num_files ) == 0:
                we_are_big = True
            else:
                # 'big' here means at least three-quarters the size of the largest other tag service
                we_are_big = our_num_files >= 0.75 * max( other_num_files )
            if we_are_big:
                local_files_results = self._GetServiceInfo( CC.COMBINED_LOCAL_FILE_SERVICE_KEY )
                local_files_num_files = local_files_results[ HC.SERVICE_INFO_NUM_FILES ]
                if local_files_num_files > our_num_files:
                    # probably a small local tags service, ok to pull from current_mappings
                    we_are_big = False
            if we_are_big:
                use_hashes_table = True
        if use_hashes_table:
            # this obviously just pulls literally all known files
            # makes migration take longer if the tag service does not cover many of these files, but saves huge startup time since it is a simple list
            select_subqueries = [ 'SELECT hash_id FROM hashes' ]
        else:
            statuses_to_table_names = self.modules_mappings_storage.GetFastestStorageMappingTableNames( file_service_id, tag_service_id )
            select_subqueries = []
            for content_status in content_statuses:
                table_name = statuses_to_table_names[ content_status ]
                select_subquery = 'SELECT DISTINCT hash_id FROM {}'.format( table_name )
                select_subqueries.append( select_subquery )
        for select_subquery in select_subqueries:
            self._Execute( 'INSERT OR IGNORE INTO {} ( hash_id ) {};'.format( database_temp_job_name, select_subquery ) )
def _MigrationStartPairsJob( self, database_temp_job_name, tag_service_key, content_type, content_statuses ):
    """Create and fill the durable temp work table for a tag-pair migration job.

    content_type must be HC.CONTENT_TYPE_TAG_PARENTS or
    HC.CONTENT_TYPE_TAG_SIBLINGS; raises NotImplementedError otherwise.
    """
    self._Execute( 'CREATE TABLE IF NOT EXISTS durable_temp.{} ( left_tag_id INTEGER, right_tag_id INTEGER, PRIMARY KEY ( left_tag_id, right_tag_id ) );'.format( database_temp_job_name ) )
    tag_service_id = self.modules_services.GetServiceId( tag_service_key )
    if content_type == HC.CONTENT_TYPE_TAG_PARENTS:
        source_table_names = [ 'tag_parents', 'tag_parent_petitions' ]
        left_column_name = 'child_tag_id'
        right_column_name = 'parent_tag_id'
    elif content_type == HC.CONTENT_TYPE_TAG_SIBLINGS:
        source_table_names = [ 'tag_siblings', 'tag_sibling_petitions' ]
        left_column_name = 'bad_tag_id'
        right_column_name = 'good_tag_id'
    else:
        # previously an unknown content_type fell through to a NameError on source_table_names; fail with a clear message instead
        raise NotImplementedError( 'Unknown content type for pair migration: {}'.format( content_type ) )
    for source_table_name in source_table_names:
        self._Execute( 'INSERT OR IGNORE INTO {} ( left_tag_id, right_tag_id ) SELECT {}, {} FROM {} WHERE service_id = ? AND status IN {};'.format( database_temp_job_name, left_column_name, right_column_name, source_table_name, HydrusData.SplayListForDB( content_statuses ) ), ( tag_service_id, ) )
def _PerceptualHashesResetSearchFromHashes( self, hashes ):
    """Queue the given files for a fresh similar-files search."""
    self.modules_similar_files.ResetSearch( self.modules_hashes_local_cache.GetHashIds( hashes ) )
def _PerceptualHashesSearchForPotentialDuplicates( self, search_distance, maintenance_mode = HC.MAINTENANCE_FORCED, job_key = None, stop_time = None, work_time_float = None ):
    """
    Work through the shape search cache, finding potential duplicate pairs.
    
    Picks files whose cached searched_distance is NULL or smaller than the
    requested search_distance, searches the similar-files index for each,
    and records the resulting potential duplicate pairs. Work is done in
    small batches and can stop early on a time budget, job cancellation,
    or the controller's maintenance checks.
    
    Returns a ( still_work_to_do, num_done ) tuple. still_work_to_do stays
    True on any early return; it only becomes False once the query finds
    no more unsearched rows.
    """
    
    time_started_float = HydrusData.GetNowFloat()
    
    num_done = 0
    still_work_to_do = True
    
    # fetchmany( 10 ) keeps each batch small so the stop checks below run often
    group_of_hash_ids = self._STL( self._Execute( 'SELECT hash_id FROM shape_search_cache WHERE searched_distance IS NULL or searched_distance < ?;', ( search_distance, ) ).fetchmany( 10 ) )
    
    while len( group_of_hash_ids ) > 0:
        
        text = 'searching potential duplicates: {}'.format( HydrusData.ToHumanInt( num_done ) )
        HG.client_controller.frame_splash_status.SetSubtext( text )
        
        for ( i, hash_id ) in enumerate( group_of_hash_ids ):
            
            # optional wall-clock budget for this call
            if work_time_float is not None and HydrusData.TimeHasPassedFloat( time_started_float + work_time_float ):
                
                return ( still_work_to_do, num_done )
                
            
            # honour pause/cancel from the job, if we have one
            if job_key is not None:
                
                ( i_paused, should_stop ) = job_key.WaitIfNeeded()
                
                if should_stop:
                    
                    return ( still_work_to_do, num_done )
                    
                
            
            # global maintenance-mode stop check
            should_stop = HG.client_controller.ShouldStopThisWork( maintenance_mode, stop_time = stop_time )
            
            if should_stop:
                
                return ( still_work_to_do, num_done )
                
            
            media_id = self.modules_files_duplicates.DuplicatesGetMediaId( hash_id )
            
            # search the similar-files tree; exclude the file itself from its own results
            potential_duplicate_media_ids_and_distances = [ ( self.modules_files_duplicates.DuplicatesGetMediaId( duplicate_hash_id ), distance ) for ( duplicate_hash_id, distance ) in self.modules_similar_files.Search( hash_id, search_distance ) if duplicate_hash_id != hash_id ]
            
            self.modules_files_duplicates.DuplicatesAddPotentialDuplicates( media_id, potential_duplicate_media_ids_and_distances )
            
            # mark this file as searched at this distance so it is not picked up again
            self._Execute( 'UPDATE shape_search_cache SET searched_distance = ? WHERE hash_id = ?;', ( search_distance, hash_id ) )
            
            num_done += 1
            
        
        group_of_hash_ids = self._STL( self._Execute( 'SELECT hash_id FROM shape_search_cache WHERE searched_distance IS NULL or searched_distance < ?;', ( search_distance, ) ).fetchmany( 10 ) )
        
    
    still_work_to_do = False
    
    return ( still_work_to_do, num_done )
    
def _ProcessContentUpdates( self, service_keys_to_content_updates, publish_content_updates = True ):
    """
    Apply a batch of content updates, grouped by service, to the database.
    
    For each service, dispatches every content update on the service type,
    the update's data type, and its action. Mapping changes are accumulated
    across the whole batch for a service and applied in one _UpdateMappings
    call at the end of that service's loop. Updates for service keys that
    no longer exist are silently dropped.
    
    If publish_content_updates is True, UI notifications and the filtered
    update dict are published after the job commits.
    """
    
    notify_new_downloads = False
    notify_new_pending = False
    notify_new_parents = False
    notify_new_siblings = False
    
    # only updates for services that still exist get republished later
    valid_service_keys_to_content_updates = {}
    
    for ( service_key, content_updates ) in service_keys_to_content_updates.items():
        
        try:
            
            service_id = self.modules_services.GetServiceId( service_key )
            
        except HydrusExceptions.DataMissing:
            
            # service was deleted; drop its updates
            continue
            
        
        valid_service_keys_to_content_updates[ service_key ] = content_updates
        
        service = self.modules_services.GetService( service_id )
        
        service_type = service.GetServiceType()
        
        # mapping changes are batched per service so _UpdateMappings runs once
        ultimate_mappings_ids = []
        ultimate_deleted_mappings_ids = []
        ultimate_pending_mappings_ids = []
        ultimate_pending_rescinded_mappings_ids = []
        ultimate_petitioned_mappings_ids = []
        ultimate_petitioned_rescinded_mappings_ids = []
        
        changed_sibling_tag_ids = set()
        changed_parent_tag_ids = set()
        
        for content_update in content_updates:
            
            ( data_type, action, row ) = content_update.ToTuple()
            
            if service_type in HC.FILE_SERVICES:
                
                if data_type == HC.CONTENT_TYPE_FILES:
                    
                    if action == HC.CONTENT_UPDATE_ADVANCED:
                        
                        ( sub_action, sub_row ) = row
                        
                        if sub_action == 'delete_deleted':
                            
                            hashes = sub_row
                            
                            if hashes is None:
                                
                                # no hashes given: clear the whole local delete record
                                service_ids_to_nums_cleared = self.modules_files_storage.ClearLocalDeleteRecord()
                                
                            else:
                                
                                hash_ids = self.modules_hashes_local_cache.GetHashIds( hashes )
                                
                                service_ids_to_nums_cleared = self.modules_files_storage.ClearLocalDeleteRecord( hash_ids )
                                
                            
                            # keep the cached deleted-file counts in sync with what was cleared
                            self._ExecuteMany( 'UPDATE service_info SET info = info + ? WHERE service_id = ? AND info_type = ?;', ( ( -num_cleared, clear_service_id, HC.SERVICE_INFO_NUM_DELETED_FILES ) for ( clear_service_id, num_cleared ) in service_ids_to_nums_cleared.items() ) )
                            
                        
                    elif action == HC.CONTENT_UPDATE_ADD:
                        
                        if service_type in HC.LOCAL_FILE_SERVICES or service_type == HC.FILE_REPOSITORY:
                            
                            ( file_info_manager, timestamp ) = row
                            
                            ( hash_id, hash, size, mime, width, height, duration, num_frames, has_audio, num_words ) = file_info_manager.ToTuple()
                            
                            self.modules_files_metadata_basic.AddFilesInfo( [ ( hash_id, size, mime, width, height, duration, num_frames, has_audio, num_words ) ] )
                            
                        elif service_type == HC.IPFS:
                            
                            ( file_info_manager, multihash ) = row
                            
                            hash_id = file_info_manager.hash_id
                            
                            self._SetServiceFilename( service_id, hash_id, multihash )
                            
                            timestamp = HydrusData.GetNow()
                            
                        
                        # both branches above set hash_id and timestamp for this add
                        self._AddFiles( service_id, [ ( hash_id, timestamp ) ] )
                        
                    else:
                        
                        # all remaining file actions carry a plain iterable of hashes
                        hashes = row
                        
                        hash_ids = self.modules_hashes_local_cache.GetHashIds( hashes )
                        
                        if action == HC.CONTENT_UPDATE_ARCHIVE:
                            
                            self._ArchiveFiles( hash_ids )
                            
                        elif action == HC.CONTENT_UPDATE_INBOX:
                            
                            self._InboxFiles( hash_ids )
                            
                        elif action == HC.CONTENT_UPDATE_DELETE:
                            
                            # the delete lock may forbid deleting some of these files
                            actual_delete_hash_ids = self._FilterForFileDeleteLock( service_id, hash_ids )
                            
                            if len( actual_delete_hash_ids ) < len( hash_ids ):
                                
                                # shrink the update's own row so later consumers see what was really deleted
                                hash_ids = actual_delete_hash_ids
                                
                                hashes = self.modules_hashes_local_cache.GetHashes( hash_ids )
                                
                                content_update.SetRow( hashes )
                                
                            
                            if service_type in ( HC.LOCAL_FILE_DOMAIN, HC.COMBINED_LOCAL_FILE ):
                                
                                if content_update.HasReason():
                                    
                                    reason = content_update.GetReason()
                                    
                                    # at the moment, we only set a deletion reason when a file leaves a real file domain. not on second delete from trash, so if file in trash, no new delete reason will be set
                                    
                                    location_context = ClientLocation.LocationContext( current_service_keys = ( service_key, ) )
                                    
                                    reason_setting_hash_ids = self.modules_files_storage.FilterHashIds( location_context, hash_ids )
                                    
                                    self.modules_files_storage.SetFileDeletionReason( reason_setting_hash_ids, reason )
                                    
                                
                            
                            if service_id == self.modules_services.trash_service_id:
                                
                                # shouldn't be called anymore, but just in case someone fidgets a trash delete with client api or something
                                
                                self._DeleteFiles( self.modules_services.combined_local_file_service_id, hash_ids )
                                
                            else:
                                
                                self._DeleteFiles( service_id, hash_ids )
                                
                            
                        elif action == HC.CONTENT_UPDATE_UNDELETE:
                            
                            self._UndeleteFiles( service_id, hash_ids )
                            
                        elif action == HC.CONTENT_UPDATE_PEND:
                            
                            # files already current on the service cannot be pended again
                            invalid_hash_ids = self.modules_files_storage.FilterHashIdsToStatus( service_id, hash_ids, HC.CONTENT_STATUS_CURRENT )
                            
                            valid_hash_ids = hash_ids.difference( invalid_hash_ids )
                            
                            self.modules_files_storage.PendFiles( service_id, valid_hash_ids )
                            
                            if service_key == CC.COMBINED_LOCAL_FILE_SERVICE_KEY:
                                
                                notify_new_downloads = True
                                
                            else:
                                
                                notify_new_pending = True
                                
                            
                        elif action == HC.CONTENT_UPDATE_PETITION:
                            
                            reason = content_update.GetReason()
                            
                            reason_id = self.modules_texts.GetTextId( reason )
                            
                            # only currently-held files can be petitioned for removal
                            valid_hash_ids = self.modules_files_storage.FilterHashIdsToStatus( service_id, hash_ids, HC.CONTENT_STATUS_CURRENT )
                            
                            self.modules_files_storage.PetitionFiles( service_id, reason_id, valid_hash_ids )
                            
                            notify_new_pending = True
                            
                        elif action == HC.CONTENT_UPDATE_RESCIND_PEND:
                            
                            self.modules_files_storage.RescindPendFiles( service_id, hash_ids )
                            
                            if service_key == CC.COMBINED_LOCAL_FILE_SERVICE_KEY:
                                
                                notify_new_downloads = True
                                
                            else:
                                
                                notify_new_pending = True
                                
                            
                        elif action == HC.CONTENT_UPDATE_RESCIND_PETITION:
                            
                            self.modules_files_storage.RescindPetitionFiles( service_id, hash_ids )
                            
                            notify_new_pending = True
                            
                        
                    
                elif data_type == HC.CONTENT_TYPE_DIRECTORIES:
                    
                    if action == HC.CONTENT_UPDATE_ADD:
                        
                        ( hashes, dirname, note ) = row
                        
                        hash_ids = self.modules_hashes_local_cache.GetHashIds( hashes )
                        
                        self._SetServiceDirectory( service_id, hash_ids, dirname, note )
                        
                    elif action == HC.CONTENT_UPDATE_DELETE:
                        
                        dirname = row
                        
                        self._DeleteServiceDirectory( service_id, dirname )
                        
                    
                elif data_type == HC.CONTENT_TYPE_URLS:
                    
                    if action == HC.CONTENT_UPDATE_ADD:
                        
                        ( urls, hashes ) = row
                        
                        url_ids = { self.modules_urls.GetURLId( url ) for url in urls }
                        
                        hash_ids = self.modules_hashes_local_cache.GetHashIds( hashes )
                        
                        # every url is mapped to every hash in the update
                        self._ExecuteMany( 'INSERT OR IGNORE INTO url_map ( hash_id, url_id ) VALUES ( ?, ? );', itertools.product( hash_ids, url_ids ) )
                        
                    elif action == HC.CONTENT_UPDATE_DELETE:
                        
                        ( urls, hashes ) = row
                        
                        url_ids = { self.modules_urls.GetURLId( url ) for url in urls }
                        
                        hash_ids = self.modules_hashes_local_cache.GetHashIds( hashes )
                        
                        self._ExecuteMany( 'DELETE FROM url_map WHERE hash_id = ? AND url_id = ?;', itertools.product( hash_ids, url_ids ) )
                        
                    
                elif data_type == HC.CONTENT_TYPE_TIMESTAMP:
                    
                    ( timestamp_type, hash, data ) = row
                    
                    # only domain-modified timestamps are handled here
                    if timestamp_type == 'domain':
                        
                        if action == HC.CONTENT_UPDATE_ADD:
                            
                            ( domain, timestamp ) = data
                            
                            hash_id = self.modules_hashes_local_cache.GetHashId( hash )
                            domain_id = self.modules_urls.GetURLDomainId( domain )
                            
                            self.modules_files_metadata_basic.UpdateDomainModifiedTimestamp( hash_id, domain_id, timestamp )
                            
                        elif action == HC.CONTENT_UPDATE_SET:
                            
                            ( domain, timestamp ) = data
                            
                            hash_id = self.modules_hashes_local_cache.GetHashId( hash )
                            domain_id = self.modules_urls.GetURLDomainId( domain )
                            
                            self.modules_files_metadata_basic.SetDomainModifiedTimestamp( hash_id, domain_id, timestamp )
                            
                        elif action == HC.CONTENT_UPDATE_DELETE:
                            
                            domain = data
                            
                            hash_id = self.modules_hashes_local_cache.GetHashId( hash )
                            domain_id = self.modules_urls.GetURLDomainId( domain )
                            
                            self.modules_files_metadata_basic.ClearDomainModifiedTimestamp( hash_id, domain_id )
                            
                        
                    
                elif data_type == HC.CONTENT_TYPE_FILE_VIEWING_STATS:
                    
                    if action == HC.CONTENT_UPDATE_ADVANCED:
                        
                        action = row
                        
                        if action == 'clear':
                            
                            self._Execute( 'DELETE FROM file_viewing_stats;' )
                            
                        
                    elif action == HC.CONTENT_UPDATE_ADD:
                        
                        ( hash, canvas_type, view_timestamp, views_delta, viewtime_delta ) = row
                        
                        hash_id = self.modules_hashes_local_cache.GetHashId( hash )
                        
                        # ensure a zeroed row exists, then apply the deltas on top of it
                        self._Execute( 'INSERT OR IGNORE INTO file_viewing_stats ( hash_id, canvas_type, last_viewed_timestamp, views, viewtime ) VALUES ( ?, ?, ?, ?, ? );', ( hash_id, canvas_type, 0, 0, 0 ) )
                        
                        self._Execute( 'UPDATE file_viewing_stats SET last_viewed_timestamp = ?, views = views + ?, viewtime = viewtime + ? WHERE hash_id = ? AND canvas_type = ?;', ( view_timestamp, views_delta, viewtime_delta, hash_id, canvas_type ) )
                        
                    elif action == HC.CONTENT_UPDATE_DELETE:
                        
                        hashes = row
                        
                        hash_ids = self.modules_hashes_local_cache.GetHashIds( hashes )
                        
                        self._ExecuteMany( 'DELETE FROM file_viewing_stats WHERE hash_id = ?;', ( ( hash_id, ) for hash_id in hash_ids ) )
                        
                    
                
            elif service_type in HC.REAL_TAG_SERVICES:
                
                if data_type == HC.CONTENT_TYPE_MAPPINGS:
                    
                    ( tag, hashes ) = row
                    
                    try:
                        
                        tag_id = self.modules_tags.GetTagId( tag )
                        
                    except HydrusExceptions.TagSizeException:
                        
                        # tag cannot be stored; skip this update
                        continue
                        
                    
                    hash_ids = self.modules_hashes_local_cache.GetHashIds( hashes )
                    
                    display_affected = action in ( HC.CONTENT_UPDATE_ADD, HC.CONTENT_UPDATE_DELETE, HC.CONTENT_UPDATE_PEND, HC.CONTENT_UPDATE_RESCIND_PEND )
                    
                    if display_affected and publish_content_updates and self.modules_tag_display.IsChained( ClientTags.TAG_DISPLAY_ACTUAL, service_id, tag_id ):
                        
                        # this tag participates in sibling/parent chains, so the affected
                        # files' displayed tags must be regenerated later
                        self._regen_tags_managers_hash_ids.update( hash_ids )
                        
                    
                    if action == HC.CONTENT_UPDATE_ADD:
                        
                        if not HG.client_controller.tag_display_manager.TagOK( ClientTags.TAG_DISPLAY_STORAGE, service_key, tag ):
                            
                            # tag is filtered out for this service; do not store it
                            continue
                            
                        
                        ultimate_mappings_ids.append( ( tag_id, hash_ids ) )
                        
                    elif action == HC.CONTENT_UPDATE_DELETE:
                        
                        ultimate_deleted_mappings_ids.append( ( tag_id, hash_ids ) )
                        
                    elif action == HC.CONTENT_UPDATE_PEND:
                        
                        if not HG.client_controller.tag_display_manager.TagOK( ClientTags.TAG_DISPLAY_STORAGE, service_key, tag ):
                            
                            continue
                            
                        
                        ultimate_pending_mappings_ids.append( ( tag_id, hash_ids ) )
                        
                    elif action == HC.CONTENT_UPDATE_RESCIND_PEND:
                        
                        ultimate_pending_rescinded_mappings_ids.append( ( tag_id, hash_ids ) )
                        
                    elif action == HC.CONTENT_UPDATE_PETITION:
                        
                        reason = content_update.GetReason()
                        
                        reason_id = self.modules_texts.GetTextId( reason )
                        
                        ultimate_petitioned_mappings_ids.append( ( tag_id, hash_ids, reason_id ) )
                        
                    elif action == HC.CONTENT_UPDATE_RESCIND_PETITION:
                        
                        ultimate_petitioned_rescinded_mappings_ids.append( ( tag_id, hash_ids ) )
                        
                    elif action == HC.CONTENT_UPDATE_CLEAR_DELETE_RECORD:
                        
                        # wipe the deleted-mapping record from the master tables and every
                        # specific mapping cache, and invalidate the cached count
                        ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = ClientDBMappingsStorage.GenerateMappingsTableNames( service_id )
                        
                        self._ExecuteMany( 'DELETE FROM {} WHERE tag_id = ? AND hash_id = ?;'.format( deleted_mappings_table_name ), ( ( tag_id, hash_id ) for hash_id in hash_ids ) )
                        
                        self._Execute( 'DELETE FROM service_info WHERE service_id = ? AND info_type = ?;', ( service_id, HC.SERVICE_INFO_NUM_DELETED_MAPPINGS ) )
                        
                        cache_file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES )
                        
                        for cache_file_service_id in cache_file_service_ids:
                            
                            ( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( cache_file_service_id, service_id )
                            
                            self._ExecuteMany( 'DELETE FROM ' + cache_deleted_mappings_table_name + ' WHERE hash_id = ? AND tag_id = ?;', ( ( hash_id, tag_id ) for hash_id in hash_ids ) )
                            
                        
                    
                elif data_type == HC.CONTENT_TYPE_TAG_PARENTS:
                    
                    if action in ( HC.CONTENT_UPDATE_ADD, HC.CONTENT_UPDATE_DELETE ):
                        
                        ( child_tag, parent_tag ) = row
                        
                        try:
                            
                            child_tag_id = self.modules_tags.GetTagId( child_tag )
                            
                            parent_tag_id = self.modules_tags.GetTagId( parent_tag )
                            
                        except HydrusExceptions.TagSizeException:
                            
                            continue
                            
                        
                        pairs = ( ( child_tag_id, parent_tag_id ), )
                        
                        if action == HC.CONTENT_UPDATE_ADD:
                            
                            self.modules_tag_parents.AddTagParents( service_id, pairs )
                            
                        elif action == HC.CONTENT_UPDATE_DELETE:
                            
                            self.modules_tag_parents.DeleteTagParents( service_id, pairs )
                            
                        
                        changed_parent_tag_ids.update( ( child_tag_id, parent_tag_id ) )
                        
                        if service_type == HC.TAG_REPOSITORY:
                            
                            notify_new_pending = True
                            
                        
                    elif action in ( HC.CONTENT_UPDATE_PEND, HC.CONTENT_UPDATE_PETITION ):
                        
                        ( child_tag, parent_tag ) = row
                        
                        try:
                            
                            child_tag_id = self.modules_tags.GetTagId( child_tag )
                            
                            parent_tag_id = self.modules_tags.GetTagId( parent_tag )
                            
                        except HydrusExceptions.TagSizeException:
                            
                            continue
                            
                        
                        reason = content_update.GetReason()
                        
                        reason_id = self.modules_texts.GetTextId( reason )
                        
                        triples = ( ( child_tag_id, parent_tag_id, reason_id ), )
                        
                        if action == HC.CONTENT_UPDATE_PEND:
                            
                            self.modules_tag_parents.PendTagParents( service_id, triples )
                            
                        elif action == HC.CONTENT_UPDATE_PETITION:
                            
                            self.modules_tag_parents.PetitionTagParents( service_id, triples )
                            
                        
                        changed_parent_tag_ids.update( ( child_tag_id, parent_tag_id ) )
                        
                        if service_type == HC.TAG_REPOSITORY:
                            
                            notify_new_pending = True
                            
                        
                    elif action in ( HC.CONTENT_UPDATE_RESCIND_PEND, HC.CONTENT_UPDATE_RESCIND_PETITION ):
                        
                        ( child_tag, parent_tag ) = row
                        
                        try:
                            
                            child_tag_id = self.modules_tags.GetTagId( child_tag )
                            
                            parent_tag_id = self.modules_tags.GetTagId( parent_tag )
                            
                        except HydrusExceptions.TagSizeException:
                            
                            continue
                            
                        
                        pairs = ( ( child_tag_id, parent_tag_id ), )
                        
                        if action == HC.CONTENT_UPDATE_RESCIND_PEND:
                            
                            self.modules_tag_parents.RescindPendingTagParents( service_id, pairs )
                            
                        elif action == HC.CONTENT_UPDATE_RESCIND_PETITION:
                            
                            self.modules_tag_parents.RescindPetitionedTagParents( service_id, pairs )
                            
                        
                        changed_parent_tag_ids.update( ( child_tag_id, parent_tag_id ) )
                        
                        if service_type == HC.TAG_REPOSITORY:
                            
                            notify_new_pending = True
                            
                        
                    
                    notify_new_parents = True
                    
                elif data_type == HC.CONTENT_TYPE_TAG_SIBLINGS:
                    
                    if action in ( HC.CONTENT_UPDATE_ADD, HC.CONTENT_UPDATE_DELETE ):
                        
                        ( bad_tag, good_tag ) = row
                        
                        try:
                            
                            bad_tag_id = self.modules_tags.GetTagId( bad_tag )
                            
                            good_tag_id = self.modules_tags.GetTagId( good_tag )
                            
                        except HydrusExceptions.TagSizeException:
                            
                            continue
                            
                        
                        pairs = ( ( bad_tag_id, good_tag_id ), )
                        
                        if action == HC.CONTENT_UPDATE_ADD:
                            
                            self.modules_tag_siblings.AddTagSiblings( service_id, pairs )
                            
                        elif action == HC.CONTENT_UPDATE_DELETE:
                            
                            self.modules_tag_siblings.DeleteTagSiblings( service_id, pairs )
                            
                        
                        changed_sibling_tag_ids.update( ( bad_tag_id, good_tag_id ) )
                        
                        if service_type == HC.TAG_REPOSITORY:
                            
                            notify_new_pending = True
                            
                        
                    elif action in ( HC.CONTENT_UPDATE_PEND, HC.CONTENT_UPDATE_PETITION ):
                        
                        ( bad_tag, good_tag ) = row
                        
                        try:
                            
                            bad_tag_id = self.modules_tags.GetTagId( bad_tag )
                            
                            good_tag_id = self.modules_tags.GetTagId( good_tag )
                            
                        except HydrusExceptions.TagSizeException:
                            
                            continue
                            
                        
                        reason = content_update.GetReason()
                        
                        reason_id = self.modules_texts.GetTextId( reason )
                        
                        triples = ( ( bad_tag_id, good_tag_id, reason_id ), )
                        
                        if action == HC.CONTENT_UPDATE_PEND:
                            
                            self.modules_tag_siblings.PendTagSiblings( service_id, triples )
                            
                        elif action == HC.CONTENT_UPDATE_PETITION:
                            
                            self.modules_tag_siblings.PetitionTagSiblings( service_id, triples )
                            
                        
                        changed_sibling_tag_ids.update( ( bad_tag_id, good_tag_id ) )
                        
                        if service_type == HC.TAG_REPOSITORY:
                            
                            notify_new_pending = True
                            
                        
                    elif action in ( HC.CONTENT_UPDATE_RESCIND_PEND, HC.CONTENT_UPDATE_RESCIND_PETITION ):
                        
                        ( bad_tag, good_tag ) = row
                        
                        try:
                            
                            bad_tag_id = self.modules_tags.GetTagId( bad_tag )
                            
                            good_tag_id = self.modules_tags.GetTagId( good_tag )
                            
                        except HydrusExceptions.TagSizeException:
                            
                            continue
                            
                        
                        pairs = ( ( bad_tag_id, good_tag_id ), )
                        
                        if action == HC.CONTENT_UPDATE_RESCIND_PEND:
                            
                            self.modules_tag_siblings.RescindPendingTagSiblings( service_id, pairs )
                            
                        elif action == HC.CONTENT_UPDATE_RESCIND_PETITION:
                            
                            self.modules_tag_siblings.RescindPetitionedTagSiblings( service_id, pairs )
                            
                        
                        changed_sibling_tag_ids.update( ( bad_tag_id, good_tag_id ) )
                        
                        if service_type == HC.TAG_REPOSITORY:
                            
                            notify_new_pending = True
                            
                        
                    
                    notify_new_siblings = True
                    
                
            elif service_type in HC.RATINGS_SERVICES:
                
                if action == HC.CONTENT_UPDATE_ADD:
                    
                    ( rating, hashes ) = row
                    
                    hash_ids = self.modules_hashes_local_cache.GetHashIds( hashes )
                    
                    # NOTE(review): splayed_hash_ids looks unused below — confirm leftover
                    splayed_hash_ids = HydrusData.SplayListForDB( hash_ids )
                    
                    if service_type in ( HC.LOCAL_RATING_LIKE, HC.LOCAL_RATING_NUMERICAL ):
                        
                        # delete-then-insert; track the net change via row counts
                        ratings_added = 0
                        
                        self._ExecuteMany( 'DELETE FROM local_ratings WHERE service_id = ? AND hash_id = ?;', ( ( service_id, hash_id ) for hash_id in hash_ids ) )
                        
                        ratings_added -= self._GetRowCount()
                        
                        if rating is not None:
                            
                            self._ExecuteMany( 'INSERT INTO local_ratings ( service_id, hash_id, rating ) VALUES ( ?, ?, ? );', [ ( service_id, hash_id, rating ) for hash_id in hash_ids ] )
                            
                            ratings_added += self._GetRowCount()
                            
                        
                        self._Execute( 'UPDATE service_info SET info = info + ? WHERE service_id = ? AND info_type = ?;', ( ratings_added, service_id, HC.SERVICE_INFO_NUM_FILES ) )
                        
                    
                elif action == HC.CONTENT_UPDATE_ADVANCED:
                    
                    action = row
                    
                    if action == 'delete_for_deleted_files':
                        
                        deleted_files_table_name = ClientDBFilesStorage.GenerateFilesTableName( self.modules_services.combined_local_file_service_id, HC.CONTENT_STATUS_DELETED )
                        
                        self._Execute( 'DELETE FROM local_ratings WHERE service_id = ? and hash_id IN ( SELECT hash_id FROM {} );'.format( deleted_files_table_name ), ( service_id, ) )
                        
                        ratings_deleted = self._GetRowCount()
                        
                        self._Execute( 'UPDATE service_info SET info = info - ? WHERE service_id = ? AND info_type = ?;', ( ratings_deleted, service_id, HC.SERVICE_INFO_NUM_FILES ) )
                        
                    elif action == 'delete_for_non_local_files':
                        
                        current_files_table_name = ClientDBFilesStorage.GenerateFilesTableName( self.modules_services.combined_local_file_service_id, HC.CONTENT_STATUS_CURRENT )
                        
                        self._Execute( 'DELETE FROM local_ratings WHERE local_ratings.service_id = ? and hash_id NOT IN ( SELECT hash_id FROM {} );'.format( current_files_table_name ), ( service_id, ) )
                        
                        ratings_deleted = self._GetRowCount()
                        
                        self._Execute( 'UPDATE service_info SET info = info - ? WHERE service_id = ? AND info_type = ?;', ( ratings_deleted, service_id, HC.SERVICE_INFO_NUM_FILES ) )
                        
                    elif action == 'delete_for_all_files':
                        
                        self._Execute( 'DELETE FROM local_ratings WHERE service_id = ?;', ( service_id, ) )
                        
                        self._Execute( 'UPDATE service_info SET info = ? WHERE service_id = ? AND info_type = ?;', ( 0, service_id, HC.SERVICE_INFO_NUM_FILES ) )
                        
                    
                
            elif service_type == HC.LOCAL_NOTES:
                
                if action == HC.CONTENT_UPDATE_SET:
                    
                    ( hash, name, note ) = row
                    
                    hash_id = self.modules_hashes_local_cache.GetHashId( hash )
                    
                    name_id = self.modules_texts.GetLabelId( name )
                    
                    # setting a note is delete-then-insert; an empty note just deletes
                    self._Execute( 'DELETE FROM file_notes WHERE hash_id = ? AND name_id = ?;', ( hash_id, name_id ) )
                    
                    if len( note ) > 0:
                        
                        note_id = self.modules_texts.GetNoteId( note )
                        
                        self._Execute( 'INSERT OR IGNORE INTO file_notes ( hash_id, name_id, note_id ) VALUES ( ?, ?, ? );', ( hash_id, name_id, note_id ) )
                        
                    
                elif action == HC.CONTENT_UPDATE_DELETE:
                    
                    ( hash, name ) = row
                    
                    hash_id = self.modules_hashes_local_cache.GetHashId( hash )
                    
                    name_id = self.modules_texts.GetLabelId( name )
                    
                    self._Execute( 'DELETE FROM file_notes WHERE hash_id = ? AND name_id = ?;', ( hash_id, name_id ) )
                    
                
            
        
        # flush all accumulated mapping changes for this service in one go
        if len( ultimate_mappings_ids ) + len( ultimate_deleted_mappings_ids ) + len( ultimate_pending_mappings_ids ) + len( ultimate_pending_rescinded_mappings_ids ) + len( ultimate_petitioned_mappings_ids ) + len( ultimate_petitioned_rescinded_mappings_ids ) > 0:
            
            self._UpdateMappings( service_id, mappings_ids = ultimate_mappings_ids, deleted_mappings_ids = ultimate_deleted_mappings_ids, pending_mappings_ids = ultimate_pending_mappings_ids, pending_rescinded_mappings_ids = ultimate_pending_rescinded_mappings_ids, petitioned_mappings_ids = ultimate_petitioned_mappings_ids, petitioned_rescinded_mappings_ids = ultimate_petitioned_rescinded_mappings_ids )
            
            if service_type == HC.TAG_REPOSITORY:
                
                notify_new_pending = True
                
            
        
        if len( changed_sibling_tag_ids ) > 0:
            
            self.modules_tag_display.NotifySiblingsChanged( service_id, changed_sibling_tag_ids )
            
        
        if len( changed_parent_tag_ids ) > 0:
            
            self.modules_tag_display.NotifyParentsChanged( service_id, changed_parent_tag_ids )
            
        
    
    if publish_content_updates:
        
        # notifications go out only after the enclosing job commits
        if notify_new_pending:
            
            self._cursor_transaction_wrapper.pub_after_job( 'notify_new_pending' )
            
        
        if notify_new_downloads:
            
            self._cursor_transaction_wrapper.pub_after_job( 'notify_new_downloads' )
            
        
        if notify_new_siblings or notify_new_parents:
            
            self._cursor_transaction_wrapper.pub_after_job( 'notify_new_tag_display_application' )
            
        
        self.pub_content_updates_after_commit( valid_service_keys_to_content_updates )
        
    
def _ProcessRepositoryContent( self, service_key, content_hash, content_iterator_dict, content_types_to_process, job_key, work_time ):
    """
    Process part of a repository content update within a time budget.
    
    content_iterator_dict maps content keys ('new_files', 'deleted_mappings',
    etc.) to resumable iterators over the update's rows. Each section is
    consumed in autothrottled chunks; when a section's iterator is exhausted
    its key is deleted from the dict, so a later call on the same dict skips
    straight past it. If the time budget runs out or the job is cancelled,
    the method returns early with the partial row count and the dict in a
    resumable state.
    
    Only when every requested section completes is the update marked as
    processed. Returns the number of rows processed this call.
    """
    
    FILES_INITIAL_CHUNK_SIZE = 20
    MAPPINGS_INITIAL_CHUNK_SIZE = 50
    PAIR_ROWS_INITIAL_CHUNK_SIZE = 100
    
    service_id = self.modules_services.GetServiceId( service_key )
    
    precise_time_to_stop = HydrusData.GetNowPrecise() + work_time
    
    num_rows_processed = 0
    
    if HC.CONTENT_TYPE_FILES in content_types_to_process:
        
        if 'new_files' in content_iterator_dict:
            
            has_audio = None # hack until we figure this out better
            
            i = content_iterator_dict[ 'new_files' ]
            
            for chunk in HydrusData.SplitIteratorIntoAutothrottledChunks( i, FILES_INITIAL_CHUNK_SIZE, precise_time_to_stop ):
                
                files_info_rows = []
                files_rows = []
                
                for ( service_hash_id, size, mime, timestamp, width, height, duration, num_frames, num_words ) in chunk:
                    
                    # translate the repository's hash id into our local hash id
                    hash_id = self.modules_repositories.NormaliseServiceHashId( service_id, service_hash_id )
                    
                    files_info_rows.append( ( hash_id, size, mime, width, height, duration, num_frames, has_audio, num_words ) )
                    
                    files_rows.append( ( hash_id, timestamp ) )
                    
                
                self.modules_files_metadata_basic.AddFilesInfo( files_info_rows )
                
                self._AddFiles( service_id, files_rows )
                
                num_rows_processed += len( files_rows )
                
                if HydrusData.TimeHasPassedPrecise( precise_time_to_stop ) or job_key.IsCancelled():
                    
                    return num_rows_processed
                    
                
            
            # iterator exhausted: this section is done, skip it on resume
            del content_iterator_dict[ 'new_files' ]
            
        
        #
        
        if 'deleted_files' in content_iterator_dict:
            
            i = content_iterator_dict[ 'deleted_files' ]
            
            for chunk in HydrusData.SplitIteratorIntoAutothrottledChunks( i, FILES_INITIAL_CHUNK_SIZE, precise_time_to_stop ):
                
                service_hash_ids = chunk
                
                hash_ids = self.modules_repositories.NormaliseServiceHashIds( service_id, service_hash_ids )
                
                self._DeleteFiles( service_id, hash_ids )
                
                num_rows_processed += len( hash_ids )
                
                if HydrusData.TimeHasPassedPrecise( precise_time_to_stop ) or job_key.IsCancelled():
                    
                    return num_rows_processed
                    
                
            
            del content_iterator_dict[ 'deleted_files' ]
            
        
    
    #
    
    if HC.CONTENT_TYPE_MAPPINGS in content_types_to_process:
        
        if 'new_mappings' in content_iterator_dict:
            
            i = content_iterator_dict[ 'new_mappings' ]
            
            for chunk in HydrusData.SplitMappingIteratorIntoAutothrottledChunks( i, MAPPINGS_INITIAL_CHUNK_SIZE, precise_time_to_stop ):
                
                mappings_ids = []
                
                num_rows = 0
                
                # yo, I can save time if I merge these ids so we only have one round of normalisation
                for ( service_tag_id, service_hash_ids ) in chunk:
                    
                    tag_id = self.modules_repositories.NormaliseServiceTagId( service_id, service_tag_id )
                    hash_ids = self.modules_repositories.NormaliseServiceHashIds( service_id, service_hash_ids )
                    
                    mappings_ids.append( ( tag_id, hash_ids ) )
                    
                    num_rows += len( service_hash_ids )
                    
                
                self._UpdateMappings( service_id, mappings_ids = mappings_ids )
                
                num_rows_processed += num_rows
                
                if HydrusData.TimeHasPassedPrecise( precise_time_to_stop ) or job_key.IsCancelled():
                    
                    return num_rows_processed
                    
                
            
            del content_iterator_dict[ 'new_mappings' ]
            
        
        #
        
        if 'deleted_mappings' in content_iterator_dict:
            
            i = content_iterator_dict[ 'deleted_mappings' ]
            
            for chunk in HydrusData.SplitMappingIteratorIntoAutothrottledChunks( i, MAPPINGS_INITIAL_CHUNK_SIZE, precise_time_to_stop ):
                
                deleted_mappings_ids = []
                
                num_rows = 0
                
                for ( service_tag_id, service_hash_ids ) in chunk:
                    
                    tag_id = self.modules_repositories.NormaliseServiceTagId( service_id, service_tag_id )
                    hash_ids = self.modules_repositories.NormaliseServiceHashIds( service_id, service_hash_ids )
                    
                    deleted_mappings_ids.append( ( tag_id, hash_ids ) )
                    
                    num_rows += len( service_hash_ids )
                    
                
                self._UpdateMappings( service_id, deleted_mappings_ids = deleted_mappings_ids )
                
                num_rows_processed += num_rows
                
                if HydrusData.TimeHasPassedPrecise( precise_time_to_stop ) or job_key.IsCancelled():
                    
                    return num_rows_processed
                    
                
            
            del content_iterator_dict[ 'deleted_mappings' ]
            
        
    
    #
    
    # pair work publishes a display notification even on early return, hence try/finally
    parents_or_siblings_changed = False
    
    try:
        
        if HC.CONTENT_TYPE_TAG_PARENTS in content_types_to_process:
            
            if 'new_parents' in content_iterator_dict:
                
                i = content_iterator_dict[ 'new_parents' ]
                
                for chunk in HydrusData.SplitIteratorIntoAutothrottledChunks( i, PAIR_ROWS_INITIAL_CHUNK_SIZE, precise_time_to_stop ):
                    
                    parent_ids = []
                    tag_ids = set()
                    
                    for ( service_child_tag_id, service_parent_tag_id ) in chunk:
                        
                        child_tag_id = self.modules_repositories.NormaliseServiceTagId( service_id, service_child_tag_id )
                        parent_tag_id = self.modules_repositories.NormaliseServiceTagId( service_id, service_parent_tag_id )
                        
                        tag_ids.add( child_tag_id )
                        tag_ids.add( parent_tag_id )
                        
                        parent_ids.append( ( child_tag_id, parent_tag_id ) )
                        
                    
                    self.modules_tag_parents.AddTagParents( service_id, parent_ids )
                    
                    self.modules_tag_display.NotifyParentsChanged( service_id, tag_ids )
                    
                    parents_or_siblings_changed = True
                    
                    num_rows_processed += len( parent_ids )
                    
                    if HydrusData.TimeHasPassedPrecise( precise_time_to_stop ) or job_key.IsCancelled():
                        
                        return num_rows_processed
                        
                    
                
                del content_iterator_dict[ 'new_parents' ]
                
            
            #
            
            if 'deleted_parents' in content_iterator_dict:
                
                i = content_iterator_dict[ 'deleted_parents' ]
                
                for chunk in HydrusData.SplitIteratorIntoAutothrottledChunks( i, PAIR_ROWS_INITIAL_CHUNK_SIZE, precise_time_to_stop ):
                    
                    parent_ids = []
                    tag_ids = set()
                    
                    for ( service_child_tag_id, service_parent_tag_id ) in chunk:
                        
                        child_tag_id = self.modules_repositories.NormaliseServiceTagId( service_id, service_child_tag_id )
                        parent_tag_id = self.modules_repositories.NormaliseServiceTagId( service_id, service_parent_tag_id )
                        
                        tag_ids.add( child_tag_id )
                        tag_ids.add( parent_tag_id )
                        
                        parent_ids.append( ( child_tag_id, parent_tag_id ) )
                        
                    
                    self.modules_tag_parents.DeleteTagParents( service_id, parent_ids )
                    
                    self.modules_tag_display.NotifyParentsChanged( service_id, tag_ids )
                    
                    parents_or_siblings_changed = True
                    
                    num_rows = len( parent_ids )
                    
                    num_rows_processed += num_rows
                    
                    if HydrusData.TimeHasPassedPrecise( precise_time_to_stop ) or job_key.IsCancelled():
                        
                        return num_rows_processed
                        
                    
                
                del content_iterator_dict[ 'deleted_parents' ]
                
            
        
        #
        
        if HC.CONTENT_TYPE_TAG_SIBLINGS in content_types_to_process:
            
            if 'new_siblings' in content_iterator_dict:
                
                i = content_iterator_dict[ 'new_siblings' ]
                
                for chunk in HydrusData.SplitIteratorIntoAutothrottledChunks( i, PAIR_ROWS_INITIAL_CHUNK_SIZE, precise_time_to_stop ):
                    
                    sibling_ids = []
                    tag_ids = set()
                    
                    for ( service_bad_tag_id, service_good_tag_id ) in chunk:
                        
                        bad_tag_id = self.modules_repositories.NormaliseServiceTagId( service_id, service_bad_tag_id )
                        good_tag_id = self.modules_repositories.NormaliseServiceTagId( service_id, service_good_tag_id )
                        
                        tag_ids.add( bad_tag_id )
                        tag_ids.add( good_tag_id )
                        
                        sibling_ids.append( ( bad_tag_id, good_tag_id ) )
                        
                    
                    self.modules_tag_siblings.AddTagSiblings( service_id, sibling_ids )
                    
                    self.modules_tag_display.NotifySiblingsChanged( service_id, tag_ids )
                    
                    parents_or_siblings_changed = True
                    
                    num_rows = len( sibling_ids )
                    
                    num_rows_processed += num_rows
                    
                    if HydrusData.TimeHasPassedPrecise( precise_time_to_stop ) or job_key.IsCancelled():
                        
                        return num_rows_processed
                        
                    
                
                del content_iterator_dict[ 'new_siblings' ]
                
            
            #
            
            if 'deleted_siblings' in content_iterator_dict:
                
                i = content_iterator_dict[ 'deleted_siblings' ]
                
                for chunk in HydrusData.SplitIteratorIntoAutothrottledChunks( i, PAIR_ROWS_INITIAL_CHUNK_SIZE, precise_time_to_stop ):
                    
                    sibling_ids = []
                    tag_ids = set()
                    
                    for ( service_bad_tag_id, service_good_tag_id ) in chunk:
                        
                        bad_tag_id = self.modules_repositories.NormaliseServiceTagId( service_id, service_bad_tag_id )
                        good_tag_id = self.modules_repositories.NormaliseServiceTagId( service_id, service_good_tag_id )
                        
                        tag_ids.add( bad_tag_id )
                        tag_ids.add( good_tag_id )
                        
                        sibling_ids.append( ( bad_tag_id, good_tag_id ) )
                        
                    
                    self.modules_tag_siblings.DeleteTagSiblings( service_id, sibling_ids )
                    
                    self.modules_tag_display.NotifySiblingsChanged( service_id, tag_ids )
                    
                    parents_or_siblings_changed = True
                    
                    num_rows_processed += len( sibling_ids )
                    
                    if HydrusData.TimeHasPassedPrecise( precise_time_to_stop ) or job_key.IsCancelled():
                        
                        return num_rows_processed
                        
                    
                
                del content_iterator_dict[ 'deleted_siblings' ]
                
            
        
    finally:
        
        if parents_or_siblings_changed:
            
            self._cursor_transaction_wrapper.pub_after_job( 'notify_new_tag_display_application' )
            
        
    
    # every requested section ran to completion: mark this update done
    self.modules_repositories.SetUpdateProcessed( service_id, content_hash, content_types_to_process )
    
    return num_rows_processed
    
def _PushRecentTags( self, service_key, tags ):
    """
    Update the recent-tags record for a service.
    
    A None tags payload clears the service's recent tags entirely;
    otherwise each given tag is upserted with the current timestamp.
    """
    
    service_id = self.modules_services.GetServiceId( service_key )
    
    if tags is None:
        
        # None means 'forget all recent tags for this service'
        self._Execute( 'DELETE FROM recent_tags WHERE service_id = ?;', ( service_id, ) )
        
        return
        
    
    timestamp = HydrusData.GetNow()
    
    insert_rows = [ ( service_id, self.modules_tags.GetTagId( tag ), timestamp ) for tag in tags ]
    
    self._ExecuteMany( 'REPLACE INTO recent_tags ( service_id, tag_id, timestamp ) VALUES ( ?, ?, ? );', insert_rows )
    
def _Read( self, action, *args, **kwargs ):
    """
    Central read dispatcher: map an action string to the internal method or
    module call that serves it, passing all arguments through untouched.
    
    Raises a plain Exception for an unrecognised action string.
    """
    # NOTE: this is a flat string->call dispatch; every branch assigns to
    # 'result', and the final else guarantees 'result' is always bound before
    # the return below.
    if action == 'autocomplete_predicates': result = self._GetAutocompletePredicates( *args, **kwargs )
    elif action == 'boned_stats': result = self._GetBonedStats( *args, **kwargs )
    elif action == 'client_files_locations': result = self._GetClientFilesLocations( *args, **kwargs )
    elif action == 'deferred_physical_delete': result = self.modules_files_storage.GetDeferredPhysicalDelete( *args, **kwargs )
    elif action == 'duplicate_pairs_for_filtering': result = self._DuplicatesGetPotentialDuplicatePairsForFiltering( *args, **kwargs )
    elif action == 'file_duplicate_hashes': result = self.modules_files_duplicates.DuplicatesGetFileHashesByDuplicateType( *args, **kwargs )
    elif action == 'file_duplicate_info': result = self.modules_files_duplicates.DuplicatesGetFileDuplicateInfo( *args, **kwargs )
    elif action == 'file_hashes': result = self.modules_hashes.GetFileHashes( *args, **kwargs )
    elif action == 'file_history': result = self._GetFileHistory( *args, **kwargs )
    elif action == 'file_maintenance_get_job': result = self.modules_files_maintenance_queue.GetJob( *args, **kwargs )
    elif action == 'file_maintenance_get_job_counts': result = self.modules_files_maintenance_queue.GetJobCounts( *args, **kwargs )
    elif action == 'file_query_ids': result = self._GetHashIdsFromQuery( *args, **kwargs )
    elif action == 'file_system_predicates': result = self._GetFileSystemPredicates( *args, **kwargs )
    elif action == 'filter_existing_tags': result = self._FilterExistingTags( *args, **kwargs )
    elif action == 'filter_hashes': result = self._FilterHashesByService( *args, **kwargs )
    elif action == 'force_refresh_tags_managers': result = self._GetForceRefreshTagsManagers( *args, **kwargs )
    elif action == 'gui_session': result = self.modules_serialisable.GetGUISession( *args, **kwargs )
    elif action == 'hash_ids_to_hashes': result = self.modules_hashes_local_cache.GetHashIdsToHashes( *args, **kwargs )
    elif action == 'hash_status': result = self._GetHashStatus( *args, **kwargs )
    elif action == 'have_hashed_serialised_objects': result = self.modules_serialisable.HaveHashedJSONDumps( *args, **kwargs )
    elif action == 'ideal_client_files_locations': result = self._GetIdealClientFilesLocations( *args, **kwargs )
    elif action == 'imageboards': result = self.modules_serialisable.GetYAMLDump( ClientDBSerialisable.YAML_DUMP_ID_IMAGEBOARD, *args, **kwargs )
    elif action == 'inbox_hashes': result = self._FilterInboxHashes( *args, **kwargs )
    elif action == 'is_an_orphan': result = self._IsAnOrphan( *args, **kwargs )
    elif action == 'last_shutdown_work_time': result = self.modules_db_maintenance.GetLastShutdownWorkTime( *args, **kwargs )
    # the local booru YAML dump actions pre-bind the dump id; 'share' takes a
    # name from args, 'shares'/'share_keys' list everything under that id
    elif action == 'local_booru_share_keys': result = self.modules_serialisable.GetYAMLDumpNames( ClientDBSerialisable.YAML_DUMP_ID_LOCAL_BOORU )
    elif action == 'local_booru_share': result = self.modules_serialisable.GetYAMLDump( ClientDBSerialisable.YAML_DUMP_ID_LOCAL_BOORU, *args, **kwargs )
    elif action == 'local_booru_shares': result = self.modules_serialisable.GetYAMLDump( ClientDBSerialisable.YAML_DUMP_ID_LOCAL_BOORU )
    elif action == 'maintenance_due': result = self._GetMaintenanceDue( *args, **kwargs )
    elif action == 'media_predicates': result = self._GetMediaPredicates( *args, **kwargs )
    elif action == 'media_result': result = self._GetMediaResultFromHash( *args, **kwargs )
    elif action == 'media_results': result = self._GetMediaResultsFromHashes( *args, **kwargs )
    elif action == 'media_results_from_ids': result = self._GetMediaResults( *args, **kwargs )
    elif action == 'migration_get_mappings': result = self._MigrationGetMappings( *args, **kwargs )
    elif action == 'migration_get_pairs': result = self._MigrationGetPairs( *args, **kwargs )
    elif action == 'missing_repository_update_hashes': result = self.modules_repositories.GetRepositoryUpdateHashesIDoNotHave( *args, **kwargs )
    elif action == 'missing_thumbnail_hashes': result = self._GetRepositoryThumbnailHashesIDoNotHave( *args, **kwargs )
    elif action == 'num_deferred_file_deletes': result = self.modules_files_storage.GetDeferredPhysicalDeleteCounts()
    elif action == 'nums_pending': result = self._GetNumsPending( *args, **kwargs )
    elif action == 'options': result = self._GetOptions( *args, **kwargs )
    elif action == 'pending': result = self._GetPending( *args, **kwargs )
    elif action == 'random_potential_duplicate_hashes': result = self._DuplicatesGetRandomPotentialDuplicateHashes( *args, **kwargs )
    elif action == 'recent_tags': result = self._GetRecentTags( *args, **kwargs )
    elif action == 'repository_progress': result = self.modules_repositories.GetRepositoryProgress( *args, **kwargs )
    elif action == 'repository_update_hashes_to_process': result = self.modules_repositories.GetRepositoryUpdateHashesICanProcess( *args, **kwargs )
    elif action == 'serialisable': result = self.modules_serialisable.GetJSONDump( *args, **kwargs )
    elif action == 'serialisable_simple': result = self.modules_serialisable.GetJSONSimple( *args, **kwargs )
    elif action == 'serialisable_named': result = self.modules_serialisable.GetJSONDumpNamed( *args, **kwargs )
    elif action == 'serialisable_names': result = self.modules_serialisable.GetJSONDumpNames( *args, **kwargs )
    elif action == 'serialisable_names_to_backup_timestamps': result = self.modules_serialisable.GetJSONDumpNamesToBackupTimestamps( *args, **kwargs )
    elif action == 'service_directory': result = self._GetServiceDirectoryHashes( *args, **kwargs )
    elif action == 'service_directories': result = self._GetServiceDirectoriesInfo( *args, **kwargs )
    elif action == 'service_filenames': result = self._GetServiceFilenames( *args, **kwargs )
    elif action == 'service_info': result = self._GetServiceInfo( *args, **kwargs )
    elif action == 'services': result = self.modules_services.GetServices( *args, **kwargs )
    elif action == 'similar_files_maintenance_status': result = self.modules_similar_files.GetMaintenanceStatus( *args, **kwargs )
    elif action == 'related_tags': result = self._GetRelatedTags( *args, **kwargs )
    elif action == 'tag_display_application': result = self.modules_tag_display.GetApplication( *args, **kwargs )
    elif action == 'tag_display_maintenance_status': result = self._CacheTagDisplayGetApplicationStatusNumbers( *args, **kwargs )
    elif action == 'tag_parents': result = self.modules_tag_parents.GetTagParents( *args, **kwargs )
    elif action == 'tag_siblings': result = self.modules_tag_siblings.GetTagSiblings( *args, **kwargs )
    elif action == 'tag_siblings_all_ideals': result = self.modules_tag_siblings.GetTagSiblingsIdeals( *args, **kwargs )
    elif action == 'tag_display_decorators': result = self.modules_tag_display.GetUIDecorators( *args, **kwargs )
    elif action == 'tag_siblings_and_parents_lookup': result = self.modules_tag_display.GetSiblingsAndParentsForTags( *args, **kwargs )
    elif action == 'tag_siblings_lookup': result = self.modules_tag_siblings.GetTagSiblingsForTags( *args, **kwargs )
    elif action == 'trash_hashes': result = self._GetTrashHashes( *args, **kwargs )
    elif action == 'potential_duplicates_count': result = self._DuplicatesGetPotentialDuplicatesCount( *args, **kwargs )
    elif action == 'url_statuses': result = self._GetURLStatuses( *args, **kwargs )
    elif action == 'vacuum_data': result = self.modules_db_maintenance.GetVacuumData( *args, **kwargs )
    else: raise Exception( 'db received an unknown read command: ' + action )
    
    return result
    
def _RecoverFromMissingDefinitions( self, content_type ):
    """
    Scan every content table that references definition ids of the given
    content type and handle any id that is missing from the master definition
    table: recover the definition from the local cache when possible,
    otherwise purge all of that id's rows from the database.
    
    Currently only HC.CONTENT_TYPE_HASH is supported.
    """
    # this is not finished, but basics are there
    # remember this func uses a bunch of similar tech for the eventual orphan definition cleansing routine
    # we just have to extend modules functionality to cover all content tables and we are good to go
    
    if content_type == HC.CONTENT_TYPE_HASH:
        
        definition_column_name = 'hash_id'
        
    
    # eventually migrate this gubbins to cancellable async done in parts, which means generating, handling, and releasing the temp table name more cleverly
    # job presentation to UI
    
    all_tables_and_columns = []
    
    for module in self._modules:
        
        all_tables_and_columns.extend( module.GetTablesAndColumnsThatUseDefinitions( HC.CONTENT_TYPE_HASH ) )
        
    
    # random suffix so concurrent or previously-aborted runs cannot collide on the durable temp table name
    temp_all_useful_definition_ids_table_name = 'durable_temp.all_useful_definition_ids_{}'.format( os.urandom( 8 ).hex() )
    
    self._Execute( 'CREATE TABLE IF NOT EXISTS {} ( {} INTEGER PRIMARY KEY );'.format( temp_all_useful_definition_ids_table_name, definition_column_name ) )
    
    try:
        
        # gather every definition id that any content table references
        num_to_do = 0
        
        for ( table_name, column_name ) in all_tables_and_columns:
            
            query = 'INSERT OR IGNORE INTO {} ( {} ) SELECT DISTINCT {} FROM {};'.format(
                temp_all_useful_definition_ids_table_name,
                definition_column_name,
                column_name,
                table_name
            )
            
            self._Execute( query )
            
            num_to_do += self._GetRowCount()
            
        
        num_missing = 0
        num_recovered = 0
        
        BLOCK_SIZE = 1024
        
        batch_of_definition_ids = self._STL( self._Execute( 'SELECT {} FROM {} LIMIT {};'.format( definition_column_name, temp_all_useful_definition_ids_table_name, BLOCK_SIZE ) ) )
        
        # BUGFIX: the previous condition, len( batch ) > 1024, could never be true
        # after a LIMIT 1024 fetch, so the repair loop never ran. We now loop while
        # rows remain, delete each processed batch from the work table so the loop
        # terminates, and wrap the re-fetch in _STL so it is a list, not a cursor.
    
        while len( batch_of_definition_ids ) > 0:
            
            for definition_id in batch_of_definition_ids:
                
                if not self.modules_hashes.HasHashId( definition_id ):
                    
                    if content_type == HC.CONTENT_TYPE_HASH and self.modules_hashes_local_cache.HasHashId( definition_id ):
                        
                        # the local cache still knows the hash, so we can restore the master row
                        hash = self.modules_hashes_local_cache.GetHash( definition_id )
                        
                        self._Execute( 'INSERT OR IGNORE INTO hashes ( hash_id, hash ) VALUES ( ?, ? );', ( definition_id, sqlite3.Binary( hash ) ) )
                        
                        HydrusData.Print( '{} {} had no master definition, but I was able to recover from the local cache'.format( definition_column_name, definition_id ) )
                        
                        num_recovered += 1
                        
                    else:
                        
                        # unrecoverable--purge every row that references this id
                        HydrusData.Print( '{} {} had no master definition, it has been purged from the database!'.format( definition_column_name, definition_id ) )
                        
                        for ( table_name, column_name ) in all_tables_and_columns:
                            
                            self._Execute( 'DELETE FROM {} WHERE {} = ?;'.format( table_name, column_name ), ( definition_id, ) )
                            
                        
                        # tell user they will want to run clear orphan files, reset service cache info, and may need to recalc some autocomplete counts depending on total missing definitions
                        # I should clear service info based on content_type
                        
                        num_missing += 1
                        
                    
                
            
            # drop the batch we just examined so the next SELECT pulls fresh ids
            self._ExecuteMany( 'DELETE FROM {} WHERE {} = ?;'.format( temp_all_useful_definition_ids_table_name, definition_column_name ), ( ( definition_id, ) for definition_id in batch_of_definition_ids ) )
            
            batch_of_definition_ids = self._STL( self._Execute( 'SELECT {} FROM {} LIMIT {};'.format( definition_column_name, temp_all_useful_definition_ids_table_name, BLOCK_SIZE ) ) )
            
        
    finally:
        
        self._Execute( 'DROP TABLE {};'.format( temp_all_useful_definition_ids_table_name ) )
        
def _RegenerateLocalHashCache( self ):
    """Repopulate the fast local hash cache, showing a modal popup while working."""
    
    popup_job = ClientThreading.JobKey( cancellable = True )
    
    try:
        
        popup_job.SetStatusTitle( 'regenerating local hash cache' )
        
        self._controller.pub( 'modal_message', popup_job )
        
        status = 'generating local hash cache'
        
        popup_job.SetVariable( 'popup_text_1', status )
        self._controller.frame_splash_status.SetSubtext( status )
        
        # the module owns the actual rebuild work
        self.modules_hashes_local_cache.Repopulate()
        
    finally:
        
        popup_job.SetVariable( 'popup_text_1', 'done!' )
        
        popup_job.Finish()
        popup_job.Delete( 5 )
        
    
def _RegenerateLocalTagCache( self ):
    """Repopulate the fast local tag cache and notify the UI of the refreshed tag data."""
    
    popup_job = ClientThreading.JobKey( cancellable = True )
    
    try:
        
        popup_job.SetStatusTitle( 'regenerating local tag cache' )
        
        self._controller.pub( 'modal_message', popup_job )
        
        status = 'generating local tag cache'
        
        popup_job.SetVariable( 'popup_text_1', status )
        self._controller.frame_splash_status.SetSubtext( status )
        
        # the module owns the actual rebuild work
        self.modules_tags_local_cache.Repopulate()
        
    finally:
        
        popup_job.SetVariable( 'popup_text_1', 'done!' )
        
        popup_job.Finish()
        popup_job.Delete( 5 )
        
    
    # tag text may effectively have changed, so tell listeners to refresh
    self._cursor_transaction_wrapper.pub_after_job( 'notify_new_tag_display_application' )
    self._cursor_transaction_wrapper.pub_after_job( 'notify_new_force_refresh_tags_data' )
    
def _RegenerateTagCacheSearchableSubtagMaps( self, tag_service_key = None ):
    """Rebuild the searchable subtag maps of the tag fast search cache for one tag service, or for all real tag services when the key is None."""
    
    popup_job = ClientThreading.JobKey( cancellable = True )
    
    try:
        
        popup_job.SetStatusTitle( 'regenerate tag fast search cache searchable subtag map' )
        
        self._controller.pub( 'modal_message', popup_job )
        
        if tag_service_key is None:
            
            tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
            
        else:
            
            tag_service_ids = ( self.modules_services.GetServiceId( tag_service_key ), )
            
        
        file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_TAG_LOOKUP_CACHES )
        
        def status_hook( text ):
            
            popup_job.SetVariable( 'popup_text_2', text )
            
        
        # specific ( file service, tag service ) caches first
        for ( file_service_id, tag_service_id ) in itertools.product( file_service_ids, tag_service_ids ):
            
            if popup_job.IsCancelled():
                
                break
                
            
            status = 'repopulating specific cache {}_{}'.format( file_service_id, tag_service_id )
            
            popup_job.SetVariable( 'popup_text_1', status )
            self._controller.frame_splash_status.SetSubtext( status )
            
            # let the UI breathe between chunks of db work
            time.sleep( 0.01 )
            
            self.modules_tag_search.RegenerateSearchableSubtagMap( file_service_id, tag_service_id, status_hook = status_hook )
            
        
        # then the combined-files cache for each tag service
        for tag_service_id in tag_service_ids:
            
            if popup_job.IsCancelled():
                
                break
                
            
            status = 'repopulating combined cache {}'.format( tag_service_id )
            
            popup_job.SetVariable( 'popup_text_1', status )
            self._controller.frame_splash_status.SetSubtext( status )
            
            time.sleep( 0.01 )
            
            self.modules_tag_search.RegenerateSearchableSubtagMap( self.modules_services.combined_file_service_id, tag_service_id, status_hook = status_hook )
            
        
    finally:
        
        popup_job.DeleteVariable( 'popup_text_2' )
        
        popup_job.SetVariable( 'popup_text_1', 'done!' )
        
        popup_job.Finish()
        popup_job.Delete( 5 )
        
    
def _RegenerateTagCache( self, tag_service_key = None ):
    """Drop and regenerate the tag fast search cache for one tag service, or for all real tag services when the key is None."""
    
    popup_job = ClientThreading.JobKey( cancellable = True )
    
    try:
        
        popup_job.SetStatusTitle( 'regenerating tag fast search cache' )
        
        self._controller.pub( 'modal_message', popup_job )
        
        if tag_service_key is None:
            
            tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
            
        else:
            
            tag_service_ids = ( self.modules_services.GetServiceId( tag_service_key ), )
            
        
        file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_TAG_LOOKUP_CACHES )
        
        def status_hook( text ):
            
            popup_job.SetVariable( 'popup_text_2', text )
            
        
        # specific ( file service, tag service ) caches first
        for ( file_service_id, tag_service_id ) in itertools.product( file_service_ids, tag_service_ids ):
            
            if popup_job.IsCancelled():
                
                break
                
            
            status = 'generating specific cache {}_{}'.format( file_service_id, tag_service_id )
            
            popup_job.SetVariable( 'popup_text_1', status )
            self._controller.frame_splash_status.SetSubtext( status )
            
            # let the UI breathe between chunks of db work
            time.sleep( 0.01 )
            
            self.modules_tag_search.Drop( file_service_id, tag_service_id )
            self.modules_tag_search.Generate( file_service_id, tag_service_id )
            
            self._CacheTagsPopulate( file_service_id, tag_service_id, status_hook = status_hook )
            
        
        # then the combined-files cache for each tag service
        for tag_service_id in tag_service_ids:
            
            if popup_job.IsCancelled():
                
                break
                
            
            status = 'generating combined cache {}'.format( tag_service_id )
            
            popup_job.SetVariable( 'popup_text_1', status )
            self._controller.frame_splash_status.SetSubtext( status )
            
            time.sleep( 0.01 )
            
            self.modules_tag_search.Drop( self.modules_services.combined_file_service_id, tag_service_id )
            self.modules_tag_search.Generate( self.modules_services.combined_file_service_id, tag_service_id )
            
            self._CacheTagsPopulate( self.modules_services.combined_file_service_id, tag_service_id, status_hook = status_hook )
            
        
    finally:
        
        popup_job.DeleteVariable( 'popup_text_2' )
        
        popup_job.SetVariable( 'popup_text_1', 'done!' )
        
        popup_job.Finish()
        popup_job.Delete( 5 )
        
    
def _RegenerateTagDisplayMappingsCache( self, tag_service_key = None ):
    """
    Drop and regenerate the tag display mappings caches (specific and
    combined-files) for one tag service, or all real tag services when the
    key is None. Also clears 'actual' siblings/parents so display rules are
    recalculated from scratch.
    """
    job_key = ClientThreading.JobKey( cancellable = True )
    
    try:
        
        job_key.SetStatusTitle( 'regenerating tag display mappings cache' )
        
        self._controller.pub( 'modal_message', job_key )
        
        if tag_service_key is None:
            
            tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
            
        else:
            
            tag_service_ids = ( self.modules_services.GetServiceId( tag_service_key ), )
            
        
        file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES )
        
        for tag_service_id in tag_service_ids:
            
            # first off, we want to clear all the current siblings and parents so they will be reprocessed later
            # we'll also have to catch up the tag definition cache to account for this
            
            tag_ids_in_dispute = set()
            
            tag_ids_in_dispute.update( self.modules_tag_siblings.GetAllTagIds( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id ) )
            tag_ids_in_dispute.update( self.modules_tag_parents.GetAllTagIds( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id ) )
            
            self.modules_tag_siblings.ClearActual( tag_service_id )
            self.modules_tag_parents.ClearActual( tag_service_id )
            
            if len( tag_ids_in_dispute ) > 0:
                
                self._CacheTagsSyncTags( tag_service_id, tag_ids_in_dispute )
                
            
        
        for ( file_service_id, tag_service_id ) in itertools.product( file_service_ids, tag_service_ids ):
            
            if job_key.IsCancelled():
                
                break
                
            
            message = 'generating specific display cache {}_{}'.format( file_service_id, tag_service_id )
            
            # closure is redefined each iteration so it captures the current 'message'
            def status_hook_1( s: str ):
                
                job_key.SetVariable( 'popup_text_2', s )
                self._controller.frame_splash_status.SetSubtext( '{} - {}'.format( message, s ) )
                
            
            job_key.SetVariable( 'popup_text_1', message )
            self._controller.frame_splash_status.SetSubtext( message )
            
            status_hook_1( 'dropping old data' )
            
            self.modules_mappings_cache_specific_display.Drop( file_service_id, tag_service_id )
            
            self.modules_mappings_cache_specific_display.Generate( file_service_id, tag_service_id, populate_from_storage = True, status_hook = status_hook_1 )
            
            job_key.SetVariable( 'popup_text_2', '' )
            self._controller.frame_splash_status.SetSubtext( '' )
            
        
        for tag_service_id in tag_service_ids:
            
            if job_key.IsCancelled():
                
                break
                
            
            message = 'generating combined display cache {}'.format( tag_service_id )
            
            # same per-iteration closure pattern as above
            def status_hook_2( s: str ):
                
                job_key.SetVariable( 'popup_text_2', s )
                self._controller.frame_splash_status.SetSubtext( '{} - {}'.format( message, s ) )
                
            
            job_key.SetVariable( 'popup_text_1', message )
            self._controller.frame_splash_status.SetSubtext( message )
            
            status_hook_2( 'dropping old data' )
            
            self._CacheCombinedFilesDisplayMappingsDrop( tag_service_id )
            
            self._CacheCombinedFilesDisplayMappingsGenerate( tag_service_id, status_hook = status_hook_2 )
            
            job_key.SetVariable( 'popup_text_2', '' )
            self._controller.frame_splash_status.SetSubtext( '' )
            
        
    finally:
        
        job_key.SetVariable( 'popup_text_1', 'done!' )
        
        job_key.Finish()
        job_key.Delete( 5 )
        
    
    # display rules changed wholesale, so broadcast a full refresh
    self._cursor_transaction_wrapper.pub_after_job( 'notify_new_tag_display_application' )
    self._cursor_transaction_wrapper.pub_after_job( 'notify_new_force_refresh_tags_data' )
    
def _RegenerateTagDisplayPendingMappingsCache( self, tag_service_key = None ):
    """
    Regenerate only the 'pending' side of the tag display mappings caches
    (specific and combined-files) for one tag service, or all real tag
    services when the key is None.
    """
    job_key = ClientThreading.JobKey( cancellable = True )
    
    try:
        
        job_key.SetStatusTitle( 'regenerating tag display pending mappings cache' )
        
        self._controller.pub( 'modal_message', job_key )
        
        if tag_service_key is None:
            
            tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
            
        else:
            
            tag_service_ids = ( self.modules_services.GetServiceId( tag_service_key ), )
            
        
        file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES )
        
        for ( file_service_id, tag_service_id ) in itertools.product( file_service_ids, tag_service_ids ):
            
            if job_key.IsCancelled():
                
                break
                
            
            message = 'regenerating specific display cache pending {}_{}'.format( file_service_id, tag_service_id )
            
            # closure is redefined each iteration so it captures the current 'message'
            def status_hook_1( s: str ):
                
                job_key.SetVariable( 'popup_text_2', s )
                self._controller.frame_splash_status.SetSubtext( '{} - {}'.format( message, s ) )
                
            
            job_key.SetVariable( 'popup_text_1', message )
            self._controller.frame_splash_status.SetSubtext( message )
            
            self.modules_mappings_cache_specific_display.RegeneratePending( file_service_id, tag_service_id, status_hook = status_hook_1 )
            
            job_key.SetVariable( 'popup_text_2', '' )
            self._controller.frame_splash_status.SetSubtext( '' )
            
        
        for tag_service_id in tag_service_ids:
            
            if job_key.IsCancelled():
                
                break
                
            
            message = 'regenerating combined display cache pending {}'.format( tag_service_id )
            
            # same per-iteration closure pattern as above
            def status_hook_2( s: str ):
                
                job_key.SetVariable( 'popup_text_2', s )
                self._controller.frame_splash_status.SetSubtext( '{} - {}'.format( message, s ) )
                
            
            job_key.SetVariable( 'popup_text_1', message )
            self._controller.frame_splash_status.SetSubtext( message )
            
            self._CacheCombinedFilesDisplayMappingsRegeneratePending( tag_service_id, status_hook = status_hook_2 )
            
            job_key.SetVariable( 'popup_text_2', '' )
            self._controller.frame_splash_status.SetSubtext( '' )
            
        
    finally:
        
        job_key.SetVariable( 'popup_text_1', 'done!' )
        
        job_key.Finish()
        job_key.Delete( 5 )
        
    
    # pending counts changed, so tell listeners to refresh their tag data
    self._cursor_transaction_wrapper.pub_after_job( 'notify_new_force_refresh_tags_data' )
    
def _RegenerateTagMappingsCache( self, tag_service_key = None ):
    """
    Drop and regenerate the full (storage) tag mappings caches for one tag
    service, or all real tag services when the key is None. This also
    rebuilds the associated fast search caches and, when doing all services,
    the local tag cache.
    """
    job_key = ClientThreading.JobKey( cancellable = True )
    
    try:
        
        job_key.SetStatusTitle( 'regenerating tag mappings cache' )
        
        self._controller.pub( 'modal_message', job_key )
        
        if tag_service_key is None:
            
            tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
            
        else:
            
            tag_service_ids = ( self.modules_services.GetServiceId( tag_service_key ), )
            
        
        file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES )
        tag_cache_file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_TAG_LOOKUP_CACHES )
        
        # storage is changing wholesale, so the 'actual' sibling/parent data must be recalculated later
        for tag_service_id in tag_service_ids:
            
            self.modules_tag_siblings.ClearActual( tag_service_id )
            self.modules_tag_parents.ClearActual( tag_service_id )
            
        
        time.sleep( 0.01 )
        
        for ( file_service_id, tag_service_id ) in itertools.product( file_service_ids, tag_service_ids ):
            
            if job_key.IsCancelled():
                
                break
                
            
            message = 'generating specific cache {}_{}'.format( file_service_id, tag_service_id )
            
            job_key.SetVariable( 'popup_text_1', message )
            self._controller.frame_splash_status.SetSubtext( message )
            
            time.sleep( 0.01 )
            
            # only some file services also carry a tag lookup cache
            if file_service_id in tag_cache_file_service_ids:
                
                self.modules_tag_search.Drop( file_service_id, tag_service_id )
                self.modules_tag_search.Generate( file_service_id, tag_service_id )
                
            
            self._CacheSpecificMappingsDrop( file_service_id, tag_service_id )
            self._CacheSpecificMappingsGenerate( file_service_id, tag_service_id )
            
            # commit per pair so a long regen does not hold one giant transaction
            self._cursor_transaction_wrapper.CommitAndBegin()
            
        
        for tag_service_id in tag_service_ids:
            
            if job_key.IsCancelled():
                
                break
                
            
            message = 'generating combined cache {}'.format( tag_service_id )
            
            job_key.SetVariable( 'popup_text_1', message )
            self._controller.frame_splash_status.SetSubtext( message )
            
            time.sleep( 0.01 )
            
            self.modules_tag_search.Drop( self.modules_services.combined_file_service_id, tag_service_id )
            self.modules_tag_search.Generate( self.modules_services.combined_file_service_id, tag_service_id )
            
            self._CacheCombinedFilesMappingsDrop( tag_service_id )
            self._CacheCombinedFilesMappingsGenerate( tag_service_id )
            
            self._cursor_transaction_wrapper.CommitAndBegin()
            
        
        # a full regen (all services) also refreshes the local tag cache
        if tag_service_key is None:
            
            message = 'generating local tag cache'
            
            job_key.SetVariable( 'popup_text_1', message )
            self._controller.frame_splash_status.SetSubtext( message )
            
            self.modules_tags_local_cache.Repopulate()
            
        
    finally:
        
        job_key.SetVariable( 'popup_text_1', 'done!' )
        
        job_key.Finish()
        job_key.Delete( 5 )
        
    
    self._cursor_transaction_wrapper.pub_after_job( 'notify_new_tag_display_application' )
    self._cursor_transaction_wrapper.pub_after_job( 'notify_new_force_refresh_tags_data' )
    
def _RegenerateTagParentsCache( self, only_these_service_ids = None ):
    """Rebuild the tag parents cache for the given tag service ids, or for every real tag service when None."""
    
    if only_these_service_ids is not None:
        
        tag_service_ids = only_these_service_ids
        
    else:
        
        tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
        
    
    # as siblings may have changed, parents may have as well
    self.modules_tag_parents.Regen( tag_service_ids )
    
    self._cursor_transaction_wrapper.pub_after_job( 'notify_new_tag_display_application' )
    
def _RegenerateTagPendingMappingsCache( self, tag_service_key = None ):
    """
    Regenerate only the 'pending' side of the storage tag mappings caches
    (specific and combined-files) for one tag service, or all real tag
    services when the key is None.
    """
    job_key = ClientThreading.JobKey( cancellable = True )
    
    try:
        
        job_key.SetStatusTitle( 'regenerating tag pending mappings cache' )
        
        self._controller.pub( 'modal_message', job_key )
        
        if tag_service_key is None:
            
            tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
            
        else:
            
            tag_service_ids = ( self.modules_services.GetServiceId( tag_service_key ), )
            
        
        file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES )
        
        for ( file_service_id, tag_service_id ) in itertools.product( file_service_ids, tag_service_ids ):
            
            if job_key.IsCancelled():
                
                break
                
            
            message = 'regenerating specific cache pending {}_{}'.format( file_service_id, tag_service_id )
            
            # closure is redefined each iteration so it captures the current 'message'
            def status_hook_1( s: str ):
                
                job_key.SetVariable( 'popup_text_2', s )
                self._controller.frame_splash_status.SetSubtext( '{} - {}'.format( message, s ) )
                
            
            job_key.SetVariable( 'popup_text_1', message )
            self._controller.frame_splash_status.SetSubtext( message )
            
            self._CacheSpecificMappingsRegeneratePending( file_service_id, tag_service_id, status_hook = status_hook_1 )
            
            job_key.SetVariable( 'popup_text_2', '' )
            self._controller.frame_splash_status.SetSubtext( '' )
            
        
        for tag_service_id in tag_service_ids:
            
            if job_key.IsCancelled():
                
                break
                
            
            message = 'regenerating combined cache pending {}'.format( tag_service_id )
            
            # same per-iteration closure pattern as above
            def status_hook_2( s: str ):
                
                job_key.SetVariable( 'popup_text_2', s )
                self._controller.frame_splash_status.SetSubtext( '{} - {}'.format( message, s ) )
                
            
            job_key.SetVariable( 'popup_text_1', message )
            self._controller.frame_splash_status.SetSubtext( message )
            
            self._CacheCombinedFilesMappingsRegeneratePending( tag_service_id, status_hook = status_hook_2 )
            
            job_key.SetVariable( 'popup_text_2', '' )
            self._controller.frame_splash_status.SetSubtext( '' )
            
        
    finally:
        
        job_key.SetVariable( 'popup_text_1', 'done!' )
        
        job_key.Finish()
        job_key.Delete( 5 )
        
    
    # pending counts changed, so tell listeners to refresh their tag data
    self._cursor_transaction_wrapper.pub_after_job( 'notify_new_force_refresh_tags_data' )
    
def _RelocateClientFiles( self, prefix, source, dest ):
    """
    Move one client files prefix directory from source to dest, update the db
    location record, and best-effort recycle the old directory.
    
    Raises Exception if the destination base directory does not exist.
    """
    if not os.path.exists( dest ):
        
        raise Exception( 'Was commanded to move prefix "{}" from "{}" to "{}", but that destination does not exist!'.format( prefix, source, dest ) )
        
    
    full_source = os.path.join( source, prefix )
    full_dest = os.path.join( dest, prefix )
    
    if os.path.exists( full_source ):
        
        # merge handles the case where the destination dir already partially exists
        HydrusPaths.MergeTree( full_source, full_dest )
        
    elif not os.path.exists( full_dest ):
        
        # nothing to move, but the destination dir must exist for future file writes
        HydrusPaths.MakeSureDirectoryExists( full_dest )
        
    
    portable_dest = HydrusPaths.ConvertAbsPathToPortablePath( dest )
    
    self._Execute( 'UPDATE client_files_locations SET location = ? WHERE prefix = ?;', ( portable_dest, prefix ) )
    
    if os.path.exists( full_source ):
        
        # best-effort cleanup of whatever MergeTree left behind; failure here is not fatal
        try:
            
            HydrusPaths.RecyclePath( full_source )
            
        except Exception:
            
            # narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit are not swallowed
            pass
            
        
    
def _RepairClientFiles( self, correct_rows ):
    """For each ( prefix, correct_location ) pair, ensure the prefix directory exists and point the db location record at it."""
    
    for ( prefix, correct_location ) in correct_rows:
        
        abs_prefix_dir = os.path.join( correct_location, prefix )
        
        HydrusPaths.MakeSureDirectoryExists( abs_prefix_dir )
        
        portable_location = HydrusPaths.ConvertAbsPathToPortablePath( correct_location )
        
        self._Execute( 'UPDATE client_files_locations SET location = ? WHERE prefix = ?;', ( portable_location, prefix ) )
        
    
def _RepairDB( self, version ):
    """
    Boot-time integrity check and repair: after the base HydrusDB repair, look
    for missing mapping-cache, tag-count, and tag-search tables and regenerate
    them (with user confirmation dialogs), and restore a default options
    object if the saved one is missing.
    """
    # migrate most of this gubbins to the new modules system, and HydrusDB tbh!
    
    self._controller.frame_splash_status.SetText( 'checking database' )
    
    HydrusDB.HydrusDB._RepairDB( self, version )
    
    self._weakref_media_result_cache = ClientMediaResultCache.MediaResultCache()
    
    tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
    file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES )
    
    # caches
    
    existing_cache_tables = self._STS( self._Execute( 'SELECT name FROM external_caches.sqlite_master WHERE type = ?;', ( 'table', ) ) )
    
    # build the set of cache table names we expect to exist for this db version
    mappings_cache_tables = set()
    
    for ( file_service_id, tag_service_id ) in itertools.product( file_service_ids, tag_service_ids ):
        
        # the specific storage cache table naming only applies from version 465 on
        if version >= 465:
            
            mappings_cache_tables.update( ( name.split( '.' )[1] for name in ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id ) ) )
            
        
        mappings_cache_tables.update( ( name.split( '.' )[1] for name in ClientDBMappingsCacheSpecificDisplay.GenerateSpecificDisplayMappingsCacheTableNames( file_service_id, tag_service_id ) ) )
        
    
    we_did_a_full_regen = False
    
    missing_main_tables = sorted( mappings_cache_tables.difference( existing_cache_tables ) )
    
    if len( missing_main_tables ) > 0:
        
        HydrusData.DebugPrint( 'The missing mapping cache tables were:' )
        HydrusData.DebugPrint( os.linesep.join( missing_main_tables ) )
        
        message = 'On boot, {} mapping caches tables were missing! This could be due to the entire \'caches\' database file being missing or due to some other problem. All of this data can be regenerated.'.format( len( missing_main_tables ) )
        message += os.linesep * 2
        message += 'If you wish, click ok on this message and the client will recreate and repopulate these tables with the correct data. This may take a few minutes. But if you want to solve this problem otherwise, kill the hydrus process now.'
        message += os.linesep * 2
        message += 'If you do not already know what caused this, it was likely a hard drive fault--either due to a recent abrupt power cut or actual hardware failure. Check \'help my db is broke.txt\' in the install_dir/db directory as soon as you can.'
        
        BlockingSafeShowMessage( message )
        
        self._RegenerateTagMappingsCache()
        
        we_did_a_full_regen = True
        
    
    # a full regen already rebuilt everything below, so only check the finer-grained stuff if we skipped it
    if not we_did_a_full_regen:
        
        # autocomplete
        
        ( missing_storage_tag_count_service_pairs, missing_display_tag_count_service_pairs ) = self.modules_mappings_counts.GetMissingTagCountServicePairs()
        
        # unfortunately, for now, due to display maintenance being tag service wide, I can't regen individual lads here
        # maybe in future I can iterate all sibs/parents and just do it here and now with addimplication
        
        missing_storage_tag_count_tag_service_ids = { tag_service_id for ( file_service_id, tag_service_id ) in missing_storage_tag_count_service_pairs }
        missing_display_tag_count_tag_service_ids = { tag_service_id for ( file_service_id, tag_service_id ) in missing_display_tag_count_service_pairs }
        
        # a storage regen will cover a display regen
        
        missing_display_tag_count_tag_service_ids = missing_display_tag_count_tag_service_ids.difference( missing_storage_tag_count_tag_service_ids )
        
        if len( missing_display_tag_count_tag_service_ids ) > 0:
            
            missing_display_tag_count_tag_service_ids = sorted( missing_display_tag_count_tag_service_ids )
            
            message = 'On boot, some important tag count tables for the display context were missing! You should have already had a notice about this. You may have had other problems earlier, but this particular problem is completely recoverable and results in no lost data. The relevant tables have been recreated and will now be repopulated. The services about to be worked on are:'
            message += os.linesep * 2
            message += os.linesep.join( ( str( t ) for t in missing_display_tag_count_tag_service_ids ) )
            message += os.linesep * 2
            message += 'If you want to go ahead, click ok on this message and the client will fill these tables with the correct data. It may take some time. If you want to solve this problem otherwise, kill the hydrus process now.'
            
            BlockingSafeShowMessage( message )
            
            for tag_service_id in missing_display_tag_count_tag_service_ids:
                
                tag_service_key = self.modules_services.GetService( tag_service_id ).GetServiceKey()
                
                self._RegenerateTagDisplayMappingsCache( tag_service_key = tag_service_key )
                
            
            self.modules_db_maintenance.TouchAnalyzeNewTables()
            
            self._cursor_transaction_wrapper.CommitAndBegin()
            
        
        if len( missing_storage_tag_count_tag_service_ids ) > 0:
            
            missing_storage_tag_count_tag_service_ids = sorted( missing_storage_tag_count_tag_service_ids )
            
            message = 'On boot, some important tag count tables for the storage context were missing! You should have already had a notice about this. You may have had other problems earlier, but this particular problem is completely recoverable and results in no lost data. The relevant tables have been recreated and will now be repopulated. The services about to be worked on are:'
            message += os.linesep * 2
            message += os.linesep.join( ( str( t ) for t in missing_storage_tag_count_tag_service_ids ) )
            message += os.linesep * 2
            message += 'If you want to go ahead, click ok on this message and the client will fill these tables with the correct data. It may take some time. If you want to solve this problem otherwise, kill the hydrus process now.'
            
            BlockingSafeShowMessage( message )
            
            for tag_service_id in missing_storage_tag_count_tag_service_ids:
                
                tag_service_key = self.modules_services.GetService( tag_service_id ).GetServiceKey()
                
                self._RegenerateTagMappingsCache( tag_service_key = tag_service_key )
                
            
            self.modules_db_maintenance.TouchAnalyzeNewTables()
            
            self._cursor_transaction_wrapper.CommitAndBegin()
            
        
        # tag search, this requires autocomplete and siblings/parents in place
        
        missing_tag_search_service_pairs = self.modules_tag_search.GetMissingTagSearchServicePairs()
        
        if len( missing_tag_search_service_pairs ) > 0:
            
            missing_tag_search_service_pairs = sorted( missing_tag_search_service_pairs )
            
            message = 'On boot, some important tag search tables were missing! You should have already had a notice about this. You may have had other problems earlier, but this particular problem is completely recoverable and results in no lost data. The relevant tables have been recreated and will now be repopulated. The service pairs about to be worked on are:'
            message += os.linesep * 2
            message += os.linesep.join( ( str( t ) for t in missing_tag_search_service_pairs ) )
            message += os.linesep * 2
            message += 'If you want to go ahead, click ok on this message and the client will fill these tables with the correct data. It may take some time. If you want to solve this problem otherwise, kill the hydrus process now.'
            
            BlockingSafeShowMessage( message )
            
            for ( file_service_id, tag_service_id ) in missing_tag_search_service_pairs:
                
                self.modules_tag_search.Drop( file_service_id, tag_service_id )
                self.modules_tag_search.Generate( file_service_id, tag_service_id )
                self._CacheTagsPopulate( file_service_id, tag_service_id )
                
            
            self.modules_db_maintenance.TouchAnalyzeNewTables()
            
            self._cursor_transaction_wrapper.CommitAndBegin()
            
        
    
    #
    
    new_options = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_CLIENT_OPTIONS )
    
    if new_options is None:
        
        message = 'On boot, your main options object was missing!'
        message += os.linesep * 2
        message += 'If you wish, click ok on this message and the client will re-add fresh options with default values. But if you want to solve this problem otherwise, kill the hydrus process now.'
        message += os.linesep * 2
        message += 'If you do not already know what caused this, it was likely a hard drive fault--either due to a recent abrupt power cut or actual hardware failure. Check \'help my db is broke.txt\' in the install_dir/db directory as soon as you can.'
        
        BlockingSafeShowMessage( message )
        
        new_options = ClientOptions.ClientOptions()
        
        new_options.SetSimpleDownloaderFormulae( ClientDefaults.GetDefaultSimpleDownloaderFormulae() )
        
        self.modules_serialisable.SetJSONDump( new_options )
        
    
    # an explicit empty string so we don't linger on 'checking database' if the next stage lags a bit on its own update. no need to give anyone heart attacks
    self._controller.frame_splash_status.SetText( '' )
    
def _RepairInvalidTags( self, job_key: typing.Optional[ ClientThreading.JobKey ] = None ):
    """Scan the whole tags table for tags that fail the current clean-tag rules and repair them.

    Phase 1 walks all tag_ids in blocks, cleans each tag's text, and records
    ( tag_id, original_tag, cleaned_tag ) for every mismatch. Phase 2 repoints each bad
    tag_id at a fresh namespace/subtag pair (deduplicated against existing tags) and
    refreshes the fast tag search caches for every ( file service, tag service ) pair.
    If job_key is provided, it receives progress text and can cancel either phase.
    """
    invalid_tag_ids_and_tags = set()
    BLOCK_SIZE = 1000
    select_statement = 'SELECT tag_id FROM tags;'
    bad_tag_count = 0
    # phase 1: scan for tags whose stored text differs from their cleaned form
    for ( group_of_tag_ids, num_done, num_to_do ) in HydrusDB.ReadLargeIdQueryInSeparateChunks( self._c, select_statement, BLOCK_SIZE ):
        if job_key is not None:
            if job_key.IsCancelled():
                break
            message = 'Scanning tags: {} - Bad Found: {}'.format( HydrusData.ConvertValueRangeToPrettyString( num_done, num_to_do ), HydrusData.ToHumanInt( bad_tag_count ) )
            job_key.SetVariable( 'popup_text_1', message )
        for tag_id in group_of_tag_ids:
            tag = self.modules_tags_local_cache.GetTag( tag_id )
            try:
                cleaned_tag = HydrusTags.CleanTag( tag )
                HydrusTags.CheckTagNotEmpty( cleaned_tag )
            except:
                # the text is so broken even cleaning raises--replace it wholesale
                cleaned_tag = 'unrecoverable invalid tag'
            if tag != cleaned_tag:
                invalid_tag_ids_and_tags.add( ( tag_id, tag, cleaned_tag ) )
                bad_tag_count += 1
    file_service_ids = list( self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_TAG_LOOKUP_CACHES ) )
    file_service_ids.append( self.modules_services.combined_file_service_id )
    tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
    # phase 2: fix each bad tag in place, keeping its tag_id but giving it new text
    for ( i, ( tag_id, tag, cleaned_tag ) ) in enumerate( invalid_tag_ids_and_tags ):
        if job_key is not None:
            if job_key.IsCancelled():
                break
            message = 'Fixing bad tags: {}'.format( HydrusData.ConvertValueRangeToPrettyString( i + 1, bad_tag_count ) )
            job_key.SetVariable( 'popup_text_1', message )
        # now find an entirely new namespace_id, subtag_id pair for this tag
        existing_tags = set()
        potential_new_cleaned_tag = cleaned_tag
        # bump a numeric suffix (or similar) until the candidate text is unused
        while self.modules_tags.TagExists( potential_new_cleaned_tag ):
            existing_tags.add( potential_new_cleaned_tag )
            potential_new_cleaned_tag = HydrusData.GetNonDupeName( cleaned_tag, existing_tags )
    cleaned_tag = potential_new_cleaned_tag
    ( namespace, subtag ) = HydrusTags.SplitTag( cleaned_tag )
    namespace_id = self.modules_tags.GetNamespaceId( namespace )
    subtag_id = self.modules_tags.GetSubtagId( subtag )
    self.modules_tags.UpdateTagId( tag_id, namespace_id, subtag_id )
    self.modules_tags_local_cache.UpdateTagInCache( tag_id, cleaned_tag )
    # refresh every search cache row that referenced the old text
    for ( file_service_id, tag_service_id ) in itertools.product( file_service_ids, tag_service_ids ):
        if self.modules_tag_search.HasTag( file_service_id, tag_service_id, tag_id ):
            self.modules_tag_search.DeleteTags( file_service_id, tag_service_id, ( tag_id, ) )
            self.modules_tag_search.AddTags( file_service_id, tag_service_id, ( tag_id, ) )
    try:
        HydrusData.Print( 'Invalid tag fixing: {} replaced with {}'.format( repr( tag ), repr( cleaned_tag ) ) )
    except:
        # even repr/logging can choke on sufficiently broken text
        HydrusData.Print( 'Invalid tag fixing: Could not even print the bad tag to the log! It is now known as {}'.format( repr( cleaned_tag ) ) )
    if job_key is not None:
        if not job_key.IsCancelled():
            if bad_tag_count == 0:
                message = 'Invalid tag scanning: No bad tags found!'
            else:
                message = 'Invalid tag scanning: {} bad tags found and fixed! They have been written to the log.'.format( HydrusData.ToHumanInt( bad_tag_count ) )
                self._cursor_transaction_wrapper.pub_after_job( 'notify_new_force_refresh_tags_data' )
            HydrusData.Print( message )
            job_key.SetVariable( 'popup_text_1', message )
        job_key.Finish()
def _RepopulateMappingsFromCache( self, tag_service_key = None, job_key = None ):
    """Recover missing master mappings rows from the combined-local specific mappings cache.

    For each tag service (all real tag services when tag_service_key is None), walks the
    combined local files table in blocks and INSERT OR IGNOREs current/deleted/pending
    rows from the specific cache tables back into the master mappings tables.
    job_key, if given, receives progress text and can cancel mid-run.
    """
    BLOCK_SIZE = 10000
    num_rows_recovered = 0
    if tag_service_key is None:
        tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
    else:
        tag_service_ids = ( self.modules_services.GetServiceId( tag_service_key ), )
    for tag_service_id in tag_service_ids:
        service = self.modules_services.GetService( tag_service_id )
        name = service.GetName()
        # source: the specific cache for the combined local file domain
        ( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( self.modules_services.combined_local_file_service_id, tag_service_id )
        # destination: the master mappings tables for this tag service
        ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = ClientDBMappingsStorage.GenerateMappingsTableNames( tag_service_id )
        current_files_table_name = ClientDBFilesStorage.GenerateFilesTableName( self.modules_services.combined_local_file_service_id, HC.CONTENT_STATUS_CURRENT )
        select_statement = 'SELECT hash_id FROM {};'.format( current_files_table_name )
        for ( group_of_hash_ids, num_done, num_to_do ) in HydrusDB.ReadLargeIdQueryInSeparateChunks( self._c, select_statement, BLOCK_SIZE ):
            if job_key is not None:
                message = 'Doing "{}"\u2026: {}'.format( name, HydrusData.ConvertValueRangeToPrettyString( num_done, num_to_do ) )
                message += os.linesep * 2
                message += 'Total rows recovered: {}'.format( HydrusData.ToHumanInt( num_rows_recovered ) )
                job_key.SetVariable( 'popup_text_1', message )
                if job_key.IsCancelled():
                    return
            with self._MakeTemporaryIntegerTable( group_of_hash_ids, 'hash_id' ) as temp_table_name:
                # temp hashes to mappings
                insert_template = 'INSERT OR IGNORE INTO {} ( tag_id, hash_id ) SELECT tag_id, hash_id FROM {} CROSS JOIN {} USING ( hash_id );'
                self._Execute( insert_template.format( current_mappings_table_name, temp_table_name, cache_current_mappings_table_name ) )
                num_rows_recovered += self._GetRowCount()
                self._Execute( insert_template.format( deleted_mappings_table_name, temp_table_name, cache_deleted_mappings_table_name ) )
                num_rows_recovered += self._GetRowCount()
                self._Execute( insert_template.format( pending_mappings_table_name, temp_table_name, cache_pending_mappings_table_name ) )
                num_rows_recovered += self._GetRowCount()
    if job_key is not None:
        job_key.SetVariable( 'popup_text_1', 'Done! Rows recovered: {}'.format( HydrusData.ToHumanInt( num_rows_recovered ) ) )
        job_key.Finish()
def _RepopulateTagCacheMissingSubtags( self, tag_service_key = None ):
    """Repopulate missing subtag rows in the fast tag search caches, with a modal progress popup.

    Covers every specific ( file service, tag service ) cache pair, then the
    combined-files cache for each tag service. Cancellable via the popup's job key.
    """
    job_key = ClientThreading.JobKey( cancellable = True )
    try:
        job_key.SetStatusTitle( 'repopulate tag fast search cache subtags' )
        self._controller.pub( 'modal_message', job_key )
        if tag_service_key is None:
            tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
        else:
            tag_service_ids = ( self.modules_services.GetServiceId( tag_service_key ), )
        file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_TAG_LOOKUP_CACHES )
        # NOTE(review): status_hook is defined but never passed to anything in this view--possibly vestigial
        def status_hook( s ):
            job_key.SetVariable( 'popup_text_2', s )
        for ( file_service_id, tag_service_id ) in itertools.product( file_service_ids, tag_service_ids ):
            if job_key.IsCancelled():
                break
            message = 'repopulating specific cache {}_{}'.format( file_service_id, tag_service_id )
            job_key.SetVariable( 'popup_text_1', message )
            self._controller.frame_splash_status.SetSubtext( message )
            # brief yield so the UI can repaint between cache jobs
            time.sleep( 0.01 )
            self.modules_tag_search.RepopulateMissingSubtags( file_service_id, tag_service_id )
        for tag_service_id in tag_service_ids:
            if job_key.IsCancelled():
                break
            message = 'repopulating combined cache {}'.format( tag_service_id )
            job_key.SetVariable( 'popup_text_1', message )
            self._controller.frame_splash_status.SetSubtext( message )
            time.sleep( 0.01 )
            self.modules_tag_search.RepopulateMissingSubtags( self.modules_services.combined_file_service_id, tag_service_id )
    finally:
        # always tidy the popup, even on cancel or error
        job_key.DeleteVariable( 'popup_text_2' )
        job_key.SetVariable( 'popup_text_1', 'done!' )
        job_key.Finish()
        job_key.Delete( 5 )
def _RepopulateTagDisplayMappingsCache( self, tag_service_key = None ):
    """Rebuild the specific tag display mappings caches by re-adding every current file.

    Walks each specific-mapping file service's current files table in blocks of 1024
    and re-runs the specific mappings and display-cache 'add files' routines for every
    tag service. Shows a cancellable modal popup; forces a tag data refresh when done.
    """
    job_key = ClientThreading.JobKey( cancellable = True )
    try:
        job_key.SetStatusTitle( 'repopulating tag display mappings cache' )
        self._controller.pub( 'modal_message', job_key )
        if tag_service_key is None:
            tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
        else:
            tag_service_ids = ( self.modules_services.GetServiceId( tag_service_key ), )
        file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES )
        for ( i, file_service_id ) in enumerate( file_service_ids ):
            if job_key.IsCancelled():
                break
            table_name = ClientDBFilesStorage.GenerateFilesTableName( file_service_id, HC.CONTENT_STATUS_CURRENT )
            for ( group_of_ids, num_done, num_to_do ) in HydrusDB.ReadLargeIdQueryInSeparateChunks( self._c, 'SELECT hash_id FROM {};'.format( table_name ), 1024 ):
                message = 'repopulating {} {}'.format( HydrusData.ConvertValueRangeToPrettyString( i + 1, len( file_service_ids ) ), HydrusData.ConvertValueRangeToPrettyString( num_done, num_to_do ) )
                job_key.SetVariable( 'popup_text_1', message )
                self._controller.frame_splash_status.SetSubtext( message )
                with self._MakeTemporaryIntegerTable( group_of_ids, 'hash_id' ) as temp_hash_id_table_name:
                    for tag_service_id in tag_service_ids:
                        # re-run the add-files path, which repopulates both cache layers
                        self._CacheSpecificMappingsAddFiles( file_service_id, tag_service_id, group_of_ids, temp_hash_id_table_name )
                        self.modules_mappings_cache_specific_display.AddFiles( file_service_id, tag_service_id, group_of_ids, temp_hash_id_table_name )
        job_key.SetVariable( 'popup_text_2', '' )
        self._controller.frame_splash_status.SetSubtext( '' )
    finally:
        job_key.SetVariable( 'popup_text_1', 'done!' )
        job_key.Finish()
        job_key.Delete( 5 )
        self._cursor_transaction_wrapper.pub_after_job( 'notify_new_force_refresh_tags_data' )
def _ReportOverupdatedDB( self, version ):
    """Warn the user, via a blocking dialog, that the db version is newer than the software version."""
    db_version_pretty = HydrusData.ToHumanInt( version )
    software_version_pretty = HydrusData.ToHumanInt( HC.SOFTWARE_VERSION )
    BlockingSafeShowMessage( 'This client\'s database is version {}, but the software is version {}! This situation only sometimes works, and when it does not, it can break things! If you are not sure what is going on, or if you accidentally installed an older version of the software to a newer database, force-kill this client in Task Manager right now. Otherwise, ok this dialog box to continue.'.format( db_version_pretty, software_version_pretty ) )
def _ReportUnderupdatedDB( self, version ):
    """Warn the user, via a blocking dialog, that the db is many versions behind the software."""
    db_version_pretty = HydrusData.ToHumanInt( version )
    software_version_pretty = HydrusData.ToHumanInt( HC.SOFTWARE_VERSION )
    BlockingSafeShowMessage( 'This client\'s database is version {}, but the software is significantly later, {}! Trying to update many versions in one go can be dangerous due to bitrot. I suggest you try at most to only do 10 versions at once. If you want to try a big jump anyway, you should make sure you have a backup beforehand so you can roll back to it in case the update makes your db unbootable. If you would rather try smaller updates, or you do not have a backup, force-kill this client in Task Manager right now. Otherwise, ok this dialog box to continue.'.format( db_version_pretty, software_version_pretty ) )
def _ResetRepository( self, service ):
    """Delete and recreate a repository service, wiping its local data and forcing a resync.

    The service keeps its key, type, name and settings dictionary; its processed
    content is dropped along with the old service row and rebuilt empty. Progress
    is shown in a modal popup.
    """
    ( service_key, service_type, name, dictionary ) = service.ToTuple()
    service_id = self.modules_services.GetServiceId( service_key )
    prefix = 'resetting ' + name
    job_key = ClientThreading.JobKey()
    try:
        job_key.SetVariable( 'popup_text_1', prefix + ': deleting service' )
        self._controller.pub( 'modal_message', job_key )
        self._DeleteService( service_id )
        job_key.SetVariable( 'popup_text_1', prefix + ': recreating service' )
        self._AddService( service_key, service_type, name, dictionary )
        # let the rest of the client know sync state and pending counts are stale
        notifications = (
            'notify_account_sync_due',
            'notify_new_pending',
            'notify_new_services_data',
            'notify_new_services_gui'
        )
        for topic in notifications:
            self._cursor_transaction_wrapper.pub_after_job( topic )
        job_key.SetVariable( 'popup_text_1', prefix + ': done!' )
    finally:
        job_key.Finish()
def _ResetRepositoryProcessing( self, service_key: bytes, content_types ):
    """Wipe locally processed repository content of the given types and schedule a reprocess.

    Depending on content_types, clears file rows, mappings, tag parents and/or tag
    siblings (plus their dependent caches), deletes the now-stale service_info counts,
    and asks the repositories module to reprocess the affected update content.
    """
    service_id = self.modules_services.GetServiceId( service_key )
    service = self.modules_services.GetService( service_id )
    service_type = service.GetServiceType()
    prefix = 'resetting content'
    job_key = ClientThreading.JobKey()
    try:
        service_info_types_to_delete = []
        job_key.SetVariable( 'popup_text_1', '{}: calculating'.format( prefix ) )
        self._controller.pub( 'modal_message', job_key )
        # note that siblings/parents do not do a cachetags clear-regen because they only actually delete ideal, not actual
        if HC.CONTENT_TYPE_FILES in content_types:
            service_info_types_to_delete.extend( { HC.SERVICE_INFO_NUM_FILES, HC.SERVICE_INFO_NUM_VIEWABLE_FILES, HC.SERVICE_INFO_TOTAL_SIZE, HC.SERVICE_INFO_NUM_DELETED_FILES } )
            self._Execute( 'DELETE FROM remote_thumbnails WHERE service_id = ?;', ( service_id, ) )
            if service_type in HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES:
                # keep pending rows--the user's un-uploaded work survives the reset
                self.modules_files_storage.ClearFilesTables( service_id, keep_pending = True )
            if service_type in HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES:
                tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
                for tag_service_id in tag_service_ids:
                    self._CacheSpecificMappingsClear( service_id, tag_service_id, keep_pending = True )
                    if service_type in HC.FILE_SERVICES_WITH_SPECIFIC_TAG_LOOKUP_CACHES:
                        # not clear since siblings and parents can contribute
                        self.modules_tag_search.Drop( service_id, tag_service_id )
                        self.modules_tag_search.Generate( service_id, tag_service_id )
                        self._CacheTagsPopulate( service_id, tag_service_id )
        if HC.CONTENT_TYPE_MAPPINGS in content_types:
            service_info_types_to_delete.extend( { HC.SERVICE_INFO_NUM_FILES, HC.SERVICE_INFO_NUM_TAGS, HC.SERVICE_INFO_NUM_MAPPINGS, HC.SERVICE_INFO_NUM_DELETED_MAPPINGS } )
            if service_type in HC.REAL_TAG_SERVICES:
                self.modules_mappings_storage.ClearMappingsTables( service_id )
                self._CacheCombinedFilesMappingsClear( service_id, keep_pending = True )
                self.modules_tag_search.Drop( self.modules_services.combined_file_service_id, service_id )
                self.modules_tag_search.Generate( self.modules_services.combined_file_service_id, service_id )
                self._CacheTagsPopulate( self.modules_services.combined_file_service_id, service_id )
                file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES )
                tag_cache_file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_TAG_LOOKUP_CACHES )
                for file_service_id in file_service_ids:
                    self._CacheSpecificMappingsClear( file_service_id, service_id, keep_pending = True )
                    if file_service_id in tag_cache_file_service_ids:
                        # not clear since siblings and parents can contribute
                        self.modules_tag_search.Drop( file_service_id, service_id )
                        self.modules_tag_search.Generate( file_service_id, service_id )
                        self._CacheTagsPopulate( file_service_id, service_id )
        if HC.CONTENT_TYPE_TAG_PARENTS in content_types:
            self._Execute( 'DELETE FROM tag_parents WHERE service_id = ?;', ( service_id, ) )
            self._Execute( 'DELETE FROM tag_parent_petitions WHERE service_id = ? AND status = ?;', ( service_id, HC.CONTENT_STATUS_PETITIONED ) )
            ( cache_ideal_tag_parents_lookup_table_name, cache_actual_tag_parents_lookup_table_name ) = ClientDBTagParents.GenerateTagParentsLookupCacheTableNames( service_id )
            # do not delete from actual!
            self._Execute( 'DELETE FROM {};'.format( cache_ideal_tag_parents_lookup_table_name ) )
        if HC.CONTENT_TYPE_TAG_SIBLINGS in content_types:
            self._Execute( 'DELETE FROM tag_siblings WHERE service_id = ?;', ( service_id, ) )
            self._Execute( 'DELETE FROM tag_sibling_petitions WHERE service_id = ? AND status = ?;', ( service_id, HC.CONTENT_STATUS_PETITIONED ) )
            ( cache_ideal_tag_siblings_lookup_table_name, cache_actual_tag_siblings_lookup_table_name ) = ClientDBTagSiblings.GenerateTagSiblingsLookupCacheTableNames( service_id )
            # only the ideal lookup is cleared here, matching the parents branch above
            self._Execute( 'DELETE FROM {};'.format( cache_ideal_tag_siblings_lookup_table_name ) )
        #
        job_key.SetVariable( 'popup_text_1', '{}: recalculating'.format( prefix ) )
        if HC.CONTENT_TYPE_TAG_PARENTS in content_types or HC.CONTENT_TYPE_TAG_SIBLINGS in content_types:
            interested_service_ids = set( self.modules_tag_display.GetInterestedServiceIds( service_id ) )
            if len( interested_service_ids ) > 0:
                self.modules_tag_display.RegenerateTagSiblingsAndParentsCache( only_these_service_ids = interested_service_ids )
        # clear cached counts so they regenerate on next request
        self._ExecuteMany( 'DELETE FROM service_info WHERE service_id = ? AND info_type = ?;', ( ( service_id, info_type ) for info_type in service_info_types_to_delete ) )
        self.modules_repositories.ReprocessRepository( service_key, content_types )
        self._cursor_transaction_wrapper.pub_after_job( 'notify_new_services_data' )
        self._cursor_transaction_wrapper.pub_after_job( 'notify_new_services_gui' )
        job_key.SetVariable( 'popup_text_1', prefix + ': done!' )
    finally:
        job_key.Finish()
def _SaveDirtyServices( self, dirty_services ):
    """Persist services whose in-memory state has changed."""
    # if allowed to save objects
    self._SaveServices( dirty_services )
def _SaveServices( self, services ):
    """Write each given service object back to the db via the services module."""
    update_service = self.modules_services.UpdateService
    for service in services:
        update_service( service )
def _SaveOptions( self, options ):
    """Overwrite the single-row legacy options table, then notify listeners.

    On any save failure the options object is dumped to the log before re-raising,
    so the bad state is recoverable from the log.
    """
    try:
        self._Execute( 'UPDATE options SET options = ?;', ( options, ) )
    except:
        HydrusData.Print( 'Failed options save dump:' )
        HydrusData.Print( options )
        raise
    for topic in ( 'reset_thumbnail_cache', 'notify_new_options' ):
        self._cursor_transaction_wrapper.pub_after_job( topic )
def _SetIdealClientFilesLocations( self, locations_to_ideal_weights, ideal_thumbnail_override_location ):
    """Replace the ideal file-location weights and the optional thumbnail override location.

    locations_to_ideal_weights maps absolute paths to weights; paths are stored in
    portable form. Raises if the mapping is empty. ideal_thumbnail_override_location
    may be None, in which case no override row is written.
    """
    if len( locations_to_ideal_weights ) == 0:
        raise Exception( 'No locations passed in ideal locations list!' )
    self._Execute( 'DELETE FROM ideal_client_files_locations;' )
    insert_location = 'INSERT INTO ideal_client_files_locations ( location, weight ) VALUES ( ?, ? );'
    for ( abs_location, weight ) in locations_to_ideal_weights.items():
        self._Execute( insert_location, ( HydrusPaths.ConvertAbsPathToPortablePath( abs_location ), weight ) )
    self._Execute( 'DELETE FROM ideal_thumbnail_override_location;' )
    if ideal_thumbnail_override_location is not None:
        portable_override = HydrusPaths.ConvertAbsPathToPortablePath( ideal_thumbnail_override_location )
        self._Execute( 'INSERT INTO ideal_thumbnail_override_location ( location ) VALUES ( ? );', ( portable_override, ) )
def _SetPassword( self, password ):
    """Store the client password (sha256 digest of the utf-8 text), or None to clear it."""
    if password is not None:
        # only the hash is persisted, never the plaintext
        password = hashlib.sha256( password.encode( 'utf-8' ) ).digest()
    self._controller.options[ 'password' ] = password
    self._SaveOptions( self._controller.options )
def _SetServiceFilename( self, service_id, hash_id, filename ):
    # REPLACE keeps at most one filename row per ( service_id, hash_id ) pair
    self._Execute( 'REPLACE INTO service_filenames ( service_id, hash_id, filename ) VALUES ( ?, ?, ? );', ( service_id, hash_id, filename ) )
def _SetServiceDirectory( self, service_id, hash_ids, dirname, note ):
    """Overwrite the stored metadata and file map for one service directory.

    Deletes any existing rows for ( service_id, dirname ), then inserts a fresh summary
    row (file count, total size, note) and one file-map row per hash_id.
    """
    directory_id = self.modules_texts.GetTextId( dirname )
    self._Execute( 'DELETE FROM service_directories WHERE service_id = ? AND directory_id = ?;', ( service_id, directory_id ) )
    self._Execute( 'DELETE FROM service_directory_file_map WHERE service_id = ? AND directory_id = ?;', ( service_id, directory_id ) )
    num_files = len( hash_ids )
    result = self._Execute( 'SELECT SUM( size ) FROM files_info WHERE hash_id IN ' + HydrusData.SplayListForDB( hash_ids ) + ';' ).fetchone()
    # SUM() over zero matching rows yields a ( NULL, ) row rather than no row at all,
    # so the value must be checked too--otherwise total_size would be stored as NULL
    if result is None or result[0] is None:
        total_size = 0
    else:
        ( total_size, ) = result
    self._Execute( 'INSERT INTO service_directories ( service_id, directory_id, num_files, total_size, note ) VALUES ( ?, ?, ?, ?, ? );', ( service_id, directory_id, num_files, total_size, note ) )
    self._ExecuteMany( 'INSERT INTO service_directory_file_map ( service_id, directory_id, hash_id ) VALUES ( ?, ?, ? );', ( ( service_id, directory_id, hash_id ) for hash_id in hash_ids ) )
def _TryToSortHashIds( self, location_context: ClientLocation.LocationContext, hash_ids, sort_by: ClientMedia.MediaSort ):
    """Try to sort hash_ids at the db level per sort_by; return ( did_sort, hash_ids ).

    Only 'system' sorts backed by simple db data (plus random shuffle) are handled
    here; anything else returns did_sort = False with the list untouched so the caller
    can sort in the media layer instead. Files with no row for the sort data (e.g. no
    media views) are appended after the sorted files in arbitrary order.
    """
    did_sort = False
    ( sort_metadata, sort_data ) = sort_by.sort_type
    sort_order = sort_by.sort_order
    query = None
    if sort_metadata == 'system':
        # sorts answerable with a single SELECT of ( hash_id, sort column(s) )
        simple_sorts = []
        simple_sorts.append( CC.SORT_FILES_BY_IMPORT_TIME )
        simple_sorts.append( CC.SORT_FILES_BY_FILESIZE )
        simple_sorts.append( CC.SORT_FILES_BY_DURATION )
        simple_sorts.append( CC.SORT_FILES_BY_FRAMERATE )
        simple_sorts.append( CC.SORT_FILES_BY_NUM_FRAMES )
        simple_sorts.append( CC.SORT_FILES_BY_WIDTH )
        simple_sorts.append( CC.SORT_FILES_BY_HEIGHT )
        simple_sorts.append( CC.SORT_FILES_BY_RATIO )
        simple_sorts.append( CC.SORT_FILES_BY_NUM_PIXELS )
        simple_sorts.append( CC.SORT_FILES_BY_MEDIA_VIEWS )
        simple_sorts.append( CC.SORT_FILES_BY_MEDIA_VIEWTIME )
        simple_sorts.append( CC.SORT_FILES_BY_APPROX_BITRATE )
        simple_sorts.append( CC.SORT_FILES_BY_FILE_MODIFIED_TIMESTAMP )
        simple_sorts.append( CC.SORT_FILES_BY_LAST_VIEWED_TIME )
        simple_sorts.append( CC.SORT_FILES_BY_ARCHIVED_TIMESTAMP )
        if sort_data in simple_sorts:
            if sort_data == CC.SORT_FILES_BY_IMPORT_TIME:
                # import time is per-file-service, so pick the single domain when there is one
                if location_context.IsOneDomain() and location_context.IncludesCurrent():
                    file_service_key = list( location_context.current_service_keys )[0]
                else:
                    file_service_key = CC.COMBINED_LOCAL_FILE_SERVICE_KEY
                file_service_id = self.modules_services.GetServiceId( file_service_key )
                current_files_table_name = ClientDBFilesStorage.GenerateFilesTableName( file_service_id, HC.CONTENT_STATUS_CURRENT )
                query = 'SELECT hash_id, timestamp FROM {temp_table} CROSS JOIN {current_files_table} USING ( hash_id );'.format( temp_table = '{temp_table}', current_files_table = current_files_table_name )
            elif sort_data == CC.SORT_FILES_BY_FILESIZE:
                query = 'SELECT hash_id, size FROM {temp_table} CROSS JOIN files_info USING ( hash_id );'
            elif sort_data == CC.SORT_FILES_BY_DURATION:
                query = 'SELECT hash_id, duration FROM {temp_table} CROSS JOIN files_info USING ( hash_id );'
            elif sort_data == CC.SORT_FILES_BY_FRAMERATE:
                query = 'SELECT hash_id, num_frames, duration FROM {temp_table} CROSS JOIN files_info USING ( hash_id );'
            elif sort_data == CC.SORT_FILES_BY_NUM_FRAMES:
                query = 'SELECT hash_id, num_frames FROM {temp_table} CROSS JOIN files_info USING ( hash_id );'
            elif sort_data == CC.SORT_FILES_BY_WIDTH:
                query = 'SELECT hash_id, width FROM {temp_table} CROSS JOIN files_info USING ( hash_id );'
            elif sort_data == CC.SORT_FILES_BY_HEIGHT:
                query = 'SELECT hash_id, height FROM {temp_table} CROSS JOIN files_info USING ( hash_id );'
            elif sort_data == CC.SORT_FILES_BY_RATIO:
                query = 'SELECT hash_id, width, height FROM {temp_table} CROSS JOIN files_info USING ( hash_id );'
            elif sort_data == CC.SORT_FILES_BY_NUM_PIXELS:
                query = 'SELECT hash_id, width, height FROM {temp_table} CROSS JOIN files_info USING ( hash_id );'
            elif sort_data == CC.SORT_FILES_BY_MEDIA_VIEWS:
                query = 'SELECT hash_id, views FROM {temp_table} CROSS JOIN file_viewing_stats USING ( hash_id ) WHERE canvas_type = {canvas_type};'.format( temp_table = '{temp_table}', canvas_type = CC.CANVAS_MEDIA_VIEWER )
            elif sort_data == CC.SORT_FILES_BY_MEDIA_VIEWTIME:
                query = 'SELECT hash_id, viewtime FROM {temp_table} CROSS JOIN file_viewing_stats USING ( hash_id ) WHERE canvas_type = {canvas_type};'.format( temp_table = '{temp_table}', canvas_type = CC.CANVAS_MEDIA_VIEWER )
            elif sort_data == CC.SORT_FILES_BY_APPROX_BITRATE:
                query = 'SELECT hash_id, duration, num_frames, size, width, height FROM {temp_table} CROSS JOIN files_info USING ( hash_id );'
            elif sort_data == CC.SORT_FILES_BY_FILE_MODIFIED_TIMESTAMP:
                # a file can have several modified timestamps; take the earliest across both tables
                q1 = 'SELECT hash_id, file_modified_timestamp FROM {temp_table} CROSS JOIN file_modified_timestamps USING ( hash_id )'
                q2 = 'SELECT hash_id, file_modified_timestamp FROM {temp_table} CROSS JOIN file_domain_modified_timestamps USING ( hash_id )'
                query = 'SELECT hash_id, MIN( file_modified_timestamp ) FROM ( {} UNION {} ) GROUP BY hash_id;'.format( q1, q2 )
            elif sort_data == CC.SORT_FILES_BY_LAST_VIEWED_TIME:
                query = 'SELECT hash_id, last_viewed_timestamp FROM {temp_table} CROSS JOIN file_viewing_stats USING ( hash_id ) WHERE canvas_type = {canvas_type};'.format( temp_table = '{temp_table}', canvas_type = CC.CANVAS_MEDIA_VIEWER )
            elif sort_data == CC.SORT_FILES_BY_ARCHIVED_TIMESTAMP:
                query = 'SELECT hash_id, archived_timestamp FROM {temp_table} CROSS JOIN archive_timestamps USING ( hash_id );'
            # now pick the python-side sort key for the fetched rows;
            # NULL/zero data sorts as -1 so broken rows group together
            if sort_data == CC.SORT_FILES_BY_RATIO:
                def key( row ):
                    # row is ( hash_id, width, height )
                    width = row[1]
                    height = row[2]
                    if width is None or height is None:
                        return -1
                    else:
                        return width / height
            elif sort_data == CC.SORT_FILES_BY_FRAMERATE:
                def key( row ):
                    # row is ( hash_id, num_frames, duration )
                    num_frames = row[1]
                    duration = row[2]
                    if num_frames is None or duration is None or num_frames == 0 or duration == 0:
                        return -1
                    else:
                        return num_frames / duration
            elif sort_data == CC.SORT_FILES_BY_NUM_PIXELS:
                def key( row ):
                    # row is ( hash_id, width, height )
                    width = row[1]
                    height = row[2]
                    if width is None or height is None or width == 0 or height == 0:
                        return -1
                    else:
                        return width * height
            elif sort_data == CC.SORT_FILES_BY_APPROX_BITRATE:
                def key( row ):
                    # row is ( hash_id, duration, num_frames, size, width, height );
                    # sorts by time-bitrate first, falling back to per-pixel/per-frame bitrate
                    duration = row[1]
                    num_frames = row[2]
                    size = row[3]
                    width = row[4]
                    height = row[5]
                    if duration is None or duration == 0:
                        if size is None or size == 0:
                            duration_bitrate = -1
                            frame_bitrate = -1
                        else:
                            duration_bitrate = 0
                            if width is None or height is None:
                                frame_bitrate = 0
                            else:
                                if size is None or size == 0 or width is None or width == 0 or height is None or height == 0:
                                    frame_bitrate = -1
                                else:
                                    num_pixels = width * height
                                    frame_bitrate = size / num_pixels
                    else:
                        if size is None or size == 0:
                            duration_bitrate = -1
                            frame_bitrate = -1
                        else:
                            duration_bitrate = size / duration
                            if num_frames is None or num_frames == 0:
                                frame_bitrate = 0
                            else:
                                frame_bitrate = duration_bitrate / num_frames
                    return ( duration_bitrate, frame_bitrate )
            else:
                # every other simple sort fetches ( hash_id, value )
                key = lambda row: -1 if row[1] is None else row[1]
            reverse = sort_order == CC.SORT_DESC
        elif sort_data == CC.SORT_FILES_BY_RANDOM:
            hash_ids = list( hash_ids )
            random.shuffle( hash_ids )
            did_sort = True
    if query is not None:
        with self._MakeTemporaryIntegerTable( hash_ids, 'hash_id' ) as temp_hash_ids_table_name:
            hash_ids_and_other_data = sorted( self._Execute( query.format( temp_table = temp_hash_ids_table_name ) ), key = key, reverse = reverse )
        original_hash_ids = set( hash_ids )
        hash_ids = [ row[0] for row in hash_ids_and_other_data ]
        # some stuff like media views won't have rows
        missing_hash_ids = original_hash_ids.difference( hash_ids )
        hash_ids.extend( missing_hash_ids )
        did_sort = True
    return ( did_sort, hash_ids )
def _UndeleteFiles( self, service_id, hash_ids ):
    """Re-add previously deleted files to a service using their preserved undelete rows."""
    undelete_rows = self.modules_files_storage.GetUndeleteRows( service_id, hash_ids )
    self._AddFiles( service_id, undelete_rows )
def _UnloadModules( self ):
    """Drop references to the db modules so they can be garbage-collected."""
    for attr_name in ( 'modules_hashes', 'modules_tags', 'modules_urls', 'modules_texts' ):
        delattr( self, attr_name )
    self._modules = []
def _UpdateDB( self, version ):
self._controller.frame_splash_status.SetText( 'updating db to v' + str( version + 1 ) )
if version == 419:
self._controller.frame_splash_status.SetSubtext( 'creating a couple of indices' )
self._CreateIndex( 'tag_parents', [ 'service_id', 'parent_tag_id' ] )
self._CreateIndex( 'tag_parent_petitions', [ 'service_id', 'parent_tag_id' ] )
self._CreateIndex( 'tag_siblings', [ 'service_id', 'good_tag_id' ] )
self._CreateIndex( 'tag_sibling_petitions', [ 'service_id', 'good_tag_id' ] )
self.modules_db_maintenance.AnalyzeTable( 'tag_parents' )
self.modules_db_maintenance.AnalyzeTable( 'tag_parent_petitions' )
self.modules_db_maintenance.AnalyzeTable( 'tag_siblings' )
self.modules_db_maintenance.AnalyzeTable( 'tag_sibling_petitions' )
self._controller.frame_splash_status.SetSubtext( 'regenerating ideal siblings and parents' )
try:
self.modules_tag_display.RegenerateTagSiblingsAndParentsCache()
except Exception as e:
HydrusData.PrintException( e )
message = 'Trying to regen sibling lookups failed! Please let hydrus dev know!'
self.pub_initial_message( message )
if version == 423:
try:
domain_manager = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_DOMAIN_MANAGER )
domain_manager.Initialise()
#
domain_manager.OverwriteDefaultParsers( ( 'e621 file page parser', ) )
domain_manager.OverwriteDefaultURLClasses( ( 'nitter media timeline', 'nitter timeline' ) )
#
domain_manager.TryToLinkURLClassesAndParsers()
#
self.modules_serialisable.SetJSONDump( domain_manager )
except Exception as e:
HydrusData.PrintException( e )
message = 'Trying to update some parsers failed! Please let hydrus dev know!'
self.pub_initial_message( message )
#
result_master = self._Execute( 'SELECT 1 FROM external_master.sqlite_master WHERE name = ?;', ( 'subtags_fts4', ) ).fetchone()
result_caches = self._Execute( 'SELECT 1 FROM external_caches.sqlite_master WHERE name = ?;', ( 'subtags_fts4', ) ).fetchone()
if result_master is not None or result_caches is not None:
try:
self._controller.frame_splash_status.SetText( 'dropping old cache - subtags fts4' )
self._Execute( 'DROP TABLE IF EXISTS subtags_fts4;' )
self._controller.frame_splash_status.SetText( 'dropping old cache - subtags searchable map' )
self._Execute( 'DROP TABLE IF EXISTS subtags_searchable_map;' )
self._controller.frame_splash_status.SetText( 'dropping old cache - integer subtags' )
self._Execute( 'DROP TABLE IF EXISTS integer_subtags;' )
self.modules_services.combined_file_service_id = self.modules_services.GetServiceId( CC.COMBINED_FILE_SERVICE_KEY )
file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_TAG_LOOKUP_CACHES )
tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
for ( file_service_id, tag_service_id ) in itertools.product( file_service_ids, tag_service_ids ):
self._controller.frame_splash_status.SetText( 'creating new specific cache - {} {}'.format( file_service_id, tag_service_id ) )
self.modules_tag_search.Drop( file_service_id, tag_service_id )
self.modules_tag_search.Generate( file_service_id, tag_service_id )
self._CacheTagsPopulate( file_service_id, tag_service_id )
for tag_service_id in tag_service_ids:
self._controller.frame_splash_status.SetText( 'creating new combined files cache - {}'.format( tag_service_id ) )
self.modules_tag_search.Drop( self.modules_services.combined_file_service_id, tag_service_id )
self.modules_tag_search.Generate( self.modules_services.combined_file_service_id, tag_service_id )
self._CacheTagsPopulate( self.modules_services.combined_file_service_id, tag_service_id )
except Exception as e:
HydrusData.PrintException( e )
raise Exception( 'The v424 cache update failed to work! The error has been printed to the log. Please rollback to 423 and let hydev know the details.' )
if version == 424:
session_manager = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_SESSION_MANAGER )
if session_manager is None:
try:
legacy_session_manager = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_SESSION_MANAGER_LEGACY )
if legacy_session_manager is None:
session_manager = ClientNetworkingSessions.NetworkSessionManager()
session_manager.SetDirty()
message = 'Hey, when updating your session manager to the new object, it seems the original was missing. I have created an empty new one, but it will have no cookies, so you will have to re-login as needed.'
self.pub_initial_message( message )
else:
session_manager = ClientNetworkingSessionsLegacy.ConvertLegacyToNewSessions( legacy_session_manager )
self.modules_serialisable.DeleteJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_SESSION_MANAGER_LEGACY )
self.modules_serialisable.SetJSONDump( session_manager )
except Exception as e:
HydrusData.PrintException( e )
raise Exception( 'The v425 session update failed to work! The error has been printed to the log. Please rollback to 424 and let hydev know the details.' )
bandwidth_manager = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_BANDWIDTH_MANAGER )
if bandwidth_manager is None:
try:
legacy_bandwidth_manager = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_BANDWIDTH_MANAGER_LEGACY )
if legacy_bandwidth_manager is None:
bandwidth_manager = ClientNetworkingBandwidth.NetworkBandwidthManager()
ClientDefaults.SetDefaultBandwidthManagerRules( bandwidth_manager )
bandwidth_manager.SetDirty()
message = 'Hey, when updating your bandwidth manager to the new object, it seems the original was missing. I have created an empty new one, but it will have no bandwidth record or saved rules.'
self.pub_initial_message( message )
else:
bandwidth_manager = ClientNetworkingBandwidthLegacy.ConvertLegacyToNewBandwidth( legacy_bandwidth_manager )
self.modules_serialisable.DeleteJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_BANDWIDTH_MANAGER_LEGACY )
self.modules_serialisable.SetJSONDump( bandwidth_manager )
except Exception as e:
HydrusData.PrintException( e )
raise Exception( 'The v425 bandwidth update failed to work! The error has been printed to the log. Please rollback to 424 and let hydev know the details.' )
if version == 425:
try:
domain_manager = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_DOMAIN_MANAGER )
domain_manager.Initialise()
#
domain_manager.OverwriteDefaultParsers( ( 'gelbooru 0.2.x gallery page parser', 'e621 file page parser', 'gelbooru 0.2.5 file page parser' ) )
domain_manager.OverwriteDefaultURLClasses( ( 'gelbooru gallery pool page', ) )
#
domain_manager.TryToLinkURLClassesAndParsers()
#
self.modules_serialisable.SetJSONDump( domain_manager )
except Exception as e:
HydrusData.PrintException( e )
message = 'Trying to update some parsers failed! Please let hydrus dev know!'
self.pub_initial_message( message )
message = 'You updated from an older version, so some automatic maintenance could not be run. Please run _database->regenerate->tag text search cache (subtags repopulation)_ for all services when you have some time.'
self.pub_initial_message( message )
if version == 426:
    
    # v426 -> v427: regen the pending tag mappings cache, migrate the 'synchronised wait switch'
    # shortcut from the main gui set into a new 'tags_autocomplete' shortcut set, and refresh
    # some gelbooru/420chan downloader defaults.
    try:
        
        self._RegenerateTagDisplayPendingMappingsCache()
        
    except Exception as e:
        
        HydrusData.PrintException( e )
        
        message = 'The v427 pending tags regen routine failed! This is not super important, but hydev would be interested in seeing the error that was printed to the log.'
        
        self.pub_initial_message( message )
        
    
    from hydrus.client.gui import ClientGUIShortcuts
    
    try:
        
        shortcut_sets = ClientDefaults.GetDefaultShortcuts()
        
        # take the default 'tags_autocomplete' set if one ships; otherwise start empty
        try:
            
            tags_autocomplete = [ shortcut_set for shortcut_set in shortcut_sets if shortcut_set.GetName() == 'tags_autocomplete' ][0]
            
        except Exception as e:
            
            tags_autocomplete = ClientGUIShortcuts.ShortcutSet( 'tags_autocomplete' )
            
        
        main_gui = self.modules_serialisable.GetJSONDumpNamed( HydrusSerialisable.SERIALISABLE_TYPE_SHORTCUT_SET, dump_name = 'main_gui' )
        
        shortcuts = main_gui.GetShortcuts( CAC.SIMPLE_SYNCHRONISED_WAIT_SWITCH )
        
        # move each user-mapped shortcut into the new set and remove it from main_gui
        for shortcut in shortcuts:
            
            tags_autocomplete.SetCommand( shortcut, CAC.ApplicationCommand.STATICCreateSimpleCommand( CAC.SIMPLE_SYNCHRONISED_WAIT_SWITCH ) )
            
            main_gui.DeleteShortcut( shortcut )
            
        
        self.modules_serialisable.SetJSONDump( main_gui )
        
        self.modules_serialisable.SetJSONDump( tags_autocomplete )
        
    except Exception as e:
        
        HydrusData.PrintException( e )
        
        message = 'The v427 shortcut migrate failed! This is not super important, but hydev would be interested in seeing the error that was printed to the log. Check your \'main gui\' shortcuts if you want to set the migrated commands like \'force autocomplete search\'. I will now try to save an empty tag autocomplete shortcut set.'
        
        self.pub_initial_message( message )
        
        # fall back to saving an empty set so the new shortcut set at least exists on disk
        tags_autocomplete = ClientGUIShortcuts.ShortcutSet( 'tags_autocomplete' )
        
        self.modules_serialisable.SetJSONDump( tags_autocomplete )
        
    
    try:
        
        domain_manager = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_DOMAIN_MANAGER )
        
        domain_manager.Initialise()
        
        #
        
        # break stale url-class/parser pairings before installing the new defaults
        domain_manager.DissolveParserLink( 'gelbooru gallery favorites page', 'gelbooru 0.2.5 file page parser' )
        
        domain_manager.DissolveParserLink( 'gelbooru gallery page', 'gelbooru 0.2.5 file page parser' )
        
        domain_manager.DissolveParserLink( 'gelbooru gallery pool page', 'gelbooru 0.2.5 file page parser' )
        
        domain_manager.DissolveParserLink( 'gelbooru file page', 'gelbooru 0.2.x gallery page parser' )
        
        #
        
        domain_manager.OverwriteDefaultParsers( ( 'gelbooru 0.2.5 file page parser', ) )
        
        #
        
        domain_manager.OverwriteDefaultURLClasses( ( '420chan thread new format', ) )
        
        #
        
        domain_manager.TryToLinkURLClassesAndParsers()
        
        #
        
        self.modules_serialisable.SetJSONDump( domain_manager )
        
    except Exception as e:
        
        HydrusData.PrintException( e )
        
        message = 'Trying to update some parsers failed! Please let hydrus dev know!'
        
        self.pub_initial_message( message )
        
if version == 427:
    
    # v427 -> v428: install nitter mirror downloader defaults (gugs, url classes, parsers).
    try:
        
        domain_manager = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_DOMAIN_MANAGER )
        
        domain_manager.Initialise()
        
        #
        
        domain_manager.OverwriteDefaultGUGs( [
            'nitter (.eu mirror) media lookup',
            'nitter (.eu mirror) retweets lookup',
            'nitter (nixnet mirror) media lookup',
            'nitter (nixnet mirror) retweets lookup'
        ] )
        
        #
        
        domain_manager.OverwriteDefaultURLClasses( [
            'nitter (.eu mirror) media timeline',
            'nitter (.eu mirror) timeline',
            'nitter (.eu mirror) tweet media',
            'nitter (.eu mirror) tweet',
            'nitter (nixnet mirror) media timeline',
            'nitter (nixnet mirror) timeline',
            'nitter (nixnet mirror) tweet media',
            'nitter (nixnet mirror) tweet'
        ] )
        
        #
        
        domain_manager.OverwriteDefaultParsers( [
            'nitter media parser',
            'nitter retweet parser',
            'nitter tweet parser',
            'nitter tweet parser (video from koto.reisen)'
        ] )
        
        #
        
        domain_manager.TryToLinkURLClassesAndParsers()
        
        #
        
        self.modules_serialisable.SetJSONDump( domain_manager )
        
    except Exception as e:
        
        HydrusData.PrintException( e )
        
        message = 'Trying to update nitter mirrors failed! Please let hydrus dev know!'
        
        self.pub_initial_message( message )
        
if version == 428:
    
    # v428 -> v429: create the new local hashes cache tables. this cache is required,
    # so a failure here deliberately aborts the whole update.
    try:
        
        self.modules_hashes_local_cache.CreateInitialTables()
        
        self.modules_hashes_local_cache.CreateInitialIndices()
        
    except Exception as e:
        
        HydrusData.PrintException( e )
        
        raise Exception( 'Could not create the new local hashes cache! The error has been printed to the log, please let hydev know!' )
        
    
    # took out local hash regen here due to later file service splitting, which regens local hash cache anyway
    
if version == 429:
    
    # v429 -> v430: add a subtag -> searchable-subtag map table to every
    # ( file service, tag service ) tag search cache, then populate them all.
    try:
        
        tag_service_ids = set( self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES ) )
        
        file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_TAG_LOOKUP_CACHES )
        
        # the combined ('all files') domain gets its own cache as well
        file_service_ids.add( self.modules_services.combined_file_service_id )
        
        for ( file_service_id, tag_service_id ) in itertools.product( file_service_ids, tag_service_ids ):
            
            subtags_searchable_map_table_name = self.modules_tag_search.GetSubtagsSearchableMapTableName( file_service_id, tag_service_id )
            
            self._Execute( 'CREATE TABLE IF NOT EXISTS {} ( subtag_id INTEGER PRIMARY KEY, searchable_subtag_id INTEGER );'.format( subtags_searchable_map_table_name ) )
            
            self._CreateIndex( subtags_searchable_map_table_name, [ 'searchable_subtag_id' ] )
            
        
        # fill the freshly created tables in one pass
        self._RegenerateTagCacheSearchableSubtagMaps()
        
    except Exception as e:
        
        HydrusData.PrintException( e )
        
        raise Exception( 'The v430 subtag searchable map generation routine failed! The error has been printed to the log, please let hydev know!' )
        
if version == 430:
    
    try:
        
        # due to a bug in over-eager deletion from the tag definition cache, we'll need to resync chained tag ids
        tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
        
        for tag_service_id in tag_service_ids:
            
            message = 'fixing up some desynchronised tag definitions: {}'.format( tag_service_id )
            
            self._controller.frame_splash_status.SetSubtext( message )
            
            ( cache_ideal_tag_siblings_lookup_table_name, cache_actual_tag_siblings_lookup_table_name ) = ClientDBTagSiblings.GenerateTagSiblingsLookupCacheTableNames( tag_service_id )
            ( cache_ideal_tag_parents_lookup_table_name, cache_actual_tag_parents_lookup_table_name ) = ClientDBTagParents.GenerateTagParentsLookupCacheTableNames( tag_service_id )
            
            # collect every tag id referenced by the 'actual' sibling/parent lookup caches
            tag_ids_in_dispute = set()
            
            tag_ids_in_dispute.update( self._STS( self._Execute( 'SELECT DISTINCT bad_tag_id FROM {};'.format( cache_actual_tag_siblings_lookup_table_name ) ) ) )
            tag_ids_in_dispute.update( self._STS( self._Execute( 'SELECT ideal_tag_id FROM {};'.format( cache_actual_tag_siblings_lookup_table_name ) ) ) )
            tag_ids_in_dispute.update( self._STS( self._Execute( 'SELECT DISTINCT child_tag_id FROM {};'.format( cache_actual_tag_parents_lookup_table_name ) ) ) )
            tag_ids_in_dispute.update( self._STS( self._Execute( 'SELECT DISTINCT ancestor_tag_id FROM {};'.format( cache_actual_tag_parents_lookup_table_name ) ) ) )
            
            if len( tag_ids_in_dispute ) > 0:
                
                self._CacheTagsSyncTags( tag_service_id, tag_ids_in_dispute )
                
            
        
    except Exception as e:
        
        HydrusData.PrintException( e )
        
        message = 'Trying to resync some tag definitions failed! Please let hydrus dev know!'
        
        self.pub_initial_message( message )
        
    
    # also refresh a couple of default parsers
    try:
        
        domain_manager = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_DOMAIN_MANAGER )
        
        domain_manager.Initialise()
        
        #
        
        domain_manager.OverwriteDefaultParsers( [
            '8chan.moe thread api parser',
            'e621 file page parser'
        ] )
        
        #
        
        domain_manager.TryToLinkURLClassesAndParsers()
        
        #
        
        self.modules_serialisable.SetJSONDump( domain_manager )
        
    except Exception as e:
        
        HydrusData.PrintException( e )
        
        message = 'Trying to update some parsers failed! Please let hydrus dev know!'
        
        self.pub_initial_message( message )
        
if version == 431:
    
    # v431 -> v432: convert the legacy integer 'default_tag_sort' option into the new
    # serialisable ClientTagSorting.TagSort object stored on the client options.
    try:
        
        new_options = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_CLIENT_OPTIONS )
        
        old_options = self._GetOptions()
        
        # the legacy enum values, inlined here because the old constants no longer exist
        SORT_BY_LEXICOGRAPHIC_ASC = 8
        SORT_BY_LEXICOGRAPHIC_DESC = 9
        SORT_BY_INCIDENCE_ASC = 10
        SORT_BY_INCIDENCE_DESC = 11
        SORT_BY_LEXICOGRAPHIC_NAMESPACE_ASC = 12
        SORT_BY_LEXICOGRAPHIC_NAMESPACE_DESC = 13
        SORT_BY_INCIDENCE_NAMESPACE_ASC = 14
        SORT_BY_INCIDENCE_NAMESPACE_DESC = 15
        SORT_BY_LEXICOGRAPHIC_IGNORE_NAMESPACE_ASC = 16
        SORT_BY_LEXICOGRAPHIC_IGNORE_NAMESPACE_DESC = 17
        
        old_default_tag_sort = old_options[ 'default_tag_sort' ]
        
        from hydrus.client.metadata import ClientTagSorting
        
        sort_type = ClientTagSorting.SORT_BY_HUMAN_TAG
        
        # FIX: this tuple previously listed SORT_BY_LEXICOGRAPHIC_NAMESPACE_ASC twice; the second
        # member was clearly meant to be SORT_BY_LEXICOGRAPHIC_NAMESPACE_DESC, matching how the
        # DESC variant is treated in the sort_order and group_by branches below. Behaviour is
        # unchanged, since the default sort_type is already SORT_BY_HUMAN_TAG.
        if old_default_tag_sort in ( SORT_BY_LEXICOGRAPHIC_ASC, SORT_BY_LEXICOGRAPHIC_DESC, SORT_BY_LEXICOGRAPHIC_NAMESPACE_ASC, SORT_BY_LEXICOGRAPHIC_NAMESPACE_DESC ):
            
            sort_type = ClientTagSorting.SORT_BY_HUMAN_TAG
            
        elif old_default_tag_sort in ( SORT_BY_LEXICOGRAPHIC_IGNORE_NAMESPACE_ASC, SORT_BY_LEXICOGRAPHIC_IGNORE_NAMESPACE_DESC ):
            
            sort_type = ClientTagSorting.SORT_BY_HUMAN_SUBTAG
            
        elif old_default_tag_sort in ( SORT_BY_INCIDENCE_ASC, SORT_BY_INCIDENCE_DESC, SORT_BY_INCIDENCE_NAMESPACE_ASC, SORT_BY_INCIDENCE_NAMESPACE_DESC ):
            
            sort_type = ClientTagSorting.SORT_BY_COUNT
            
        
        # every *_ASC legacy value maps to ascending; everything else descending
        if old_default_tag_sort in ( SORT_BY_INCIDENCE_ASC, SORT_BY_INCIDENCE_NAMESPACE_ASC, SORT_BY_LEXICOGRAPHIC_ASC, SORT_BY_LEXICOGRAPHIC_IGNORE_NAMESPACE_ASC, SORT_BY_LEXICOGRAPHIC_NAMESPACE_ASC ):
            
            sort_order = CC.SORT_ASC
            
        else:
            
            sort_order = CC.SORT_DESC
            
        
        use_siblings = True
        
        # the legacy 'namespace' variants grouped tags by namespace
        if old_default_tag_sort in ( SORT_BY_INCIDENCE_NAMESPACE_ASC, SORT_BY_INCIDENCE_NAMESPACE_DESC, SORT_BY_LEXICOGRAPHIC_NAMESPACE_ASC, SORT_BY_LEXICOGRAPHIC_NAMESPACE_DESC ):
            
            group_by = ClientTagSorting.GROUP_BY_NAMESPACE
            
        else:
            
            group_by = ClientTagSorting.GROUP_BY_NOTHING
            
        
        tag_sort = ClientTagSorting.TagSort(
            sort_type = sort_type,
            sort_order = sort_order,
            use_siblings = use_siblings,
            group_by = group_by
        )
        
        new_options.SetDefaultTagSort( tag_sort )
        
        self.modules_serialisable.SetJSONDump( new_options )
        
    except Exception as e:
        
        HydrusData.PrintException( e )
        
        message = 'Trying to convert your old default tag sort to the new format failed! Please set it again in the options.'
        
        self.pub_initial_message( message )
        
if version == 432:
    
    # v432 -> v433: install the twitter syndication-api downloader defaults.
    try:
        
        domain_manager = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_DOMAIN_MANAGER )
        
        domain_manager.Initialise()
        
        #
        
        domain_manager.OverwriteDefaultGUGs( [
            'twitter syndication profile lookup (limited) (with replies)',
            'twitter syndication profile lookup (limited)'
        ] )
        
        #
        
        domain_manager.OverwriteDefaultURLClasses( [
            'twitter syndication api profile',
            'twitter syndication api tweet',
            'twitter tweet'
        ] )
        
        #
        
        domain_manager.OverwriteDefaultParsers( [
            'twitter syndication api profile parser',
            'twitter syndication api tweet parser'
        ] )
        
        #
        
        domain_manager.TryToLinkURLClassesAndParsers()
        
        #
        
        self.modules_serialisable.SetJSONDump( domain_manager )
        
    except Exception as e:
        
        HydrusData.PrintException( e )
        
        message = 'Trying to add the twitter downloader failed! Please let hydrus dev know!'
        
        self.pub_initial_message( message )
        
if version == 435:
    
    # v435 -> v436: regen the pending tag mappings cache and flush the cached pending/petitioned
    # service-info counts so the pending menu recalculates from scratch.
    try:
        
        self._RegenerateTagPendingMappingsCache()
        
        types_to_delete = (
            HC.SERVICE_INFO_NUM_PENDING_MAPPINGS,
            HC.SERVICE_INFO_NUM_PENDING_TAG_SIBLINGS,
            HC.SERVICE_INFO_NUM_PENDING_TAG_PARENTS,
            HC.SERVICE_INFO_NUM_PETITIONED_MAPPINGS,
            HC.SERVICE_INFO_NUM_PETITIONED_TAG_SIBLINGS,
            HC.SERVICE_INFO_NUM_PETITIONED_TAG_PARENTS,
            HC.SERVICE_INFO_NUM_PENDING_FILES,
            HC.SERVICE_INFO_NUM_PETITIONED_FILES
        )
        
        self._DeleteServiceInfo( types_to_delete = types_to_delete )
        
    except Exception as e:
        
        HydrusData.PrintException( e )
        
        message = 'Trying to regenerate the pending tag cache failed! This is not a big deal, but you might still have a bad pending count for your pending menu. Error information has been written to the log. Please let hydrus dev know!'
        
        self.pub_initial_message( message )
        
if version == 436:
    
    # v436 -> v437: add timestamp columns to deleted_files and backfill them for the
    # 'my files' domain from the combined-local deletion record and the trash.
    result = self._Execute( 'SELECT sql FROM sqlite_master WHERE name = ?;', ( 'deleted_files', ) ).fetchone()
    
    if result is None:
        
        raise Exception( 'No deleted_files table!!!' )
        
    
    ( s, ) = result
    
    # idempotence guard: only run the migration if the column is not in the table's CREATE sql yet
    if 'timestamp' not in s:
        
        self._Execute( 'ALTER TABLE deleted_files ADD COLUMN timestamp INTEGER;' )
        self._Execute( 'ALTER TABLE deleted_files ADD COLUMN original_timestamp INTEGER;' )
        
        # existing deletion records get NULL timestamps; we do not know when they happened
        self._Execute( 'UPDATE deleted_files SET timestamp = ?, original_timestamp = ?;', ( None, None ) )
        
        my_files_service_id = self.modules_services.GetServiceId( CC.LOCAL_FILE_SERVICE_KEY )
        
        # mirror the combined-local deletion record into 'my files'
        self._Execute( 'INSERT OR IGNORE INTO deleted_files ( service_id, hash_id, timestamp, original_timestamp ) SELECT ?, hash_id, timestamp, original_timestamp FROM deleted_files WHERE service_id = ?;', ( my_files_service_id, self.modules_services.combined_local_file_service_id ) )
        
        # files currently in the trash count as deleted from 'my files'; their current_files
        # timestamp becomes the original import time
        self._Execute( 'INSERT OR IGNORE INTO deleted_files ( service_id, hash_id, timestamp, original_timestamp ) SELECT ?, hash_id, ?, timestamp FROM current_files WHERE service_id = ?;', ( my_files_service_id, None, self.modules_services.trash_service_id ) )
        
        self._CreateIndex( 'deleted_files', [ 'timestamp' ] )
        self._CreateIndex( 'deleted_files', [ 'original_timestamp' ] )
        
        # invalidate the cached deleted-file counts
        self._Execute( 'DELETE FROM service_info WHERE info_type = ?;', ( HC.SERVICE_INFO_NUM_DELETED_FILES, ) )
        
        self.modules_db_maintenance.AnalyzeTable( 'deleted_files' )
        
if version == 438:
    
    # v438 -> v439: refresh the default imgur single-media url class. Failure here is
    # non-fatal; the user just gets a note.
    try:
        
        manager = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_DOMAIN_MANAGER )
        
        manager.Initialise()
        
        manager.OverwriteDefaultURLClasses( ( 'imgur single media file url', ) )
        
        self.modules_serialisable.SetJSONDump( manager )
        
    except Exception as e:
        
        HydrusData.PrintException( e )
        
        self.pub_initial_message( 'Trying to update some url classes failed! Please let hydrus dev know!' )
        
if version == 440:
    
    # v440 -> v441: migrate the old 'sort_by' option (list of namespace sorts) into the
    # new default namespace sorts on the serialisable client options.
    try:
        
        old_options = self._GetOptions()
        
        if 'sort_by' in old_options:
            
            old_sort_by = old_options[ 'sort_by' ]
            
            new_options = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_CLIENT_OPTIONS )
            
            # each old entry is ( gumpf, namespaces ); only the namespaces list carries over
            default_namespace_sorts = [ ClientMedia.MediaSort( sort_type = ( 'namespaces', ( namespaces, ClientTags.TAG_DISPLAY_ACTUAL ) ) ) for ( gumpf, namespaces ) in old_sort_by ]
            
            new_options.SetDefaultNamespaceSorts( default_namespace_sorts )
            
            self.modules_serialisable.SetJSONDump( new_options )
            
        
    except Exception as e:
        
        HydrusData.PrintException( e )
        
        message = 'Trying to migrate the old default namespace sorts failed! Please let hydrus dev know!'
        
        self.pub_initial_message( message )
        
    
    # also refresh the pixiv artist page url class
    try:
        
        domain_manager = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_DOMAIN_MANAGER )
        
        domain_manager.Initialise()
        
        #
        
        domain_manager.OverwriteDefaultURLClasses( ( 'pixiv artist page (new format)', ) )
        
        #
        
        self.modules_serialisable.SetJSONDump( domain_manager )
        
    except Exception as e:
        
        HydrusData.PrintException( e )
        
        message = 'Trying to update some url classes failed! Please let hydrus dev know!'
        
        self.pub_initial_message( message )
        
if version == 441:
    
    # v441 -> v442: convert legacy GUI session dumps to the new hashed-dump storage.
    # the presence of json_dumps_hashed is the idempotence marker for this step.
    result = self._Execute( 'SELECT 1 FROM sqlite_master WHERE name = ?;', ( 'json_dumps_hashed', ) ).fetchone()
    
    if result is None:
        
        self._controller.frame_splash_status.SetSubtext( 'doing pre-update free space check' )
        
        legacy_dump_type = HydrusSerialisable.SERIALISABLE_TYPE_GUI_SESSION_LEGACY
        
        result = self._Execute( 'SELECT SUM( LENGTH( dump ) ) FROM json_dumps_named WHERE dump_type = ?;', ( legacy_dump_type, ) ).fetchone()
        
        if result is None or result[0] is None:
            
            raise Exception( 'Hey, for the v442 update step, I am supposed to be converting your sessions to a new object, but it did not seem like there were any! I am not sure what is going on, so the update will now be abandoned. Please roll back to v441 and let hydev know!' )
            
        
        ( space_needed, ) = result
        
        space_needed /= 2 # most sessions will have backups and shared pages will save space in the end
        
        try:
            
            HydrusDBBase.CheckHasSpaceForDBTransaction( self._db_dir, space_needed )
            
        except Exception as e:
            
            # warn but do not abort: the user is told to kill the process if they want out
            message = 'Hey, for the v442 update step, I am supposed to be converting your sessions to a new object, but there was a problem. It looks like you have very large sessions, and I do not think you have enough free disk space to perform the conversion safely. If you OK this dialog, it will be attempted anyway, but be warned: you may run out of space mid-update and then have serious problems. I recommend you kill the hydrus process NOW and then free up some space before trying again. Please check the full error:'
            message += os.linesep * 2
            message += str( e )
            
            BlockingSafeShowMessage( message )
            
        
        one_worked_ok = False
        
        self._Execute( 'CREATE TABLE IF NOT EXISTS json_dumps_hashed ( hash BLOB_BYTES PRIMARY KEY, dump_type INTEGER, version INTEGER, dump BLOB_BYTES );' )
        
        names_and_timestamps = self._Execute( 'SELECT dump_name, timestamp FROM json_dumps_named WHERE dump_type = ?;', ( legacy_dump_type, ) ).fetchall()
        
        from hydrus.client.gui.pages import ClientGUISessionLegacy
        
        import json
        
        for ( i, ( name, timestamp ) ) in enumerate( names_and_timestamps ):
            
            self._controller.frame_splash_status.SetSubtext( 'converting "{}" "{}"\u2026'.format( name, HydrusData.ConvertTimestampToPrettyTime( timestamp ) ) )
            
            ( dump_version, dump ) = self._Execute( 'SELECT version, dump FROM json_dumps_named WHERE dump_type = ? AND dump_name = ? AND timestamp = ?;', ( legacy_dump_type, name, timestamp ) ).fetchone()
            
            try:
                
                # older rows may store the dump as bytes rather than text
                if isinstance( dump, bytes ):
                    
                    dump = str( dump, 'utf-8' )
                    
                
                serialisable_info = json.loads( dump )
                
                legacy_session = HydrusSerialisable.CreateFromSerialisableTuple( ( legacy_dump_type, name, dump_version, serialisable_info ) )
                
            except Exception as e:
                
                HydrusData.PrintException( e, do_wait = False )
                
                # best effort: save the raw broken dump to disk so nothing is lost, then move on
                try:
                    
                    timestamp_string = time.strftime( '%Y-%m-%d %H-%M-%S' )
                    
                    filename = '({}, {}) at {}.json'.format( name, timestamp, timestamp_string )
                    
                    path = os.path.join( self._db_dir, filename )
                    
                    with open( path, 'wb' ) as f:
                        
                        if isinstance( dump, str ):
                            
                            dump = bytes( dump, 'utf-8', errors = 'replace' )
                            
                        
                        f.write( dump )
                        
                    
                except Exception as e:
                    
                    pass
                    
                
                message = 'When updating sessions, "{}" at "{}" was non-loadable/convertable! I tried to save a backup of the object to your database directory.'.format( name, HydrusData.ConvertTimestampToPrettyTime( timestamp ) )
                
                HydrusData.Print( message )
                
                self.pub_initial_message( message )
                
                continue
                
            
            session = ClientGUISessionLegacy.ConvertLegacyToNew( legacy_session )
            
            # preserve the original timestamp so backups line up, then delete the legacy row
            self.modules_serialisable.SetJSONDump( session, force_timestamp = timestamp )
            
            self._Execute( 'DELETE FROM json_dumps_named WHERE dump_type = ? AND dump_name = ? AND timestamp = ?;', ( legacy_dump_type, name, timestamp ) )
            
            one_worked_ok = True
            
        
        # if literally every session failed, abandon rather than hand the user an empty client
        if not one_worked_ok:
            
            raise Exception( 'When trying to update your sessions to the new format, none of them converted correctly! Rather than send you into an empty and potentially non-functional client, the update is now being abandoned. Please roll back to v441 and let hydev know!' )
            
        
        self._Execute( 'DELETE FROM json_dumps_named WHERE dump_type = ?;', ( legacy_dump_type, ) )
        
        self._controller.frame_splash_status.SetSubtext( 'session converting finished' )
        
    
    # NOTE(review): indentation was lost in this copy; this parser refresh looks like it runs
    # for every v441 updater, not only when the sessions were converted -- confirm.
    try:
        
        domain_manager = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_DOMAIN_MANAGER )
        
        domain_manager.Initialise()
        
        #
        
        domain_manager.OverwriteDefaultParsers( ( 'yande.re post page parser', 'moebooru file page parser' ) )
        
        #
        
        self.modules_serialisable.SetJSONDump( domain_manager )
        
    except Exception as e:
        
        HydrusData.PrintException( e )
        
        message = 'Trying to update some url classes failed! Please let hydrus dev know!'
        
        self.pub_initial_message( message )
        
if version == 442:
    
    # v442 -> v443: point the yande.re file page url class at the yande.re post page parser.
    # Non-fatal on failure.
    try:
        
        manager = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_DOMAIN_MANAGER )
        
        manager.Initialise()
        
        manager.OverwriteParserLink( 'yande.re file page', 'yande.re post page parser' )
        
        self.modules_serialisable.SetJSONDump( manager )
        
    except Exception as e:
        
        HydrusData.PrintException( e )
        
        self.pub_initial_message( 'Trying to update some url classes failed! Please let hydrus dev know!' )
        
if version == 446:
    
    # v446 -> v447, part 1: replace the old 'gelbooru md5' file lookup script dump (type 32)
    # with a corrected serialised copy, if the user still has one.
    result = self._Execute( 'SELECT 1 FROM json_dumps_named WHERE dump_type = ? AND dump_name = ?;', ( 32, 'gelbooru md5' ) ).fetchone()
    
    if result is not None:
        
        try:
            
            self._Execute( 'DELETE FROM json_dumps_named WHERE dump_type = ? AND dump_name = ?;', ( 32, 'gelbooru md5' ) )
            
            # row is ( dump_type, dump_name, version, timestamp, dump ); the dump is the
            # pre-serialised script as raw json text
            script_info = ( 32, 'gelbooru md5', 2, HydrusData.GetNow(), '''["http://gelbooru.com/index.php", 0, 1, [55, 1, [[[4, "hex"]], "some hash bytes"]], "md5", {"s": "list", "page": "post"}, [[30, 6, ["we got sent back to main gallery page -- title test", 8, [27, 7, [[26, 1, [[62, 2, [0, "head", {}, 0, null, false, [51, 1, [3, "", null, null, "example string"]]]], [62, 2, [0, "title", {}, 0, null, false, [51, 1, [3, "", null, null, "example string"]]]]]], 1, "", [84, 1, [26, 1, []]]]], [true, [51, 1, [2, "Image List", null, null, "Image List"]]]]], [30, 6, ["", 0, [27, 7, [[26, 1, [[62, 2, [0, "li", {"class": "tag-type-general"}, null, null, false, [51, 1, [3, "", null, null, "example string"]]]], [62, 2, [0, "a", {}, 1, null, false, [51, 1, [3, "", null, null, "example string"]]]]]], 1, "", [84, 1, [26, 1, []]]]], ""]], [30, 6, ["", 0, [27, 7, [[26, 1, [[62, 2, [0, "li", {"class": "tag-type-copyright"}, null, null, false, [51, 1, [3, "", null, null, "example string"]]]], [62, 2, [0, "a", {}, 1, null, false, [51, 1, [3, "", null, null, "example string"]]]]]], 1, "", [84, 1, [26, 1, []]]]], "series"]], [30, 6, ["", 0, [27, 7, [[26, 1, [[62, 2, [0, "li", {"class": "tag-type-artist"}, null, null, false, [51, 1, [3, "", null, null, "example string"]]]], [62, 2, [0, "a", {}, 1, null, false, [51, 1, [3, "", null, null, "example string"]]]]]], 1, "", [84, 1, [26, 1, []]]]], "creator"]], [30, 6, ["", 0, [27, 7, [[26, 1, [[62, 2, [0, "li", {"class": "tag-type-character"}, null, null, false, [51, 1, [3, "", null, null, "example string"]]]], [62, 2, [0, "a", {}, 1, null, false, [51, 1, [3, "", null, null, "example string"]]]]]], 1, "", [84, 1, [26, 1, []]]]], "character"]], [30, 6, ["we got sent back to main gallery page -- page links exist", 8, [27, 7, [[26, 1, [[62, 2, [0, "div", {"id": "paginator"}, null, null, false, [51, 1, [3, "", null, null, "example string"]]]], [62, 2, [0, "a", {}, null, null, false, [51, 1, [3, "", null, null, "example string"]]]]]], 2, "class", [84, 1, 
[26, 1, []]]]], [true, [51, 1, [3, "", null, null, "pagination"]]]]]]]''' )
            
            self._Execute( 'REPLACE INTO json_dumps_named VALUES ( ?, ?, ?, ?, ? );', script_info )
            
        except Exception as e:
            
            HydrusData.PrintException( e )
            
            message = 'Trying to update gelbooru file lookup script failed! Please let hydrus dev know!'
            
            self.pub_initial_message( message )
            
        
    
    #
    
    # part 2: split the monolithic current_files/deleted_files/file_transfers/file_petitions
    # tables into per-service file tables, then drop the old tables. this part is fatal on error.
    result = self._Execute( 'SELECT 1 FROM sqlite_master WHERE name = ?;', ( 'current_files', ) ).fetchone()
    
    if result is not None:
        
        try:
            
            service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES )
            
            for ( i, service_id ) in enumerate( service_ids ):
                
                self._controller.frame_splash_status.SetSubtext( 'reorganising file storage {}'.format( HydrusData.ConvertValueRangeToPrettyString( i + 1, len( service_ids ) ) ) )
                
                self.modules_files_storage.GenerateFilesTables( service_id )
                
                ( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name ) = ClientDBFilesStorage.GenerateFilesTableNames( service_id )
                
                self._Execute( 'INSERT INTO {} ( hash_id, timestamp ) SELECT hash_id, timestamp FROM current_files WHERE service_id = ?;'.format( current_files_table_name ), ( service_id, ) )
                self._Execute( 'INSERT INTO {} ( hash_id, timestamp, original_timestamp ) SELECT hash_id, timestamp, original_timestamp FROM deleted_files WHERE service_id = ?;'.format( deleted_files_table_name ), ( service_id, ) )
                self._Execute( 'INSERT INTO {} ( hash_id ) SELECT hash_id FROM file_transfers WHERE service_id = ?;'.format( pending_files_table_name ), ( service_id, ) )
                self._Execute( 'INSERT INTO {} ( hash_id, reason_id ) SELECT hash_id, reason_id FROM file_petitions WHERE service_id = ?;'.format( petitioned_files_table_name ), ( service_id, ) )
                
            
            self.modules_db_maintenance.TouchAnalyzeNewTables()
            
            # the old monolithic tables are only dropped once every service has been copied out
            self._Execute( 'DROP TABLE current_files;' )
            self._Execute( 'DROP TABLE deleted_files;' )
            self._Execute( 'DROP TABLE file_transfers;' )
            self._Execute( 'DROP TABLE file_petitions;' )
            
        except Exception as e:
            
            HydrusData.PrintException( e )
            
            raise Exception( 'Unfortunately, hydrus was unable to update your file storage to the new system! The error has been written to your log, please roll back to v446 and let hydev know!' )
            
        
    
    #
    
    # the local hash cache is rebuilt against the new per-service tables
    self.modules_hashes_local_cache.Repopulate()
    
if version == 447:
    
    # v447 -> v448: queue every local PSD file for a forced thumbnail regen.
    try:
        
        self._controller.frame_splash_status.SetSubtext( 'scheduling PSD files for thumbnail regen' )
        
        table_join = self.modules_files_storage.GetTableJoinLimitedByFileDomain( self.modules_services.combined_local_file_service_id, 'files_info', HC.CONTENT_STATUS_CURRENT )
        
        hash_ids = self._STL( self._Execute( 'SELECT hash_id FROM {} WHERE mime = ?;'.format( table_join ), ( HC.APPLICATION_PSD, ) ) )
        
        self.modules_files_maintenance_queue.AddJobs( hash_ids, ClientFiles.REGENERATE_FILE_DATA_JOB_FORCE_THUMBNAIL )
        
    except Exception as e:
        
        HydrusData.PrintException( e )
        
        message = 'Trying to schedule PSD files for thumbnail generation failed! Please let hydrus dev know!'
        
        self.pub_initial_message( message )
        
if version == 448:
    
    # v448 -> v449: split each repository's updates table into three (updates,
    # unregistered updates, per-content-type processed flags) and migrate the rows,
    # then reset sibling/parent processing on tag repositories.
    self._controller.frame_splash_status.SetSubtext( 'updating repository update storage' )
    
    for service_id in self.modules_services.GetServiceIds( HC.REPOSITORIES ):
        
        service_type = self.modules_services.GetService( service_id ).GetServiceType()
        
        ( repository_updates_table_name, repository_unregistered_updates_table_name, repository_updates_processed_table_name ) = ClientDBRepositories.GenerateRepositoryUpdatesTableNames( service_id )
        
        # idempotence guard: skip services that already have the new tables
        result = self._Execute( 'SELECT 1 FROM sqlite_master WHERE name = ?;', ( repository_unregistered_updates_table_name, ) ).fetchone()
        
        if result is not None:
            
            continue
            
        
        # pull the old rows into memory before dropping and recreating the table
        all_data = self._Execute( 'SELECT update_index, hash_id, processed FROM {};'.format( repository_updates_table_name ) ).fetchall()
        
        self._Execute( 'DROP TABLE {};'.format( repository_updates_table_name ) )
        
        #
        
        self._Execute( 'CREATE TABLE IF NOT EXISTS {} ( update_index INTEGER, hash_id INTEGER, PRIMARY KEY ( update_index, hash_id ) );'.format( repository_updates_table_name ) )
        self._CreateIndex( repository_updates_table_name, [ 'hash_id' ] )
        
        self._Execute( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER PRIMARY KEY );'.format( repository_unregistered_updates_table_name ) )
        
        self._Execute( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER, content_type INTEGER, processed INTEGER_BOOLEAN, PRIMARY KEY ( hash_id, content_type ) );'.format( repository_updates_processed_table_name ) )
        self._CreateIndex( repository_updates_processed_table_name, [ 'content_type' ] )
        
        #
        
        for ( update_index, hash_id, processed ) in all_data:
            
            self._Execute( 'INSERT OR IGNORE INTO {} ( update_index, hash_id ) VALUES ( ?, ? );'.format( repository_updates_table_name ), ( update_index, hash_id ) )
            
            try:
                
                mime = self.modules_files_metadata_basic.GetMime( hash_id )
                
            except HydrusExceptions.DataMissing:
                
                # no metadata yet: park it in the unregistered table for later registration
                self._Execute( 'INSERT OR IGNORE INTO {} ( hash_id ) VALUES ( ? );'.format( repository_unregistered_updates_table_name ), ( hash_id, ) )
                
                continue
                
            
            # a definitions update carries definitions; a content update carries files or
            # mappings/siblings/parents depending on the repository type
            if mime == HC.APPLICATION_HYDRUS_UPDATE_DEFINITIONS:
                
                content_types = ( HC.CONTENT_TYPE_DEFINITIONS, )
                
            else:
                
                if service_type == HC.FILE_REPOSITORY:
                    
                    content_types = ( HC.CONTENT_TYPE_FILES, )
                    
                else:
                    
                    content_types = ( HC.CONTENT_TYPE_MAPPINGS, HC.CONTENT_TYPE_TAG_PARENTS, HC.CONTENT_TYPE_TAG_SIBLINGS )
                    
                
            
            # the old single 'processed' flag fans out to one row per content type
            self._ExecuteMany( 'INSERT OR IGNORE INTO {} ( hash_id, content_type, processed ) VALUES ( ?, ?, ? );'.format( repository_updates_processed_table_name ), ( ( hash_id, content_type, processed ) for content_type in content_types ) )
            
        
    
    self.modules_repositories.DoOutstandingUpdateRegistration()
    
    self._controller.frame_splash_status.SetSubtext( 'resetting siblings and parents' )
    
    for service in self.modules_services.GetServices( ( HC.TAG_REPOSITORY, ) ):
        
        service_key = service.GetServiceKey()
        
        self._ResetRepositoryProcessing( service_key, ( HC.CONTENT_TYPE_TAG_PARENTS, HC.CONTENT_TYPE_TAG_SIBLINGS ) )
        
if version == 450:
    
    # v450 -> v451: move the similar-files (perceptual hash) tables from the caches db
    # to the master db (and shape_search_cache to main), then drop the old copies.
    # NOTE(review): this uses self._c.execute directly rather than self._Execute --
    # presumably deliberate, but worth confirming against the surrounding code.
    result = self._c.execute( 'SELECT 1 FROM external_caches.sqlite_master WHERE name = ?;', ( 'shape_perceptual_hashes', ) ).fetchone()
    
    if result is not None:
        
        self._controller.frame_splash_status.SetSubtext( 'moving some similar file data around' )
        
        self._Execute( 'CREATE TABLE IF NOT EXISTS external_master.shape_perceptual_hashes ( phash_id INTEGER PRIMARY KEY, phash BLOB_BYTES UNIQUE );' )
        self._Execute( 'CREATE TABLE IF NOT EXISTS external_master.shape_perceptual_hash_map ( phash_id INTEGER, hash_id INTEGER, PRIMARY KEY ( phash_id, hash_id ) );' )
        self._Execute( 'CREATE TABLE IF NOT EXISTS shape_search_cache ( hash_id INTEGER PRIMARY KEY, searched_distance INTEGER );' )
        
        # copy, then drop: the old tables are only removed after the inserts succeed
        self._Execute( 'INSERT OR IGNORE INTO external_master.shape_perceptual_hashes SELECT phash_id, phash FROM external_caches.shape_perceptual_hashes;' )
        self._Execute( 'INSERT OR IGNORE INTO external_master.shape_perceptual_hash_map SELECT phash_id, hash_id FROM external_caches.shape_perceptual_hash_map;' )
        self._Execute( 'INSERT OR IGNORE INTO main.shape_search_cache SELECT hash_id, searched_distance FROM external_caches.shape_search_cache;' )
        
        self._Execute( 'DROP TABLE external_caches.shape_perceptual_hashes;' )
        self._Execute( 'DROP TABLE external_caches.shape_perceptual_hash_map;' )
        self._Execute( 'DROP TABLE external_caches.shape_search_cache;' )
        
        self._CreateIndex( 'external_master.shape_perceptual_hash_map', [ 'hash_id' ] )
        
        self.modules_db_maintenance.TouchAnalyzeNewTables()
        
if version == 451:
    
    # v451 -> v452: repopulate the integer-subtag lookup tables for every tag search cache
    # by re-scanning each cache's subtags in chunks.
    self.modules_services.combined_file_service_id = self.modules_services.GetServiceId( CC.COMBINED_FILE_SERVICE_KEY )
    
    file_service_ids = list( self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_TAG_LOOKUP_CACHES ) )
    file_service_ids.append( self.modules_services.combined_file_service_id )
    
    tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
    
    for ( file_service_id, tag_service_id ) in itertools.product( file_service_ids, tag_service_ids ):
        
        if file_service_id == self.modules_services.combined_file_service_id:
            
            self._controller.frame_splash_status.SetText( 'working on combined tags cache - {}'.format( tag_service_id ) )
            
        else:
            
            self._controller.frame_splash_status.SetText( 'working on specific tags cache - {} {}'.format( file_service_id, tag_service_id ) )
            
        
        tags_table_name = self.modules_tag_search.GetTagsTableName( file_service_id, tag_service_id )
        integer_subtags_table_name = self.modules_tag_search.GetIntegerSubtagsTableName( file_service_id, tag_service_id )
        
        query = 'SELECT subtag_id FROM {};'.format( tags_table_name )
        
        # chunked read keeps memory bounded for very large caches
        BLOCK_SIZE = 10000
        
        for ( group_of_subtag_ids, num_done, num_to_do ) in HydrusDB.ReadLargeIdQueryInSeparateChunks( self._c, query, BLOCK_SIZE ):
            
            message = HydrusData.ConvertValueRangeToPrettyString( num_done, num_to_do )
            
            self._controller.frame_splash_status.SetSubtext( message )
            
            with self._MakeTemporaryIntegerTable( group_of_subtag_ids, 'subtag_id' ) as temp_subtag_ids_table_name:
                
                # temp subtag_ids to subtags
                subtag_ids_and_subtags = self._Execute( 'SELECT subtag_id, subtag FROM {} CROSS JOIN subtags USING ( subtag_id );'.format( temp_subtag_ids_table_name ) ).fetchall()
                
                for ( subtag_id, subtag ) in subtag_ids_and_subtags:
                    
                    # only pure-decimal subtags get an integer lookup entry; int() can still
                    # fail on exotic decimal characters, hence the try
                    if subtag.isdecimal():
                        
                        try:
                            
                            integer_subtag = int( subtag )
                            
                            if ClientDBTagSearch.CanCacheInteger( integer_subtag ):
                                
                                self._Execute( 'INSERT OR IGNORE INTO {} ( subtag_id, integer_subtag ) VALUES ( ?, ? );'.format( integer_subtags_table_name ), ( subtag_id, integer_subtag ) )
                                
                            
                        except ValueError:
                            
                            pass
                            
                        
                    
                
            
        
    
if version == 452:
    
    # v452 -> v453: backfill specific mapping caches for files that were imported before
    # the corresponding tag service existed, then drop the now-obsolete per-pair
    # specific_files_cache tables.
    file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_TAG_LOOKUP_CACHES )
    tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
    
    for ( file_service_id, tag_service_id ) in itertools.product( file_service_ids, tag_service_ids ):
        
        suffix = '{}_{}'.format( file_service_id, tag_service_id )
        
        cache_files_table_name = 'external_caches.specific_files_cache_{}'.format( suffix )
        
        # sqlite_master stores the bare table name, so strip the schema prefix for the lookup
        result = self._Execute( 'SELECT 1 FROM external_caches.sqlite_master WHERE name = ?;', ( cache_files_table_name.split( '.', 1 )[1], ) ).fetchone()
        
        if result is None:
            
            continue
            
        
        self._controller.frame_splash_status.SetText( 'filling holes in specific tags cache - {} {}'.format( file_service_id, tag_service_id ) )
        
        # it turns out cache_files_table_name was not being populated on service creation/reset, so files imported before a tag service was created were not being stored in specific mapping cache data!
        # furthermore, there was confusion whether cache_files_table_name was for mappings (files that have tags) on the tag service or just files on the file service.
        # since we now store current files for each file service on a separate table, and the clever mappings intepretation seems expensive and not actually so useful, we are moving to our nice table instead in various joins/filters/etc...
        
        current_files_table_name = ClientDBFilesStorage.GenerateFilesTableName( file_service_id, HC.CONTENT_STATUS_CURRENT )
        
        # every current file the old cache missed
        query = 'SELECT hash_id FROM {} EXCEPT SELECT hash_id FROM {};'.format( current_files_table_name, cache_files_table_name )
        
        BLOCK_SIZE = 10000
        
        for ( group_of_hash_ids, num_done, num_to_do ) in HydrusDB.ReadLargeIdQueryInSeparateChunks( self._c, query, BLOCK_SIZE ):
            
            message = HydrusData.ConvertValueRangeToPrettyString( num_done, num_to_do )
            
            self._controller.frame_splash_status.SetSubtext( message )
            
            with self._MakeTemporaryIntegerTable( group_of_hash_ids, 'hash_id' ) as temp_hash_ids_table_name:
                
                self._CacheSpecificMappingsAddFiles( file_service_id, tag_service_id, group_of_hash_ids, temp_hash_ids_table_name )
                self.modules_mappings_cache_specific_display.AddFiles( file_service_id, tag_service_id, group_of_hash_ids, temp_hash_ids_table_name )
                
            
        
        self._Execute( 'DROP TABLE {};'.format( cache_files_table_name ) )
        
if version == 459:
    
    # v459 -> v460: queue local clip files for metadata + forced thumbnail regen, and
    # apngs for metadata regen.
    try:
        
        self._controller.frame_splash_status.SetSubtext( 'scheduling clip and apng files for regen' )
        
        table_join = self.modules_files_storage.GetTableJoinLimitedByFileDomain( self.modules_services.combined_local_file_service_id, 'files_info', HC.CONTENT_STATUS_CURRENT )
        
        hash_ids = self._STL( self._Execute( 'SELECT hash_id FROM {} WHERE mime = ?;'.format( table_join ), ( HC.APPLICATION_CLIP, ) ) )
        
        self.modules_files_maintenance_queue.AddJobs( hash_ids, ClientFiles.REGENERATE_FILE_DATA_JOB_FILE_METADATA )
        self.modules_files_maintenance_queue.AddJobs( hash_ids, ClientFiles.REGENERATE_FILE_DATA_JOB_FORCE_THUMBNAIL )
        
        hash_ids = self._STL( self._Execute( 'SELECT hash_id FROM {} WHERE mime = ?;'.format( table_join ), ( HC.IMAGE_APNG, ) ) )
        
        self.modules_files_maintenance_queue.AddJobs( hash_ids, ClientFiles.REGENERATE_FILE_DATA_JOB_FILE_METADATA )
        
    except Exception as e:
        
        HydrusData.PrintException( e )
        
        message = 'Trying to schedule clip and apng files for maintenance failed! Please let hydrus dev know!'
        
        self.pub_initial_message( message )
        
if version == 460:
    
    # v460 -> v461: re-queue clip files (metadata + thumbnail refit), and re-check metadata
    # on long (>1h), small (<64MB), reasonably sized videos.
    try:
        
        self._controller.frame_splash_status.SetSubtext( 'scheduling clip files for regen' )
        
        table_join = self.modules_files_storage.GetTableJoinLimitedByFileDomain( self.modules_services.combined_local_file_service_id, 'files_info', HC.CONTENT_STATUS_CURRENT )
        
        hash_ids = self._STL( self._Execute( 'SELECT hash_id FROM {} WHERE mime = ?;'.format( table_join ), ( HC.APPLICATION_CLIP, ) ) )
        
        self.modules_files_maintenance_queue.AddJobs( hash_ids, ClientFiles.REGENERATE_FILE_DATA_JOB_FILE_METADATA )
        self.modules_files_maintenance_queue.AddJobs( hash_ids, ClientFiles.REGENERATE_FILE_DATA_JOB_REFIT_THUMBNAIL )
        
        # duration is in ms, size in bytes: > 1 hour, < 64MiB, at least 480x360
        hash_ids = self._STL( self._Execute( 'SELECT hash_id FROM {} WHERE duration > ? AND size < ? AND width >= ? AND height >= ?;'.format( table_join ), ( 3600 * 1000, 64 * 1048576, 480, 360 ) ) )
        
        self.modules_files_maintenance_queue.AddJobs( hash_ids, ClientFiles.REGENERATE_FILE_DATA_JOB_FILE_METADATA )
        
    except Exception as e:
        
        HydrusData.PrintException( e )
        
        message = 'Trying to schedule clip files for maintenance failed! Please let hydrus dev know!'
        
        self.pub_initial_message( message )
        
if version == 461:
    
    # v461 -> v462: offer users with no rating services a default like/dislike
    # 'favourites' service, then refresh the pixiv gallery api parser.
    try:
        
        num_rating_services = len( self.modules_services.GetServiceIds( HC.RATINGS_SERVICES ) )
        
        if num_rating_services == 0:
            
            def ask_what_to_do_ratings_service():
                
                message = 'New clients now start with a simple like/dislike rating service. You are not new, but you have no rating services--would you like to get this default now and try ratings out?'
                
                from hydrus.client.gui import ClientGUIDialogsQuick
                
                result = ClientGUIDialogsQuick.GetYesNo( None, message, title = 'Get rating service?' )
                
                return result == QW.QDialog.Accepted
                
            
            # the question has to run on the Qt thread; block here until the user answers
            add_favourites = self._controller.CallBlockingToQt( None, ask_what_to_do_ratings_service )
            
            if add_favourites:
                
                ( service_key, service_type, name ) = ( CC.DEFAULT_FAVOURITES_RATING_SERVICE_KEY, HC.LOCAL_RATING_LIKE, 'favourites' )
                
                dictionary = ClientServices.GenerateDefaultServiceDictionary( service_type )
                
                from hydrus.client.metadata import ClientRatings
                
                dictionary[ 'shape' ] = ClientRatings.STAR
                
                # colour entries are ( edge_rgb, fill_rgb ) per rating state
                like_colours = {}
                
                like_colours[ ClientRatings.LIKE ] = ( ( 0, 0, 0 ), ( 240, 240, 65 ) )
                like_colours[ ClientRatings.DISLIKE ] = ( ( 0, 0, 0 ), ( 200, 80, 120 ) )
                like_colours[ ClientRatings.NULL ] = ( ( 0, 0, 0 ), ( 191, 191, 191 ) )
                like_colours[ ClientRatings.MIXED ] = ( ( 0, 0, 0 ), ( 95, 95, 95 ) )
                
                dictionary[ 'colours' ] = list( like_colours.items() )
                
                self._AddService( service_key, service_type, name, dictionary )
                
            
        
    except Exception as e:
        
        HydrusData.PrintException( e )
        
        message = 'Trying to add a default favourites service failed. Please let hydrus dev know!'
        
        self.pub_initial_message( message )
        
    
    #
    
    try:
        
        domain_manager = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_DOMAIN_MANAGER )
        
        domain_manager.Initialise()
        
        #
        
        # FIX: this previously passed ( 'name' ) -- a bare string, not a one-element tuple --
        # so OverwriteDefaultParsers would have iterated it character by character. The
        # trailing comma makes it the intended single-element tuple.
        domain_manager.OverwriteDefaultParsers( ( 'pixiv artist gallery page api parser new urls', ) )
        
        #
        
        self.modules_serialisable.SetJSONDump( domain_manager )
        
    except Exception as e:
        
        HydrusData.PrintException( e )
        
        message = 'Trying to update some downloader objects failed! Please let hydrus dev know!'
        
        self.pub_initial_message( message )
        
if version == 462:
try:
domain_manager = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_DOMAIN_MANAGER )
domain_manager.Initialise()
#
domain_manager.OverwriteDefaultGUGs( ( 'deviant art tag search', ) )
domain_manager.OverwriteDefaultParsers( ( 'deviant gallery page api parser (new cursor)', ) )
domain_manager.OverwriteDefaultURLClasses( ( 'deviant art tag gallery page api (cursor navigation)', ) )
#
domain_manager.TryToLinkURLClassesAndParsers()
#
self.modules_serialisable.SetJSONDump( domain_manager )
except Exception as e:
HydrusData.PrintException( e )
message = 'Trying to update some parsers failed! Please let hydrus dev know!'
self.pub_initial_message( message )
try:
self._controller.frame_splash_status.SetSubtext( 'scheduling ogg files for regen' )
table_join = self.modules_files_storage.GetTableJoinLimitedByFileDomain( self.modules_services.combined_local_file_service_id, 'files_info', HC.CONTENT_STATUS_CURRENT )
hash_ids = self._STL( self._Execute( 'SELECT hash_id FROM {} WHERE mime = ?;'.format( table_join ), ( HC.AUDIO_OGG, ) ) )
self.modules_files_maintenance_queue.AddJobs( hash_ids, ClientFiles.REGENERATE_FILE_DATA_JOB_FILE_METADATA )
self.modules_files_maintenance_queue.AddJobs( hash_ids, ClientFiles.REGENERATE_FILE_DATA_JOB_REFIT_THUMBNAIL )
except Exception as e:
HydrusData.PrintException( e )
message = 'Trying to schedule ogg files for maintenance failed! Please let hydrus dev know!'
self.pub_initial_message( message )
if version == 463:
result = self._Execute( 'SELECT 1 FROM sqlite_master WHERE name = ?;', ( 'deferred_physical_file_deletes', ) ).fetchone()
if result is None:
self._Execute( 'CREATE TABLE IF NOT EXISTS deferred_physical_file_deletes ( hash_id INTEGER PRIMARY KEY );' )
self._Execute( 'CREATE TABLE IF NOT EXISTS deferred_physical_thumbnail_deletes ( hash_id INTEGER PRIMARY KEY );' )
if version == 464:
try:
domain_manager = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_DOMAIN_MANAGER )
domain_manager.Initialise()
#
domain_manager.OverwriteDefaultParsers( ( 'gelbooru 0.2.x gallery page parser', ) )
#
domain_manager.TryToLinkURLClassesAndParsers()
#
self.modules_serialisable.SetJSONDump( domain_manager )
except Exception as e:
HydrusData.PrintException( e )
message = 'Trying to update some parsers failed! Please let hydrus dev know!'
self.pub_initial_message( message )
#
result = self.modules_services.GetServiceIds( ( HC.COMBINED_DELETED_FILE, ) )
if len( result ) == 0:
self._controller.frame_splash_status.SetText( 'creating new tag search data' )
dictionary = ClientServices.GenerateDefaultServiceDictionary( HC.COMBINED_DELETED_FILE )
self._AddService( CC.COMBINED_DELETED_FILE_SERVICE_KEY, HC.COMBINED_DELETED_FILE, 'all deleted files', dictionary )
#
# populate combined deleted files current files table
self.modules_files_storage.DropFilesTables( self.modules_services.combined_deleted_file_service_id )
self.modules_files_storage.GenerateFilesTables( self.modules_services.combined_deleted_file_service_id )
tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
for tag_service_id in tag_service_ids:
# this should make them empty, since no files yet
self.modules_tag_search.Drop( self.modules_services.combined_deleted_file_service_id, tag_service_id )
self.modules_tag_search.Generate( self.modules_services.combined_deleted_file_service_id, tag_service_id )
self._CacheSpecificMappingsDrop( self.modules_services.combined_deleted_file_service_id, tag_service_id )
self._CacheSpecificMappingsGenerate( self.modules_services.combined_deleted_file_service_id, tag_service_id )
combined_deleted_files_current_files_table_name = ClientDBFilesStorage.GenerateFilesTableName( self.modules_services.combined_deleted_file_service_id, HC.CONTENT_STATUS_CURRENT )
file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_COVERED_BY_COMBINED_DELETED_FILE )
for ( i, file_service_id ) in enumerate( file_service_ids ):
deleted_files_table_name = ClientDBFilesStorage.GenerateFilesTableName( file_service_id, HC.CONTENT_STATUS_DELETED )
for ( chunk_of_hash_ids, num_done, num_to_do ) in HydrusDB.ReadLargeIdQueryInSeparateChunks( self._c, 'SELECT hash_id FROM {};'.format( deleted_files_table_name ), 1024 ):
message = 'deleted files cache: service {}, done {}'.format( HydrusData.ConvertValueRangeToPrettyString( i + 1, len( file_service_ids ) ), HydrusData.ConvertValueRangeToPrettyString( num_done, num_to_do ) )
self._controller.frame_splash_status.SetSubtext( message )
for hash_id in chunk_of_hash_ids:
row = self._Execute( 'SELECT hash_id, timestamp FROM {} WHERE hash_id = ?;'.format( deleted_files_table_name ), ( hash_id, ) ).fetchone()
existing_row = self._Execute( 'SELECT hash_id, timestamp FROM {} WHERE hash_id = ?;'.format( combined_deleted_files_current_files_table_name ), ( hash_id, ) ).fetchone()
if existing_row is None:
rows = [ row ]
# this should now populate the tag caches and search cache
self._AddFiles( self.modules_services.combined_deleted_file_service_id, rows )
else:
# it doesn't really matter, but let's try to have the earliest timestamp here to start with, since that'll be roughly 'natural' going forwards
if row[1] is not None and ( existing_row[1] is None or row[1] < existing_row[1] ):
self._Execute( 'UPDATE {} SET timestamp = ? WHERE hash_id = ?;'.format( combined_deleted_files_current_files_table_name ), ( row[1], hash_id ) )
self.modules_db_maintenance.TouchAnalyzeNewTables()
self._cursor_transaction_wrapper.CommitAndBegin()
#
# ipfs is also getting specific caches and tag search too, so we'll do that here
tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
file_service_ids = self.modules_services.GetServiceIds( ( HC.IPFS, ) )
for file_service_id in file_service_ids:
hash_ids = self.modules_files_storage.GetCurrentHashIdsList( file_service_id )
for tag_service_id in tag_service_ids:
time.sleep( 0.01 )
self.modules_tag_search.Drop( file_service_id, tag_service_id )
self.modules_tag_search.Generate( file_service_id, tag_service_id )
self._CacheSpecificMappingsDrop( file_service_id, tag_service_id )
self._CacheSpecificMappingsCreateTables( file_service_id, tag_service_id )
self.modules_mappings_cache_specific_display.Generate( file_service_id, tag_service_id, populate_from_storage = False )
BLOCK_SIZE = 1000
for ( i, block_of_hash_ids ) in enumerate( HydrusData.SplitListIntoChunks( hash_ids, BLOCK_SIZE ) ):
with self._MakeTemporaryIntegerTable( block_of_hash_ids, 'hash_id' ) as temp_hash_id_table_name:
message = 'ipfs: {}_{} - {}'.format( file_service_id, tag_service_id, HydrusData.ConvertValueRangeToPrettyString( i * BLOCK_SIZE, len( hash_ids ) ) )
self._controller.frame_splash_status.SetSubtext( message )
self._CacheSpecificMappingsAddFiles( file_service_id, tag_service_id, block_of_hash_ids, temp_hash_id_table_name )
self.modules_mappings_cache_specific_display.AddFiles( file_service_id, tag_service_id, block_of_hash_ids, temp_hash_id_table_name )
self.modules_db_maintenance.TouchAnalyzeNewTables()
self._cursor_transaction_wrapper.CommitAndBegin()
#
result = self._Execute( 'SELECT 1 FROM sqlite_master WHERE name = ?;', ( 'has_icc_profile', ) ).fetchone()
if result is None:
try:
self._Execute( 'CREATE TABLE IF NOT EXISTS has_icc_profile ( hash_id INTEGER PRIMARY KEY );' )
self._controller.frame_splash_status.SetSubtext( 'scheduling files for icc profile scan' )
table_join = self.modules_files_storage.GetTableJoinLimitedByFileDomain( self.modules_services.combined_local_file_service_id, 'files_info', HC.CONTENT_STATUS_CURRENT )
hash_ids = self._STL( self._Execute( 'SELECT hash_id FROM {} WHERE mime IN {};'.format( table_join, HydrusData.SplayListForDB( HC.FILES_THAT_CAN_HAVE_ICC_PROFILE ) ) ) )
self.modules_files_maintenance_queue.AddJobs( hash_ids, ClientFiles.REGENERATE_FILE_DATA_JOB_FILE_HAS_ICC_PROFILE )
except Exception as e:
HydrusData.PrintException( e )
message = 'Trying to schedule image files for icc maintenance failed! Please let hydrus dev know!'
self.pub_initial_message( message )
#
result = self._Execute( 'SELECT 1 FROM sqlite_master WHERE name = ?;', ( 'pixel_hash_map', ) ).fetchone()
if result is None:
try:
self._Execute( 'CREATE TABLE IF NOT EXISTS pixel_hash_map ( hash_id INTEGER, pixel_hash_id INTEGER, PRIMARY KEY ( hash_id, pixel_hash_id ) );' )
self._CreateIndex( 'pixel_hash_map', [ 'pixel_hash_id' ] )
self._controller.frame_splash_status.SetSubtext( 'scheduling files for pixel hash generation' )
table_join = self.modules_files_storage.GetTableJoinLimitedByFileDomain( self.modules_services.combined_local_file_service_id, 'files_info', HC.CONTENT_STATUS_CURRENT )
hash_ids = self._STL( self._Execute( 'SELECT hash_id FROM {} WHERE mime IN {};'.format( table_join, HydrusData.SplayListForDB( HC.FILES_THAT_CAN_HAVE_PIXEL_HASH ) ) ) )
self.modules_files_maintenance_queue.AddJobs( hash_ids, ClientFiles.REGENERATE_FILE_DATA_JOB_PIXEL_HASH )
except Exception as e:
HydrusData.PrintException( e )
message = 'Trying to schedule image files for pixel hash maintenance failed! Please let hydrus dev know!'
self.pub_initial_message( message )
if version == 467:
try:
self._controller.frame_splash_status.SetSubtext( 'fixing a pixel duplicates storage problem' )
bad_ids = self._STS( self._Execute( 'SELECT hash_id FROM pixel_hash_map WHERE hash_id = pixel_hash_id;' ) )
self.modules_files_maintenance_queue.AddJobs( bad_ids, ClientFiles.REGENERATE_FILE_DATA_JOB_PIXEL_HASH )
self._Execute( 'DELETE FROM pixel_hash_map WHERE hash_id = pixel_hash_id;' )
except Exception as e:
HydrusData.PrintException( e )
message = 'Trying to schedule image files for pixel hash maintenance failed! Please let hydrus dev know!'
self.pub_initial_message( message )
if version == 469:
try:
self._controller.frame_splash_status.SetSubtext( 'scheduling video for better silent audio track check' )
table_join = self.modules_files_storage.GetTableJoinLimitedByFileDomain( self.modules_services.combined_local_file_service_id, 'files_info', HC.CONTENT_STATUS_CURRENT )
hash_ids = self._STL( self._Execute( 'SELECT hash_id FROM {} WHERE mime IN {} AND has_audio = ?;'.format( table_join, HydrusData.SplayListForDB( HC.VIDEO ) ), ( True, ) ) )
self.modules_files_maintenance_queue.AddJobs( hash_ids, ClientFiles.REGENERATE_FILE_DATA_JOB_FILE_METADATA )
except Exception as e:
HydrusData.PrintException( e )
message = 'Trying to schedule audible video files for audio track recheck failed! Please let hydrus dev know!'
self.pub_initial_message( message )
if version == 470:
( result, ) = self._Execute( 'SELECT sql FROM sqlite_master WHERE name = ?;', ( 'file_viewing_stats', ) ).fetchone()
if 'preview_views' in result:
self._controller.frame_splash_status.SetSubtext( 'reworking file viewing stats' )
self._Execute( 'ALTER TABLE file_viewing_stats RENAME TO file_viewing_stats_old;' )
self._Execute( 'CREATE TABLE IF NOT EXISTS file_viewing_stats ( hash_id INTEGER, canvas_type INTEGER, last_viewed_timestamp INTEGER, views INTEGER, viewtime INTEGER, PRIMARY KEY ( hash_id, canvas_type ) );' )
self._CreateIndex( 'file_viewing_stats', [ 'last_viewed_timestamp' ] )
self._CreateIndex( 'file_viewing_stats', [ 'views' ] )
self._CreateIndex( 'file_viewing_stats', [ 'viewtime' ] )
self._Execute( 'INSERT INTO file_viewing_stats SELECT hash_id, ?, ?, preview_views, preview_viewtime FROM file_viewing_stats_old;', ( CC.CANVAS_PREVIEW, None ) )
self._Execute( 'INSERT INTO file_viewing_stats SELECT hash_id, ?, ?, media_views, media_viewtime FROM file_viewing_stats_old;', ( CC.CANVAS_MEDIA_VIEWER, None ) )
self.modules_db_maintenance.AnalyzeTable( 'file_viewing_stats' )
self._Execute( 'DROP TABLE file_viewing_stats_old;' )
if version == 472:
try:
from hydrus.client.gui import ClientGUIShortcuts
main_gui = self.modules_serialisable.GetJSONDumpNamed( HydrusSerialisable.SERIALISABLE_TYPE_SHORTCUT_SET, dump_name = 'main_gui' )
palette_shortcut = ClientGUIShortcuts.Shortcut( ClientGUIShortcuts.SHORTCUT_TYPE_KEYBOARD_CHARACTER, ord( 'P' ), ClientGUIShortcuts.SHORTCUT_PRESS_TYPE_PRESS, [ ClientGUIShortcuts.SHORTCUT_MODIFIER_CTRL ] )
palette_command = CAC.ApplicationCommand.STATICCreateSimpleCommand( CAC.SIMPLE_OPEN_COMMAND_PALETTE )
result = main_gui.GetCommand( palette_shortcut )
if result is None:
main_gui.SetCommand( palette_shortcut, palette_command )
self.modules_serialisable.SetJSONDump( main_gui )
except Exception as e:
HydrusData.PrintException( e )
message = 'The new palette shortcut failed to set! This is not super important, but hydev would be interested in seeing the error that was printed to the log.'
self.pub_initial_message( message )
if version == 473:
result = self._Execute( 'SELECT 1 FROM sqlite_master WHERE name = ?;', ( 'archive_timestamps', ) ).fetchone()
if result is None:
self._Execute( 'CREATE TABLE IF NOT EXISTS archive_timestamps ( hash_id INTEGER PRIMARY KEY, archived_timestamp INTEGER );' )
self._CreateIndex( 'archive_timestamps', [ 'archived_timestamp' ] )
try:
location_context = ClientLocation.LocationContext( current_service_keys = ( CC.COMBINED_LOCAL_FILE_SERVICE_KEY, ) )
db_location_context = self.modules_files_storage.GetDBLocationContext( location_context )
operator = '>'
num_relationships = 0
dupe_type = HC.DUPLICATE_POTENTIAL
dupe_hash_ids = self.modules_files_duplicates.DuplicatesGetHashIdsFromDuplicateCountPredicate( db_location_context, operator, num_relationships, dupe_type )
with self._MakeTemporaryIntegerTable( dupe_hash_ids, 'hash_id' ) as temp_hash_ids_table_name:
hash_ids = self._STS( self._Execute( 'SELECT hash_id FROM {} CROSS JOIN files_info USING ( hash_id ) WHERE mime IN {};'.format( temp_hash_ids_table_name, HydrusData.SplayListForDB( ( HC.IMAGE_GIF, HC.IMAGE_PNG, HC.IMAGE_TIFF ) ) ), ) )
self.modules_files_maintenance_queue.AddJobs( hash_ids, ClientFiles.REGENERATE_FILE_DATA_JOB_PIXEL_HASH )
except Exception as e:
HydrusData.PrintException( e )
message = 'Some pixel hash regen scheduling failed to set! This is not super important, but hydev would be interested in seeing the error that was printed to the log.'
self.pub_initial_message( message )
if version == 474:
try:
# ok we have improved apng detection now, so let's efficiently guess which of our pngs could be apngs for rescan
# IRL data of some 2-frame (i.e. minimal inaccuracy) apngs: 1.16MB @ 908x1,214 and 397KB @ 500x636, which for a single frame calculation is bitrates of 1.08 bits/pixel and 1.28 bits/pixel
# most apngs are going to be above this fake 1-frame bitrate
# as an aside, IRL data of some chunky pngs give about 2.5 bits/pixel, efficient screenshots and monochome tend to be around 0.2
# real apngs divided by number of frames tend to be around 0.05 to 0.2 to 1.0
# so, let's pull all the pngs with bitrate over 0.85 and schedule them for rescan
table_join = self.modules_files_storage.GetTableJoinLimitedByFileDomain( self.modules_services.combined_local_file_service_id, 'files_info', HC.CONTENT_STATUS_CURRENT )
hash_ids = self._STL( self._Execute( 'SELECT hash_id FROM {} WHERE mime = ? AND size / ( width * height ) > ?;'.format( table_join ), ( HC.IMAGE_PNG, 0.85 ) ) )
self.modules_files_maintenance_queue.AddJobs( hash_ids, ClientFiles.REGENERATE_FILE_DATA_JOB_FILE_METADATA )
except Exception as e:
HydrusData.PrintException( e )
message = 'Some apng regen scheduling failed to set! This is not super important, but hydev would be interested in seeing the error that was printed to the log.'
self.pub_initial_message( message )
try:
table_join = self.modules_files_storage.GetTableJoinLimitedByFileDomain( self.modules_services.combined_local_file_service_id, 'files_info', HC.CONTENT_STATUS_CURRENT )
hash_ids = self._STL( self._Execute( 'SELECT hash_id FROM {} WHERE mime = ?;'.format( table_join ), ( HC.AUDIO_M4A, ) ) )
self.modules_files_maintenance_queue.AddJobs( hash_ids, ClientFiles.REGENERATE_FILE_DATA_JOB_FILE_METADATA )
except Exception as e:
HydrusData.PrintException( e )
message = 'Some mp4 regen scheduling failed to set! This is not super important, but hydev would be interested in seeing the error that was printed to the log.'
self.pub_initial_message( message )
try:
domain_manager = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_DOMAIN_MANAGER )
domain_manager.Initialise()
#
domain_manager.OverwriteDefaultParsers( ( 'deviant art file extended_fetch parser', ) )
#
from hydrus.client.networking import ClientNetworkingContexts
sank_network_context = ClientNetworkingContexts.NetworkContext( CC.NETWORK_CONTEXT_DOMAIN, 'sankakucomplex.com' )
network_contexts_to_custom_header_dicts = domain_manager.GetNetworkContextsToCustomHeaderDicts()
if sank_network_context in network_contexts_to_custom_header_dicts:
custom_header_dict = network_contexts_to_custom_header_dicts[ sank_network_context ]
if 'User-Agent' in custom_header_dict:
( header, verified, reason ) = custom_header_dict[ 'User-Agent' ]
if header == 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:56.0) Gecko/20100101 Firefox/56.0':
header = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:97.0) Gecko/20100101 Firefox/97.0'
custom_header_dict[ 'User-Agent' ] = ( header, verified, reason )
domain_manager.SetNetworkContextsToCustomHeaderDicts( network_contexts_to_custom_header_dicts )
#
domain_manager.TryToLinkURLClassesAndParsers()
#
self.modules_serialisable.SetJSONDump( domain_manager )
except Exception as e:
HydrusData.PrintException( e )
message = 'Trying to update some parsers failed! Please let hydrus dev know!'
self.pub_initial_message( message )
if version == 475:
result = self._Execute( 'SELECT 1 FROM sqlite_master WHERE name = ?;', ( 'file_domain_modified_timestamps', ) ).fetchone()
if result is None:
self._Execute( 'CREATE TABLE IF NOT EXISTS file_domain_modified_timestamps ( hash_id INTEGER, domain_id INTEGER, file_modified_timestamp INTEGER, PRIMARY KEY ( hash_id, domain_id ) );' )
self._CreateIndex( 'file_domain_modified_timestamps', [ 'file_modified_timestamp' ] )
if version == 476:
try:
# fixed apng duration calculation
table_join = self.modules_files_storage.GetTableJoinLimitedByFileDomain( self.modules_services.combined_local_file_service_id, 'files_info', HC.CONTENT_STATUS_CURRENT )
hash_ids = self._STL( self._Execute( 'SELECT hash_id FROM {} WHERE mime = ?;'.format( table_join ), ( HC.IMAGE_APNG, ) ) )
self.modules_files_maintenance_queue.AddJobs( hash_ids, ClientFiles.REGENERATE_FILE_DATA_JOB_FILE_METADATA )
except Exception as e:
HydrusData.PrintException( e )
message = 'Some apng regen scheduling failed to set! This is not super important, but hydev would be interested in seeing the error that was printed to the log.'
self.pub_initial_message( message )
try:
domain_manager = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_DOMAIN_MANAGER )
domain_manager.Initialise()
#
domain_manager.OverwriteDefaultParsers( ( 'nitter tweet parser', 'nitter tweet parser (video from koto.reisen)' ) )
#
domain_manager.TryToLinkURLClassesAndParsers()
#
self.modules_serialisable.SetJSONDump( domain_manager )
except Exception as e:
HydrusData.PrintException( e )
message = 'Trying to update some parsers failed! Please let hydrus dev know!'
self.pub_initial_message( message )
self._controller.frame_splash_status.SetTitleText( 'updated db to v{}'.format( HydrusData.ToHumanInt( version + 1 ) ) )
self._Execute( 'UPDATE version SET version = ?;', ( version + 1, ) )
def _UpdateMappings( self, tag_service_id: int, mappings_ids = None, deleted_mappings_ids = None, pending_mappings_ids = None, pending_rescinded_mappings_ids = None, petitioned_mappings_ids = None, petitioned_rescinded_mappings_ids = None ):
    """
    Apply a batch of mapping content updates for one tag service, keeping the
    storage tables, the combined-display and specific mapping caches, the a/c
    count caches, and the service_info totals in sync.
    
    Each *_mappings_ids argument is an iterable of ( tag_id, hash_ids ) pairs,
    except petitioned_mappings_ids, which carries ( tag_id, hash_ids, reason_id ).
    Any argument left as None is treated as empty.
    """
    
    ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = ClientDBMappingsStorage.GenerateMappingsTableNames( tag_service_id )
    
    # normalise all the optional batches to empty lists
    if mappings_ids is None: mappings_ids = []
    if deleted_mappings_ids is None: deleted_mappings_ids = []
    if pending_mappings_ids is None: pending_mappings_ids = []
    if pending_rescinded_mappings_ids is None: pending_rescinded_mappings_ids = []
    if petitioned_mappings_ids is None: petitioned_mappings_ids = []
    if petitioned_rescinded_mappings_ids is None: petitioned_rescinded_mappings_ids = []
    
    # strip rows that would be no-ops against current table state, so the row
    # counts gathered below reflect real changes
    mappings_ids = self._FilterExistingUpdateMappings( tag_service_id, mappings_ids, HC.CONTENT_UPDATE_ADD )
    deleted_mappings_ids = self._FilterExistingUpdateMappings( tag_service_id, deleted_mappings_ids, HC.CONTENT_UPDATE_DELETE )
    pending_mappings_ids = self._FilterExistingUpdateMappings( tag_service_id, pending_mappings_ids, HC.CONTENT_UPDATE_PEND )
    pending_rescinded_mappings_ids = self._FilterExistingUpdateMappings( tag_service_id, pending_rescinded_mappings_ids, HC.CONTENT_UPDATE_RESCIND_PEND )
    
    # tags that are 'chained' in the actual display context (presumably via
    # sibling/parent implications--confirm against the display module) take the
    # ...ForChained display-cache path in the loops below; unchained tags get
    # their actual counts updated directly
    tag_ids_to_filter_chained = { tag_id for ( tag_id, hash_ids ) in itertools.chain.from_iterable( ( mappings_ids, deleted_mappings_ids, pending_mappings_ids, pending_rescinded_mappings_ids ) ) }
    chained_tag_ids = self.modules_tag_display.FilterChained( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, tag_ids_to_filter_chained )
    
    file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES )
    
    # running deltas, flushed into service_info at the end
    change_in_num_mappings = 0
    change_in_num_deleted_mappings = 0
    change_in_num_pending_mappings = 0
    change_in_num_petitioned_mappings = 0
    change_in_num_files = 0
    
    hash_ids_lists = ( hash_ids for ( tag_id, hash_ids ) in itertools.chain.from_iterable( ( mappings_ids, pending_mappings_ids ) ) )
    hash_ids_being_added = { hash_id for hash_id in itertools.chain.from_iterable( hash_ids_lists ) }
    
    hash_ids_lists = ( hash_ids for ( tag_id, hash_ids ) in itertools.chain.from_iterable( ( deleted_mappings_ids, pending_rescinded_mappings_ids ) ) )
    hash_ids_being_removed = { hash_id for hash_id in itertools.chain.from_iterable( hash_ids_lists ) }
    
    hash_ids_being_altered = hash_ids_being_added.union( hash_ids_being_removed )
    
    filtered_hashes_generator = self._CacheSpecificMappingsGetFilteredHashesGenerator( file_service_ids, tag_service_id, hash_ids_being_altered )
    
    # snapshot which altered files already have at least one current mapping,
    # so the change in 'num files with tags' can be computed after the loops
    self._Execute( 'CREATE TABLE IF NOT EXISTS mem.temp_hash_ids ( hash_id INTEGER );' )
    self._ExecuteMany( 'INSERT INTO temp_hash_ids ( hash_id ) VALUES ( ? );', ( ( hash_id, ) for hash_id in hash_ids_being_altered ) )
    pre_existing_hash_ids = self._STS( self._Execute( 'SELECT hash_id FROM temp_hash_ids WHERE EXISTS ( SELECT 1 FROM {} WHERE hash_id = temp_hash_ids.hash_id );'.format( current_mappings_table_name ) ) )
    num_files_added = len( hash_ids_being_added.difference( pre_existing_hash_ids ) )
    change_in_num_files += num_files_added
    
    # BIG NOTE:
    # after testing some situations, it makes nicest logical sense to interleave all cache updates into the loops
    # otherwise, when there are conflicts due to sheer duplication or the display system applying two tags at once with the same implications, we end up relying on an out-of-date/unsynced (in cache terms) specific cache for combined etc...
    # I now extend this to counts, argh. this is not great in overhead terms, but many optimisations rely on a/c counts now, and the fallback is the combined storage ac count cache
    
    if len( mappings_ids ) > 0:
        
        for ( tag_id, hash_ids ) in mappings_ids:
            
            if tag_id in chained_tag_ids:
                
                self._CacheCombinedFilesDisplayMappingsAddMappingsForChained( tag_service_id, tag_id, hash_ids )
                
            
            # an add clears any deleted or pending rows for the same pairs
            self._ExecuteMany( 'DELETE FROM ' + deleted_mappings_table_name + ' WHERE tag_id = ? AND hash_id = ?;', ( ( tag_id, hash_id ) for hash_id in hash_ids ) )
            num_deleted_deleted = self._GetRowCount()
            self._ExecuteMany( 'DELETE FROM ' + pending_mappings_table_name + ' WHERE tag_id = ? AND hash_id = ?;', ( ( tag_id, hash_id ) for hash_id in hash_ids ) )
            num_pending_deleted = self._GetRowCount()
            self._ExecuteMany( 'INSERT OR IGNORE INTO ' + current_mappings_table_name + ' VALUES ( ?, ? );', ( ( tag_id, hash_id ) for hash_id in hash_ids ) )
            num_current_inserted = self._GetRowCount()
            
            change_in_num_deleted_mappings -= num_deleted_deleted
            change_in_num_pending_mappings -= num_pending_deleted
            change_in_num_mappings += num_current_inserted
            
            self.modules_mappings_counts_update.UpdateCounts( ClientTags.TAG_DISPLAY_STORAGE, self.modules_services.combined_file_service_id, tag_service_id, [ ( tag_id, num_current_inserted, - num_pending_deleted ) ] )
            
            if tag_id not in chained_tag_ids:
                
                # unchained: actual counts track storage counts one-to-one
                self.modules_mappings_counts_update.UpdateCounts( ClientTags.TAG_DISPLAY_ACTUAL, self.modules_services.combined_file_service_id, tag_service_id, [ ( tag_id, num_current_inserted, - num_pending_deleted ) ] )
                
            
            self._CacheSpecificMappingsAddMappings( tag_service_id, tag_id, hash_ids, filtered_hashes_generator )
            
        
    
    if len( deleted_mappings_ids ) > 0:
        
        for ( tag_id, hash_ids ) in deleted_mappings_ids:
            
            if tag_id in chained_tag_ids:
                
                self._CacheCombinedFilesDisplayMappingsDeleteMappingsForChained( tag_service_id, tag_id, hash_ids )
                
            
            # a delete clears current and petitioned rows and records the
            # pair in the deleted table
            self._ExecuteMany( 'DELETE FROM ' + current_mappings_table_name + ' WHERE tag_id = ? AND hash_id = ?;', ( ( tag_id, hash_id ) for hash_id in hash_ids ) )
            num_current_deleted = self._GetRowCount()
            self._ExecuteMany( 'DELETE FROM ' + petitioned_mappings_table_name + ' WHERE tag_id = ? AND hash_id = ?;', ( ( tag_id, hash_id ) for hash_id in hash_ids ) )
            num_petitions_deleted = self._GetRowCount()
            self._ExecuteMany( 'INSERT OR IGNORE INTO ' + deleted_mappings_table_name + ' VALUES ( ?, ? );', ( ( tag_id, hash_id ) for hash_id in hash_ids ) )
            num_deleted_inserted = self._GetRowCount()
            
            change_in_num_mappings -= num_current_deleted
            change_in_num_petitioned_mappings -= num_petitions_deleted
            change_in_num_deleted_mappings += num_deleted_inserted
            
            self.modules_mappings_counts_update.ReduceCounts( ClientTags.TAG_DISPLAY_STORAGE, self.modules_services.combined_file_service_id, tag_service_id, [ ( tag_id, num_current_deleted, 0 ) ] )
            
            if tag_id not in chained_tag_ids:
                
                self.modules_mappings_counts_update.ReduceCounts( ClientTags.TAG_DISPLAY_ACTUAL, self.modules_services.combined_file_service_id, tag_service_id, [ ( tag_id, num_current_deleted, 0 ) ] )
                
            
            self._CacheSpecificMappingsDeleteMappings( tag_service_id, tag_id, hash_ids, filtered_hashes_generator )
            
        
    
    if len( pending_mappings_ids ) > 0:
        
        for ( tag_id, hash_ids ) in pending_mappings_ids:
            
            if tag_id in chained_tag_ids:
                
                self._CacheCombinedFilesDisplayMappingsPendMappingsForChained( tag_service_id, tag_id, hash_ids )
                
            
            self._ExecuteMany( 'INSERT OR IGNORE INTO ' + pending_mappings_table_name + ' VALUES ( ?, ? );', ( ( tag_id, hash_id ) for hash_id in hash_ids ) )
            num_pending_inserted = self._GetRowCount()
            
            change_in_num_pending_mappings += num_pending_inserted
            
            self.modules_mappings_counts_update.AddCounts( ClientTags.TAG_DISPLAY_STORAGE, self.modules_services.combined_file_service_id, tag_service_id, [ ( tag_id, 0, num_pending_inserted ) ] )
            
            if tag_id not in chained_tag_ids:
                
                self.modules_mappings_counts_update.AddCounts( ClientTags.TAG_DISPLAY_ACTUAL, self.modules_services.combined_file_service_id, tag_service_id, [ ( tag_id, 0, num_pending_inserted ) ] )
                
            
            self._CacheSpecificMappingsPendMappings( tag_service_id, tag_id, hash_ids, filtered_hashes_generator )
            
        
    
    if len( pending_rescinded_mappings_ids ) > 0:
        
        for ( tag_id, hash_ids ) in pending_rescinded_mappings_ids:
            
            if tag_id in chained_tag_ids:
                
                self._CacheCombinedFilesDisplayMappingsRescindPendingMappingsForChained( tag_service_id, tag_id, hash_ids )
                
            
            self._ExecuteMany( 'DELETE FROM ' + pending_mappings_table_name + ' WHERE tag_id = ? AND hash_id = ?;', ( ( tag_id, hash_id ) for hash_id in hash_ids ) )
            num_pending_deleted = self._GetRowCount()
            
            change_in_num_pending_mappings -= num_pending_deleted
            
            self.modules_mappings_counts_update.ReduceCounts( ClientTags.TAG_DISPLAY_STORAGE, self.modules_services.combined_file_service_id, tag_service_id, [ ( tag_id, 0, num_pending_deleted ) ] )
            
            if tag_id not in chained_tag_ids:
                
                self.modules_mappings_counts_update.ReduceCounts( ClientTags.TAG_DISPLAY_ACTUAL, self.modules_services.combined_file_service_id, tag_service_id, [ ( tag_id, 0, num_pending_deleted ) ] )
                
            
            self._CacheSpecificMappingsRescindPendingMappings( tag_service_id, tag_id, hash_ids, filtered_hashes_generator )
            
        
    
    #
    
    # measure which altered files still have a current mapping, then drop the
    # snapshot table; files that had a mapping before, lost all of them, and
    # regained none count against SERVICE_INFO_NUM_FILES
    post_existing_hash_ids = self._STS( self._Execute( 'SELECT hash_id FROM temp_hash_ids WHERE EXISTS ( SELECT 1 FROM {} WHERE hash_id = temp_hash_ids.hash_id );'.format( current_mappings_table_name ) ) )
    
    self._Execute( 'DROP TABLE temp_hash_ids;' )
    
    num_files_removed = len( pre_existing_hash_ids.intersection( hash_ids_being_removed ).difference( post_existing_hash_ids ) )
    change_in_num_files -= num_files_removed
    
    # petitions only touch the petitioned table and its count--no cache work
    for ( tag_id, hash_ids, reason_id ) in petitioned_mappings_ids:
        
        self._ExecuteMany( 'INSERT OR IGNORE INTO ' + petitioned_mappings_table_name + ' VALUES ( ?, ?, ? );', [ ( tag_id, hash_id, reason_id ) for hash_id in hash_ids ] )
        num_petitions_inserted = self._GetRowCount()
        
        change_in_num_petitioned_mappings += num_petitions_inserted
        
    
    for ( tag_id, hash_ids ) in petitioned_rescinded_mappings_ids:
        
        self._ExecuteMany( 'DELETE FROM ' + petitioned_mappings_table_name + ' WHERE tag_id = ? AND hash_id = ?;', ( ( tag_id, hash_id ) for hash_id in hash_ids ) )
        num_petitions_deleted = self._GetRowCount()
        
        change_in_num_petitioned_mappings -= num_petitions_deleted
        
    
    # flush the accumulated deltas into the service_info totals
    service_info_updates = []
    
    if change_in_num_mappings != 0: service_info_updates.append( ( change_in_num_mappings, tag_service_id, HC.SERVICE_INFO_NUM_MAPPINGS ) )
    if change_in_num_deleted_mappings != 0: service_info_updates.append( ( change_in_num_deleted_mappings, tag_service_id, HC.SERVICE_INFO_NUM_DELETED_MAPPINGS ) )
    if change_in_num_pending_mappings != 0: service_info_updates.append( ( change_in_num_pending_mappings, tag_service_id, HC.SERVICE_INFO_NUM_PENDING_MAPPINGS ) )
    if change_in_num_petitioned_mappings != 0: service_info_updates.append( ( change_in_num_petitioned_mappings, tag_service_id, HC.SERVICE_INFO_NUM_PETITIONED_MAPPINGS ) )
    if change_in_num_files != 0: service_info_updates.append( ( change_in_num_files, tag_service_id, HC.SERVICE_INFO_NUM_FILES ) )
    
    if len( service_info_updates ) > 0: self._ExecuteMany( 'UPDATE service_info SET info = info + ? WHERE service_id = ? AND info_type = ?;', service_info_updates )
def _UpdateServerServices( self, admin_service_key, serverside_services, service_keys_to_access_keys, deletee_service_keys ):
admin_service_id = self.modules_services.GetServiceId( admin_service_key )
admin_service = self.modules_services.GetService( admin_service_id )
admin_credentials = admin_service.GetCredentials()
( host, admin_port ) = admin_credentials.GetAddress()
#
current_service_keys = self.modules_services.GetServiceKeys()
for serverside_service in serverside_services:
service_key = serverside_service.GetServiceKey()
if service_key in current_service_keys:
service_id = self.modules_services.GetServiceId( service_key )
service = self.modules_services.GetService( service_id )
credentials = service.GetCredentials()
upnp_port = serverside_service.GetUPnPPort()
if upnp_port is None:
port = serverside_service.GetPort()
credentials.SetAddress( host, port )
else:
credentials.SetAddress( host, upnp_port )
service.SetCredentials( credentials )
self.modules_services.UpdateService( service )
else:
if service_key in service_keys_to_access_keys:
service_type = serverside_service.GetServiceType()
name = serverside_service.GetName()
service = ClientServices.GenerateService( service_key, service_type, name )
access_key = service_keys_to_access_keys[ service_key ]
credentials = service.GetCredentials()
upnp_port = serverside_service.GetUPnPPort()
if upnp_port is None:
port = serverside_service.GetPort()
credentials.SetAddress( host, port )
else:
credentials.SetAddress( host, upnp_port )
credentials.SetAccessKey( access_key )
service.SetCredentials( credentials )
( service_key, service_type, name, dictionary ) = service.ToTuple()
self._AddService( service_key, service_type, name, dictionary )
for service_key in deletee_service_keys:
try:
self.modules_services.GetServiceId( service_key )
except HydrusExceptions.DataMissing:
continue
self._DeleteService( service_id )
self._cursor_transaction_wrapper.pub_after_job( 'notify_account_sync_due' )
self._cursor_transaction_wrapper.pub_after_job( 'notify_new_services_data' )
self._cursor_transaction_wrapper.pub_after_job( 'notify_new_services_gui' )
self._cursor_transaction_wrapper.pub_after_job( 'notify_new_pending' )
def _UpdateServices( self, services ):
current_service_keys = self.modules_services.GetServiceKeys()
future_service_keys = { service.GetServiceKey() for service in services }
for service_key in current_service_keys:
if service_key not in future_service_keys:
service_id = self.modules_services.GetServiceId( service_key )
self._DeleteService( service_id )
for service in services:
service_key = service.GetServiceKey()
if service_key in current_service_keys:
self.modules_services.UpdateService( service )
else:
( service_key, service_type, name, dictionary ) = service.ToTuple()
self._AddService( service_key, service_type, name, dictionary )
self._cursor_transaction_wrapper.pub_after_job( 'notify_account_sync_due' )
self._cursor_transaction_wrapper.pub_after_job( 'notify_new_services_data' )
self._cursor_transaction_wrapper.pub_after_job( 'notify_new_services_gui' )
self._cursor_transaction_wrapper.pub_after_job( 'notify_new_pending' )
def _Vacuum( self, names: typing.Collection[ str ], maintenance_mode = HC.MAINTENANCE_FORCED, stop_time = None, force_vacuum = False ):
    """Run SQLite VACUUM on the named client database files.
    
    names: db-file nicknames, mapped to real paths through self._db_filenames.
    maintenance_mode/stop_time: forwarded to the controller so it can abort the
        work if the user asks for maintenance to stop.
    force_vacuum: not referenced in this body -- NOTE(review): possibly vestigial
        or consumed only by callers via the signature; confirm.
    
    The live db connection is closed for the duration (VACUUM rebuilds the file
    and needs it to itself) and re-opened before returning.
    """
    
    # first pass: filter down to the databases that can actually be vacuumed now
    ok_names = []
    
    for name in names:
        
        db_path = os.path.join( self._db_dir, self._db_filenames[ name ] )
        
        try:
            
            # raises when a vacuum is not currently possible (per HydrusDB's checks)
            HydrusDB.CheckCanVacuumCursor( db_path, self._c )
            
        except Exception as e:
            
            # only complain once per session to avoid log spam
            if not self._have_printed_a_cannot_vacuum_message:
                
                HydrusData.Print( 'Cannot vacuum "{}": {}'.format( db_path, e ) )
                
                self._have_printed_a_cannot_vacuum_message = True
                
            
            continue
            
        
        if self._controller.ShouldStopThisWork( maintenance_mode, stop_time = stop_time ):
            
            return
            
        
        ok_names.append( name )
        
    
    if len( ok_names ) == 0:
        
        HydrusData.ShowText( 'A call to vacuum was made, but none of those databases could be vacuumed! Maybe drive free space is tight and/or recently changed?' )
        
        return
        
    
    # the modal popup is only published once we know a vacuum will really start
    job_key_pubbed = False
    
    job_key = ClientThreading.JobKey()
    
    job_key.SetStatusTitle( 'database maintenance - vacuum' )
    
    # drop our connection so VACUUM has exclusive access to the files
    self._CloseDBConnection()
    
    try:
        
        for name in ok_names:
            
            time.sleep( 1 )
            
            try:
                
                db_path = os.path.join( self._db_dir, self._db_filenames[ name ] )
                
                if not job_key_pubbed:
                    
                    self._controller.pub( 'modal_message', job_key )
                    
                    job_key_pubbed = True
                    
                
                self._controller.frame_splash_status.SetText( 'vacuuming ' + name )
                job_key.SetVariable( 'popup_text_1', 'vacuuming ' + name )
                
                started = HydrusData.GetNowPrecise()
                
                HydrusDB.VacuumDB( db_path )
                
                time_took = HydrusData.GetNowPrecise() - started
                
                HydrusData.Print( 'Vacuumed ' + db_path + ' in ' + HydrusData.TimeDeltaToPrettyTimeDelta( time_took ) )
                
            except Exception as e:
                
                HydrusData.Print( 'vacuum failed:' )
                
                HydrusData.ShowException( e )
                
                text = 'An attempt to vacuum the database failed.'
                text += os.linesep * 2
                text += 'If the error is not obvious, please contact the hydrus developer.'
                
                HydrusData.ShowText( text )
                
                # reconnect before bailing; the finally below reconnects again,
                # which _InitDBConnection presumably tolerates -- TODO confirm
                self._InitDBConnection()
                
                return
                
            
        
        job_key.SetVariable( 'popup_text_1', 'cleaning up' )
        
    finally:
        
        self._InitDBConnection()
        
        # NOTE(review): this registers only the final loop value of 'name', and it
        # also runs when the except path above returns after a failed vacuum --
        # looks suspicious for multi-file vacuums; verify against upstream intent
        self.modules_db_maintenance.RegisterSuccessfulVacuum( name )
        
    
    job_key.SetVariable( 'popup_text_1', 'done!' )
    
    job_key.Finish()
    
    job_key.Delete( 10 )
def _Write( self, action, *args, **kwargs ):
    """Dispatch a db write command to its handler method.
    
    action: the write command string submitted through the controller's write queue.
    *args/**kwargs: forwarded untouched to the handler.
    
    Returns the handler's result for the few actions that produce one (e.g.
    'import_file', 'process_repository_content'); None for everything else.
    Raises Exception for an unrecognised action string.
    """
    
    result = None
    
    # plain string dispatch; handlers that return something assign into 'result'
    if action == 'analyze': self.modules_db_maintenance.AnalyzeDueTables( *args, **kwargs )
    elif action == 'associate_repository_update_hashes': self.modules_repositories.AssociateRepositoryUpdateHashes( *args, **kwargs )
    elif action == 'backup': self._Backup( *args, **kwargs )
    elif action == 'clear_deferred_physical_delete': self.modules_files_storage.ClearDeferredPhysicalDelete( *args, **kwargs )
    elif action == 'clear_false_positive_relations': self.modules_files_duplicates.DuplicatesClearAllFalsePositiveRelationsFromHashes( *args, **kwargs )
    elif action == 'clear_false_positive_relations_between_groups': self.modules_files_duplicates.DuplicatesClearFalsePositiveRelationsBetweenGroupsFromHashes( *args, **kwargs )
    elif action == 'clear_orphan_file_records': self._ClearOrphanFileRecords( *args, **kwargs )
    elif action == 'clear_orphan_tables': self._ClearOrphanTables( *args, **kwargs )
    elif action == 'content_updates': self._ProcessContentUpdates( *args, **kwargs )
    elif action == 'cull_file_viewing_statistics': self._CullFileViewingStatistics( *args, **kwargs )
    elif action == 'db_integrity': self._CheckDBIntegrity( *args, **kwargs )
    elif action == 'delete_imageboard': self.modules_serialisable.DeleteYAMLDump( ClientDBSerialisable.YAML_DUMP_ID_IMAGEBOARD, *args, **kwargs )
    elif action == 'delete_local_booru_share': self.modules_serialisable.DeleteYAMLDump( ClientDBSerialisable.YAML_DUMP_ID_LOCAL_BOORU, *args, **kwargs )
    elif action == 'delete_pending': self._DeletePending( *args, **kwargs )
    elif action == 'delete_serialisable_named': self.modules_serialisable.DeleteJSONDumpNamed( *args, **kwargs )
    elif action == 'delete_service_info': self._DeleteServiceInfo( *args, **kwargs )
    elif action == 'delete_potential_duplicate_pairs': self.modules_files_duplicates.DuplicatesDeleteAllPotentialDuplicatePairs( *args, **kwargs )
    elif action == 'dirty_services': self._SaveDirtyServices( *args, **kwargs )
    elif action == 'dissolve_alternates_group': self.modules_files_duplicates.DuplicatesDissolveAlternatesGroupIdFromHashes( *args, **kwargs )
    elif action == 'dissolve_duplicates_group': self.modules_files_duplicates.DuplicatesDissolveMediaIdFromHashes( *args, **kwargs )
    elif action == 'duplicate_pair_status': self._DuplicatesSetDuplicatePairStatus( *args, **kwargs )
    elif action == 'duplicate_set_king': self.modules_files_duplicates.DuplicatesSetKingFromHash( *args, **kwargs )
    elif action == 'file_maintenance_add_jobs': self.modules_files_maintenance_queue.AddJobs( *args, **kwargs )
    elif action == 'file_maintenance_add_jobs_hashes': self.modules_files_maintenance_queue.AddJobsHashes( *args, **kwargs )
    elif action == 'file_maintenance_cancel_jobs': self.modules_files_maintenance_queue.CancelJobs( *args, **kwargs )
    elif action == 'file_maintenance_clear_jobs': self.modules_files_maintenance.ClearJobs( *args, **kwargs )
    elif action == 'fix_logically_inconsistent_mappings': self._FixLogicallyInconsistentMappings( *args, **kwargs )
    elif action == 'imageboard': self.modules_serialisable.SetYAMLDump( ClientDBSerialisable.YAML_DUMP_ID_IMAGEBOARD, *args, **kwargs )
    elif action == 'ideal_client_files_locations': self._SetIdealClientFilesLocations( *args, **kwargs )
    elif action == 'import_file': result = self._ImportFile( *args, **kwargs )
    elif action == 'import_update': self._ImportUpdate( *args, **kwargs )
    elif action == 'local_booru_share': self.modules_serialisable.SetYAMLDump( ClientDBSerialisable.YAML_DUMP_ID_LOCAL_BOORU, *args, **kwargs )
    elif action == 'maintain_hashed_serialisables': result = self.modules_serialisable.MaintainHashedStorage( *args, **kwargs )
    elif action == 'maintain_similar_files_search_for_potential_duplicates': result = self._PerceptualHashesSearchForPotentialDuplicates( *args, **kwargs )
    elif action == 'maintain_similar_files_tree': self.modules_similar_files.MaintainTree( *args, **kwargs )
    elif action == 'migration_clear_job': self._MigrationClearJob( *args, **kwargs )
    elif action == 'migration_start_mappings_job': self._MigrationStartMappingsJob( *args, **kwargs )
    elif action == 'migration_start_pairs_job': self._MigrationStartPairsJob( *args, **kwargs )
    elif action == 'process_repository_content': result = self._ProcessRepositoryContent( *args, **kwargs )
    elif action == 'process_repository_definitions': result = self.modules_repositories.ProcessRepositoryDefinitions( *args, **kwargs )
    elif action == 'push_recent_tags': self._PushRecentTags( *args, **kwargs )
    elif action == 'regenerate_local_hash_cache': self._RegenerateLocalHashCache( *args, **kwargs )
    elif action == 'regenerate_local_tag_cache': self._RegenerateLocalTagCache( *args, **kwargs )
    elif action == 'regenerate_similar_files': self.modules_similar_files.RegenerateTree( *args, **kwargs )
    elif action == 'regenerate_searchable_subtag_maps': self._RegenerateTagCacheSearchableSubtagMaps( *args, **kwargs )
    elif action == 'regenerate_tag_cache': self._RegenerateTagCache( *args, **kwargs )
    elif action == 'regenerate_tag_display_mappings_cache': self._RegenerateTagDisplayMappingsCache( *args, **kwargs )
    elif action == 'regenerate_tag_display_pending_mappings_cache': self._RegenerateTagDisplayPendingMappingsCache( *args, **kwargs )
    elif action == 'regenerate_tag_mappings_cache': self._RegenerateTagMappingsCache( *args, **kwargs )
    elif action == 'regenerate_tag_parents_cache': self._RegenerateTagParentsCache( *args, **kwargs )
    elif action == 'regenerate_tag_pending_mappings_cache': self._RegenerateTagPendingMappingsCache( *args, **kwargs )
    elif action == 'regenerate_tag_siblings_and_parents_cache': self.modules_tag_display.RegenerateTagSiblingsAndParentsCache( *args, **kwargs )
    elif action == 'register_shutdown_work': self.modules_db_maintenance.RegisterShutdownWork( *args, **kwargs )
    elif action == 'repopulate_mappings_from_cache': self._RepopulateMappingsFromCache( *args, **kwargs )
    elif action == 'repopulate_tag_cache_missing_subtags': self._RepopulateTagCacheMissingSubtags( *args, **kwargs )
    elif action == 'repopulate_tag_display_mappings_cache': self._RepopulateTagDisplayMappingsCache( *args, **kwargs )
    elif action == 'relocate_client_files': self._RelocateClientFiles( *args, **kwargs )
    elif action == 'remove_alternates_member': self.modules_files_duplicates.DuplicatesRemoveAlternateMemberFromHashes( *args, **kwargs )
    elif action == 'remove_duplicates_member': self.modules_files_duplicates.DuplicatesRemoveMediaIdMemberFromHashes( *args, **kwargs )
    elif action == 'remove_potential_pairs': self.modules_files_duplicates.DuplicatesRemovePotentialPairsFromHashes( *args, **kwargs )
    elif action == 'repair_client_files': self._RepairClientFiles( *args, **kwargs )
    elif action == 'repair_invalid_tags': self._RepairInvalidTags( *args, **kwargs )
    elif action == 'reprocess_repository': self.modules_repositories.ReprocessRepository( *args, **kwargs )
    elif action == 'reset_repository': self._ResetRepository( *args, **kwargs )
    elif action == 'reset_repository_processing': self._ResetRepositoryProcessing( *args, **kwargs )
    elif action == 'reset_potential_search_status': self._PerceptualHashesResetSearchFromHashes( *args, **kwargs )
    elif action == 'save_options': self._SaveOptions( *args, **kwargs )
    elif action == 'serialisable': self.modules_serialisable.SetJSONDump( *args, **kwargs )
    elif action == 'serialisable_atomic': self.modules_serialisable.SetJSONComplex( *args, **kwargs )
    elif action == 'serialisable_simple': self.modules_serialisable.SetJSONSimple( *args, **kwargs )
    elif action == 'serialisables_overwrite': self.modules_serialisable.OverwriteJSONDumps( *args, **kwargs )
    elif action == 'set_password': self._SetPassword( *args, **kwargs )
    elif action == 'set_repository_update_hashes': self.modules_repositories.SetRepositoryUpdateHashes( *args, **kwargs )
    elif action == 'schedule_repository_update_file_maintenance': self.modules_repositories.ScheduleRepositoryUpdateFileMaintenance( *args, **kwargs )
    elif action == 'sync_tag_display_maintenance': result = self._CacheTagDisplaySync( *args, **kwargs )
    elif action == 'tag_display_application': self.modules_tag_display.SetApplication( *args, **kwargs )
    elif action == 'update_server_services': self._UpdateServerServices( *args, **kwargs )
    elif action == 'update_services': self._UpdateServices( *args, **kwargs )
    elif action == 'vacuum': self._Vacuum( *args, **kwargs )
    else: raise Exception( 'db received an unknown write command: ' + action )
    
    return result
def pub_content_updates_after_commit( self, service_keys_to_content_updates ):
    """Queue a content-update package to be published after the current job commits."""
    
    pending_jobs = self._after_job_content_update_jobs
    
    pending_jobs.append( service_keys_to_content_updates )
def pub_initial_message( self, message ):
    """Stash a message to be shown to the user once boot completes."""
    
    queued_messages = self._initial_messages
    
    queued_messages.append( message )
def pub_service_updates_after_commit( self, service_keys_to_service_updates ):
    """Publish service updates after the job commits: data listeners first, then gui."""
    
    for topic in ( 'service_updates_data', 'service_updates_gui' ):
        
        self._cursor_transaction_wrapper.pub_after_job( topic, service_keys_to_service_updates )
def publish_status_update( self ):
    """Tell the controller the status bar is stale so the gui redraws it."""
    
    controller = self._controller
    
    controller.pub( 'set_status_bar_dirty' )
def GetInitialMessages( self ):
    """Return the messages queued during boot (the live list, not a copy)."""
    
    messages = self._initial_messages
    
    return messages
def RestoreBackup( self, path ):
    """Copy a previously-made backup at 'path' back over the live database.
    
    Mirrors every known db file, any optional extra db files present in the
    backup, and the default client_files media tree. Intended to run while the
    db connection is not in use.
    """
    
    splash = HG.client_controller.frame_splash_status
    
    for db_filename in self._db_filenames.values():
        
        splash.SetText( db_filename )
        
        backup_file = os.path.join( path, db_filename )
        live_file = os.path.join( self._db_dir, db_filename )
        
        if os.path.exists( backup_file ):
            
            HydrusPaths.MirrorFile( backup_file, live_file )
            
        else:
            
            # the backup was made by an older version that had fewer db files,
            # so this live file would conflict with the restore. rather than
            # deleting it outright, shunt it aside as '.old' just in case
            HydrusPaths.MergeFile( live_file, live_file + '.old' )
            
        
    
    for extra_filename in self._GetPossibleAdditionalDBFilenames():
        
        backup_file = os.path.join( path, extra_filename )
        live_file = os.path.join( self._db_dir, extra_filename )
        
        if os.path.exists( backup_file ):
            
            HydrusPaths.MirrorFile( backup_file, live_file )
            
        
    
    splash.SetText( 'media files' )
    
    backup_client_files = os.path.join( path, 'client_files' )
    live_client_files = os.path.join( self._db_dir, 'client_files' )
    
    if os.path.exists( backup_client_files ):
        
        HydrusPaths.MirrorTree( backup_client_files, live_client_files )
| 48.517204 | 2,093 | 0.546328 | import collections
import hashlib
import itertools
import os
import random
import re
import sqlite3
import time
import traceback
import typing
from qtpy import QtCore as QC
from qtpy import QtWidgets as QW
from hydrus.core import HydrusConstants as HC
from hydrus.core import HydrusData
from hydrus.core import HydrusDB
from hydrus.core import HydrusDBBase
from hydrus.core import HydrusExceptions
from hydrus.core import HydrusGlobals as HG
from hydrus.core import HydrusPaths
from hydrus.core import HydrusSerialisable
from hydrus.core import HydrusTags
from hydrus.core.networking import HydrusNetwork
from hydrus.client import ClientAPI
from hydrus.client import ClientApplicationCommand as CAC
from hydrus.client import ClientConstants as CC
from hydrus.client import ClientData
from hydrus.client import ClientDefaults
from hydrus.client import ClientFiles
from hydrus.client import ClientLocation
from hydrus.client import ClientOptions
from hydrus.client import ClientSearch
from hydrus.client import ClientServices
from hydrus.client import ClientThreading
from hydrus.client.db import ClientDBDefinitionsCache
from hydrus.client.db import ClientDBFilesDuplicates
from hydrus.client.db import ClientDBFilesMaintenance
from hydrus.client.db import ClientDBFilesMaintenanceQueue
from hydrus.client.db import ClientDBFilesMetadataBasic
from hydrus.client.db import ClientDBFilesStorage
from hydrus.client.db import ClientDBMaintenance
from hydrus.client.db import ClientDBMappingsCacheSpecificDisplay
from hydrus.client.db import ClientDBMappingsCounts
from hydrus.client.db import ClientDBMappingsCountsUpdate
from hydrus.client.db import ClientDBMappingsStorage
from hydrus.client.db import ClientDBMaster
from hydrus.client.db import ClientDBRepositories
from hydrus.client.db import ClientDBSerialisable
from hydrus.client.db import ClientDBServices
from hydrus.client.db import ClientDBSimilarFiles
from hydrus.client.db import ClientDBTagDisplay
from hydrus.client.db import ClientDBTagParents
from hydrus.client.db import ClientDBTagSearch
from hydrus.client.db import ClientDBTagSiblings
from hydrus.client.importing import ClientImportFiles
from hydrus.client.media import ClientMedia
from hydrus.client.media import ClientMediaManagers
from hydrus.client.media import ClientMediaResult
from hydrus.client.media import ClientMediaResultCache
from hydrus.client.metadata import ClientTags
from hydrus.client.metadata import ClientTagsHandling
from hydrus.client.networking import ClientNetworkingBandwidth
from hydrus.client.networking import ClientNetworkingDomain
from hydrus.client.networking import ClientNetworkingFunctions
from hydrus.client.networking import ClientNetworkingLogin
from hydrus.client.networking import ClientNetworkingSessions
from hydrus.client.importing import ClientImportSubscriptionLegacy
from hydrus.client.networking import ClientNetworkingSessionsLegacy
from hydrus.client.networking import ClientNetworkingBandwidthLegacy
def BlockingSafeShowMessage( message ):
HydrusData.DebugPrint( message )
HG.client_controller.CallBlockingToQt( HG.client_controller.app, QW.QMessageBox.warning, None, 'Warning', message )
def report_content_speed_to_job_key( job_key, rows_done, total_rows, precise_timestamp, num_rows, row_name ):
it_took = HydrusData.GetNowPrecise() - precise_timestamp
rows_s = HydrusData.ToHumanInt( int( num_rows / it_took ) )
popup_message = 'content row ' + HydrusData.ConvertValueRangeToPrettyString( rows_done, total_rows ) + ': processing ' + row_name + ' at ' + rows_s + ' rows/s'
HG.client_controller.frame_splash_status.SetText( popup_message, print_to_log = False )
job_key.SetVariable( 'popup_text_2', popup_message )
def report_speed_to_job_key( job_key, precise_timestamp, num_rows, row_name ):
it_took = HydrusData.GetNowPrecise() - precise_timestamp
rows_s = HydrusData.ToHumanInt( int( num_rows / it_took ) )
popup_message = 'processing ' + row_name + ' at ' + rows_s + ' rows/s'
HG.client_controller.frame_splash_status.SetText( popup_message, print_to_log = False )
job_key.SetVariable( 'popup_text_2', popup_message )
def report_speed_to_log( precise_timestamp, num_rows, row_name ):
if num_rows == 0:
return
it_took = HydrusData.GetNowPrecise() - precise_timestamp
rows_s = HydrusData.ToHumanInt( int( num_rows / it_took ) )
summary = 'processed ' + HydrusData.ToHumanInt( num_rows ) + ' ' + row_name + ' at ' + rows_s + ' rows/s'
HydrusData.Print( summary )
class FilteredHashesGenerator( object ):
def __init__( self, file_service_ids_to_valid_hash_ids ):
self._file_service_ids_to_valid_hash_ids = file_service_ids_to_valid_hash_ids
def GetHashes( self, file_service_id, hash_ids ):
return self._file_service_ids_to_valid_hash_ids[ file_service_id ].intersection( hash_ids )
def IterateHashes( self, hash_ids ):
for ( file_service_id, valid_hash_ids ) in self._file_service_ids_to_valid_hash_ids.items():
if len( valid_hash_ids ) == 0:
continue
filtered_hash_ids = valid_hash_ids.intersection( hash_ids )
if len( filtered_hash_ids ) == 0:
continue
yield ( file_service_id, filtered_hash_ids )
class FilteredMappingsGenerator( object ):
def __init__( self, file_service_ids_to_valid_hash_ids, mappings_ids ):
self._file_service_ids_to_valid_hash_ids = file_service_ids_to_valid_hash_ids
self._mappings_ids = mappings_ids
def IterateMappings( self, file_service_id ):
valid_hash_ids = self._file_service_ids_to_valid_hash_ids[ file_service_id ]
if len( valid_hash_ids ) > 0:
for ( tag_id, hash_ids ) in self._mappings_ids:
hash_ids = valid_hash_ids.intersection( hash_ids )
if len( hash_ids ) == 0:
continue
yield ( tag_id, hash_ids )
class JobDatabaseClient( HydrusData.JobDatabase ):
def _DoDelayedResultRelief( self ):
if HG.db_ui_hang_relief_mode:
if QC.QThread.currentThread() == HG.client_controller.main_qt_thread:
HydrusData.Print( 'ui-hang event processing: begin' )
QW.QApplication.instance().processEvents()
HydrusData.Print( 'ui-hang event processing: end' )
class DB( HydrusDB.HydrusDB ):
READ_WRITE_ACTIONS = [ 'service_info', 'system_predicates', 'missing_thumbnail_hashes' ]
def __init__( self, controller, db_dir, db_name ):
self._initial_messages = []
self._have_printed_a_cannot_vacuum_message = False
self._weakref_media_result_cache = ClientMediaResultCache.MediaResultCache()
self._after_job_content_update_jobs = []
self._regen_tags_managers_hash_ids = set()
self._regen_tags_managers_tag_ids = set()
HydrusDB.HydrusDB.__init__( self, controller, db_dir, db_name )
def _AddFiles( self, service_id, rows ):
hash_ids = { row[0] for row in rows }
existing_hash_ids = self.modules_files_storage.FilterHashIdsToStatus( service_id, hash_ids, HC.CONTENT_STATUS_CURRENT )
new_hash_ids = hash_ids.difference( existing_hash_ids )
if len( new_hash_ids ) > 0:
service = self.modules_services.GetService( service_id )
service_type = service.GetServiceType()
valid_rows = [ ( hash_id, timestamp ) for ( hash_id, timestamp ) in rows if hash_id in new_hash_ids ]
if service_type == HC.LOCAL_FILE_DOMAIN:
self._DeleteFiles( self.modules_services.trash_service_id, new_hash_ids )
self._AddFiles( self.modules_services.combined_local_file_service_id, valid_rows )
pending_changed = self.modules_files_storage.AddFiles( service_id, valid_rows )
if pending_changed:
self._cursor_transaction_wrapper.pub_after_job( 'notify_new_pending' )
delta_size = self.modules_files_metadata_basic.GetTotalSize( new_hash_ids )
num_viewable_files = self.modules_files_metadata_basic.GetNumViewable( new_hash_ids )
num_files = len( new_hash_ids )
num_inbox = len( new_hash_ids.intersection( self.modules_files_metadata_basic.inbox_hash_ids ) )
service_info_updates = []
service_info_updates.append( ( delta_size, service_id, HC.SERVICE_INFO_TOTAL_SIZE ) )
service_info_updates.append( ( num_viewable_files, service_id, HC.SERVICE_INFO_NUM_VIEWABLE_FILES ) )
service_info_updates.append( ( num_files, service_id, HC.SERVICE_INFO_NUM_FILES ) )
service_info_updates.append( ( num_inbox, service_id, HC.SERVICE_INFO_NUM_INBOX ) )
if service_id != self.modules_services.trash_service_id:
num_deleted = self.modules_files_storage.ClearDeleteRecord( service_id, new_hash_ids )
service_info_updates.append( ( -num_deleted, service_id, HC.SERVICE_INFO_NUM_DELETED_FILES ) )
if service_id == self.modules_services.combined_local_file_service_id:
self.modules_hashes_local_cache.AddHashIdsToCache( new_hash_ids )
if service_id == self.modules_services.local_update_service_id:
self.modules_repositories.NotifyUpdatesImported( new_hash_ids )
if service_type in HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES:
tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
with self._MakeTemporaryIntegerTable( new_hash_ids, 'hash_id' ) as temp_hash_id_table_name:
for tag_service_id in tag_service_ids:
self._CacheSpecificMappingsAddFiles( service_id, tag_service_id, new_hash_ids, temp_hash_id_table_name )
self.modules_mappings_cache_specific_display.AddFiles( service_id, tag_service_id, new_hash_ids, temp_hash_id_table_name )
if service_type in HC.FILE_SERVICES_COVERED_BY_COMBINED_DELETED_FILE:
location_context = self.modules_files_storage.GetLocationContextForAllServicesDeletedFiles()
still_deleted_hash_ids = self.modules_files_storage.FilterHashIds( location_context, new_hash_ids )
no_longer_deleted_hash_ids = new_hash_ids.difference( still_deleted_hash_ids )
self._DeleteFiles( self.modules_services.combined_deleted_file_service_id, no_longer_deleted_hash_ids )
self._ExecuteMany( 'UPDATE service_info SET info = info + ? WHERE service_id = ? AND info_type = ?;', service_info_updates )
def _AddService( self, service_key, service_type, name, dictionary ):
name = self.modules_services.GetNonDupeName( name )
service_id = self.modules_services.AddService( service_key, service_type, name, dictionary )
self._AddServiceCreateFiles( service_id, service_type )
if service_type in HC.REPOSITORIES:
self.modules_repositories.GenerateRepositoryTables( service_id )
if service_type in HC.REAL_TAG_SERVICES:
self.modules_tag_search.Generate( self.modules_services.combined_file_service_id, service_id )
file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_TAG_LOOKUP_CACHES )
for file_service_id in file_service_ids:
self.modules_tag_search.Generate( file_service_id, service_id )
self.modules_tag_parents.Generate( service_id )
self.modules_tag_siblings.Generate( service_id )
self._AddServiceCreateMappings( service_id, service_type )
if service_type in HC.FILE_SERVICES_WITH_SPECIFIC_TAG_LOOKUP_CACHES:
tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
for tag_service_id in tag_service_ids:
self.modules_tag_search.Generate( service_id, tag_service_id )
def _AddServiceCreateFiles( self, service_id, service_type ):
if service_type in HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES:
self.modules_files_storage.GenerateFilesTables( service_id )
if service_type in HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES:
tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
for tag_service_id in tag_service_ids:
self._CacheSpecificMappingsGenerate( service_id, tag_service_id )
def _AddServiceCreateMappings( self, service_id, service_type ):
if service_type in HC.REAL_TAG_SERVICES:
self.modules_mappings_storage.GenerateMappingsTables( service_id )
self._CacheCombinedFilesMappingsGenerate( service_id )
file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES )
for file_service_id in file_service_ids:
self._CacheSpecificMappingsGenerate( file_service_id, service_id )
def _ArchiveFiles( self, hash_ids ):
hash_ids_archived = self.modules_files_metadata_basic.ArchiveFiles( hash_ids )
if len( hash_ids_archived ) > 0:
service_ids_to_counts = self.modules_files_storage.GetServiceIdCounts( hash_ids_archived )
update_rows = list( service_ids_to_counts.items() )
self._ExecuteMany( 'UPDATE service_info SET info = info - ? WHERE service_id = ? AND info_type = ?;', [ ( count, service_id, HC.SERVICE_INFO_NUM_INBOX ) for ( service_id, count ) in update_rows ] )
def _Backup( self, path ):
self._CloseDBConnection()
job_key = ClientThreading.JobKey( cancellable = True )
try:
job_key.SetStatusTitle( 'backing up db' )
self._controller.pub( 'modal_message', job_key )
job_key.SetVariable( 'popup_text_1', 'closing db' )
HydrusPaths.MakeSureDirectoryExists( path )
for filename in self._db_filenames.values():
if job_key.IsCancelled():
break
job_key.SetVariable( 'popup_text_1', 'copying ' + filename )
source = os.path.join( self._db_dir, filename )
dest = os.path.join( path, filename )
HydrusPaths.MirrorFile( source, dest )
additional_filenames = self._GetPossibleAdditionalDBFilenames()
for additional_filename in additional_filenames:
source = os.path.join( self._db_dir, additional_filename )
dest = os.path.join( path, additional_filename )
if os.path.exists( source ):
HydrusPaths.MirrorFile( source, dest )
def is_cancelled_hook():
return job_key.IsCancelled()
def text_update_hook( text ):
job_key.SetVariable( 'popup_text_1', text )
client_files_default = os.path.join( self._db_dir, 'client_files' )
if os.path.exists( client_files_default ):
HydrusPaths.MirrorTree( client_files_default, os.path.join( path, 'client_files' ), text_update_hook = text_update_hook, is_cancelled_hook = is_cancelled_hook )
finally:
self._InitDBConnection()
job_key.SetVariable( 'popup_text_1', 'backup complete!' )
job_key.Finish()
def _CacheCombinedFilesDisplayMappingsAddImplications( self, tag_service_id, implication_tag_ids, tag_id, status_hook = None ):
if len( implication_tag_ids ) == 0:
return
remaining_implication_tag_ids = set( self.modules_tag_display.GetImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, tag_id ) ).difference( implication_tag_ids )
( current_delta, pending_delta ) = self._GetWithAndWithoutTagsFileCountCombined( tag_service_id, implication_tag_ids, remaining_implication_tag_ids )
if current_delta > 0 or pending_delta > 0:
counts_cache_changes = ( ( tag_id, current_delta, pending_delta ), )
self.modules_mappings_counts_update.AddCounts( ClientTags.TAG_DISPLAY_ACTUAL, self.modules_services.combined_file_service_id, tag_service_id, counts_cache_changes )
def _CacheCombinedFilesDisplayMappingsAddMappingsForChained( self, tag_service_id, storage_tag_id, hash_ids ):
ac_current_counts = collections.Counter()
ac_pending_counts = collections.Counter()
with self._MakeTemporaryIntegerTable( hash_ids, 'hash_id' ) as temp_hash_ids_table_name:
display_tag_ids = self.modules_tag_display.GetImplies( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, storage_tag_id )
display_tag_ids_to_implied_by_tag_ids = self.modules_tag_display.GetTagsToImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, display_tag_ids, tags_are_ideal = True )
file_service_ids_to_hash_ids = self._GroupHashIdsByTagCachedFileServiceId( hash_ids, temp_hash_ids_table_name )
for ( display_tag_id, implied_by_tag_ids ) in display_tag_ids_to_implied_by_tag_ids.items():
other_implied_by_tag_ids = set( implied_by_tag_ids )
other_implied_by_tag_ids.discard( storage_tag_id )
num_pending_to_be_rescinded = self._GetWithAndWithoutTagsForFilesFileCount( HC.CONTENT_STATUS_PENDING, tag_service_id, ( storage_tag_id, ), other_implied_by_tag_ids, hash_ids, temp_hash_ids_table_name, file_service_ids_to_hash_ids )
num_non_addable = self._GetWithAndWithoutTagsForFilesFileCount( HC.CONTENT_STATUS_CURRENT, tag_service_id, implied_by_tag_ids, set(), hash_ids, temp_hash_ids_table_name, file_service_ids_to_hash_ids )
num_addable = len( hash_ids ) - num_non_addable
if num_addable > 0:
ac_current_counts[ display_tag_id ] += num_addable
if num_pending_to_be_rescinded > 0:
ac_pending_counts[ display_tag_id ] += num_pending_to_be_rescinded
if len( ac_current_counts ) > 0:
counts_cache_changes = [ ( tag_id, current_delta, 0 ) for ( tag_id, current_delta ) in ac_current_counts.items() ]
self.modules_mappings_counts_update.AddCounts( ClientTags.TAG_DISPLAY_ACTUAL, self.modules_services.combined_file_service_id, tag_service_id, counts_cache_changes )
if len( ac_pending_counts ) > 0:
counts_cache_changes = [ ( tag_id, 0, pending_delta ) for ( tag_id, pending_delta ) in ac_pending_counts.items() ]
self.modules_mappings_counts_update.ReduceCounts( ClientTags.TAG_DISPLAY_ACTUAL, self.modules_services.combined_file_service_id, tag_service_id, counts_cache_changes )
def _CacheCombinedFilesDisplayMappingsDeleteImplications( self, tag_service_id, implication_tag_ids, tag_id, status_hook = None ):
    """Reduce the combined-file display ('actual') a/c counts for tag_id after some implying tags are removed.

    tag_service_id: db id of the tag service being updated.
    implication_tag_ids: tag ids that used to imply tag_id but no longer do.
    tag_id: the display tag whose counts shrink.
    status_hook: accepted for interface symmetry with sibling methods but unused here.
    """
    if len( implication_tag_ids ) == 0:
        return
    # the implications that survive this delete; files still covered by these keep their count
    remaining_implication_tag_ids = set( self.modules_tag_display.GetImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, tag_id ) ).difference( implication_tag_ids )
    # count files that were only reached through the removed implications
    ( current_delta, pending_delta ) = self._GetWithAndWithoutTagsFileCountCombined( tag_service_id, implication_tag_ids, remaining_implication_tag_ids )
    if current_delta > 0 or pending_delta > 0:
        counts_cache_changes = ( ( tag_id, current_delta, pending_delta ), )
        self.modules_mappings_counts_update.ReduceCounts( ClientTags.TAG_DISPLAY_ACTUAL, self.modules_services.combined_file_service_id, tag_service_id, counts_cache_changes )
def _CacheCombinedFilesDisplayMappingsDeleteMappingsForChained( self, tag_service_id, storage_tag_id, hash_ids ):
    """Reduce combined-file display current counts when a sibling/parent-chained storage mapping is deleted.

    For every display tag implied by storage_tag_id, the count only drops for files
    not still covered by another implying tag.
    """
    ac_counts = collections.Counter()
    with self._MakeTemporaryIntegerTable( hash_ids, 'hash_id' ) as temp_hash_ids_table_name:
        display_tag_ids = self.modules_tag_display.GetImplies( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, storage_tag_id )
        display_tag_ids_to_implied_by_tag_ids = self.modules_tag_display.GetTagsToImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, display_tag_ids, tags_are_ideal = True )
        file_service_ids_to_hash_ids = self._GroupHashIdsByTagCachedFileServiceId( hash_ids, temp_hash_ids_table_name )
        for ( display_tag_id, implied_by_tag_ids ) in display_tag_ids_to_implied_by_tag_ids.items():
            # the other implications that could still keep a file counted under display_tag_id
            other_implied_by_tag_ids = set( implied_by_tag_ids )
            other_implied_by_tag_ids.discard( storage_tag_id )
            # current-status files tagged via storage_tag_id but by none of the other implications
            num_deletable = self._GetWithAndWithoutTagsForFilesFileCount( HC.CONTENT_STATUS_CURRENT, tag_service_id, ( storage_tag_id, ), other_implied_by_tag_ids, hash_ids, temp_hash_ids_table_name, file_service_ids_to_hash_ids )
            if num_deletable > 0:
                ac_counts[ display_tag_id ] += num_deletable
    if len( ac_counts ) > 0:
        counts_cache_changes = [ ( tag_id, current_delta, 0 ) for ( tag_id, current_delta ) in ac_counts.items() ]
        self.modules_mappings_counts_update.ReduceCounts( ClientTags.TAG_DISPLAY_ACTUAL, self.modules_services.combined_file_service_id, tag_service_id, counts_cache_changes )
def _CacheCombinedFilesDisplayMappingsClear( self, tag_service_id, keep_pending = False ):
    """Wipe the display-tier ('actual') a/c counts for this tag service in the combined file domain."""
    combined_file_service_id = self.modules_services.combined_file_service_id
    self.modules_mappings_counts.ClearCounts( ClientTags.TAG_DISPLAY_ACTUAL, combined_file_service_id, tag_service_id, keep_pending = keep_pending )
def _CacheCombinedFilesDisplayMappingsDrop( self, tag_service_id ):
    """Drop the display-tier ('actual') count tables for this tag service in the combined file domain."""
    combined_file_service_id = self.modules_services.combined_file_service_id
    self.modules_mappings_counts.DropTables( ClientTags.TAG_DISPLAY_ACTUAL, combined_file_service_id, tag_service_id )
def _CacheCombinedFilesDisplayMappingsGenerate( self, tag_service_id, status_hook = None ):
    """Create the display-tier combined-file count tables, seeded from the storage tier.

    status_hook, if given, receives a short progress message.
    """
    if status_hook is not None:
        status_hook( 'copying storage counts' )
    combined_file_service_id = self.modules_services.combined_file_service_id
    self.modules_mappings_counts.CreateTables( ClientTags.TAG_DISPLAY_ACTUAL, combined_file_service_id, tag_service_id, populate_from_storage = True )
def _CacheCombinedFilesDisplayMappingsPendMappingsForChained( self, tag_service_id, storage_tag_id, hash_ids ):
    """Add combined-file display pending counts when a sibling/parent-chained storage mapping is pended.

    A file only adds to a display tag's pending count if none of that display tag's
    implying tags already cover it with a pending mapping.
    """
    ac_counts = collections.Counter()
    with self._MakeTemporaryIntegerTable( hash_ids, 'hash_id' ) as temp_hash_ids_table_name:
        display_tag_ids = self.modules_tag_display.GetImplies( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, storage_tag_id )
        display_tag_ids_to_implied_by_tag_ids = self.modules_tag_display.GetTagsToImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, display_tag_ids, tags_are_ideal = True )
        file_service_ids_to_hash_ids = self._GroupHashIdsByTagCachedFileServiceId( hash_ids, temp_hash_ids_table_name )
        for ( display_tag_id, implied_by_tag_ids ) in display_tag_ids_to_implied_by_tag_ids.items():
            # files already pending via any implication cannot be pended again
            num_non_pendable = self._GetWithAndWithoutTagsForFilesFileCount( HC.CONTENT_STATUS_PENDING, tag_service_id, implied_by_tag_ids, set(), hash_ids, temp_hash_ids_table_name, file_service_ids_to_hash_ids )
            num_pendable = len( hash_ids ) - num_non_pendable
            if num_pendable > 0:
                ac_counts[ display_tag_id ] += num_pendable
    if len( ac_counts ) > 0:
        counts_cache_changes = [ ( tag_id, 0, pending_delta ) for ( tag_id, pending_delta ) in ac_counts.items() ]
        self.modules_mappings_counts_update.AddCounts( ClientTags.TAG_DISPLAY_ACTUAL, self.modules_services.combined_file_service_id, tag_service_id, counts_cache_changes )
def _CacheCombinedFilesDisplayMappingsRegeneratePending( self, tag_service_id, status_hook = None ):
    """Rebuild all pending counts in the combined-file display cache from the storage pending mappings.

    Current counts are preserved (ClearCounts with keep_current = True); pending counts are
    recomputed per display tag as the number of distinct files pending under any implying tag.
    """
    ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = ClientDBMappingsStorage.GenerateMappingsTableNames( tag_service_id )
    if status_hook is not None:
        message = 'clearing old combined display data'
        status_hook( message )
    all_pending_storage_tag_ids = self._STS( self._Execute( 'SELECT DISTINCT tag_id FROM {};'.format( pending_mappings_table_name ) ) )
    storage_tag_ids_to_display_tag_ids = self.modules_tag_display.GetTagsToImplies( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, all_pending_storage_tag_ids )
    all_pending_display_tag_ids = set( itertools.chain.from_iterable( storage_tag_ids_to_display_tag_ids.values() ) )
    # free the potentially large intermediates before the rebuild loop
    del all_pending_storage_tag_ids
    del storage_tag_ids_to_display_tag_ids
    self.modules_mappings_counts.ClearCounts( ClientTags.TAG_DISPLAY_ACTUAL, self.modules_services.combined_file_service_id, tag_service_id, keep_current = True )
    all_pending_display_tag_ids_to_implied_by_storage_tag_ids = self.modules_tag_display.GetTagsToImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, all_pending_display_tag_ids, tags_are_ideal = True )
    counts_cache_changes = []
    num_to_do = len( all_pending_display_tag_ids_to_implied_by_storage_tag_ids )
    for ( i, ( display_tag_id, storage_tag_ids ) ) in enumerate( all_pending_display_tag_ids_to_implied_by_storage_tag_ids.items() ):
        if i % 100 == 0 and status_hook is not None:
            message = 'regenerating pending tags {}'.format( HydrusData.ConvertValueRangeToPrettyString( i + 1, num_to_do ) )
            status_hook( message )
        if len( storage_tag_ids ) == 1:
            # single implication: a simple indexed count is cheaper than a temp-table join
            ( storage_tag_id, ) = storage_tag_ids
            ( pending_delta, ) = self._Execute( 'SELECT COUNT( DISTINCT hash_id ) FROM {} WHERE tag_id = ?;'.format( pending_mappings_table_name ), ( storage_tag_id, ) ).fetchone()
        else:
            with self._MakeTemporaryIntegerTable( storage_tag_ids, 'tag_id' ) as temp_tag_ids_table_name:
                # temp tags to mappings merged; DISTINCT dedupes files pending under several implications
                ( pending_delta, ) = self._Execute( 'SELECT COUNT( DISTINCT hash_id ) FROM {} CROSS JOIN {} USING ( tag_id );'.format( temp_tag_ids_table_name, pending_mappings_table_name ) ).fetchone()
        counts_cache_changes.append( ( display_tag_id, 0, pending_delta ) )
    self.modules_mappings_counts_update.AddCounts( ClientTags.TAG_DISPLAY_ACTUAL, self.modules_services.combined_file_service_id, tag_service_id, counts_cache_changes )
def _CacheCombinedFilesDisplayMappingsRescindPendingMappingsForChained( self, tag_service_id, storage_tag_id, hash_ids ):
    """Reduce combined-file display pending counts when a chained pending storage mapping is rescinded.

    A display tag's pending count only drops for files not still pending via another implying tag.
    """
    ac_counts = collections.Counter()
    with self._MakeTemporaryIntegerTable( hash_ids, 'hash_id' ) as temp_hash_ids_table_name:
        display_tag_ids = self.modules_tag_display.GetImplies( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, storage_tag_id )
        display_tag_ids_to_implied_by_tag_ids = self.modules_tag_display.GetTagsToImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, display_tag_ids, tags_are_ideal = True )
        file_service_ids_to_hash_ids = self._GroupHashIdsByTagCachedFileServiceId( hash_ids, temp_hash_ids_table_name )
        for ( display_tag_id, implied_by_tag_ids ) in display_tag_ids_to_implied_by_tag_ids.items():
            other_implied_by_tag_ids = set( implied_by_tag_ids )
            other_implied_by_tag_ids.discard( storage_tag_id )
            # get the count of pending that are tagged by storage_tag_id but not tagged by any of the other implications
            num_rescindable = self._GetWithAndWithoutTagsForFilesFileCount( HC.CONTENT_STATUS_PENDING, tag_service_id, ( storage_tag_id, ), other_implied_by_tag_ids, hash_ids, temp_hash_ids_table_name, file_service_ids_to_hash_ids )
            if num_rescindable > 0:
                ac_counts[ display_tag_id ] += num_rescindable
    if len( ac_counts ) > 0:
        counts_cache_changes = [ ( tag_id, 0, pending_delta ) for ( tag_id, pending_delta ) in ac_counts.items() ]
        self.modules_mappings_counts_update.ReduceCounts( ClientTags.TAG_DISPLAY_ACTUAL, self.modules_services.combined_file_service_id, tag_service_id, counts_cache_changes )
def _CacheCombinedFilesMappingsClear( self, tag_service_id, keep_pending = False ):
    """Clear storage-tier combined-file counts, then cascade to the display tier."""
    combined_file_service_id = self.modules_services.combined_file_service_id
    self.modules_mappings_counts.ClearCounts( ClientTags.TAG_DISPLAY_STORAGE, combined_file_service_id, tag_service_id, keep_pending = keep_pending )
    self._CacheCombinedFilesDisplayMappingsClear( tag_service_id, keep_pending = keep_pending )
def _CacheCombinedFilesMappingsDrop( self, tag_service_id ):
    """Drop storage-tier combined-file count tables, then cascade to the display tier."""
    combined_file_service_id = self.modules_services.combined_file_service_id
    self.modules_mappings_counts.DropTables( ClientTags.TAG_DISPLAY_STORAGE, combined_file_service_id, tag_service_id )
    self._CacheCombinedFilesDisplayMappingsDrop( tag_service_id )
def _CacheCombinedFilesMappingsGenerate( self, tag_service_id ):
    """Build the storage-tier combined-file a/c counts from scratch, then generate the display tier.

    Walks all known tag ids in chunks of 10,000 and counts each tag's current and
    pending mappings in the service's storage tables.
    """
    self.modules_mappings_counts.CreateTables( ClientTags.TAG_DISPLAY_STORAGE, self.modules_services.combined_file_service_id, tag_service_id )
    #
    ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = ClientDBMappingsStorage.GenerateMappingsTableNames( tag_service_id )
    current_mappings_exist = self._Execute( 'SELECT 1 FROM ' + current_mappings_table_name + ' LIMIT 1;' ).fetchone() is not None
    pending_mappings_exist = self._Execute( 'SELECT 1 FROM ' + pending_mappings_table_name + ' LIMIT 1;' ).fetchone() is not None
    if current_mappings_exist or pending_mappings_exist: # not worth iterating through all known tags for an empty service
        for ( group_of_ids, num_done, num_to_do ) in HydrusDB.ReadLargeIdQueryInSeparateChunks( self._c, 'SELECT tag_id FROM tags;', 10000 ): # must be a cleverer way of doing this
            with self._MakeTemporaryIntegerTable( group_of_ids, 'tag_id' ) as temp_table_name:
                current_counter = collections.Counter()
                # temp tags to mappings
                for ( tag_id, count ) in self._Execute( 'SELECT tag_id, COUNT( * ) FROM {} CROSS JOIN {} USING ( tag_id ) GROUP BY ( tag_id );'.format( temp_table_name, current_mappings_table_name ) ):
                    current_counter[ tag_id ] = count
                pending_counter = collections.Counter()
                # temp tags to mappings
                for ( tag_id, count ) in self._Execute( 'SELECT tag_id, COUNT( * ) FROM {} CROSS JOIN {} USING ( tag_id ) GROUP BY ( tag_id );'.format( temp_table_name, pending_mappings_table_name ) ):
                    pending_counter[ tag_id ] = count
                # a tag can appear in either counter; Counter returns 0 for the missing side
                all_ids_seen = set( current_counter.keys() )
                all_ids_seen.update( pending_counter.keys() )
                counts_cache_changes = [ ( tag_id, current_counter[ tag_id ], pending_counter[ tag_id ] ) for tag_id in all_ids_seen ]
                if len( counts_cache_changes ) > 0:
                    self.modules_mappings_counts_update.AddCounts( ClientTags.TAG_DISPLAY_STORAGE, self.modules_services.combined_file_service_id, tag_service_id, counts_cache_changes )
    self._CacheCombinedFilesDisplayMappingsGenerate( tag_service_id )
def _CacheCombinedFilesMappingsRegeneratePending( self, tag_service_id, status_hook = None ):
    """Recompute storage-tier combined-file pending counts from the pending mappings table, then cascade to display.

    Current counts are kept (keep_current = True); each pending tag's count is the number
    of distinct files pending under it.
    """
    ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = ClientDBMappingsStorage.GenerateMappingsTableNames( tag_service_id )
    if status_hook is not None:
        message = 'clearing old combined display data'
        status_hook( message )
    all_pending_storage_tag_ids = self._STS( self._Execute( 'SELECT DISTINCT tag_id FROM {};'.format( pending_mappings_table_name ) ) )
    self.modules_mappings_counts.ClearCounts( ClientTags.TAG_DISPLAY_STORAGE, self.modules_services.combined_file_service_id, tag_service_id, keep_current = True )
    counts_cache_changes = []
    num_to_do = len( all_pending_storage_tag_ids )
    for ( i, storage_tag_id ) in enumerate( all_pending_storage_tag_ids ):
        if i % 100 == 0 and status_hook is not None:
            message = 'regenerating pending tags {}'.format( HydrusData.ConvertValueRangeToPrettyString( i + 1, num_to_do ) )
            status_hook( message )
        ( pending_delta, ) = self._Execute( 'SELECT COUNT( DISTINCT hash_id ) FROM {} WHERE tag_id = ?;'.format( pending_mappings_table_name ), ( storage_tag_id, ) ).fetchone()
        counts_cache_changes.append( ( storage_tag_id, 0, pending_delta ) )
    self.modules_mappings_counts_update.AddCounts( ClientTags.TAG_DISPLAY_STORAGE, self.modules_services.combined_file_service_id, tag_service_id, counts_cache_changes )
    self._CacheCombinedFilesDisplayMappingsRegeneratePending( tag_service_id, status_hook = status_hook )
def _CacheSpecificMappingsAddFiles( self, file_service_id, tag_service_id, hash_ids, hash_ids_table_name ):
    """Pull the given files' mappings from the storage tables into this file service's specific cache.

    file_service_id / tag_service_id: db ids of the (file domain, tag service) cache pair.
    hash_ids: the files newly added to the file service.
    hash_ids_table_name: a table holding those hash_ids, used for the joins.
    Updates the storage-tier a/c counts for every tag that gained current/pending rows.
    """
    ( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
    ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = ClientDBMappingsStorage.GenerateMappingsTableNames( tag_service_id )
    # deleted don't have a/c counts to update, so we can do it all in one go here
    # BUGFIX: the SELECT previously emitted 'tag_id, hash_id', which wrote swapped values
    # into the ( hash_id, tag_id ) column list; order now matches (cf. the insert in
    # _CacheSpecificMappingsRegeneratePending).
    self._Execute( 'INSERT OR IGNORE INTO {} ( hash_id, tag_id ) SELECT hash_id, tag_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( cache_deleted_mappings_table_name, hash_ids_table_name, deleted_mappings_table_name ) )
    current_mapping_ids_raw = self._Execute( 'SELECT tag_id, hash_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( hash_ids_table_name, current_mappings_table_name ) ).fetchall()
    current_mapping_ids_dict = HydrusData.BuildKeyToSetDict( current_mapping_ids_raw )
    pending_mapping_ids_raw = self._Execute( 'SELECT tag_id, hash_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( hash_ids_table_name, pending_mappings_table_name ) ).fetchall()
    pending_mapping_ids_dict = HydrusData.BuildKeyToSetDict( pending_mapping_ids_raw )
    all_ids_seen = set( current_mapping_ids_dict.keys() )
    all_ids_seen.update( pending_mapping_ids_dict.keys() )
    counts_cache_changes = []
    for tag_id in all_ids_seen:
        current_hash_ids = current_mapping_ids_dict[ tag_id ]
        current_delta = len( current_hash_ids )
        if current_delta > 0:
            self._ExecuteMany( 'INSERT OR IGNORE INTO ' + cache_current_mappings_table_name + ' ( hash_id, tag_id ) VALUES ( ?, ? );', ( ( hash_id, tag_id ) for hash_id in current_hash_ids ) )
            # rows already cached are ignored, so count what was actually inserted
            current_delta = self._GetRowCount()
        pending_hash_ids = pending_mapping_ids_dict[ tag_id ]
        pending_delta = len( pending_hash_ids )
        if pending_delta > 0:
            self._ExecuteMany( 'INSERT OR IGNORE INTO ' + cache_pending_mappings_table_name + ' ( hash_id, tag_id ) VALUES ( ?, ? );', ( ( hash_id, tag_id ) for hash_id in pending_hash_ids ) )
            pending_delta = self._GetRowCount()
        if current_delta > 0 or pending_delta > 0:
            counts_cache_changes.append( ( tag_id, current_delta, pending_delta ) )
    if len( counts_cache_changes ) > 0:
        self.modules_mappings_counts_update.AddCounts( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, counts_cache_changes )
def _CacheSpecificMappingsAddMappings( self, tag_service_id, tag_id, hash_ids, filtered_hashes_generator: FilteredHashesGenerator ):
    """Commit a (tag_id, hash_ids) mapping into every relevant file service's specific cache.

    For each file domain: rescind any pending rows, insert current rows, clear deleted
    rows, adjust the storage-tier counts, and cascade to the specific display cache.
    """
    for ( file_service_id, filtered_hash_ids ) in filtered_hashes_generator.IterateHashes( hash_ids ):
        ( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
        # display must rescind before the storage pending rows disappear underneath it
        self.modules_mappings_cache_specific_display.RescindPendingMappings( file_service_id, tag_service_id, tag_id, filtered_hash_ids )
        self._ExecuteMany( 'DELETE FROM ' + cache_pending_mappings_table_name + ' WHERE hash_id = ? AND tag_id = ?;', ( ( hash_id, tag_id ) for hash_id in filtered_hash_ids ) )
        num_pending_rescinded = self._GetRowCount()
        self._ExecuteMany( 'INSERT OR IGNORE INTO ' + cache_current_mappings_table_name + ' ( hash_id, tag_id ) VALUES ( ?, ? );', ( ( hash_id, tag_id ) for hash_id in filtered_hash_ids ) )
        num_current_inserted = self._GetRowCount()
        self._ExecuteMany( 'DELETE FROM ' + cache_deleted_mappings_table_name + ' WHERE hash_id = ? AND tag_id = ?;', ( ( hash_id, tag_id ) for hash_id in filtered_hash_ids ) )
        if num_current_inserted > 0:
            counts_cache_changes = [ ( tag_id, num_current_inserted, 0 ) ]
            self.modules_mappings_counts_update.AddCounts( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, counts_cache_changes )
        if num_pending_rescinded > 0:
            counts_cache_changes = [ ( tag_id, 0, num_pending_rescinded ) ]
            self.modules_mappings_counts_update.ReduceCounts( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, counts_cache_changes )
        self.modules_mappings_cache_specific_display.AddMappings( file_service_id, tag_service_id, tag_id, filtered_hash_ids )
def _CacheSpecificMappingsClear( self, file_service_id, tag_service_id, keep_pending = False ):
    """Empty this (file domain, tag service) specific mappings cache and its counts, cascading to the display cache."""
    ( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
    tables_to_wipe = [ cache_current_mappings_table_name, cache_deleted_mappings_table_name ]
    if not keep_pending:
        tables_to_wipe.append( cache_pending_mappings_table_name )
    for table_name in tables_to_wipe:
        self._Execute( 'DELETE FROM {};'.format( table_name ) )
    self.modules_mappings_counts.ClearCounts( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, keep_pending = keep_pending )
    self.modules_mappings_cache_specific_display.Clear( file_service_id, tag_service_id, keep_pending = keep_pending )
def _CacheSpecificMappingsCreateTables( self, file_service_id, tag_service_id ):
    """Create the current/deleted/pending cache tables (plus reverse indices) for this (file domain, tag service) pair."""
    cache_table_names = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
    for table_name in cache_table_names:
        self._Execute( 'CREATE TABLE IF NOT EXISTS ' + table_name + ' ( hash_id INTEGER, tag_id INTEGER, PRIMARY KEY ( hash_id, tag_id ) ) WITHOUT ROWID;' )
    # reverse ( tag_id, hash_id ) index for tag-first lookups
    for table_name in cache_table_names:
        self._CreateIndex( table_name, [ 'tag_id', 'hash_id' ], unique = True )
    self.modules_mappings_counts.CreateTables( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id )
def _CacheSpecificMappingsDrop( self, file_service_id, tag_service_id ):
    """Drop this (file domain, tag service) specific cache's tables and counts, cascading to the display cache."""
    cache_table_names = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
    for table_name in cache_table_names:
        self._Execute( 'DROP TABLE IF EXISTS {};'.format( table_name ) )
    self.modules_mappings_counts.DropTables( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id )
    self.modules_mappings_cache_specific_display.Drop( file_service_id, tag_service_id )
def _CacheSpecificMappingsDeleteFiles( self, file_service_id, tag_service_id, hash_ids, hash_id_table_name ):
    """Remove the given files' rows from this file service's specific cache and reduce the counts.

    The display cache is told first, then deleted/current/pending rows are dropped.
    Count deltas are gathered from the cache BEFORE the rows are deleted.
    """
    self.modules_mappings_cache_specific_display.DeleteFiles( file_service_id, tag_service_id, hash_ids, hash_id_table_name )
    ( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
    # deleted rows have no counts, so they can go in one pass
    deleted_mapping_ids_raw = self._Execute( 'SELECT tag_id, hash_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( hash_id_table_name, cache_deleted_mappings_table_name ) ).fetchall()
    if len( deleted_mapping_ids_raw ) > 0:
        self._ExecuteMany( 'DELETE FROM {} WHERE tag_id = ? AND hash_id = ?;'.format( cache_deleted_mappings_table_name ), deleted_mapping_ids_raw )
    current_mapping_ids_raw = self._Execute( 'SELECT tag_id, hash_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( hash_id_table_name, cache_current_mappings_table_name ) ).fetchall()
    current_mapping_ids_dict = HydrusData.BuildKeyToSetDict( current_mapping_ids_raw )
    pending_mapping_ids_raw = self._Execute( 'SELECT tag_id, hash_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( hash_id_table_name, cache_pending_mappings_table_name ) ).fetchall()
    pending_mapping_ids_dict = HydrusData.BuildKeyToSetDict( pending_mapping_ids_raw )
    all_ids_seen = set( current_mapping_ids_dict.keys() )
    all_ids_seen.update( pending_mapping_ids_dict.keys() )
    counts_cache_changes = []
    for tag_id in all_ids_seen:
        current_hash_ids = current_mapping_ids_dict[ tag_id ]
        num_current = len( current_hash_ids )
        pending_hash_ids = pending_mapping_ids_dict[ tag_id ]
        num_pending = len( pending_hash_ids )
        counts_cache_changes.append( ( tag_id, num_current, num_pending ) )
    self._ExecuteMany( 'DELETE FROM ' + cache_current_mappings_table_name + ' WHERE hash_id = ?;', ( ( hash_id, ) for hash_id in hash_ids ) )
    self._ExecuteMany( 'DELETE FROM ' + cache_pending_mappings_table_name + ' WHERE hash_id = ?;', ( ( hash_id, ) for hash_id in hash_ids ) )
    if len( counts_cache_changes ) > 0:
        self.modules_mappings_counts_update.ReduceCounts( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, counts_cache_changes )
def _CacheSpecificMappingsDeleteMappings( self, tag_service_id, tag_id, hash_ids, filtered_hashes_generator: FilteredHashesGenerator ):
    """Delete a (tag_id, hash_ids) mapping from every relevant file service's specific cache.

    For each file domain: cascade the delete to the display cache, move rows from
    current to deleted, and reduce the current count by what was actually removed.
    """
    for ( file_service_id, filtered_hash_ids ) in filtered_hashes_generator.IterateHashes( hash_ids ):
        ( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
        # display first, while the current rows it reasons about still exist
        self.modules_mappings_cache_specific_display.DeleteMappings( file_service_id, tag_service_id, tag_id, filtered_hash_ids )
        self._ExecuteMany( 'DELETE FROM ' + cache_current_mappings_table_name + ' WHERE hash_id = ? AND tag_id = ?;', ( ( hash_id, tag_id ) for hash_id in filtered_hash_ids ) )
        num_deleted = self._GetRowCount()
        self._ExecuteMany( 'INSERT OR IGNORE INTO ' + cache_deleted_mappings_table_name + ' ( hash_id, tag_id ) VALUES ( ?, ? );', ( ( hash_id, tag_id ) for hash_id in filtered_hash_ids ) )
        if num_deleted > 0:
            counts_cache_changes = [ ( tag_id, num_deleted, 0 ) ]
            self.modules_mappings_counts_update.ReduceCounts( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, counts_cache_changes )
def _CacheSpecificMappingsGenerate( self, file_service_id, tag_service_id ):
    """Build this (file domain, tag service) specific cache from scratch.

    Creates the tables, imports the file service's current files in 10,000-id blocks,
    refreshes SQLite query-planner stats, then generates the specific display cache.
    """
    self._CacheSpecificMappingsCreateTables( file_service_id, tag_service_id )
    hash_ids = self.modules_files_storage.GetCurrentHashIdsList( file_service_id )
    BLOCK_SIZE = 10000
    for ( i, block_of_hash_ids ) in enumerate( HydrusData.SplitListIntoChunks( hash_ids, BLOCK_SIZE ) ):
        with self._MakeTemporaryIntegerTable( block_of_hash_ids, 'hash_id' ) as temp_hash_id_table_name:
            self._CacheSpecificMappingsAddFiles( file_service_id, tag_service_id, block_of_hash_ids, temp_hash_id_table_name )
    self.modules_db_maintenance.TouchAnalyzeNewTables()
    self.modules_mappings_cache_specific_display.Generate( file_service_id, tag_service_id, populate_from_storage = True )
def _CacheSpecificMappingsGetFilteredHashesGenerator( self, file_service_ids, tag_service_id, hash_ids ):
    """Build a FilteredHashesGenerator mapping each file service to the subset of hash_ids currently in it."""
    valid_hash_ids_lookup = collections.defaultdict( set )
    with self._MakeTemporaryIntegerTable( hash_ids, 'hash_id' ) as temp_table_name:
        for service_id in file_service_ids:
            # restrict the candidate hashes to files current in this domain
            table_join = self.modules_files_storage.GetTableJoinLimitedByFileDomain( service_id, temp_table_name, HC.CONTENT_STATUS_CURRENT )
            valid_hash_ids_lookup[ service_id ] = self._STS( self._Execute( 'SELECT hash_id FROM {};'.format( table_join ) ) )
    return FilteredHashesGenerator( valid_hash_ids_lookup )
def _CacheSpecificMappingsGetFilteredMappingsGenerator( self, file_service_ids, tag_service_id, mappings_ids ):
    """Build a FilteredMappingsGenerator: per file service, the hashes from mappings_ids currently in that service."""
    all_hash_ids = set( itertools.chain.from_iterable( ( hash_ids for ( tag_id, hash_ids ) in mappings_ids ) ) )
    valid_hash_ids_lookup = collections.defaultdict( set )
    with self._MakeTemporaryIntegerTable( all_hash_ids, 'hash_id' ) as temp_table_name:
        for service_id in file_service_ids:
            # restrict every mapping's hashes to files current in this domain
            table_join = self.modules_files_storage.GetTableJoinLimitedByFileDomain( service_id, temp_table_name, HC.CONTENT_STATUS_CURRENT )
            valid_hash_ids_lookup[ service_id ] = self._STS( self._Execute( 'SELECT hash_id FROM {};'.format( table_join ) ) )
    return FilteredMappingsGenerator( valid_hash_ids_lookup, mappings_ids )
def _CacheSpecificMappingsPendMappings( self, tag_service_id, tag_id, hash_ids, filtered_hashes_generator: FilteredHashesGenerator ):
    """Pend a (tag_id, hash_ids) mapping in every relevant file service's specific cache.

    Inserts pending rows, bumps the pending count by what was actually inserted,
    then cascades to the specific display cache.
    """
    for ( file_service_id, filtered_hash_ids ) in filtered_hashes_generator.IterateHashes( hash_ids ):
        ( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
        self._ExecuteMany( 'INSERT OR IGNORE INTO ' + cache_pending_mappings_table_name + ' ( hash_id, tag_id ) VALUES ( ?, ? );', ( ( hash_id, tag_id ) for hash_id in filtered_hash_ids ) )
        # OR IGNORE skips rows already pending, so count real insertions
        num_added = self._GetRowCount()
        if num_added > 0:
            counts_cache_changes = [ ( tag_id, 0, num_added ) ]
            self.modules_mappings_counts_update.AddCounts( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, counts_cache_changes )
        self.modules_mappings_cache_specific_display.PendMappings( file_service_id, tag_service_id, tag_id, filtered_hash_ids )
def _CacheSpecificMappingsRegeneratePending( self, file_service_id, tag_service_id, status_hook = None ):
    """Rebuild the pending rows and pending counts of this (file domain, tag service) specific cache.

    Pending rows are re-derived from the storage pending mappings, limited to files
    current in this file domain, then the specific display cache is regenerated.
    """
    ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = ClientDBMappingsStorage.GenerateMappingsTableNames( tag_service_id )
    ( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
    if status_hook is not None:
        message = 'clearing old specific data'
        status_hook( message )
    all_pending_storage_tag_ids = self._STS( self._Execute( 'SELECT DISTINCT tag_id FROM {};'.format( pending_mappings_table_name ) ) )
    self.modules_mappings_counts.ClearCounts( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, keep_current = True )
    self._Execute( 'DELETE FROM {};'.format( cache_pending_mappings_table_name ) )
    counts_cache_changes = []
    num_to_do = len( all_pending_storage_tag_ids )
    # join that limits storage pending mappings to files current in this file domain
    select_table_join = self.modules_files_storage.GetTableJoinLimitedByFileDomain( file_service_id, pending_mappings_table_name, HC.CONTENT_STATUS_CURRENT )
    for ( i, storage_tag_id ) in enumerate( all_pending_storage_tag_ids ):
        if i % 100 == 0 and status_hook is not None:
            message = 'regenerating pending tags {}'.format( HydrusData.ConvertValueRangeToPrettyString( i + 1, num_to_do ) )
            status_hook( message )
        self._Execute( 'INSERT OR IGNORE INTO {} ( tag_id, hash_id ) SELECT tag_id, hash_id FROM {} WHERE tag_id = ?;'.format( cache_pending_mappings_table_name, select_table_join ), ( storage_tag_id, ) )
        pending_delta = self._GetRowCount()
        counts_cache_changes.append( ( storage_tag_id, 0, pending_delta ) )
    self.modules_mappings_counts_update.AddCounts( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, counts_cache_changes )
    self.modules_mappings_cache_specific_display.RegeneratePending( file_service_id, tag_service_id, status_hook = status_hook )
def _CacheSpecificMappingsRescindPendingMappings( self, tag_service_id, tag_id, hash_ids, filtered_hashes_generator: FilteredHashesGenerator ):
    """Rescind a pending (tag_id, hash_ids) mapping from every relevant file service's specific cache.

    For each file domain: cascade the rescind to the display cache, delete the pending
    rows, and reduce the pending count by what was actually removed.
    (Removed an unused per-loop `ac_counts = collections.Counter()` local — it was
    never read; count deltas are reported directly via counts_cache_changes.)
    """
    for ( file_service_id, filtered_hash_ids ) in filtered_hashes_generator.IterateHashes( hash_ids ):
        ( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
        # display first, while the pending rows it reasons about still exist
        self.modules_mappings_cache_specific_display.RescindPendingMappings( file_service_id, tag_service_id, tag_id, filtered_hash_ids )
        self._ExecuteMany( 'DELETE FROM ' + cache_pending_mappings_table_name + ' WHERE hash_id = ? AND tag_id = ?;', ( ( hash_id, tag_id ) for hash_id in filtered_hash_ids ) )
        num_deleted = self._GetRowCount()
        if num_deleted > 0:
            counts_cache_changes = [ ( tag_id, 0, num_deleted ) ]
            self.modules_mappings_counts_update.ReduceCounts( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, counts_cache_changes )
def _CacheTagDisplayForceFullSyncTagsOnSpecifics( self, tag_service_id, file_service_ids ):
    """Force the specific display caches into agreement with the actual sibling/parent implications.

    Every tag involved in an actual sibling or parent relationship is compared against
    the bare storage identity implication { tag_id }; missing implications are added
    and stale ones removed per file domain, then the tag-cache rows are resynced.
    """
    tag_ids_in_dispute = set()
    tag_ids_in_dispute.update( self.modules_tag_siblings.GetAllTagIds( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id ) )
    tag_ids_in_dispute.update( self.modules_tag_parents.GetAllTagIds( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id ) )
    for tag_id in tag_ids_in_dispute:
        # in pure storage, a tag implies only itself
        storage_implication_tag_ids = { tag_id }
        actual_implication_tag_ids = self.modules_tag_display.GetImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, tag_id )
        add_implication_tag_ids = actual_implication_tag_ids.difference( storage_implication_tag_ids )
        if len( add_implication_tag_ids ) > 0:
            for file_service_id in file_service_ids:
                self.modules_mappings_cache_specific_display.AddImplications( file_service_id, tag_service_id, add_implication_tag_ids, tag_id )
        delete_implication_tag_ids = storage_implication_tag_ids.difference( actual_implication_tag_ids )
        if len( delete_implication_tag_ids ) > 0:
            for file_service_id in file_service_ids:
                self.modules_mappings_cache_specific_display.DeleteImplications( file_service_id, tag_service_id, delete_implication_tag_ids, tag_id )
    for block_of_tag_ids in HydrusData.SplitIteratorIntoChunks( tag_ids_in_dispute, 1024 ):
        self._CacheTagsSyncTags( tag_service_id, block_of_tag_ids, just_these_file_service_ids = file_service_ids )
def _CacheTagDisplayGetApplicationStatusNumbers( self, service_key ):
    """Return a dict summarising how much sibling/parent display sync work remains for a service.

    Keys: num_siblings_to_sync, num_parents_to_sync, num_actual_rows, num_ideal_rows,
    and waiting_on_tag_repos (human-readable strings for repositories with outstanding processing).
    """
    service_id = self.modules_services.GetServiceId( service_key )
    ( sibling_rows_to_add, sibling_rows_to_remove, parent_rows_to_add, parent_rows_to_remove, num_actual_rows, num_ideal_rows ) = self.modules_tag_display.GetApplicationStatus( service_id )
    waiting_on_tag_repos = []
    # parents first, then siblings, preserving the report order
    work_sources = (
        ( self.modules_tag_parents.GetApplicableServiceIds( service_id ), HC.CONTENT_TYPE_TAG_PARENTS ),
        ( self.modules_tag_siblings.GetApplicableServiceIds( service_id ), HC.CONTENT_TYPE_TAG_SIBLINGS )
    )
    for ( applicable_service_ids, content_type ) in work_sources:
        for applicable_service_id in applicable_service_ids:
            service = self.modules_services.GetService( applicable_service_id )
            if service.GetServiceType() != HC.TAG_REPOSITORY:
                continue
            if self.modules_repositories.HasLotsOfOutstandingLocalProcessing( applicable_service_id, ( content_type, ) ):
                waiting_on_tag_repos.append( 'waiting on {} for {} processing'.format( service.GetName(), HC.content_type_string_lookup[ content_type ] ) )
    return {
        'num_siblings_to_sync' : len( sibling_rows_to_add ) + len( sibling_rows_to_remove ),
        'num_parents_to_sync' : len( parent_rows_to_add ) + len( parent_rows_to_remove ),
        'num_actual_rows' : num_actual_rows,
        'num_ideal_rows' : num_ideal_rows,
        'waiting_on_tag_repos' : waiting_on_tag_repos
    }
def _CacheTagDisplaySync( self, service_key: bytes, work_time = 0.5 ):
    # Incrementally migrate the 'actual' tag sibling/parent lookup tables towards the
    # 'ideal' ones, one row at a time, updating all dependent mapping caches as each
    # row changes. Works until done or until work_time (seconds) has elapsed.
    # Returns True if more work remains for a future call.
    
    time_started = HydrusData.GetNowFloat()
    
    tag_service_id = self.modules_services.GetServiceId( service_key )
    
    # every tag whose implication chain changes in this call; used at the end to
    # regenerate tags managers and the tag search cache
    all_tag_ids_altered = set()
    
    ( sibling_rows_to_add, sibling_rows_to_remove, parent_rows_to_add, parent_rows_to_remove, num_actual_rows, num_ideal_rows ) = self.modules_tag_display.GetApplicationStatus( tag_service_id )
    
    while len( sibling_rows_to_add ) + len( sibling_rows_to_remove ) + len( parent_rows_to_add ) + len( parent_rows_to_remove ) > 0 and not HydrusData.TimeHasPassedFloat( time_started + work_time ):
        
        # therefore, we are now going to break the migration into smaller pieces
        # I spent a large amount of time trying to figure out a way to _completely_ sync subsets of a chain's tags. this was a gigantic logical pain and complete sync couldn't get neat subsets in certain situations
        #
        # ( a large ASCII-art gravestone previously lived here )
        #
        # IN MEMORIAM
        # tag_ids_to_trunkward_additional_implication_work_weight
        #
        # I am now moving to table row addition/subtraction. we'll try to move one row at a time and do the smallest amount of work
        # I can always remove a sibling row from actual and stay valid. this does not invalidate ideals in parents table
        # I can always remove a parent row from actual and stay valid
        # I know I can copy a parent to actual if the tags aren't in any pending removes
        # If we need to remove 1,000 mappings and then add 500 to be correct, we'll be doing 1,500 total no matter the order we do them in. This 1,000/500 is not the sum of all the current rows' individual current estimated work.
        # When removing, the sum overestimates, when adding, the sum underestimates. The number of sibling/parent rows to change is obviously also the same.
        # When you remove a row, the other row estimates may stay as weighty, or they may get less. (e.g. removing sibling A->B makes the parent B->C easier to remove later)
        # When you add a row, the other row estimates may stay as weighty, or they may get more. (e.g. adding parent A->B makes adding the sibling b->B more difficult later on)
        # The main priority of this function is to reduce each piece of work time.
        # When removing, we can break down the large jobs by doing small jobs. So, by doing small jobs first, we reduce max job time.
        # However, if we try that strategy when adding, we actually increase max job time, as those delayed big jobs only have the option of staying the same or getting bigger! We get zoom speed and then clunk mode.
        # Therefore, when adding, to limit max work time for the whole migration, we want to actually choose the largest jobs first! That work has to be done, and it doesn't get easier!
        
        ( cache_ideal_tag_siblings_lookup_table_name, cache_actual_tag_siblings_lookup_table_name ) = ClientDBTagSiblings.GenerateTagSiblingsLookupCacheTableNames( tag_service_id )
        ( cache_ideal_tag_parents_lookup_table_name, cache_actual_tag_parents_lookup_table_name ) = ClientDBTagParents.GenerateTagParentsLookupCacheTableNames( tag_service_id )
        
        def GetWeightedSiblingRow( sibling_rows, index ):
            
            # returns ( weight, row ) for the row at sorted-by-weight position 'index';
            # weight estimates how many mapping updates applying/removing the row costs
            
            ideal_tag_ids = { ideal_tag_id for ( bad_tag_id, ideal_tag_id ) in sibling_rows }
            
            ideal_tag_ids_to_implies = self.modules_tag_display.GetTagsToImplies( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, ideal_tag_ids )
            
            bad_tag_ids = { bad_tag_id for ( bad_tag_id, ideal_tag ) in sibling_rows }
            
            bad_tag_ids_to_count = self.modules_mappings_counts.GetCountsEstimate( ClientTags.TAG_DISPLAY_STORAGE, tag_service_id, self.modules_services.combined_file_service_id, bad_tag_ids, True, True )
            
            # '+ 1' keeps zero-count rows from all tying at weight 0
            weight_and_rows = [ ( bad_tag_ids_to_count[ b ] * len( ideal_tag_ids_to_implies[ i ] ) + 1, ( b, i ) ) for ( b, i ) in sibling_rows ]
            
            weight_and_rows.sort()
            
            return weight_and_rows[ index ]
            
        
        def GetWeightedParentRow( parent_rows, index ):
            
            # as GetWeightedSiblingRow, but a parent row's cost scales with everything
            # that implies the child tag
            
            child_tag_ids = { c for ( c, a ) in parent_rows }
            
            child_tag_ids_to_implied_by = self.modules_tag_display.GetTagsToImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, child_tag_ids )
            
            all_child_tags = set( child_tag_ids )
            
            all_child_tags.update( itertools.chain.from_iterable( child_tag_ids_to_implied_by.values() ) )
            
            child_tag_ids_to_count = self.modules_mappings_counts.GetCountsEstimate( ClientTags.TAG_DISPLAY_STORAGE, tag_service_id, self.modules_services.combined_file_service_id, all_child_tags, True, True )
            
            weight_and_rows = [ ( sum( ( child_tag_ids_to_count[ implied_by ] for implied_by in child_tag_ids_to_implied_by[ c ] ) ), ( c, p ) ) for ( c, p ) in parent_rows ]
            
            weight_and_rows.sort()
            
            return weight_and_rows[ index ]
            
        
        # removals first: they always leave the tables valid, and doing small ones
        # first keeps each iteration cheap (see strategy note above)
        some_removee_sibling_rows = HydrusData.SampleSetByGettingFirst( sibling_rows_to_remove, 20 )
        some_removee_parent_rows = HydrusData.SampleSetByGettingFirst( parent_rows_to_remove, 20 )
        
        if len( some_removee_sibling_rows ) + len( some_removee_parent_rows ) > 0:
            
            smallest_sibling_weight = None
            smallest_sibling_row = None
            smallest_parent_weight = None
            smallest_parent_row = None
            
            if len( some_removee_sibling_rows ) > 0:
                
                ( smallest_sibling_weight, smallest_sibling_row ) = GetWeightedSiblingRow( some_removee_sibling_rows, 0 )
                
            
            if len( some_removee_parent_rows ) > 0:
                
                ( smallest_parent_weight, smallest_parent_row ) = GetWeightedParentRow( some_removee_parent_rows, 0 )
                
            
            # if both kinds are available, keep only the cheaper one this iteration
            if smallest_sibling_weight is not None and smallest_parent_weight is not None:
                
                if smallest_sibling_weight < smallest_parent_weight:
                    
                    smallest_parent_weight = None
                    smallest_parent_row = None
                    
                else:
                    
                    smallest_sibling_weight = None
                    smallest_sibling_row = None
                    
                
            
            if smallest_sibling_row is not None:
                
                # the only things changed here are those implied by or that imply one of these values
                
                ( a, b ) = smallest_sibling_row
                
                possibly_affected_tag_ids = { a, b }
                
                possibly_affected_tag_ids.update( self.modules_tag_display.GetImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, a ) )
                possibly_affected_tag_ids.update( self.modules_tag_display.GetImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, b ) )
                possibly_affected_tag_ids.update( self.modules_tag_display.GetImplies( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, a ) )
                possibly_affected_tag_ids.update( self.modules_tag_display.GetImplies( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, b ) )
                
                # snapshot implications before and after the row change so we can diff
                previous_chain_tag_ids_to_implied_by = self.modules_tag_display.GetTagsToImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, possibly_affected_tag_ids )
                
                self._Execute( 'DELETE FROM {} WHERE bad_tag_id = ? AND ideal_tag_id = ?;'.format( cache_actual_tag_siblings_lookup_table_name ), smallest_sibling_row )
                
                after_chain_tag_ids_to_implied_by = self.modules_tag_display.GetTagsToImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, possibly_affected_tag_ids )
                
                self.modules_tag_siblings.NotifySiblingDeleteRowSynced( tag_service_id, smallest_sibling_row )
                
            
            if smallest_parent_row is not None:
                
                # the only things changed here are those implied by or that imply one of these values
                
                ( a, b ) = smallest_parent_row
                
                possibly_affected_tag_ids = { a, b }
                
                possibly_affected_tag_ids.update( self.modules_tag_display.GetImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, a ) )
                possibly_affected_tag_ids.update( self.modules_tag_display.GetImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, b ) )
                possibly_affected_tag_ids.update( self.modules_tag_display.GetImplies( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, a ) )
                possibly_affected_tag_ids.update( self.modules_tag_display.GetImplies( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, b ) )
                
                previous_chain_tag_ids_to_implied_by = self.modules_tag_display.GetTagsToImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, possibly_affected_tag_ids )
                
                self._Execute( 'DELETE FROM {} WHERE child_tag_id = ? AND ancestor_tag_id = ?;'.format( cache_actual_tag_parents_lookup_table_name ), smallest_parent_row )
                
                after_chain_tag_ids_to_implied_by = self.modules_tag_display.GetTagsToImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, possibly_affected_tag_ids )
                
                self.modules_tag_parents.NotifyParentDeleteRowSynced( tag_service_id, smallest_parent_row )
                
            
        else:
            
            # there is nothing to remove, so we'll now go for what is in ideal but not actual
            # for additions we pick the LARGEST job first (see strategy note above)
            
            some_addee_sibling_rows = HydrusData.SampleSetByGettingFirst( sibling_rows_to_add, 20 )
            some_addee_parent_rows = HydrusData.SampleSetByGettingFirst( parent_rows_to_add, 20 )
            
            if len( some_addee_sibling_rows ) + len( some_addee_parent_rows ) > 0:
                
                largest_sibling_weight = None
                largest_sibling_row = None
                largest_parent_weight = None
                largest_parent_row = None
                
                if len( some_addee_sibling_rows ) > 0:
                    
                    ( largest_sibling_weight, largest_sibling_row ) = GetWeightedSiblingRow( some_addee_sibling_rows, -1 )
                    
                
                if len( some_addee_parent_rows ) > 0:
                    
                    ( largest_parent_weight, largest_parent_row ) = GetWeightedParentRow( some_addee_parent_rows, -1 )
                    
                
                if largest_sibling_weight is not None and largest_parent_weight is not None:
                    
                    if largest_sibling_weight > largest_parent_weight:
                        
                        largest_parent_weight = None
                        largest_parent_row = None
                        
                    else:
                        
                        largest_sibling_weight = None
                        largest_sibling_row = None
                        
                    
                
                if largest_sibling_row is not None:
                    
                    ( a, b ) = largest_sibling_row
                    
                    possibly_affected_tag_ids = { a, b }
                    
                    possibly_affected_tag_ids.update( self.modules_tag_display.GetImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, a ) )
                    possibly_affected_tag_ids.update( self.modules_tag_display.GetImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, b ) )
                    possibly_affected_tag_ids.update( self.modules_tag_display.GetImplies( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, a ) )
                    possibly_affected_tag_ids.update( self.modules_tag_display.GetImplies( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, b ) )
                    
                    previous_chain_tag_ids_to_implied_by = self.modules_tag_display.GetTagsToImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, possibly_affected_tag_ids )
                    
                    self._Execute( 'INSERT OR IGNORE INTO {} ( bad_tag_id, ideal_tag_id ) VALUES ( ?, ? );'.format( cache_actual_tag_siblings_lookup_table_name ), largest_sibling_row )
                    
                    after_chain_tag_ids_to_implied_by = self.modules_tag_display.GetTagsToImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, possibly_affected_tag_ids )
                    
                    self.modules_tag_siblings.NotifySiblingAddRowSynced( tag_service_id, largest_sibling_row )
                    
                
                if largest_parent_row is not None:
                    
                    ( a, b ) = largest_parent_row
                    
                    possibly_affected_tag_ids = { a, b }
                    
                    possibly_affected_tag_ids.update( self.modules_tag_display.GetImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, a ) )
                    possibly_affected_tag_ids.update( self.modules_tag_display.GetImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, b ) )
                    possibly_affected_tag_ids.update( self.modules_tag_display.GetImplies( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, a ) )
                    possibly_affected_tag_ids.update( self.modules_tag_display.GetImplies( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, b ) )
                    
                    previous_chain_tag_ids_to_implied_by = self.modules_tag_display.GetTagsToImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, possibly_affected_tag_ids )
                    
                    self._Execute( 'INSERT OR IGNORE INTO {} ( child_tag_id, ancestor_tag_id ) VALUES ( ?, ? );'.format( cache_actual_tag_parents_lookup_table_name ), largest_parent_row )
                    
                    after_chain_tag_ids_to_implied_by = self.modules_tag_display.GetTagsToImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, possibly_affected_tag_ids )
                    
                    self.modules_tag_parents.NotifyParentAddRowSynced( tag_service_id, largest_parent_row )
                    
                
            else:
                
                # application status said there was work, but sampling found none: bail
                break
                
            
        
        # diff the before/after implication snapshots from whichever branch ran above
        # and push the changes into the display mapping caches
        
        tag_ids_to_delete_implied_by = collections.defaultdict( set )
        tag_ids_to_add_implied_by = collections.defaultdict( set )
        
        for tag_id in possibly_affected_tag_ids:
            
            previous_implied_by = previous_chain_tag_ids_to_implied_by[ tag_id ]
            after_implied_by = after_chain_tag_ids_to_implied_by[ tag_id ]
            
            to_delete = previous_implied_by.difference( after_implied_by )
            to_add = after_implied_by.difference( previous_implied_by )
            
            if len( to_delete ) > 0:
                
                tag_ids_to_delete_implied_by[ tag_id ] = to_delete
                
                all_tag_ids_altered.add( tag_id )
                all_tag_ids_altered.update( to_delete )
                
            
            if len( to_add ) > 0:
                
                tag_ids_to_add_implied_by[ tag_id ] = to_add
                
                all_tag_ids_altered.add( tag_id )
                all_tag_ids_altered.update( to_add )
                
            
        
        # per-file-service specific display caches
        file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES )
        
        for file_service_id in file_service_ids:
            
            for ( tag_id, implication_tag_ids ) in tag_ids_to_delete_implied_by.items():
                
                self.modules_mappings_cache_specific_display.DeleteImplications( file_service_id, tag_service_id, implication_tag_ids, tag_id )
                
            
            for ( tag_id, implication_tag_ids ) in tag_ids_to_add_implied_by.items():
                
                self.modules_mappings_cache_specific_display.AddImplications( file_service_id, tag_service_id, implication_tag_ids, tag_id )
                
            
        
        # combined-files display cache
        for ( tag_id, implication_tag_ids ) in tag_ids_to_delete_implied_by.items():
            
            self._CacheCombinedFilesDisplayMappingsDeleteImplications( tag_service_id, implication_tag_ids, tag_id )
            
        
        for ( tag_id, implication_tag_ids ) in tag_ids_to_add_implied_by.items():
            
            self._CacheCombinedFilesDisplayMappingsAddImplications( tag_service_id, implication_tag_ids, tag_id )
            
        
        # refresh the remaining-work picture for the while condition
        ( sibling_rows_to_add, sibling_rows_to_remove, parent_rows_to_add, parent_rows_to_remove, num_actual_rows, num_ideal_rows ) = self.modules_tag_display.GetApplicationStatus( tag_service_id )
        
    
    if len( all_tag_ids_altered ) > 0:
        
        self._regen_tags_managers_tag_ids.update( all_tag_ids_altered )
        
        self._CacheTagsSyncTags( tag_service_id, all_tag_ids_altered )
        
    
    self._cursor_transaction_wrapper.pub_after_job( 'notify_new_tag_display_sync_status', service_key )
    
    still_needs_work = len( sibling_rows_to_add ) + len( sibling_rows_to_remove ) + len( parent_rows_to_add ) + len( parent_rows_to_remove ) > 0
    
    return still_needs_work
def _CacheTagsPopulate( self, file_service_id, tag_service_id, status_hook = None ):
    # Fill the tag search cache for this (file service, tag service) pair with every
    # tag that has current mappings or appears in the actual sibling/parent lookups.
    # status_hook, if given, receives progress strings.
    
    actual_siblings_table = ClientDBTagSiblings.GenerateTagSiblingsLookupCacheTableName( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id )
    actual_parents_table = ClientDBTagParents.GenerateTagParentsLookupCacheTableName( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id )
    
    select_phrases = [
        self.modules_mappings_counts.GetQueryPhraseForCurrentTagIds( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id ),
        'SELECT DISTINCT bad_tag_id FROM {}'.format( actual_siblings_table ),
        'SELECT ideal_tag_id FROM {}'.format( actual_siblings_table ),
        'SELECT DISTINCT child_tag_id FROM {}'.format( actual_parents_table ),
        'SELECT DISTINCT ancestor_tag_id FROM {}'.format( actual_parents_table )
    ]
    
    # UNION also deduplicates ids across the five selects
    union_query = ' UNION '.join( select_phrases ) + ';'
    
    chunk_size = 10000
    
    for ( tag_id_chunk, num_done, num_to_do ) in HydrusDB.ReadLargeIdQueryInSeparateChunks( self._c, union_query, chunk_size ):
        
        self.modules_tag_search.AddTags( file_service_id, tag_service_id, tag_id_chunk )
        
        progress_message = HydrusData.ConvertValueRangeToPrettyString( num_done, num_to_do )
        
        self._controller.frame_splash_status.SetSubtext( progress_message )
        
        if status_hook is not None:
            
            status_hook( progress_message )
            
        
    
    self.modules_db_maintenance.TouchAnalyzeNewTables()
def _CacheTagsSyncTags( self, tag_service_id, tag_ids, just_these_file_service_ids = None ):
    # Bring the tag search cache rows for these tag_ids into agreement with what
    # should exist: chained tags always belong; unchained tags belong only where
    # they have storage mapping counts.
    
    if len( tag_ids ) == 0:
        
        return
        
    
    if just_these_file_service_ids is not None:
        
        file_service_ids = just_these_file_service_ids
        
    else:
        
        file_service_ids = list( self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_TAG_LOOKUP_CACHES ) )
        
        file_service_ids.append( self.modules_services.combined_file_service_id )
        
    
    chained_tag_ids = self.modules_tag_display.FilterChained( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, tag_ids )
    unchained_tag_ids = set( tag_ids ).difference( chained_tag_ids )
    
    with self._MakeTemporaryIntegerTable( tag_ids, 'tag_id' ) as temp_all_tag_ids_table, self._MakeTemporaryIntegerTable( unchained_tag_ids, 'tag_id' ) as temp_unchained_tag_ids_table:
        
        for file_service_id in file_service_ids:
            
            # what the search cache currently holds for these tags
            present_tag_ids = self.modules_tag_search.FilterExistingTagIds( file_service_id, tag_service_id, temp_all_tag_ids_table )
            
            # which unchained tags actually have counts in this file domain
            counted_unchained_tag_ids = self.modules_mappings_counts.FilterExistingTagIds( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, temp_unchained_tag_ids_table )
            
            desired_tag_ids = chained_tag_ids.union( counted_unchained_tag_ids )
            undesired_tag_ids = unchained_tag_ids.difference( counted_unchained_tag_ids )
            
            self.modules_tag_search.AddTags( file_service_id, tag_service_id, desired_tag_ids.difference( present_tag_ids ) )
            self.modules_tag_search.DeleteTags( file_service_id, tag_service_id, present_tag_ids.intersection( undesired_tag_ids ) )
            
        
    
def _CheckDBIntegrity( self ):
    # Run PRAGMA integrity_check against every attached database file, logging any
    # error rows and reporting progress through a cancellable modal popup.
    
    prefix_string = 'checking db integrity: '
    
    job_key = ClientThreading.JobKey( cancellable = True )
    
    # initialised before the try block: the finally clause reads num_errors, so an
    # exception raised before the old in-try assignment would have produced a
    # NameError that masked the real error
    num_errors = 0
    
    try:
        
        job_key.SetStatusTitle( prefix_string + 'preparing' )
        
        self._controller.pub( 'modal_message', job_key )
        
        job_key.SetStatusTitle( prefix_string + 'running' )
        job_key.SetVariable( 'popup_text_1', 'errors found so far: ' + HydrusData.ToHumanInt( num_errors ) )
        
        # skip sqlite's internal/temporary schemas
        db_names = [ name for ( index, name, path ) in self._Execute( 'PRAGMA database_list;' ) if name not in ( 'mem', 'temp', 'durable_temp' ) ]
        
        for db_name in db_names:
            
            # integrity_check yields one 'ok' row on success, or one row per problem
            for ( text, ) in self._Execute( 'PRAGMA ' + db_name + '.integrity_check;' ):
                
                ( i_paused, should_quit ) = job_key.WaitIfNeeded()
                
                if should_quit:
                    
                    job_key.SetStatusTitle( prefix_string + 'cancelled' )
                    job_key.SetVariable( 'popup_text_1', 'errors found: ' + HydrusData.ToHumanInt( num_errors ) )
                    
                    # NOTE(review): the finally clause below overwrites this
                    # 'cancelled' title with 'completed' — confirm that is intended
                    return
                    
                
                if text != 'ok':
                    
                    if num_errors == 0:
                        
                        HydrusData.Print( 'During a db integrity check, these errors were discovered:' )
                        
                    
                    HydrusData.Print( text )
                    
                    num_errors += 1
                    
                
                job_key.SetVariable( 'popup_text_1', 'errors found so far: ' + HydrusData.ToHumanInt( num_errors ) )
                
            
        
    finally:
        
        job_key.SetStatusTitle( prefix_string + 'completed' )
        job_key.SetVariable( 'popup_text_1', 'errors found: ' + HydrusData.ToHumanInt( num_errors ) )
        
        HydrusData.Print( job_key.ToString() )
        
        job_key.Finish()
        
    
def _CleanAfterJobWork( self ):
    # Reset the per-job accumulators this class adds on top of HydrusDB, then let
    # the base class perform its own post-job cleanup.
    
    self._regen_tags_managers_hash_ids = set()
    self._regen_tags_managers_tag_ids = set()
    self._after_job_content_update_jobs = []
    
    HydrusDB.HydrusDB._CleanAfterJobWork( self )
def _ClearOrphanFileRecords( self ):
    # Find and delete file records that exist in the local file domains but not in
    # the 'all local files' umbrella domain, or vice versa, reporting through a
    # cancellable modal popup.
    
    job_key = ClientThreading.JobKey( cancellable = True )
    
    job_key.SetStatusTitle( 'clear orphan file records' )
    
    self._controller.pub( 'modal_message', job_key )
    
    try:
        
        job_key.SetVariable( 'popup_text_1', 'looking for orphans' )
        
        # NOTE(review): only the file and trash domains are unioned here — confirm no
        # other domain (e.g. repository updates) is expected to contribute to
        # 'all local files' membership
        local_file_service_ids = self.modules_services.GetServiceIds( ( HC.LOCAL_FILE_DOMAIN, HC.LOCAL_FILE_TRASH_DOMAIN ) )
        
        local_hash_ids = set()
        
        for local_file_service_id in local_file_service_ids:
            
            some_hash_ids = self.modules_files_storage.GetCurrentHashIdsList( local_file_service_id )
            
            local_hash_ids.update( some_hash_ids )
            
        
        combined_local_hash_ids = set( self.modules_files_storage.GetCurrentHashIdsList( self.modules_services.combined_local_file_service_id ) )
        
        in_local_not_in_combined = local_hash_ids.difference( combined_local_hash_ids )
        in_combined_not_in_local = combined_local_hash_ids.difference( local_hash_ids )
        
        if job_key.IsCancelled():
            
            return
            
        
        job_key.SetVariable( 'popup_text_1', 'deleting orphans' )
        
        if len( in_local_not_in_combined ) > 0:
            
            # deleting from the combined domain cascades the record removal
            self._DeleteFiles( self.modules_services.combined_local_file_service_id, in_local_not_in_combined )
            
            for hash_id in in_local_not_in_combined:
                
                self.modules_similar_files.StopSearchingFile( hash_id )
                
            
            HydrusData.ShowText( 'Found and deleted ' + HydrusData.ToHumanInt( len( in_local_not_in_combined ) ) + ' local domain orphan file records.' )
            
        
        if job_key.IsCancelled():
            
            return
            
        
        if len( in_combined_not_in_local ) > 0:
            
            self._DeleteFiles( self.modules_services.combined_local_file_service_id, in_combined_not_in_local )
            
            for hash_id in in_combined_not_in_local:
                
                self.modules_similar_files.StopSearchingFile( hash_id )
                
            
            HydrusData.ShowText( 'Found and deleted ' + HydrusData.ToHumanInt( len( in_combined_not_in_local ) ) + ' combined domain orphan file records.' )
            
        
        if len( in_local_not_in_combined ) == 0 and len( in_combined_not_in_local ) == 0:
            
            HydrusData.ShowText( 'No orphan file records found!' )
            
        
    finally:
        
        job_key.SetVariable( 'popup_text_1', 'done!' )
        
        job_key.Finish()
        
    
def _ClearOrphanTables( self ):
    # Drop any db tables that no longer belong to any registered service, as judged
    # by each db module against the full set of existing table names.
    
    all_table_names = set()
    
    db_names = [ name for ( index, name, path ) in self._Execute( 'PRAGMA database_list;' ) if name not in ( 'mem', 'temp', 'durable_temp' ) ]
    
    for db_name in db_names:
        
        table_names = self._STS( self._Execute( 'SELECT name FROM {}.sqlite_master WHERE type = ?;'.format( db_name ), ( 'table', ) ) )
        
        # qualify non-main tables so DROP statements hit the right attached db
        if db_name != 'main':
            
            table_names = { '{}.{}'.format( db_name, table_name ) for table_name in table_names }
            
        
        all_table_names.update( table_names )
        
    
    all_surplus_table_names = set()
    
    for module in self._modules:
        
        all_surplus_table_names.update( module.GetSurplusServiceTableNames( all_table_names ) )
        
    
    # fixed: the report and the drop loop previously read the loop-local
    # surplus_table_names (i.e. only the LAST module's result), so the aggregated
    # all_surplus_table_names was built but never used
    if len( all_surplus_table_names ) == 0:
        
        HydrusData.ShowText( 'No orphan tables!' )
        
        return
        
    
    for table_name in all_surplus_table_names:
        
        HydrusData.ShowText( 'Dropping ' + table_name )
        
        self._Execute( 'DROP table ' + table_name + ';' )
        
    
def _CreateDB( self ):
    # First-boot initialisation: create every core table, register the default
    # services, and store the default serialised objects for a fresh client db.
    
    client_files_default = os.path.join( self._db_dir, 'client_files' )
    
    HydrusPaths.MakeSureDirectoryExists( client_files_default )
    
    # each db module owns and creates its own tables and indices
    for module in self._modules:
        
        module.CreateInitialTables()
        module.CreateInitialIndices()
        
    
    self._Execute( 'CREATE TABLE version ( version INTEGER );' )
    
    # file/thumbnail storage locations
    self._Execute( 'CREATE TABLE IF NOT EXISTS client_files_locations ( prefix TEXT, location TEXT );' )
    
    self._Execute( 'CREATE TABLE IF NOT EXISTS ideal_client_files_locations ( location TEXT, weight INTEGER );' )
    
    self._Execute( 'CREATE TABLE IF NOT EXISTS ideal_thumbnail_override_location ( location TEXT );' )
    
    # per-file metadata tables
    self._Execute( 'CREATE TABLE IF NOT EXISTS file_notes ( hash_id INTEGER, name_id INTEGER, note_id INTEGER, PRIMARY KEY ( hash_id, name_id ) );' )
    self._CreateIndex( 'file_notes', [ 'note_id' ] )
    self._CreateIndex( 'file_notes', [ 'name_id' ] )
    
    self._Execute( 'CREATE TABLE IF NOT EXISTS local_ratings ( service_id INTEGER, hash_id INTEGER, rating REAL, PRIMARY KEY ( service_id, hash_id ) );' )
    self._CreateIndex( 'local_ratings', [ 'hash_id' ] )
    self._CreateIndex( 'local_ratings', [ 'rating' ] )
    
    self._Execute( 'CREATE TABLE IF NOT EXISTS file_modified_timestamps ( hash_id INTEGER PRIMARY KEY, file_modified_timestamp INTEGER );' )
    self._CreateIndex( 'file_modified_timestamps', [ 'file_modified_timestamp' ] )
    
    self._Execute( 'CREATE TABLE IF NOT EXISTS options ( options TEXT_YAML );', )
    
    self._Execute( 'CREATE TABLE IF NOT EXISTS recent_tags ( service_id INTEGER, tag_id INTEGER, timestamp INTEGER, PRIMARY KEY ( service_id, tag_id ) );' )
    
    self._Execute( 'CREATE TABLE IF NOT EXISTS remote_thumbnails ( service_id INTEGER, hash_id INTEGER, PRIMARY KEY( service_id, hash_id ) );' )
    
    self._Execute( 'CREATE TABLE IF NOT EXISTS service_filenames ( service_id INTEGER, hash_id INTEGER, filename TEXT, PRIMARY KEY ( service_id, hash_id ) );' )
    self._CreateIndex( 'service_filenames', [ 'hash_id' ] )
    
    self._Execute( 'CREATE TABLE IF NOT EXISTS service_directories ( service_id INTEGER, directory_id INTEGER, num_files INTEGER, total_size INTEGER, note TEXT, PRIMARY KEY ( service_id, directory_id ) );' )
    self._CreateIndex( 'service_directories', [ 'directory_id' ] )
    
    self._Execute( 'CREATE TABLE IF NOT EXISTS service_directory_file_map ( service_id INTEGER, directory_id INTEGER, hash_id INTEGER, PRIMARY KEY ( service_id, directory_id, hash_id ) );' )
    self._CreateIndex( 'service_directory_file_map', [ 'directory_id' ] )
    self._CreateIndex( 'service_directory_file_map', [ 'hash_id' ] )
    
    self._Execute( 'CREATE TABLE IF NOT EXISTS service_info ( service_id INTEGER, info_type INTEGER, info INTEGER, PRIMARY KEY ( service_id, info_type ) );' )
    
    self._Execute( 'CREATE TABLE IF NOT EXISTS statuses ( status_id INTEGER PRIMARY KEY, status TEXT UNIQUE );' )
    
    self._Execute( 'CREATE TABLE IF NOT EXISTS url_map ( hash_id INTEGER, url_id INTEGER, PRIMARY KEY ( hash_id, url_id ) );' )
    self._CreateIndex( 'url_map', [ 'url_id' ] )
    
    self._Execute( 'CREATE TABLE IF NOT EXISTS file_viewing_stats ( hash_id INTEGER, canvas_type INTEGER, last_viewed_timestamp INTEGER, views INTEGER, viewtime INTEGER, PRIMARY KEY ( hash_id, canvas_type ) );' )
    self._CreateIndex( 'file_viewing_stats', [ 'last_viewed_timestamp' ] )
    self._CreateIndex( 'file_viewing_stats', [ 'views' ] )
    self._CreateIndex( 'file_viewing_stats', [ 'viewtime' ] )
    
    # point every 'f' (file) and 't' (thumbnail) hex prefix at the default location
    location = HydrusPaths.ConvertAbsPathToPortablePath( client_files_default )
    
    for prefix in HydrusData.IterateHexPrefixes():
        
        self._Execute( 'INSERT INTO client_files_locations ( prefix, location ) VALUES ( ?, ? );', ( 'f' + prefix, location ) )
        self._Execute( 'INSERT INTO client_files_locations ( prefix, location ) VALUES ( ?, ? );', ( 't' + prefix, location ) )
        
    
    self._Execute( 'INSERT INTO ideal_client_files_locations ( location, weight ) VALUES ( ?, ? );', ( location, 1 ) )
    
    # default services: ( service_key, service_type, human name )
    init_service_info = [
        ( CC.COMBINED_TAG_SERVICE_KEY, HC.COMBINED_TAG, 'all known tags' ),
        ( CC.COMBINED_FILE_SERVICE_KEY, HC.COMBINED_FILE, 'all known files' ),
        ( CC.COMBINED_DELETED_FILE_SERVICE_KEY, HC.COMBINED_DELETED_FILE, 'all deleted files' ),
        ( CC.COMBINED_LOCAL_FILE_SERVICE_KEY, HC.COMBINED_LOCAL_FILE, 'all local files' ),
        ( CC.LOCAL_FILE_SERVICE_KEY, HC.LOCAL_FILE_DOMAIN, 'my files' ),
        ( CC.TRASH_SERVICE_KEY, HC.LOCAL_FILE_TRASH_DOMAIN, 'trash' ),
        ( CC.LOCAL_UPDATE_SERVICE_KEY, HC.LOCAL_FILE_DOMAIN, 'repository updates' ),
        ( CC.DEFAULT_LOCAL_TAG_SERVICE_KEY, HC.LOCAL_TAG, 'my tags' ),
        ( CC.DEFAULT_LOCAL_DOWNLOADER_TAG_SERVICE_KEY, HC.LOCAL_TAG, 'downloader tags' ),
        ( CC.LOCAL_BOORU_SERVICE_KEY, HC.LOCAL_BOORU, 'local booru' ),
        ( CC.LOCAL_NOTES_SERVICE_KEY, HC.LOCAL_NOTES, 'local notes' ),
        ( CC.DEFAULT_FAVOURITES_RATING_SERVICE_KEY, HC.LOCAL_RATING_LIKE, 'favourites' ),
        ( CC.CLIENT_API_SERVICE_KEY, HC.CLIENT_API_SERVICE, 'client api' )
    ]
    
    for ( service_key, service_type, name ) in init_service_info:
        
        dictionary = ClientServices.GenerateDefaultServiceDictionary( service_type )
        
        # the default favourites service gets a star shape and like/dislike colours
        if service_key == CC.DEFAULT_FAVOURITES_RATING_SERVICE_KEY:
            
            from hydrus.client.metadata import ClientRatings
            
            dictionary[ 'shape' ] = ClientRatings.STAR
            
            # ( text colour, background colour ) rgb pairs per rating state
            like_colours = {}
            
            like_colours[ ClientRatings.LIKE ] = ( ( 0, 0, 0 ), ( 240, 240, 65 ) )
            like_colours[ ClientRatings.DISLIKE ] = ( ( 0, 0, 0 ), ( 200, 80, 120 ) )
            like_colours[ ClientRatings.NULL ] = ( ( 0, 0, 0 ), ( 191, 191, 191 ) )
            like_colours[ ClientRatings.MIXED ] = ( ( 0, 0, 0 ), ( 95, 95, 95 ) )
            
            dictionary[ 'colours' ] = list( like_colours.items() )
            
        
        self._AddService( service_key, service_type, name, dictionary )
        
    
    self._ExecuteMany( 'INSERT INTO yaml_dumps VALUES ( ?, ?, ? );', ( ( ClientDBSerialisable.YAML_DUMP_ID_IMAGEBOARD, name, imageboards ) for ( name, imageboards ) in ClientDefaults.GetDefaultImageboards() ) )
    
    # default client options, including a starter set of favourite tag filters
    new_options = ClientOptions.ClientOptions()
    
    new_options.SetSimpleDownloaderFormulae( ClientDefaults.GetDefaultSimpleDownloaderFormulae() )
    
    names_to_tag_filters = {}
    
    tag_filter = HydrusTags.TagFilter()
    
    tag_filter.SetRule( 'diaper', HC.FILTER_BLACKLIST )
    tag_filter.SetRule( 'gore', HC.FILTER_BLACKLIST )
    tag_filter.SetRule( 'guro', HC.FILTER_BLACKLIST )
    tag_filter.SetRule( 'scat', HC.FILTER_BLACKLIST )
    tag_filter.SetRule( 'vore', HC.FILTER_BLACKLIST )
    
    names_to_tag_filters[ 'example blacklist' ] = tag_filter
    
    tag_filter = HydrusTags.TagFilter()
    
    # blacklist everything, then whitelist a handful of namespaces
    tag_filter.SetRule( '', HC.FILTER_BLACKLIST )
    tag_filter.SetRule( ':', HC.FILTER_BLACKLIST )
    tag_filter.SetRule( 'series:', HC.FILTER_WHITELIST )
    tag_filter.SetRule( 'creator:', HC.FILTER_WHITELIST )
    tag_filter.SetRule( 'studio:', HC.FILTER_WHITELIST )
    tag_filter.SetRule( 'character:', HC.FILTER_WHITELIST )
    
    names_to_tag_filters[ 'basic namespaces only' ] = tag_filter
    
    tag_filter = HydrusTags.TagFilter()
    
    tag_filter.SetRule( ':', HC.FILTER_BLACKLIST )
    tag_filter.SetRule( 'series:', HC.FILTER_WHITELIST )
    tag_filter.SetRule( 'creator:', HC.FILTER_WHITELIST )
    tag_filter.SetRule( 'studio:', HC.FILTER_WHITELIST )
    tag_filter.SetRule( 'character:', HC.FILTER_WHITELIST )
    
    names_to_tag_filters[ 'basic booru tags only' ] = tag_filter
    
    tag_filter = HydrusTags.TagFilter()
    
    tag_filter.SetRule( 'title:', HC.FILTER_BLACKLIST )
    tag_filter.SetRule( 'filename:', HC.FILTER_BLACKLIST )
    tag_filter.SetRule( 'source:', HC.FILTER_BLACKLIST )
    tag_filter.SetRule( 'booru:', HC.FILTER_BLACKLIST )
    tag_filter.SetRule( 'url:', HC.FILTER_BLACKLIST )
    
    names_to_tag_filters[ 'exclude long/spammy namespaces' ] = tag_filter
    
    new_options.SetFavouriteTagFilters( names_to_tag_filters )
    
    self.modules_serialisable.SetJSONDump( new_options )
    
    # default serialised managers
    list_of_shortcuts = ClientDefaults.GetDefaultShortcuts()
    
    for shortcuts in list_of_shortcuts:
        
        self.modules_serialisable.SetJSONDump( shortcuts )
        
    
    client_api_manager = ClientAPI.APIManager()
    
    self.modules_serialisable.SetJSONDump( client_api_manager )
    
    bandwidth_manager = ClientNetworkingBandwidth.NetworkBandwidthManager()
    
    bandwidth_manager.SetDirty()
    
    ClientDefaults.SetDefaultBandwidthManagerRules( bandwidth_manager )
    
    self.modules_serialisable.SetJSONDump( bandwidth_manager )
    
    domain_manager = ClientNetworkingDomain.NetworkDomainManager()
    
    ClientDefaults.SetDefaultDomainManagerData( domain_manager )
    
    self.modules_serialisable.SetJSONDump( domain_manager )
    
    session_manager = ClientNetworkingSessions.NetworkSessionManager()
    
    session_manager.SetDirty()
    
    self.modules_serialisable.SetJSONDump( session_manager )
    
    login_manager = ClientNetworkingLogin.NetworkLoginManager()
    
    ClientDefaults.SetDefaultLoginManagerScripts( login_manager )
    
    self.modules_serialisable.SetJSONDump( login_manager )
    
    favourite_search_manager = ClientSearch.FavouriteSearchManager()
    
    ClientDefaults.SetDefaultFavouriteSearchManagerData( favourite_search_manager )
    
    self.modules_serialisable.SetJSONDump( favourite_search_manager )
    
    tag_display_manager = ClientTagsHandling.TagDisplayManager()
    
    self.modules_serialisable.SetJSONDump( tag_display_manager )
    
    from hydrus.client.gui.lists import ClientGUIListManager
    
    column_list_manager = ClientGUIListManager.ColumnListManager()
    
    self.modules_serialisable.SetJSONDump( column_list_manager )
    
    # final bootstrap rows: empty namespace, current software version, default scripts
    self._Execute( 'INSERT INTO namespaces ( namespace_id, namespace ) VALUES ( ?, ? );', ( 1, '' ) )
    
    self._Execute( 'INSERT INTO version ( version ) VALUES ( ? );', ( HC.SOFTWARE_VERSION, ) )
    
    self._ExecuteMany( 'INSERT INTO json_dumps_named VALUES ( ?, ?, ?, ?, ? );', ClientDefaults.GetDefaultScriptRows() )
def _CullFileViewingStatistics( self ):
    """Clamp stored file viewing stats to the user's configured per-canvas
    min/max viewtime limits.
    
    Validates both canvas configs up front so a bad preview setting cannot
    leave a half-finished cull after the media rows were already updated.
    
    :raises Exception: if a configured min time exceeds its max time
    """
    
    get_limit = self._controller.new_options.GetNoneableInteger
    
    # ( display name, min time, max time, canvas type )
    limits_and_canvases = [
        ( 'Media', get_limit( 'file_viewing_statistics_media_min_time' ), get_limit( 'file_viewing_statistics_media_max_time' ), CC.CANVAS_MEDIA_VIEWER ),
        ( 'Preview', get_limit( 'file_viewing_statistics_preview_min_time' ), get_limit( 'file_viewing_statistics_preview_max_time' ), CC.CANVAS_PREVIEW )
    ]
    
    # phase one: validate every config before touching any rows
    for ( name, min_time, max_time, canvas_type ) in limits_and_canvases:
        
        if min_time is not None and max_time is not None and min_time > max_time:
            
            raise Exception( '{} min was greater than {} max! Abandoning cull now!'.format( name, name.lower() ) )
            
        
    
    # phase two: apply the clamps
    for ( name, min_time, max_time, canvas_type ) in limits_and_canvases:
        
        if min_time is not None:
            
            # views implied by total viewtime at the minimum per-view rate
            self._Execute( 'UPDATE file_viewing_stats SET views = CAST( viewtime / ? AS INTEGER ) WHERE views * ? > viewtime AND canvas_type = ?;', ( min_time, min_time, canvas_type ) )
            
        
        if max_time is not None:
            
            # cap total viewtime at the maximum per-view rate
            self._Execute( 'UPDATE file_viewing_stats SET viewtime = views * ? WHERE viewtime > views * ? AND canvas_type = ?;', ( max_time, max_time, canvas_type ) )
            
        
    
def _DeleteFiles( self, service_id, hash_ids, only_if_current = False ):
    """Remove the given hash_ids from the given file service, maintaining
    deletion records, cached service counts, specific mapping caches, the
    combined deleted-file domain, and the trash.
    
    :param service_id: the file service to delete from; the combined local
        file service triggers a recursive physical delete
    :param hash_ids: the hash_ids to remove
    :param only_if_current: if True, only files actually current in this
        service get a delete record (used by the recursive call above)
    """
    
    local_file_service_ids = self.modules_services.GetServiceIds( ( HC.LOCAL_FILE_DOMAIN, ) )
    
    if service_id == self.modules_services.combined_local_file_service_id:
        
        # physical delete: clear every local file domain first, then the trash itself
        for local_file_service_id in local_file_service_ids:
            
            self._DeleteFiles( local_file_service_id, hash_ids, only_if_current = True )
            
        
        self._DeleteFiles( self.modules_services.trash_service_id, hash_ids )
        
    
    service = self.modules_services.GetService( service_id )
    
    service_type = service.GetServiceType()
    
    existing_hash_ids_to_timestamps = self.modules_files_storage.GetCurrentHashIdsToTimestamps( service_id, hash_ids )
    
    existing_hash_ids = set( existing_hash_ids_to_timestamps.keys() )
    
    service_info_updates = []
    
    # do delete outside, file repos and perhaps some other bananas situation can delete without ever having added
    
    now = HydrusData.GetNow()
    
    if service_type not in HC.FILE_SERVICES_WITH_NO_DELETE_RECORD:
        
        if only_if_current:
            
            deletee_hash_ids = existing_hash_ids
            
        else:
            
            deletee_hash_ids = hash_ids
            
        
        if len( deletee_hash_ids ) > 0:
            
            # keep the original import timestamp on the delete record where we have one
            insert_rows = [ ( hash_id, existing_hash_ids_to_timestamps[ hash_id ] if hash_id in existing_hash_ids_to_timestamps else None ) for hash_id in deletee_hash_ids ]
            
            num_new_deleted_files = self.modules_files_storage.RecordDeleteFiles( service_id, insert_rows )
            
            service_info_updates.append( ( num_new_deleted_files, service_id, HC.SERVICE_INFO_NUM_DELETED_FILES ) )
            
        
    
    if len( existing_hash_ids_to_timestamps ) > 0:
        
        # remove them from the service
        
        pending_changed = self.modules_files_storage.RemoveFiles( service_id, existing_hash_ids )
        
        if pending_changed:
            
            self._cursor_transaction_wrapper.pub_after_job( 'notify_new_pending' )
            
        
        delta_size = self.modules_files_metadata_basic.GetTotalSize( existing_hash_ids )
        num_viewable_files = self.modules_files_metadata_basic.GetNumViewable( existing_hash_ids )
        num_existing_files_removed = len( existing_hash_ids )
        num_inbox = len( existing_hash_ids.intersection( self.modules_files_metadata_basic.inbox_hash_ids ) )
        
        # negative deltas--these are added to the cached service_info rows at the bottom
        service_info_updates.append( ( -delta_size, service_id, HC.SERVICE_INFO_TOTAL_SIZE ) )
        service_info_updates.append( ( -num_viewable_files, service_id, HC.SERVICE_INFO_NUM_VIEWABLE_FILES ) )
        service_info_updates.append( ( -num_existing_files_removed, service_id, HC.SERVICE_INFO_NUM_FILES ) )
        service_info_updates.append( ( -num_inbox, service_id, HC.SERVICE_INFO_NUM_INBOX ) )
        
        # now do special stuff
        
        # if we maintain tag counts for this service, update
        
        if service_type in HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES:
            
            tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
            
            with self._MakeTemporaryIntegerTable( existing_hash_ids, 'hash_id' ) as temp_hash_id_table_name:
                
                for tag_service_id in tag_service_ids:
                    
                    self._CacheSpecificMappingsDeleteFiles( service_id, tag_service_id, existing_hash_ids, temp_hash_id_table_name )
                    
                
            
        
        # update the combined deleted file service
        
        if service_type in HC.FILE_SERVICES_COVERED_BY_COMBINED_DELETED_FILE:
            
            now = HydrusData.GetNow()
            
            rows = [ ( hash_id, now ) for hash_id in existing_hash_ids ]
            
            self._AddFiles( self.modules_services.combined_deleted_file_service_id, rows )
            
        
        # if any files are no longer in any local file services, send them to the trash
        
        if service_id in local_file_service_ids:
            
            hash_ids_still_in_another_service = set()
            
            other_local_file_service_ids = set( local_file_service_ids )
            other_local_file_service_ids.discard( service_id )
            
            hash_ids_still_in_another_service = self.modules_files_storage.FilterAllCurrentHashIds( existing_hash_ids, just_these_service_ids = other_local_file_service_ids )
            
            trashed_hash_ids = existing_hash_ids.difference( hash_ids_still_in_another_service )
            
            if len( trashed_hash_ids ) > 0:
                
                now = HydrusData.GetNow()
                
                delete_rows = [ ( hash_id, now ) for hash_id in trashed_hash_ids ]
                
                self._AddFiles( self.modules_services.trash_service_id, delete_rows )
                
            
        
        # if the files are being fully deleted, then physically delete them
        
        if service_id == self.modules_services.combined_local_file_service_id:
            
            # NOTE(review): archiving ahead of the physical delete presumably clears inbox state first--confirm intent
            self._ArchiveFiles( hash_ids )
            
            for hash_id in hash_ids:
                
                self.modules_similar_files.StopSearchingFile( hash_id )
                
            
            self.modules_files_maintenance_queue.CancelFiles( hash_ids )
            
            self.modules_hashes_local_cache.DropHashIdsFromCache( existing_hash_ids )
            
        
        # push the info updates, notify
        
        self._ExecuteMany( 'UPDATE service_info SET info = info + ? WHERE service_id = ? AND info_type = ?;', service_info_updates )
        
    
def _DeletePending( self, service_key ):
    """Rescind everything pending/petitioned on the given service and notify the UI."""
    
    service_id = self.modules_services.GetServiceId( service_key )
    service = self.modules_services.GetService( service_id )
    
    service_type = service.GetServiceType()
    
    if service_type == HC.TAG_REPOSITORY:
        
        ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = ClientDBMappingsStorage.GenerateMappingsTableNames( service_id )
        
        def read_mappings_ids( table_name ):
            
            # collapse every ( tag_id, hash_id ) row into ( tag_id, hash_ids ) pairs
            return list( HydrusData.BuildKeyToListDict( self._Execute( 'SELECT tag_id, hash_id FROM ' + table_name + ';' ) ).items() )
            
        
        # kwargs evaluate left to right, so pending is read before petitioned, as before
        self._UpdateMappings( service_id, pending_rescinded_mappings_ids = read_mappings_ids( pending_mappings_table_name ), petitioned_rescinded_mappings_ids = read_mappings_ids( petitioned_mappings_table_name ) )
        
        for petition_table_name in ( 'tag_sibling_petitions', 'tag_parent_petitions' ):
            
            self._Execute( 'DELETE FROM ' + petition_table_name + ' WHERE service_id = ?;', ( service_id, ) )
            
        
    elif service_type in ( HC.FILE_REPOSITORY, HC.IPFS ):
        
        self.modules_files_storage.DeletePending( service_id )
        
    
    for topic in ( 'notify_new_pending', 'notify_new_tag_display_application', 'notify_new_force_refresh_tags_data' ):
        
        self._cursor_transaction_wrapper.pub_after_job( topic )
        
    
    self.pub_service_updates_after_commit( { service_key : [ HydrusData.ServiceUpdate( HC.SERVICE_UPDATE_DELETE_PENDING ) ] } )
    
def _DeleteService( self, service_id ):
    """Completely remove a service and all its rows/caches from the database,
    then publish a reset update for that service key."""
    
    service = self.modules_services.GetService( service_id )
    
    service_key = service.GetServiceKey()
    service_type = service.GetServiceType()
    
    # for a long time, much of this was done with foreign keys, which had to be turned on especially for this operation
    # however, this seemed to cause some immense temp drive space bloat when dropping the mapping tables, as there seems to be a trigger/foreign reference check for every row to be deleted
    # so now we just blat all tables and trust in the Lord that we don't forget to add any new ones in future
    
    self._Execute( 'DELETE FROM local_ratings WHERE service_id = ?;', ( service_id, ) )
    self._Execute( 'DELETE FROM recent_tags WHERE service_id = ?;', ( service_id, ) )
    self._Execute( 'DELETE FROM service_info WHERE service_id = ?;', ( service_id, ) )
    
    self._DeleteServiceDropFiles( service_id, service_type )
    
    if service_type in HC.REPOSITORIES:
        
        self.modules_repositories.DropRepositoryTables( service_id )
        
    
    self._DeleteServiceDropMappings( service_id, service_type )
    
    if service_type in HC.REAL_TAG_SERVICES:
        
        # other tag services may display this one's siblings/parents, so their display caches need a regen
        interested_service_ids = set( self.modules_tag_display.GetInterestedServiceIds( service_id ) )
        interested_service_ids.discard( service_id )
        
        self.modules_tag_parents.Drop( service_id )
        self.modules_tag_siblings.Drop( service_id )
        
        if len( interested_service_ids ) > 0:
            
            self.modules_tag_display.RegenerateTagSiblingsAndParentsCache( only_these_service_ids = interested_service_ids )
            
        
        # drop this tag service's tag search cache for the combined file domain and every specific file domain
        self.modules_tag_search.Drop( self.modules_services.combined_file_service_id, service_id )
        
        file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_TAG_LOOKUP_CACHES )
        
        for file_service_id in file_service_ids:
            
            self.modules_tag_search.Drop( file_service_id, service_id )
            
        
    
    if service_type in HC.FILE_SERVICES_WITH_SPECIFIC_TAG_LOOKUP_CACHES:
        
        # this was a file domain: drop every tag service's search cache for it
        tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
        
        for tag_service_id in tag_service_ids:
            
            self.modules_tag_search.Drop( service_id, tag_service_id )
            
        
    
    self.modules_services.DeleteService( service_id )
    
    service_update = HydrusData.ServiceUpdate( HC.SERVICE_UPDATE_RESET )
    
    service_keys_to_service_updates = { service_key : [ service_update ] }
    
    self.pub_service_updates_after_commit( service_keys_to_service_updates )
    
def _DeleteServiceDirectory( self, service_id, dirname ):
    """Delete one named directory record and its file map for the given service."""
    
    # dirnames are stored deduplicated in the texts module
    directory_id = self.modules_texts.GetTextId( dirname )
    
    for table_name in ( 'service_directories', 'service_directory_file_map' ):
        
        self._Execute( 'DELETE FROM {} WHERE service_id = ? AND directory_id = ?;'.format( table_name ), ( service_id, directory_id ) )
        
    
def _DeleteServiceDropFiles( self, service_id, service_type ):
    """Drop the file rows, file tables, and specific mapping caches for a service being deleted."""
    
    if service_type == HC.FILE_REPOSITORY:
        
        self._Execute( 'DELETE FROM remote_thumbnails WHERE service_id = ?;', ( service_id, ) )
        
    
    if service_type == HC.IPFS:
        
        # ipfs services also carry multihash filenames and directory records
        for ipfs_table_name in ( 'service_filenames', 'service_directories', 'service_directory_file_map' ):
            
            self._Execute( 'DELETE FROM ' + ipfs_table_name + ' WHERE service_id = ?;', ( service_id, ) )
            
        
    
    if service_type in HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES:
        
        self.modules_files_storage.DropFilesTables( service_id )
        
        # and drop this file domain's specific mapping cache for every real tag service
        for tag_service_id in self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES ):
            
            self._CacheSpecificMappingsDrop( service_id, tag_service_id )
            
        
    
def _DeleteServiceDropMappings( self, service_id, service_type ):
    """Drop all mappings tables and mapping caches for a tag service being deleted."""
    
    # only real tag services have mappings to drop
    if service_type not in HC.REAL_TAG_SERVICES:
        
        return
        
    
    self.modules_mappings_storage.DropMappingsTables( service_id )
    
    self._CacheCombinedFilesMappingsDrop( service_id )
    
    # also clear this tag service's specific cache in every file domain that keeps one
    for specific_file_service_id in self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES ):
        
        self._CacheSpecificMappingsDrop( specific_file_service_id, service_id )
        
    
def _DeleteServiceInfo( self, service_key = None, types_to_delete = None ):
    """Invalidate cached service_info rows.
    
    :param service_key: if given, only rows for that service are cleared
    :param types_to_delete: if given, only those info types are cleared
    
    With neither argument, the whole table is wiped.
    """
    
    where_clauses = []
    
    if service_key is not None:
        
        # internal ints, so direct string interpolation is safe here
        where_clauses.append( 'service_id = {}'.format( self.modules_services.GetServiceId( service_key ) ) )
        
    
    if types_to_delete is not None:
        
        where_clauses.append( 'info_type IN {}'.format( HydrusData.SplayListForDB( types_to_delete ) ) )
        
    
    predicates_string = ' WHERE {}'.format( ' AND '.join( where_clauses ) ) if len( where_clauses ) > 0 else ''
    
    self._Execute( 'DELETE FROM service_info{};'.format( predicates_string ) )
    
    self._cursor_transaction_wrapper.pub_after_job( 'notify_new_pending' )
    
def _DisplayCatastrophicError( self, text ):
    """Write a serious db error to the log and show it to the user in a critical dialog."""
    
    intro = 'The db encountered a serious error! This is going to be written to the log as well, but here it is for a screenshot:'
    
    # a doubled newline separates the intro from the error text
    message = ( os.linesep * 2 ).join( ( intro, text ) )
    
    HydrusData.DebugPrint( message )
    
    self._controller.SafeShowCriticalMessage( 'hydrus db failed', message )
    
def _DoAfterJobWork( self ):
    """Flush post-job UI synchronisation work: push queued content updates to
    the weakref media result cache and regenerate tags managers invalidated
    during the job, then defer to the base class."""
    
    for service_keys_to_content_updates in self._after_job_content_update_jobs:
        
        self._weakref_media_result_cache.ProcessContentUpdates( service_keys_to_content_updates )
        
        self._cursor_transaction_wrapper.pub_after_job( 'content_updates_gui', service_keys_to_content_updates )
        
    
    if len( self._regen_tags_managers_hash_ids ) > 0:
        
        # only regenerate for files the UI actually has loaded
        hash_ids_to_do = self._weakref_media_result_cache.FilterFiles( self._regen_tags_managers_hash_ids )
        
        if len( hash_ids_to_do ) > 0:
            
            hash_ids_to_tags_managers = self._GetForceRefreshTagsManagers( hash_ids_to_do )
            
            self._weakref_media_result_cache.SilentlyTakeNewTagsManagers( hash_ids_to_tags_managers )
            
        
    
    if len( self._regen_tags_managers_tag_ids ) > 0:
        
        tag_ids_to_tags = self.modules_tags_local_cache.GetTagIdsToTags( tag_ids = self._regen_tags_managers_tag_ids )
        
        tags = { tag_ids_to_tags[ tag_id ] for tag_id in self._regen_tags_managers_tag_ids }
        
        # any loaded file carrying one of these tags needs a fresh tags manager
        hash_ids_to_do = self._weakref_media_result_cache.FilterFilesWithTags( tags )
        
        if len( hash_ids_to_do ) > 0:
            
            hash_ids_to_tags_managers = self._GetForceRefreshTagsManagers( hash_ids_to_do )
            
            self._weakref_media_result_cache.SilentlyTakeNewTagsManagers( hash_ids_to_tags_managers )
            
            self._cursor_transaction_wrapper.pub_after_job( 'refresh_all_tag_presentation_gui' )
            
        
    
    HydrusDB.HydrusDB._DoAfterJobWork( self )
    
def _DuplicatesGetRandomPotentialDuplicateHashes( self, file_search_context: ClientSearch.FileSearchContext, both_files_match, pixel_dupes_preference, max_hamming_distance ):
    """Pick a random media group that has potential duplicate pairs matching
    the search, and return the hashes of its potential duplicates.
    
    Returns an empty list when no matching group with a usable king exists.
    """
    
    db_location_context = self.modules_files_storage.GetDBLocationContext( file_search_context.GetLocationContext() )
    
    is_complicated_search = False
    
    with self._MakeTemporaryIntegerTable( [], 'hash_id' ) as temp_table_name:
        
        allowed_hash_ids = None
        preferred_hash_ids = None
        
        if file_search_context.IsJustSystemEverything() or file_search_context.HasNoPredicates():
            
            # no real predicates, so the cheap 'everything' join will do
            table_join = self.modules_files_duplicates.DuplicatesGetPotentialDuplicatePairsTableJoinOnEverythingSearchResults( db_location_context, pixel_dupes_preference, max_hamming_distance )
            
        else:
            
            is_complicated_search = True
            
            query_hash_ids = self._GetHashIdsFromQuery( file_search_context, apply_implicit_limit = False )
            
            if both_files_match:
                
                # both halves of a pair must be in the search results
                allowed_hash_ids = query_hash_ids
                
            else:
                
                # at least one half should be; prefer those files as kings
                preferred_hash_ids = query_hash_ids
                
            
            self._ExecuteMany( 'INSERT OR IGNORE INTO {} ( hash_id ) VALUES ( ? );'.format( temp_table_name ), ( ( hash_id, ) for hash_id in query_hash_ids ) )
            
            self._AnalyzeTempTable( temp_table_name )
            
            table_join = self.modules_files_duplicates.DuplicatesGetPotentialDuplicatePairsTableJoinOnSearchResults( db_location_context, temp_table_name, both_files_match, pixel_dupes_preference, max_hamming_distance )
            
        
        potential_media_ids = set()
        
        # gather up to ~1000 candidate media ids; no need to scan further just for a random pick
        for ( smaller_media_id, larger_media_id ) in self._Execute( 'SELECT DISTINCT smaller_media_id, larger_media_id FROM {};'.format( table_join ) ):
            
            potential_media_ids.add( smaller_media_id )
            potential_media_ids.add( larger_media_id )
            
            if len( potential_media_ids ) >= 1000:
                
                break
                
            
        
        potential_media_ids = list( potential_media_ids )
        
        random.shuffle( potential_media_ids )
        
        # walk the shuffled groups until one yields a king that satisfies the search
        chosen_hash_id = None
        
        for potential_media_id in potential_media_ids:
            
            best_king_hash_id = self.modules_files_duplicates.DuplicatesGetBestKingId( potential_media_id, db_location_context, allowed_hash_ids = allowed_hash_ids, preferred_hash_ids = preferred_hash_ids )
            
            if best_king_hash_id is not None:
                
                chosen_hash_id = best_king_hash_id
                
                break
                
            
        
    
    if chosen_hash_id is None:
        
        return []
        
    
    hash = self.modules_hashes_local_cache.GetHash( chosen_hash_id )
    
    if is_complicated_search and both_files_match:
        
        # restrict the final hash fetch to the search results too
        allowed_hash_ids = query_hash_ids
        
    else:
        
        allowed_hash_ids = None
        
    
    location_context = file_search_context.GetLocationContext()
    
    return self.modules_files_duplicates.DuplicatesGetFileHashesByDuplicateType( location_context, hash, HC.DUPLICATE_POTENTIAL, allowed_hash_ids = allowed_hash_ids, preferred_hash_ids = preferred_hash_ids )
    
def _DuplicatesGetPotentialDuplicatePairsForFiltering( self, file_search_context: ClientSearch.FileSearchContext, both_files_match, pixel_dupes_preference, max_hamming_distance ):
    """Fetch a batch of potential duplicate pairs (as hash pairs) for the
    duplicate filter, chosen so decisions within the batch do not intersect
    and the most similar, smallest groups come first."""
    
    # we need to batch non-intersecting decisions here to keep it simple at the gui-level
    # we also want to maximise per-decision value
    
    # now we will fetch some unknown pairs
    
    db_location_context = self.modules_files_storage.GetDBLocationContext( file_search_context.GetLocationContext() )
    
    with self._MakeTemporaryIntegerTable( [], 'hash_id' ) as temp_table_name:
        
        allowed_hash_ids = None
        preferred_hash_ids = None
        
        if file_search_context.IsJustSystemEverything() or file_search_context.HasNoPredicates():
            
            table_join = self.modules_files_duplicates.DuplicatesGetPotentialDuplicatePairsTableJoinOnEverythingSearchResults( db_location_context, pixel_dupes_preference, max_hamming_distance )
            
        else:
            
            query_hash_ids = self._GetHashIdsFromQuery( file_search_context, apply_implicit_limit = False )
            
            if both_files_match:
                
                allowed_hash_ids = query_hash_ids
                
            else:
                
                preferred_hash_ids = query_hash_ids
                
            
            self._ExecuteMany( 'INSERT OR IGNORE INTO {} ( hash_id ) VALUES ( ? );'.format( temp_table_name ), ( ( hash_id, ) for hash_id in query_hash_ids ) )
            
            self._AnalyzeTempTable( temp_table_name )
            
            table_join = self.modules_files_duplicates.DuplicatesGetPotentialDuplicatePairsTableJoinOnSearchResults( db_location_context, temp_table_name, both_files_match, pixel_dupes_preference, max_hamming_distance )
            
        
        # distinct important here for the search results table join
        result = self._Execute( 'SELECT DISTINCT smaller_media_id, larger_media_id, distance FROM {} LIMIT 2500;'.format( table_join ) ).fetchall()
        
    
    MAX_BATCH_SIZE = HG.client_controller.new_options.GetInteger( 'duplicate_filter_max_batch_size' )
    
    batch_of_pairs_of_media_ids = []
    seen_media_ids = set()
    
    distances_to_pairs = HydrusData.BuildKeyToListDict( ( ( distance, ( smaller_media_id, larger_media_id ) ) for ( smaller_media_id, larger_media_id, distance ) in result ) )
    
    distances = sorted( distances_to_pairs.keys() )
    
    # we want to preference pairs that have the smallest distance between them. deciding on more similar files first helps merge dupes before dealing with alts so reduces potentials more quickly
    
    for distance in distances:
        
        result_pairs_for_this_distance = distances_to_pairs[ distance ]
        
        # convert them into possible groups per each possible 'master hash_id', and value them
        
        master_media_ids_to_groups = collections.defaultdict( list )
        
        for pair in result_pairs_for_this_distance:
            
            ( smaller_media_id, larger_media_id ) = pair
            
            master_media_ids_to_groups[ smaller_media_id ].append( pair )
            master_media_ids_to_groups[ larger_media_id ].append( pair )
            
        
        master_hash_ids_to_values = collections.Counter()
        
        for ( media_id, pairs ) in master_media_ids_to_groups.items():
            
            # negative so we later serve up smallest groups first
            # we shall say for now that smaller groups are more useful to front-load because it lets us solve simple problems first
            master_hash_ids_to_values[ media_id ] = - len( pairs )
            
        
        # now let's add decision groups to our batch
        
        for ( master_media_id, count ) in master_hash_ids_to_values.most_common():
            
            if master_media_id in seen_media_ids:
                
                continue
                
            
            seen_media_ids_for_this_master_media_id = set()
            
            for pair in master_media_ids_to_groups[ master_media_id ]:
                
                ( smaller_media_id, larger_media_id ) = pair
                
                # skip pairs that would intersect an earlier decision group
                if smaller_media_id in seen_media_ids or larger_media_id in seen_media_ids:
                    
                    continue
                    
                
                seen_media_ids_for_this_master_media_id.add( smaller_media_id )
                seen_media_ids_for_this_master_media_id.add( larger_media_id )
                
                batch_of_pairs_of_media_ids.append( pair )
                
                if len( batch_of_pairs_of_media_ids ) >= MAX_BATCH_SIZE:
                    
                    break
                    
                
            
            seen_media_ids.update( seen_media_ids_for_this_master_media_id )
            
            if len( batch_of_pairs_of_media_ids ) >= MAX_BATCH_SIZE:
                
                break
                
            
        
        if len( batch_of_pairs_of_media_ids ) >= MAX_BATCH_SIZE:
            
            break
            
        
    
    seen_hash_ids = set()
    
    media_ids_to_best_king_ids = {}
    
    for media_id in seen_media_ids:
        
        # a group may have no king satisfying the search; its pairs are dropped below
        best_king_hash_id = self.modules_files_duplicates.DuplicatesGetBestKingId( media_id, db_location_context, allowed_hash_ids = allowed_hash_ids, preferred_hash_ids = preferred_hash_ids )
        
        if best_king_hash_id is not None:
            
            seen_hash_ids.add( best_king_hash_id )
            
            media_ids_to_best_king_ids[ media_id ] = best_king_hash_id
            
        
    
    batch_of_pairs_of_hash_ids = [ ( media_ids_to_best_king_ids[ smaller_media_id ], media_ids_to_best_king_ids[ larger_media_id ] ) for ( smaller_media_id, larger_media_id ) in batch_of_pairs_of_media_ids if smaller_media_id in media_ids_to_best_king_ids and larger_media_id in media_ids_to_best_king_ids ]
    
    hash_ids_to_hashes = self.modules_hashes_local_cache.GetHashIdsToHashes( hash_ids = seen_hash_ids )
    
    batch_of_pairs_of_hashes = [ ( hash_ids_to_hashes[ hash_id_a ], hash_ids_to_hashes[ hash_id_b ] ) for ( hash_id_a, hash_id_b ) in batch_of_pairs_of_hash_ids ]
    
    return batch_of_pairs_of_hashes
    
def _DuplicatesGetPotentialDuplicatesCount( self, file_search_context, both_files_match, pixel_dupes_preference, max_hamming_distance ):
    """Count the distinct potential duplicate pairs matching the given search context."""
    
    db_location_context = self.modules_files_storage.GetDBLocationContext( file_search_context.GetLocationContext() )
    
    dupes_module = self.modules_files_duplicates
    
    with self._MakeTemporaryIntegerTable( [], 'hash_id' ) as temp_table_name:
        
        search_is_trivial = file_search_context.IsJustSystemEverything() or file_search_context.HasNoPredicates()
        
        if search_is_trivial:
            
            # no real predicates--use the cheap join over everything in this location
            table_join = dupes_module.DuplicatesGetPotentialDuplicatePairsTableJoinOnEverythingSearchResults( db_location_context, pixel_dupes_preference, max_hamming_distance )
            
        else:
            
            # run the full search and stash the matches in the temp table for the join
            matching_hash_ids = self._GetHashIdsFromQuery( file_search_context, apply_implicit_limit = False )
            
            self._ExecuteMany( 'INSERT OR IGNORE INTO {} ( hash_id ) VALUES ( ? );'.format( temp_table_name ), ( ( hash_id, ) for hash_id in matching_hash_ids ) )
            
            self._AnalyzeTempTable( temp_table_name )
            
            table_join = dupes_module.DuplicatesGetPotentialDuplicatePairsTableJoinOnSearchResults( db_location_context, temp_table_name, both_files_match, pixel_dupes_preference, max_hamming_distance )
            
        
        # distinct important here for the search results table join
        ( potential_duplicates_count, ) = self._Execute( 'SELECT COUNT( * ) FROM ( SELECT DISTINCT smaller_media_id, larger_media_id FROM {} );'.format( table_join ) ).fetchone()
        
    
    return potential_duplicates_count
    
def _DuplicatesSetDuplicatePairStatus( self, pair_info ):
    """Apply a batch of duplicate filter decisions.
    
    :param pair_info: iterable of ( duplicate_type, hash_a, hash_b,
        service_keys_to_content_updates ) tuples; any content updates are
        applied first, then the duplicate structures are updated
    """
    
    for ( duplicate_type, hash_a, hash_b, service_keys_to_content_updates ) in pair_info:
        
        if len( service_keys_to_content_updates ) > 0:
            
            self._ProcessContentUpdates( service_keys_to_content_updates )
            
        
        hash_id_a = self.modules_hashes_local_cache.GetHashId( hash_a )
        hash_id_b = self.modules_hashes_local_cache.GetHashId( hash_b )
        
        media_id_a = self.modules_files_duplicates.DuplicatesGetMediaId( hash_id_a )
        media_id_b = self.modules_files_duplicates.DuplicatesGetMediaId( hash_id_b )
        
        smaller_media_id = min( media_id_a, media_id_b )
        larger_media_id = max( media_id_a, media_id_b )
        
        # this shouldn't be strictly needed, but lets do it here anyway to catch unforeseen problems
        self._Execute( 'DELETE FROM potential_duplicate_pairs WHERE smaller_media_id = ? AND larger_media_id = ?;', ( smaller_media_id, larger_media_id ) )
        
        if hash_id_a == hash_id_b:
            
            continue
            
        
        if duplicate_type in ( HC.DUPLICATE_FALSE_POSITIVE, HC.DUPLICATE_ALTERNATE ):
            
            if duplicate_type == HC.DUPLICATE_FALSE_POSITIVE:
                
                alternates_group_id_a = self.modules_files_duplicates.DuplicatesGetAlternatesGroupId( media_id_a )
                alternates_group_id_b = self.modules_files_duplicates.DuplicatesGetAlternatesGroupId( media_id_b )
                
                self.modules_files_duplicates.DuplicatesSetFalsePositive( alternates_group_id_a, alternates_group_id_b )
                
            elif duplicate_type == HC.DUPLICATE_ALTERNATE:
                
                if media_id_a == media_id_b:
                    
                    # already in the same duplicate group: split the non-king member out so they can be alternates
                    king_hash_id = self.modules_files_duplicates.DuplicatesGetKingHashId( media_id_a )
                    
                    hash_id_to_remove = hash_id_b if king_hash_id == hash_id_a else hash_id_a
                    
                    self.modules_files_duplicates.DuplicatesRemoveMediaIdMember( hash_id_to_remove )
                    
                    # refresh ids--the removal gave the split-off file a new media id
                    media_id_a = self.modules_files_duplicates.DuplicatesGetMediaId( hash_id_a )
                    media_id_b = self.modules_files_duplicates.DuplicatesGetMediaId( hash_id_b )
                    
                    smaller_media_id = min( media_id_a, media_id_b )
                    larger_media_id = max( media_id_a, media_id_b )
                    
                
                self.modules_files_duplicates.DuplicatesSetAlternates( media_id_a, media_id_b )
                
            
        elif duplicate_type in ( HC.DUPLICATE_BETTER, HC.DUPLICATE_WORSE, HC.DUPLICATE_SAME_QUALITY ):
            
            if duplicate_type == HC.DUPLICATE_WORSE:
                
                # normalise so that 'a' is always the better file
                ( hash_id_a, hash_id_b ) = ( hash_id_b, hash_id_a )
                ( media_id_a, media_id_b ) = ( media_id_b, media_id_a )
                
                duplicate_type = HC.DUPLICATE_BETTER
                
            
            king_hash_id_a = self.modules_files_duplicates.DuplicatesGetKingHashId( media_id_a )
            king_hash_id_b = self.modules_files_duplicates.DuplicatesGetKingHashId( media_id_b )
            
            if duplicate_type == HC.DUPLICATE_BETTER:
                
                if media_id_a == media_id_b:
                    
                    if hash_id_b == king_hash_id_b:
                        
                        # b was the king of their shared group, but a is better, so a takes over
                        self.modules_files_duplicates.DuplicatesSetKing( hash_id_a, media_id_a )
                        
                    
                else:
                    
                    if hash_id_b != king_hash_id_b:
                        
                        # b is a mere member of its group; split it off so only b gets merged into a's group
                        self.modules_files_duplicates.DuplicatesRemoveMediaIdMember( hash_id_b )
                        
                        media_id_b = self.modules_files_duplicates.DuplicatesGetMediaId( hash_id_b )
                        
                    
                    self.modules_files_duplicates.DuplicatesMergeMedias( media_id_a, media_id_b )
                    
                
            elif duplicate_type == HC.DUPLICATE_SAME_QUALITY:
                
                if media_id_a != media_id_b:
                    
                    a_is_king = hash_id_a == king_hash_id_a
                    b_is_king = hash_id_b == king_hash_id_b
                    
                    if not ( a_is_king or b_is_king ):
                        
                        # neither is a king: split b off and fold just that file into a's group
                        self.modules_files_duplicates.DuplicatesRemoveMediaIdMember( hash_id_b )
                        
                        media_id_b = self.modules_files_duplicates.DuplicatesGetMediaId( hash_id_b )
                        
                        superior_media_id = media_id_a
                        mergee_media_id = media_id_b
                        
                    elif not a_is_king:
                        
                        # NOTE(review): with b the king here, merging b's group into a's looks asymmetric--confirm intended
                        superior_media_id = media_id_a
                        mergee_media_id = media_id_b
                        
                    elif not b_is_king:
                        
                        superior_media_id = media_id_b
                        mergee_media_id = media_id_a
                        
                    else:
                        
                        superior_media_id = media_id_a
                        mergee_media_id = media_id_b
                        
                    
                    self.modules_files_duplicates.DuplicatesMergeMedias( superior_media_id, mergee_media_id )
                    
                
            
        elif duplicate_type == HC.DUPLICATE_POTENTIAL:
            
            potential_duplicate_media_ids_and_distances = [ ( media_id_b, 0 ) ]
            
            self.modules_files_duplicates.DuplicatesAddPotentialDuplicates( media_id_a, potential_duplicate_media_ids_and_distances )
            
        
    
def _FilterExistingTags( self, service_key, tags ):
    """Return the subset of the given tags that have a non-zero current storage
    count on the given tag service (over the combined file domain)."""
    
    service_id = self.modules_services.GetServiceId( service_key )
    
    ids_to_tags = {}
    
    for tag in tags:
        
        ids_to_tags[ self.modules_tags.GetTagId( tag ) ] = tag
        
    
    with self._MakeTemporaryIntegerTable( set( ids_to_tags.keys() ), 'tag_id' ) as temp_tag_id_table_name:
        
        counts = self.modules_mappings_counts.GetCountsForTags( ClientTags.TAG_DISPLAY_STORAGE, self.modules_services.combined_file_service_id, service_id, temp_tag_id_table_name )
        
    
    # only tags with at least one current mapping survive the filter
    return { ids_to_tags[ tag_id ] for ( tag_id, current_count, pending_count ) in counts if current_count > 0 }
    
def _FilterExistingUpdateMappings( self, tag_service_id, mappings_ids, action ):
    """Cull ( tag_id, hash_ids ) mapping rows down to those the given content
    update action would actually change, so no-op work is skipped.
    
    :param tag_service_id: the tag service the mappings belong to
    :param mappings_ids: sized collection of ( tag_id, hash_ids ) pairs
    :param action: one of HC.CONTENT_UPDATE_ADD / DELETE / PEND / RESCIND_PEND
    :return: list of ( tag_id, valid_hash_ids ) pairs with no-ops removed
    """
    
    if len( mappings_ids ) == 0:
        
        return mappings_ids
        
    
    ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = ClientDBMappingsStorage.GenerateMappingsTableNames( tag_service_id )
    
    culled_mappings_ids = []
    
    for ( tag_id, hash_ids ) in mappings_ids:
        
        if len( hash_ids ) == 0:
            
            continue
            
        elif len( hash_ids ) == 1:
            
            # single-hash fast path: a couple of point lookups beat a temp table join
            
            ( hash_id, ) = hash_ids
            
            if action == HC.CONTENT_UPDATE_ADD:
                
                # an add is a no-op if the mapping is already current
                result = self._Execute( 'SELECT 1 FROM {} WHERE tag_id = ? AND hash_id = ?;'.format( current_mappings_table_name ), ( tag_id, hash_id ) ).fetchone()
                
                if result is None:
                    
                    valid_hash_ids = hash_ids
                    
                else:
                    
                    continue
                    
                
            elif action == HC.CONTENT_UPDATE_DELETE:
                
                # a delete is a no-op if a delete record already exists
                result = self._Execute( 'SELECT 1 FROM {} WHERE tag_id = ? AND hash_id = ?;'.format( deleted_mappings_table_name ), ( tag_id, hash_id ) ).fetchone()
                
                if result is None:
                    
                    valid_hash_ids = hash_ids
                    
                else:
                    
                    continue
                    
                
            elif action == HC.CONTENT_UPDATE_PEND:
                
                # a pend is a no-op if the mapping is already current or already pending
                result = self._Execute( 'SELECT 1 FROM {} WHERE tag_id = ? AND hash_id = ?;'.format( current_mappings_table_name ), ( tag_id, hash_id ) ).fetchone()
                
                if result is None:
                    
                    result = self._Execute( 'SELECT 1 FROM {} WHERE tag_id = ? AND hash_id = ?;'.format( pending_mappings_table_name ), ( tag_id, hash_id ) ).fetchone()
                    
                    if result is None:
                        
                        valid_hash_ids = hash_ids
                        
                    else:
                        
                        continue
                        
                    
                else:
                    
                    continue
                    
                
            elif action == HC.CONTENT_UPDATE_RESCIND_PEND:
                
                # a rescind only applies where a pending row actually exists
                result = self._Execute( 'SELECT 1 FROM {} WHERE tag_id = ? AND hash_id = ?;'.format( pending_mappings_table_name ), ( tag_id, hash_id ) ).fetchone()
                
                if result is None:
                    
                    continue
                    
                else:
                    
                    valid_hash_ids = hash_ids
                    
                
            
        else:
            
            # multi-hash path: join the hash_ids against the relevant mappings table
            # NOTE(review): an unrecognised action would fall through with valid_hash_ids unbound/stale--presumably callers only pass the four actions above
            
            with self._MakeTemporaryIntegerTable( hash_ids, 'hash_id' ) as temp_hash_ids_table_name:
                
                if action == HC.CONTENT_UPDATE_ADD:
                    
                    existing_hash_ids = self._STS( self._Execute( 'SELECT hash_id FROM {} CROSS JOIN {} USING ( hash_id ) WHERE tag_id = ?;'.format( temp_hash_ids_table_name, current_mappings_table_name ), ( tag_id, ) ) )
                    
                    valid_hash_ids = set( hash_ids ).difference( existing_hash_ids )
                    
                elif action == HC.CONTENT_UPDATE_DELETE:
                    
                    existing_hash_ids = self._STS( self._Execute( 'SELECT hash_id FROM {} CROSS JOIN {} USING ( hash_id ) WHERE tag_id = ?;'.format( temp_hash_ids_table_name, deleted_mappings_table_name ), ( tag_id, ) ) )
                    
                    valid_hash_ids = set( hash_ids ).difference( existing_hash_ids )
                    
                elif action == HC.CONTENT_UPDATE_PEND:
                    
                    # mappings that are either current or pending are no-ops for a pend
                    existing_hash_ids = self._STS( self._Execute( 'SELECT hash_id FROM {} CROSS JOIN {} USING ( hash_id ) WHERE tag_id = ?;'.format( temp_hash_ids_table_name, current_mappings_table_name ), ( tag_id, ) ) )
                    
                    existing_hash_ids.update( self._STI( self._Execute( 'SELECT hash_id FROM {} CROSS JOIN {} USING ( hash_id ) WHERE tag_id = ?;'.format( temp_hash_ids_table_name, pending_mappings_table_name ), ( tag_id, ) ) ) )
                    
                    valid_hash_ids = set( hash_ids ).difference( existing_hash_ids )
                    
                elif action == HC.CONTENT_UPDATE_RESCIND_PEND:
                    
                    valid_hash_ids = self._STS( self._Execute( 'SELECT hash_id FROM {} CROSS JOIN {} USING ( hash_id ) WHERE tag_id = ?;'.format( temp_hash_ids_table_name, pending_mappings_table_name ), ( tag_id, ) ) )
                    
                
            
        
        if len( valid_hash_ids ) > 0:
            
            culled_mappings_ids.append( ( tag_id, valid_hash_ids ) )
            
        
    
    return culled_mappings_ids
    
def _FilterForFileDeleteLock( self, service_id, hash_ids ):
    """Apply the archived-file delete lock: on local file services, when the
    lock option is on, only inboxed files may be deleted."""
    
    delete_lock_is_on = HG.client_controller.new_options.GetBoolean( 'delete_lock_for_archived_files' )
    
    # short-circuit keeps the service lookup behind the option check, as before
    if delete_lock_is_on and self.modules_services.GetService( service_id ).GetServiceType() in HC.LOCAL_FILE_SERVICES:
        
        hash_ids = set( hash_ids ).intersection( self.modules_files_metadata_basic.inbox_hash_ids )
        
    
    return hash_ids
    
def _FilterHashesByService( self, file_service_key: bytes, hashes: typing.Sequence[ bytes ] ) -> typing.List[ bytes ]:
    """Return, preserving input order, the hashes that are current in the given file service."""
    
    if file_service_key == CC.COMBINED_FILE_SERVICE_KEY:
        
        # the combined domain covers everything, so nothing is filtered
        return list( hashes )
        
    
    service_id = self.modules_services.GetServiceId( file_service_key )
    
    # only hashes the db already knows can possibly be current anywhere
    known_hashes_to_hash_ids = { hash : self.modules_hashes_local_cache.GetHashId( hash ) for hash in hashes if self.modules_hashes.HasHash( hash ) }
    
    current_hash_ids = self.modules_files_storage.FilterHashIdsToStatus( service_id, set( known_hashes_to_hash_ids.values() ), HC.CONTENT_STATUS_CURRENT )
    
    return [ hash for hash in hashes if hash in known_hashes_to_hash_ids and known_hashes_to_hash_ids[ hash ] in current_hash_ids ]
    
def _FixLogicallyInconsistentMappings( self, tag_service_key = None ):
    """Find and rescind mappings that are in two mutually exclusive states.
    
    A mapping should never be both current and pending, nor both deleted and
    petitioned. Any such pairs have the pending/petitioned half rescinded via
    _UpdateMappings. Runs as a cancellable modal job over every real tag
    service, or just the one named by tag_service_key.
    """
    
    job_key = ClientThreading.JobKey( cancellable = True )
    
    total_fixed = 0
    
    try:
        
        job_key.SetStatusTitle( 'fixing logically inconsistent mappings' )
        
        self._controller.pub( 'modal_message', job_key )
        
        if tag_service_key is None:
            
            tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
            
        else:
            
            tag_service_ids = ( self.modules_services.GetServiceId( tag_service_key ), )
            
        
        for tag_service_id in tag_service_ids:
            
            if job_key.IsCancelled():
                
                break
                
            
            message = 'fixing {}'.format( tag_service_id )
            
            job_key.SetVariable( 'popup_text_1', message )
            
            # brief yield so the popup has a chance to update
            time.sleep( 0.01 )
            
            ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = ClientDBMappingsStorage.GenerateMappingsTableNames( tag_service_id )
            
            # mappings that are both pending and current: rescind the pend
            both_current_and_pending_mappings = list(
                HydrusData.BuildKeyToSetDict(
                    self._Execute( 'SELECT tag_id, hash_id FROM {} CROSS JOIN {} USING ( tag_id, hash_id );'.format( pending_mappings_table_name, current_mappings_table_name ) )
                ).items()
            )
            
            total_fixed += sum( ( len( hash_ids ) for ( tag_id, hash_ids ) in both_current_and_pending_mappings ) )
            
            self._UpdateMappings( tag_service_id, pending_rescinded_mappings_ids = both_current_and_pending_mappings )
            
            # mappings that are both petitioned and deleted: rescind the petition
            both_deleted_and_petitioned_mappings = list(
                HydrusData.BuildKeyToSetDict(
                    self._Execute( 'SELECT tag_id, hash_id FROM {} CROSS JOIN {} USING ( tag_id, hash_id );'.format( petitioned_mappings_table_name, deleted_mappings_table_name ) )
                ).items()
            )
            
            total_fixed += sum( ( len( hash_ids ) for ( tag_id, hash_ids ) in both_deleted_and_petitioned_mappings ) )
            
            self._UpdateMappings( tag_service_id, petitioned_rescinded_mappings_ids = both_deleted_and_petitioned_mappings )
            
        
    finally:
        
        if total_fixed == 0:
            
            HydrusData.ShowText( 'No inconsistent mappings found!' )
            
        else:
            
            # invalidate the cached pending/petitioned counts so they are recalculated
            self._Execute( 'DELETE FROM service_info where info_type IN ( ?, ? );', ( HC.SERVICE_INFO_NUM_PENDING_MAPPINGS, HC.SERVICE_INFO_NUM_PETITIONED_MAPPINGS ) )
            
            self._controller.pub( 'notify_new_pending' )
            
            HydrusData.ShowText( 'Found {} bad mappings! They _should_ be deleted, and your pending counts should be updated.'.format( HydrusData.ToHumanInt( total_fixed ) ) )
            
        
        job_key.DeleteVariable( 'popup_text_2' )
        
        job_key.SetVariable( 'popup_text_1', 'done!' )
        
        job_key.Finish()
        
        # auto-dismiss the popup after five seconds
        job_key.Delete( 5 )
def _GenerateDBJob( self, job_type, synchronous, action, *args, **kwargs ):
    """Factory hook: wrap the given call parameters in a client-side database job."""
    
    job = JobDatabaseClient( job_type, synchronous, action, *args, **kwargs )
    
    return job
def _GeneratePredicatesFromTagIdsAndCounts( self, tag_display_type: int, display_tag_service_id: int, tag_ids_to_full_counts, inclusive, job_key = None ):
    """Convert { tag_id : ( min_current, max_current, min_pending, max_pending ) }
    count rows into tag Predicates, decorated with known sibling/parent data.
    
    STORAGE display type resolves ideal siblings and parent ancestors through
    the display service's ACTUAL sibling/parent structure; ACTUAL display type
    only attaches the known sibling chains from the searched service(s).
    Returns [] early whenever job_key is cancelled.
    """
    
    tag_ids = set( tag_ids_to_full_counts.keys() )
    
    predicates = []
    
    if tag_display_type == ClientTags.TAG_DISPLAY_STORAGE:
        
        if display_tag_service_id != self.modules_services.combined_tag_service_id:
            
            # map every tag to its ideal sibling, then pull the full sibling chains
            # and parent ancestors we will want to display
            tag_ids_to_ideal_tag_ids = self.modules_tag_siblings.GetTagsToIdeals( ClientTags.TAG_DISPLAY_ACTUAL, display_tag_service_id, tag_ids )
            
            tag_ids_that_are_sibling_chained = self.modules_tag_siblings.FilterChained( ClientTags.TAG_DISPLAY_ACTUAL, display_tag_service_id, tag_ids )
            
            tag_ids_to_ideal_tag_ids_for_siblings = { tag_id : ideal_tag_id for ( tag_id, ideal_tag_id ) in tag_ids_to_ideal_tag_ids.items() if tag_id in tag_ids_that_are_sibling_chained }
            
            ideal_tag_ids_to_sibling_chain_tag_ids = self.modules_tag_siblings.GetIdealsToChains( ClientTags.TAG_DISPLAY_ACTUAL, display_tag_service_id, set( tag_ids_to_ideal_tag_ids_for_siblings.values() ) )
            
            ideal_tag_ids = set( tag_ids_to_ideal_tag_ids.values() )
            
            ideal_tag_ids_that_are_parent_chained = self.modules_tag_parents.FilterChained( ClientTags.TAG_DISPLAY_ACTUAL, display_tag_service_id, ideal_tag_ids )
            
            tag_ids_to_ideal_tag_ids_for_parents = { tag_id : ideal_tag_id for ( tag_id, ideal_tag_id ) in tag_ids_to_ideal_tag_ids.items() if ideal_tag_id in ideal_tag_ids_that_are_parent_chained }
            
            ideal_tag_ids_to_ancestor_tag_ids = self.modules_tag_parents.GetTagsToAncestors( ClientTags.TAG_DISPLAY_ACTUAL, display_tag_service_id, set( tag_ids_to_ideal_tag_ids_for_parents.values() ) )
            
        else:
            
            # the combined tag service has no single sibling/parent structure to consult
            tag_ids_to_ideal_tag_ids_for_siblings = {}
            tag_ids_to_ideal_tag_ids_for_parents = {}
            
            ideal_tag_ids_to_sibling_chain_tag_ids = {}
            ideal_tag_ids_to_ancestor_tag_ids = {}
            
        
        # we need tag text for the searched tags plus every chain/ancestor member
        tag_ids_we_want_to_look_up = set( tag_ids )
        tag_ids_we_want_to_look_up.update( itertools.chain.from_iterable( ideal_tag_ids_to_sibling_chain_tag_ids.values() ) )
        tag_ids_we_want_to_look_up.update( itertools.chain.from_iterable( ideal_tag_ids_to_ancestor_tag_ids.values() ) )
        
        if job_key is not None and job_key.IsCancelled():
            
            return []
            
        
        tag_ids_to_tags = self.modules_tags_local_cache.GetTagIdsToTags( tag_ids = tag_ids_we_want_to_look_up )
        
        if job_key is not None and job_key.IsCancelled():
            
            return []
            
        
        ideal_tag_ids_to_chain_tags = { ideal_tag_id : { tag_ids_to_tags[ chain_tag_id ] for chain_tag_id in chain_tag_ids } for ( ideal_tag_id, chain_tag_ids ) in ideal_tag_ids_to_sibling_chain_tag_ids.items() }
        
        ideal_tag_ids_to_ancestor_tags = { ideal_tag_id : { tag_ids_to_tags[ ancestor_tag_id ] for ancestor_tag_id in ancestor_tag_ids } for ( ideal_tag_id, ancestor_tag_ids ) in ideal_tag_ids_to_ancestor_tag_ids.items() }
        
        for ( tag_id, ( min_current_count, max_current_count, min_pending_count, max_pending_count ) ) in tag_ids_to_full_counts.items():
            
            tag = tag_ids_to_tags[ tag_id ]
            
            # NOTE: PredicateCount's argument order here is ( min_current, min_pending,
            # max_current, max_pending )--different from the input tuple order
            predicate = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, value = tag, inclusive = inclusive, count = ClientSearch.PredicateCount( min_current_count, min_pending_count, max_current_count, max_pending_count ) )
            
            if tag_id in tag_ids_to_ideal_tag_ids_for_siblings:
                
                ideal_tag_id = tag_ids_to_ideal_tag_ids_for_siblings[ tag_id ]
                
                if ideal_tag_id != tag_id:
                    
                    predicate.SetIdealSibling( tag_ids_to_tags[ ideal_tag_id ] )
                    
                
                predicate.SetKnownSiblings( ideal_tag_ids_to_chain_tags[ ideal_tag_id ] )
                
            
            if tag_id in tag_ids_to_ideal_tag_ids_for_parents:
                
                ideal_tag_id = tag_ids_to_ideal_tag_ids_for_parents[ tag_id ]
                
                parents = ideal_tag_ids_to_ancestor_tags[ ideal_tag_id ]
                
                if len( parents ) > 0:
                    
                    predicate.SetKnownParents( parents )
                    
                
            
            predicates.append( predicate )
            
        
    elif tag_display_type == ClientTags.TAG_DISPLAY_ACTUAL:
        
        tag_ids_to_known_chain_tag_ids = collections.defaultdict( set )
        
        if display_tag_service_id == self.modules_services.combined_tag_service_id:
            
            search_tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
            
        else:
            
            search_tag_service_ids = ( display_tag_service_id, )
            
        
        # union the sibling chains across every service we are searching
        for search_tag_service_id in search_tag_service_ids:
            
            tag_ids_that_are_sibling_chained = self.modules_tag_siblings.FilterChained( ClientTags.TAG_DISPLAY_ACTUAL, search_tag_service_id, tag_ids )
            
            tag_ids_to_ideal_tag_ids_for_siblings = self.modules_tag_siblings.GetTagsToIdeals( ClientTags.TAG_DISPLAY_ACTUAL, search_tag_service_id, tag_ids_that_are_sibling_chained )
            
            ideal_tag_ids = set( tag_ids_to_ideal_tag_ids_for_siblings.values() )
            
            ideal_tag_ids_to_sibling_chain_tag_ids = self.modules_tag_siblings.GetIdealsToChains( ClientTags.TAG_DISPLAY_ACTUAL, search_tag_service_id, ideal_tag_ids )
            
            for ( tag_id, ideal_tag_id ) in tag_ids_to_ideal_tag_ids_for_siblings.items():
                
                tag_ids_to_known_chain_tag_ids[ tag_id ].update( ideal_tag_ids_to_sibling_chain_tag_ids[ ideal_tag_id ] )
                
            
        
        tag_ids_we_want_to_look_up = set( tag_ids ).union( itertools.chain.from_iterable( tag_ids_to_known_chain_tag_ids.values() ) )
        
        if job_key is not None and job_key.IsCancelled():
            
            return []
            
        
        tag_ids_to_tags = self.modules_tags_local_cache.GetTagIdsToTags( tag_ids = tag_ids_we_want_to_look_up )
        
        if job_key is not None and job_key.IsCancelled():
            
            return []
            
        
        for ( tag_id, ( min_current_count, max_current_count, min_pending_count, max_pending_count ) ) in tag_ids_to_full_counts.items():
            
            tag = tag_ids_to_tags[ tag_id ]
            
            predicate = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, value = tag, inclusive = inclusive, count = ClientSearch.PredicateCount( min_current_count, min_pending_count, max_current_count, max_pending_count ) )
            
            if tag_id in tag_ids_to_known_chain_tag_ids:
                
                chain_tags = { tag_ids_to_tags[ chain_tag_id ] for chain_tag_id in tag_ids_to_known_chain_tag_ids[ tag_id ] }
                
                predicate.SetKnownSiblings( chain_tags )
                
            
            predicates.append( predicate )
            
        
    
    return predicates
def _GetAllTagIds( self, leaf: ClientDBServices.FileSearchContextLeaf, job_key = None ):
    """Fetch every tag_id known to this (file domain, tag domain) search leaf.
    Returns an empty set if job_key is cancelled part-way through.
    """
    
    cancelled_hook = None if job_key is None else job_key.IsCancelled
    
    query = '{};'.format( self.modules_tag_search.GetQueryPhraseForTagIds( leaf.file_service_id, leaf.tag_service_id ) )
    
    cursor = self._Execute( query )
    
    # read in blocks so a cancel can interrupt a long scan
    found_tag_ids = self._STS( HydrusDB.ReadFromCancellableCursor( cursor, 1024, cancelled_hook = cancelled_hook ) )
    
    if job_key is not None and job_key.IsCancelled():
        
        return set()
        
    
    return set( found_tag_ids )
def _GetAutocompleteCountEstimate( self, tag_display_type: int, tag_service_id: int, file_service_id: int, tag_ids: typing.Collection[ int ], include_current_tags: bool, include_pending_tags: bool ):
count = 0
if not include_current_tags and not include_pending_tags:
return count
( current_count, pending_count ) = self._GetAutocompleteCountEstimateStatuses( tag_display_type, tag_service_id, file_service_id, tag_ids )
if include_current_tags:
count += current_count
if include_current_tags:
count += pending_count
return count
def _GetAutocompleteCountEstimateStatuses( self, tag_display_type: int, tag_service_id: int, file_service_id: int, tag_ids: typing.Collection[ int ] ):
include_current_tags = True
include_pending_tags = True
ids_to_count = self.modules_mappings_counts.GetCounts( tag_display_type, tag_service_id, file_service_id, tag_ids, include_current_tags, include_pending_tags )
current_count = 0
pending_count = 0
for ( current_min, current_max, pending_min, pending_max ) in ids_to_count.values():
current_count += current_min
pending_count += pending_min
return ( current_count, pending_count )
def _GetAutocompleteTagIdsLeaf( self, tag_display_type: int, leaf: ClientDBServices.FileSearchContextLeaf, search_text, exact_match, job_key = None ):
    """Resolve an autocomplete search string to the matching tag_ids in this
    search leaf, expanded with all sibling-chain members.
    
    Always returns a set. Returns an empty set for empty/unsearchable input,
    for an unknown namespace, or when job_key is cancelled.
    """
    
    if search_text == '':
        
        return set()
        
    
    ( namespace, half_complete_searchable_subtag ) = HydrusTags.SplitTag( search_text )
    
    if half_complete_searchable_subtag == '':
        
        return set()
        
    
    if namespace == '*':
        
        namespace = ''
        
    
    if exact_match:
        
        # BUG FIX: this previously returned [], inconsistent with every other
        # early-out here; callers do tag_ids.update( ... ) on the result, which
        # a list does not support
        if '*' in namespace or '*' in half_complete_searchable_subtag:
            
            return set()
            
        
    
    if namespace == '':
        
        namespace_ids = []
        
    elif '*' in namespace:
        
        namespace_ids = self.modules_tag_search.GetNamespaceIdsFromWildcard( namespace )
        
    else:
        
        if not self.modules_tags.NamespaceExists( namespace ):
            
            return set()
            
        
        namespace_ids = ( self.modules_tags.GetNamespaceId( namespace ), )
        
    
    if half_complete_searchable_subtag == '*':
        
        if namespace == '':
            
            # hellmode 'get all tags' search
            tag_ids = self._GetAllTagIds( leaf, job_key = job_key )
            
        else:
            
            tag_ids = self._GetTagIdsFromNamespaceIds( leaf, namespace_ids, job_key = job_key )
            
        
    else:
        
        tag_ids = set()
        
        with self._MakeTemporaryIntegerTable( [], 'subtag_id' ) as temp_subtag_ids_table_name:
            
            self.modules_tag_search.GetSubtagIdsFromWildcardIntoTable( leaf.file_service_id, leaf.tag_service_id, half_complete_searchable_subtag, temp_subtag_ids_table_name, job_key = job_key )
            
            if namespace == '':
                
                loop_of_tag_ids = self._GetTagIdsFromSubtagIdsTable( leaf.file_service_id, leaf.tag_service_id, temp_subtag_ids_table_name, job_key = job_key )
                
            else:
                
                with self._MakeTemporaryIntegerTable( namespace_ids, 'namespace_id' ) as temp_namespace_ids_table_name:
                    
                    loop_of_tag_ids = self._GetTagIdsFromNamespaceIdsSubtagIdsTables( leaf.file_service_id, leaf.tag_service_id, temp_namespace_ids_table_name, temp_subtag_ids_table_name, job_key = job_key )
                    
                
            
            tag_ids.update( loop_of_tag_ids )
            
        
    
    # now fetch siblings, add to set
    
    if not isinstance( tag_ids, set ):
        
        tag_ids = set( tag_ids )
        
    
    tag_ids_without_siblings = list( tag_ids )
    
    # ( removed an unused 'seen_ideal_tag_ids' defaultdict that was never read )
    
    for batch_of_tag_ids in HydrusData.SplitListIntoChunks( tag_ids_without_siblings, 10240 ):
        
        with self._MakeTemporaryIntegerTable( batch_of_tag_ids, 'tag_id' ) as temp_tag_ids_table_name:
            
            if job_key is not None and job_key.IsCancelled():
                
                return set()
                
            
            with self._MakeTemporaryIntegerTable( [], 'ideal_tag_id' ) as temp_ideal_tag_ids_table_name:
                
                self.modules_tag_siblings.FilterChainedIdealsIntoTable( ClientTags.TAG_DISPLAY_ACTUAL, leaf.tag_service_id, temp_tag_ids_table_name, temp_ideal_tag_ids_table_name )
                
                with self._MakeTemporaryIntegerTable( [], 'tag_id' ) as temp_chained_tag_ids_table_name:
                    
                    self.modules_tag_siblings.GetChainsMembersFromIdealsTables( ClientTags.TAG_DISPLAY_ACTUAL, leaf.tag_service_id, temp_ideal_tag_ids_table_name, temp_chained_tag_ids_table_name )
                    
                    tag_ids.update( self._STI( self._Execute( 'SELECT tag_id FROM {};'.format( temp_chained_tag_ids_table_name ) ) ) )
                    
                
            
        
    
    return tag_ids
def _GetAutocompletePredicates(
    self,
    tag_display_type: int,
    file_search_context: ClientSearch.FileSearchContext,
    search_text: str = '',
    exact_match = False,
    inclusive = True,
    add_namespaceless = False,
    search_namespaces_into_full_tags = False,
    zero_count_ok = False,
    job_key = None
):
    """Produce tag autocomplete predicates with counts for the given search.
    
    Iterates the search context's (file domain, tag domain) leaves, resolves
    search_text to tag_ids per leaf, fetches counts in batches and converts
    them to predicates, then merges duplicates across leaves. Returns [] if
    the search is 'all known tags' over 'all known files' (unsupported) or if
    job_key is cancelled.
    """
    
    location_context = file_search_context.GetLocationContext()
    tag_search_context = file_search_context.GetTagSearchContext()
    
    display_tag_service_id = self.modules_services.GetServiceId( tag_search_context.display_service_key )
    
    if tag_search_context.IsAllKnownTags() and location_context.IsAllKnownFiles():
        
        return []
        
    
    include_current = tag_search_context.include_current_tags
    include_pending = tag_search_context.include_pending_tags
    
    all_predicates = []
    
    file_search_context_branch = self._GetFileSearchContextBranch( file_search_context )
    
    for leaf in file_search_context_branch.IterateLeaves():
        
        tag_ids = self._GetAutocompleteTagIdsLeaf( tag_display_type, leaf, search_text, exact_match, job_key = job_key )
        
        if ':' not in search_text and search_namespaces_into_full_tags and not exact_match:
            
            # also match namespaces by prefix, e.g. 'char' -> 'character:samus aran'
            special_search_text = '{}*:*'.format( search_text )
            
            tag_ids.update( self._GetAutocompleteTagIdsLeaf( tag_display_type, leaf, special_search_text, exact_match, job_key = job_key ) )
            
        
        if job_key is not None and job_key.IsCancelled():
            
            return []
            
        
        # the special combined-deleted domain is treated differently by the counts module
        domain_is_cross_referenced = leaf.file_service_id != self.modules_services.combined_deleted_file_service_id
        
        for group_of_tag_ids in HydrusData.SplitIteratorIntoChunks( tag_ids, 1000 ):
            
            if job_key is not None and job_key.IsCancelled():
                
                return []
                
            
            ids_to_count = self.modules_mappings_counts.GetCounts( tag_display_type, leaf.tag_service_id, leaf.file_service_id, group_of_tag_ids, include_current, include_pending, domain_is_cross_referenced = domain_is_cross_referenced, zero_count_ok = zero_count_ok, job_key = job_key )
            
            if len( ids_to_count ) == 0:
                
                continue
                
            
            #
            
            predicates = self._GeneratePredicatesFromTagIdsAndCounts( tag_display_type, display_tag_service_id, ids_to_count, inclusive, job_key = job_key )
            
            all_predicates.extend( predicates )
            
        
    
    if job_key is not None and job_key.IsCancelled():
        
        return []
        
    
    # collapse duplicate predicates gathered from the different leaves
    predicates = ClientSearch.MergePredicates( all_predicates, add_namespaceless = add_namespaceless )
    
    return predicates
def _GetBonedStats( self ):
    """Gather summary statistics for the 'how boned am I?' review panel.
    
    Returns a dict with inbox/archive/deleted file counts and sizes, total
    viewtime, earliest import time (when known), and duplicate/alternate/
    potential-pair counts.
    """
    
    boned_stats = {}
    
    with self._MakeTemporaryIntegerTable( [], 'hash_id' ) as temp_hash_id_table_name:
        
        # start from everything in the combined local file domain...
        current_files_table_name = ClientDBFilesStorage.GenerateFilesTableName( self.modules_services.combined_local_file_service_id, HC.CONTENT_STATUS_CURRENT )
        
        self._Execute( 'INSERT INTO {} ( hash_id ) SELECT hash_id FROM {};'.format( temp_hash_id_table_name, current_files_table_name ) )
        
        # ...then remove trash and repository-update files
        for service_id in ( self.modules_services.trash_service_id, self.modules_services.local_update_service_id ):
            
            current_files_table_name = ClientDBFilesStorage.GenerateFilesTableName( service_id, HC.CONTENT_STATUS_CURRENT )
            
            self._Execute( 'DELETE FROM {} WHERE hash_id IN ( SELECT hash_id FROM {} );'.format( temp_hash_id_table_name, current_files_table_name ) )
            
        
        ( num_total, size_total ) = self._Execute( 'SELECT COUNT( hash_id ), SUM( size ) FROM {} CROSS JOIN files_info USING ( hash_id );'.format( temp_hash_id_table_name ) ).fetchone()
        
        ( num_inbox, size_inbox ) = self._Execute( 'SELECT COUNT( hash_id ), SUM( size ) FROM files_info NATURAL JOIN {} NATURAL JOIN file_inbox;'.format( temp_hash_id_table_name ) ).fetchone()
        
    
    # SUM() yields NULL over an empty set
    if size_total is None:
        
        size_total = 0
        
    
    if size_inbox is None:
        
        size_inbox = 0
        
    
    with self._MakeTemporaryIntegerTable( [], 'hash_id' ) as temp_hash_id_table_name:
        
        # 'deleted' = deletion records plus whatever currently sits in the trash
        deleted_files_table_name = ClientDBFilesStorage.GenerateFilesTableName( self.modules_services.combined_local_file_service_id, HC.CONTENT_STATUS_DELETED )
        
        self._Execute( 'INSERT INTO {} ( hash_id ) SELECT hash_id FROM {};'.format( temp_hash_id_table_name, deleted_files_table_name ) )
        
        current_files_table_name = ClientDBFilesStorage.GenerateFilesTableName( self.modules_services.trash_service_id, HC.CONTENT_STATUS_CURRENT )
        
        self._Execute( 'INSERT OR IGNORE INTO {} ( hash_id ) SELECT hash_id FROM {};'.format( temp_hash_id_table_name, current_files_table_name ) )
        
        ( num_deleted, size_deleted ) = self._Execute( 'SELECT COUNT( hash_id ), SUM( size ) FROM {} CROSS JOIN files_info USING ( hash_id );'.format( temp_hash_id_table_name ) ).fetchone()
        
    
    if size_deleted is None:
        
        size_deleted = 0
        
    
    num_archive = num_total - num_inbox
    size_archive = size_total - size_inbox
    
    boned_stats[ 'num_inbox' ] = num_inbox
    boned_stats[ 'num_archive' ] = num_archive
    boned_stats[ 'num_deleted' ] = num_deleted
    boned_stats[ 'size_inbox' ] = size_inbox
    boned_stats[ 'size_archive' ] = size_archive
    boned_stats[ 'size_deleted' ] = size_deleted
    
    canvas_types_to_total_viewtimes = { canvas_type : ( views, viewtime ) for ( canvas_type, views, viewtime ) in self._Execute( 'SELECT canvas_type, SUM( views ), SUM( viewtime ) FROM file_viewing_stats GROUP BY canvas_type;' ) }
    
    if CC.CANVAS_PREVIEW not in canvas_types_to_total_viewtimes:
        
        canvas_types_to_total_viewtimes[ CC.CANVAS_PREVIEW ] = ( 0, 0 )
        
    
    if CC.CANVAS_MEDIA_VIEWER not in canvas_types_to_total_viewtimes:
        
        canvas_types_to_total_viewtimes[ CC.CANVAS_MEDIA_VIEWER ] = ( 0, 0 )
        
    
    # NOTE: this is tuple concatenation, not numeric addition--the result is the
    # 4-tuple ( media_views, media_viewtime, preview_views, preview_viewtime )
    total_viewtime = canvas_types_to_total_viewtimes[ CC.CANVAS_MEDIA_VIEWER ] + canvas_types_to_total_viewtimes[ CC.CANVAS_PREVIEW ]
    
    #
    
    earliest_import_time = 0
    
    current_files_table_name = ClientDBFilesStorage.GenerateFilesTableName( self.modules_services.combined_local_file_service_id, HC.CONTENT_STATUS_CURRENT )
    
    result = self._Execute( 'SELECT MIN( timestamp ) FROM {};'.format( current_files_table_name ) ).fetchone()
    
    if result is not None and result[0] is not None:
        
        earliest_import_time = result[0]
        
    
    deleted_files_table_name = ClientDBFilesStorage.GenerateFilesTableName( self.modules_services.combined_local_file_service_id, HC.CONTENT_STATUS_DELETED )
    
    # deletion records keep the original import time, which may predate anything current
    result = self._Execute( 'SELECT MIN( original_timestamp ) FROM {};'.format( deleted_files_table_name ) ).fetchone()
    
    if result is not None and result[0] is not None:
        
        if earliest_import_time == 0:
            
            earliest_import_time = result[0]
            
        else:
            
            earliest_import_time = min( earliest_import_time, result[0] )
            
        
    
    if earliest_import_time > 0:
        
        boned_stats[ 'earliest_import_time' ] = earliest_import_time
        
    
    #
    
    boned_stats[ 'total_viewtime' ] = total_viewtime
    
    # groups of size 1 are not interesting, so only count multi-member groups
    total_alternate_files = sum( ( count for ( alternates_group_id, count ) in self._Execute( 'SELECT alternates_group_id, COUNT( * ) FROM alternate_file_group_members GROUP BY alternates_group_id;' ) if count > 1 ) )
    total_duplicate_files = sum( ( count for ( media_id, count ) in self._Execute( 'SELECT media_id, COUNT( * ) FROM duplicate_file_members GROUP BY media_id;' ) if count > 1 ) )
    
    location_context = ClientLocation.GetLocationContextForAllLocalMedia()
    
    db_location_context = self.modules_files_storage.GetDBLocationContext( location_context )
    
    table_join = self.modules_files_duplicates.DuplicatesGetPotentialDuplicatePairsTableJoinOnFileService( db_location_context )
    
    ( total_potential_pairs, ) = self._Execute( 'SELECT COUNT( * ) FROM ( SELECT DISTINCT smaller_media_id, larger_media_id FROM {} );'.format( table_join ) ).fetchone()
    
    boned_stats[ 'total_alternate_files' ] = total_alternate_files
    boned_stats[ 'total_duplicate_files' ] = total_duplicate_files
    boned_stats[ 'total_potential_pairs' ] = total_potential_pairs
    
    return boned_stats
def _GetClientFilesLocations( self ):
    """Map each storage prefix to its absolute directory path.
    
    A healthy database has 512 rows: 256 'f' (file) prefixes and 256 't'
    (thumbnail) prefixes. If any are missing, a catastrophic-error dialog is
    shown and default rows pointing at db_dir/client_files are inserted; the
    (incomplete) fetched mapping is still returned so boot-repair can proceed.
    """
    
    prefixes_to_paths = { prefix : HydrusPaths.ConvertPortablePathToAbsPath( location ) for ( prefix, location ) in self._Execute( 'SELECT prefix, location FROM client_files_locations;' ) }
    
    if len( prefixes_to_paths ) < 512:
        
        paragraphs = [
            'When fetching the directories where your files are stored, the database discovered some entries were missing!',
            'Default values will now be inserted. If you have previously migrated your files or thumbnails, and assuming this is occuring on boot, you will next be presented with a dialog to remap them to the correct location.',
            'If this is not happening on client boot, you should kill the hydrus process right now, as a serious hard drive fault has likely recently occurred.'
        ]
        
        message = ( os.linesep * 2 ).join( paragraphs )
        
        self._DisplayCatastrophicError( message )
        
        client_files_default = os.path.join( self._db_dir, 'client_files' )
        
        HydrusPaths.MakeSureDirectoryExists( client_files_default )
        
        portable_default = HydrusPaths.ConvertAbsPathToPortablePath( client_files_default )
        
        for prefix in HydrusData.IterateHexPrefixes():
            
            for leading_char in ( 'f', 't' ):
                
                self._Execute( 'INSERT OR IGNORE INTO client_files_locations ( prefix, location ) VALUES ( ?, ? );', ( leading_char + prefix, portable_default ) )
                
            
        
    
    return prefixes_to_paths
def _GetFileHistory( self, num_steps: int ):
    """Build time-series data for the file history chart.
    
    Returns a dict with 'current', 'deleted' and 'inbox' keys, each a list of
    ( timestamp, cumulative_count ) points, sampled into roughly num_steps
    buckets across each series' time range.
    """
    
    # get all sorts of stats and present them in ( timestamp, cumulative_num ) tuple pairs
    
    file_history = {}
    
    # first let's do current files. we increment when added, decrement when we know removed
    
    current_files_table_name = ClientDBFilesStorage.GenerateFilesTableName( self.modules_services.combined_local_file_service_id, HC.CONTENT_STATUS_CURRENT )
    
    current_timestamps = self._STL( self._Execute( 'SELECT timestamp FROM {};'.format( current_files_table_name ) ) )
    
    deleted_files_table_name = ClientDBFilesStorage.GenerateFilesTableName( self.modules_services.combined_local_file_service_id, HC.CONTENT_STATUS_DELETED )
    
    # deletion records keep the original import time, so they count as additions too
    since_deleted = self._STL( self._Execute( 'SELECT original_timestamp FROM {} WHERE original_timestamp IS NOT NULL;'.format( deleted_files_table_name ) ) )
    
    current_timestamps.extend( since_deleted )
    
    current_timestamps.sort()
    
    deleted_timestamps = self._STL( self._Execute( 'SELECT timestamp FROM {} WHERE timestamp IS NOT NULL ORDER BY timestamp ASC;'.format( deleted_files_table_name ) ) )
    
    # merge additions ( +1 ) and deletions ( -1 ) into one chronological delta stream
    combined_timestamps_with_delta = [ ( timestamp, 1 ) for timestamp in current_timestamps ]
    combined_timestamps_with_delta.extend( ( ( timestamp, -1 ) for timestamp in deleted_timestamps ) )
    
    combined_timestamps_with_delta.sort()
    
    current_file_history = []
    
    if len( combined_timestamps_with_delta ) > 0:
        
        # bucket width targeting about num_steps sample points over the whole span
        if len( combined_timestamps_with_delta ) < 2:
            
            step_gap = 1
            
        else:
            
            step_gap = max( ( combined_timestamps_with_delta[-1][0] - combined_timestamps_with_delta[0][0] ) // num_steps, 1 )
            
        
        total_current_files = 0
        step_timestamp = combined_timestamps_with_delta[0][0]
        
        for ( timestamp, delta ) in combined_timestamps_with_delta:
            
            # once we step past the current bucket, emit a sample point and start a new one
            if timestamp > step_timestamp + step_gap:
                
                current_file_history.append( ( step_timestamp, total_current_files ) )
                
                step_timestamp = timestamp
                
            
            total_current_files += delta
            
        
    
    file_history[ 'current' ] = current_file_history
    
    # records with a NULL deletion timestamp form the baseline deleted count
    ( total_deleted_files, ) = self._Execute( 'SELECT COUNT( * ) FROM {} WHERE timestamp IS NULL;'.format( deleted_files_table_name ) ).fetchone()
    
    deleted_file_history = []
    
    if len( deleted_timestamps ) > 0:
        
        if len( deleted_timestamps ) < 2:
            
            step_gap = 1
            
        else:
            
            step_gap = max( ( deleted_timestamps[-1] - deleted_timestamps[0] ) // num_steps, 1 )
            
        
        step_timestamp = deleted_timestamps[0]
        
        for deleted_timestamp in deleted_timestamps:
            
            if deleted_timestamp > step_timestamp + step_gap:
                
                deleted_file_history.append( ( step_timestamp, total_deleted_files ) )
                
                step_timestamp = deleted_timestamp
                
            
            total_deleted_files += 1
            
        
    
    file_history[ 'deleted' ] = deleted_file_history
    
    # the inbox count is only known 'now', so the series is reconstructed by
    # walking archive timestamps backwards from the present, re-adding each
    # archived file to the inbox as we go
    ( total_inbox_files, ) = self._Execute( 'SELECT COUNT( * ) FROM file_inbox;' ).fetchone()
    
    archive_timestamps = self._STL( self._Execute( 'SELECT archived_timestamp FROM archive_timestamps ORDER BY archived_timestamp ASC;' ) )
    
    inbox_file_history = []
    
    if len( archive_timestamps ) > 0:
        
        if len( archive_timestamps ) < 2:
            
            step_gap = 1
            
        else:
            
            step_gap = max( ( archive_timestamps[-1] - archive_timestamps[0] ) // num_steps, 1 )
            
        
        archive_timestamps.reverse()
        
        step_timestamp = archive_timestamps[0]
        
        for archived_timestamp in archive_timestamps:
            
            if archived_timestamp < step_timestamp - step_gap:
                
                inbox_file_history.append( ( archived_timestamp, total_inbox_files ) )
                
                step_timestamp = archived_timestamp
                
            
            total_inbox_files += 1
            
        
        # restore chronological order for the chart
        inbox_file_history.reverse()
        
    
    file_history[ 'inbox' ] = inbox_file_history
    
    return file_history
def _GetFileNotes( self, hash ):
    """Fetch all notes attached to the given file as a { name : note } dict."""
    
    hash_id = self.modules_hashes_local_cache.GetHashId( hash )
    
    rows = self._Execute( 'SELECT label, note FROM file_notes, labels, notes ON ( file_notes.name_id = labels.label_id AND file_notes.note_id = notes.note_id ) WHERE hash_id = ?;', ( hash_id, ) )
    
    return dict( rows )
def _GetFileSearchContextBranch( self, file_search_context: ClientSearch.FileSearchContext ) -> ClientDBServices.FileSearchContextBranch:
    """Resolve a search context's file/tag service keys into service ids and
    bundle them into a FileSearchContextBranch. Services that no longer exist
    are skipped with a user notification instead of raising.
    """
    
    location_context = file_search_context.GetLocationContext()
    tag_search_context = file_search_context.GetTagSearchContext()
    
    ( file_service_keys, file_location_is_cross_referenced ) = location_context.GetCoveringCurrentFileServiceKeys()
    
    search_file_service_ids = []
    
    for file_service_key in file_service_keys:
        
        try:
            
            search_file_service_ids.append( self.modules_services.GetServiceId( file_service_key ) )
            
        except HydrusExceptions.DataMissing:
            
            HydrusData.ShowText( 'A query was run for a file service that does not exist! If you just removed a service, you might want to try checking the search and/or restarting the client.' )
            
        
    
    if tag_search_context.IsAllKnownTags():
        
        search_tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
        
    else:
        
        try:
            
            search_tag_service_ids = ( self.modules_services.GetServiceId( tag_search_context.service_key ), )
            
        except HydrusExceptions.DataMissing:
            
            HydrusData.ShowText( 'A query was run for a tag service that does not exist! If you just removed a service, you might want to try checking the search and/or restarting the client.' )
            
            search_tag_service_ids = []
            
        
    
    return ClientDBServices.FileSearchContextBranch( file_search_context, search_file_service_ids, search_tag_service_ids, file_location_is_cross_referenced )
def _GetFileSystemPredicates( self, file_search_context: ClientSearch.FileSearchContext, force_system_everything = False ):
    """Build the list of 'system:' predicates to offer for this search context,
    attaching counts where they are cheaply available from the service_info
    cache. The result is merged and sorted with system:everything first.
    """
    
    location_context = file_search_context.GetLocationContext()
    
    # system:everything is hidden for very large clients unless forced/opted-in
    system_everything_limit = 10000
    
    system_everything_suffix = ''
    
    predicates = []
    
    system_everythings = [ ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_EVERYTHING ) ]
    
    # predicate types that are always offered, without counts
    blank_pred_types = {
        ClientSearch.PREDICATE_TYPE_SYSTEM_NUM_TAGS,
        ClientSearch.PREDICATE_TYPE_SYSTEM_LIMIT,
        ClientSearch.PREDICATE_TYPE_SYSTEM_KNOWN_URLS,
        ClientSearch.PREDICATE_TYPE_SYSTEM_HASH,
        ClientSearch.PREDICATE_TYPE_SYSTEM_FILE_SERVICE,
        ClientSearch.PREDICATE_TYPE_SYSTEM_FILE_RELATIONSHIPS,
        ClientSearch.PREDICATE_TYPE_SYSTEM_TAG_AS_NUMBER,
        ClientSearch.PREDICATE_TYPE_SYSTEM_FILE_VIEWING_STATS
    }
    
    if len( self.modules_services.GetServiceIds( HC.RATINGS_SERVICES ) ) > 0:
        
        blank_pred_types.add( ClientSearch.PREDICATE_TYPE_SYSTEM_RATING )
        
    
    if location_context.IsAllKnownFiles():
        
        # 'all known files' domain: counts come from the tag service, if any
        tag_service_key = file_search_context.GetTagSearchContext().service_key
        
        if tag_service_key == CC.COMBINED_TAG_SERVICE_KEY:
            
            if force_system_everything or self._controller.new_options.GetBoolean( 'always_show_system_everything' ):
                
                predicates.append( ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_EVERYTHING ) )
                
            
        else:
            
            service_id = self.modules_services.GetServiceId( tag_service_key )
            
            service_type = self.modules_services.GetServiceType( service_id )
            
            # do not force an expensive recalculation just to show a count
            service_info = self._GetServiceInfoSpecific( service_id, service_type, { HC.SERVICE_INFO_NUM_FILES }, calculate_missing = False )
            
            if HC.SERVICE_INFO_NUM_FILES in service_info:
                
                num_everything = service_info[ HC.SERVICE_INFO_NUM_FILES ]
                
                system_everythings.append( ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_EVERYTHING, count = ClientSearch.PredicateCount.STATICCreateCurrentCount( num_everything ) ) )
                
            
        
    else:
        
        # walk every (service, status) pair the location covers
        jobs = []
        
        jobs.extend( ( ( file_service_key, HC.CONTENT_STATUS_CURRENT ) for file_service_key in location_context.current_service_keys ) )
        jobs.extend( ( ( file_service_key, HC.CONTENT_STATUS_DELETED ) for file_service_key in location_context.deleted_service_keys ) )
        
        file_repo_preds = []
        
        inbox_archive_preds = []
        
        we_saw_a_file_repo = False
        
        for ( file_service_key, status ) in jobs:
            
            service_id = self.modules_services.GetServiceId( file_service_key )
            
            service_type = self.modules_services.GetServiceType( service_id )
            
            if service_type not in HC.FILE_SERVICES:
                
                continue
                
            
            if status == HC.CONTENT_STATUS_CURRENT:
                
                service_info = self._GetServiceInfoSpecific( service_id, service_type, { HC.SERVICE_INFO_NUM_VIEWABLE_FILES, HC.SERVICE_INFO_NUM_INBOX } )
                
                num_everything = service_info[ HC.SERVICE_INFO_NUM_VIEWABLE_FILES ]
                
                system_everythings.append( ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_EVERYTHING, count = ClientSearch.PredicateCount.STATICCreateCurrentCount( num_everything ) ) )
                
                # inbox/local counts are not meaningful when deleted files are mixed in
                if location_context.IncludesDeleted():
                    
                    continue
                    
                
                num_inbox = service_info[ HC.SERVICE_INFO_NUM_INBOX ]
                
                # NOTE: this value is recomputed as num_local - num_inbox below;
                # this first assignment is effectively dead
                num_archive = num_everything - num_inbox
                
                if service_type == HC.FILE_REPOSITORY:
                    
                    we_saw_a_file_repo = True
                    
                    num_local = self.modules_files_storage.GetNumLocal( service_id )
                    
                    num_not_local = num_everything - num_local
                    
                else:
                    
                    num_local = num_everything
                    
                    num_not_local = 0
                    
                
                # these are only surfaced later if we actually saw a file repository
                file_repo_preds.append( ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_LOCAL, count = ClientSearch.PredicateCount.STATICCreateCurrentCount( num_local ) ) )
                file_repo_preds.append( ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_NOT_LOCAL, count = ClientSearch.PredicateCount.STATICCreateCurrentCount( num_not_local ) ) )
                
                num_archive = num_local - num_inbox
                
                inbox_archive_preds.append( ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_INBOX, count = ClientSearch.PredicateCount.STATICCreateCurrentCount( num_inbox ) ) )
                inbox_archive_preds.append( ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_ARCHIVE, count = ClientSearch.PredicateCount.STATICCreateCurrentCount( num_archive ) ) )
                
            elif status == HC.CONTENT_STATUS_DELETED:
                
                service_info = self._GetServiceInfoSpecific( service_id, service_type, { HC.SERVICE_INFO_NUM_DELETED_FILES } )
                
                num_everything = service_info[ HC.SERVICE_INFO_NUM_DELETED_FILES ]
                
                system_everythings.append( ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_EVERYTHING, count = ClientSearch.PredicateCount.STATICCreateCurrentCount( num_everything ) ) )
                
            
        
        if we_saw_a_file_repo:
            
            predicates.extend( file_repo_preds )
            
        
        if len( inbox_archive_preds ) > 0:
            
            inbox_archive_preds = ClientSearch.MergePredicates( inbox_archive_preds )
            
            zero_counts = [ pred.GetCount().HasZeroCount() for pred in inbox_archive_preds ]
            
            # optionally hide inbox/archive when one of them is empty
            if True in zero_counts and self._controller.new_options.GetBoolean( 'filter_inbox_and_archive_predicates' ):
                
                if False in zero_counts and location_context.IsOneDomain():
                    
                    # something is in here, but we are hiding, so let's inform system everything
                    useful_pred = list( ( pred for pred in inbox_archive_preds if pred.GetCount().HasNonZeroCount() ) )[0]
                    
                    if useful_pred.GetType() == ClientSearch.PREDICATE_TYPE_SYSTEM_INBOX:
                        
                        system_everything_suffix = 'all in inbox'
                        
                    else:
                        
                        system_everything_suffix = 'all in archive'
                        
                    
                
            else:
                
                predicates.extend( inbox_archive_preds )
                
            
        
        # these predicate types only make sense with a concrete file domain
        blank_pred_types.update( [
            ClientSearch.PREDICATE_TYPE_SYSTEM_SIZE,
            ClientSearch.PREDICATE_TYPE_SYSTEM_TIME,
            ClientSearch.PREDICATE_TYPE_SYSTEM_DIMENSIONS,
            ClientSearch.PREDICATE_TYPE_SYSTEM_DURATION,
            ClientSearch.PREDICATE_TYPE_SYSTEM_HAS_AUDIO,
            ClientSearch.PREDICATE_TYPE_SYSTEM_HAS_ICC_PROFILE,
            ClientSearch.PREDICATE_TYPE_SYSTEM_NOTES,
            ClientSearch.PREDICATE_TYPE_SYSTEM_NUM_WORDS,
            ClientSearch.PREDICATE_TYPE_SYSTEM_MIME,
            ClientSearch.PREDICATE_TYPE_SYSTEM_SIMILAR_TO
        ] )
        
    
    if len( system_everythings ) > 0:
        
        # merge the per-service everything counts into one predicate
        system_everythings = ClientSearch.MergePredicates( system_everythings )
        
        system_everything = list( system_everythings )[0]
        
        system_everything.SetCountTextSuffix( system_everything_suffix )
        
        num_everything = system_everything.GetCount().GetMinCount()
        
        if force_system_everything or ( num_everything <= system_everything_limit or self._controller.new_options.GetBoolean( 'always_show_system_everything' ) ):
            
            predicates.append( system_everything )
            
        
    
    predicates.extend( [ ClientSearch.Predicate( predicate_type ) for predicate_type in blank_pred_types ] )
    
    predicates = ClientSearch.MergePredicates( predicates )
    
    def sys_preds_key( s ):
        
        # fixed display order: everything, inbox, archive, local, not-local, then alphabetical
        t = s.GetType()
        
        if t == ClientSearch.PREDICATE_TYPE_SYSTEM_EVERYTHING:
            
            return ( 0, 0 )
            
        elif t == ClientSearch.PREDICATE_TYPE_SYSTEM_INBOX:
            
            return ( 1, 0 )
            
        elif t == ClientSearch.PREDICATE_TYPE_SYSTEM_ARCHIVE:
            
            return ( 2, 0 )
            
        elif t == ClientSearch.PREDICATE_TYPE_SYSTEM_LOCAL:
            
            return ( 3, 0 )
            
        elif t == ClientSearch.PREDICATE_TYPE_SYSTEM_NOT_LOCAL:
            
            return ( 4, 0 )
            
        else:
            
            return ( 5, s.ToString() )
            
        
    
    predicates.sort( key = sys_preds_key )
    
    return predicates
def _GetForceRefreshTagsManagers( self, hash_ids, hash_ids_to_current_file_service_ids = None ):
    """
    Build fresh TagsManager objects for the given hash_ids, bypassing any cached state.
    
    The ids are staged into a temporary integer table (which is ANALYZEd so the
    query planner has stats on it) and the table-based worker does the real work.
    """
    
    with self._MakeTemporaryIntegerTable( hash_ids, 'hash_id' ) as staged_hash_ids_table_name:
        
        self._AnalyzeTempTable( staged_hash_ids_table_name )
        
        return self._GetForceRefreshTagsManagersWithTableHashIds( hash_ids, staged_hash_ids_table_name, hash_ids_to_current_file_service_ids = hash_ids_to_current_file_service_ids )
        
    
def _GetForceRefreshTagsManagersWithTableHashIds( self, hash_ids, hash_ids_table_name, hash_ids_to_current_file_service_ids = None ):
    """
    Build a fresh TagsManager for every hash_id in the given temp table.
    
    Returns a dict mapping hash_id -> ClientMediaManagers.TagsManager, populated
    with both storage (raw) and display (sibling/parent-processed) tag data for
    all real tag services.
    
    hash_ids_to_current_file_service_ids is an optional pre-fetched mapping; if
    not provided, it is loaded from the files storage module.
    """
    
    if hash_ids_to_current_file_service_ids is None:
        
        hash_ids_to_current_file_service_ids = self.modules_files_storage.GetHashIdsToCurrentServiceIds( hash_ids_table_name )
        
    
    # group the files by a common file service so each batch can be fetched from one tag cache
    # (presumably the fastest specific cache for that domain -- see the worker below)
    common_file_service_ids_to_hash_ids = self._GroupHashIdsByTagCachedFileServiceId( hash_ids, hash_ids_table_name, hash_ids_to_current_file_service_ids = hash_ids_to_current_file_service_ids )
    
    tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
    
    # both lists hold tuples of ( hash_id, ( tag_service_id, status, tag_id ) )
    storage_tag_data = []
    display_tag_data = []
    
    for ( common_file_service_id, batch_of_hash_ids ) in common_file_service_ids_to_hash_ids.items():
        
        if len( batch_of_hash_ids ) == len( hash_ids ):
            
            # the whole request shares one file service -- reuse the existing temp table
            ( batch_of_storage_tag_data, batch_of_display_tag_data ) = self._GetForceRefreshTagsManagersWithTableHashIdsTagData( common_file_service_id, tag_service_ids, hash_ids_table_name )
            
        else:
            
            # only a subset -- stage that subset in its own temp table
            with self._MakeTemporaryIntegerTable( batch_of_hash_ids, 'hash_id' ) as temp_batch_hash_ids_table_name:
                
                ( batch_of_storage_tag_data, batch_of_display_tag_data ) = self._GetForceRefreshTagsManagersWithTableHashIdsTagData( common_file_service_id, tag_service_ids, temp_batch_hash_ids_table_name )
                
            
        
        storage_tag_data.extend( batch_of_storage_tag_data )
        display_tag_data.extend( batch_of_display_tag_data )
        
    
    # resolve every tag_id we saw to its text in one cache hit
    seen_tag_ids = { tag_id for ( hash_id, ( tag_service_id, status, tag_id ) ) in storage_tag_data }
    
    seen_tag_ids.update( ( tag_id for ( hash_id, ( tag_service_id, status, tag_id ) ) in display_tag_data ) )
    
    tag_ids_to_tags = self.modules_tags_local_cache.GetTagIdsToTags( tag_ids = seen_tag_ids )
    
    service_ids_to_service_keys = self.modules_services.GetServiceIdsToServiceKeys()
    
    hash_ids_to_raw_storage_tag_data = HydrusData.BuildKeyToListDict( storage_tag_data )
    hash_ids_to_raw_display_tag_data = HydrusData.BuildKeyToListDict( display_tag_data )
    
    hash_ids_to_tag_managers = {}
    
    for hash_id in hash_ids:
        
        # hash_ids with no tag rows still get a (mostly empty) TagsManager here,
        # since BuildKeyToListDict returns a defaultdict-style structure per key lookup
        raw_storage_tag_data = hash_ids_to_raw_storage_tag_data[ hash_id ]
        
        # pivot to service_key -> status -> set of tag texts, the shape TagsManager wants
        service_ids_to_storage_tag_data = HydrusData.BuildKeyToListDict( ( ( tag_service_id, ( status, tag_ids_to_tags[ tag_id ] ) ) for ( tag_service_id, status, tag_id ) in raw_storage_tag_data ) )
        
        service_keys_to_statuses_to_storage_tags = collections.defaultdict(
            HydrusData.default_dict_set,
            { service_ids_to_service_keys[ tag_service_id ] : HydrusData.BuildKeyToSetDict( status_and_tag ) for ( tag_service_id, status_and_tag ) in service_ids_to_storage_tag_data.items() }
        )
        
        raw_display_tag_data = hash_ids_to_raw_display_tag_data[ hash_id ]
        
        service_ids_to_display_tag_data = HydrusData.BuildKeyToListDict( ( ( tag_service_id, ( status, tag_ids_to_tags[ tag_id ] ) ) for ( tag_service_id, status, tag_id ) in raw_display_tag_data ) )
        
        service_keys_to_statuses_to_display_tags = collections.defaultdict(
            HydrusData.default_dict_set,
            { service_ids_to_service_keys[ tag_service_id ] : HydrusData.BuildKeyToSetDict( status_and_tag ) for ( tag_service_id, status_and_tag ) in service_ids_to_display_tag_data.items() }
        )
        
        tags_manager = ClientMediaManagers.TagsManager( service_keys_to_statuses_to_storage_tags, service_keys_to_statuses_to_display_tags )
        
        hash_ids_to_tag_managers[ hash_id ] = tags_manager
        
    
    return hash_ids_to_tag_managers
    
def _GetForceRefreshTagsManagersWithTableHashIdsTagData( self, common_file_service_id, tag_service_ids, hash_ids_table_name ):
    """
    Fetch raw storage and display tag rows for the files in the given temp table,
    which are all covered by the one common file service.
    
    Returns ( storage_tag_data, display_tag_data ), each a list of
    ( hash_id, ( tag_service_id, status, tag_id ) ) tuples.
    """
    
    storage_tag_data = []
    display_tag_data = []
    
    for tag_service_id in tag_service_ids:
        
        # storage mappings: one table per status, picked for speed for this file domain
        statuses_to_table_names = self.modules_mappings_storage.GetFastestStorageMappingTableNames( common_file_service_id, tag_service_id )
        
        for ( status, mappings_table_name ) in statuses_to_table_names.items():
            
            storage_tag_data.extend( ( hash_id, ( tag_service_id, status, tag_id ) ) for ( hash_id, tag_id ) in self._Execute( 'SELECT hash_id, tag_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( hash_ids_table_name, mappings_table_name ) ) )
            
        
        if common_file_service_id != self.modules_services.combined_file_service_id:
            
            # a specific file domain has precomputed display-mapping caches we can read directly
            ( cache_current_display_mappings_table_name, cache_pending_display_mappings_table_name ) = ClientDBMappingsCacheSpecificDisplay.GenerateSpecificDisplayMappingsCacheTableNames( common_file_service_id, tag_service_id )
            
            display_tag_data.extend( ( hash_id, ( tag_service_id, HC.CONTENT_STATUS_CURRENT, tag_id ) ) for ( hash_id, tag_id ) in self._Execute( 'SELECT hash_id, tag_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( hash_ids_table_name, cache_current_display_mappings_table_name ) ) )
            display_tag_data.extend( ( hash_id, ( tag_service_id, HC.CONTENT_STATUS_PENDING, tag_id ) ) for ( hash_id, tag_id ) in self._Execute( 'SELECT hash_id, tag_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( hash_ids_table_name, cache_pending_display_mappings_table_name ) ) )
            
        
    
    if common_file_service_id == self.modules_services.combined_file_service_id:
        
        # the combined domain has no display cache, so compute display rows on the fly:
        # take current/pending storage rows and expand each tag to everything it implies
        current_and_pending_storage_tag_data = [ ( hash_id, ( tag_service_id, status, tag_id ) ) for ( hash_id, ( tag_service_id, status, tag_id ) ) in storage_tag_data if status in ( HC.CONTENT_STATUS_CURRENT, HC.CONTENT_STATUS_PENDING ) ]
        
        seen_service_ids_to_seen_tag_ids = HydrusData.BuildKeyToSetDict( ( ( tag_service_id, tag_id ) for ( hash_id, ( tag_service_id, status, tag_id ) ) in current_and_pending_storage_tag_data ) )
        
        seen_service_ids_to_tag_ids_to_implied_tag_ids = { tag_service_id : self.modules_tag_display.GetTagsToImplies( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, tag_ids ) for ( tag_service_id, tag_ids ) in seen_service_ids_to_seen_tag_ids.items() }
        
        display_tag_data = []
        
        for ( hash_id, ( tag_service_id, status, tag_id ) ) in current_and_pending_storage_tag_data:
            
            display_tag_data.extend( ( ( hash_id, ( tag_service_id, status, implied_tag_id ) ) for implied_tag_id in seen_service_ids_to_tag_ids_to_implied_tag_ids[ tag_service_id ][ tag_id ] ) )
            
        
    
    return ( storage_tag_data, display_tag_data )
    
def _GetHashIdsAndNonZeroTagCounts( self, tag_display_type: int, location_context: ClientLocation.LocationContext, tag_search_context: ClientSearch.TagSearchContext, hash_ids, namespace_wildcard = None, job_key = None ):
    """
    For each of the given hash_ids that has at least one matching tag, return
    ( hash_id, tag_count ) rows as a list. Files with zero matching tags are
    simply absent from the result.
    
    namespace_wildcard optionally restricts the count to tags in matching
    namespaces; '*' and None both mean 'any namespace'.
    
    job_key, if given, allows cancellation; a cancelled job returns whatever
    partial results were gathered so far.
    """
    
    if namespace_wildcard == '*':
        
        namespace_wildcard = None
        
    
    if namespace_wildcard is None:
        
        namespace_ids = []
        
    else:
        
        namespace_ids = self.modules_tag_search.GetNamespaceIdsFromWildcard( namespace_wildcard )
        
    
    with self._MakeTemporaryIntegerTable( namespace_ids, 'namespace_id' ) as temp_namespace_ids_table_name:
        
        # NOTE(review): file_location_is_cross_referenced is fetched but unused here --
        # presumably the covering file services alone are sufficient for counting
        ( file_service_keys, file_location_is_cross_referenced ) = location_context.GetCoveringCurrentFileServiceKeys()
        
        # a set, since multiple covering file services can resolve to the same cache tables
        mapping_and_tag_table_names = set()
        
        for file_service_key in file_service_keys:
            
            mapping_and_tag_table_names.update( self._GetMappingAndTagTables( tag_display_type, file_service_key, tag_search_context ) )
            
        
        results = []
        
        # chunk the hash_ids so each UNION query stays a manageable size; sqrt keeps
        # the number of chunks and the chunk size balanced, floored at 64
        BLOCK_SIZE = max( 64, int( len( hash_ids ) ** 0.5 ) )
        
        for group_of_hash_ids in HydrusData.SplitIteratorIntoChunks( hash_ids, BLOCK_SIZE ):
            
            with self._MakeTemporaryIntegerTable( group_of_hash_ids, 'hash_id' ) as hash_ids_table_name:
                
                if namespace_wildcard is None:
                    
                    # temp hashes to mappings
                    select_statements = [ 'SELECT hash_id, tag_id FROM {} CROSS JOIN {} USING ( hash_id )'.format( hash_ids_table_name, mappings_table_name ) for ( mappings_table_name, tags_table_name ) in mapping_and_tag_table_names ]
                    
                else:
                    
                    # temp hashes to mappings to tags to temp namespaces
                    select_statements = [ 'SELECT hash_id, tag_id FROM {} CROSS JOIN {} USING ( hash_id ) CROSS JOIN {} USING ( tag_id ) CROSS JOIN {} USING ( namespace_id )'.format( hash_ids_table_name, mappings_table_name, tags_table_name, temp_namespace_ids_table_name ) for ( mappings_table_name, tags_table_name ) in mapping_and_tag_table_names ]
                    
                
                unions = '( {} )'.format( ' UNION '.join( select_statements ) )
                
                # UNION dedupes, so a tag appearing in several tables counts once per file
                query = 'SELECT hash_id, COUNT( tag_id ) FROM {} GROUP BY hash_id;'.format( unions )
                
                cursor = self._Execute( query )
                
                cancelled_hook = None
                
                if job_key is not None:
                    
                    cancelled_hook = job_key.IsCancelled
                    
                
                loop_of_results = HydrusDB.ReadFromCancellableCursor( cursor, 64, cancelled_hook = cancelled_hook )
                
                if job_key is not None and job_key.IsCancelled():
                    
                    # bail with what we have; the cursor read above was also cancellable
                    return results
                    
                
                results.extend( loop_of_results )
                
            
        
        return results
        
    
def _GetHashIdsFromFileViewingStatistics( self, view_type, viewing_locations, operator, viewing_value ):
    """
    Search file_viewing_stats for files matching a views/viewtime predicate.
    
    view_type: 'views' or 'viewtime' -- which statistic to test.
    viewing_locations: container that may hold 'media' and/or 'preview' -- which
        canvas types to include. Both together sum the per-canvas rows per file.
    operator: a SQL comparison operator string, or CC.UNICODE_ALMOST_EQUAL_TO
        for a fuzzy +/-20% match.
    viewing_value: the number to compare against.
    
    Returns a set of matching hash_ids.
    """
    
    include_media = 'media' in viewing_locations
    include_preview = 'preview' in viewing_locations
    
    if not include_media and not include_preview:
        
        # no viewing location selected, so nothing can match.
        # fix: this used to return a list [], inconsistent with the set every
        # other path returns via _STS
        return set()
        
    
    canvas_type_predicate = '1=1'
    group_by_phrase = ''
    
    # NOTE(review): any view_type other than 'views'/'viewtime' leaves
    # content_phrase unbound and raises NameError below -- callers control this
    if view_type == 'views':
        
        content_phrase = 'views'
        
    elif view_type == 'viewtime':
        
        content_phrase = 'viewtime'
        
    
    if include_media and include_preview:
        
        # both canvas types: sum the per-canvas rows for each file and test the total
        group_by_phrase = ' GROUP BY hash_id'
        
        if view_type == 'views':
            
            content_phrase = 'SUM( views )'
            
        elif view_type == 'viewtime':
            
            content_phrase = 'SUM( viewtime )'
            
        
    elif include_media:
        
        canvas_type_predicate = 'canvas_type = {}'.format( CC.CANVAS_MEDIA_VIEWER )
        
    elif include_preview:
        
        canvas_type_predicate = 'canvas_type = {}'.format( CC.CANVAS_PREVIEW )
        
    
    if operator == CC.UNICODE_ALMOST_EQUAL_TO:
        
        # 'about' means within 20% either way
        lower_bound = int( 0.8 * viewing_value )
        upper_bound = int( 1.2 * viewing_value )
        
        test_phrase = '{} BETWEEN {} AND {}'.format( content_phrase, str( lower_bound ), str( upper_bound ) )
        
    else:
        
        test_phrase = '{} {} {}'.format( content_phrase, operator, str( viewing_value ) )
        
    
    if include_media and include_preview:
        
        # the aggregate test must live in HAVING, after the GROUP BY
        select_statement = 'SELECT hash_id FROM file_viewing_stats {} HAVING {};'.format( group_by_phrase, test_phrase )
        
    else:
        
        select_statement = 'SELECT hash_id FROM file_viewing_stats WHERE {} AND {}{};'.format( test_phrase, canvas_type_predicate, group_by_phrase )
        
    
    hash_ids = self._STS( self._Execute( select_statement ) )
    
    return hash_ids
    
def _GetHashIdsFromNamespaceIdsSubtagIds( self, tag_display_type: int, file_service_key, tag_search_context: ClientSearch.TagSearchContext, namespace_ids, subtag_ids, hash_ids = None, hash_ids_table_name = None, job_key = None ):
    """
    Resolve the cross product of namespace_ids and subtag_ids to tag_ids, then
    return the hash_ids carrying any of those tags in the given domains.
    """
    
    resolved_file_service_id = self.modules_services.GetServiceId( file_service_key )
    resolved_tag_service_id = self.modules_services.GetServiceId( tag_search_context.service_key )
    
    matching_tag_ids = self._GetTagIdsFromNamespaceIdsSubtagIds( resolved_file_service_id, resolved_tag_service_id, namespace_ids, subtag_ids, job_key = job_key )
    
    return self._GetHashIdsFromTagIds( tag_display_type, file_service_key, tag_search_context, matching_tag_ids, hash_ids = hash_ids, hash_ids_table_name = hash_ids_table_name, job_key = job_key )
    
def _GetHashIdsFromNamespaceIdsSubtagIdsTables( self, tag_display_type: int, file_service_key, tag_search_context: ClientSearch.TagSearchContext, namespace_ids_table_name, subtag_ids_table_name, hash_ids = None, hash_ids_table_name = None, job_key = None ):
    """
    Table-based variant of _GetHashIdsFromNamespaceIdsSubtagIds: the namespace
    and subtag ids are supplied in temp tables rather than as Python iterables.
    """
    
    resolved_file_service_id = self.modules_services.GetServiceId( file_service_key )
    resolved_tag_service_id = self.modules_services.GetServiceId( tag_search_context.service_key )
    
    matching_tag_ids = self._GetTagIdsFromNamespaceIdsSubtagIdsTables( resolved_file_service_id, resolved_tag_service_id, namespace_ids_table_name, subtag_ids_table_name, job_key = job_key )
    
    return self._GetHashIdsFromTagIds( tag_display_type, file_service_key, tag_search_context, matching_tag_ids, hash_ids = hash_ids, hash_ids_table_name = hash_ids_table_name, job_key = job_key )
    
def _GetHashIdsFromNoteName( self, name: str, hash_ids_table_name: str ):
    """
    Return the set of hash_ids from the given temp table that have a note with
    exactly this name.
    """
    
    label_id = self.modules_texts.GetLabelId( name )
    
    # note names are rare, so this runs opposite to the typical join direction:
    # from file_notes into the temp hash table
    query = 'SELECT hash_id FROM file_notes CROSS JOIN {} USING ( hash_id ) WHERE name_id = ?;'.format( hash_ids_table_name )
    
    return self._STS( self._Execute( query, ( label_id, ) ) )
    
def _GetHashIdsFromNumNotes( self, min_num_notes: typing.Optional[ int ], max_num_notes: typing.Optional[ int ], hash_ids_table_name: str ):
    """
    Return the set of hash_ids from the given temp table whose note count lies
    in [ min_num_notes, max_num_notes ] (either bound may be None for open).
    
    The pure existence cases ('has notes' = min 1/no max, 'has no notes' =
    max 0) are answered with a cheap EXISTS query instead of counting.
    """
    
    wants_any_notes = max_num_notes is None and min_num_notes == 1
    wants_no_notes = ( min_num_notes is None or min_num_notes == 0 ) and max_num_notes is not None and max_num_notes == 0
    
    if wants_any_notes or wants_no_notes:
        
        # existence test only -- no need to count per file
        noted_hash_ids = self._STS( self._Execute( 'SELECT hash_id FROM {} WHERE EXISTS ( SELECT 1 FROM file_notes WHERE file_notes.hash_id = {}.hash_id );'.format( hash_ids_table_name, hash_ids_table_name ) ) )
        
        if wants_any_notes:
            
            return noted_hash_ids
            
        
        all_hash_ids = self._STS( self._Execute( 'SELECT hash_id FROM {};'.format( hash_ids_table_name ) ) )
        
        return all_hash_ids.difference( noted_hash_ids )
        
    
    if min_num_notes is None:
        
        in_range = lambda count: count <= max_num_notes
        
    elif max_num_notes is None:
        
        in_range = lambda count: min_num_notes <= count
        
    else:
        
        in_range = lambda count: min_num_notes <= count <= max_num_notes
        
    
    # temp hashes to notes
    count_query = 'SELECT hash_id, COUNT( * ) FROM {} CROSS JOIN file_notes USING ( hash_id ) GROUP BY hash_id;'.format( hash_ids_table_name )
    
    return { hash_id for ( hash_id, num_notes ) in self._Execute( count_query ) if in_range( num_notes ) }
    
def _GetHashIdsFromQuery( self, file_search_context: ClientSearch.FileSearchContext, job_key = None, query_hash_ids: typing.Optional[ set ] = None, apply_implicit_limit = True, sort_by = None, limit_sort_by = None ):
if job_key is None:
job_key = ClientThreading.JobKey( cancellable = True )
if query_hash_ids is not None:
query_hash_ids = set( query_hash_ids )
have_cross_referenced_file_locations = False
self._controller.ResetIdleTimer()
system_predicates = file_search_context.GetSystemPredicates()
location_context = file_search_context.GetLocationContext()
tag_search_context = file_search_context.GetTagSearchContext()
tag_service_key = tag_search_context.service_key
include_current_tags = tag_search_context.include_current_tags
include_pending_tags = tag_search_context.include_pending_tags
if not location_context.SearchesAnything():
return set()
current_file_service_ids = set()
for current_service_key in location_context.current_service_keys:
try:
current_file_service_id = self.modules_services.GetServiceId( current_service_key )
except HydrusExceptions.DataMissing:
HydrusData.ShowText( 'A file search query was run for a file service that does not exist! If you just removed a service, you might want to try checking the search and/or restarting the client.' )
return set()
current_file_service_ids.add( current_file_service_id )
deleted_file_service_ids = set()
for deleted_service_key in location_context.deleted_service_keys:
try:
deleted_file_service_id = self.modules_services.GetServiceId( deleted_service_key )
except HydrusExceptions.DataMissing:
HydrusData.ShowText( 'A file search query was run for a file service that does not exist! If you just removed a service, you might want to try checking the search and/or restarting the client.' )
return set()
deleted_file_service_ids.add( deleted_file_service_id )
db_location_context = self.modules_files_storage.GetDBLocationContext( location_context )
try:
tag_service_id = self.modules_services.GetServiceId( tag_service_key )
except HydrusExceptions.DataMissing:
HydrusData.ShowText( 'A file search query was run for a tag service that does not exist! If you just removed a service, you might want to try checking the search and/or restarting the client.' )
return set()
tags_to_include = file_search_context.GetTagsToInclude()
tags_to_exclude = file_search_context.GetTagsToExclude()
namespaces_to_include = file_search_context.GetNamespacesToInclude()
namespaces_to_exclude = file_search_context.GetNamespacesToExclude()
wildcards_to_include = file_search_context.GetWildcardsToInclude()
wildcards_to_exclude = file_search_context.GetWildcardsToExclude()
simple_preds = system_predicates.GetSimpleInfo()
king_filter = system_predicates.GetKingFilter()
or_predicates = file_search_context.GetORPredicates()
need_file_domain_cross_reference = not location_context.IsAllKnownFiles()
there_are_tags_to_search = len( tags_to_include ) > 0 or len( namespaces_to_include ) > 0 or len( wildcards_to_include ) > 0
# ok, let's set up the big list of simple search preds
files_info_predicates = []
if 'min_size' in simple_preds:
files_info_predicates.append( 'size > ' + str( simple_preds[ 'min_size' ] ) )
if 'size' in simple_preds:
files_info_predicates.append( 'size = ' + str( simple_preds[ 'size' ] ) )
if 'not_size' in simple_preds:
files_info_predicates.append( 'size != ' + str( simple_preds[ 'not_size' ] ) )
if 'max_size' in simple_preds:
files_info_predicates.append( 'size < ' + str( simple_preds[ 'max_size' ] ) )
if 'mimes' in simple_preds:
mimes = simple_preds[ 'mimes' ]
if len( mimes ) == 1:
( mime, ) = mimes
files_info_predicates.append( 'mime = ' + str( mime ) )
else:
files_info_predicates.append( 'mime IN ' + HydrusData.SplayListForDB( mimes ) )
if 'has_audio' in simple_preds:
has_audio = simple_preds[ 'has_audio' ]
files_info_predicates.append( 'has_audio = {}'.format( int( has_audio ) ) )
if 'min_width' in simple_preds:
files_info_predicates.append( 'width > ' + str( simple_preds[ 'min_width' ] ) )
if 'width' in simple_preds:
files_info_predicates.append( 'width = ' + str( simple_preds[ 'width' ] ) )
if 'not_width' in simple_preds:
files_info_predicates.append( 'width != ' + str( simple_preds[ 'not_width' ] ) )
if 'max_width' in simple_preds:
files_info_predicates.append( 'width < ' + str( simple_preds[ 'max_width' ] ) )
if 'min_height' in simple_preds:
files_info_predicates.append( 'height > ' + str( simple_preds[ 'min_height' ] ) )
if 'height' in simple_preds:
files_info_predicates.append( 'height = ' + str( simple_preds[ 'height' ] ) )
if 'not_height' in simple_preds:
files_info_predicates.append( 'height != ' + str( simple_preds[ 'not_height' ] ) )
if 'max_height' in simple_preds:
files_info_predicates.append( 'height < ' + str( simple_preds[ 'max_height' ] ) )
if 'min_num_pixels' in simple_preds:
files_info_predicates.append( 'width * height > ' + str( simple_preds[ 'min_num_pixels' ] ) )
if 'num_pixels' in simple_preds:
files_info_predicates.append( 'width * height = ' + str( simple_preds[ 'num_pixels' ] ) )
if 'not_num_pixels' in simple_preds:
files_info_predicates.append( 'width * height != ' + str( simple_preds[ 'not_num_pixels' ] ) )
if 'max_num_pixels' in simple_preds:
files_info_predicates.append( 'width * height < ' + str( simple_preds[ 'max_num_pixels' ] ) )
if 'min_ratio' in simple_preds:
( ratio_width, ratio_height ) = simple_preds[ 'min_ratio' ]
files_info_predicates.append( '( width * 1.0 ) / height > ' + str( float( ratio_width ) ) + ' / ' + str( ratio_height ) )
if 'ratio' in simple_preds:
( ratio_width, ratio_height ) = simple_preds[ 'ratio' ]
files_info_predicates.append( '( width * 1.0 ) / height = ' + str( float( ratio_width ) ) + ' / ' + str( ratio_height ) )
if 'not_ratio' in simple_preds:
( ratio_width, ratio_height ) = simple_preds[ 'not_ratio' ]
files_info_predicates.append( '( width * 1.0 ) / height != ' + str( float( ratio_width ) ) + ' / ' + str( ratio_height ) )
if 'max_ratio' in simple_preds:
( ratio_width, ratio_height ) = simple_preds[ 'max_ratio' ]
files_info_predicates.append( '( width * 1.0 ) / height < ' + str( float( ratio_width ) ) + ' / ' + str( ratio_height ) )
if 'min_num_words' in simple_preds: files_info_predicates.append( 'num_words > ' + str( simple_preds[ 'min_num_words' ] ) )
if 'num_words' in simple_preds:
num_words = simple_preds[ 'num_words' ]
if num_words == 0: files_info_predicates.append( '( num_words IS NULL OR num_words = 0 )' )
else: files_info_predicates.append( 'num_words = ' + str( num_words ) )
if 'not_num_words' in simple_preds:
num_words = simple_preds[ 'not_num_words' ]
files_info_predicates.append( '( num_words IS NULL OR num_words != {} )'.format( num_words ) )
if 'max_num_words' in simple_preds:
max_num_words = simple_preds[ 'max_num_words' ]
if max_num_words == 0: files_info_predicates.append( 'num_words < ' + str( max_num_words ) )
else: files_info_predicates.append( '( num_words < ' + str( max_num_words ) + ' OR num_words IS NULL )' )
if 'min_duration' in simple_preds: files_info_predicates.append( 'duration > ' + str( simple_preds[ 'min_duration' ] ) )
if 'duration' in simple_preds:
duration = simple_preds[ 'duration' ]
if duration == 0:
files_info_predicates.append( '( duration = 0 OR duration IS NULL )' )
else:
files_info_predicates.append( 'duration = ' + str( duration ) )
if 'not_duration' in simple_preds:
duration = simple_preds[ 'not_duration' ]
files_info_predicates.append( '( duration IS NULL OR duration != {} )'.format( duration ) )
if 'max_duration' in simple_preds:
max_duration = simple_preds[ 'max_duration' ]
if max_duration == 0: files_info_predicates.append( 'duration < ' + str( max_duration ) )
else: files_info_predicates.append( '( duration < ' + str( max_duration ) + ' OR duration IS NULL )' )
if 'min_framerate' in simple_preds or 'framerate' in simple_preds or 'max_framerate' in simple_preds or 'not_framerate' in simple_preds:
if 'not_framerate' in simple_preds:
pred = '( duration IS NULL OR num_frames = 0 OR ( duration IS NOT NULL AND duration != 0 AND num_frames != 0 AND num_frames IS NOT NULL AND {} ) )'
min_framerate_sql = simple_preds[ 'not_framerate' ] * 0.95
max_framerate_sql = simple_preds[ 'not_framerate' ] * 1.05
pred = pred.format( '( num_frames * 1.0 ) / ( duration / 1000.0 ) NOT BETWEEN {} AND {}'.format( min_framerate_sql, max_framerate_sql ) )
else:
min_framerate_sql = None
max_framerate_sql = None
pred = '( duration IS NOT NULL AND duration != 0 AND num_frames != 0 AND num_frames IS NOT NULL AND {} )'
if 'min_framerate' in simple_preds:
min_framerate_sql = simple_preds[ 'min_framerate' ] * 1.05
if 'framerate' in simple_preds:
min_framerate_sql = simple_preds[ 'framerate' ] * 0.95
max_framerate_sql = simple_preds[ 'framerate' ] * 1.05
if 'max_framerate' in simple_preds:
max_framerate_sql = simple_preds[ 'max_framerate' ] * 0.95
if min_framerate_sql is None:
pred = pred.format( '( num_frames * 1.0 ) / ( duration / 1000.0 ) < {}'.format( max_framerate_sql ) )
elif max_framerate_sql is None:
pred = pred.format( '( num_frames * 1.0 ) / ( duration / 1000.0 ) > {}'.format( min_framerate_sql ) )
else:
pred = pred.format( '( num_frames * 1.0 ) / ( duration / 1000.0 ) BETWEEN {} AND {}'.format( min_framerate_sql, max_framerate_sql ) )
files_info_predicates.append( pred )
if 'min_num_frames' in simple_preds: files_info_predicates.append( 'num_frames > ' + str( simple_preds[ 'min_num_frames' ] ) )
if 'num_frames' in simple_preds:
num_frames = simple_preds[ 'num_frames' ]
if num_frames == 0: files_info_predicates.append( '( num_frames IS NULL OR num_frames = 0 )' )
else: files_info_predicates.append( 'num_frames = ' + str( num_frames ) )
if 'not_num_frames' in simple_preds:
num_frames = simple_preds[ 'not_num_frames' ]
files_info_predicates.append( '( num_frames IS NULL OR num_frames != {} )'.format( num_frames ) )
if 'max_num_frames' in simple_preds:
max_num_frames = simple_preds[ 'max_num_frames' ]
if max_num_frames == 0: files_info_predicates.append( 'num_frames < ' + str( max_num_frames ) )
else: files_info_predicates.append( '( num_frames < ' + str( max_num_frames ) + ' OR num_frames IS NULL )' )
there_are_simple_files_info_preds_to_search_for = len( files_info_predicates ) > 0
def intersection_update_qhi( query_hash_ids, some_hash_ids, force_create_new_set = False ) -> set:
if query_hash_ids is None:
if not isinstance( some_hash_ids, set ) or force_create_new_set:
some_hash_ids = set( some_hash_ids )
return some_hash_ids
else:
query_hash_ids.intersection_update( some_hash_ids )
return query_hash_ids
def do_or_preds( or_predicates, query_hash_ids ) -> set:
def or_sort_key( p ):
return len( p.GetValue() )
or_predicates = sorted( or_predicates, key = or_sort_key )
for or_predicate in or_predicates:
or_query_hash_ids = set()
for or_subpredicate in or_predicate.GetValue():
or_search_context = file_search_context.Duplicate()
or_search_context.SetPredicates( [ or_subpredicate ] )
or_query_hash_ids.update( self._GetHashIdsFromQuery( or_search_context, job_key, query_hash_ids = query_hash_ids, apply_implicit_limit = False, sort_by = None, limit_sort_by = None ) )
if job_key.IsCancelled():
return set()
query_hash_ids = intersection_update_qhi( query_hash_ids, or_query_hash_ids )
return query_hash_ids
done_or_predicates = len( or_predicates ) == 0
if not done_or_predicates and not ( there_are_tags_to_search or there_are_simple_files_info_preds_to_search_for ):
query_hash_ids = do_or_preds( or_predicates, query_hash_ids )
have_cross_referenced_file_locations = True
done_or_predicates = True
#
if 'hash' in simple_preds:
( search_hashes, search_hash_type, inclusive ) = simple_preds[ 'hash' ]
if inclusive:
if search_hash_type == 'sha256':
matching_sha256_hashes = [ search_hash for search_hash in search_hashes if self.modules_hashes.HasHash( search_hash ) ]
else:
matching_sha256_hashes = self.modules_hashes.GetFileHashes( search_hashes, search_hash_type, 'sha256' )
specific_hash_ids = self.modules_hashes_local_cache.GetHashIds( matching_sha256_hashes )
query_hash_ids = intersection_update_qhi( query_hash_ids, specific_hash_ids )
#
if need_file_domain_cross_reference:
# in future we will hang an explicit service off this predicate and specify import/deleted time
# for now we'll wangle a compromise and just check all, and if domain is deleted, then search deletion time
import_timestamp_predicates = []
if 'min_import_timestamp' in simple_preds: import_timestamp_predicates.append( 'timestamp >= ' + str( simple_preds[ 'min_import_timestamp' ] ) )
if 'max_import_timestamp' in simple_preds: import_timestamp_predicates.append( 'timestamp <= ' + str( simple_preds[ 'max_import_timestamp' ] ) )
if len( import_timestamp_predicates ) > 0:
pred_string = ' AND '.join( import_timestamp_predicates )
table_names = []
table_names.extend( ( ClientDBFilesStorage.GenerateFilesTableName( self.modules_services.GetServiceId( service_key ), HC.CONTENT_STATUS_CURRENT ) for service_key in location_context.current_service_keys ) )
table_names.extend( ( ClientDBFilesStorage.GenerateFilesTableName( self.modules_services.GetServiceId( service_key ), HC.CONTENT_STATUS_DELETED ) for service_key in location_context.deleted_service_keys ) )
import_timestamp_hash_ids = set()
for table_name in table_names:
import_timestamp_hash_ids.update( self._STS( self._Execute( 'SELECT hash_id FROM {} WHERE {};'.format( table_name, pred_string ) ) ) )
query_hash_ids = intersection_update_qhi( query_hash_ids, import_timestamp_hash_ids )
have_cross_referenced_file_locations = True
modified_timestamp_predicates = []
if 'min_modified_timestamp' in simple_preds: modified_timestamp_predicates.append( 'MIN( file_modified_timestamp ) >= ' + str( simple_preds[ 'min_modified_timestamp' ] ) )
if 'max_modified_timestamp' in simple_preds: modified_timestamp_predicates.append( 'MIN( file_modified_timestamp ) <= ' + str( simple_preds[ 'max_modified_timestamp' ] ) )
if len( modified_timestamp_predicates ) > 0:
pred_string = ' AND '.join( modified_timestamp_predicates )
q1 = 'SELECT hash_id, file_modified_timestamp FROM file_modified_timestamps'
q2 = 'SELECT hash_id, file_modified_timestamp FROM file_domain_modified_timestamps'
query = 'SELECT hash_id FROM ( {} UNION {} ) GROUP BY hash_id HAVING {};'.format( q1, q2, pred_string )
modified_timestamp_hash_ids = self._STS( self._Execute( query ) )
query_hash_ids = intersection_update_qhi( query_hash_ids, modified_timestamp_hash_ids )
last_viewed_timestamp_predicates = []
if 'min_last_viewed_timestamp' in simple_preds: last_viewed_timestamp_predicates.append( 'last_viewed_timestamp >= ' + str( simple_preds[ 'min_last_viewed_timestamp' ] ) )
if 'max_last_viewed_timestamp' in simple_preds: last_viewed_timestamp_predicates.append( 'last_viewed_timestamp <= ' + str( simple_preds[ 'max_last_viewed_timestamp' ] ) )
if len( last_viewed_timestamp_predicates ) > 0:
pred_string = ' AND '.join( last_viewed_timestamp_predicates )
last_viewed_timestamp_hash_ids = self._STS( self._Execute( 'SELECT hash_id FROM file_viewing_stats WHERE canvas_type = ? AND {};'.format( pred_string ), ( CC.CANVAS_MEDIA_VIEWER, ) ) )
query_hash_ids = intersection_update_qhi( query_hash_ids, last_viewed_timestamp_hash_ids )
if system_predicates.HasSimilarTo():
( similar_to_hashes, max_hamming ) = system_predicates.GetSimilarTo()
all_similar_hash_ids = set()
for similar_to_hash in similar_to_hashes:
hash_id = self.modules_hashes_local_cache.GetHashId( similar_to_hash )
similar_hash_ids_and_distances = self.modules_similar_files.Search( hash_id, max_hamming )
similar_hash_ids = [ similar_hash_id for ( similar_hash_id, distance ) in similar_hash_ids_and_distances ]
all_similar_hash_ids.update( similar_hash_ids )
query_hash_ids = intersection_update_qhi( query_hash_ids, all_similar_hash_ids )
for ( operator, value, rating_service_key ) in system_predicates.GetRatingsPredicates():
service_id = self.modules_services.GetServiceId( rating_service_key )
if value == 'not rated':
continue
if value == 'rated':
rating_hash_ids = self._STI( self._Execute( 'SELECT hash_id FROM local_ratings WHERE service_id = ?;', ( service_id, ) ) )
query_hash_ids = intersection_update_qhi( query_hash_ids, rating_hash_ids )
else:
service = HG.client_controller.services_manager.GetService( rating_service_key )
if service.GetServiceType() == HC.LOCAL_RATING_LIKE:
half_a_star_value = 0.5
else:
one_star_value = service.GetOneStarValue()
half_a_star_value = one_star_value / 2
if isinstance( value, str ):
value = float( value )
if operator == CC.UNICODE_ALMOST_EQUAL_TO:
predicate = str( ( value - half_a_star_value ) * 0.8 ) + ' < rating AND rating < ' + str( ( value + half_a_star_value ) * 1.2 )
elif operator == '<':
predicate = 'rating <= ' + str( value - half_a_star_value )
elif operator == '>':
predicate = 'rating > ' + str( value + half_a_star_value )
elif operator == '=':
predicate = str( value - half_a_star_value ) + ' < rating AND rating <= ' + str( value + half_a_star_value )
rating_hash_ids = self._STI( self._Execute( 'SELECT hash_id FROM local_ratings WHERE service_id = ? AND ' + predicate + ';', ( service_id, ) ) )
query_hash_ids = intersection_update_qhi( query_hash_ids, rating_hash_ids )
is_inbox = system_predicates.MustBeInbox()
if is_inbox:
query_hash_ids = intersection_update_qhi( query_hash_ids, self.modules_files_metadata_basic.inbox_hash_ids, force_create_new_set = True )
for ( operator, num_relationships, dupe_type ) in system_predicates.GetDuplicateRelationshipCountPredicates():
only_do_zero = ( operator in ( '=', CC.UNICODE_ALMOST_EQUAL_TO ) and num_relationships == 0 ) or ( operator == '<' and num_relationships == 1 )
include_zero = operator == '<'
if only_do_zero:
continue
elif include_zero:
continue
else:
dupe_hash_ids = self.modules_files_duplicates.DuplicatesGetHashIdsFromDuplicateCountPredicate( db_location_context, operator, num_relationships, dupe_type )
query_hash_ids = intersection_update_qhi( query_hash_ids, dupe_hash_ids )
have_cross_referenced_file_locations = True
for ( view_type, viewing_locations, operator, viewing_value ) in system_predicates.GetFileViewingStatsPredicates():
only_do_zero = ( operator in ( '=', CC.UNICODE_ALMOST_EQUAL_TO ) and viewing_value == 0 ) or ( operator == '<' and viewing_value == 1 )
include_zero = operator == '<'
if only_do_zero:
continue
elif include_zero:
continue
else:
viewing_hash_ids = self._GetHashIdsFromFileViewingStatistics( view_type, viewing_locations, operator, viewing_value )
query_hash_ids = intersection_update_qhi( query_hash_ids, viewing_hash_ids )
if there_are_tags_to_search:
def sort_longest_tag_first_key( s ):
return ( 1 if HydrusTags.IsUnnamespaced( s ) else 0, -len( s ) )
tags_to_include = list( tags_to_include )
tags_to_include.sort( key = sort_longest_tag_first_key )
for tag in tags_to_include:
if query_hash_ids is None:
tag_query_hash_ids = self._GetHashIdsFromTag( ClientTags.TAG_DISPLAY_ACTUAL, location_context, tag_search_context, tag, job_key = job_key )
elif is_inbox and len( query_hash_ids ) == len( self.modules_files_metadata_basic.inbox_hash_ids ):
tag_query_hash_ids = self._GetHashIdsFromTag( ClientTags.TAG_DISPLAY_ACTUAL, location_context, tag_search_context, tag, hash_ids = self.modules_files_metadata_basic.inbox_hash_ids, hash_ids_table_name = 'file_inbox', job_key = job_key )
else:
with self._MakeTemporaryIntegerTable( query_hash_ids, 'hash_id' ) as temp_table_name:
tag_query_hash_ids = self._GetHashIdsFromTag( ClientTags.TAG_DISPLAY_ACTUAL, location_context, tag_search_context, tag, hash_ids = query_hash_ids, hash_ids_table_name = temp_table_name, job_key = job_key )
query_hash_ids = intersection_update_qhi( query_hash_ids, tag_query_hash_ids )
have_cross_referenced_file_locations = True
if len( query_hash_ids ) == 0:
return query_hash_ids
namespaces_to_include = list( namespaces_to_include )
namespaces_to_include.sort( key = lambda n: -len( n ) )
for namespace in namespaces_to_include:
if query_hash_ids is None or ( is_inbox and len( query_hash_ids ) == len( self.modules_files_metadata_basic.inbox_hash_ids ) ):
namespace_query_hash_ids = self._GetHashIdsThatHaveTagsComplexLocation( ClientTags.TAG_DISPLAY_ACTUAL, location_context, tag_search_context, namespace_wildcard = namespace, job_key = job_key )
else:
with self._MakeTemporaryIntegerTable( query_hash_ids, 'hash_id' ) as temp_table_name:
self._AnalyzeTempTable( temp_table_name )
namespace_query_hash_ids = self._GetHashIdsThatHaveTagsComplexLocation( ClientTags.TAG_DISPLAY_ACTUAL, location_context, tag_search_context, namespace_wildcard = namespace, hash_ids_table_name = temp_table_name, job_key = job_key )
query_hash_ids = intersection_update_qhi( query_hash_ids, namespace_query_hash_ids )
have_cross_referenced_file_locations = True
if len( query_hash_ids ) == 0:
return query_hash_ids
wildcards_to_include = list( wildcards_to_include )
wildcards_to_include.sort( key = lambda w: -len( w ) )
for wildcard in wildcards_to_include:
if query_hash_ids is None:
wildcard_query_hash_ids = self._GetHashIdsFromWildcardComplexLocation( ClientTags.TAG_DISPLAY_ACTUAL, location_context, tag_search_context, wildcard, job_key = job_key )
else:
with self._MakeTemporaryIntegerTable( query_hash_ids, 'hash_id' ) as temp_table_name:
self._AnalyzeTempTable( temp_table_name )
wildcard_query_hash_ids = self._GetHashIdsFromWildcardComplexLocation( ClientTags.TAG_DISPLAY_ACTUAL, location_context, tag_search_context, wildcard, hash_ids = query_hash_ids, hash_ids_table_name = temp_table_name, job_key = job_key )
query_hash_ids = intersection_update_qhi( query_hash_ids, wildcard_query_hash_ids )
have_cross_referenced_file_locations = True
if len( query_hash_ids ) == 0:
return query_hash_ids
if not done_or_predicates and not there_are_simple_files_info_preds_to_search_for:
query_hash_ids = do_or_preds( or_predicates, query_hash_ids )
have_cross_referenced_file_locations = True
done_or_predicates = True
# now the simple preds and desperate last shot to populate query_hash_ids
done_files_info_predicates = False
we_need_some_results = query_hash_ids is None
we_need_to_cross_reference = need_file_domain_cross_reference and not have_cross_referenced_file_locations
if we_need_some_results or we_need_to_cross_reference:
if location_context.IsAllKnownFiles():
query_hash_ids = intersection_update_qhi( query_hash_ids, self._GetHashIdsThatHaveTagsComplexLocation( ClientTags.TAG_DISPLAY_ACTUAL, location_context, tag_search_context, job_key = job_key ) )
else:
files_table_name = db_location_context.files_table_name
if len( files_info_predicates ) == 0:
files_info_predicates.insert( 0, '1=1' )
else:
# if a file is missing a files_info row, we can't search it with a file system pred. it is just unknown
files_table_name = '{} NATURAL JOIN files_info'.format( files_table_name )
if query_hash_ids is None:
query_hash_ids = intersection_update_qhi( query_hash_ids, self._STS( self._Execute( 'SELECT hash_id FROM {} WHERE {};'.format( files_table_name, ' AND '.join( files_info_predicates ) ) ) ) )
else:
if is_inbox and len( query_hash_ids ) == len( self.modules_files_metadata_basic.inbox_hash_ids ):
query_hash_ids = intersection_update_qhi( query_hash_ids, self._STS( self._Execute( 'SELECT hash_id FROM {} NATURAL JOIN {} WHERE {};'.format( 'file_inbox', files_table_name, ' AND '.join( files_info_predicates ) ) ) ) )
else:
with self._MakeTemporaryIntegerTable( query_hash_ids, 'hash_id' ) as temp_table_name:
self._AnalyzeTempTable( temp_table_name )
query_hash_ids = intersection_update_qhi( query_hash_ids, self._STS( self._Execute( 'SELECT hash_id FROM {} NATURAL JOIN {} WHERE {};'.format( temp_table_name, files_table_name, ' AND '.join( files_info_predicates ) ) ) ) )
have_cross_referenced_file_locations = True
done_files_info_predicates = True
if 'hash' in simple_preds:
( search_hashes, search_hash_type, inclusive ) = simple_preds[ 'hash' ]
if not inclusive:
if search_hash_type == 'sha256':
matching_sha256_hashes = [ search_hash for search_hash in search_hashes if self.modules_hashes.HasHash( search_hash ) ]
else:
matching_sha256_hashes = self.modules_hashes.GetFileHashes( search_hashes, search_hash_type, 'sha256' )
specific_hash_ids = self.modules_hashes_local_cache.GetHashIds( matching_sha256_hashes )
query_hash_ids.difference_update( specific_hash_ids )
if 'has_icc_profile' in simple_preds:
has_icc_profile = simple_preds[ 'has_icc_profile' ]
has_icc_profile_has_ids = self.modules_files_metadata_basic.GetHasICCProfileHashIds( query_hash_ids )
if has_icc_profile:
query_hash_ids.intersection_update( has_icc_profile_has_ids )
else:
query_hash_ids.difference_update( has_icc_profile_has_ids )
if system_predicates.MustBeArchive():
query_hash_ids.difference_update( self.modules_files_metadata_basic.inbox_hash_ids )
if king_filter is not None and king_filter:
king_hash_ids = self.modules_files_duplicates.DuplicatesFilterKingHashIds( query_hash_ids )
query_hash_ids = intersection_update_qhi( query_hash_ids, king_hash_ids )
if there_are_simple_files_info_preds_to_search_for and not done_files_info_predicates:
with self._MakeTemporaryIntegerTable( query_hash_ids, 'hash_id' ) as temp_table_name:
self._AnalyzeTempTable( temp_table_name )
predicate_string = ' AND '.join( files_info_predicates )
select = 'SELECT hash_id FROM {} NATURAL JOIN files_info WHERE {};'.format( temp_table_name, predicate_string )
files_info_hash_ids = self._STI( self._Execute( select ) )
query_hash_ids = intersection_update_qhi( query_hash_ids, files_info_hash_ids )
done_files_info_predicates = True
if job_key.IsCancelled():
return set()
if not done_or_predicates:
query_hash_ids = do_or_preds( or_predicates, query_hash_ids )
done_or_predicates = True
# hide update files
if location_context.IsAllLocalFiles():
repo_update_hash_ids = set( self.modules_files_storage.GetCurrentHashIdsList( self.modules_services.local_update_service_id ) )
query_hash_ids.difference_update( repo_update_hash_ids )
# now subtract bad results
if len( tags_to_exclude ) + len( namespaces_to_exclude ) + len( wildcards_to_exclude ) > 0:
with self._MakeTemporaryIntegerTable( query_hash_ids, 'hash_id' ) as temp_table_name:
self._AnalyzeTempTable( temp_table_name )
for tag in tags_to_exclude:
unwanted_hash_ids = self._GetHashIdsFromTag( ClientTags.TAG_DISPLAY_ACTUAL, location_context, tag_search_context, tag, hash_ids = query_hash_ids, hash_ids_table_name = temp_table_name, job_key = job_key )
query_hash_ids.difference_update( unwanted_hash_ids )
if len( query_hash_ids ) == 0:
return query_hash_ids
self._ExecuteMany( 'DELETE FROM {} WHERE hash_id = ?;'.format( temp_table_name ), ( ( hash_id, ) for hash_id in unwanted_hash_ids ) )
for namespace in namespaces_to_exclude:
unwanted_hash_ids = self._GetHashIdsThatHaveTagsComplexLocation( ClientTags.TAG_DISPLAY_ACTUAL, location_context, tag_search_context, namespace_wildcard = namespace, hash_ids_table_name = temp_table_name, job_key = job_key )
query_hash_ids.difference_update( unwanted_hash_ids )
if len( query_hash_ids ) == 0:
return query_hash_ids
self._ExecuteMany( 'DELETE FROM {} WHERE hash_id = ?;'.format( temp_table_name ), ( ( hash_id, ) for hash_id in unwanted_hash_ids ) )
for wildcard in wildcards_to_exclude:
unwanted_hash_ids = self._GetHashIdsFromWildcardComplexLocation( ClientTags.TAG_DISPLAY_ACTUAL, location_context, tag_search_context, wildcard, hash_ids = query_hash_ids, hash_ids_table_name = temp_table_name, job_key = job_key )
query_hash_ids.difference_update( unwanted_hash_ids )
if len( query_hash_ids ) == 0:
return query_hash_ids
self._ExecuteMany( 'DELETE FROM {} WHERE hash_id = ?;'.format( temp_table_name ), ( ( hash_id, ) for hash_id in unwanted_hash_ids ) )
if job_key.IsCancelled():
return set()
#
( required_file_service_statuses, excluded_file_service_statuses ) = system_predicates.GetFileServiceStatuses()
for ( service_key, statuses ) in required_file_service_statuses.items():
service_id = self.modules_services.GetServiceId( service_key )
for status in statuses:
required_hash_ids = self.modules_files_storage.FilterHashIdsToStatus( service_id, query_hash_ids, status )
query_hash_ids = intersection_update_qhi( query_hash_ids, required_hash_ids )
for ( service_key, statuses ) in excluded_file_service_statuses.items():
service_id = self.modules_services.GetServiceId( service_key )
for status in statuses:
excluded_hash_ids = self.modules_files_storage.FilterHashIdsToStatus( service_id, query_hash_ids, status )
query_hash_ids.difference_update( excluded_hash_ids )
#
for ( operator, value, service_key ) in system_predicates.GetRatingsPredicates():
service_id = self.modules_services.GetServiceId( service_key )
if value == 'not rated':
query_hash_ids.difference_update( self._STI( self._Execute( 'SELECT hash_id FROM local_ratings WHERE service_id = ?;', ( service_id, ) ) ) )
if king_filter is not None and not king_filter:
king_hash_ids = self.modules_files_duplicates.DuplicatesFilterKingHashIds( query_hash_ids )
query_hash_ids.difference_update( king_hash_ids )
for ( operator, num_relationships, dupe_type ) in system_predicates.GetDuplicateRelationshipCountPredicates():
only_do_zero = ( operator in ( '=', CC.UNICODE_ALMOST_EQUAL_TO ) and num_relationships == 0 ) or ( operator == '<' and num_relationships == 1 )
include_zero = operator == '<'
if only_do_zero:
nonzero_hash_ids = self.modules_files_duplicates.DuplicatesGetHashIdsFromDuplicateCountPredicate( db_location_context, '>', 0, dupe_type )
query_hash_ids.difference_update( nonzero_hash_ids )
elif include_zero:
nonzero_hash_ids = self.modules_files_duplicates.DuplicatesGetHashIdsFromDuplicateCountPredicate( db_location_context, '>', 0, dupe_type )
zero_hash_ids = query_hash_ids.difference( nonzero_hash_ids )
accurate_except_zero_hash_ids = self.modules_files_duplicates.DuplicatesGetHashIdsFromDuplicateCountPredicate( db_location_context, operator, num_relationships, dupe_type )
hash_ids = zero_hash_ids.union( accurate_except_zero_hash_ids )
query_hash_ids = intersection_update_qhi( query_hash_ids, hash_ids )
min_num_notes = None
max_num_notes = None
if 'num_notes' in simple_preds:
min_num_notes = simple_preds[ 'num_notes' ]
max_num_notes = min_num_notes
else:
if 'min_num_notes' in simple_preds:
min_num_notes = simple_preds[ 'min_num_notes' ] + 1
if 'max_num_notes' in simple_preds:
max_num_notes = simple_preds[ 'max_num_notes' ] - 1
if min_num_notes is not None or max_num_notes is not None:
with self._MakeTemporaryIntegerTable( query_hash_ids, 'hash_id' ) as temp_table_name:
self._AnalyzeTempTable( temp_table_name )
num_notes_hash_ids = self._GetHashIdsFromNumNotes( min_num_notes, max_num_notes, temp_table_name )
query_hash_ids = intersection_update_qhi( query_hash_ids, num_notes_hash_ids )
if 'has_note_names' in simple_preds:
inclusive_note_names = simple_preds[ 'has_note_names' ]
for note_name in inclusive_note_names:
with self._MakeTemporaryIntegerTable( query_hash_ids, 'hash_id' ) as temp_table_name:
self._AnalyzeTempTable( temp_table_name )
notes_hash_ids = self._GetHashIdsFromNoteName( note_name, temp_table_name )
query_hash_ids = intersection_update_qhi( query_hash_ids, notes_hash_ids )
if 'not_has_note_names' in simple_preds:
exclusive_note_names = simple_preds[ 'not_has_note_names' ]
for note_name in exclusive_note_names:
with self._MakeTemporaryIntegerTable( query_hash_ids, 'hash_id' ) as temp_table_name:
self._AnalyzeTempTable( temp_table_name )
notes_hash_ids = self._GetHashIdsFromNoteName( note_name, temp_table_name )
query_hash_ids.difference_update( notes_hash_ids )
for ( view_type, viewing_locations, operator, viewing_value ) in system_predicates.GetFileViewingStatsPredicates():
only_do_zero = ( operator in ( '=', CC.UNICODE_ALMOST_EQUAL_TO ) and viewing_value == 0 ) or ( operator == '<' and viewing_value == 1 )
include_zero = operator == '<'
if only_do_zero:
nonzero_hash_ids = self._GetHashIdsFromFileViewingStatistics( view_type, viewing_locations, '>', 0 )
query_hash_ids.difference_update( nonzero_hash_ids )
elif include_zero:
nonzero_hash_ids = self._GetHashIdsFromFileViewingStatistics( view_type, viewing_locations, '>', 0 )
zero_hash_ids = query_hash_ids.difference( nonzero_hash_ids )
accurate_except_zero_hash_ids = self._GetHashIdsFromFileViewingStatistics( view_type, viewing_locations, operator, viewing_value )
hash_ids = zero_hash_ids.union( accurate_except_zero_hash_ids )
query_hash_ids = intersection_update_qhi( query_hash_ids, hash_ids )
if job_key.IsCancelled():
return set()
#
file_location_is_all_local = self.modules_services.LocationContextIsCoveredByCombinedLocalFiles( location_context )
file_location_is_all_combined_local_files_deleted = location_context.IsOneDomain() and CC.COMBINED_LOCAL_FILE_SERVICE_KEY in location_context.deleted_service_keys
must_be_local = system_predicates.MustBeLocal() or system_predicates.MustBeArchive()
must_not_be_local = system_predicates.MustNotBeLocal()
if file_location_is_all_local:
# if must be all local, we are great already
if must_not_be_local:
query_hash_ids = set()
elif file_location_is_all_combined_local_files_deleted:
if must_be_local:
query_hash_ids = set()
elif must_be_local or must_not_be_local:
if must_be_local:
query_hash_ids = self.modules_files_storage.FilterHashIdsToStatus( self.modules_services.combined_local_file_service_id, query_hash_ids, HC.CONTENT_STATUS_CURRENT )
elif must_not_be_local:
local_hash_ids = self.modules_files_storage.GetCurrentHashIdsList( self.modules_services.combined_local_file_service_id )
query_hash_ids.difference_update( local_hash_ids )
#
if 'known_url_rules' in simple_preds:
for ( operator, rule_type, rule ) in simple_preds[ 'known_url_rules' ]:
if rule_type == 'exact_match' or ( is_inbox and len( query_hash_ids ) == len( self.modules_files_metadata_basic.inbox_hash_ids ) ):
url_hash_ids = self._GetHashIdsFromURLRule( rule_type, rule )
else:
with self._MakeTemporaryIntegerTable( query_hash_ids, 'hash_id' ) as temp_table_name:
self._AnalyzeTempTable( temp_table_name )
url_hash_ids = self._GetHashIdsFromURLRule( rule_type, rule, hash_ids = query_hash_ids, hash_ids_table_name = temp_table_name )
if operator: # inclusive
query_hash_ids = intersection_update_qhi( query_hash_ids, url_hash_ids )
else:
query_hash_ids.difference_update( url_hash_ids )
#
namespaces_to_tests = system_predicates.GetNumTagsNumberTests()
for ( namespace, number_tests ) in namespaces_to_tests.items():
is_zero = True in ( number_test.IsZero() for number_test in number_tests )
is_anything_but_zero = True in ( number_test.IsAnythingButZero() for number_test in number_tests )
specific_number_tests = [ number_test for number_test in number_tests if not ( number_test.IsZero() or number_test.IsAnythingButZero() ) ]
lambdas = [ number_test.GetLambda() for number_test in specific_number_tests ]
megalambda = lambda x: False not in ( l( x ) for l in lambdas )
with self._MakeTemporaryIntegerTable( query_hash_ids, 'hash_id' ) as temp_table_name:
self._AnalyzeTempTable( temp_table_name )
nonzero_tag_query_hash_ids = set()
nonzero_tag_query_hash_ids_populated = False
if is_zero or is_anything_but_zero:
nonzero_tag_query_hash_ids = self._GetHashIdsThatHaveTagsComplexLocation( ClientTags.TAG_DISPLAY_ACTUAL, location_context, tag_search_context, hash_ids_table_name = temp_table_name, namespace_wildcard = namespace, job_key = job_key )
nonzero_tag_query_hash_ids_populated = True
if is_zero:
query_hash_ids.difference_update( nonzero_tag_query_hash_ids )
if is_anything_but_zero:
query_hash_ids = intersection_update_qhi( query_hash_ids, nonzero_tag_query_hash_ids )
if len( specific_number_tests ) > 0:
hash_id_tag_counts = self._GetHashIdsAndNonZeroTagCounts( ClientTags.TAG_DISPLAY_ACTUAL, location_context, tag_search_context, query_hash_ids, namespace_wildcard = namespace, job_key = job_key )
good_tag_count_hash_ids = { hash_id for ( hash_id, count ) in hash_id_tag_counts if megalambda( count ) }
if megalambda( 0 ): # files with zero count are needed
if not nonzero_tag_query_hash_ids_populated:
nonzero_tag_query_hash_ids = { hash_id for ( hash_id, count ) in hash_id_tag_counts }
zero_hash_ids = query_hash_ids.difference( nonzero_tag_query_hash_ids )
good_tag_count_hash_ids.update( zero_hash_ids )
query_hash_ids = intersection_update_qhi( query_hash_ids, good_tag_count_hash_ids )
if job_key.IsCancelled():
return set()
#
if 'min_tag_as_number' in simple_preds:
( namespace, num ) = simple_preds[ 'min_tag_as_number' ]
with self._MakeTemporaryIntegerTable( query_hash_ids, 'hash_id' ) as temp_table_name:
self._AnalyzeTempTable( temp_table_name )
good_hash_ids = self._GetHashIdsThatHaveTagAsNumComplexLocation( ClientTags.TAG_DISPLAY_ACTUAL, location_context, tag_search_context, namespace, num, '>', hash_ids = query_hash_ids, hash_ids_table_name = temp_table_name, job_key = job_key )
query_hash_ids = intersection_update_qhi( query_hash_ids, good_hash_ids )
if 'max_tag_as_number' in simple_preds:
( namespace, num ) = simple_preds[ 'max_tag_as_number' ]
with self._MakeTemporaryIntegerTable( query_hash_ids, 'hash_id' ) as temp_table_name:
self._AnalyzeTempTable( temp_table_name )
good_hash_ids = self._GetHashIdsThatHaveTagAsNumComplexLocation( ClientTags.TAG_DISPLAY_ACTUAL, location_context, tag_search_context, namespace, num, '<', hash_ids = query_hash_ids, hash_ids_table_name = temp_table_name, job_key = job_key )
query_hash_ids = intersection_update_qhi( query_hash_ids, good_hash_ids )
if job_key.IsCancelled():
return set()
#
query_hash_ids = list( query_hash_ids )
#
limit = system_predicates.GetLimit( apply_implicit_limit = apply_implicit_limit )
we_are_applying_limit = limit is not None and limit < len( query_hash_ids )
if we_are_applying_limit and limit_sort_by is not None and sort_by is None:
sort_by = limit_sort_by
did_sort = False
if sort_by is not None and not location_context.IsAllKnownFiles():
( did_sort, query_hash_ids ) = self._TryToSortHashIds( location_context, query_hash_ids, sort_by )
#
if we_are_applying_limit:
if not did_sort:
query_hash_ids = random.sample( query_hash_ids, limit )
else:
query_hash_ids = query_hash_ids[:limit]
return query_hash_ids
def _GetHashIdsFromSubtagIds( self, tag_display_type: int, file_service_key, tag_search_context: ClientSearch.TagSearchContext, subtag_ids, hash_ids = None, hash_ids_table_name = None, job_key = None ):
    """Resolve the given subtag_ids to tag_ids in this file/tag domain and return the matching hash_ids."""
    
    tag_service_id = self.modules_services.GetServiceId( tag_search_context.service_key )
    file_service_id = self.modules_services.GetServiceId( file_service_key )
    
    matching_tag_ids = self._GetTagIdsFromSubtagIds( file_service_id, tag_service_id, subtag_ids, job_key = job_key )
    
    return self._GetHashIdsFromTagIds( tag_display_type, file_service_key, tag_search_context, matching_tag_ids, hash_ids = hash_ids, hash_ids_table_name = hash_ids_table_name, job_key = job_key )
    
def _GetHashIdsFromSubtagIdsTable( self, tag_display_type: int, file_service_key, tag_search_context: ClientSearch.TagSearchContext, subtag_ids_table_name, hash_ids = None, hash_ids_table_name = None, job_key = None ):
    """As _GetHashIdsFromSubtagIds, but the subtag_ids are supplied via an existing integer table."""
    
    tag_service_id = self.modules_services.GetServiceId( tag_search_context.service_key )
    file_service_id = self.modules_services.GetServiceId( file_service_key )
    
    matching_tag_ids = self._GetTagIdsFromSubtagIdsTable( file_service_id, tag_service_id, subtag_ids_table_name, job_key = job_key )
    
    return self._GetHashIdsFromTagIds( tag_display_type, file_service_key, tag_search_context, matching_tag_ids, hash_ids = hash_ids, hash_ids_table_name = hash_ids_table_name, job_key = job_key )
    
def _GetHashIdsFromTag( self, tag_display_type: int, location_context: ClientLocation.LocationContext, tag_search_context: ClientSearch.TagSearchContext, tag, hash_ids = None, hash_ids_table_name = None, allow_unnamespaced_to_fetch_namespaced = True, job_key = None ):
    """Return the set of hash_ids that have the given tag in the given location/tag domain.
    
    An unnamespaced search tag may also match namespaced tags with the same subtag,
    controlled by allow_unnamespaced_to_fetch_namespaced. Results are unioned across
    every covering file service and post-filtered to the location when the covering
    services were not an exact match.
    """
    
    ( file_service_keys, file_location_is_cross_referenced ) = location_context.GetCoveringCurrentFileServiceKeys()
    
    # a caller-provided hash_ids table already constrains the file domain, so no post-filter is needed
    if not file_location_is_cross_referenced and hash_ids_table_name is not None:
        
        file_location_is_cross_referenced = True
        
    
    ( namespace, subtag ) = HydrusTags.SplitTag( tag )
    
    # FIX: test for existence _before_ fetching the id. GetSubtagId is get-or-create,
    # so calling it first would register the unknown subtag and make this guard dead code.
    if not self.modules_tags.SubtagExists( subtag ):
        
        return set()
        
    
    subtag_id = self.modules_tags.GetSubtagId( subtag )
    
    tag_service_id = self.modules_services.GetServiceId( tag_search_context.service_key )
    
    results = set()
    
    for file_service_key in file_service_keys:
        
        if namespace == '' and allow_unnamespaced_to_fetch_namespaced:
            
            # search every tag in this domain that shares the subtag, whatever its namespace
            file_service_id = self.modules_services.GetServiceId( file_service_key )
            
            tag_ids = self._GetTagIdsFromSubtagIds( file_service_id, tag_service_id, ( subtag_id, ) )
            
        else:
            
            if not self.modules_tags.TagExists( tag ):
                
                return set()
                
            
            tag_id = self.modules_tags.GetTagId( tag )
            
            tag_ids = ( tag_id, )
            
        
        some_results = self._GetHashIdsFromTagIds( tag_display_type, file_service_key, tag_search_context, tag_ids, hash_ids = hash_ids, hash_ids_table_name = hash_ids_table_name, job_key = job_key )
        
        # adopt the first result set directly to avoid a copy, then union in the rest
        if len( results ) == 0:
            
            results = some_results
            
        else:
            
            results.update( some_results )
            
        
    
    if not file_location_is_cross_referenced:
        
        results = self.modules_files_storage.FilterHashIds( location_context, results )
        
    
    return results
    
def _GetHashIdsFromTagIds( self, tag_display_type: int, file_service_key: bytes, tag_search_context: ClientSearch.TagSearchContext, tag_ids: typing.Collection[ int ], hash_ids = None, hash_ids_table_name = None, job_key = None ):
    """Return the set of hash_ids mapped to any of the given tag_ids in this domain.
    
    When a candidate hash_ids table is supplied, a count estimate decides whether it
    is cheaper to drive the join from the files side or the tags side. Reads go
    through a cancellable cursor so job_key can abort mid-scan.
    """
    
    do_hash_table_join = False
    
    if hash_ids_table_name is not None and hash_ids is not None:
        
        tag_service_id = self.modules_services.GetServiceId( tag_search_context.service_key )
        file_service_id = self.modules_services.GetServiceId( file_service_key )
        
        # estimated number of mappings rows these tag_ids would hit
        estimated_count = self._GetAutocompleteCountEstimate( tag_display_type, tag_service_id, file_service_id, tag_ids, tag_search_context.include_current_tags, tag_search_context.include_pending_tags )
        
        # experimentally, file lookups are about 2.5x as slow as tag lookups
        if ClientDBMappingsStorage.DoingAFileJoinTagSearchIsFaster( len( hash_ids ), estimated_count ):
            
            do_hash_table_join = True
            
        
    
    result_hash_ids = set()
    
    # one mappings table per status (current/pending) being searched
    table_names = self._GetMappingTables( tag_display_type, file_service_key, tag_search_context )
    
    cancelled_hook = None
    
    if job_key is not None:
        
        cancelled_hook = job_key.IsCancelled
        
    
    if len( tag_ids ) == 1:
        
        # single tag: bind it directly, no temp tag table needed
        ( tag_id, ) = tag_ids
        
        if do_hash_table_join:
            
            # temp hashes to mappings
            queries = [ 'SELECT hash_id FROM {} CROSS JOIN {} USING ( hash_id ) WHERE tag_id = ?'.format( hash_ids_table_name, table_name ) for table_name in table_names ]
            
        else:
            
            queries = [ 'SELECT hash_id FROM {} WHERE tag_id = ?;'.format( table_name ) for table_name in table_names ]
            
        
        for query in queries:
            
            cursor = self._Execute( query, ( tag_id, ) )
            
            result_hash_ids.update( self._STI( HydrusDB.ReadFromCancellableCursor( cursor, 1024, cancelled_hook ) ) )
            
        
    else:
        
        with self._MakeTemporaryIntegerTable( tag_ids, 'tag_id' ) as temp_tag_ids_table_name:
            
            if do_hash_table_join:
                
                # temp hashes to mappings to temp tags
                # old method, does not do EXISTS efficiently, it makes a list instead and checks that
                # queries = [ 'SELECT hash_id FROM {} WHERE EXISTS ( SELECT 1 FROM {} CROSS JOIN {} USING ( tag_id ) WHERE {}.hash_id = {}.hash_id );'.format( hash_ids_table_name, table_name, temp_tag_ids_table_name, table_name, hash_ids_table_name ) for table_name in table_names ]
                # new method, this seems to actually do the correlated scalar subquery, although it does seem to be sqlite voodoo
                queries = [ 'SELECT hash_id FROM {} WHERE EXISTS ( SELECT 1 FROM {} WHERE {}.hash_id = {}.hash_id AND EXISTS ( SELECT 1 FROM {} WHERE {}.tag_id = {}.tag_id ) );'.format( hash_ids_table_name, table_name, table_name, hash_ids_table_name, temp_tag_ids_table_name, table_name, temp_tag_ids_table_name ) for table_name in table_names ]
                
            else:
                
                # temp tags to mappings
                queries = [ 'SELECT hash_id FROM {} CROSS JOIN {} USING ( tag_id );'.format( temp_tag_ids_table_name, table_name ) for table_name in table_names ]
                
            
            for query in queries:
                
                cursor = self._Execute( query )
                
                result_hash_ids.update( self._STI( HydrusDB.ReadFromCancellableCursor( cursor, 1024, cancelled_hook ) ) )
                
            
        
    
    return result_hash_ids
    
def _GetHashIdsFromURLRule( self, rule_type, rule, hash_ids = None, hash_ids_table_name = None ):
    """Return a set of hash_ids whose known urls satisfy the given rule.
    
    rule_type selects how 'rule' is interpreted: 'exact_match' (a full url),
    'url_class'/'url_match' (a url class object), 'domain' (a domain string), or
    anything else is treated as a regex over the url text. When a small candidate
    hash_ids table is available (< 50000 rows) it is joined in to cut the scan.
    """
    
    if rule_type == 'exact_match':
        
        url = rule
        
        table_name = 'url_map NATURAL JOIN urls'
        
        if hash_ids_table_name is not None and hash_ids is not None and len( hash_ids ) < 50000:
            
            table_name += ' NATURAL JOIN {}'.format( hash_ids_table_name )
            
        
        select = 'SELECT hash_id FROM {} WHERE url = ?;'.format( table_name )
        
        result_hash_ids = self._STS( self._Execute( select, ( url, ) ) )
        
        return result_hash_ids
        
    elif rule_type in ( 'url_class', 'url_match' ):
        
        url_class = rule
        
        domain = url_class.GetDomain()
        
        if url_class.MatchesSubdomains():
            
            domain_ids = self.modules_urls.GetURLDomainAndSubdomainIds( domain )
            
        else:
            
            domain_ids = self.modules_urls.GetURLDomainAndSubdomainIds( domain, only_www_subdomains = True )
            
        
        result_hash_ids = set()
        
        with self._MakeTemporaryIntegerTable( domain_ids, 'domain_id' ) as temp_domain_table_name:
            
            if hash_ids_table_name is not None and hash_ids is not None and len( hash_ids ) < 50000:
                
                # if we aren't gonk mode with the number of files, temp hashes to url map to urls to domains
                select = 'SELECT hash_id, url FROM {} CROSS JOIN url_map USING ( hash_id ) CROSS JOIN urls USING ( url_id ) CROSS JOIN {} USING ( domain_id );'.format( hash_ids_table_name, temp_domain_table_name )
                
            else:
                
                select = 'SELECT hash_id, url FROM {} CROSS JOIN urls USING ( domain_id ) CROSS JOIN url_map USING ( url_id );'.format( temp_domain_table_name )
                
            
            # the domain join only narrows candidates; the url class itself decides the match
            for ( hash_id, url ) in self._Execute( select ):
                
                if hash_id not in result_hash_ids and url_class.Matches( url ):
                    
                    result_hash_ids.add( hash_id )
                    
                
            
        
        return result_hash_ids
        
    elif rule_type == 'domain':
        
        # FIX: was `rule_type in 'domain'`, a substring test that also matched e.g. 'dom' or ''
        domain = rule
        
        domain_ids = self.modules_urls.GetURLDomainAndSubdomainIds( domain )
        
        result_hash_ids = set()
        
        with self._MakeTemporaryIntegerTable( domain_ids, 'domain_id' ) as temp_domain_table_name:
            
            if hash_ids_table_name is not None and hash_ids is not None and len( hash_ids ) < 50000:
                
                # next step here is irl profiling and a domain->url_count cache so I can decide whether to do this or not based on url domain count
                select = 'SELECT hash_id FROM {} CROSS JOIN url_map USING ( hash_id ) CROSS JOIN urls USING ( url_id ) CROSS JOIN {} USING ( domain_id )'.format( hash_ids_table_name, temp_domain_table_name )
                
            else:
                
                # domains to urls to url map
                select = 'SELECT hash_id FROM {} CROSS JOIN urls USING ( domain_id ) CROSS JOIN url_map USING ( url_id );'.format( temp_domain_table_name )
                
            
            result_hash_ids = self._STS( self._Execute( select ) )
            
        
        return result_hash_ids
        
    else:
        
        regex = rule
        
        if hash_ids_table_name is not None and hash_ids is not None and len( hash_ids ) < 50000:
            
            # if we aren't gonk mode with the number of files, temp hashes to url map to urls
            select = 'SELECT hash_id, url FROM {} CROSS JOIN url_map USING ( hash_id ) CROSS JOIN urls USING ( url_id );'.format( hash_ids_table_name )
            
        else:
            
            select = 'SELECT hash_id, url FROM url_map NATURAL JOIN urls;'
            
        
        result_hash_ids = set()
        
        for ( hash_id, url ) in self._Execute( select ):
            
            if hash_id not in result_hash_ids and re.search( regex, url ) is not None:
                
                result_hash_ids.add( hash_id )
                
            
        
        return result_hash_ids
        
    
def _GetHashIdsFromWildcardComplexLocation( self, tag_display_type: int, location_context: ClientLocation.LocationContext, tag_search_context: ClientSearch.TagSearchContext, wildcard, hash_ids = None, hash_ids_table_name = None, job_key = None ):
    """Union wildcard tag-search results across every file service covering the location."""
    
    ( namespace_wildcard, subtag_wildcard ) = HydrusTags.SplitTag( wildcard )
    
    if namespace_wildcard in ( '*', '' ):
        
        namespace_wildcard = None
        
    
    # '*' or 'namespace:*' degenerates to 'has any (namespaced) tag'
    if subtag_wildcard == '*':
        
        return self._GetHashIdsThatHaveTagsComplexLocation( tag_display_type, location_context, tag_search_context, namespace_wildcard = namespace_wildcard, hash_ids_table_name = hash_ids_table_name, job_key = job_key )
        
    
    ( file_service_keys, file_location_is_cross_referenced ) = location_context.GetCoveringCurrentFileServiceKeys()
    
    # a caller-supplied hash_ids table already pins the file domain
    if hash_ids_table_name is not None and not file_location_is_cross_referenced:
        
        file_location_is_cross_referenced = True
        
    
    if namespace_wildcard is None:
        
        possible_namespace_ids = []
        
    else:
        
        possible_namespace_ids = self.modules_tag_search.GetNamespaceIdsFromWildcard( namespace_wildcard )
        
        if len( possible_namespace_ids ) == 0:
            
            return set()
            
        
    
    results = set()
    
    with self._MakeTemporaryIntegerTable( possible_namespace_ids, 'namespace_id' ) as temp_namespace_ids_table_name:
        
        namespace_ids_table_name = None if namespace_wildcard is None else temp_namespace_ids_table_name
        
        for file_service_key in file_service_keys:
            
            some_results = self._GetHashIdsFromWildcardSimpleLocation( tag_display_type, file_service_key, tag_search_context, subtag_wildcard, namespace_ids_table_name = namespace_ids_table_name, hash_ids = hash_ids, hash_ids_table_name = hash_ids_table_name, job_key = job_key )
            
            # first batch is adopted wholesale; subsequent batches are unioned in
            if len( results ) == 0:
                
                results = some_results
                
            else:
                
                results.update( some_results )
                
            
        
    
    if not file_location_is_cross_referenced:
        
        results = self.modules_files_storage.FilterHashIds( location_context, results )
        
    
    return results
    
def _GetHashIdsFromWildcardSimpleLocation( self, tag_display_type: int, file_service_key: bytes, tag_search_context: ClientSearch.TagSearchContext, subtag_wildcard, namespace_ids_table_name = None, hash_ids = None, hash_ids_table_name = None, job_key = None ):
    """Resolve a subtag wildcard into a temp subtag table and fetch the matching hash_ids for one file service."""
    
    with self._MakeTemporaryIntegerTable( [], 'subtag_id' ) as temp_subtag_ids_table_name:
        
        tag_service_id = self.modules_services.GetServiceId( tag_search_context.service_key )
        file_service_id = self.modules_services.GetServiceId( file_service_key )
        
        # fill the temp table with every subtag_id in this domain that matches the wildcard
        self.modules_tag_search.GetSubtagIdsFromWildcardIntoTable( file_service_id, tag_service_id, subtag_wildcard, temp_subtag_ids_table_name, job_key = job_key )
        
        if namespace_ids_table_name is None:
            
            return self._GetHashIdsFromSubtagIdsTable( tag_display_type, file_service_key, tag_search_context, temp_subtag_ids_table_name, hash_ids = hash_ids, hash_ids_table_name = hash_ids_table_name, job_key = job_key )
            
        
        # a namespace constraint was supplied, so join subtags against the namespace table too
        return self._GetHashIdsFromNamespaceIdsSubtagIdsTables( tag_display_type, file_service_key, tag_search_context, namespace_ids_table_name, temp_subtag_ids_table_name, hash_ids = hash_ids, hash_ids_table_name = hash_ids_table_name, job_key = job_key )
        
    
def _GetHashIdsThatHaveTagsComplexLocation( self, tag_display_type: int, location_context: ClientLocation.LocationContext, tag_search_context: ClientSearch.TagSearchContext, namespace_wildcard = None, hash_ids_table_name = None, job_key = None ):
    """Return hash_ids that have any tag (optionally within a namespace wildcard) across a multi-domain location context."""
    
    if not location_context.SearchesAnything():
        
        return set()
        
    
    # '*' matches every namespace, equivalent to no namespace filter at all
    if namespace_wildcard == '*':
        
        namespace_wildcard = None
        
    
    if namespace_wildcard is None:
        
        possible_namespace_ids = []
        
    else:
        
        possible_namespace_ids = self.modules_tag_search.GetNamespaceIdsFromWildcard( namespace_wildcard )
        
        if len( possible_namespace_ids ) == 0:
            
            # the wildcard matches no known namespace, so nothing can match
            return set()
            
        
    
    results = set()
    
    with self._MakeTemporaryIntegerTable( possible_namespace_ids, 'namespace_id' ) as temp_namespace_ids_table_name:
        
        namespace_ids_table_name = None if namespace_wildcard is None else temp_namespace_ids_table_name
        
        ( file_service_keys, file_location_is_cross_referenced ) = location_context.GetCoveringCurrentFileServiceKeys()
        
        # an explicit hash_id table already pins the file domain for us
        if hash_ids_table_name is not None:
            
            file_location_is_cross_referenced = True
            
        
        for file_service_key in file_service_keys:
            
            batch = self._GetHashIdsThatHaveTagsSimpleLocation( tag_display_type, file_service_key, tag_search_context, namespace_ids_table_name = namespace_ids_table_name, hash_ids_table_name = hash_ids_table_name, job_key = job_key )
            
            results.update( batch )
            
        
    
    if not file_location_is_cross_referenced:
        
        results = self.modules_files_storage.FilterHashIds( location_context, results )
        
    
    return results
def _GetHashIdsThatHaveTagsSimpleLocation( self, tag_display_type: int, file_service_key: bytes, tag_search_context: ClientSearch.TagSearchContext, namespace_ids_table_name = None, hash_ids_table_name = None, job_key = None ):
    """Return hash_ids that have at least one tag in one file domain, with optional namespace and hash_id constraints."""
    
    mapping_and_tag_table_names = self._GetMappingAndTagTables( tag_display_type, file_service_key, tag_search_context )
    
    # build one query per mappings table, choosing the shape that matches the optional constraints
    queries = []
    
    for ( mappings_table_name, tags_table_name ) in mapping_and_tag_table_names:
        
        if hash_ids_table_name is None:
            
            if namespace_ids_table_name is None:
                
                query = 'SELECT DISTINCT hash_id FROM {};'.format( mappings_table_name )
                
            else:
                
                query = 'SELECT DISTINCT hash_id FROM {} CROSS JOIN {} USING ( namespace_id ) CROSS JOIN {} USING ( tag_id );'.format( namespace_ids_table_name, tags_table_name, mappings_table_name )
                
            
        else:
            
            if namespace_ids_table_name is None:
                
                query = 'SELECT hash_id FROM {} WHERE EXISTS ( SELECT 1 FROM {} WHERE {}.hash_id = {}.hash_id );'.format( hash_ids_table_name, mappings_table_name, mappings_table_name, hash_ids_table_name )
                
            else:
                
                query = 'SELECT DISTINCT hash_id FROM {} CROSS JOIN {} USING ( hash_id ) CROSS JOIN {} USING ( tag_id ) CROSS JOIN {} USING ( namespace_id );'.format( hash_ids_table_name, mappings_table_name, tags_table_name, namespace_ids_table_name )
                
            
        
        queries.append( query )
        
    
    cancelled_hook = job_key.IsCancelled if job_key is not None else None
    
    nonzero_tag_hash_ids = set()
    
    for query in queries:
        
        cursor = self._Execute( query )
        
        nonzero_tag_hash_ids.update( self._STI( HydrusDB.ReadFromCancellableCursor( cursor, 10240, cancelled_hook ) ) )
        
        if job_key is not None and job_key.IsCancelled():
            
            return set()
            
        
    
    return nonzero_tag_hash_ids
def _GetHashIdsThatHaveTagAsNumComplexLocation( self, tag_display_type: int, location_context: ClientLocation.LocationContext, tag_search_context: ClientSearch.TagSearchContext, namespace, num, operator, hash_ids = None, hash_ids_table_name = None, job_key = None ):
    """Return hash_ids whose tags parse as a number satisfying 'operator num', across a multi-domain location context."""
    
    if not location_context.SearchesAnything():
        
        return set()
        
    
    ( file_service_keys, file_location_is_cross_referenced ) = location_context.GetCoveringCurrentFileServiceKeys()
    
    # an explicit hash_id table already pins the file domain for us
    if hash_ids_table_name is not None:
        
        file_location_is_cross_referenced = True
        
    
    results = set()
    
    for file_service_key in file_service_keys:
        
        batch = self._GetHashIdsThatHaveTagAsNumSimpleLocation( tag_display_type, file_service_key, tag_search_context, namespace, num, operator, hash_ids = hash_ids, hash_ids_table_name = hash_ids_table_name, job_key = job_key )
        
        results.update( batch )
        
    
    if not file_location_is_cross_referenced:
        
        results = self.modules_files_storage.FilterHashIds( location_context, results )
        
    
    return results
def _GetHashIdsThatHaveTagAsNumSimpleLocation( self, tag_display_type: int, file_service_key: bytes, tag_search_context: ClientSearch.TagSearchContext, namespace, num, operator, hash_ids = None, hash_ids_table_name = None, job_key = None ):
    """Return hash_ids whose tags parse as a number satisfying 'operator num' in one file domain."""
    
    file_service_id = self.modules_services.GetServiceId( file_service_key )
    tag_service_id = self.modules_services.GetServiceId( tag_search_context.service_key )
    
    # the combined service fans out to every real tag service
    if tag_service_id == self.modules_services.combined_tag_service_id:
        
        search_tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
        
    else:
        
        search_tag_service_ids = ( tag_service_id, )
        
    
    possible_subtag_ids = set()
    
    for search_tag_service_id in search_tag_service_ids:
        
        possible_subtag_ids.update( self.modules_tag_search.GetTagAsNumSubtagIds( file_service_id, search_tag_service_id, operator, num ) )
        
    
    if namespace == '':
        
        return self._GetHashIdsFromSubtagIds( tag_display_type, file_service_key, tag_search_context, possible_subtag_ids, hash_ids = hash_ids, hash_ids_table_name = hash_ids_table_name, job_key = job_key )
        
    
    possible_namespace_ids = { self.modules_tags.GetNamespaceId( namespace ) }
    
    return self._GetHashIdsFromNamespaceIdsSubtagIds( tag_display_type, file_service_key, tag_search_context, possible_namespace_ids, possible_subtag_ids, hash_ids = hash_ids, hash_ids_table_name = hash_ids_table_name, job_key = job_key )
def _GetHashIdStatus( self, hash_id, prefix = '' ) -> ClientImportFiles.FileImportStatus:
    """Work out the import status for a hash_id: deleted, trashed, already imported, or unknown."""
    
    if prefix != '':
        
        prefix += ': '
        
    
    hash = self.modules_hashes_local_cache.GetHash( hash_id )
    
    ( is_deleted, timestamp, file_deletion_reason ) = self.modules_files_storage.GetDeletionStatus( self.modules_services.combined_local_file_service_id, hash_id )
    
    if is_deleted:
        
        if timestamp is None:
            
            # deletion predates timestamp tracking
            note = 'Deleted from the client before delete times were tracked ({}).'.format( file_deletion_reason )
            
        else:
            
            note = 'Deleted from the client {} ({}), which was {} before this check.'.format( HydrusData.ConvertTimestampToPrettyTime( timestamp ), file_deletion_reason, HydrusData.BaseTimestampToPrettyTimeDelta( timestamp ) )
            
        
        return ClientImportFiles.FileImportStatus( CC.STATUS_DELETED, hash, note = prefix + note )
        
    
    trash_timestamp = self.modules_files_storage.GetCurrentTimestamp( self.modules_services.trash_service_id, hash_id )
    
    if trash_timestamp is not None:
        
        note = 'Currently in trash ({}). Sent there at {}, which was {} before this check.'.format( file_deletion_reason, HydrusData.ConvertTimestampToPrettyTime( trash_timestamp ), HydrusData.BaseTimestampToPrettyTimeDelta( trash_timestamp, just_now_threshold = 0 ) )
        
        return ClientImportFiles.FileImportStatus( CC.STATUS_DELETED, hash, note = prefix + note )
        
    
    import_timestamp = self.modules_files_storage.GetCurrentTimestamp( self.modules_services.combined_local_file_service_id, hash_id )
    
    if import_timestamp is not None:
        
        mime = self.modules_files_metadata_basic.GetMime( hash_id )
        
        note = 'Imported at {}, which was {} before this check.'.format( HydrusData.ConvertTimestampToPrettyTime( import_timestamp ), HydrusData.BaseTimestampToPrettyTimeDelta( import_timestamp, just_now_threshold = 0 ) )
        
        return ClientImportFiles.FileImportStatus( CC.STATUS_SUCCESSFUL_BUT_REDUNDANT, hash, mime = mime, note = prefix + note )
        
    
    return ClientImportFiles.FileImportStatus( CC.STATUS_UNKNOWN, hash )
def _GetHashStatus( self, hash_type, hash, prefix = None ) -> ClientImportFiles.FileImportStatus:
    """Resolve a hash of any supported type (sha256 or an extra hash type) to an import status."""
    
    if prefix is None:
        
        prefix = hash_type + ' recognised'
        
    
    if hash_type == 'sha256':
        
        if not self.modules_hashes.HasHash( hash ):
            
            # unknown sha256--report unknown with the hash attached, without creating a hash_id
            status = ClientImportFiles.FileImportStatus.STATICGetUnknownStatus()
            
            status.hash = hash
            
            return status
            
        
        hash_id = self.modules_hashes_local_cache.GetHashId( hash )
        
    else:
        
        try:
            
            hash_id = self.modules_hashes.GetHashIdFromExtraHash( hash_type, hash )
            
        except HydrusExceptions.DataMissing:
            
            return ClientImportFiles.FileImportStatus.STATICGetUnknownStatus()
            
        
    
    return self._GetHashIdStatus( hash_id, prefix = prefix )
def _GetIdealClientFilesLocations( self ):
    """Load the ideal client files locations (abs path -> weight) and the optional thumbnail override path."""
    
    locations_to_ideal_weights = {
        HydrusPaths.ConvertPortablePathToAbsPath( portable_location ) : weight
        for ( portable_location, weight ) in self._Execute( 'SELECT location, weight FROM ideal_client_files_locations;' )
    }
    
    row = self._Execute( 'SELECT location FROM ideal_thumbnail_override_location;' ).fetchone()
    
    if row is None:
        
        abs_ideal_thumbnail_override_location = None
        
    else:
        
        ( portable_ideal_thumbnail_override_location, ) = row
        
        abs_ideal_thumbnail_override_location = HydrusPaths.ConvertPortablePathToAbsPath( portable_ideal_thumbnail_override_location )
        
    
    return ( locations_to_ideal_weights, abs_ideal_thumbnail_override_location )
def _GetMaintenanceDue( self, stop_time ):
jobs_to_do = []
# analyze
names_to_analyze = self.modules_db_maintenance.GetTableNamesDueAnalysis()
if len( names_to_analyze ) > 0:
jobs_to_do.append( 'analyze ' + HydrusData.ToHumanInt( len( names_to_analyze ) ) + ' table_names' )
similar_files_due = self.modules_similar_files.MaintenanceDue()
if similar_files_due:
jobs_to_do.append( 'similar files work' )
return jobs_to_do
def _GetMappingTables( self, tag_display_type, file_service_key: bytes, tag_search_context: ClientSearch.TagSearchContext ):
    """List the mappings table names to search for the given file domain and tag context."""
    
    file_service_id = self.modules_services.GetServiceId( file_service_key )
    tag_service_key = tag_search_context.service_key
    
    if tag_service_key == CC.COMBINED_TAG_SERVICE_KEY:
        
        tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
        
    else:
        
        tag_service_ids = [ self.modules_services.GetServiceId( tag_service_key ) ]
        
    
    current_tables = []
    pending_tables = []
    
    for tag_service_id in tag_service_ids:
        
        if file_service_id == self.modules_services.combined_file_service_id:
            
            # combined file domain uses the master mappings tables
            ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = ClientDBMappingsStorage.GenerateMappingsTableNames( tag_service_id )
            
            current_tables.append( current_mappings_table_name )
            pending_tables.append( pending_mappings_table_name )
            
        elif tag_display_type == ClientTags.TAG_DISPLAY_STORAGE:
            
            ( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
            
            current_tables.append( cache_current_mappings_table_name )
            pending_tables.append( cache_pending_mappings_table_name )
            
        elif tag_display_type == ClientTags.TAG_DISPLAY_ACTUAL:
            
            ( cache_current_display_mappings_table_name, cache_pending_display_mappings_table_name ) = ClientDBMappingsCacheSpecificDisplay.GenerateSpecificDisplayMappingsCacheTableNames( file_service_id, tag_service_id )
            
            current_tables.append( cache_current_display_mappings_table_name )
            pending_tables.append( cache_pending_display_mappings_table_name )
            
        
    
    table_names = []
    
    if tag_search_context.include_current_tags:
        
        table_names.extend( current_tables )
        
    
    if tag_search_context.include_pending_tags:
        
        table_names.extend( pending_tables )
        
    
    return table_names
def _GetMappingAndTagTables( self, tag_display_type, file_service_key: bytes, tag_search_context: ClientSearch.TagSearchContext ):
    """List ( mappings_table_name, tags_table_name ) pairs to search for the given file domain and tag context."""
    
    file_service_id = self.modules_services.GetServiceId( file_service_key )
    tag_service_key = tag_search_context.service_key
    
    if tag_service_key == CC.COMBINED_TAG_SERVICE_KEY:
        
        tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
        
    else:
        
        tag_service_ids = [ self.modules_services.GetServiceId( tag_service_key ) ]
        
    
    current_pairs = []
    pending_pairs = []
    
    for tag_service_id in tag_service_ids:
        
        tags_table_name = self.modules_tag_search.GetTagsTableName( file_service_id, tag_service_id )
        
        if file_service_id == self.modules_services.combined_file_service_id:
            
            # yo this does not support tag_display_actual--big tricky problem
            ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = ClientDBMappingsStorage.GenerateMappingsTableNames( tag_service_id )
            
            current_pairs.append( ( current_mappings_table_name, tags_table_name ) )
            pending_pairs.append( ( pending_mappings_table_name, tags_table_name ) )
            
        elif tag_display_type == ClientTags.TAG_DISPLAY_STORAGE:
            
            ( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
            
            current_pairs.append( ( cache_current_mappings_table_name, tags_table_name ) )
            pending_pairs.append( ( cache_pending_mappings_table_name, tags_table_name ) )
            
        elif tag_display_type == ClientTags.TAG_DISPLAY_ACTUAL:
            
            ( cache_current_display_mappings_table_name, cache_pending_display_mappings_table_name ) = ClientDBMappingsCacheSpecificDisplay.GenerateSpecificDisplayMappingsCacheTableNames( file_service_id, tag_service_id )
            
            current_pairs.append( ( cache_current_display_mappings_table_name, tags_table_name ) )
            pending_pairs.append( ( cache_pending_display_mappings_table_name, tags_table_name ) )
            
        
    
    table_names = []
    
    if tag_search_context.include_current_tags:
        
        table_names.extend( current_pairs )
        
    
    if tag_search_context.include_pending_tags:
        
        table_names.extend( pending_pairs )
        
    
    return table_names
def _GetMediaPredicates( self, tag_search_context: ClientSearch.TagSearchContext, tags_to_counts, inclusive, job_key = None ):
    """Convert a tag -> ( current_count, pending_count ) mapping into display predicates."""
    
    display_tag_service_id = self.modules_services.GetServiceId( tag_search_context.display_service_key )
    
    # max counts are not computed in this path
    max_current_count = None
    max_pending_count = None
    
    tag_ids_to_full_counts = {}
    
    showed_bad_tag_error = False
    
    for ( i, ( tag, ( current_count, pending_count ) ) ) in enumerate( tags_to_counts.items() ):
        
        try:
            
            tag_id = self.modules_tags.GetTagId( tag )
            
        except HydrusExceptions.TagSizeException:
            
            # warn once, then silently skip any further unstorable tags
            if not showed_bad_tag_error:
                
                showed_bad_tag_error = True
                
                HydrusData.ShowText( 'Hey, you seem to have an invalid tag in view right now! Please run the \'repair invalid tags\' routine under the \'database\' menu asap!' )
                
            
            continue
            
        
        tag_ids_to_full_counts[ tag_id ] = ( current_count, max_current_count, pending_count, max_pending_count )
        
        # check for cancellation every hundred tags
        if i % 100 == 0 and job_key is not None and job_key.IsCancelled():
            
            return []
            
        
    
    if job_key is not None and job_key.IsCancelled():
        
        return []
        
    
    return self._GeneratePredicatesFromTagIdsAndCounts( ClientTags.TAG_DISPLAY_ACTUAL, display_tag_service_id, tag_ids_to_full_counts, inclusive, job_key = job_key )
def _GetMediaResults( self, hash_ids: typing.Iterable[ int ], sorted = False ):
    """Fetch MediaResult objects for the given hash_ids, using the weakref cache where possible.
    
    Any ids missing from the cache are built in one temp-table pass over the metadata
    tables and then added to the cache. If 'sorted' is True (NOTE: parameter shadows the
    builtin), results come back in input hash_id order, with unknown ids dropped.
    """
    
    ( cached_media_results, missing_hash_ids ) = self._weakref_media_result_cache.GetMediaResultsAndMissing( hash_ids )
    
    if len( missing_hash_ids ) > 0:
        
        # get first detailed results
        
        missing_hash_ids_to_hashes = self.modules_hashes_local_cache.GetHashIdsToHashes( hash_ids = missing_hash_ids )
        
        with self._MakeTemporaryIntegerTable( missing_hash_ids, 'hash_id' ) as temp_table_name:
            
            # everything here is temp hashes to metadata
            
            # basic file info (size/mime/dimensions/etc.) for ids that have a files_info row
            hash_ids_to_info = { hash_id : ClientMediaManagers.FileInfoManager( hash_id, missing_hash_ids_to_hashes[ hash_id ], size, mime, width, height, duration, num_frames, has_audio, num_words ) for ( hash_id, size, mime, width, height, duration, num_frames, has_audio, num_words ) in self._Execute( 'SELECT * FROM {} CROSS JOIN files_info USING ( hash_id );'.format( temp_table_name ) ) }
            
            # per-service file presence/deletion state
            ( hash_ids_to_current_file_service_ids_and_timestamps,
            hash_ids_to_deleted_file_service_ids_and_timestamps,
            hash_ids_to_pending_file_service_ids,
            hash_ids_to_petitioned_file_service_ids
            ) = self.modules_files_storage.GetHashIdsToServiceInfoDicts( temp_table_name )
            
            hash_ids_to_urls = HydrusData.BuildKeyToSetDict( self._Execute( 'SELECT hash_id, url FROM {} CROSS JOIN url_map USING ( hash_id ) CROSS JOIN urls USING ( url_id );'.format( temp_table_name ) ) )
            
            hash_ids_to_service_ids_and_filenames = HydrusData.BuildKeyToListDict( ( ( hash_id, ( service_id, filename ) ) for ( hash_id, service_id, filename ) in self._Execute( 'SELECT hash_id, service_id, filename FROM {} CROSS JOIN service_filenames USING ( hash_id );'.format( temp_table_name ) ) ) )
            
            hash_ids_to_local_ratings = HydrusData.BuildKeyToListDict( ( ( hash_id, ( service_id, rating ) ) for ( service_id, hash_id, rating ) in self._Execute( 'SELECT service_id, hash_id, rating FROM {} CROSS JOIN local_ratings USING ( hash_id );'.format( temp_table_name ) ) ) )
            
            hash_ids_to_names_and_notes = HydrusData.BuildKeyToListDict( ( ( hash_id, ( name, note ) ) for ( hash_id, name, note ) in self._Execute( 'SELECT file_notes.hash_id, label, note FROM {} CROSS JOIN file_notes USING ( hash_id ), labels, notes ON ( file_notes.name_id = labels.label_id AND file_notes.note_id = notes.note_id );'.format( temp_table_name ) ) ) )
            
            hash_ids_to_file_viewing_stats = HydrusData.BuildKeyToListDict( ( ( hash_id, ( canvas_type, last_viewed_timestamp, views, viewtime ) ) for ( hash_id, canvas_type, last_viewed_timestamp, views, viewtime ) in self._Execute( 'SELECT hash_id, canvas_type, last_viewed_timestamp, views, viewtime FROM {} CROSS JOIN file_viewing_stats USING ( hash_id );'.format( temp_table_name ) ) ) )
            
            hash_ids_to_file_viewing_stats_managers = { hash_id : ClientMediaManagers.FileViewingStatsManager( file_viewing_stats ) for ( hash_id, file_viewing_stats ) in hash_ids_to_file_viewing_stats.items() }
            
            hash_ids_to_file_modified_timestamps = dict( self._Execute( 'SELECT hash_id, file_modified_timestamp FROM {} CROSS JOIN file_modified_timestamps USING ( hash_id );'.format( temp_table_name ) ) )
            
            hash_ids_to_domain_modified_timestamps = HydrusData.BuildKeyToListDict( ( ( hash_id, ( domain, timestamp ) ) for ( hash_id, domain, timestamp ) in self._Execute( 'SELECT hash_id, domain, file_modified_timestamp FROM {} CROSS JOIN file_domain_modified_timestamps USING ( hash_id ) CROSS JOIN url_domains USING ( domain_id );'.format( temp_table_name ) ) ) )
            
            hash_ids_to_archive_timestamps = dict( self._Execute( 'SELECT hash_id, archived_timestamp FROM {} CROSS JOIN archive_timestamps USING ( hash_id );'.format( temp_table_name ) ) )
            
            hash_ids_to_local_file_deletion_reasons = self.modules_files_storage.GetHashIdsToFileDeletionReasons( temp_table_name )
            
            hash_ids_to_current_file_service_ids = { hash_id : [ file_service_id for ( file_service_id, timestamp ) in file_service_ids_and_timestamps ] for ( hash_id, file_service_ids_and_timestamps ) in hash_ids_to_current_file_service_ids_and_timestamps.items() }
            
            hash_ids_to_tags_managers = self._GetForceRefreshTagsManagersWithTableHashIds( missing_hash_ids, temp_table_name, hash_ids_to_current_file_service_ids = hash_ids_to_current_file_service_ids )
            
        
        # build it
        
        service_ids_to_service_keys = self.modules_services.GetServiceIdsToServiceKeys()
        
        missing_media_results = []
        
        for hash_id in missing_hash_ids:
            
            tags_manager = hash_ids_to_tags_managers[ hash_id ]
            
            # location manager
            
            current_file_service_keys_to_timestamps = { service_ids_to_service_keys[ service_id ] : timestamp for ( service_id, timestamp ) in hash_ids_to_current_file_service_ids_and_timestamps[ hash_id ] }
            
            deleted_file_service_keys_to_timestamps = { service_ids_to_service_keys[ service_id ] : ( timestamp, original_timestamp ) for ( service_id, timestamp, original_timestamp ) in hash_ids_to_deleted_file_service_ids_and_timestamps[ hash_id ] }
            
            pending_file_service_keys = { service_ids_to_service_keys[ service_id ] for service_id in hash_ids_to_pending_file_service_ids[ hash_id ] }
            
            petitioned_file_service_keys = { service_ids_to_service_keys[ service_id ] for service_id in hash_ids_to_petitioned_file_service_ids[ hash_id ] }
            
            inbox = hash_id in self.modules_files_metadata_basic.inbox_hash_ids
            
            urls = hash_ids_to_urls[ hash_id ]
            
            service_ids_to_filenames = HydrusData.BuildKeyToListDict( hash_ids_to_service_ids_and_filenames[ hash_id ] )
            
            service_keys_to_filenames = { service_ids_to_service_keys[ service_id ] : filenames for ( service_id, filenames ) in list(service_ids_to_filenames.items()) }
            
            timestamp_manager = ClientMediaManagers.TimestampManager()
            
            if hash_id in hash_ids_to_file_modified_timestamps:
                
                timestamp_manager.SetFileModifiedTimestamp( hash_ids_to_file_modified_timestamps[ hash_id ] )
                
            
            if hash_id in hash_ids_to_domain_modified_timestamps:
                
                for ( domain, modified_timestamp ) in hash_ids_to_domain_modified_timestamps[ hash_id ]:
                    
                    timestamp_manager.SetDomainModifiedTimestamp( domain, modified_timestamp )
                    
                
            
            if hash_id in hash_ids_to_archive_timestamps:
                
                timestamp_manager.SetArchivedTimestamp( hash_ids_to_archive_timestamps[ hash_id ] )
                
            
            if hash_id in hash_ids_to_local_file_deletion_reasons:
                
                local_file_deletion_reason = hash_ids_to_local_file_deletion_reasons[ hash_id ]
                
            else:
                
                local_file_deletion_reason = None
                
            
            locations_manager = ClientMediaManagers.LocationsManager(
                current_file_service_keys_to_timestamps,
                deleted_file_service_keys_to_timestamps,
                pending_file_service_keys,
                petitioned_file_service_keys,
                inbox = inbox,
                urls = urls,
                service_keys_to_filenames = service_keys_to_filenames,
                timestamp_manager = timestamp_manager,
                local_file_deletion_reason = local_file_deletion_reason
            )
            
            # ratings manager
            
            local_ratings = { service_ids_to_service_keys[ service_id ] : rating for ( service_id, rating ) in hash_ids_to_local_ratings[ hash_id ] }
            
            ratings_manager = ClientMediaManagers.RatingsManager( local_ratings )
            
            # notes manager
            
            if hash_id in hash_ids_to_names_and_notes:
                
                names_to_notes = dict( hash_ids_to_names_and_notes[ hash_id ] )
                
            else:
                
                names_to_notes = dict()
                
            
            notes_manager = ClientMediaManagers.NotesManager( names_to_notes )
            
            # file viewing stats manager
            
            if hash_id in hash_ids_to_file_viewing_stats_managers:
                
                file_viewing_stats_manager = hash_ids_to_file_viewing_stats_managers[ hash_id ]
                
            else:
                
                file_viewing_stats_manager = ClientMediaManagers.FileViewingStatsManager.STATICGenerateEmptyManager()
                
            
            # file info manager--fall back to a minimal one when there is no files_info row
            
            if hash_id in hash_ids_to_info:
                
                file_info_manager = hash_ids_to_info[ hash_id ]
                
            else:
                
                hash = missing_hash_ids_to_hashes[ hash_id ]
                
                file_info_manager = ClientMediaManagers.FileInfoManager( hash_id, hash )
                
            
            missing_media_results.append( ClientMediaResult.MediaResult( file_info_manager, tags_manager, locations_manager, ratings_manager, notes_manager, file_viewing_stats_manager ) )
            
        
        self._weakref_media_result_cache.AddMediaResults( missing_media_results )
        
        cached_media_results.extend( missing_media_results )
        
    
    media_results = cached_media_results
    
    if sorted:
        
        # restore input order, dropping ids we could not produce a result for
        hash_ids_to_media_results = { media_result.GetHashId() : media_result for media_result in media_results }
        
        media_results = [ hash_ids_to_media_results[ hash_id ] for hash_id in hash_ids if hash_id in hash_ids_to_media_results ]
        
    
    return media_results
def _GetMediaResultFromHash( self, hash ) -> ClientMediaResult.MediaResult:
    """Fetch the MediaResult for a single hash via the batch lookup."""
    
    # single-element batch; the first entry is our result
    media_results = self._GetMediaResultsFromHashes( [ hash ] )
    
    return media_results[ 0 ]
def _GetMediaResultsFromHashes( self, hashes: typing.Collection[ bytes ], sorted: bool = False ) -> typing.List[ ClientMediaResult.MediaResult ]:
    """Fetch MediaResults for the given hashes, optionally preserving the caller's order."""
    
    query_hash_ids = set( self.modules_hashes_local_cache.GetHashIds( hashes ) )
    
    media_results = self._GetMediaResults( query_hash_ids )
    
    if not sorted:
        
        return media_results
        
    
    # duplicate input hashes collapsed to fewer ids--dedupe before re-ordering
    if len( hashes ) > len( query_hash_ids ):
        
        hashes = HydrusData.DedupeList( hashes )
        
    
    hashes_to_media_results = { media_result.GetHash() : media_result for media_result in media_results }
    
    return [ hashes_to_media_results[ hash ] for hash in hashes if hash in hashes_to_media_results ]
def _GetNumsPending( self ):
    """Count pending/petitioned content per uploadable service, keyed by service key."""
    
    pendings = {}
    
    for service in self.modules_services.GetServices( ( HC.TAG_REPOSITORY, HC.FILE_REPOSITORY, HC.IPFS ) ):
        
        service_key = service.GetServiceKey()
        service_type = service.GetServiceType()
        
        service_id = self.modules_services.GetServiceId( service_key )
        
        # choose the counters relevant to this service type
        if service_type in ( HC.FILE_REPOSITORY, HC.IPFS ):
            
            info_types = { HC.SERVICE_INFO_NUM_PENDING_FILES, HC.SERVICE_INFO_NUM_PETITIONED_FILES }
            
        elif service_type == HC.TAG_REPOSITORY:
            
            info_types = { HC.SERVICE_INFO_NUM_PENDING_MAPPINGS, HC.SERVICE_INFO_NUM_PETITIONED_MAPPINGS, HC.SERVICE_INFO_NUM_PENDING_TAG_SIBLINGS, HC.SERVICE_INFO_NUM_PETITIONED_TAG_SIBLINGS, HC.SERVICE_INFO_NUM_PENDING_TAG_PARENTS, HC.SERVICE_INFO_NUM_PETITIONED_TAG_PARENTS }
            
        else:
            
            info_types = set()
            
        
        pendings[ service_key ] = self._GetServiceInfoSpecific( service_id, service_type, info_types )
        
    
    return pendings
def _GetOptions( self ):
    """Load the legacy options object, creating defaults on first run and backfilling any missing keys."""
    
    result = self._Execute( 'SELECT options FROM options;' ).fetchone()
    
    if result is None:
        
        # first run--persist and return the defaults
        options = ClientDefaults.GetClientDefaultOptions()
        
        self._Execute( 'INSERT INTO options ( options ) VALUES ( ? );', ( options, ) )
        
        return options
        
    
    ( options, ) = result
    
    # backfill keys added since this options object was stored
    for ( key, value ) in ClientDefaults.GetClientDefaultOptions().items():
        
        options.setdefault( key, value )
        
    
    return options
def _GetPending( self, service_key, content_types ):
    """Gather the next batch of pending/petitioned content to upload for a service.
    
    For repositories, returns a ClientToServerUpdate of tag/file content (or, for a file
    repo with a pending file, a MediaResult for the next file to upload). For IPFS,
    returns a MediaResult to pin, a ( hash, multihash ) pair to unpin, or None when
    there is nothing left to do.
    """
    
    service_id = self.modules_services.GetServiceId( service_key )
    
    service = self.modules_services.GetService( service_id )
    
    service_type = service.GetServiceType()
    
    if service_type in HC.REPOSITORIES:
        
        account = service.GetAccount()
        
        client_to_server_update = HydrusNetwork.ClientToServerUpdate()
        
        if service_type == HC.TAG_REPOSITORY:
            
            if HC.CONTENT_TYPE_MAPPINGS in content_types:
                
                if account.HasPermission( HC.CONTENT_TYPE_MAPPINGS, HC.PERMISSION_ACTION_CREATE ):
                    
                    ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = ClientDBMappingsStorage.GenerateMappingsTableNames( service_id )
                    
                    pending_dict = HydrusData.BuildKeyToListDict( self._Execute( 'SELECT tag_id, hash_id FROM ' + pending_mappings_table_name + ' ORDER BY tag_id LIMIT 100;' ) )
                    
                    pending_mapping_ids = list( pending_dict.items() )
                    
                    # dealing with a scary situation when (due to some bug) mappings are current and pending. they get uploaded, but the content update makes no changes, so we cycle infinitely!
                    addable_pending_mapping_ids = self._FilterExistingUpdateMappings( service_id, pending_mapping_ids, HC.CONTENT_UPDATE_ADD )
                    
                    pending_mapping_weight = sum( ( len( hash_ids ) for ( tag_id, hash_ids ) in pending_mapping_ids ) )
                    addable_pending_mapping_weight = sum( ( len( hash_ids ) for ( tag_id, hash_ids ) in addable_pending_mapping_ids ) )
                    
                    # any difference means some pending rows are also current--refuse to upload and tell the user to repair
                    if pending_mapping_weight != addable_pending_mapping_weight:
                        
                        message = 'Hey, while going through the pending tags to upload, it seemed some were simultaneously already in the \'current\' state. This looks like a bug.'
                        message += os.linesep * 2
                        message += 'Please run _database->check and repair->fix logically inconsistent mappings_. If everything seems good after that and you do not get this message again, you should be all fixed. If not, you may need to regenerate your mappings storage cache under the \'database\' menu. If that does not work, hydev would like to know about it!'
                        
                        HydrusData.ShowText( message )
                        
                        raise HydrusExceptions.VetoException( 'Logically inconsistent mappings detected!' )
                        
                    
                    for ( tag_id, hash_ids ) in pending_mapping_ids:
                        
                        tag = self.modules_tags_local_cache.GetTag( tag_id )
                        hashes = self.modules_hashes_local_cache.GetHashes( hash_ids )
                        
                        content = HydrusNetwork.Content( HC.CONTENT_TYPE_MAPPINGS, ( tag, hashes ) )
                        
                        client_to_server_update.AddContent( HC.CONTENT_UPDATE_PEND, content )
                        
                    
                
                if account.HasPermission( HC.CONTENT_TYPE_MAPPINGS, HC.PERMISSION_ACTION_PETITION ):
                    
                    petitioned_dict = HydrusData.BuildKeyToListDict( [ ( ( tag_id, reason_id ), hash_id ) for ( tag_id, hash_id, reason_id ) in self._Execute( 'SELECT tag_id, hash_id, reason_id FROM ' + petitioned_mappings_table_name + ' ORDER BY reason_id LIMIT 100;' ) ] )
                    
                    petitioned_mapping_ids = list( petitioned_dict.items() )
                    
                    # dealing with a scary situation when (due to some bug) mappings are deleted and petitioned. they get uploaded, but the content update makes no changes, so we cycle infinitely!
                    deletable_and_petitioned_mappings = self._FilterExistingUpdateMappings(
                        service_id,
                        [ ( tag_id, hash_ids ) for ( ( tag_id, reason_id ), hash_ids ) in petitioned_mapping_ids ],
                        HC.CONTENT_UPDATE_DELETE
                    )
                    
                    petitioned_mapping_weight = sum( ( len( hash_ids ) for ( tag_id, hash_ids ) in petitioned_mapping_ids ) )
                    deletable_petitioned_mapping_weight = sum( ( len( hash_ids ) for ( tag_id, hash_ids ) in deletable_and_petitioned_mappings ) )
                    
                    # any difference means some petitioned rows are also deleted--refuse to upload and tell the user to repair
                    if petitioned_mapping_weight != deletable_petitioned_mapping_weight:
                        
                        message = 'Hey, while going through the petitioned tags to upload, it seemed some were simultaneously already in the \'deleted\' state. This looks like a bug.'
                        message += os.linesep * 2
                        message += 'Please run _database->check and repair->fix logically inconsistent mappings_. If everything seems good after that and you do not get this message again, you should be all fixed. If not, you may need to regenerate your mappings storage cache under the \'database\' menu. If that does not work, hydev would like to know about it!'
                        
                        HydrusData.ShowText( message )
                        
                        raise HydrusExceptions.VetoException( 'Logically inconsistent mappings detected!' )
                        
                    
                    for ( ( tag_id, reason_id ), hash_ids ) in petitioned_mapping_ids:
                        
                        tag = self.modules_tags_local_cache.GetTag( tag_id )
                        hashes = self.modules_hashes_local_cache.GetHashes( hash_ids )
                        
                        reason = self.modules_texts.GetText( reason_id )
                        
                        content = HydrusNetwork.Content( HC.CONTENT_TYPE_MAPPINGS, ( tag, hashes ) )
                        
                        client_to_server_update.AddContent( HC.CONTENT_UPDATE_PETITION, content, reason )
                        
                    
                
            
            if HC.CONTENT_TYPE_TAG_PARENTS in content_types:
                
                if account.HasPermission( HC.CONTENT_TYPE_TAG_PARENTS, HC.PERMISSION_ACTION_PETITION ):
                    
                    # NOTE: pending parents go up one at a time (LIMIT 1), petitions in batches of 100
                    pending = self._Execute( 'SELECT child_tag_id, parent_tag_id, reason_id FROM tag_parent_petitions WHERE service_id = ? AND status = ? ORDER BY reason_id LIMIT 1;', ( service_id, HC.CONTENT_STATUS_PENDING ) ).fetchall()
                    
                    for ( child_tag_id, parent_tag_id, reason_id ) in pending:
                        
                        child_tag = self.modules_tags_local_cache.GetTag( child_tag_id )
                        parent_tag = self.modules_tags_local_cache.GetTag( parent_tag_id )
                        
                        reason = self.modules_texts.GetText( reason_id )
                        
                        content = HydrusNetwork.Content( HC.CONTENT_TYPE_TAG_PARENTS, ( child_tag, parent_tag ) )
                        
                        client_to_server_update.AddContent( HC.CONTENT_UPDATE_PEND, content, reason )
                        
                    
                    petitioned = self._Execute( 'SELECT child_tag_id, parent_tag_id, reason_id FROM tag_parent_petitions WHERE service_id = ? AND status = ? ORDER BY reason_id LIMIT 100;', ( service_id, HC.CONTENT_STATUS_PETITIONED ) ).fetchall()
                    
                    for ( child_tag_id, parent_tag_id, reason_id ) in petitioned:
                        
                        child_tag = self.modules_tags_local_cache.GetTag( child_tag_id )
                        parent_tag = self.modules_tags_local_cache.GetTag( parent_tag_id )
                        
                        reason = self.modules_texts.GetText( reason_id )
                        
                        content = HydrusNetwork.Content( HC.CONTENT_TYPE_TAG_PARENTS, ( child_tag, parent_tag ) )
                        
                        client_to_server_update.AddContent( HC.CONTENT_UPDATE_PETITION, content, reason )
                        
                    
                
            
            if HC.CONTENT_TYPE_TAG_SIBLINGS in content_types:
                
                if account.HasPermission( HC.CONTENT_TYPE_TAG_SIBLINGS, HC.PERMISSION_ACTION_PETITION ):
                    
                    pending = self._Execute( 'SELECT bad_tag_id, good_tag_id, reason_id FROM tag_sibling_petitions WHERE service_id = ? AND status = ? ORDER BY reason_id LIMIT 100;', ( service_id, HC.CONTENT_STATUS_PENDING ) ).fetchall()
                    
                    for ( bad_tag_id, good_tag_id, reason_id ) in pending:
                        
                        bad_tag = self.modules_tags_local_cache.GetTag( bad_tag_id )
                        good_tag = self.modules_tags_local_cache.GetTag( good_tag_id )
                        
                        reason = self.modules_texts.GetText( reason_id )
                        
                        content = HydrusNetwork.Content( HC.CONTENT_TYPE_TAG_SIBLINGS, ( bad_tag, good_tag ) )
                        
                        client_to_server_update.AddContent( HC.CONTENT_UPDATE_PEND, content, reason )
                        
                    
                    petitioned = self._Execute( 'SELECT bad_tag_id, good_tag_id, reason_id FROM tag_sibling_petitions WHERE service_id = ? AND status = ? ORDER BY reason_id LIMIT 100;', ( service_id, HC.CONTENT_STATUS_PETITIONED ) ).fetchall()
                    
                    for ( bad_tag_id, good_tag_id, reason_id ) in petitioned:
                        
                        bad_tag = self.modules_tags_local_cache.GetTag( bad_tag_id )
                        good_tag = self.modules_tags_local_cache.GetTag( good_tag_id )
                        
                        reason = self.modules_texts.GetText( reason_id )
                        
                        content = HydrusNetwork.Content( HC.CONTENT_TYPE_TAG_SIBLINGS, ( bad_tag, good_tag ) )
                        
                        client_to_server_update.AddContent( HC.CONTENT_UPDATE_PETITION, content, reason )
                        
                    
                
            
        elif service_type == HC.FILE_REPOSITORY:
            
            if HC.CONTENT_TYPE_FILES in content_types:
                
                if account.HasPermission( HC.CONTENT_TYPE_FILES, HC.PERMISSION_ACTION_CREATE ):
                    
                    result = self.modules_files_storage.GetAPendingHashId( service_id )
                    
                    # a pending file upload takes priority--return the MediaResult for the caller to upload
                    if result is not None:
                        
                        hash_id = result
                        
                        media_result = self._GetMediaResults( ( hash_id, ) )[ 0 ]
                        
                        return media_result
                        
                    
                
                if account.HasPermission( HC.CONTENT_TYPE_FILES, HC.PERMISSION_ACTION_PETITION ):
                    
                    petitioned_rows = self.modules_files_storage.GetSomePetitionedRows( service_id )
                    
                    for ( reason_id, hash_ids ) in petitioned_rows:
                        
                        hashes = self.modules_hashes_local_cache.GetHashes( hash_ids )
                        
                        reason = self.modules_texts.GetText( reason_id )
                        
                        content = HydrusNetwork.Content( HC.CONTENT_TYPE_FILES, hashes )
                        
                        client_to_server_update.AddContent( HC.CONTENT_UPDATE_PETITION, content, reason )
                        
                    
                
            
        
        if client_to_server_update.HasContent():
            
            return client_to_server_update
            
        
    elif service_type == HC.IPFS:
        
        result = self.modules_files_storage.GetAPendingHashId( service_id )
        
        # a pending pin takes priority--return the MediaResult for the caller to pin
        if result is not None:
            
            hash_id = result
            
            media_result = self._GetMediaResults( ( hash_id, ) )[ 0 ]
            
            return media_result
            
        
        while True:
            
            result = self.modules_files_storage.GetAPetitionedHashId( service_id )
            
            if result is None:
                
                break
                
            else:
                
                hash_id = result
                
                hash = self.modules_hashes_local_cache.GetHash( hash_id )
                
                try:
                    
                    multihash = self._GetServiceFilename( service_id, hash_id )
                    
                except HydrusExceptions.DataMissing:
                    
                    # somehow this file exists in ipfs (or at least is petitioned), but there is no multihash.
                    # this is probably due to a legacy sync issue
                    # so lets just process that now and continue
                    # in future we'll have ipfs service sync to repopulate missing filenames
                    
                    content_update = HydrusData.ContentUpdate( HC.CONTENT_TYPE_FILES, HC.CONTENT_UPDATE_DELETE, ( hash, ) )
                    
                    service_keys_to_content_updates = { service_key : [ content_update ] }
                    
                    self._ProcessContentUpdates( service_keys_to_content_updates )
                    
                    continue
                    
                
                return ( hash, multihash )
                
            
        
    
    return None
def _GetPossibleAdditionalDBFilenames( self ):
    # The base class's auxiliary db-directory filenames, plus the mpv config
    # this client also keeps alongside its database files.
    
    base_paths = HydrusDB.HydrusDB._GetPossibleAdditionalDBFilenames( self )
    
    return base_paths + [ 'mpv.conf' ]
    
def _GetRecentTags( self, service_key ):
    # Return the most recently used tags for the given service, newest first,
    # capped at the user's 'num_recent_tags' option. Rows that have decayed
    # past the cap are deleted from the recent_tags table as a side effect.
    
    service_id = self.modules_services.GetServiceId( service_key )
    
    tag_ids_to_timestamp = { tag_id : timestamp for ( tag_id, timestamp ) in self._Execute( 'SELECT tag_id, timestamp FROM recent_tags WHERE service_id = ?;', ( service_id, ) ) }
    
    # sort tag ids by their timestamp, newest first
    newest_first = sorted( tag_ids_to_timestamp.keys(), key = tag_ids_to_timestamp.__getitem__, reverse = True )
    
    num_we_want = HG.client_controller.new_options.GetNoneableInteger( 'num_recent_tags' )
    
    if num_we_want is None: # was '== None'; identity comparison is correct for None
        
        num_we_want = 20
        
    
    # prune anything beyond the window we want to keep
    decayed = newest_first[ num_we_want : ]
    
    if len( decayed ) > 0:
        
        self._ExecuteMany( 'DELETE FROM recent_tags WHERE service_id = ? AND tag_id = ?;', ( ( service_id, tag_id ) for tag_id in decayed ) )
        
    
    sorted_recent_tag_ids = newest_first[ : num_we_want ]
    
    tag_ids_to_tags = self.modules_tags_local_cache.GetTagIdsToTags( tag_ids = sorted_recent_tag_ids )
    
    sorted_recent_tags = [ tag_ids_to_tags[ tag_id ] for tag_id in sorted_recent_tag_ids ]
    
    return sorted_recent_tags
    
def _GetRelatedTags( self, service_key, skip_hash, search_tags, max_results, max_time_to_take ):
    # Suggest tags related to search_tags by sampling the other tags on files
    # that share them. The work is time-budgeted: half of max_time_to_take for
    # finding candidate files, half for counting their tags. skip_hash's own
    # file is excluded so it cannot vote for itself. Returns up to max_results
    # storage-domain predicates, most common first.
    
    stop_time_for_finding_files = HydrusData.GetNowPrecise() + ( max_time_to_take / 2 )
    stop_time_for_finding_tags = HydrusData.GetNowPrecise() + ( max_time_to_take / 2 )
    
    service_id = self.modules_services.GetServiceId( service_key )
    
    skip_hash_id = self.modules_hashes_local_cache.GetHashId( skip_hash )
    
    ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = ClientDBMappingsStorage.GenerateMappingsTableNames( service_id )
    
    tag_ids = [ self.modules_tags.GetTagId( tag ) for tag in search_tags ]
    
    # shuffle so the time-limited scan below is not biased towards any one search tag
    random.shuffle( tag_ids )
    
    # counts, per file, how many of the search tags it carries
    hash_ids_counter = collections.Counter()
    
    with self._MakeTemporaryIntegerTable( tag_ids, 'tag_id' ) as temp_table_name:
        
        # temp tags to mappings
        cursor = self._Execute( 'SELECT hash_id FROM {} CROSS JOIN {} USING ( tag_id );'.format( temp_table_name, current_mappings_table_name ) )
        
        cancelled_hook = lambda: HydrusData.TimeHasPassedPrecise( stop_time_for_finding_files )
        
        for ( hash_id, ) in HydrusDB.ReadFromCancellableCursor( cursor, 128, cancelled_hook = cancelled_hook ):
            
            hash_ids_counter[ hash_id ] += 1
            
        
    
    if skip_hash_id in hash_ids_counter:
        
        del hash_ids_counter[ skip_hash_id ]
        
    
    #
    
    if len( hash_ids_counter ) == 0:
        
        return []
        
    
    # this stuff is often 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1.....
    # the 1 stuff often produces large quantities of the same very popular tag, so your search for [ 'eva', 'female' ] will produce 'touhou' because so many 2hu images have 'female'
    # so we want to do a 'soft' intersect, only picking the files that have the greatest number of shared search_tags
    # this filters to only the '2' results, which gives us eva females and their hair colour and a few choice other popular tags for that particular domain
    
    [ ( gumpf, largest_count ) ] = hash_ids_counter.most_common( 1 )
    
    # 'soft' intersect: keep files within 80% of the best shared-tag count
    hash_ids = [ hash_id for ( hash_id, current_count ) in hash_ids_counter.items() if current_count > largest_count * 0.8 ]
    
    counter = collections.Counter()
    
    # shuffle so the time-budgeted tag count samples the candidate files fairly
    random.shuffle( hash_ids )
    
    for hash_id in hash_ids:
        
        for tag_id in self._STI( self._Execute( 'SELECT tag_id FROM ' + current_mappings_table_name + ' WHERE hash_id = ?;', ( hash_id, ) ) ):
            
            counter[ tag_id ] += 1
            
        
        if HydrusData.TimeHasPassedPrecise( stop_time_for_finding_tags ):
            
            break
            
        
    
    #
    
    # do not suggest the tags we searched with
    for tag_id in tag_ids:
        
        if tag_id in counter:
            
            del counter[ tag_id ]
            
        
    
    results = counter.most_common( max_results )
    
    inclusive = True
    pending_count = 0
    
    # presumably ( current_count, current_max, pending_count, pending_max ) --
    # confirm tuple shape against _GeneratePredicatesFromTagIdsAndCounts
    tag_ids_to_full_counts = { tag_id : ( current_count, None, pending_count, None ) for ( tag_id, current_count ) in results }
    
    predicates = self._GeneratePredicatesFromTagIdsAndCounts( ClientTags.TAG_DISPLAY_STORAGE, service_id, tag_ids_to_full_counts, inclusive )
    
    return predicates
    
def _GetRepositoryThumbnailHashesIDoNotHave( self, service_key ):
    # Return up to 10,000 hashes of thumbnail-capable files current on this
    # repository for which no row exists in remote_thumbnails. Files whose
    # thumbnail already exists on disk are registered in remote_thumbnails
    # (rather than returned) so they drop out of future queries.
    
    service_id = self.modules_services.GetServiceId( service_key )
    
    current_files_table_name = ClientDBFilesStorage.GenerateFilesTableName( service_id, HC.CONTENT_STATUS_CURRENT )
    
    # current files with a thumbnail-capable mime, minus those we already track
    needed_hash_ids = self._STL( self._Execute( 'SELECT hash_id FROM {} NATURAL JOIN files_info WHERE mime IN {} EXCEPT SELECT hash_id FROM remote_thumbnails WHERE service_id = ?;'.format( current_files_table_name, HydrusData.SplayListForDB( HC.MIMES_WITH_THUMBNAILS ) ), ( service_id, ) ) )
    
    needed_hashes = []
    
    client_files_manager = HG.client_controller.client_files_manager
    
    for hash_id in needed_hash_ids:
        
        hash = self.modules_hashes_local_cache.GetHash( hash_id )
        
        if client_files_manager.LocklessHasThumbnail( hash ):
            
            # thumbnail already on disk--just record that fact
            self._Execute( 'INSERT OR IGNORE INTO remote_thumbnails ( service_id, hash_id ) VALUES ( ?, ? );', ( service_id, hash_id ) )
            
        else:
            
            needed_hashes.append( hash )
            
            # cap the batch size
            if len( needed_hashes ) == 10000:
                
                return needed_hashes
                
            
        
    
    return needed_hashes
    
def _GetServiceDirectoryHashes( self, service_key, dirname ):
    # Fetch the hashes of every file registered under the named directory on
    # the given service.
    
    service_id = self.modules_services.GetServiceId( service_key )
    directory_id = self.modules_texts.GetTextId( dirname )
    
    matching_hash_ids = self._STL( self._Execute( 'SELECT hash_id FROM service_directory_file_map WHERE service_id = ? AND directory_id = ?;', ( service_id, directory_id ) ) )
    
    return self.modules_hashes_local_cache.GetHashes( matching_hash_ids )
    
def _GetServiceDirectoriesInfo( self, service_key ):
    # List ( dirname, num_files, total_size, note ) for every directory
    # recorded for the given service, resolving directory ids to names.
    
    service_id = self.modules_services.GetServiceId( service_key )
    
    raw_rows = self._Execute( 'SELECT directory_id, num_files, total_size, note FROM service_directories WHERE service_id = ?;', ( service_id, ) ).fetchall()
    
    return [ ( self.modules_texts.GetText( directory_id ), num_files, total_size, note ) for ( directory_id, num_files, total_size, note ) in raw_rows ]
    
def _GetServiceFilename( self, service_id, hash_id ):
    # Look up the service-side filename for this ( service, file ) pair.
    # Raises HydrusExceptions.DataMissing if no row exists.
    
    row = self._Execute( 'SELECT filename FROM service_filenames WHERE service_id = ? AND hash_id = ?;', ( service_id, hash_id ) ).fetchone()
    
    if row is None:
        
        raise HydrusExceptions.DataMissing( 'Service filename not found!' )
        
    
    ( filename, ) = row
    
    return filename
    
def _GetServiceFilenames( self, service_key, hashes ):
    # Return the sorted list of service filenames recorded for the given hashes.
    
    service_id = self.modules_services.GetServiceId( service_key )
    hash_ids = self.modules_hashes_local_cache.GetHashIds( hashes )
    
    filenames = [ filename for ( filename, ) in self._Execute( 'SELECT filename FROM service_filenames WHERE service_id = ? AND hash_id IN ' + HydrusData.SplayListForDB( hash_ids ) + ';', ( service_id, ) ) ]
    
    filenames.sort()
    
    return filenames
    
def _GetServiceInfo( self, service_key ):
    # Work out which info types are meaningful for this service's type and
    # fetch them via _GetServiceInfoSpecific.
    
    service_id = self.modules_services.GetServiceId( service_key )
    
    service = self.modules_services.GetService( service_id )
    
    service_type = service.GetServiceType()
    
    if service_type in ( HC.COMBINED_LOCAL_FILE, HC.LOCAL_FILE_DOMAIN, HC.FILE_REPOSITORY ):
        
        info_types = { HC.SERVICE_INFO_NUM_FILES, HC.SERVICE_INFO_NUM_VIEWABLE_FILES, HC.SERVICE_INFO_TOTAL_SIZE, HC.SERVICE_INFO_NUM_DELETED_FILES }
        
    elif service_type in ( HC.LOCAL_FILE_TRASH_DOMAIN, HC.IPFS ):
        
        # trash and ipfs want the same file counts, no deletion record
        info_types = { HC.SERVICE_INFO_NUM_FILES, HC.SERVICE_INFO_NUM_VIEWABLE_FILES, HC.SERVICE_INFO_TOTAL_SIZE }
        
    elif service_type == HC.LOCAL_TAG:
        
        info_types = { HC.SERVICE_INFO_NUM_FILES, HC.SERVICE_INFO_NUM_TAGS, HC.SERVICE_INFO_NUM_MAPPINGS }
        
    elif service_type == HC.TAG_REPOSITORY:
        
        info_types = { HC.SERVICE_INFO_NUM_FILES, HC.SERVICE_INFO_NUM_TAGS, HC.SERVICE_INFO_NUM_MAPPINGS, HC.SERVICE_INFO_NUM_DELETED_MAPPINGS }
        
    elif service_type in ( HC.LOCAL_RATING_LIKE, HC.LOCAL_RATING_NUMERICAL ):
        
        info_types = { HC.SERVICE_INFO_NUM_FILES }
        
    elif service_type == HC.LOCAL_BOORU:
        
        info_types = { HC.SERVICE_INFO_NUM_SHARES }
        
    else:
        
        info_types = set()
        
    
    return self._GetServiceInfoSpecific( service_id, service_type, info_types )
    
def _GetServiceInfoSpecific( self, service_id, service_type, info_types, calculate_missing = True ):
    # Fetch cached service_info values for the requested info_types, computing
    # any that are missing (and usually writing them back to the cache).
    # Returns { info_type : info }.
    
    info_types = set( info_types )
    
    # grab whatever is already cached
    results = { info_type : info for ( info_type, info ) in self._Execute( 'SELECT info_type, info FROM service_info WHERE service_id = ? AND info_type IN ' + HydrusData.SplayListForDB( info_types ) + ';', ( service_id, ) ) }
    
    if len( results ) != len( info_types ) and calculate_missing:
        
        info_types_hit = list( results.keys() )
        
        info_types_missed = info_types.difference( info_types_hit )
        
        for info_type in info_types_missed:
            
            info = None
            
            result = None
            
            save_it = True
            
            if service_type in HC.FILE_SERVICES:
                
                # pending/petitioned counts are not cached (save_it False) --
                # presumably because they change too frequently to be worth it
                if info_type in ( HC.SERVICE_INFO_NUM_PENDING_FILES, HC.SERVICE_INFO_NUM_PETITIONED_FILES ):
                    
                    save_it = False
                    
                
                if info_type == HC.SERVICE_INFO_NUM_FILES:
                    
                    info = self.modules_files_storage.GetCurrentFilesCount( service_id )
                    
                elif info_type == HC.SERVICE_INFO_NUM_VIEWABLE_FILES:
                    
                    info = self.modules_files_storage.GetCurrentFilesCount( service_id, only_viewable = True )
                    
                elif info_type == HC.SERVICE_INFO_TOTAL_SIZE:
                    
                    info = self.modules_files_storage.GetCurrentFilesTotalSize( service_id )
                    
                elif info_type == HC.SERVICE_INFO_NUM_DELETED_FILES:
                    
                    info = self.modules_files_storage.GetDeletedFilesCount( service_id )
                    
                elif info_type == HC.SERVICE_INFO_NUM_PENDING_FILES:
                    
                    info = self.modules_files_storage.GetPendingFilesCount( service_id )
                    
                elif info_type == HC.SERVICE_INFO_NUM_PETITIONED_FILES:
                    
                    info = self.modules_files_storage.GetPetitionedFilesCount( service_id )
                    
                elif info_type == HC.SERVICE_INFO_NUM_INBOX:
                    
                    info = self.modules_files_storage.GetCurrentFilesInboxCount( service_id )
                    
                
            elif service_type in HC.REAL_TAG_SERVICES:
                
                # sibling/parent petition counts are likewise left uncached
                if info_type in ( HC.SERVICE_INFO_NUM_PENDING_TAG_SIBLINGS, HC.SERVICE_INFO_NUM_PETITIONED_TAG_SIBLINGS, HC.SERVICE_INFO_NUM_PENDING_TAG_PARENTS, HC.SERVICE_INFO_NUM_PETITIONED_TAG_PARENTS ):
                    
                    save_it = False
                    
                
                if info_type == HC.SERVICE_INFO_NUM_FILES:
                    
                    info = self.modules_mappings_storage.GetCurrentFilesCount( service_id )
                    
                elif info_type == HC.SERVICE_INFO_NUM_TAGS:
                    
                    info = self.modules_tag_search.GetTagCount( self.modules_services.combined_file_service_id, service_id )
                    
                elif info_type == HC.SERVICE_INFO_NUM_MAPPINGS:
                    
                    info = self.modules_mappings_counts.GetTotalCurrentCount( ClientTags.TAG_DISPLAY_STORAGE, self.modules_services.combined_file_service_id, service_id )
                    
                elif info_type == HC.SERVICE_INFO_NUM_PENDING_MAPPINGS:
                    
                    # since pending is nearly always far smaller rowcount than current, if I pull this from a/c table, it is a HUGE waste of time and not faster than counting the raw table rows!
                    info = self.modules_mappings_storage.GetPendingMappingsCount( service_id )
                    
                elif info_type == HC.SERVICE_INFO_NUM_DELETED_MAPPINGS:
                    
                    # since pending is nearly always far smaller rowcount than current, if I pull this from a/c table, it is a HUGE waste of time and not faster than counting the raw table rows!
                    info = self.modules_mappings_storage.GetDeletedMappingsCount( service_id )
                    
                elif info_type == HC.SERVICE_INFO_NUM_PETITIONED_MAPPINGS:
                    
                    # since pending is nearly always far smaller rowcount than current, if I pull this from a/c table, it is a HUGE waste of time and not faster than counting the raw table rows!
                    info = self.modules_mappings_storage.GetPetitionedMappingsCount( service_id )
                    
                elif info_type == HC.SERVICE_INFO_NUM_PENDING_TAG_SIBLINGS:
                    
                    ( info, ) = self._Execute( 'SELECT COUNT( * ) FROM tag_sibling_petitions WHERE service_id = ? AND status = ?;', ( service_id, HC.CONTENT_STATUS_PENDING ) ).fetchone()
                    
                elif info_type == HC.SERVICE_INFO_NUM_PETITIONED_TAG_SIBLINGS:
                    
                    ( info, ) = self._Execute( 'SELECT COUNT( * ) FROM tag_sibling_petitions WHERE service_id = ? AND status = ?;', ( service_id, HC.CONTENT_STATUS_PETITIONED ) ).fetchone()
                    
                elif info_type == HC.SERVICE_INFO_NUM_PENDING_TAG_PARENTS:
                    
                    ( info, ) = self._Execute( 'SELECT COUNT( * ) FROM tag_parent_petitions WHERE service_id = ? AND status = ?;', ( service_id, HC.CONTENT_STATUS_PENDING ) ).fetchone()
                    
                elif info_type == HC.SERVICE_INFO_NUM_PETITIONED_TAG_PARENTS:
                    
                    ( info, ) = self._Execute( 'SELECT COUNT( * ) FROM tag_parent_petitions WHERE service_id = ? AND status = ?;', ( service_id, HC.CONTENT_STATUS_PETITIONED ) ).fetchone()
                    
                
            elif service_type in ( HC.LOCAL_RATING_LIKE, HC.LOCAL_RATING_NUMERICAL ):
                
                if info_type == HC.SERVICE_INFO_NUM_FILES:
                    
                    ( info, ) = self._Execute( 'SELECT COUNT( * ) FROM local_ratings WHERE service_id = ?;', ( service_id, ) ).fetchone()
                    
                
            elif service_type == HC.LOCAL_BOORU:
                
                if info_type == HC.SERVICE_INFO_NUM_SHARES:
                    
                    ( info, ) = self._Execute( 'SELECT COUNT( * ) FROM yaml_dumps WHERE dump_type = ?;', ( ClientDBSerialisable.YAML_DUMP_ID_LOCAL_BOORU, ) ).fetchone()
                    
                
            
            # any info type not computed above defaults to zero
            if info is None:
                
                info = 0
                
            
            if save_it:
                
                self._Execute( 'INSERT INTO service_info ( service_id, info_type, info ) VALUES ( ?, ?, ? );', ( service_id, info_type, info ) )
                
            
            results[ info_type ] = info
            
        
    
    return results
    
def _GetSiteId( self, name ):
    # Fetch the id for this imageboard site name, inserting a new row when the
    # name has not been seen before.
    
    row = self._Execute( 'SELECT site_id FROM imageboard_sites WHERE name = ?;', ( name, ) ).fetchone()
    
    if row is not None:
        
        ( site_id, ) = row
        
        return site_id
        
    
    self._Execute( 'INSERT INTO imageboard_sites ( name ) VALUES ( ? );', ( name, ) )
    
    return self._GetLastRowId()
    
def _GetTagIdsFromNamespaceIds( self, leaf: ClientDBServices.FileSearchContextLeaf, namespace_ids: typing.Collection[ int ], job_key = None ):
    # Return the set of tag ids, within the leaf's ( file, tag ) search domain,
    # whose namespace id is in namespace_ids. Cancellable via job_key, in which
    # case an empty set is returned.
    
    if len( namespace_ids ) == 0:
        
        return set()
        
    
    final_result_tag_ids = set()
    
    with self._MakeTemporaryIntegerTable( namespace_ids, 'namespace_id' ) as temp_namespace_ids_table_name:
        
        tags_table_name = self.modules_tag_search.GetTagsTableName( leaf.file_service_id, leaf.tag_service_id )
        
        if len( namespace_ids ) == 1:
            
            # single namespace: a simple WHERE avoids the temp-table join
            ( namespace_id, ) = namespace_ids
            
            cursor = self._Execute( 'SELECT tag_id FROM {} WHERE namespace_id = ?;'.format( tags_table_name ), ( namespace_id, ) )
            
        else:
            
            # temp namespaces to tags
            cursor = self._Execute( 'SELECT tag_id FROM {} CROSS JOIN {} USING ( namespace_id );'.format( temp_namespace_ids_table_name, tags_table_name ) )
            
        
        cancelled_hook = None
        
        if job_key is not None:
            
            cancelled_hook = job_key.IsCancelled
            
        
        result_tag_ids = self._STS( HydrusDB.ReadFromCancellableCursor( cursor, 128, cancelled_hook = cancelled_hook ) )
        
        if job_key is not None:
            
            if job_key.IsCancelled():
                
                return set()
                
            
        
        final_result_tag_ids.update( result_tag_ids )
        
    
    return final_result_tag_ids
    
def _GetTagIdsFromNamespaceIdsSubtagIds( self, file_service_id: int, tag_service_id: int, namespace_ids: typing.Collection[ int ], subtag_ids: typing.Collection[ int ], job_key = None ):
    # Resolve the tag ids matching any ( namespace, subtag ) pairing from the
    # given id pools by loading both into temp tables and delegating.
    
    if len( namespace_ids ) == 0 or len( subtag_ids ) == 0:
        
        return set()
        
    
    with self._MakeTemporaryIntegerTable( namespace_ids, 'namespace_id' ) as temp_namespace_ids_table_name:
        
        with self._MakeTemporaryIntegerTable( subtag_ids, 'subtag_id' ) as temp_subtag_ids_table_name:
            
            return self._GetTagIdsFromNamespaceIdsSubtagIdsTables( file_service_id, tag_service_id, temp_namespace_ids_table_name, temp_subtag_ids_table_name, job_key = job_key )
            
        
    
def _GetTagIdsFromNamespaceIdsSubtagIdsTables( self, file_service_id: int, tag_service_id: int, namespace_ids_table_name: str, subtag_ids_table_name: str, job_key = None ):
    # Return tag ids whose ( namespace_id, subtag_id ) both appear in the given
    # temp tables, searched across the relevant tag service domains.
    # Cancellable via job_key, in which case an empty set is returned.
    
    final_result_tag_ids = set()
    
    if tag_service_id == self.modules_services.combined_tag_service_id:
        
        # 'all known tags': fan out over every real tag service
        search_tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
        
    else:
        
        search_tag_service_ids = ( tag_service_id, )
        
    
    for search_tag_service_id in search_tag_service_ids:
        
        tags_table_name = self.modules_tag_search.GetTagsTableName( file_service_id, search_tag_service_id )
        
        # temp subtags to tags to temp namespaces
        cursor = self._Execute( 'SELECT tag_id FROM {} CROSS JOIN {} USING ( subtag_id ) CROSS JOIN {} USING ( namespace_id );'.format( subtag_ids_table_name, tags_table_name, namespace_ids_table_name ) )
        
        cancelled_hook = None
        
        if job_key is not None:
            
            cancelled_hook = job_key.IsCancelled
            
        
        result_tag_ids = self._STS( HydrusDB.ReadFromCancellableCursor( cursor, 128, cancelled_hook = cancelled_hook ) )
        
        if job_key is not None:
            
            if job_key.IsCancelled():
                
                return set()
                
            
        
        final_result_tag_ids.update( result_tag_ids )
        
    
    return final_result_tag_ids
    
def _GetTagIdsFromSubtagIds( self, file_service_id: int, tag_service_id: int, subtag_ids: typing.Collection[ int ], job_key = None ):
    # Convenience wrapper: dump the subtag ids into a temp table and delegate
    # to the table-based lookup.
    
    if not subtag_ids:
        
        return set()
        
    
    with self._MakeTemporaryIntegerTable( subtag_ids, 'subtag_id' ) as temp_subtag_ids_table_name:
        
        return self._GetTagIdsFromSubtagIdsTable( file_service_id, tag_service_id, temp_subtag_ids_table_name, job_key = job_key )
        
    
def _GetTagIdsFromSubtagIdsTable( self, file_service_id: int, tag_service_id: int, subtag_ids_table_name: str, job_key = None ):
    # Return tag ids whose subtag_id appears in the given temp table, searched
    # across the relevant tag service domains. Cancellable via job_key, in
    # which case an empty set is returned.
    
    final_result_tag_ids = set()
    
    if tag_service_id == self.modules_services.combined_tag_service_id:
        
        # 'all known tags': fan out over every real tag service
        search_tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
        
    else:
        
        search_tag_service_ids = ( tag_service_id, )
        
    
    for search_tag_service_id in search_tag_service_ids:
        
        tags_table_name = self.modules_tag_search.GetTagsTableName( file_service_id, search_tag_service_id )
        
        # temp subtags to tags
        cursor = self._Execute( 'SELECT tag_id FROM {} CROSS JOIN {} USING ( subtag_id );'.format( subtag_ids_table_name, tags_table_name ) )
        
        cancelled_hook = None
        
        if job_key is not None:
            
            cancelled_hook = job_key.IsCancelled
            
        
        result_tag_ids = self._STS( HydrusDB.ReadFromCancellableCursor( cursor, 128, cancelled_hook = cancelled_hook ) )
        
        if job_key is not None:
            
            if job_key.IsCancelled():
                
                return set()
                
            
        
        final_result_tag_ids.update( result_tag_ids )
        
    
    return final_result_tag_ids
    
def _GetTrashHashes( self, limit = None, minimum_age = None ):
    # Return hashes of files in the trash, optionally capped at 'limit' and/or
    # restricted to files deleted at least 'minimum_age' seconds ago. Files
    # protected by the file delete lock are filtered out.
    
    if limit is None:
        
        limit_phrase = ''
        
    else:
        
        limit_phrase = ' LIMIT ' + str( limit )
        
    
    # note these two phrases are mutually exclusive: with no minimum age we
    # order oldest-first instead of filtering by timestamp
    if minimum_age is None:
        
        age_phrase = ' ORDER BY timestamp ASC' # when deleting until trash is small enough, let's delete oldest first
        
    else:
        
        timestamp_cutoff = HydrusData.GetNow() - minimum_age
        
        age_phrase = ' WHERE timestamp < ' + str( timestamp_cutoff )
        
    
    current_files_table_name = ClientDBFilesStorage.GenerateFilesTableName( self.modules_services.trash_service_id, HC.CONTENT_STATUS_CURRENT )
    
    hash_ids = self._STS( self._Execute( 'SELECT hash_id FROM {}{}{};'.format( current_files_table_name, age_phrase, limit_phrase ) ) )
    
    # respect the file delete lock--some files may not be cleared yet
    hash_ids = self._FilterForFileDeleteLock( self.modules_services.trash_service_id, hash_ids )
    
    if HG.db_report_mode:
        
        message = 'When asked for '
        
        if limit is None:
            
            message += 'all the'
            
        else:
            
            message += 'at most ' + HydrusData.ToHumanInt( limit )
            
        
        message += ' trash files,'
        
        if minimum_age is not None:
            
            message += ' with minimum age ' + ClientData.TimestampToPrettyTimeDelta( timestamp_cutoff, just_now_threshold = 0 ) + ','
            
        
        message += ' I found ' + HydrusData.ToHumanInt( len( hash_ids ) ) + '.'
        
        HydrusData.ShowText( message )
        
    
    return self.modules_hashes_local_cache.GetHashes( hash_ids )
    
def _GetURLStatuses( self, url ) -> typing.List[ ClientImportFiles.FileImportStatus ]:
    # Map a URL to the import statuses of any files already known to have it.
    # All normalised/search variants of the URL are checked.
    
    search_urls = ClientNetworkingFunctions.GetSearchURLs( url )
    
    hash_ids = set()
    
    for search_url in search_urls:
        
        results = self._STS( self._Execute( 'SELECT hash_id FROM url_map NATURAL JOIN urls WHERE url = ?;', ( search_url, ) ) )
        
        hash_ids.update( results )
        
    
    try:
        
        results = [ self._GetHashIdStatus( hash_id, prefix = 'url recognised' ) for hash_id in hash_ids ]
        
    except Exception:
        
        # best-effort: a bad status row should not break url lookup, so report
        # 'nothing known'. was a bare 'except:', which also swallowed
        # KeyboardInterrupt/SystemExit--narrowed to Exception.
        return []
        
    
    return results
    
def _GetWithAndWithoutTagsForFilesFileCount( self, status, tag_service_id, with_these_tag_ids, without_these_tag_ids, hash_ids, hash_ids_table_name, file_service_ids_to_hash_ids ):
    # Count how many of hash_ids carry at least one of with_these_tag_ids and
    # none of without_these_tag_ids for the given mapping status, summing over
    # the per-file-service batches so each can use its cache-specific tables.
    
    count = 0
    
    with self._MakeTemporaryIntegerTable( with_these_tag_ids, 'tag_id' ) as temp_with_these_tag_ids_table_name:
        
        with self._MakeTemporaryIntegerTable( without_these_tag_ids, 'tag_id' ) as temp_without_these_tag_ids_table_name:
            
            for ( file_service_id, batch_of_hash_ids ) in file_service_ids_to_hash_ids.items():
                
                if len( batch_of_hash_ids ) == len( hash_ids ):
                    
                    # batch covers the whole set--reuse the caller's hash table
                    subcount = self._GetWithAndWithoutTagsForFilesFileCountFileService( status, file_service_id, tag_service_id, with_these_tag_ids, temp_with_these_tag_ids_table_name, without_these_tag_ids, temp_without_these_tag_ids_table_name, hash_ids, hash_ids_table_name )
                    
                else:
                    
                    with self._MakeTemporaryIntegerTable( batch_of_hash_ids, 'hash_id' ) as temp_batch_hash_ids_table_name:
                        
                        subcount = self._GetWithAndWithoutTagsForFilesFileCountFileService( status, file_service_id, tag_service_id, with_these_tag_ids, temp_with_these_tag_ids_table_name, without_these_tag_ids, temp_without_these_tag_ids_table_name, batch_of_hash_ids, temp_batch_hash_ids_table_name )
                        
                    
                
                count += subcount
                
            
        
    
    return count
    
def _GetWithAndWithoutTagsForFilesFileCountFileService( self, status, file_service_id, tag_service_id, with_these_tag_ids, with_these_tag_ids_table_name, without_these_tag_ids, without_these_tag_ids_weight_unused_name_note_removed = None, hash_ids = None, hash_ids_table_name = None ):
    pass
    
def _GetWithAndWithoutTagsFileCountCombined( self, tag_service_id, with_these_tag_ids, without_these_tag_ids ):
    # Count, over the combined-files domain, files that have at least one of
    # with_these_tag_ids and none of without_these_tag_ids, separately for
    # current and pending mappings. Returns ( current_count, pending_count ).
    
    ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = ClientDBMappingsStorage.GenerateMappingsTableNames( tag_service_id )
    
    statuses_to_count = collections.Counter()
    
    ( current_with_tag_ids, current_with_tag_ids_weight, pending_with_tag_ids, pending_with_tag_ids_weight ) = self.modules_mappings_counts.GetCurrentPendingPositiveCountsAndWeights( ClientTags.TAG_DISPLAY_STORAGE, self.modules_services.combined_file_service_id, tag_service_id, with_these_tag_ids )
    ( current_without_tag_ids, current_without_tag_ids_weight, pending_without_tag_ids, pending_without_tag_ids_weight ) = self.modules_mappings_counts.GetCurrentPendingPositiveCountsAndWeights( ClientTags.TAG_DISPLAY_STORAGE, self.modules_services.combined_file_service_id, tag_service_id, without_these_tag_ids )
    
    jobs = []
    
    jobs.append( ( HC.CONTENT_STATUS_CURRENT, current_mappings_table_name, current_with_tag_ids, current_with_tag_ids_weight, current_without_tag_ids, current_without_tag_ids_weight ) )
    jobs.append( ( HC.CONTENT_STATUS_PENDING, pending_mappings_table_name, pending_with_tag_ids, pending_with_tag_ids_weight, pending_without_tag_ids, pending_without_tag_ids_weight ) )
    
    for ( status, mappings_table_name, with_tag_ids, with_tag_ids_weight, without_tag_ids, without_tag_ids_weight ) in jobs:
        
        # no 'with' mappings at all--count stays zero for this status
        if with_tag_ids_weight == 0:
            
            continue
            
        
        # a single 'with' tag with nothing excluded: the a/c weight IS the count
        if without_tag_ids_weight == 0 and len( with_tag_ids ) == 1:
            
            statuses_to_count[ status ] = with_tag_ids_weight
            
            continue
            
        
        # multiple 'with' tags will overlap on some files, so discount the
        # upper-bound weight when choosing a query plan below
        if len( with_tag_ids ) > 1:
            
            with_tag_ids_weight = int( with_tag_ids_weight * 0.75 )
            
        
        # ultimately here, we are doing "delete all display mappings with hash_ids that have a storage mapping for a removee tag and no storage mappings for a keep tag
        # in order to reduce overhead, we go full meme and do a bunch of different situations
        
        with self._MakeTemporaryIntegerTable( [], 'tag_id' ) as temp_with_tag_ids_table_name:
            
            with self._MakeTemporaryIntegerTable( [], 'tag_id' ) as temp_without_tag_ids_table_name:
                
                if len( with_tag_ids ) == 1:
                    
                    ( with_tag_id, ) = with_tag_ids
                    
                    select_with_hash_ids_on_storage = 'SELECT hash_id FROM {} WHERE tag_id = {}'.format( mappings_table_name, with_tag_id )
                    
                else:
                    
                    self._ExecuteMany( 'INSERT INTO {} ( tag_id ) VALUES ( ? );'.format( temp_with_tag_ids_table_name ), ( ( with_tag_id, ) for with_tag_id in with_tag_ids ) )
                    
                    # temp tags to mappings
                    select_with_hash_ids_on_storage = 'SELECT DISTINCT hash_id FROM {} CROSS JOIN {} USING ( tag_id )'.format( temp_with_tag_ids_table_name, mappings_table_name )
                    
                
                if without_tag_ids_weight == 0:
                    
                    table_phrase = '({})'.format( select_with_hash_ids_on_storage )
                    
                else:
                    
                    # WARNING, WARNING: Big Brain Query, potentially great/awful
                    # note that in the 'clever/file join' situation, the number of total mappings is many, but we are deleting a few
                    # we want to precisely scan the status of the potential hashes to delete, not scan through them all to see what not to do
                    # therefore, we do NOT EXISTS, which just scans the parts, rather than NOT IN, which does the whole query and then checks against all results
                    
                    if len( without_tag_ids ) == 1:
                        
                        ( without_tag_id, ) = without_tag_ids
                        
                        if ClientDBMappingsStorage.DoingAFileJoinTagSearchIsFaster( with_tag_ids_weight, without_tag_ids_weight ):
                            
                            hash_id_not_in_storage_without = 'NOT EXISTS ( SELECT 1 FROM {} as mt2 WHERE mt1.hash_id = mt2.hash_id and tag_id = {} )'.format( mappings_table_name, without_tag_id )
                            
                        else:
                            
                            hash_id_not_in_storage_without = 'hash_id NOT IN ( SELECT hash_id FROM {} WHERE tag_id = {} )'.format( mappings_table_name, without_tag_id )
                            
                        
                    else:
                        
                        self._ExecuteMany( 'INSERT INTO {} ( tag_id ) VALUES ( ? );'.format( temp_without_tag_ids_table_name ), ( ( without_tag_id, ) for without_tag_id in without_tag_ids ) )
                        
                        if ClientDBMappingsStorage.DoingAFileJoinTagSearchIsFaster( with_tag_ids_weight, without_tag_ids_weight ):
                            
                            # (files to) mappings to temp tags
                            hash_id_not_in_storage_without = 'NOT EXISTS ( SELECT 1 FROM {} as mt2 CROSS JOIN {} USING ( tag_id ) WHERE mt1.hash_id = mt2.hash_id )'.format( mappings_table_name, temp_without_tag_ids_table_name )
                            
                        else:
                            
                            # temp tags to mappings
                            hash_id_not_in_storage_without = 'hash_id NOT IN ( SELECT DISTINCT hash_id FROM {} CROSS JOIN {} USING ( tag_id ) )'.format( temp_without_tag_ids_table_name, mappings_table_name )
                            
                        
                    
                    table_phrase = '({}) as mt1 WHERE {}'.format( select_with_hash_ids_on_storage, hash_id_not_in_storage_without )
                    
                
                query = 'SELECT COUNT ( * ) FROM {};'.format( table_phrase )
                
                ( count, ) = self._Execute( query ).fetchone()
                
                statuses_to_count[ status ] = count
                
            
        
    
    current_count = statuses_to_count[ HC.CONTENT_STATUS_CURRENT ]
    pending_count = statuses_to_count[ HC.CONTENT_STATUS_PENDING ]
    
    return ( current_count, pending_count )
    
def _GroupHashIdsByTagCachedFileServiceId( self, hash_ids, hash_ids_table_name, hash_ids_to_current_file_service_ids = None ):
    # Partition hash_ids into non-overlapping groups keyed by a file service
    # that has a specific mapping cache covering them, preferring the service
    # that covers the most hashes. Hashes on no cached service fall back to the
    # combined file service. Returns { file_service_id : set of hash_ids }.
    
    # when we would love to do a fast cache lookup, it is useful to know if all the hash_ids are on one or two common file domains
    
    if hash_ids_to_current_file_service_ids is None:
        
        hash_ids_to_current_file_service_ids = self.modules_files_storage.GetHashIdsToCurrentServiceIds( hash_ids_table_name )
        
    
    cached_file_service_ids = set( self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES ) )
    
    file_service_ids_to_hash_ids = collections.defaultdict( set )
    
    for ( hash_id, file_service_ids ) in hash_ids_to_current_file_service_ids.items():
        
        for file_service_id in file_service_ids:
            
            if file_service_id in cached_file_service_ids:
                
                file_service_ids_to_hash_ids[ file_service_id ].add( hash_id )
                
            
        
    
    # ok, we have our map, let's sort it out
    
    # sorting by most comprehensive service_id first
    file_service_ids_to_value = sorted( ( ( file_service_id, len( hash_ids ) ) for ( file_service_id, hash_ids ) in file_service_ids_to_hash_ids.items() ), key = lambda p: p[1], reverse = True )
    
    seen_hash_ids = set()
    
    # make our mapping non-overlapping
    
    for pair in file_service_ids_to_value:
        
        file_service_id = pair[0]
        
        this_services_hash_ids_set = file_service_ids_to_hash_ids[ file_service_id ]
        
        if len( seen_hash_ids ) > 0:
            
            # drop hashes already claimed by a more comprehensive service
            this_services_hash_ids_set.difference_update( seen_hash_ids )
            
        
        if len( this_services_hash_ids_set ) == 0:
            
            del file_service_ids_to_hash_ids[ file_service_id ]
            
        else:
            
            seen_hash_ids.update( this_services_hash_ids_set )
            
        
    
    # anything left over goes to the combined domain
    unmapped_hash_ids = set( hash_ids ).difference( seen_hash_ids )
    
    if len( unmapped_hash_ids ) > 0:
        
        file_service_ids_to_hash_ids[ self.modules_services.combined_file_service_id ] = unmapped_hash_ids
        
    
    return file_service_ids_to_hash_ids
    
def _ImportFile( self, file_import_job: ClientImportFiles.FileImportJob ):
    """
    Commit a pre-parsed file import job to the database.
    
    If the hash is already known, the existing import status is returned
    untouched. Otherwise all metadata rows are written, the file is added to
    its destination local file service(s), and it is archived or inboxed
    according to the job's file import options.
    
    Returns a ClientImportFiles.FileImportStatus describing the outcome.
    """
    if HG.file_import_report_mode:
        HydrusData.ShowText( 'File import job starting db job' )
    hash = file_import_job.GetHash()
    hash_id = self.modules_hashes_local_cache.GetHashId( hash )
    file_import_status = self._GetHashIdStatus( hash_id, prefix = 'file recognised by database' )
    if not file_import_status.AlreadyInDB():
        if HG.file_import_report_mode:
            HydrusData.ShowText( 'File import job adding new file' )
        ( size, mime, width, height, duration, num_frames, has_audio, num_words ) = file_import_job.GetFileInfo()
        if HG.file_import_report_mode:
            HydrusData.ShowText( 'File import job adding file info row' )
        # overwrite = True: replace any stale metadata row from a previous import attempt
        self.modules_files_metadata_basic.AddFilesInfo( [ ( hash_id, size, mime, width, height, duration, num_frames, has_audio, num_words ) ], overwrite = True )
        #
        perceptual_hashes = file_import_job.GetPerceptualHashes()
        if perceptual_hashes is not None:
            if HG.file_import_report_mode:
                HydrusData.ShowText( 'File import job associating perceptual_hashes' )
            self.modules_similar_files.AssociatePerceptualHashes( hash_id, perceptual_hashes )
        if HG.file_import_report_mode:
            HydrusData.ShowText( 'File import job adding file to local file service' )
        #
        ( md5, sha1, sha512 ) = file_import_job.GetExtraHashes()
        self.modules_hashes.SetExtraHashes( hash_id, md5, sha1, sha512 )
        #
        self.modules_files_metadata_basic.SetHasICCProfile( hash_id, file_import_job.HasICCProfile() )
        #
        pixel_hash = file_import_job.GetPixelHash()
        if pixel_hash is None:
            # clear any stale pixel hash from a previous version of this file row
            self.modules_similar_files.ClearPixelHash( hash_id )
        else:
            pixel_hash_id = self.modules_hashes.GetHashId( pixel_hash )
            self.modules_similar_files.SetPixelHash( hash_id, pixel_hash_id )
        #
        file_modified_timestamp = file_import_job.GetFileModifiedTimestamp()
        self._Execute( 'REPLACE INTO file_modified_timestamps ( hash_id, file_modified_timestamp ) VALUES ( ?, ? );', ( hash_id, file_modified_timestamp ) )
        #
        file_import_options = file_import_job.GetFileImportOptions()
        file_info_manager = ClientMediaManagers.FileInfoManager( hash_id, hash, size, mime, width, height, duration, num_frames, has_audio, num_words )
        now = HydrusData.GetNow()
        destination_location_context = file_import_options.GetDestinationLocationContext()
        destination_location_context.FixMissingServices( ClientLocation.ValidLocalDomainsFilter )
        if not destination_location_context.IncludesCurrent():
            # no valid destination remains; fall back to the lowest-id local file domain
            service_ids = self.modules_services.GetServiceIds( ( HC.LOCAL_FILE_DOMAIN, ) )
            service_id = min( service_ids )
            service_key = self.modules_services.GetService( service_id ).GetServiceKey()
            destination_location_context = ClientLocation.LocationContext( current_service_keys = ( service_key, ) )
        for destination_file_service_key in destination_location_context.current_service_keys:
            destination_service_id = self.modules_services.GetServiceId( destination_file_service_key )
            self._AddFiles( destination_service_id, [ ( hash_id, now ) ] )
            content_update = HydrusData.ContentUpdate( HC.CONTENT_TYPE_FILES, HC.CONTENT_UPDATE_ADD, ( file_info_manager, now ) )
            self.pub_content_updates_after_commit( { destination_file_service_key : [ content_update ] } )
        #
        if file_import_options.AutomaticallyArchives():
            if HG.file_import_report_mode:
                HydrusData.ShowText( 'File import job archiving new file' )
            self._ArchiveFiles( ( hash_id, ) )
            content_update = HydrusData.ContentUpdate( HC.CONTENT_TYPE_FILES, HC.CONTENT_UPDATE_ARCHIVE, ( hash, ) )
            self.pub_content_updates_after_commit( { CC.COMBINED_LOCAL_FILE_SERVICE_KEY : [ content_update ] } )
        else:
            if HG.file_import_report_mode:
                HydrusData.ShowText( 'File import job inboxing new file' )
            self._InboxFiles( ( hash_id, ) )
        #
        if self._weakref_media_result_cache.HasFile( hash_id ):
            # a cached media result for this file is now stale; drop it and notify the UI
            self._weakref_media_result_cache.DropMediaResult( hash_id, hash )
            self._controller.pub( 'new_file_info', { hash } )
        #
        file_import_status = ClientImportFiles.FileImportStatus( CC.STATUS_SUCCESSFUL_AND_NEW, hash, mime = mime )
    if HG.file_import_report_mode:
        HydrusData.ShowText( 'File import job done at db level, final status: {}'.format( file_import_status.ToString() ) )
    return file_import_status
def _ImportUpdate( self, update_network_bytes, update_hash, mime ):
    """
    Store a repository update file's bytes on disk and register it with the
    local update file service. The bytes are parse-checked first; a failure
    is reported and re-raised.
    """
    try:
        # sanity-check that the payload is a parseable serialisable object
        HydrusSerialisable.CreateFromNetworkBytes( update_network_bytes )
    except:
        HydrusData.ShowText( 'Was unable to parse an incoming update!' )
        raise
    hash_id = self.modules_hashes_local_cache.GetHashId( update_hash )
    size = len( update_network_bytes )
    # update files are not media, so dimension/duration metadata does not apply
    ( width, height, duration, num_frames, has_audio, num_words ) = ( None, None, None, None, None, None )
    client_files_manager = self._controller.client_files_manager
    client_files_manager.LocklessAddFileFromBytes( update_hash, mime, update_network_bytes )
    self.modules_files_metadata_basic.AddFilesInfo( [ ( hash_id, size, mime, width, height, duration, num_frames, has_audio, num_words ) ], overwrite = True )
    self._AddFiles( self.modules_services.local_update_service_id, [ ( hash_id, HydrusData.GetNow() ) ] )
def _InboxFiles( self, hash_ids ):
    """
    Move the given files into the inbox and bump the per-service inbox counts
    in the service_info cache for every service holding them.
    """
    actually_inboxed_hash_ids = self.modules_files_metadata_basic.InboxFiles( hash_ids )
    if len( actually_inboxed_hash_ids ) == 0:
        return
    service_ids_to_counts = self.modules_files_storage.GetServiceIdCounts( actually_inboxed_hash_ids )
    if len( service_ids_to_counts ) == 0:
        return
    count_updates = [ ( count, service_id, HC.SERVICE_INFO_NUM_INBOX ) for ( service_id, count ) in service_ids_to_counts.items() ]
    self._ExecuteMany( 'UPDATE service_info SET info = info + ? WHERE service_id = ? AND info_type = ?;', count_updates )
def _InitCaches( self ):
    """
    Prepare db caches at boot and report progress on the splash screen.
    
    This occurs after db update, so it is safe to reference things changed
    there, but it cannot be relied upon during db update itself.
    """
    splash_status = HG.client_controller.frame_splash_status
    splash_status.SetText( 'preparing db caches' )
    splash_status.SetSubtext( 'inbox' )
def _InitExternalDatabases( self ):
self._db_filenames[ 'external_caches' ] = 'client.caches.db'
self._db_filenames[ 'external_mappings' ] = 'client.mappings.db'
self._db_filenames[ 'external_master' ] = 'client.master.db'
def _FilterInboxHashes( self, hashes: typing.Collection[ bytes ] ):
hash_ids_to_hashes = self.modules_hashes_local_cache.GetHashIdsToHashes( hashes = hashes )
inbox_hashes = { hash for ( hash_id, hash ) in hash_ids_to_hashes.items() if hash_id in self.modules_files_metadata_basic.inbox_hash_ids }
return inbox_hashes
def _IsAnOrphan( self, test_type, possible_hash ):
if self.modules_hashes.HasHash( possible_hash ):
hash = possible_hash
hash_id = self.modules_hashes_local_cache.GetHashId( hash )
if test_type == 'file':
orphan_hash_ids = self.modules_files_storage.FilterOrphanFileHashIds( ( hash_id, ) )
return len( orphan_hash_ids ) == 1
elif test_type == 'thumbnail':
orphan_hash_ids = self.modules_files_storage.FilterOrphanThumbnailHashIds( ( hash_id, ) )
return len( orphan_hash_ids ) == 1
else:
return True
def _LoadModules( self ):
    """
    Construct every client database module in dependency order and register
    each in self._modules.
    
    Order matters: later constructions are handed the already-built modules
    they depend on, so do not reorder without checking constructor arguments.
    """
    self.modules_db_maintenance = ClientDBMaintenance.ClientDBMaintenance( self._c, self._db_dir, self._db_filenames )
    self._modules.append( self.modules_db_maintenance )
    self.modules_services = ClientDBServices.ClientDBMasterServices( self._c )
    self._modules.append( self.modules_services )
    self.modules_hashes = ClientDBMaster.ClientDBMasterHashes( self._c )
    self._modules.append( self.modules_hashes )
    self.modules_tags = ClientDBMaster.ClientDBMasterTags( self._c )
    self._modules.append( self.modules_tags )
    self.modules_urls = ClientDBMaster.ClientDBMasterURLs( self._c )
    self._modules.append( self.modules_urls )
    self.modules_texts = ClientDBMaster.ClientDBMasterTexts( self._c )
    self._modules.append( self.modules_texts )
    self.modules_serialisable = ClientDBSerialisable.ClientDBSerialisable( self._c, self._db_dir, self._cursor_transaction_wrapper, self.modules_services )
    self._modules.append( self.modules_serialisable )
    #
    self.modules_files_metadata_basic = ClientDBFilesMetadataBasic.ClientDBFilesMetadataBasic( self._c )
    self._modules.append( self.modules_files_metadata_basic )
    #
    self.modules_files_storage = ClientDBFilesStorage.ClientDBFilesStorage( self._c, self._cursor_transaction_wrapper, self.modules_services, self.modules_hashes, self.modules_texts )
    self._modules.append( self.modules_files_storage )
    #
    self.modules_mappings_counts = ClientDBMappingsCounts.ClientDBMappingsCounts( self._c, self.modules_services )
    self._modules.append( self.modules_mappings_counts )
    #
    self.modules_tags_local_cache = ClientDBDefinitionsCache.ClientDBCacheLocalTags( self._c, self.modules_tags, self.modules_services, self.modules_mappings_counts )
    self._modules.append( self.modules_tags_local_cache )
    self.modules_hashes_local_cache = ClientDBDefinitionsCache.ClientDBCacheLocalHashes( self._c, self.modules_hashes, self.modules_services, self.modules_files_storage )
    self._modules.append( self.modules_hashes_local_cache )
    #
    self.modules_mappings_storage = ClientDBMappingsStorage.ClientDBMappingsStorage( self._c, self.modules_services )
    self._modules.append( self.modules_mappings_storage )
    #
    self.modules_tag_siblings = ClientDBTagSiblings.ClientDBTagSiblings( self._c, self.modules_services, self.modules_tags, self.modules_tags_local_cache )
    self._modules.append( self.modules_tag_siblings )
    self.modules_tag_parents = ClientDBTagParents.ClientDBTagParents( self._c, self.modules_services, self.modules_tags_local_cache, self.modules_tag_siblings )
    self._modules.append( self.modules_tag_parents )
    self.modules_tag_display = ClientDBTagDisplay.ClientDBTagDisplay( self._c, self._cursor_transaction_wrapper, self.modules_services, self.modules_tags, self.modules_tags_local_cache, self.modules_tag_siblings, self.modules_tag_parents )
    self._modules.append( self.modules_tag_display )
    # when you do the mappings caches, storage and display, consider carefully how you want them slotting in here
    # don't rush into it
    self.modules_tag_search = ClientDBTagSearch.ClientDBTagSearch( self._c, self.modules_services, self.modules_tags, self.modules_tag_display )
    self._modules.append( self.modules_tag_search )
    self.modules_mappings_counts_update = ClientDBMappingsCountsUpdate.ClientDBMappingsCountsUpdate( self._c, self.modules_services, self.modules_mappings_counts, self.modules_tags_local_cache, self.modules_tag_display, self.modules_tag_search )
    self._modules.append( self.modules_mappings_counts_update )
    #
    self.modules_mappings_cache_specific_display = ClientDBMappingsCacheSpecificDisplay.ClientDBMappingsCacheSpecificDisplay( self._c, self.modules_services, self.modules_mappings_counts, self.modules_mappings_counts_update, self.modules_mappings_storage, self.modules_tag_display )
    # FIX: this module was previously constructed but never registered, unlike
    # every sibling above and below; register it so it participates in the
    # module-wide processing that self._modules drives.
    self._modules.append( self.modules_mappings_cache_specific_display )
    #
    self.modules_similar_files = ClientDBSimilarFiles.ClientDBSimilarFiles( self._c, self.modules_services, self.modules_files_storage )
    self._modules.append( self.modules_similar_files )
    self.modules_files_duplicates = ClientDBFilesDuplicates.ClientDBFilesDuplicates( self._c, self.modules_files_storage, self.modules_hashes_local_cache, self.modules_similar_files )
    self._modules.append( self.modules_files_duplicates )
    #
    self.modules_files_maintenance_queue = ClientDBFilesMaintenanceQueue.ClientDBFilesMaintenanceQueue( self._c, self.modules_hashes_local_cache )
    self._modules.append( self.modules_files_maintenance_queue )
    #
    self.modules_repositories = ClientDBRepositories.ClientDBRepositories( self._c, self._cursor_transaction_wrapper, self.modules_services, self.modules_files_storage, self.modules_files_metadata_basic, self.modules_hashes_local_cache, self.modules_tags_local_cache, self.modules_files_maintenance_queue )
    self._modules.append( self.modules_repositories )
    #
    self.modules_files_maintenance = ClientDBFilesMaintenance.ClientDBFilesMaintenance( self._c, self.modules_files_maintenance_queue, self.modules_hashes, self.modules_hashes_local_cache, self.modules_files_metadata_basic, self.modules_similar_files, self.modules_repositories, self._weakref_media_result_cache )
    self._modules.append( self.modules_files_maintenance )
def _ManageDBError( self, job, e ):
    """
    Handle an exception raised while processing a db job: warn on memory
    pressure or a malformed database, then either hand a wrapped DBException
    back to a synchronous caller or surface the error directly.
    """
    if isinstance( e, MemoryError ):
        HydrusData.ShowText( 'The client is running out of memory! Restart it ASAP!' )
    trace = traceback.format_exc()
    if 'malformed' in trace:
        HydrusData.ShowText( 'A database exception looked like it could be a very serious \'database image is malformed\' error! Unless you know otherwise, please shut down the client immediately and check the \'help my db is broke.txt\' under install_dir/db.' )
    if not job.IsSynchronous():
        HydrusData.ShowException( e )
        return
    # synchronous caller is waiting on a result; deliver the wrapped exception
    summary = str( type( e ).__name__ ) + ': ' + str( e )
    wrapped_e = HydrusExceptions.DBException( e, summary, 'Database ' + trace )
    job.PutResult( wrapped_e )
def _MigrationClearJob( self, database_temp_job_name ):
self._Execute( 'DROP TABLE {};'.format( database_temp_job_name ) )
def _MigrationGetMappings( self, database_temp_job_name, file_service_key, tag_service_key, hash_type, tag_filter, content_statuses ):
    """
    Pull the next batch of ( hash, tags ) rows for a tag migration job.
    
    Consumes hash_ids destructively from the temp job table (each fetched row
    is deleted), so repeated calls walk the whole job. Batches are capped at
    256 rows or roughly one second of work, whichever comes first.
    
    hash_type 'sha256' uses the local hash cache; any other type is resolved
    via the extra-hashes store, and files missing that hash are skipped.
    """
    time_started_precise = HydrusData.GetNowPrecise()
    data = []
    file_service_id = self.modules_services.GetServiceId( file_service_key )
    tag_service_id = self.modules_services.GetServiceId( tag_service_key )
    statuses_to_table_names = self.modules_mappings_storage.GetFastestStorageMappingTableNames( file_service_id, tag_service_id )
    # one SELECT per requested content status (current/pending/etc.)
    select_queries = []
    for content_status in content_statuses:
        table_name = statuses_to_table_names[ content_status ]
        select_query = 'SELECT tag_id FROM {} WHERE hash_id = ?;'.format( table_name )
        select_queries.append( select_query )
    we_should_stop = False
    while not we_should_stop:
        result = self._Execute( 'SELECT hash_id FROM {};'.format( database_temp_job_name ) ).fetchone()
        if result is None:
            # temp table exhausted; the job is done
            break
        ( hash_id, ) = result
        # destructive read: remove the row so it is not processed twice
        self._Execute( 'DELETE FROM {} WHERE hash_id = ?;'.format( database_temp_job_name ), ( hash_id, ) )
        if hash_type == 'sha256':
            desired_hash = self.modules_hashes_local_cache.GetHash( hash_id )
        else:
            try:
                desired_hash = self.modules_hashes.GetExtraHash( hash_type, hash_id )
            except HydrusExceptions.DataMissing:
                # no md5/sha1/sha512 recorded for this file; skip it
                # NOTE: this also skips the batch-size/time check below, by design of the loop shape
                continue
        tags = set()
        for select_query in select_queries:
            tag_ids = self._STL( self._Execute( select_query, ( hash_id, ) ) )
            tag_ids_to_tags = self.modules_tags_local_cache.GetTagIdsToTags( tag_ids = tag_ids )
            tags.update( tag_ids_to_tags.values() )
        if not tag_filter.AllowsEverything():
            tags = tag_filter.Filter( tags )
        if len( tags ) > 0:
            data.append( ( desired_hash, tags ) )
        # stop at 256 rows, or after ~1s of work once we have at least one row
        we_should_stop = len( data ) >= 256 or ( len( data ) > 0 and HydrusData.TimeHasPassedPrecise( time_started_precise + 1.0 ) )
    return data
def _MigrationGetPairs( self, database_temp_job_name, left_tag_filter, right_tag_filter ):
    """
    Pull the next batch of ( left_tag, right_tag ) pairs for a pair migration
    job, consuming rows destructively from the temp job table.
    
    Pairs failing either tag filter are dropped. Batches are capped at 256
    pairs or roughly one second of work once at least one pair is collected.
    """
    batch_started = HydrusData.GetNowPrecise()
    pairs = []
    while True:
        row = self._Execute( 'SELECT left_tag_id, right_tag_id FROM {};'.format( database_temp_job_name ) ).fetchone()
        if row is None:
            # temp table exhausted; the job is done
            break
        ( left_tag_id, right_tag_id ) = row
        # destructive read: remove the row so it is not processed twice
        self._Execute( 'DELETE FROM {} WHERE left_tag_id = ? AND right_tag_id = ?;'.format( database_temp_job_name ), ( left_tag_id, right_tag_id ) )
        left_tag = self.modules_tags_local_cache.GetTag( left_tag_id )
        if not left_tag_filter.TagOK( left_tag ):
            continue
        right_tag = self.modules_tags_local_cache.GetTag( right_tag_id )
        if not right_tag_filter.TagOK( right_tag ):
            continue
        pairs.append( ( left_tag, right_tag ) )
        # the stop check only runs after a successful append, matching the
        # original 'continue skips the flag update' behaviour
        if len( pairs ) >= 256 or HydrusData.TimeHasPassedPrecise( batch_started + 1.0 ):
            break
    return pairs
def _MigrationStartMappingsJob( self, database_temp_job_name, file_service_key, tag_service_key, hashes, content_statuses ):
    """
    Create and populate the durable temp table of hash_ids that a mappings
    migration job will consume.
    
    If explicit hashes are given, only those go in. Otherwise the hash_ids
    are gathered from the tag service's mapping tables for the requested
    content statuses -- unless the service is judged 'big' relative to its
    peers, in which case the whole master hashes table is used as a cheaper
    superset.
    """
    file_service_id = self.modules_services.GetServiceId( file_service_key )
    self._Execute( 'CREATE TABLE IF NOT EXISTS durable_temp.{} ( hash_id INTEGER PRIMARY KEY );'.format( database_temp_job_name ) )
    if hashes is not None:
        # explicit file list: just stuff it in
        hash_ids = self.modules_hashes_local_cache.GetHashIds( hashes )
        self._ExecuteMany( 'INSERT INTO {} ( hash_id ) VALUES ( ? );'.format( database_temp_job_name ), ( ( hash_id, ) for hash_id in hash_ids ) )
    else:
        tag_service_id = self.modules_services.GetServiceId( tag_service_key )
        statuses_to_table_names = {}
        use_hashes_table = False
        if file_service_id == self.modules_services.combined_file_service_id:
            # if our tag service is the biggest, and if it basically accounts for all the hashes we know about, it is much faster to just use the hashes table
            our_results = self._GetServiceInfo( tag_service_key )
            our_num_files = our_results[ HC.SERVICE_INFO_NUM_FILES ]
            other_services = [ service for service in self.modules_services.GetServices( HC.REAL_TAG_SERVICES ) if service.GetServiceKey() != tag_service_key ]
            other_num_files = []
            for other_service in other_services:
                other_results = self._GetServiceInfo( other_service.GetServiceKey() )
                other_num_files.append( other_results[ HC.SERVICE_INFO_NUM_FILES ] )
            if len( other_num_files ) == 0:
                # no peers to compare against, so we are the biggest by default
                we_are_big = True
            else:
                # 'big' = at least 75% the size of the largest peer tag service
                we_are_big = our_num_files >= 0.75 * max( other_num_files )
            if we_are_big:
                local_files_results = self._GetServiceInfo( CC.COMBINED_LOCAL_FILE_SERVICE_KEY )
                local_files_num_files = local_files_results[ HC.SERVICE_INFO_NUM_FILES ]
                if local_files_num_files > our_num_files:
                    # probably a small local tags service, ok to pull from current_mappings
                    we_are_big = False
            if we_are_big:
                use_hashes_table = True
        if use_hashes_table:
            # this obviously just pulls literally all known files
            # makes migration take longer if the tag service does not cover many of these files, but saves huge startup time since it is a simple list
            select_subqueries = [ 'SELECT hash_id FROM hashes' ]
        else:
            statuses_to_table_names = self.modules_mappings_storage.GetFastestStorageMappingTableNames( file_service_id, tag_service_id )
            select_subqueries = []
            for content_status in content_statuses:
                table_name = statuses_to_table_names[ content_status ]
                select_subquery = 'SELECT DISTINCT hash_id FROM {}'.format( table_name )
                select_subqueries.append( select_subquery )
        for select_subquery in select_subqueries:
            # OR IGNORE: the same hash_id can appear under multiple statuses
            self._Execute( 'INSERT OR IGNORE INTO {} ( hash_id ) {};'.format( database_temp_job_name, select_subquery ) )
def _MigrationStartPairsJob( self, database_temp_job_name, tag_service_key, content_type, content_statuses ):
    """
    Create and populate the durable temp table of tag pairs that a pair
    migration job will consume, from either the tag parents or tag siblings
    tables (current rows plus petitions) for the given service and statuses.
    """
    self._Execute( 'CREATE TABLE IF NOT EXISTS durable_temp.{} ( left_tag_id INTEGER, right_tag_id INTEGER, PRIMARY KEY ( left_tag_id, right_tag_id ) );'.format( database_temp_job_name ) )
    tag_service_id = self.modules_services.GetServiceId( tag_service_key )
    if content_type == HC.CONTENT_TYPE_TAG_PARENTS:
        source_table_names = [ 'tag_parents', 'tag_parent_petitions' ]
        left_column_name = 'child_tag_id'
        right_column_name = 'parent_tag_id'
    elif content_type == HC.CONTENT_TYPE_TAG_SIBLINGS:
        source_table_names = [ 'tag_siblings', 'tag_sibling_petitions' ]
        left_column_name = 'bad_tag_id'
        right_column_name = 'good_tag_id'
    # NOTE(review): any other content_type leaves source_table_names unbound and
    # raises NameError below -- presumably callers only ever pass parents/siblings; confirm
    for source_table_name in source_table_names:
        # content_statuses are ints splayed straight into the IN ( ... ) clause
        self._Execute( 'INSERT OR IGNORE INTO {} ( left_tag_id, right_tag_id ) SELECT {}, {} FROM {} WHERE service_id = ? AND status IN {};'.format( database_temp_job_name, left_column_name, right_column_name, source_table_name, HydrusData.SplayListForDB( content_statuses ) ), ( tag_service_id, ) )
def _PerceptualHashesResetSearchFromHashes( self, hashes ):
hash_ids = self.modules_hashes_local_cache.GetHashIds( hashes )
self.modules_similar_files.ResetSearch( hash_ids )
def _PerceptualHashesSearchForPotentialDuplicates( self, search_distance, maintenance_mode = HC.MAINTENANCE_FORCED, job_key = None, stop_time = None, work_time_float = None ):
    """
    Work through the shape_search_cache, searching each not-yet-searched file
    for potential duplicates within the given hamming distance.
    
    Stops early on work-time expiry, job_key cancellation, or controller
    shutdown. Returns ( still_work_to_do, num_done ); still_work_to_do is
    False only when the cache queue has been fully drained.
    """
    time_started_float = HydrusData.GetNowFloat()
    num_done = 0
    still_work_to_do = True
    # files whose last search was at a smaller distance also need re-searching
    group_of_hash_ids = self._STL( self._Execute( 'SELECT hash_id FROM shape_search_cache WHERE searched_distance IS NULL or searched_distance < ?;', ( search_distance, ) ).fetchmany( 10 ) )
    while len( group_of_hash_ids ) > 0:
        text = 'searching potential duplicates: {}'.format( HydrusData.ToHumanInt( num_done ) )
        HG.client_controller.frame_splash_status.SetSubtext( text )
        for ( i, hash_id ) in enumerate( group_of_hash_ids ):
            # bail out if we have used up our allotted work time
            if work_time_float is not None and HydrusData.TimeHasPassedFloat( time_started_float + work_time_float ):
                return ( still_work_to_do, num_done )
            if job_key is not None:
                ( i_paused, should_stop ) = job_key.WaitIfNeeded()
                if should_stop:
                    return ( still_work_to_do, num_done )
            should_stop = HG.client_controller.ShouldStopThisWork( maintenance_mode, stop_time = stop_time )
            if should_stop:
                return ( still_work_to_do, num_done )
            media_id = self.modules_files_duplicates.DuplicatesGetMediaId( hash_id )
            # exclude the file itself from its own similar-files results
            potential_duplicate_media_ids_and_distances = [ ( self.modules_files_duplicates.DuplicatesGetMediaId( duplicate_hash_id ), distance ) for ( duplicate_hash_id, distance ) in self.modules_similar_files.Search( hash_id, search_distance ) if duplicate_hash_id != hash_id ]
            self.modules_files_duplicates.DuplicatesAddPotentialDuplicates( media_id, potential_duplicate_media_ids_and_distances )
            # record the searched distance so this file drops out of the queue query
            self._Execute( 'UPDATE shape_search_cache SET searched_distance = ? WHERE hash_id = ?;', ( search_distance, hash_id ) )
            num_done += 1
        group_of_hash_ids = self._STL( self._Execute( 'SELECT hash_id FROM shape_search_cache WHERE searched_distance IS NULL or searched_distance < ?;', ( search_distance, ) ).fetchmany( 10 ) )
    # queue drained: the search is complete for this distance
    still_work_to_do = False
    return ( still_work_to_do, num_done )
def _ProcessContentUpdates( self, service_keys_to_content_updates, publish_content_updates = True ):
notify_new_downloads = False
notify_new_pending = False
notify_new_parents = False
notify_new_siblings = False
valid_service_keys_to_content_updates = {}
for ( service_key, content_updates ) in service_keys_to_content_updates.items():
try:
service_id = self.modules_services.GetServiceId( service_key )
except HydrusExceptions.DataMissing:
continue
valid_service_keys_to_content_updates[ service_key ] = content_updates
service = self.modules_services.GetService( service_id )
service_type = service.GetServiceType()
ultimate_mappings_ids = []
ultimate_deleted_mappings_ids = []
ultimate_pending_mappings_ids = []
ultimate_pending_rescinded_mappings_ids = []
ultimate_petitioned_mappings_ids = []
ultimate_petitioned_rescinded_mappings_ids = []
changed_sibling_tag_ids = set()
changed_parent_tag_ids = set()
for content_update in content_updates:
( data_type, action, row ) = content_update.ToTuple()
if service_type in HC.FILE_SERVICES:
if data_type == HC.CONTENT_TYPE_FILES:
if action == HC.CONTENT_UPDATE_ADVANCED:
( sub_action, sub_row ) = row
if sub_action == 'delete_deleted':
hashes = sub_row
if hashes is None:
service_ids_to_nums_cleared = self.modules_files_storage.ClearLocalDeleteRecord()
else:
hash_ids = self.modules_hashes_local_cache.GetHashIds( hashes )
service_ids_to_nums_cleared = self.modules_files_storage.ClearLocalDeleteRecord( hash_ids )
self._ExecuteMany( 'UPDATE service_info SET info = info + ? WHERE service_id = ? AND info_type = ?;', ( ( -num_cleared, clear_service_id, HC.SERVICE_INFO_NUM_DELETED_FILES ) for ( clear_service_id, num_cleared ) in service_ids_to_nums_cleared.items() ) )
elif action == HC.CONTENT_UPDATE_ADD:
if service_type in HC.LOCAL_FILE_SERVICES or service_type == HC.FILE_REPOSITORY:
( file_info_manager, timestamp ) = row
( hash_id, hash, size, mime, width, height, duration, num_frames, has_audio, num_words ) = file_info_manager.ToTuple()
self.modules_files_metadata_basic.AddFilesInfo( [ ( hash_id, size, mime, width, height, duration, num_frames, has_audio, num_words ) ] )
elif service_type == HC.IPFS:
( file_info_manager, multihash ) = row
hash_id = file_info_manager.hash_id
self._SetServiceFilename( service_id, hash_id, multihash )
timestamp = HydrusData.GetNow()
self._AddFiles( service_id, [ ( hash_id, timestamp ) ] )
else:
hashes = row
hash_ids = self.modules_hashes_local_cache.GetHashIds( hashes )
if action == HC.CONTENT_UPDATE_ARCHIVE:
self._ArchiveFiles( hash_ids )
elif action == HC.CONTENT_UPDATE_INBOX:
self._InboxFiles( hash_ids )
elif action == HC.CONTENT_UPDATE_DELETE:
actual_delete_hash_ids = self._FilterForFileDeleteLock( service_id, hash_ids )
if len( actual_delete_hash_ids ) < len( hash_ids ):
hash_ids = actual_delete_hash_ids
hashes = self.modules_hashes_local_cache.GetHashes( hash_ids )
content_update.SetRow( hashes )
if service_type in ( HC.LOCAL_FILE_DOMAIN, HC.COMBINED_LOCAL_FILE ):
if content_update.HasReason():
reason = content_update.GetReason()
# at the moment, we only set a deletion reason when a file leaves a real file domain. not on second delete from trash, so if file in trash, no new delete reason will be set
location_context = ClientLocation.LocationContext( current_service_keys = ( service_key, ) )
reason_setting_hash_ids = self.modules_files_storage.FilterHashIds( location_context, hash_ids )
self.modules_files_storage.SetFileDeletionReason( reason_setting_hash_ids, reason )
if service_id == self.modules_services.trash_service_id:
# shouldn't be called anymore, but just in case someone fidgets a trash delete with client api or something
self._DeleteFiles( self.modules_services.combined_local_file_service_id, hash_ids )
else:
self._DeleteFiles( service_id, hash_ids )
elif action == HC.CONTENT_UPDATE_UNDELETE:
self._UndeleteFiles( service_id, hash_ids )
elif action == HC.CONTENT_UPDATE_PEND:
invalid_hash_ids = self.modules_files_storage.FilterHashIdsToStatus( service_id, hash_ids, HC.CONTENT_STATUS_CURRENT )
valid_hash_ids = hash_ids.difference( invalid_hash_ids )
self.modules_files_storage.PendFiles( service_id, valid_hash_ids )
if service_key == CC.COMBINED_LOCAL_FILE_SERVICE_KEY:
notify_new_downloads = True
else:
notify_new_pending = True
elif action == HC.CONTENT_UPDATE_PETITION:
reason = content_update.GetReason()
reason_id = self.modules_texts.GetTextId( reason )
valid_hash_ids = self.modules_files_storage.FilterHashIdsToStatus( service_id, hash_ids, HC.CONTENT_STATUS_CURRENT )
self.modules_files_storage.PetitionFiles( service_id, reason_id, valid_hash_ids )
notify_new_pending = True
elif action == HC.CONTENT_UPDATE_RESCIND_PEND:
self.modules_files_storage.RescindPendFiles( service_id, hash_ids )
if service_key == CC.COMBINED_LOCAL_FILE_SERVICE_KEY:
notify_new_downloads = True
else:
notify_new_pending = True
elif action == HC.CONTENT_UPDATE_RESCIND_PETITION:
self.modules_files_storage.RescindPetitionFiles( service_id, hash_ids )
notify_new_pending = True
elif data_type == HC.CONTENT_TYPE_DIRECTORIES:
if action == HC.CONTENT_UPDATE_ADD:
( hashes, dirname, note ) = row
hash_ids = self.modules_hashes_local_cache.GetHashIds( hashes )
self._SetServiceDirectory( service_id, hash_ids, dirname, note )
elif action == HC.CONTENT_UPDATE_DELETE:
dirname = row
self._DeleteServiceDirectory( service_id, dirname )
elif data_type == HC.CONTENT_TYPE_URLS:
if action == HC.CONTENT_UPDATE_ADD:
( urls, hashes ) = row
url_ids = { self.modules_urls.GetURLId( url ) for url in urls }
hash_ids = self.modules_hashes_local_cache.GetHashIds( hashes )
self._ExecuteMany( 'INSERT OR IGNORE INTO url_map ( hash_id, url_id ) VALUES ( ?, ? );', itertools.product( hash_ids, url_ids ) )
elif action == HC.CONTENT_UPDATE_DELETE:
( urls, hashes ) = row
url_ids = { self.modules_urls.GetURLId( url ) for url in urls }
hash_ids = self.modules_hashes_local_cache.GetHashIds( hashes )
self._ExecuteMany( 'DELETE FROM url_map WHERE hash_id = ? AND url_id = ?;', itertools.product( hash_ids, url_ids ) )
elif data_type == HC.CONTENT_TYPE_TIMESTAMP:
( timestamp_type, hash, data ) = row
if timestamp_type == 'domain':
if action == HC.CONTENT_UPDATE_ADD:
( domain, timestamp ) = data
hash_id = self.modules_hashes_local_cache.GetHashId( hash )
domain_id = self.modules_urls.GetURLDomainId( domain )
self.modules_files_metadata_basic.UpdateDomainModifiedTimestamp( hash_id, domain_id, timestamp )
elif action == HC.CONTENT_UPDATE_SET:
( domain, timestamp ) = data
hash_id = self.modules_hashes_local_cache.GetHashId( hash )
domain_id = self.modules_urls.GetURLDomainId( domain )
self.modules_files_metadata_basic.SetDomainModifiedTimestamp( hash_id, domain_id, timestamp )
elif action == HC.CONTENT_UPDATE_DELETE:
domain = data
hash_id = self.modules_hashes_local_cache.GetHashId( hash )
domain_id = self.modules_urls.GetURLDomainId( domain )
self.modules_files_metadata_basic.ClearDomainModifiedTimestamp( hash_id, domain_id )
elif data_type == HC.CONTENT_TYPE_FILE_VIEWING_STATS:
if action == HC.CONTENT_UPDATE_ADVANCED:
action = row
if action == 'clear':
self._Execute( 'DELETE FROM file_viewing_stats;' )
elif action == HC.CONTENT_UPDATE_ADD:
( hash, canvas_type, view_timestamp, views_delta, viewtime_delta ) = row
hash_id = self.modules_hashes_local_cache.GetHashId( hash )
self._Execute( 'INSERT OR IGNORE INTO file_viewing_stats ( hash_id, canvas_type, last_viewed_timestamp, views, viewtime ) VALUES ( ?, ?, ?, ?, ? );', ( hash_id, canvas_type, 0, 0, 0 ) )
self._Execute( 'UPDATE file_viewing_stats SET last_viewed_timestamp = ?, views = views + ?, viewtime = viewtime + ? WHERE hash_id = ? AND canvas_type = ?;', ( view_timestamp, views_delta, viewtime_delta, hash_id, canvas_type ) )
elif action == HC.CONTENT_UPDATE_DELETE:
hashes = row
hash_ids = self.modules_hashes_local_cache.GetHashIds( hashes )
self._ExecuteMany( 'DELETE FROM file_viewing_stats WHERE hash_id = ?;', ( ( hash_id, ) for hash_id in hash_ids ) )
elif service_type in HC.REAL_TAG_SERVICES:
if data_type == HC.CONTENT_TYPE_MAPPINGS:
( tag, hashes ) = row
try:
tag_id = self.modules_tags.GetTagId( tag )
except HydrusExceptions.TagSizeException:
continue
hash_ids = self.modules_hashes_local_cache.GetHashIds( hashes )
display_affected = action in ( HC.CONTENT_UPDATE_ADD, HC.CONTENT_UPDATE_DELETE, HC.CONTENT_UPDATE_PEND, HC.CONTENT_UPDATE_RESCIND_PEND )
if display_affected and publish_content_updates and self.modules_tag_display.IsChained( ClientTags.TAG_DISPLAY_ACTUAL, service_id, tag_id ):
self._regen_tags_managers_hash_ids.update( hash_ids )
if action == HC.CONTENT_UPDATE_ADD:
if not HG.client_controller.tag_display_manager.TagOK( ClientTags.TAG_DISPLAY_STORAGE, service_key, tag ):
continue
ultimate_mappings_ids.append( ( tag_id, hash_ids ) )
elif action == HC.CONTENT_UPDATE_DELETE:
ultimate_deleted_mappings_ids.append( ( tag_id, hash_ids ) )
elif action == HC.CONTENT_UPDATE_PEND:
if not HG.client_controller.tag_display_manager.TagOK( ClientTags.TAG_DISPLAY_STORAGE, service_key, tag ):
continue
ultimate_pending_mappings_ids.append( ( tag_id, hash_ids ) )
elif action == HC.CONTENT_UPDATE_RESCIND_PEND:
ultimate_pending_rescinded_mappings_ids.append( ( tag_id, hash_ids ) )
elif action == HC.CONTENT_UPDATE_PETITION:
reason = content_update.GetReason()
reason_id = self.modules_texts.GetTextId( reason )
ultimate_petitioned_mappings_ids.append( ( tag_id, hash_ids, reason_id ) )
elif action == HC.CONTENT_UPDATE_RESCIND_PETITION:
ultimate_petitioned_rescinded_mappings_ids.append( ( tag_id, hash_ids ) )
elif action == HC.CONTENT_UPDATE_CLEAR_DELETE_RECORD:
( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = ClientDBMappingsStorage.GenerateMappingsTableNames( service_id )
self._ExecuteMany( 'DELETE FROM {} WHERE tag_id = ? AND hash_id = ?;'.format( deleted_mappings_table_name ), ( ( tag_id, hash_id ) for hash_id in hash_ids ) )
self._Execute( 'DELETE FROM service_info WHERE service_id = ? AND info_type = ?;', ( service_id, HC.SERVICE_INFO_NUM_DELETED_MAPPINGS ) )
cache_file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES )
for cache_file_service_id in cache_file_service_ids:
( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( cache_file_service_id, service_id )
self._ExecuteMany( 'DELETE FROM ' + cache_deleted_mappings_table_name + ' WHERE hash_id = ? AND tag_id = ?;', ( ( hash_id, tag_id ) for hash_id in hash_ids ) )
elif data_type == HC.CONTENT_TYPE_TAG_PARENTS:
if action in ( HC.CONTENT_UPDATE_ADD, HC.CONTENT_UPDATE_DELETE ):
( child_tag, parent_tag ) = row
try:
child_tag_id = self.modules_tags.GetTagId( child_tag )
parent_tag_id = self.modules_tags.GetTagId( parent_tag )
except HydrusExceptions.TagSizeException:
continue
pairs = ( ( child_tag_id, parent_tag_id ), )
if action == HC.CONTENT_UPDATE_ADD:
self.modules_tag_parents.AddTagParents( service_id, pairs )
elif action == HC.CONTENT_UPDATE_DELETE:
self.modules_tag_parents.DeleteTagParents( service_id, pairs )
changed_parent_tag_ids.update( ( child_tag_id, parent_tag_id ) )
if service_type == HC.TAG_REPOSITORY:
notify_new_pending = True
elif action in ( HC.CONTENT_UPDATE_PEND, HC.CONTENT_UPDATE_PETITION ):
( child_tag, parent_tag ) = row
try:
child_tag_id = self.modules_tags.GetTagId( child_tag )
parent_tag_id = self.modules_tags.GetTagId( parent_tag )
except HydrusExceptions.TagSizeException:
continue
reason = content_update.GetReason()
reason_id = self.modules_texts.GetTextId( reason )
triples = ( ( child_tag_id, parent_tag_id, reason_id ), )
if action == HC.CONTENT_UPDATE_PEND:
self.modules_tag_parents.PendTagParents( service_id, triples )
elif action == HC.CONTENT_UPDATE_PETITION:
self.modules_tag_parents.PetitionTagParents( service_id, triples )
changed_parent_tag_ids.update( ( child_tag_id, parent_tag_id ) )
if service_type == HC.TAG_REPOSITORY:
notify_new_pending = True
elif action in ( HC.CONTENT_UPDATE_RESCIND_PEND, HC.CONTENT_UPDATE_RESCIND_PETITION ):
( child_tag, parent_tag ) = row
try:
child_tag_id = self.modules_tags.GetTagId( child_tag )
parent_tag_id = self.modules_tags.GetTagId( parent_tag )
except HydrusExceptions.TagSizeException:
continue
pairs = ( ( child_tag_id, parent_tag_id ), )
if action == HC.CONTENT_UPDATE_RESCIND_PEND:
self.modules_tag_parents.RescindPendingTagParents( service_id, pairs )
elif action == HC.CONTENT_UPDATE_RESCIND_PETITION:
self.modules_tag_parents.RescindPetitionedTagParents( service_id, pairs )
changed_parent_tag_ids.update( ( child_tag_id, parent_tag_id ) )
if service_type == HC.TAG_REPOSITORY:
notify_new_pending = True
notify_new_parents = True
elif data_type == HC.CONTENT_TYPE_TAG_SIBLINGS:
if action in ( HC.CONTENT_UPDATE_ADD, HC.CONTENT_UPDATE_DELETE ):
( bad_tag, good_tag ) = row
try:
bad_tag_id = self.modules_tags.GetTagId( bad_tag )
good_tag_id = self.modules_tags.GetTagId( good_tag )
except HydrusExceptions.TagSizeException:
continue
pairs = ( ( bad_tag_id, good_tag_id ), )
if action == HC.CONTENT_UPDATE_ADD:
self.modules_tag_siblings.AddTagSiblings( service_id, pairs )
elif action == HC.CONTENT_UPDATE_DELETE:
self.modules_tag_siblings.DeleteTagSiblings( service_id, pairs )
changed_sibling_tag_ids.update( ( bad_tag_id, good_tag_id ) )
if service_type == HC.TAG_REPOSITORY:
notify_new_pending = True
elif action in ( HC.CONTENT_UPDATE_PEND, HC.CONTENT_UPDATE_PETITION ):
( bad_tag, good_tag ) = row
try:
bad_tag_id = self.modules_tags.GetTagId( bad_tag )
good_tag_id = self.modules_tags.GetTagId( good_tag )
except HydrusExceptions.TagSizeException:
continue
reason = content_update.GetReason()
reason_id = self.modules_texts.GetTextId( reason )
triples = ( ( bad_tag_id, good_tag_id, reason_id ), )
if action == HC.CONTENT_UPDATE_PEND:
self.modules_tag_siblings.PendTagSiblings( service_id, triples )
elif action == HC.CONTENT_UPDATE_PETITION:
self.modules_tag_siblings.PetitionTagSiblings( service_id, triples )
changed_sibling_tag_ids.update( ( bad_tag_id, good_tag_id ) )
if service_type == HC.TAG_REPOSITORY:
notify_new_pending = True
elif action in ( HC.CONTENT_UPDATE_RESCIND_PEND, HC.CONTENT_UPDATE_RESCIND_PETITION ):
( bad_tag, good_tag ) = row
try:
bad_tag_id = self.modules_tags.GetTagId( bad_tag )
good_tag_id = self.modules_tags.GetTagId( good_tag )
except HydrusExceptions.TagSizeException:
continue
pairs = ( ( bad_tag_id, good_tag_id ), )
if action == HC.CONTENT_UPDATE_RESCIND_PEND:
self.modules_tag_siblings.RescindPendingTagSiblings( service_id, pairs )
elif action == HC.CONTENT_UPDATE_RESCIND_PETITION:
self.modules_tag_siblings.RescindPetitionedTagSiblings( service_id, pairs )
changed_sibling_tag_ids.update( ( bad_tag_id, good_tag_id ) )
if service_type == HC.TAG_REPOSITORY:
notify_new_pending = True
notify_new_siblings = True
elif service_type in HC.RATINGS_SERVICES:
if action == HC.CONTENT_UPDATE_ADD:
( rating, hashes ) = row
hash_ids = self.modules_hashes_local_cache.GetHashIds( hashes )
splayed_hash_ids = HydrusData.SplayListForDB( hash_ids )
if service_type in ( HC.LOCAL_RATING_LIKE, HC.LOCAL_RATING_NUMERICAL ):
ratings_added = 0
self._ExecuteMany( 'DELETE FROM local_ratings WHERE service_id = ? AND hash_id = ?;', ( ( service_id, hash_id ) for hash_id in hash_ids ) )
ratings_added -= self._GetRowCount()
if rating is not None:
self._ExecuteMany( 'INSERT INTO local_ratings ( service_id, hash_id, rating ) VALUES ( ?, ?, ? );', [ ( service_id, hash_id, rating ) for hash_id in hash_ids ] )
ratings_added += self._GetRowCount()
self._Execute( 'UPDATE service_info SET info = info + ? WHERE service_id = ? AND info_type = ?;', ( ratings_added, service_id, HC.SERVICE_INFO_NUM_FILES ) )
elif action == HC.CONTENT_UPDATE_ADVANCED:
action = row
if action == 'delete_for_deleted_files':
deleted_files_table_name = ClientDBFilesStorage.GenerateFilesTableName( self.modules_services.combined_local_file_service_id, HC.CONTENT_STATUS_DELETED )
self._Execute( 'DELETE FROM local_ratings WHERE service_id = ? and hash_id IN ( SELECT hash_id FROM {} );'.format( deleted_files_table_name ), ( service_id, ) )
ratings_deleted = self._GetRowCount()
self._Execute( 'UPDATE service_info SET info = info - ? WHERE service_id = ? AND info_type = ?;', ( ratings_deleted, service_id, HC.SERVICE_INFO_NUM_FILES ) )
elif action == 'delete_for_non_local_files':
current_files_table_name = ClientDBFilesStorage.GenerateFilesTableName( self.modules_services.combined_local_file_service_id, HC.CONTENT_STATUS_CURRENT )
self._Execute( 'DELETE FROM local_ratings WHERE local_ratings.service_id = ? and hash_id NOT IN ( SELECT hash_id FROM {} );'.format( current_files_table_name ), ( service_id, ) )
ratings_deleted = self._GetRowCount()
self._Execute( 'UPDATE service_info SET info = info - ? WHERE service_id = ? AND info_type = ?;', ( ratings_deleted, service_id, HC.SERVICE_INFO_NUM_FILES ) )
elif action == 'delete_for_all_files':
self._Execute( 'DELETE FROM local_ratings WHERE service_id = ?;', ( service_id, ) )
self._Execute( 'UPDATE service_info SET info = ? WHERE service_id = ? AND info_type = ?;', ( 0, service_id, HC.SERVICE_INFO_NUM_FILES ) )
elif service_type == HC.LOCAL_NOTES:
if action == HC.CONTENT_UPDATE_SET:
( hash, name, note ) = row
hash_id = self.modules_hashes_local_cache.GetHashId( hash )
name_id = self.modules_texts.GetLabelId( name )
self._Execute( 'DELETE FROM file_notes WHERE hash_id = ? AND name_id = ?;', ( hash_id, name_id ) )
if len( note ) > 0:
note_id = self.modules_texts.GetNoteId( note )
self._Execute( 'INSERT OR IGNORE INTO file_notes ( hash_id, name_id, note_id ) VALUES ( ?, ?, ? );', ( hash_id, name_id, note_id ) )
elif action == HC.CONTENT_UPDATE_DELETE:
( hash, name ) = row
hash_id = self.modules_hashes_local_cache.GetHashId( hash )
name_id = self.modules_texts.GetLabelId( name )
self._Execute( 'DELETE FROM file_notes WHERE hash_id = ? AND name_id = ?;', ( hash_id, name_id ) )
if len( ultimate_mappings_ids ) + len( ultimate_deleted_mappings_ids ) + len( ultimate_pending_mappings_ids ) + len( ultimate_pending_rescinded_mappings_ids ) + len( ultimate_petitioned_mappings_ids ) + len( ultimate_petitioned_rescinded_mappings_ids ) > 0:
self._UpdateMappings( service_id, mappings_ids = ultimate_mappings_ids, deleted_mappings_ids = ultimate_deleted_mappings_ids, pending_mappings_ids = ultimate_pending_mappings_ids, pending_rescinded_mappings_ids = ultimate_pending_rescinded_mappings_ids, petitioned_mappings_ids = ultimate_petitioned_mappings_ids, petitioned_rescinded_mappings_ids = ultimate_petitioned_rescinded_mappings_ids )
if service_type == HC.TAG_REPOSITORY:
notify_new_pending = True
if len( changed_sibling_tag_ids ) > 0:
self.modules_tag_display.NotifySiblingsChanged( service_id, changed_sibling_tag_ids )
if len( changed_parent_tag_ids ) > 0:
self.modules_tag_display.NotifyParentsChanged( service_id, changed_parent_tag_ids )
if publish_content_updates:
if notify_new_pending:
self._cursor_transaction_wrapper.pub_after_job( 'notify_new_pending' )
if notify_new_downloads:
self._cursor_transaction_wrapper.pub_after_job( 'notify_new_downloads' )
if notify_new_siblings or notify_new_parents:
self._cursor_transaction_wrapper.pub_after_job( 'notify_new_tag_display_application' )
self.pub_content_updates_after_commit( valid_service_keys_to_content_updates )
def _ProcessRepositoryContent( self, service_key, content_hash, content_iterator_dict, content_types_to_process, job_key, work_time ):
    """Process a slice of a repository content update within a time budget.
    
    content_iterator_dict maps row-type keys ( 'new_files', 'deleted_files',
    'new_mappings', 'deleted_mappings', 'new_parents', 'deleted_parents',
    'new_siblings', 'deleted_siblings' ) to iterators of remaining rows. A key
    is deleted only once its iterator is fully consumed, so a later call with
    the same dict resumes where this one stopped. The update is marked as
    processed only when every requested content type has been fully consumed;
    hitting the time limit or a cancelled job_key returns early instead.
    
    Returns the number of rows processed during this call.
    """
    # initial chunk sizes for the autothrottled splitters below; they adapt
    # chunking as they go depending on how fast rows are being consumed
    FILES_INITIAL_CHUNK_SIZE = 20
    MAPPINGS_INITIAL_CHUNK_SIZE = 50
    PAIR_ROWS_INITIAL_CHUNK_SIZE = 100
    service_id = self.modules_services.GetServiceId( service_key )
    # hard wall-clock deadline for this call
    precise_time_to_stop = HydrusData.GetNowPrecise() + work_time
    num_rows_processed = 0
    if HC.CONTENT_TYPE_FILES in content_types_to_process:
        if 'new_files' in content_iterator_dict:
            has_audio = None # hack until we figure this out better
            i = content_iterator_dict[ 'new_files' ]
            for chunk in HydrusData.SplitIteratorIntoAutothrottledChunks( i, FILES_INITIAL_CHUNK_SIZE, precise_time_to_stop ):
                files_info_rows = []
                files_rows = []
                for ( service_hash_id, size, mime, timestamp, width, height, duration, num_frames, num_words ) in chunk:
                    # translate the repository's hash id into our local master hash id
                    hash_id = self.modules_repositories.NormaliseServiceHashId( service_id, service_hash_id )
                    files_info_rows.append( ( hash_id, size, mime, width, height, duration, num_frames, has_audio, num_words ) )
                    files_rows.append( ( hash_id, timestamp ) )
                self.modules_files_metadata_basic.AddFilesInfo( files_info_rows )
                self._AddFiles( service_id, files_rows )
                num_rows_processed += len( files_rows )
                if HydrusData.TimeHasPassedPrecise( precise_time_to_stop ) or job_key.IsCancelled():
                    # out of time or cancelled--leave the partially consumed iterator in place for next time
                    return num_rows_processed
            del content_iterator_dict[ 'new_files' ]
        #
        if 'deleted_files' in content_iterator_dict:
            i = content_iterator_dict[ 'deleted_files' ]
            for chunk in HydrusData.SplitIteratorIntoAutothrottledChunks( i, FILES_INITIAL_CHUNK_SIZE, precise_time_to_stop ):
                service_hash_ids = chunk
                hash_ids = self.modules_repositories.NormaliseServiceHashIds( service_id, service_hash_ids )
                self._DeleteFiles( service_id, hash_ids )
                num_rows_processed += len( hash_ids )
                if HydrusData.TimeHasPassedPrecise( precise_time_to_stop ) or job_key.IsCancelled():
                    return num_rows_processed
            del content_iterator_dict[ 'deleted_files' ]
    #
    if HC.CONTENT_TYPE_MAPPINGS in content_types_to_process:
        if 'new_mappings' in content_iterator_dict:
            i = content_iterator_dict[ 'new_mappings' ]
            for chunk in HydrusData.SplitMappingIteratorIntoAutothrottledChunks( i, MAPPINGS_INITIAL_CHUNK_SIZE, precise_time_to_stop ):
                mappings_ids = []
                num_rows = 0
                # yo, I can save time if I merge these ids so we only have one round of normalisation
                for ( service_tag_id, service_hash_ids ) in chunk:
                    tag_id = self.modules_repositories.NormaliseServiceTagId( service_id, service_tag_id )
                    hash_ids = self.modules_repositories.NormaliseServiceHashIds( service_id, service_hash_ids )
                    mappings_ids.append( ( tag_id, hash_ids ) )
                    # row count is per (tag, hash) pair, not per tag
                    num_rows += len( service_hash_ids )
                self._UpdateMappings( service_id, mappings_ids = mappings_ids )
                num_rows_processed += num_rows
                if HydrusData.TimeHasPassedPrecise( precise_time_to_stop ) or job_key.IsCancelled():
                    return num_rows_processed
            del content_iterator_dict[ 'new_mappings' ]
        #
        if 'deleted_mappings' in content_iterator_dict:
            i = content_iterator_dict[ 'deleted_mappings' ]
            for chunk in HydrusData.SplitMappingIteratorIntoAutothrottledChunks( i, MAPPINGS_INITIAL_CHUNK_SIZE, precise_time_to_stop ):
                deleted_mappings_ids = []
                num_rows = 0
                for ( service_tag_id, service_hash_ids ) in chunk:
                    tag_id = self.modules_repositories.NormaliseServiceTagId( service_id, service_tag_id )
                    hash_ids = self.modules_repositories.NormaliseServiceHashIds( service_id, service_hash_ids )
                    deleted_mappings_ids.append( ( tag_id, hash_ids ) )
                    num_rows += len( service_hash_ids )
                self._UpdateMappings( service_id, deleted_mappings_ids = deleted_mappings_ids )
                num_rows_processed += num_rows
                if HydrusData.TimeHasPassedPrecise( precise_time_to_stop ) or job_key.IsCancelled():
                    return num_rows_processed
            del content_iterator_dict[ 'deleted_mappings' ]
    #
    # parents and siblings affect the tag display system, so we track whether
    # anything changed and publish a single notification in the finally below,
    # even if we bail out early on time/cancel
    parents_or_siblings_changed = False
    try:
        if HC.CONTENT_TYPE_TAG_PARENTS in content_types_to_process:
            if 'new_parents' in content_iterator_dict:
                i = content_iterator_dict[ 'new_parents' ]
                for chunk in HydrusData.SplitIteratorIntoAutothrottledChunks( i, PAIR_ROWS_INITIAL_CHUNK_SIZE, precise_time_to_stop ):
                    parent_ids = []
                    tag_ids = set()
                    for ( service_child_tag_id, service_parent_tag_id ) in chunk:
                        child_tag_id = self.modules_repositories.NormaliseServiceTagId( service_id, service_child_tag_id )
                        parent_tag_id = self.modules_repositories.NormaliseServiceTagId( service_id, service_parent_tag_id )
                        tag_ids.add( child_tag_id )
                        tag_ids.add( parent_tag_id )
                        parent_ids.append( ( child_tag_id, parent_tag_id ) )
                    self.modules_tag_parents.AddTagParents( service_id, parent_ids )
                    self.modules_tag_display.NotifyParentsChanged( service_id, tag_ids )
                    parents_or_siblings_changed = True
                    num_rows_processed += len( parent_ids )
                    if HydrusData.TimeHasPassedPrecise( precise_time_to_stop ) or job_key.IsCancelled():
                        return num_rows_processed
                del content_iterator_dict[ 'new_parents' ]
            #
            if 'deleted_parents' in content_iterator_dict:
                i = content_iterator_dict[ 'deleted_parents' ]
                for chunk in HydrusData.SplitIteratorIntoAutothrottledChunks( i, PAIR_ROWS_INITIAL_CHUNK_SIZE, precise_time_to_stop ):
                    parent_ids = []
                    tag_ids = set()
                    for ( service_child_tag_id, service_parent_tag_id ) in chunk:
                        child_tag_id = self.modules_repositories.NormaliseServiceTagId( service_id, service_child_tag_id )
                        parent_tag_id = self.modules_repositories.NormaliseServiceTagId( service_id, service_parent_tag_id )
                        tag_ids.add( child_tag_id )
                        tag_ids.add( parent_tag_id )
                        parent_ids.append( ( child_tag_id, parent_tag_id ) )
                    self.modules_tag_parents.DeleteTagParents( service_id, parent_ids )
                    self.modules_tag_display.NotifyParentsChanged( service_id, tag_ids )
                    parents_or_siblings_changed = True
                    num_rows = len( parent_ids )
                    num_rows_processed += num_rows
                    if HydrusData.TimeHasPassedPrecise( precise_time_to_stop ) or job_key.IsCancelled():
                        return num_rows_processed
                del content_iterator_dict[ 'deleted_parents' ]
        #
        if HC.CONTENT_TYPE_TAG_SIBLINGS in content_types_to_process:
            if 'new_siblings' in content_iterator_dict:
                i = content_iterator_dict[ 'new_siblings' ]
                for chunk in HydrusData.SplitIteratorIntoAutothrottledChunks( i, PAIR_ROWS_INITIAL_CHUNK_SIZE, precise_time_to_stop ):
                    sibling_ids = []
                    tag_ids = set()
                    for ( service_bad_tag_id, service_good_tag_id ) in chunk:
                        bad_tag_id = self.modules_repositories.NormaliseServiceTagId( service_id, service_bad_tag_id )
                        good_tag_id = self.modules_repositories.NormaliseServiceTagId( service_id, service_good_tag_id )
                        tag_ids.add( bad_tag_id )
                        tag_ids.add( good_tag_id )
                        sibling_ids.append( ( bad_tag_id, good_tag_id ) )
                    self.modules_tag_siblings.AddTagSiblings( service_id, sibling_ids )
                    self.modules_tag_display.NotifySiblingsChanged( service_id, tag_ids )
                    parents_or_siblings_changed = True
                    num_rows = len( sibling_ids )
                    num_rows_processed += num_rows
                    if HydrusData.TimeHasPassedPrecise( precise_time_to_stop ) or job_key.IsCancelled():
                        return num_rows_processed
                del content_iterator_dict[ 'new_siblings' ]
            #
            if 'deleted_siblings' in content_iterator_dict:
                i = content_iterator_dict[ 'deleted_siblings' ]
                for chunk in HydrusData.SplitIteratorIntoAutothrottledChunks( i, PAIR_ROWS_INITIAL_CHUNK_SIZE, precise_time_to_stop ):
                    sibling_ids = []
                    tag_ids = set()
                    for ( service_bad_tag_id, service_good_tag_id ) in chunk:
                        bad_tag_id = self.modules_repositories.NormaliseServiceTagId( service_id, service_bad_tag_id )
                        good_tag_id = self.modules_repositories.NormaliseServiceTagId( service_id, service_good_tag_id )
                        tag_ids.add( bad_tag_id )
                        tag_ids.add( good_tag_id )
                        sibling_ids.append( ( bad_tag_id, good_tag_id ) )
                    self.modules_tag_siblings.DeleteTagSiblings( service_id, sibling_ids )
                    self.modules_tag_display.NotifySiblingsChanged( service_id, tag_ids )
                    parents_or_siblings_changed = True
                    num_rows_processed += len( sibling_ids )
                    if HydrusData.TimeHasPassedPrecise( precise_time_to_stop ) or job_key.IsCancelled():
                        return num_rows_processed
                del content_iterator_dict[ 'deleted_siblings' ]
    finally:
        # fires on early return too, so partial parent/sibling work still
        # triggers a display-application refresh after the job commits
        if parents_or_siblings_changed:
            self._cursor_transaction_wrapper.pub_after_job( 'notify_new_tag_display_application' )
    # only reached when every requested content type was fully consumed
    self.modules_repositories.SetUpdateProcessed( service_id, content_hash, content_types_to_process )
    return num_rows_processed
def _PushRecentTags( self, service_key, tags ):
    """Record tags as recently used for a service, or clear the record.
    
    A tags value of None wipes all recent tags for the service; otherwise
    every given tag is (re)stamped with the current time.
    """
    service_id = self.modules_services.GetServiceId( service_key )
    if tags is None:
        # None is the 'forget everything recent for this service' signal
        self._Execute( 'DELETE FROM recent_tags WHERE service_id = ?;', ( service_id, ) )
        return
    timestamp = HydrusData.GetNow()
    # all tags pushed in one call share the same timestamp
    insert_rows = [ ( service_id, self.modules_tags.GetTagId( tag ), timestamp ) for tag in tags ]
    self._ExecuteMany( 'REPLACE INTO recent_tags ( service_id, tag_id, timestamp ) VALUES ( ?, ?, ? );', insert_rows )
def _Read( self, action, *args, **kwargs ):
    """Dispatch a named read request to its handler and return the result.
    
    Raises Exception for an unrecognised action string.
    """
    # map every read action to the callable that services it; entries that
    # need a fixed leading argument or ignore the caller's args use lambdas
    read_handlers = {
        'autocomplete_predicates': self._GetAutocompletePredicates,
        'boned_stats': self._GetBonedStats,
        'client_files_locations': self._GetClientFilesLocations,
        'deferred_physical_delete': self.modules_files_storage.GetDeferredPhysicalDelete,
        'duplicate_pairs_for_filtering': self._DuplicatesGetPotentialDuplicatePairsForFiltering,
        'file_duplicate_hashes': self.modules_files_duplicates.DuplicatesGetFileHashesByDuplicateType,
        'file_duplicate_info': self.modules_files_duplicates.DuplicatesGetFileDuplicateInfo,
        'file_hashes': self.modules_hashes.GetFileHashes,
        'file_history': self._GetFileHistory,
        'file_maintenance_get_job': self.modules_files_maintenance_queue.GetJob,
        'file_maintenance_get_job_counts': self.modules_files_maintenance_queue.GetJobCounts,
        'file_query_ids': self._GetHashIdsFromQuery,
        'file_system_predicates': self._GetFileSystemPredicates,
        'filter_existing_tags': self._FilterExistingTags,
        'filter_hashes': self._FilterHashesByService,
        'force_refresh_tags_managers': self._GetForceRefreshTagsManagers,
        'gui_session': self.modules_serialisable.GetGUISession,
        'hash_ids_to_hashes': self.modules_hashes_local_cache.GetHashIdsToHashes,
        'hash_status': self._GetHashStatus,
        'have_hashed_serialised_objects': self.modules_serialisable.HaveHashedJSONDumps,
        'ideal_client_files_locations': self._GetIdealClientFilesLocations,
        'imageboards': lambda *a, **k: self.modules_serialisable.GetYAMLDump( ClientDBSerialisable.YAML_DUMP_ID_IMAGEBOARD, *a, **k ),
        'inbox_hashes': self._FilterInboxHashes,
        'is_an_orphan': self._IsAnOrphan,
        'last_shutdown_work_time': self.modules_db_maintenance.GetLastShutdownWorkTime,
        'local_booru_share_keys': lambda *a, **k: self.modules_serialisable.GetYAMLDumpNames( ClientDBSerialisable.YAML_DUMP_ID_LOCAL_BOORU ),
        'local_booru_share': lambda *a, **k: self.modules_serialisable.GetYAMLDump( ClientDBSerialisable.YAML_DUMP_ID_LOCAL_BOORU, *a, **k ),
        'local_booru_shares': lambda *a, **k: self.modules_serialisable.GetYAMLDump( ClientDBSerialisable.YAML_DUMP_ID_LOCAL_BOORU ),
        'maintenance_due': self._GetMaintenanceDue,
        'media_predicates': self._GetMediaPredicates,
        'media_result': self._GetMediaResultFromHash,
        'media_results': self._GetMediaResultsFromHashes,
        'media_results_from_ids': self._GetMediaResults,
        'migration_get_mappings': self._MigrationGetMappings,
        'migration_get_pairs': self._MigrationGetPairs,
        'missing_repository_update_hashes': self.modules_repositories.GetRepositoryUpdateHashesIDoNotHave,
        'missing_thumbnail_hashes': self._GetRepositoryThumbnailHashesIDoNotHave,
        'num_deferred_file_deletes': lambda *a, **k: self.modules_files_storage.GetDeferredPhysicalDeleteCounts(),
        'nums_pending': self._GetNumsPending,
        'options': self._GetOptions,
        'pending': self._GetPending,
        'random_potential_duplicate_hashes': self._DuplicatesGetRandomPotentialDuplicateHashes,
        'recent_tags': self._GetRecentTags,
        'repository_progress': self.modules_repositories.GetRepositoryProgress,
        'repository_update_hashes_to_process': self.modules_repositories.GetRepositoryUpdateHashesICanProcess,
        'serialisable': self.modules_serialisable.GetJSONDump,
        'serialisable_simple': self.modules_serialisable.GetJSONSimple,
        'serialisable_named': self.modules_serialisable.GetJSONDumpNamed,
        'serialisable_names': self.modules_serialisable.GetJSONDumpNames,
        'serialisable_names_to_backup_timestamps': self.modules_serialisable.GetJSONDumpNamesToBackupTimestamps,
        'service_directory': self._GetServiceDirectoryHashes,
        'service_directories': self._GetServiceDirectoriesInfo,
        'service_filenames': self._GetServiceFilenames,
        'service_info': self._GetServiceInfo,
        'services': self.modules_services.GetServices,
        'similar_files_maintenance_status': self.modules_similar_files.GetMaintenanceStatus,
        'related_tags': self._GetRelatedTags,
        'tag_display_application': self.modules_tag_display.GetApplication,
        'tag_display_maintenance_status': self._CacheTagDisplayGetApplicationStatusNumbers,
        'tag_parents': self.modules_tag_parents.GetTagParents,
        'tag_siblings': self.modules_tag_siblings.GetTagSiblings,
        'tag_siblings_all_ideals': self.modules_tag_siblings.GetTagSiblingsIdeals,
        'tag_display_decorators': self.modules_tag_display.GetUIDecorators,
        'tag_siblings_and_parents_lookup': self.modules_tag_display.GetSiblingsAndParentsForTags,
        'tag_siblings_lookup': self.modules_tag_siblings.GetTagSiblingsForTags,
        'trash_hashes': self._GetTrashHashes,
        'potential_duplicates_count': self._DuplicatesGetPotentialDuplicatesCount,
        'url_statuses': self._GetURLStatuses,
        'vacuum_data': self.modules_db_maintenance.GetVacuumData
    }
    if action not in read_handlers:
        raise Exception( 'db received an unknown read command: ' + action )
    return read_handlers[ action ]( *args, **kwargs )
def _RecoverFromMissingDefinitions( self, content_type ):
    """Find definition ids that are referenced by content tables but missing
    from the master definition table, recovering them from the local cache
    where possible and otherwise purging their content rows.
    
    Currently only HC.CONTENT_TYPE_HASH is supported.
    """
    # this is not finished, but basics are there
    # remember this func uses a bunch of similar tech for the eventual orphan definition cleansing routine
    # we just have to extend modules functionality to cover all content tables and we are good to go
    if content_type == HC.CONTENT_TYPE_HASH:
        definition_column_name = 'hash_id'
    # eventually migrate this gubbins to cancellable async done in parts, which means generating, handling, and releasing the temp table name more cleverly
    # job presentation to UI
    all_tables_and_columns = []
    for module in self._modules:
        all_tables_and_columns.extend( module.GetTablesAndColumnsThatUseDefinitions( HC.CONTENT_TYPE_HASH ) )
    # random suffix so concurrent/aborted runs cannot collide on the table name
    temp_all_useful_definition_ids_table_name = 'durable_temp.all_useful_definition_ids_{}'.format( os.urandom( 8 ).hex() )
    self._Execute( 'CREATE TABLE IF NOT EXISTS {} ( {} INTEGER PRIMARY KEY );'.format( temp_all_useful_definition_ids_table_name, definition_column_name ) )
    try:
        num_to_do = 0
        # gather every definition id referenced anywhere into the temp table
        for ( table_name, column_name ) in all_tables_and_columns:
            query = 'INSERT OR IGNORE INTO {} ( {} ) SELECT DISTINCT {} FROM {};'.format(
                temp_all_useful_definition_ids_table_name,
                definition_column_name,
                column_name,
                table_name
            )
            self._Execute( query )
            num_to_do += self._GetRowCount()
        num_missing = 0
        num_recovered = 0
        batch_of_definition_ids = self._STL( self._Execute( 'SELECT {} FROM {} LIMIT 1024;'.format( definition_column_name, temp_all_useful_definition_ids_table_name ) ) )
        # BUGFIX: this was 'while len( ... ) > 1024', which a LIMIT 1024 fetch
        # can never satisfy, so the recovery loop never ran at all
        while len( batch_of_definition_ids ) > 0:
            for definition_id in batch_of_definition_ids:
                if not self.modules_hashes.HasHashId( definition_id ):
                    if content_type == HC.CONTENT_TYPE_HASH and self.modules_hashes_local_cache.HasHashId( definition_id ):
                        hash = self.modules_hashes_local_cache.GetHash( definition_id )
                        self._Execute( 'INSERT OR IGNORE INTO hashes ( hash_id, hash ) VALUES ( ?, ? );', ( definition_id, sqlite3.Binary( hash ) ) )
                        HydrusData.Print( '{} {} had no master definition, but I was able to recover from the local cache'.format( definition_column_name, definition_id ) )
                        num_recovered += 1
                    else:
                        HydrusData.Print( '{} {} had no master definition, it has been purged from the database!'.format( definition_column_name, definition_id ) )
                        for ( table_name, column_name ) in all_tables_and_columns:
                            self._Execute( 'DELETE FROM {} WHERE {} = ?;'.format( table_name, column_name ), ( definition_id, ) )
                        # tell user they will want to run clear orphan files, reset service cache info, and may need to recalc some autocomplete counts depending on total missing definitions
                        # I should clear service info based on content_type
                        num_missing += 1
            # BUGFIX: consume the processed ids so the next SELECT advances;
            # previously they were never removed, which would have looped forever
            self._ExecuteMany( 'DELETE FROM {} WHERE {} = ?;'.format( temp_all_useful_definition_ids_table_name, definition_column_name ), ( ( definition_id, ) for definition_id in batch_of_definition_ids ) )
            # BUGFIX: the refetch was missing the _STL wrapper, so it bound a
            # cursor of row tuples instead of a flat list of ids
            batch_of_definition_ids = self._STL( self._Execute( 'SELECT {} FROM {} LIMIT 1024;'.format( definition_column_name, temp_all_useful_definition_ids_table_name ) ) )
    finally:
        self._Execute( 'DROP TABLE {};'.format( temp_all_useful_definition_ids_table_name ) )
def _RegenerateLocalHashCache( self ):
    """Rebuild the local hash cache from scratch behind a modal popup."""
    maintenance_job = ClientThreading.JobKey( cancellable = True )
    try:
        maintenance_job.SetStatusTitle( 'regenerating local hash cache' )
        self._controller.pub( 'modal_message', maintenance_job )
        status_text = 'generating local hash cache'
        maintenance_job.SetVariable( 'popup_text_1', status_text )
        self._controller.frame_splash_status.SetSubtext( status_text )
        # the cache module does the actual repopulation work
        self.modules_hashes_local_cache.Repopulate()
    finally:
        # always close out the popup, even on error
        maintenance_job.SetVariable( 'popup_text_1', 'done!' )
        maintenance_job.Finish()
        maintenance_job.Delete( 5 )
def _RegenerateLocalTagCache( self ):
    """Rebuild the local tag cache from scratch behind a modal popup and
    notify listeners that tag data must be refreshed."""
    maintenance_job = ClientThreading.JobKey( cancellable = True )
    try:
        maintenance_job.SetStatusTitle( 'regenerating local tag cache' )
        self._controller.pub( 'modal_message', maintenance_job )
        status_text = 'generating local tag cache'
        maintenance_job.SetVariable( 'popup_text_1', status_text )
        self._controller.frame_splash_status.SetSubtext( status_text )
        # the cache module does the actual repopulation work
        self.modules_tags_local_cache.Repopulate()
    finally:
        maintenance_job.SetVariable( 'popup_text_1', 'done!' )
        maintenance_job.Finish()
        maintenance_job.Delete( 5 )
        # downstream consumers must re-read tag data once this commits
        self._cursor_transaction_wrapper.pub_after_job( 'notify_new_tag_display_application' )
        self._cursor_transaction_wrapper.pub_after_job( 'notify_new_force_refresh_tags_data' )
def _RegenerateTagCacheSearchableSubtagMaps( self, tag_service_key = None ):
    """Repopulate the searchable subtag maps of the tag fast search cache.
    
    Covers every real tag service when tag_service_key is None, otherwise
    just the given service. Cancellable via the modal popup.
    """
    regen_job = ClientThreading.JobKey( cancellable = True )
    try:
        regen_job.SetStatusTitle( 'regenerate tag fast search cache searchable subtag map' )
        self._controller.pub( 'modal_message', regen_job )
        if tag_service_key is None:
            tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
        else:
            tag_service_ids = ( self.modules_services.GetServiceId( tag_service_key ), )
        file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_TAG_LOOKUP_CACHES )
        def status_hook( text ):
            # second popup line carries the module's fine-grained progress
            regen_job.SetVariable( 'popup_text_2', text )
        # specific (file service x tag service) caches first
        for ( file_service_id, tag_service_id ) in itertools.product( file_service_ids, tag_service_ids ):
            if regen_job.IsCancelled():
                break
            progress_text = 'repopulating specific cache {}_{}'.format( file_service_id, tag_service_id )
            regen_job.SetVariable( 'popup_text_1', progress_text )
            self._controller.frame_splash_status.SetSubtext( progress_text )
            # tiny pause lets the UI repaint between heavy steps
            time.sleep( 0.01 )
            self.modules_tag_search.RegenerateSearchableSubtagMap( file_service_id, tag_service_id, status_hook = status_hook )
        # then the combined-file-domain cache for each tag service
        for tag_service_id in tag_service_ids:
            if regen_job.IsCancelled():
                break
            progress_text = 'repopulating combined cache {}'.format( tag_service_id )
            regen_job.SetVariable( 'popup_text_1', progress_text )
            self._controller.frame_splash_status.SetSubtext( progress_text )
            time.sleep( 0.01 )
            self.modules_tag_search.RegenerateSearchableSubtagMap( self.modules_services.combined_file_service_id, tag_service_id, status_hook = status_hook )
    finally:
        regen_job.DeleteVariable( 'popup_text_2' )
        regen_job.SetVariable( 'popup_text_1', 'done!' )
        regen_job.Finish()
        regen_job.Delete( 5 )
def _RegenerateTagCache( self, tag_service_key = None ):
    """Drop and regenerate the tag fast search cache.
    
    Covers every real tag service when tag_service_key is None, otherwise
    just the given service. Cancellable via the modal popup.
    """
    regen_job = ClientThreading.JobKey( cancellable = True )
    try:
        regen_job.SetStatusTitle( 'regenerating tag fast search cache' )
        self._controller.pub( 'modal_message', regen_job )
        if tag_service_key is None:
            tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
        else:
            tag_service_ids = ( self.modules_services.GetServiceId( tag_service_key ), )
        file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_TAG_LOOKUP_CACHES )
        def status_hook( text ):
            # second popup line carries fine-grained progress
            regen_job.SetVariable( 'popup_text_2', text )
        # specific (file service x tag service) caches first
        for ( file_service_id, tag_service_id ) in itertools.product( file_service_ids, tag_service_ids ):
            if regen_job.IsCancelled():
                break
            progress_text = 'generating specific cache {}_{}'.format( file_service_id, tag_service_id )
            regen_job.SetVariable( 'popup_text_1', progress_text )
            self._controller.frame_splash_status.SetSubtext( progress_text )
            # tiny pause lets the UI repaint between heavy steps
            time.sleep( 0.01 )
            self.modules_tag_search.Drop( file_service_id, tag_service_id )
            self.modules_tag_search.Generate( file_service_id, tag_service_id )
            self._CacheTagsPopulate( file_service_id, tag_service_id, status_hook = status_hook )
        # then the combined-file-domain cache for each tag service
        for tag_service_id in tag_service_ids:
            if regen_job.IsCancelled():
                break
            progress_text = 'generating combined cache {}'.format( tag_service_id )
            regen_job.SetVariable( 'popup_text_1', progress_text )
            self._controller.frame_splash_status.SetSubtext( progress_text )
            time.sleep( 0.01 )
            self.modules_tag_search.Drop( self.modules_services.combined_file_service_id, tag_service_id )
            self.modules_tag_search.Generate( self.modules_services.combined_file_service_id, tag_service_id )
            self._CacheTagsPopulate( self.modules_services.combined_file_service_id, tag_service_id, status_hook = status_hook )
    finally:
        regen_job.DeleteVariable( 'popup_text_2' )
        regen_job.SetVariable( 'popup_text_1', 'done!' )
        regen_job.Finish()
        regen_job.Delete( 5 )
def _RegenerateTagDisplayMappingsCache( self, tag_service_key = None ):
    """Drop and rebuild the 'display' (sibling/parent-collapsed) mappings caches.
    
    Clears the actual sibling/parent lookups so they will be reprocessed by
    later maintenance, then regenerates the specific (per file service)
    display caches and the combined-file display caches. Covers every real
    tag service, or just the one given by tag_service_key. Runs under a
    cancellable modal job and publishes refresh notifications afterwards.
    """
    
    job_key = ClientThreading.JobKey( cancellable = True )
    
    try:
        
        job_key.SetStatusTitle( 'regenerating tag display mappings cache' )
        
        self._controller.pub( 'modal_message', job_key )
        
        if tag_service_key is None:
            
            tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
            
        else:
            
            tag_service_ids = ( self.modules_services.GetServiceId( tag_service_key ), )
            
        
        file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES )
        
        for tag_service_id in tag_service_ids:
            
            # first off, we want to clear all the current siblings and parents so they will be reprocessed later
            # we'll also have to catch up the tag definition cache to account for this
            
            tag_ids_in_dispute = set()
            
            tag_ids_in_dispute.update( self.modules_tag_siblings.GetAllTagIds( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id ) )
            tag_ids_in_dispute.update( self.modules_tag_parents.GetAllTagIds( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id ) )
            
            self.modules_tag_siblings.ClearActual( tag_service_id )
            self.modules_tag_parents.ClearActual( tag_service_id )
            
            if len( tag_ids_in_dispute ) > 0:
                
                self._CacheTagsSyncTags( tag_service_id, tag_ids_in_dispute )
                
            
        
        for ( file_service_id, tag_service_id ) in itertools.product( file_service_ids, tag_service_ids ):
            
            if job_key.IsCancelled():
                
                break
                
            
            message = 'generating specific display cache {}_{}'.format( file_service_id, tag_service_id )
            
            # closure over this iteration's message; drives the secondary popup line
            def status_hook_1( s: str ):
                
                job_key.SetVariable( 'popup_text_2', s )
                self._controller.frame_splash_status.SetSubtext( '{} - {}'.format( message, s ) )
                
            
            job_key.SetVariable( 'popup_text_1', message )
            self._controller.frame_splash_status.SetSubtext( message )
            
            status_hook_1( 'dropping old data' )
            
            self.modules_mappings_cache_specific_display.Drop( file_service_id, tag_service_id )
            
            self.modules_mappings_cache_specific_display.Generate( file_service_id, tag_service_id, populate_from_storage = True, status_hook = status_hook_1 )
            
            job_key.SetVariable( 'popup_text_2', '' )
            self._controller.frame_splash_status.SetSubtext( '' )
            
        
        for tag_service_id in tag_service_ids:
            
            if job_key.IsCancelled():
                
                break
                
            
            message = 'generating combined display cache {}'.format( tag_service_id )
            
            def status_hook_2( s: str ):
                
                job_key.SetVariable( 'popup_text_2', s )
                self._controller.frame_splash_status.SetSubtext( '{} - {}'.format( message, s ) )
                
            
            job_key.SetVariable( 'popup_text_1', message )
            self._controller.frame_splash_status.SetSubtext( message )
            
            status_hook_2( 'dropping old data' )
            
            self._CacheCombinedFilesDisplayMappingsDrop( tag_service_id )
            
            self._CacheCombinedFilesDisplayMappingsGenerate( tag_service_id, status_hook = status_hook_2 )
            
            job_key.SetVariable( 'popup_text_2', '' )
            self._controller.frame_splash_status.SetSubtext( '' )
            
        
    finally:
        
        job_key.SetVariable( 'popup_text_1', 'done!' )
        
        job_key.Finish()
        
        job_key.Delete( 5 )
        
        # tell listeners the display mappings changed so UI can refresh
        self._cursor_transaction_wrapper.pub_after_job( 'notify_new_tag_display_application' )
        self._cursor_transaction_wrapper.pub_after_job( 'notify_new_force_refresh_tags_data' )
def _RegenerateTagDisplayPendingMappingsCache( self, tag_service_key = None ):
    """Regenerate only the 'pending' rows of the display mappings caches.
    
    Like _RegenerateTagDisplayMappingsCache but limited to pending data, so
    current mappings are left alone. Does the specific (per file service)
    caches first, then the combined-file caches. Cancellable modal job.
    """
    
    job_key = ClientThreading.JobKey( cancellable = True )
    
    try:
        
        job_key.SetStatusTitle( 'regenerating tag display pending mappings cache' )
        
        self._controller.pub( 'modal_message', job_key )
        
        if tag_service_key is None:
            
            tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
            
        else:
            
            tag_service_ids = ( self.modules_services.GetServiceId( tag_service_key ), )
            
        
        file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES )
        
        for ( file_service_id, tag_service_id ) in itertools.product( file_service_ids, tag_service_ids ):
            
            if job_key.IsCancelled():
                
                break
                
            
            message = 'regenerating specific display cache pending {}_{}'.format( file_service_id, tag_service_id )
            
            # closure over this iteration's message; drives the secondary popup line
            def status_hook_1( s: str ):
                
                job_key.SetVariable( 'popup_text_2', s )
                self._controller.frame_splash_status.SetSubtext( '{} - {}'.format( message, s ) )
                
            
            job_key.SetVariable( 'popup_text_1', message )
            self._controller.frame_splash_status.SetSubtext( message )
            
            self.modules_mappings_cache_specific_display.RegeneratePending( file_service_id, tag_service_id, status_hook = status_hook_1 )
            
            job_key.SetVariable( 'popup_text_2', '' )
            self._controller.frame_splash_status.SetSubtext( '' )
            
        
        for tag_service_id in tag_service_ids:
            
            if job_key.IsCancelled():
                
                break
                
            
            message = 'regenerating combined display cache pending {}'.format( tag_service_id )
            
            def status_hook_2( s: str ):
                
                job_key.SetVariable( 'popup_text_2', s )
                self._controller.frame_splash_status.SetSubtext( '{} - {}'.format( message, s ) )
                
            
            job_key.SetVariable( 'popup_text_1', message )
            self._controller.frame_splash_status.SetSubtext( message )
            
            self._CacheCombinedFilesDisplayMappingsRegeneratePending( tag_service_id, status_hook = status_hook_2 )
            
            job_key.SetVariable( 'popup_text_2', '' )
            self._controller.frame_splash_status.SetSubtext( '' )
            
        
    finally:
        
        job_key.SetVariable( 'popup_text_1', 'done!' )
        
        job_key.Finish()
        
        job_key.Delete( 5 )
        
        self._cursor_transaction_wrapper.pub_after_job( 'notify_new_force_refresh_tags_data' )
def _RegenerateTagMappingsCache( self, tag_service_key = None ):
    """Full drop-and-rebuild of the storage mappings caches.
    
    Clears the actual sibling/parent lookups, regenerates the specific
    (per file service) mappings caches and associated tag search caches,
    then the combined-file equivalents, committing between services so
    progress is durable. When doing all services, also repopulates the
    local tag cache at the end. Cancellable modal job.
    """
    
    job_key = ClientThreading.JobKey( cancellable = True )
    
    try:
        
        job_key.SetStatusTitle( 'regenerating tag mappings cache' )
        
        self._controller.pub( 'modal_message', job_key )
        
        if tag_service_key is None:
            
            tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
            
        else:
            
            tag_service_ids = ( self.modules_services.GetServiceId( tag_service_key ), )
            
        
        file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES )
        # only these file services also carry a fast tag lookup cache to rebuild
        tag_cache_file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_TAG_LOOKUP_CACHES )
        
        # siblings/parents will be recalculated by later display maintenance
        for tag_service_id in tag_service_ids:
            
            self.modules_tag_siblings.ClearActual( tag_service_id )
            self.modules_tag_parents.ClearActual( tag_service_id )
            
        
        time.sleep( 0.01 )
        
        for ( file_service_id, tag_service_id ) in itertools.product( file_service_ids, tag_service_ids ):
            
            if job_key.IsCancelled():
                
                break
                
            
            message = 'generating specific cache {}_{}'.format( file_service_id, tag_service_id )
            
            job_key.SetVariable( 'popup_text_1', message )
            self._controller.frame_splash_status.SetSubtext( message )
            
            time.sleep( 0.01 )
            
            if file_service_id in tag_cache_file_service_ids:
                
                self.modules_tag_search.Drop( file_service_id, tag_service_id )
                self.modules_tag_search.Generate( file_service_id, tag_service_id )
                
            
            self._CacheSpecificMappingsDrop( file_service_id, tag_service_id )
            self._CacheSpecificMappingsGenerate( file_service_id, tag_service_id )
            
            # keep each pair's work durable in case of interruption
            self._cursor_transaction_wrapper.CommitAndBegin()
            
        
        for tag_service_id in tag_service_ids:
            
            if job_key.IsCancelled():
                
                break
                
            
            message = 'generating combined cache {}'.format( tag_service_id )
            
            job_key.SetVariable( 'popup_text_1', message )
            self._controller.frame_splash_status.SetSubtext( message )
            
            time.sleep( 0.01 )
            
            self.modules_tag_search.Drop( self.modules_services.combined_file_service_id, tag_service_id )
            self.modules_tag_search.Generate( self.modules_services.combined_file_service_id, tag_service_id )
            
            self._CacheCombinedFilesMappingsDrop( tag_service_id )
            self._CacheCombinedFilesMappingsGenerate( tag_service_id )
            
            self._cursor_transaction_wrapper.CommitAndBegin()
            
        
        # only refresh the local tag cache on a full (all-services) regen
        if tag_service_key is None:
            
            message = 'generating local tag cache'
            
            job_key.SetVariable( 'popup_text_1', message )
            self._controller.frame_splash_status.SetSubtext( message )
            
            self.modules_tags_local_cache.Repopulate()
            
        
    finally:
        
        job_key.SetVariable( 'popup_text_1', 'done!' )
        
        job_key.Finish()
        
        job_key.Delete( 5 )
        
        self._cursor_transaction_wrapper.pub_after_job( 'notify_new_tag_display_application' )
        self._cursor_transaction_wrapper.pub_after_job( 'notify_new_force_refresh_tags_data' )
def _RegenerateTagParentsCache( self, only_these_service_ids = None ):
    """Regenerate the tag parents lookup for the given tag service ids.
    
    When only_these_service_ids is None, every real tag service is done.
    Publishes a display-application notification after the job commits.
    """
    
    if only_these_service_ids is not None:
        
        target_tag_service_ids = only_these_service_ids
        
    else:
        
        target_tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
        
    
    # as siblings may have changed, parents may have as well
    self.modules_tag_parents.Regen( target_tag_service_ids )
    
    self._cursor_transaction_wrapper.pub_after_job( 'notify_new_tag_display_application' )
def _RegenerateTagPendingMappingsCache( self, tag_service_key = None ):
    """Regenerate only the 'pending' rows of the storage mappings caches.
    
    Does the specific (per file service) caches first, then the
    combined-file caches, for every real tag service or just the one given
    by tag_service_key. Cancellable modal job.
    """
    
    job_key = ClientThreading.JobKey( cancellable = True )
    
    try:
        
        job_key.SetStatusTitle( 'regenerating tag pending mappings cache' )
        
        self._controller.pub( 'modal_message', job_key )
        
        if tag_service_key is None:
            
            tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
            
        else:
            
            tag_service_ids = ( self.modules_services.GetServiceId( tag_service_key ), )
            
        
        file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES )
        
        for ( file_service_id, tag_service_id ) in itertools.product( file_service_ids, tag_service_ids ):
            
            if job_key.IsCancelled():
                
                break
                
            
            message = 'regenerating specific cache pending {}_{}'.format( file_service_id, tag_service_id )
            
            # closure over this iteration's message; drives the secondary popup line
            def status_hook_1( s: str ):
                
                job_key.SetVariable( 'popup_text_2', s )
                self._controller.frame_splash_status.SetSubtext( '{} - {}'.format( message, s ) )
                
            
            job_key.SetVariable( 'popup_text_1', message )
            self._controller.frame_splash_status.SetSubtext( message )
            
            self._CacheSpecificMappingsRegeneratePending( file_service_id, tag_service_id, status_hook = status_hook_1 )
            
            job_key.SetVariable( 'popup_text_2', '' )
            self._controller.frame_splash_status.SetSubtext( '' )
            
        
        for tag_service_id in tag_service_ids:
            
            if job_key.IsCancelled():
                
                break
                
            
            message = 'regenerating combined cache pending {}'.format( tag_service_id )
            
            def status_hook_2( s: str ):
                
                job_key.SetVariable( 'popup_text_2', s )
                self._controller.frame_splash_status.SetSubtext( '{} - {}'.format( message, s ) )
                
            
            job_key.SetVariable( 'popup_text_1', message )
            self._controller.frame_splash_status.SetSubtext( message )
            
            self._CacheCombinedFilesMappingsRegeneratePending( tag_service_id, status_hook = status_hook_2 )
            
            job_key.SetVariable( 'popup_text_2', '' )
            self._controller.frame_splash_status.SetSubtext( '' )
            
        
    finally:
        
        job_key.SetVariable( 'popup_text_1', 'done!' )
        
        job_key.Finish()
        
        job_key.Delete( 5 )
        
        self._cursor_transaction_wrapper.pub_after_job( 'notify_new_force_refresh_tags_data' )
def _RelocateClientFiles( self, prefix, source, dest ):
    """Move one client files prefix directory from source to dest and update the location record.
    
    :param prefix: the two-character-style storage prefix directory name.
    :param source: absolute path of the current parent location.
    :param dest: absolute path of the new parent location; must already exist.
    :raises Exception: if dest does not exist.
    """
    
    if not os.path.exists( dest ):
        
        raise Exception( 'Was commanded to move prefix "{}" from "{}" to "{}", but that destination does not exist!'.format( prefix, source, dest ) )
        
    
    full_source = os.path.join( source, prefix )
    full_dest = os.path.join( dest, prefix )
    
    if os.path.exists( full_source ):
        
        # merge rather than move, in case dest already has partial content
        HydrusPaths.MergeTree( full_source, full_dest )
        
    elif not os.path.exists( full_dest ):
        
        # nothing to move; just make sure the destination directory exists
        HydrusPaths.MakeSureDirectoryExists( full_dest )
        
    
    portable_dest = HydrusPaths.ConvertAbsPathToPortablePath( dest )
    
    self._Execute( 'UPDATE client_files_locations SET location = ? WHERE prefix = ?;', ( portable_dest, prefix ) )
    
    if os.path.exists( full_source ):
        
        # best-effort cleanup of any leftovers; narrow except so we do not
        # swallow KeyboardInterrupt/SystemExit (the old bare 'except:' did)
        try:
            
            HydrusPaths.RecyclePath( full_source )
            
        except Exception:
            
            pass
            
        
    
def _RepairClientFiles( self, correct_rows ):
    """For each ( prefix, correct_location ) pair, ensure the directory exists and point the db location record at it."""
    
    for ( prefix, correct_location ) in correct_rows:
        
        # make sure the on-disk directory for this prefix is present
        abs_prefix_dir = os.path.join( correct_location, prefix )
        
        HydrusPaths.MakeSureDirectoryExists( abs_prefix_dir )
        
        # store the location in portable form so the db survives install moves
        portable_location = HydrusPaths.ConvertAbsPathToPortablePath( correct_location )
        
        self._Execute( 'UPDATE client_files_locations SET location = ? WHERE prefix = ?;', ( portable_location, prefix ) )
        
    
def _RepairDB( self, version ):
    """Boot-time integrity repair: detect and regenerate missing cache tables and options.
    
    Runs after the base HydrusDB repair. Checks for missing mappings cache
    tables (full regen if any are gone), then — only if no full regen
    happened — missing autocomplete count tables and tag search tables,
    and finally a missing client options object. Each problem prompts the
    user before regenerating.
    
    :param version: the db version being booted, used to know which cache
        table layouts should exist.
    """
    
    # migrate most of this gubbins to the new modules system, and HydrusDB tbh!
    
    self._controller.frame_splash_status.SetText( 'checking database' )
    
    HydrusDB.HydrusDB._RepairDB( self, version )
    
    self._weakref_media_result_cache = ClientMediaResultCache.MediaResultCache()
    
    tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
    file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES )
    
    # caches
    
    existing_cache_tables = self._STS( self._Execute( 'SELECT name FROM external_caches.sqlite_master WHERE type = ?;', ( 'table', ) ) )
    
    # build the set of cache table names that _should_ exist for this version
    mappings_cache_tables = set()
    
    for ( file_service_id, tag_service_id ) in itertools.product( file_service_ids, tag_service_ids ):
        
        # specific storage mappings caches only have this layout from v465 on
        if version >= 465:
            
            mappings_cache_tables.update( ( name.split( '.' )[1] for name in ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id ) ) )
            
        
        mappings_cache_tables.update( ( name.split( '.' )[1] for name in ClientDBMappingsCacheSpecificDisplay.GenerateSpecificDisplayMappingsCacheTableNames( file_service_id, tag_service_id ) ) )
        
    
    we_did_a_full_regen = False
    
    missing_main_tables = sorted( mappings_cache_tables.difference( existing_cache_tables ) )
    
    if len( missing_main_tables ) > 0:
        
        HydrusData.DebugPrint( 'The missing mapping cache tables were:' )
        HydrusData.DebugPrint( os.linesep.join( missing_main_tables ) )
        
        message = 'On boot, {} mapping caches tables were missing! This could be due to the entire \'caches\' database file being missing or due to some other problem. All of this data can be regenerated.'.format( len( missing_main_tables ) )
        message += os.linesep * 2
        message += 'If you wish, click ok on this message and the client will recreate and repopulate these tables with the correct data. This may take a few minutes. But if you want to solve this problem otherwise, kill the hydrus process now.'
        message += os.linesep * 2
        message += 'If you do not already know what caused this, it was likely a hard drive fault--either due to a recent abrupt power cut or actual hardware failure. Check \'help my db is broke.txt\' in the install_dir/db directory as soon as you can.'
        
        BlockingSafeShowMessage( message )
        
        self._RegenerateTagMappingsCache()
        
        we_did_a_full_regen = True
        
    
    # the finer-grained checks are redundant after a full regen
    if not we_did_a_full_regen:
        
        # autocomplete
        
        ( missing_storage_tag_count_service_pairs, missing_display_tag_count_service_pairs ) = self.modules_mappings_counts.GetMissingTagCountServicePairs()
        
        # unfortunately, for now, due to display maintenance being tag service wide, I can't regen individual lads here
        # maybe in future I can iterate all sibs/parents and just do it here and now with addimplication
        missing_storage_tag_count_tag_service_ids = { tag_service_id for ( file_service_id, tag_service_id ) in missing_storage_tag_count_service_pairs }
        missing_display_tag_count_tag_service_ids = { tag_service_id for ( file_service_id, tag_service_id ) in missing_display_tag_count_service_pairs }
        
        # a storage regen will cover a display regen
        missing_display_tag_count_tag_service_ids = missing_display_tag_count_tag_service_ids.difference( missing_storage_tag_count_tag_service_ids )
        
        if len( missing_display_tag_count_tag_service_ids ) > 0:
            
            missing_display_tag_count_tag_service_ids = sorted( missing_display_tag_count_tag_service_ids )
            
            message = 'On boot, some important tag count tables for the display context were missing! You should have already had a notice about this. You may have had other problems earlier, but this particular problem is completely recoverable and results in no lost data. The relevant tables have been recreated and will now be repopulated. The services about to be worked on are:'
            message += os.linesep * 2
            message += os.linesep.join( ( str( t ) for t in missing_display_tag_count_tag_service_ids ) )
            message += os.linesep * 2
            message += 'If you want to go ahead, click ok on this message and the client will fill these tables with the correct data. It may take some time. If you want to solve this problem otherwise, kill the hydrus process now.'
            
            BlockingSafeShowMessage( message )
            
            for tag_service_id in missing_display_tag_count_tag_service_ids:
                
                tag_service_key = self.modules_services.GetService( tag_service_id ).GetServiceKey()
                
                self._RegenerateTagDisplayMappingsCache( tag_service_key = tag_service_key )
                
            
            self.modules_db_maintenance.TouchAnalyzeNewTables()
            
            self._cursor_transaction_wrapper.CommitAndBegin()
            
        
        if len( missing_storage_tag_count_tag_service_ids ) > 0:
            
            missing_storage_tag_count_tag_service_ids = sorted( missing_storage_tag_count_tag_service_ids )
            
            message = 'On boot, some important tag count tables for the storage context were missing! You should have already had a notice about this. You may have had other problems earlier, but this particular problem is completely recoverable and results in no lost data. The relevant tables have been recreated and will now be repopulated. The services about to be worked on are:'
            message += os.linesep * 2
            message += os.linesep.join( ( str( t ) for t in missing_storage_tag_count_tag_service_ids ) )
            message += os.linesep * 2
            message += 'If you want to go ahead, click ok on this message and the client will fill these tables with the correct data. It may take some time. If you want to solve this problem otherwise, kill the hydrus process now.'
            
            BlockingSafeShowMessage( message )
            
            for tag_service_id in missing_storage_tag_count_tag_service_ids:
                
                tag_service_key = self.modules_services.GetService( tag_service_id ).GetServiceKey()
                
                self._RegenerateTagMappingsCache( tag_service_key = tag_service_key )
                
            
            self.modules_db_maintenance.TouchAnalyzeNewTables()
            
            self._cursor_transaction_wrapper.CommitAndBegin()
            
        
        # tag search, this requires autocomplete and siblings/parents in place
        
        missing_tag_search_service_pairs = self.modules_tag_search.GetMissingTagSearchServicePairs()
        
        if len( missing_tag_search_service_pairs ) > 0:
            
            missing_tag_search_service_pairs = sorted( missing_tag_search_service_pairs )
            
            message = 'On boot, some important tag search tables were missing! You should have already had a notice about this. You may have had other problems earlier, but this particular problem is completely recoverable and results in no lost data. The relevant tables have been recreated and will now be repopulated. The service pairs about to be worked on are:'
            message += os.linesep * 2
            message += os.linesep.join( ( str( t ) for t in missing_tag_search_service_pairs ) )
            message += os.linesep * 2
            message += 'If you want to go ahead, click ok on this message and the client will fill these tables with the correct data. It may take some time. If you want to solve this problem otherwise, kill the hydrus process now.'
            
            BlockingSafeShowMessage( message )
            
            for ( file_service_id, tag_service_id ) in missing_tag_search_service_pairs:
                
                self.modules_tag_search.Drop( file_service_id, tag_service_id )
                self.modules_tag_search.Generate( file_service_id, tag_service_id )
                
                self._CacheTagsPopulate( file_service_id, tag_service_id )
                
            
            self.modules_db_maintenance.TouchAnalyzeNewTables()
            
            self._cursor_transaction_wrapper.CommitAndBegin()
            
        
    
    #
    
    new_options = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_CLIENT_OPTIONS )
    
    if new_options is None:
        
        message = 'On boot, your main options object was missing!'
        message += os.linesep * 2
        message += 'If you wish, click ok on this message and the client will re-add fresh options with default values. But if you want to solve this problem otherwise, kill the hydrus process now.'
        message += os.linesep * 2
        message += 'If you do not already know what caused this, it was likely a hard drive fault--either due to a recent abrupt power cut or actual hardware failure. Check \'help my db is broke.txt\' in the install_dir/db directory as soon as you can.'
        
        BlockingSafeShowMessage( message )
        
        new_options = ClientOptions.ClientOptions()
        
        new_options.SetSimpleDownloaderFormulae( ClientDefaults.GetDefaultSimpleDownloaderFormulae() )
        
        self.modules_serialisable.SetJSONDump( new_options )
        
    
    # an explicit empty string so we don't linger on 'checking database' if the next stage lags a bit on its own update. no need to give anyone heart attacks
    self._controller.frame_splash_status.SetText( '' )
def _RepairInvalidTags( self, job_key: typing.Optional[ ClientThreading.JobKey ] = None ):
    """Scan every tag for invalid text and rewrite bad tags to cleaned, unique replacements.
    
    Pass one scans all tag ids in chunks, collecting ( tag_id, tag,
    cleaned_tag ) for any tag whose cleaned form differs. Pass two repoints
    each bad tag id at a fresh namespace/subtag pair (de-duplicated against
    existing tags) and refreshes the local cache and tag search caches.
    
    :param job_key: optional JobKey for progress reporting and cancellation.
    """
    
    invalid_tag_ids_and_tags = set()
    
    BLOCK_SIZE = 1000
    
    select_statement = 'SELECT tag_id FROM tags;'
    
    bad_tag_count = 0
    
    for ( group_of_tag_ids, num_done, num_to_do ) in HydrusDB.ReadLargeIdQueryInSeparateChunks( self._c, select_statement, BLOCK_SIZE ):
        
        if job_key is not None:
            
            if job_key.IsCancelled():
                
                break
                
            
            message = 'Scanning tags: {} - Bad Found: {}'.format( HydrusData.ConvertValueRangeToPrettyString( num_done, num_to_do ), HydrusData.ToHumanInt( bad_tag_count ) )
            
            job_key.SetVariable( 'popup_text_1', message )
            
        
        for tag_id in group_of_tag_ids:
            
            tag = self.modules_tags_local_cache.GetTag( tag_id )
            
            # was a bare 'except:'; Exception keeps the best-effort clean
            # while letting KeyboardInterrupt/SystemExit propagate
            try:
                
                cleaned_tag = HydrusTags.CleanTag( tag )
                
                HydrusTags.CheckTagNotEmpty( cleaned_tag )
                
            except Exception:
                
                cleaned_tag = 'unrecoverable invalid tag'
                
            
            if tag != cleaned_tag:
                
                invalid_tag_ids_and_tags.add( ( tag_id, tag, cleaned_tag ) )
                
                bad_tag_count += 1
                
            
        
    
    file_service_ids = list( self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_TAG_LOOKUP_CACHES ) )
    file_service_ids.append( self.modules_services.combined_file_service_id )
    
    tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
    
    for ( i, ( tag_id, tag, cleaned_tag ) ) in enumerate( invalid_tag_ids_and_tags ):
        
        if job_key is not None:
            
            if job_key.IsCancelled():
                
                break
                
            
            message = 'Fixing bad tags: {}'.format( HydrusData.ConvertValueRangeToPrettyString( i + 1, bad_tag_count ) )
            
            job_key.SetVariable( 'popup_text_1', message )
            
        
        # now find an entirely new namespace_id, subtag_id pair for this tag
        
        existing_tags = set()
        
        potential_new_cleaned_tag = cleaned_tag
        
        # walk 'name', 'name (1)', ... until we hit a tag that does not exist yet
        while self.modules_tags.TagExists( potential_new_cleaned_tag ):
            
            existing_tags.add( potential_new_cleaned_tag )
            
            potential_new_cleaned_tag = HydrusData.GetNonDupeName( cleaned_tag, existing_tags )
            
        
        cleaned_tag = potential_new_cleaned_tag
        
        ( namespace, subtag ) = HydrusTags.SplitTag( cleaned_tag )
        
        namespace_id = self.modules_tags.GetNamespaceId( namespace )
        subtag_id = self.modules_tags.GetSubtagId( subtag )
        
        self.modules_tags.UpdateTagId( tag_id, namespace_id, subtag_id )
        self.modules_tags_local_cache.UpdateTagInCache( tag_id, cleaned_tag )
        
        # refresh the fast search entry anywhere this tag id is cached
        for ( file_service_id, tag_service_id ) in itertools.product( file_service_ids, tag_service_ids ):
            
            if self.modules_tag_search.HasTag( file_service_id, tag_service_id, tag_id ):
                
                self.modules_tag_search.DeleteTags( file_service_id, tag_service_id, ( tag_id, ) )
                
                self.modules_tag_search.AddTags( file_service_id, tag_service_id, ( tag_id, ) )
                
            
        
        # the bad tag may not even be printable; narrowed from a bare 'except:'
        try:
            
            HydrusData.Print( 'Invalid tag fixing: {} replaced with {}'.format( repr( tag ), repr( cleaned_tag ) ) )
            
        except Exception:
            
            HydrusData.Print( 'Invalid tag fixing: Could not even print the bad tag to the log! It is now known as {}'.format( repr( cleaned_tag ) ) )
            
        
    
    if job_key is not None:
        
        if not job_key.IsCancelled():
            
            if bad_tag_count == 0:
                
                message = 'Invalid tag scanning: No bad tags found!'
                
            else:
                
                message = 'Invalid tag scanning: {} bad tags found and fixed! They have been written to the log.'.format( HydrusData.ToHumanInt( bad_tag_count ) )
                
                self._cursor_transaction_wrapper.pub_after_job( 'notify_new_force_refresh_tags_data' )
                
            
            HydrusData.Print( message )
            
            job_key.SetVariable( 'popup_text_1', message )
            
        
        job_key.Finish()
        
    
def _RepopulateMappingsFromCache( self, tag_service_key = None, job_key = None ):
    """Recover storage mappings rows from the combined-local-file specific cache.
    
    For each tag service, walks the current local files in chunks and
    INSERT OR IGNOREs any current/deleted/pending mapping rows that exist
    in the specific cache but are missing from master storage. Reports
    progress and total rows recovered via job_key when given.
    """
    
    BLOCK_SIZE = 10000
    
    num_rows_recovered = 0
    
    if tag_service_key is None:
        
        tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
        
    else:
        
        tag_service_ids = ( self.modules_services.GetServiceId( tag_service_key ), )
        
    
    for tag_service_id in tag_service_ids:
        
        service = self.modules_services.GetService( tag_service_id )
        
        name = service.GetName()
        
        # the cache tables are the recovery source; the master tables are the destination
        ( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( self.modules_services.combined_local_file_service_id, tag_service_id )
        
        ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = ClientDBMappingsStorage.GenerateMappingsTableNames( tag_service_id )
        
        current_files_table_name = ClientDBFilesStorage.GenerateFilesTableName( self.modules_services.combined_local_file_service_id, HC.CONTENT_STATUS_CURRENT )
        
        select_statement = 'SELECT hash_id FROM {};'.format( current_files_table_name )
        
        for ( group_of_hash_ids, num_done, num_to_do ) in HydrusDB.ReadLargeIdQueryInSeparateChunks( self._c, select_statement, BLOCK_SIZE ):
            
            if job_key is not None:
                
                message = 'Doing "{}"\u2026: {}'.format( name, HydrusData.ConvertValueRangeToPrettyString( num_done, num_to_do ) )
                message += os.linesep * 2
                message += 'Total rows recovered: {}'.format( HydrusData.ToHumanInt( num_rows_recovered ) )
                
                job_key.SetVariable( 'popup_text_1', message )
                
                if job_key.IsCancelled():
                    
                    return
                    
                
            
            with self._MakeTemporaryIntegerTable( group_of_hash_ids, 'hash_id' ) as temp_table_name:
                
                # temp hashes to mappings
                insert_template = 'INSERT OR IGNORE INTO {} ( tag_id, hash_id ) SELECT tag_id, hash_id FROM {} CROSS JOIN {} USING ( hash_id );'
                
                self._Execute( insert_template.format( current_mappings_table_name, temp_table_name, cache_current_mappings_table_name ) )
                
                num_rows_recovered += self._GetRowCount()
                
                self._Execute( insert_template.format( deleted_mappings_table_name, temp_table_name, cache_deleted_mappings_table_name ) )
                
                num_rows_recovered += self._GetRowCount()
                
                self._Execute( insert_template.format( pending_mappings_table_name, temp_table_name, cache_pending_mappings_table_name ) )
                
                num_rows_recovered += self._GetRowCount()
                
            
        
    
    if job_key is not None:
        
        job_key.SetVariable( 'popup_text_1', 'Done! Rows recovered: {}'.format( HydrusData.ToHumanInt( num_rows_recovered ) ) )
        
        job_key.Finish()
        
    
def _RepopulateTagCacheMissingSubtags( self, tag_service_key = None ):
    """Repopulate missing subtag rows in the tag fast search caches.
    
    Walks the specific (per file service) caches and then the combined-file
    caches, asking the tag search module to fill in any absent subtags.
    Covers every real tag service, or just the one given by
    tag_service_key. Cancellable modal job.
    """
    
    job_key = ClientThreading.JobKey( cancellable = True )
    
    try:
        
        job_key.SetStatusTitle( 'repopulate tag fast search cache subtags' )
        
        self._controller.pub( 'modal_message', job_key )
        
        if tag_service_key is None:
            
            tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
            
        else:
            
            tag_service_ids = ( self.modules_services.GetServiceId( tag_service_key ), )
            
        
        file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_TAG_LOOKUP_CACHES )
        
        # NOTE(review): defined but not passed below — presumably kept for parity
        # with the sibling regen methods; confirm whether RepopulateMissingSubtags
        # should take it
        def status_hook( s ):
            
            job_key.SetVariable( 'popup_text_2', s )
            
        
        for ( file_service_id, tag_service_id ) in itertools.product( file_service_ids, tag_service_ids ):
            
            if job_key.IsCancelled():
                
                break
                
            
            message = 'repopulating specific cache {}_{}'.format( file_service_id, tag_service_id )
            
            job_key.SetVariable( 'popup_text_1', message )
            self._controller.frame_splash_status.SetSubtext( message )
            
            # brief yield so the UI thread can repaint the status
            time.sleep( 0.01 )
            
            self.modules_tag_search.RepopulateMissingSubtags( file_service_id, tag_service_id )
            
        
        for tag_service_id in tag_service_ids:
            
            if job_key.IsCancelled():
                
                break
                
            
            message = 'repopulating combined cache {}'.format( tag_service_id )
            
            job_key.SetVariable( 'popup_text_1', message )
            self._controller.frame_splash_status.SetSubtext( message )
            
            time.sleep( 0.01 )
            
            self.modules_tag_search.RepopulateMissingSubtags( self.modules_services.combined_file_service_id, tag_service_id )
            
        
    finally:
        
        job_key.DeleteVariable( 'popup_text_2' )
        
        job_key.SetVariable( 'popup_text_1', 'done!' )
        
        job_key.Finish()
        
        job_key.Delete( 5 )
def _RepopulateTagDisplayMappingsCache( self, tag_service_key = None ):
    """Re-add every current file to the specific storage and display mappings caches.
    
    Walks each file service's current-files table in chunks of 1024 hash
    ids and calls the AddFiles routines for every tag service, repopulating
    any rows that had gone missing. Cancellable modal job; publishes a
    refresh notification afterwards.
    """
    
    job_key = ClientThreading.JobKey( cancellable = True )
    
    try:
        
        job_key.SetStatusTitle( 'repopulating tag display mappings cache' )
        
        self._controller.pub( 'modal_message', job_key )
        
        if tag_service_key is None:
            
            tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
            
        else:
            
            tag_service_ids = ( self.modules_services.GetServiceId( tag_service_key ), )
            
        
        file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES )
        
        for ( i, file_service_id ) in enumerate( file_service_ids ):
            
            if job_key.IsCancelled():
                
                break
                
            
            table_name = ClientDBFilesStorage.GenerateFilesTableName( file_service_id, HC.CONTENT_STATUS_CURRENT )
            
            for ( group_of_ids, num_done, num_to_do ) in HydrusDB.ReadLargeIdQueryInSeparateChunks( self._c, 'SELECT hash_id FROM {};'.format( table_name ), 1024 ):
                
                message = 'repopulating {} {}'.format( HydrusData.ConvertValueRangeToPrettyString( i + 1, len( file_service_ids ) ), HydrusData.ConvertValueRangeToPrettyString( num_done, num_to_do ) )
                
                job_key.SetVariable( 'popup_text_1', message )
                self._controller.frame_splash_status.SetSubtext( message )
                
                with self._MakeTemporaryIntegerTable( group_of_ids, 'hash_id' ) as temp_hash_id_table_name:
                    
                    for tag_service_id in tag_service_ids:
                        
                        # storage cache first, then the display cache built on it
                        self._CacheSpecificMappingsAddFiles( file_service_id, tag_service_id, group_of_ids, temp_hash_id_table_name )
                        self.modules_mappings_cache_specific_display.AddFiles( file_service_id, tag_service_id, group_of_ids, temp_hash_id_table_name )
                        
                    
                
            
            job_key.SetVariable( 'popup_text_2', '' )
            self._controller.frame_splash_status.SetSubtext( '' )
            
        
    finally:
        
        job_key.SetVariable( 'popup_text_1', 'done!' )
        
        job_key.Finish()
        
        job_key.Delete( 5 )
        
        self._cursor_transaction_wrapper.pub_after_job( 'notify_new_force_refresh_tags_data' )
def _ReportOverupdatedDB( self, version ):
    """Warn the user, via a blocking dialog, that the db version is ahead of the running software."""
    
    db_version_pretty = HydrusData.ToHumanInt( version )
    software_version_pretty = HydrusData.ToHumanInt( HC.SOFTWARE_VERSION )
    
    warning = 'This client\'s database is version {}, but the software is version {}! This situation only sometimes works, and when it does not, it can break things! If you are not sure what is going on, or if you accidentally installed an older version of the software to a newer database, force-kill this client in Task Manager right now. Otherwise, ok this dialog box to continue.'.format( db_version_pretty, software_version_pretty )
    
    BlockingSafeShowMessage( warning )
def _ReportUnderupdatedDB( self, version ):
    """Warn the user, via a blocking dialog, that the db is many versions behind the running software."""
    
    db_version_pretty = HydrusData.ToHumanInt( version )
    software_version_pretty = HydrusData.ToHumanInt( HC.SOFTWARE_VERSION )
    
    warning = 'This client\'s database is version {}, but the software is significantly later, {}! Trying to update many versions in one go can be dangerous due to bitrot. I suggest you try at most to only do 10 versions at once. If you want to try a big jump anyway, you should make sure you have a backup beforehand so you can roll back to it in case the update makes your db unbootable. If you would rather try smaller updates, or you do not have a backup, force-kill this client in Task Manager right now. Otherwise, ok this dialog box to continue.'.format( db_version_pretty, software_version_pretty )
    
    BlockingSafeShowMessage( warning )
def _ResetRepository( self, service ):
    """Delete and recreate the given repository service under a modal progress popup.
    
    The service is removed and re-added with the same key, type, name, and
    dictionary, and the usual account/pending/service notifications are
    queued so the rest of the client resyncs after the job commits.
    """
    
    ( service_key, service_type, name, dictionary ) = service.ToTuple()
    
    service_id = self.modules_services.GetServiceId( service_key )
    
    status_prefix = 'resetting ' + name
    
    reset_job_key = ClientThreading.JobKey()
    
    try:
        
        reset_job_key.SetVariable( 'popup_text_1', status_prefix + ': deleting service' )
        
        self._controller.pub( 'modal_message', reset_job_key )
        
        self._DeleteService( service_id )
        
        reset_job_key.SetVariable( 'popup_text_1', status_prefix + ': recreating service' )
        
        self._AddService( service_key, service_type, name, dictionary )
        
        # downstream systems need to resync accounts, pending counts, and service lists
        for notification_name in ( 'notify_account_sync_due', 'notify_new_pending', 'notify_new_services_data', 'notify_new_services_gui' ):
            
            self._cursor_transaction_wrapper.pub_after_job( notification_name )
            
        
        reset_job_key.SetVariable( 'popup_text_1', status_prefix + ': done!' )
        
    finally:
        
        reset_job_key.Finish()
        
    
def _ResetRepositoryProcessing( self, service_key: bytes, content_types ):
    """Wipe a repository's processed content of the given types and schedule
    reprocessing, clearing/regenerating the affected caches as needed.
    
    :param service_key: key of the repository service to reset
    :param content_types: iterable of HC.CONTENT_TYPE_* constants selecting
        which processed content (files, mappings, parents, siblings) to drop
    """
    
    service_id = self.modules_services.GetServiceId( service_key )
    
    service = self.modules_services.GetService( service_id )
    
    service_type = service.GetServiceType()
    
    prefix = 'resetting content'
    
    job_key = ClientThreading.JobKey()
    
    try:
        
        # service_info rows of these types become stale and are deleted at the end
        service_info_types_to_delete = []
        
        job_key.SetVariable( 'popup_text_1', '{}: calculating'.format( prefix ) )
        
        self._controller.pub( 'modal_message', job_key )
        
        # note that siblings/parents do not do a cachetags clear-regen because they only actually delete ideal, not actual
        
        if HC.CONTENT_TYPE_FILES in content_types:
            
            service_info_types_to_delete.extend( { HC.SERVICE_INFO_NUM_FILES, HC.SERVICE_INFO_NUM_VIEWABLE_FILES, HC.SERVICE_INFO_TOTAL_SIZE, HC.SERVICE_INFO_NUM_DELETED_FILES } )
            
            self._Execute( 'DELETE FROM remote_thumbnails WHERE service_id = ?;', ( service_id, ) )
            
            if service_type in HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES:
                
                # keep_pending: pending uploads survive the reset
                self.modules_files_storage.ClearFilesTables( service_id, keep_pending = True )
                
            
            if service_type in HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES:
                
                tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
                
                for tag_service_id in tag_service_ids:
                    
                    self._CacheSpecificMappingsClear( service_id, tag_service_id, keep_pending = True )
                    
                    if service_type in HC.FILE_SERVICES_WITH_SPECIFIC_TAG_LOOKUP_CACHES:
                        
                        # not clear since siblings and parents can contribute
                        self.modules_tag_search.Drop( service_id, tag_service_id )
                        self.modules_tag_search.Generate( service_id, tag_service_id )
                        
                        self._CacheTagsPopulate( service_id, tag_service_id )
                        
                    
                
            
        
        if HC.CONTENT_TYPE_MAPPINGS in content_types:
            
            service_info_types_to_delete.extend( { HC.SERVICE_INFO_NUM_FILES, HC.SERVICE_INFO_NUM_TAGS, HC.SERVICE_INFO_NUM_MAPPINGS, HC.SERVICE_INFO_NUM_DELETED_MAPPINGS } )
            
            if service_type in HC.REAL_TAG_SERVICES:
                
                self.modules_mappings_storage.ClearMappingsTables( service_id )
                
                # combined-files mappings cache for this tag service must be rebuilt too
                self._CacheCombinedFilesMappingsClear( service_id, keep_pending = True )
                
                self.modules_tag_search.Drop( self.modules_services.combined_file_service_id, service_id )
                self.modules_tag_search.Generate( self.modules_services.combined_file_service_id, service_id )
                
                self._CacheTagsPopulate( self.modules_services.combined_file_service_id, service_id )
                
                file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES )
                tag_cache_file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_TAG_LOOKUP_CACHES )
                
                for file_service_id in file_service_ids:
                    
                    self._CacheSpecificMappingsClear( file_service_id, service_id, keep_pending = True )
                    
                    if file_service_id in tag_cache_file_service_ids:
                        
                        # not clear since siblings and parents can contribute
                        self.modules_tag_search.Drop( file_service_id, service_id )
                        self.modules_tag_search.Generate( file_service_id, service_id )
                        
                        self._CacheTagsPopulate( file_service_id, service_id )
                        
                    
                
            
        
        if HC.CONTENT_TYPE_TAG_PARENTS in content_types:
            
            self._Execute( 'DELETE FROM tag_parents WHERE service_id = ?;', ( service_id, ) )
            # only petitioned rows go; pending petitions from the user are kept
            self._Execute( 'DELETE FROM tag_parent_petitions WHERE service_id = ? AND status = ?;', ( service_id, HC.CONTENT_STATUS_PETITIONED ) )
            
            ( cache_ideal_tag_parents_lookup_table_name, cache_actual_tag_parents_lookup_table_name ) = ClientDBTagParents.GenerateTagParentsLookupCacheTableNames( service_id )
            
            # do not delete from actual!
            
            self._Execute( 'DELETE FROM {};'.format( cache_ideal_tag_parents_lookup_table_name ) )
            
        
        if HC.CONTENT_TYPE_TAG_SIBLINGS in content_types:
            
            self._Execute( 'DELETE FROM tag_siblings WHERE service_id = ?;', ( service_id, ) )
            self._Execute( 'DELETE FROM tag_sibling_petitions WHERE service_id = ? AND status = ?;', ( service_id, HC.CONTENT_STATUS_PETITIONED ) )
            
            ( cache_ideal_tag_siblings_lookup_table_name, cache_actual_tag_siblings_lookup_table_name ) = ClientDBTagSiblings.GenerateTagSiblingsLookupCacheTableNames( service_id )
            
            # ideal only here too; the actual lookup table is preserved
            self._Execute( 'DELETE FROM {};'.format( cache_ideal_tag_siblings_lookup_table_name ) )
            
        
        #
        
        job_key.SetVariable( 'popup_text_1', '{}: recalculating'.format( prefix ) )
        
        if HC.CONTENT_TYPE_TAG_PARENTS in content_types or HC.CONTENT_TYPE_TAG_SIBLINGS in content_types:
            
            interested_service_ids = set( self.modules_tag_display.GetInterestedServiceIds( service_id ) )
            
            if len( interested_service_ids ) > 0:
                
                self.modules_tag_display.RegenerateTagSiblingsAndParentsCache( only_these_service_ids = interested_service_ids )
                
            
        
        # drop the now-stale cached counts gathered above
        self._ExecuteMany( 'DELETE FROM service_info WHERE service_id = ? AND info_type = ?;', ( ( service_id, info_type ) for info_type in service_info_types_to_delete ) )
        
        # queue the repository's update files for reprocessing of the chosen content types
        self.modules_repositories.ReprocessRepository( service_key, content_types )
        
        self._cursor_transaction_wrapper.pub_after_job( 'notify_new_services_data' )
        self._cursor_transaction_wrapper.pub_after_job( 'notify_new_services_gui' )
        
        job_key.SetVariable( 'popup_text_1', prefix + ': done!' )
        
    finally:
        
        job_key.Finish()
        
    
def _SaveDirtyServices( self, dirty_services ):
    """Persist services that have flagged themselves as having unsaved changes.
    
    :param dirty_services: iterable of service objects to write back
    """
    
    # if allowed to save objects
    
    self._SaveServices( dirty_services )
    
def _SaveServices( self, services ):
    """Write each given service object's current state back to the services module.
    
    :param services: iterable of service objects to persist
    """
    
    # hoist the bound method out of the loop
    update_service = self.modules_services.UpdateService
    
    for service in services:
        
        update_service( service )
        
    
def _SaveOptions( self, options ):
    """Serialise the (old-style) options object into the single-row options
    table and notify listeners after the transaction lands.
    
    :param options: the options object to store
    :raises: re-raises any error from the database write, after dumping the
        object to the log for diagnosis
    """
    
    try:
        
        self._Execute( 'UPDATE options SET options = ?;', ( options, ) )
        
    except Exception:
        
        # was a bare 'except:'; narrowed to Exception — it re-raises either way,
        # so normal error behaviour is unchanged, but KeyboardInterrupt et al.
        # no longer trigger the (potentially huge) log dump
        HydrusData.Print( 'Failed options save dump:' )
        HydrusData.Print( options )
        
        raise
        
    
    self._cursor_transaction_wrapper.pub_after_job( 'reset_thumbnail_cache' )
    self._cursor_transaction_wrapper.pub_after_job( 'notify_new_options' )
    
def _SetIdealClientFilesLocations( self, locations_to_ideal_weights, ideal_thumbnail_override_location ):
    """Replace the ideal client files locations (and optional thumbnail
    override location) with the given mapping.
    
    :param locations_to_ideal_weights: dict of absolute path -> weight; must be non-empty
    :param ideal_thumbnail_override_location: absolute path for thumbnails, or None for no override
    :raises Exception: if no locations are provided
    """
    
    if not locations_to_ideal_weights:
        
        raise Exception( 'No locations passed in ideal locations list!' )
        
    
    # full rewrite: clear the table, then insert every location as a portable path
    self._Execute( 'DELETE FROM ideal_client_files_locations;' )
    
    for ( absolute_path, ideal_weight ) in locations_to_ideal_weights.items():
        
        portable_path = HydrusPaths.ConvertAbsPathToPortablePath( absolute_path )
        
        self._Execute( 'INSERT INTO ideal_client_files_locations ( location, weight ) VALUES ( ?, ? );', ( portable_path, ideal_weight ) )
        
    
    self._Execute( 'DELETE FROM ideal_thumbnail_override_location;' )
    
    if ideal_thumbnail_override_location is not None:
        
        portable_thumbnail_override = HydrusPaths.ConvertAbsPathToPortablePath( ideal_thumbnail_override_location )
        
        self._Execute( 'INSERT INTO ideal_thumbnail_override_location ( location ) VALUES ( ? );', ( portable_thumbnail_override, ) )
        
    
def _SetPassword( self, password ):
    """Set (or clear) the client access password and persist it via the options.
    
    :param password: plaintext password string, or None to remove the password
    """
    
    if password is not None:
        
        password_bytes = bytes( password, 'utf-8' )
        
        # NOTE(review): unsalted single-round SHA-256 is weak for password
        # storage — presumably acceptable here as a local convenience lock,
        # but confirm before relying on it for anything serious
        password = hashlib.sha256( password_bytes ).digest()
        
    
    # None here means 'no password set'
    self._controller.options[ 'password' ] = password
    
    self._SaveOptions( self._controller.options )
    
def _SetServiceFilename( self, service_id, hash_id, filename ):
    """Record the service-side filename for a file.
    
    REPLACE keeps at most one row per ( service_id, hash_id ) pair.
    
    :param service_id: database id of the service
    :param hash_id: database id of the file
    :param filename: the filename to associate
    """
    
    self._Execute( 'REPLACE INTO service_filenames ( service_id, hash_id, filename ) VALUES ( ?, ?, ? );', ( service_id, hash_id, filename ) )
    
def _SetServiceDirectory( self, service_id, hash_ids, dirname, note ):
    """Overwrite a service directory listing: clear any previous record for
    ( service, dirname ), then store the new file set with its summary stats.
    
    :param service_id: database id of the service
    :param hash_ids: collection of file hash_ids in the directory
    :param dirname: directory name (stored via the texts module)
    :param note: free-text note for the directory
    """
    
    directory_id = self.modules_texts.GetTextId( dirname )
    
    # wipe any existing record of this directory before re-inserting
    self._Execute( 'DELETE FROM service_directories WHERE service_id = ? AND directory_id = ?;', ( service_id, directory_id ) )
    self._Execute( 'DELETE FROM service_directory_file_map WHERE service_id = ? AND directory_id = ?;', ( service_id, directory_id ) )
    
    num_files = len( hash_ids )
    
    result = self._Execute( 'SELECT SUM( size ) FROM files_info WHERE hash_id IN ' + HydrusData.SplayListForDB( hash_ids ) + ';' ).fetchone()
    
    # BUGFIX: an aggregate SELECT always returns exactly one row, so the old
    # 'result is None' test never fired — with zero matching rows (or all-NULL
    # sizes), SQLite's SUM yields ( None, ), which previously stored NULL as
    # total_size. Check the value itself as well.
    if result is None or result[0] is None:
        
        total_size = 0
        
    else:
        
        ( total_size, ) = result
        
    
    self._Execute( 'INSERT INTO service_directories ( service_id, directory_id, num_files, total_size, note ) VALUES ( ?, ?, ?, ?, ? );', ( service_id, directory_id, num_files, total_size, note ) )
    self._ExecuteMany( 'INSERT INTO service_directory_file_map ( service_id, directory_id, hash_id ) VALUES ( ?, ?, ? );', ( ( service_id, directory_id, hash_id ) for hash_id in hash_ids ) )
    
def _TryToSortHashIds( self, location_context: ClientLocation.LocationContext, hash_ids, sort_by: ClientMedia.MediaSort ):
    """Attempt to sort hash_ids in the database, where the sort is one this
    method knows how to do with simple table data (or random shuffle).
    
    :param location_context: file domain, used to pick the import-time table
    :param hash_ids: the hash_ids to sort
    :param sort_by: the requested media sort
    :returns: ( did_sort, hash_ids ) — did_sort is False when the sort type
        is not supported here, in which case hash_ids are returned unchanged
    """
    
    did_sort = False
    
    ( sort_metadata, sort_data ) = sort_by.sort_type
    sort_order = sort_by.sort_order
    
    # when set, this query fetches ( hash_id, <sort columns>... ) rows to sort in Python
    query = None
    
    if sort_metadata == 'system':
        
        # the system sorts this method can service from simple table columns
        simple_sorts = []
        
        simple_sorts.append( CC.SORT_FILES_BY_IMPORT_TIME )
        simple_sorts.append( CC.SORT_FILES_BY_FILESIZE )
        simple_sorts.append( CC.SORT_FILES_BY_DURATION )
        simple_sorts.append( CC.SORT_FILES_BY_FRAMERATE )
        simple_sorts.append( CC.SORT_FILES_BY_NUM_FRAMES )
        simple_sorts.append( CC.SORT_FILES_BY_WIDTH )
        simple_sorts.append( CC.SORT_FILES_BY_HEIGHT )
        simple_sorts.append( CC.SORT_FILES_BY_RATIO )
        simple_sorts.append( CC.SORT_FILES_BY_NUM_PIXELS )
        simple_sorts.append( CC.SORT_FILES_BY_MEDIA_VIEWS )
        simple_sorts.append( CC.SORT_FILES_BY_MEDIA_VIEWTIME )
        simple_sorts.append( CC.SORT_FILES_BY_APPROX_BITRATE )
        simple_sorts.append( CC.SORT_FILES_BY_FILE_MODIFIED_TIMESTAMP )
        simple_sorts.append( CC.SORT_FILES_BY_LAST_VIEWED_TIME )
        simple_sorts.append( CC.SORT_FILES_BY_ARCHIVED_TIMESTAMP )
        
        if sort_data in simple_sorts:
            
            # first, pick the SQL that pulls the needed columns; '{temp_table}'
            # is left as a placeholder to be filled with the temp table name later
            
            if sort_data == CC.SORT_FILES_BY_IMPORT_TIME:
                
                # import time lives per file domain, so pick the right current-files table
                if location_context.IsOneDomain() and location_context.IncludesCurrent():
                    
                    file_service_key = list( location_context.current_service_keys )[0]
                    
                else:
                    
                    file_service_key = CC.COMBINED_LOCAL_FILE_SERVICE_KEY
                    
                
                file_service_id = self.modules_services.GetServiceId( file_service_key )
                
                current_files_table_name = ClientDBFilesStorage.GenerateFilesTableName( file_service_id, HC.CONTENT_STATUS_CURRENT )
                
                query = 'SELECT hash_id, timestamp FROM {temp_table} CROSS JOIN {current_files_table} USING ( hash_id );'.format( temp_table = '{temp_table}', current_files_table = current_files_table_name )
                
            elif sort_data == CC.SORT_FILES_BY_FILESIZE:
                
                query = 'SELECT hash_id, size FROM {temp_table} CROSS JOIN files_info USING ( hash_id );'
                
            elif sort_data == CC.SORT_FILES_BY_DURATION:
                
                query = 'SELECT hash_id, duration FROM {temp_table} CROSS JOIN files_info USING ( hash_id );'
                
            elif sort_data == CC.SORT_FILES_BY_FRAMERATE:
                
                query = 'SELECT hash_id, num_frames, duration FROM {temp_table} CROSS JOIN files_info USING ( hash_id );'
                
            elif sort_data == CC.SORT_FILES_BY_NUM_FRAMES:
                
                query = 'SELECT hash_id, num_frames FROM {temp_table} CROSS JOIN files_info USING ( hash_id );'
                
            elif sort_data == CC.SORT_FILES_BY_WIDTH:
                
                query = 'SELECT hash_id, width FROM {temp_table} CROSS JOIN files_info USING ( hash_id );'
                
            elif sort_data == CC.SORT_FILES_BY_HEIGHT:
                
                query = 'SELECT hash_id, height FROM {temp_table} CROSS JOIN files_info USING ( hash_id );'
                
            elif sort_data == CC.SORT_FILES_BY_RATIO:
                
                query = 'SELECT hash_id, width, height FROM {temp_table} CROSS JOIN files_info USING ( hash_id );'
                
            elif sort_data == CC.SORT_FILES_BY_NUM_PIXELS:
                
                query = 'SELECT hash_id, width, height FROM {temp_table} CROSS JOIN files_info USING ( hash_id );'
                
            elif sort_data == CC.SORT_FILES_BY_MEDIA_VIEWS:
                
                query = 'SELECT hash_id, views FROM {temp_table} CROSS JOIN file_viewing_stats USING ( hash_id ) WHERE canvas_type = {canvas_type};'.format( temp_table = '{temp_table}', canvas_type = CC.CANVAS_MEDIA_VIEWER )
                
            elif sort_data == CC.SORT_FILES_BY_MEDIA_VIEWTIME:
                
                query = 'SELECT hash_id, viewtime FROM {temp_table} CROSS JOIN file_viewing_stats USING ( hash_id ) WHERE canvas_type = {canvas_type};'.format( temp_table = '{temp_table}', canvas_type = CC.CANVAS_MEDIA_VIEWER )
                
            elif sort_data == CC.SORT_FILES_BY_APPROX_BITRATE:
                
                query = 'SELECT hash_id, duration, num_frames, size, width, height FROM {temp_table} CROSS JOIN files_info USING ( hash_id );'
                
            elif sort_data == CC.SORT_FILES_BY_FILE_MODIFIED_TIMESTAMP:
                
                # a file may have both a disk-modified and domain-modified time; sort by the earliest
                q1 = 'SELECT hash_id, file_modified_timestamp FROM {temp_table} CROSS JOIN file_modified_timestamps USING ( hash_id )'
                q2 = 'SELECT hash_id, file_modified_timestamp FROM {temp_table} CROSS JOIN file_domain_modified_timestamps USING ( hash_id )'
                
                query = 'SELECT hash_id, MIN( file_modified_timestamp ) FROM ( {} UNION {} ) GROUP BY hash_id;'.format( q1, q2 )
                
            elif sort_data == CC.SORT_FILES_BY_LAST_VIEWED_TIME:
                
                query = 'SELECT hash_id, last_viewed_timestamp FROM {temp_table} CROSS JOIN file_viewing_stats USING ( hash_id ) WHERE canvas_type = {canvas_type};'.format( temp_table = '{temp_table}', canvas_type = CC.CANVAS_MEDIA_VIEWER )
                
            elif sort_data == CC.SORT_FILES_BY_ARCHIVED_TIMESTAMP:
                
                query = 'SELECT hash_id, archived_timestamp FROM {temp_table} CROSS JOIN archive_timestamps USING ( hash_id );'
                
            
            # second, pick the Python-side sort key for the fetched rows; rows
            # with NULL/zero components sort to the front with key -1
            
            if sort_data == CC.SORT_FILES_BY_RATIO:
                
                def key( row ):
                    
                    width = row[1]
                    height = row[2]
                    
                    if width is None or height is None:
                        
                        return -1
                        
                    else:
                        
                        return width / height
                        
                    
                
            elif sort_data == CC.SORT_FILES_BY_FRAMERATE:
                
                def key( row ):
                    
                    num_frames = row[1]
                    duration = row[2]
                    
                    if num_frames is None or duration is None or num_frames == 0 or duration == 0:
                        
                        return -1
                        
                    else:
                        
                        return num_frames / duration
                        
                    
                
            elif sort_data == CC.SORT_FILES_BY_NUM_PIXELS:
                
                def key( row ):
                    
                    width = row[1]
                    height = row[2]
                    
                    if width is None or height is None or width == 0 or height == 0:
                        
                        return -1
                        
                    else:
                        
                        return width * height
                        
                    
                
            elif sort_data == CC.SORT_FILES_BY_APPROX_BITRATE:
                
                def key( row ):
                    
                    # sorts by ( duration bitrate, then per-frame bitrate ) with
                    # sentinels: -1 = unknown/zero size, 0 = 'no duration' cases
                    
                    duration = row[1]
                    num_frames = row[2]
                    size = row[3]
                    width = row[4]
                    height = row[5]
                    
                    if duration is None or duration == 0:
                        
                        if size is None or size == 0:
                            
                            duration_bitrate = -1
                            frame_bitrate = -1
                            
                        else:
                            
                            duration_bitrate = 0
                            
                            if width is None or height is None:
                                
                                frame_bitrate = 0
                                
                            else:
                                
                                if size is None or size == 0 or width is None or width == 0 or height is None or height == 0:
                                    
                                    frame_bitrate = -1
                                    
                                else:
                                    
                                    num_pixels = width * height
                                    
                                    # still image: use bytes-per-pixel as the tiebreaker
                                    frame_bitrate = size / num_pixels
                                    
                                
                            
                        
                    else:
                        
                        if size is None or size == 0:
                            
                            duration_bitrate = -1
                            frame_bitrate = -1
                            
                        else:
                            
                            duration_bitrate = size / duration
                            
                            if num_frames is None or num_frames == 0:
                                
                                frame_bitrate = 0
                                
                            else:
                                
                                frame_bitrate = duration_bitrate / num_frames
                                
                            
                        
                    
                    return ( duration_bitrate, frame_bitrate )
                    
                
            else:
                
                # single-column sorts: sort on the fetched value, NULLs first
                key = lambda row: -1 if row[1] is None else row[1]
                
            
            reverse = sort_order == CC.SORT_DESC
            
        elif sort_data == CC.SORT_FILES_BY_RANDOM:
            
            # no query needed; shuffle in place
            hash_ids = list( hash_ids )
            
            random.shuffle( hash_ids )
            
            did_sort = True
            
        
    
    if query is not None:
        
        with self._MakeTemporaryIntegerTable( hash_ids, 'hash_id' ) as temp_hash_ids_table_name:
            
            hash_ids_and_other_data = sorted( self._Execute( query.format( temp_table = temp_hash_ids_table_name ) ), key = key, reverse = reverse )
            
            original_hash_ids = set( hash_ids )
            
            hash_ids = [ row[0] for row in hash_ids_and_other_data ]
            
            # some stuff like media views won't have rows
            
            missing_hash_ids = original_hash_ids.difference( hash_ids )
            
            hash_ids.extend( missing_hash_ids )
            
            did_sort = True
            
        
    
    return ( did_sort, hash_ids )
    
def _UndeleteFiles( self, service_id, hash_ids ):
    """Restore previously deleted files to a service by replaying their
    original import rows through _AddFiles.
    
    :param service_id: database id of the file service
    :param hash_ids: the files to undelete
    """
    
    rows = self.modules_files_storage.GetUndeleteRows( service_id, hash_ids )
    
    self._AddFiles( service_id, rows )
    
def _UnloadModules( self ):
    """Drop the db's module references so they can be garbage-collected.
    
    NOTE(review): only these four module attributes are released here —
    presumably the others are handled elsewhere; confirm if extending.
    """
    
    for module_attr_name in ( 'modules_hashes', 'modules_tags', 'modules_urls', 'modules_texts' ):
        
        delattr( self, module_attr_name )
        
    
    self._modules = []
    
def _UpdateDB( self, version ):
self._controller.frame_splash_status.SetText( 'updating db to v' + str( version + 1 ) )
if version == 419:
self._controller.frame_splash_status.SetSubtext( 'creating a couple of indices' )
self._CreateIndex( 'tag_parents', [ 'service_id', 'parent_tag_id' ] )
self._CreateIndex( 'tag_parent_petitions', [ 'service_id', 'parent_tag_id' ] )
self._CreateIndex( 'tag_siblings', [ 'service_id', 'good_tag_id' ] )
self._CreateIndex( 'tag_sibling_petitions', [ 'service_id', 'good_tag_id' ] )
self.modules_db_maintenance.AnalyzeTable( 'tag_parents' )
self.modules_db_maintenance.AnalyzeTable( 'tag_parent_petitions' )
self.modules_db_maintenance.AnalyzeTable( 'tag_siblings' )
self.modules_db_maintenance.AnalyzeTable( 'tag_sibling_petitions' )
self._controller.frame_splash_status.SetSubtext( 'regenerating ideal siblings and parents' )
try:
self.modules_tag_display.RegenerateTagSiblingsAndParentsCache()
except Exception as e:
HydrusData.PrintException( e )
message = 'Trying to regen sibling lookups failed! Please let hydrus dev know!'
self.pub_initial_message( message )
if version == 423:
try:
domain_manager = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_DOMAIN_MANAGER )
domain_manager.Initialise()
#
domain_manager.OverwriteDefaultParsers( ( 'e621 file page parser', ) )
domain_manager.OverwriteDefaultURLClasses( ( 'nitter media timeline', 'nitter timeline' ) )
#
domain_manager.TryToLinkURLClassesAndParsers()
#
self.modules_serialisable.SetJSONDump( domain_manager )
except Exception as e:
HydrusData.PrintException( e )
message = 'Trying to update some parsers failed! Please let hydrus dev know!'
self.pub_initial_message( message )
#
result_master = self._Execute( 'SELECT 1 FROM external_master.sqlite_master WHERE name = ?;', ( 'subtags_fts4', ) ).fetchone()
result_caches = self._Execute( 'SELECT 1 FROM external_caches.sqlite_master WHERE name = ?;', ( 'subtags_fts4', ) ).fetchone()
if result_master is not None or result_caches is not None:
try:
self._controller.frame_splash_status.SetText( 'dropping old cache - subtags fts4' )
self._Execute( 'DROP TABLE IF EXISTS subtags_fts4;' )
self._controller.frame_splash_status.SetText( 'dropping old cache - subtags searchable map' )
self._Execute( 'DROP TABLE IF EXISTS subtags_searchable_map;' )
self._controller.frame_splash_status.SetText( 'dropping old cache - integer subtags' )
self._Execute( 'DROP TABLE IF EXISTS integer_subtags;' )
self.modules_services.combined_file_service_id = self.modules_services.GetServiceId( CC.COMBINED_FILE_SERVICE_KEY )
file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_TAG_LOOKUP_CACHES )
tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
for ( file_service_id, tag_service_id ) in itertools.product( file_service_ids, tag_service_ids ):
self._controller.frame_splash_status.SetText( 'creating new specific cache - {} {}'.format( file_service_id, tag_service_id ) )
self.modules_tag_search.Drop( file_service_id, tag_service_id )
self.modules_tag_search.Generate( file_service_id, tag_service_id )
self._CacheTagsPopulate( file_service_id, tag_service_id )
for tag_service_id in tag_service_ids:
self._controller.frame_splash_status.SetText( 'creating new combined files cache - {}'.format( tag_service_id ) )
self.modules_tag_search.Drop( self.modules_services.combined_file_service_id, tag_service_id )
self.modules_tag_search.Generate( self.modules_services.combined_file_service_id, tag_service_id )
self._CacheTagsPopulate( self.modules_services.combined_file_service_id, tag_service_id )
except Exception as e:
HydrusData.PrintException( e )
raise Exception( 'The v424 cache update failed to work! The error has been printed to the log. Please rollback to 423 and let hydev know the details.' )
if version == 424:
session_manager = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_SESSION_MANAGER )
if session_manager is None:
try:
legacy_session_manager = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_SESSION_MANAGER_LEGACY )
if legacy_session_manager is None:
session_manager = ClientNetworkingSessions.NetworkSessionManager()
session_manager.SetDirty()
message = 'Hey, when updating your session manager to the new object, it seems the original was missing. I have created an empty new one, but it will have no cookies, so you will have to re-login as needed.'
self.pub_initial_message( message )
else:
session_manager = ClientNetworkingSessionsLegacy.ConvertLegacyToNewSessions( legacy_session_manager )
self.modules_serialisable.DeleteJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_SESSION_MANAGER_LEGACY )
self.modules_serialisable.SetJSONDump( session_manager )
except Exception as e:
HydrusData.PrintException( e )
raise Exception( 'The v425 session update failed to work! The error has been printed to the log. Please rollback to 424 and let hydev know the details.' )
bandwidth_manager = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_BANDWIDTH_MANAGER )
if bandwidth_manager is None:
try:
legacy_bandwidth_manager = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_BANDWIDTH_MANAGER_LEGACY )
if legacy_bandwidth_manager is None:
bandwidth_manager = ClientNetworkingBandwidth.NetworkBandwidthManager()
ClientDefaults.SetDefaultBandwidthManagerRules( bandwidth_manager )
bandwidth_manager.SetDirty()
message = 'Hey, when updating your bandwidth manager to the new object, it seems the original was missing. I have created an empty new one, but it will have no bandwidth record or saved rules.'
self.pub_initial_message( message )
else:
bandwidth_manager = ClientNetworkingBandwidthLegacy.ConvertLegacyToNewBandwidth( legacy_bandwidth_manager )
self.modules_serialisable.DeleteJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_BANDWIDTH_MANAGER_LEGACY )
self.modules_serialisable.SetJSONDump( bandwidth_manager )
except Exception as e:
HydrusData.PrintException( e )
raise Exception( 'The v425 bandwidth update failed to work! The error has been printed to the log. Please rollback to 424 and let hydev know the details.' )
if version == 425:
try:
domain_manager = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_DOMAIN_MANAGER )
domain_manager.Initialise()
#
domain_manager.OverwriteDefaultParsers( ( 'gelbooru 0.2.x gallery page parser', 'e621 file page parser', 'gelbooru 0.2.5 file page parser' ) )
domain_manager.OverwriteDefaultURLClasses( ( 'gelbooru gallery pool page', ) )
#
domain_manager.TryToLinkURLClassesAndParsers()
#
self.modules_serialisable.SetJSONDump( domain_manager )
except Exception as e:
HydrusData.PrintException( e )
message = 'Trying to update some parsers failed! Please let hydrus dev know!'
self.pub_initial_message( message )
message = 'You updated from an older version, so some automatic maintenance could not be run. Please run _database->regenerate->tag text search cache (subtags repopulation)_ for all services when you have some time.'
self.pub_initial_message( message )
if version == 426:
try:
self._RegenerateTagDisplayPendingMappingsCache()
except Exception as e:
HydrusData.PrintException( e )
message = 'The v427 pending tags regen routine failed! This is not super important, but hydev would be interested in seeing the error that was printed to the log.'
self.pub_initial_message( message )
from hydrus.client.gui import ClientGUIShortcuts
try:
shortcut_sets = ClientDefaults.GetDefaultShortcuts()
try:
tags_autocomplete = [ shortcut_set for shortcut_set in shortcut_sets if shortcut_set.GetName() == 'tags_autocomplete' ][0]
except Exception as e:
tags_autocomplete = ClientGUIShortcuts.ShortcutSet( 'tags_autocomplete' )
main_gui = self.modules_serialisable.GetJSONDumpNamed( HydrusSerialisable.SERIALISABLE_TYPE_SHORTCUT_SET, dump_name = 'main_gui' )
shortcuts = main_gui.GetShortcuts( CAC.SIMPLE_SYNCHRONISED_WAIT_SWITCH )
for shortcut in shortcuts:
tags_autocomplete.SetCommand( shortcut, CAC.ApplicationCommand.STATICCreateSimpleCommand( CAC.SIMPLE_SYNCHRONISED_WAIT_SWITCH ) )
main_gui.DeleteShortcut( shortcut )
self.modules_serialisable.SetJSONDump( main_gui )
self.modules_serialisable.SetJSONDump( tags_autocomplete )
except Exception as e:
HydrusData.PrintException( e )
message = 'The v427 shortcut migrate failed! This is not super important, but hydev would be interested in seeing the error that was printed to the log. Check your \'main gui\' shortcuts if you want to set the migrated commands like \'force autocomplete search\'. I will now try to save an empty tag autocomplete shortcut set.'
self.pub_initial_message( message )
tags_autocomplete = ClientGUIShortcuts.ShortcutSet( 'tags_autocomplete' )
self.modules_serialisable.SetJSONDump( tags_autocomplete )
try:
domain_manager = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_DOMAIN_MANAGER )
domain_manager.Initialise()
#
domain_manager.DissolveParserLink( 'gelbooru gallery favorites page', 'gelbooru 0.2.5 file page parser' )
domain_manager.DissolveParserLink( 'gelbooru gallery page', 'gelbooru 0.2.5 file page parser' )
domain_manager.DissolveParserLink( 'gelbooru gallery pool page', 'gelbooru 0.2.5 file page parser' )
domain_manager.DissolveParserLink( 'gelbooru file page', 'gelbooru 0.2.x gallery page parser' )
#
domain_manager.OverwriteDefaultParsers( ( 'gelbooru 0.2.5 file page parser', ) )
#
domain_manager.OverwriteDefaultURLClasses( ( '420chan thread new format', ) )
#
domain_manager.TryToLinkURLClassesAndParsers()
#
self.modules_serialisable.SetJSONDump( domain_manager )
except Exception as e:
HydrusData.PrintException( e )
message = 'Trying to update some parsers failed! Please let hydrus dev know!'
self.pub_initial_message( message )
if version == 427:
try:
domain_manager = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_DOMAIN_MANAGER )
domain_manager.Initialise()
#
domain_manager.OverwriteDefaultGUGs( [
'nitter (.eu mirror) media lookup',
'nitter (.eu mirror) retweets lookup',
'nitter (nixnet mirror) media lookup',
'nitter (nixnet mirror) retweets lookup'
] )
#
domain_manager.OverwriteDefaultURLClasses( [
'nitter (.eu mirror) media timeline',
'nitter (.eu mirror) timeline',
'nitter (.eu mirror) tweet media',
'nitter (.eu mirror) tweet',
'nitter (nixnet mirror) media timeline',
'nitter (nixnet mirror) timeline',
'nitter (nixnet mirror) tweet media',
'nitter (nixnet mirror) tweet'
] )
#
domain_manager.OverwriteDefaultParsers( [
'nitter media parser',
'nitter retweet parser',
'nitter tweet parser',
'nitter tweet parser (video from koto.reisen)'
] )
#
domain_manager.TryToLinkURLClassesAndParsers()
#
self.modules_serialisable.SetJSONDump( domain_manager )
except Exception as e:
HydrusData.PrintException( e )
message = 'Trying to update nitter mirrors failed! Please let hydrus dev know!'
self.pub_initial_message( message )
if version == 428:
try:
self.modules_hashes_local_cache.CreateInitialTables()
self.modules_hashes_local_cache.CreateInitialIndices()
except Exception as e:
HydrusData.PrintException( e )
raise Exception( 'Could not create the new local hashes cache! The error has been printed to the log, please let hydev know!' )
# took out local hash regen here due to later file service splitting, which regens local hash cache anyway
if version == 429:
try:
tag_service_ids = set( self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES ) )
file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_TAG_LOOKUP_CACHES )
file_service_ids.add( self.modules_services.combined_file_service_id )
for ( file_service_id, tag_service_id ) in itertools.product( file_service_ids, tag_service_ids ):
subtags_searchable_map_table_name = self.modules_tag_search.GetSubtagsSearchableMapTableName( file_service_id, tag_service_id )
self._Execute( 'CREATE TABLE IF NOT EXISTS {} ( subtag_id INTEGER PRIMARY KEY, searchable_subtag_id INTEGER );'.format( subtags_searchable_map_table_name ) )
self._CreateIndex( subtags_searchable_map_table_name, [ 'searchable_subtag_id' ] )
self._RegenerateTagCacheSearchableSubtagMaps()
except Exception as e:
HydrusData.PrintException( e )
raise Exception( 'The v430 subtag searchable map generation routine failed! The error has been printed to the log, please let hydev know!' )
if version == 430:
try:
# due to a bug in over-eager deletion from the tag definition cache, we'll need to resync chained tag ids
tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
for tag_service_id in tag_service_ids:
message = 'fixing up some desynchronised tag definitions: {}'.format( tag_service_id )
self._controller.frame_splash_status.SetSubtext( message )
( cache_ideal_tag_siblings_lookup_table_name, cache_actual_tag_siblings_lookup_table_name ) = ClientDBTagSiblings.GenerateTagSiblingsLookupCacheTableNames( tag_service_id )
( cache_ideal_tag_parents_lookup_table_name, cache_actual_tag_parents_lookup_table_name ) = ClientDBTagParents.GenerateTagParentsLookupCacheTableNames( tag_service_id )
tag_ids_in_dispute = set()
tag_ids_in_dispute.update( self._STS( self._Execute( 'SELECT DISTINCT bad_tag_id FROM {};'.format( cache_actual_tag_siblings_lookup_table_name ) ) ) )
tag_ids_in_dispute.update( self._STS( self._Execute( 'SELECT ideal_tag_id FROM {};'.format( cache_actual_tag_siblings_lookup_table_name ) ) ) )
tag_ids_in_dispute.update( self._STS( self._Execute( 'SELECT DISTINCT child_tag_id FROM {};'.format( cache_actual_tag_parents_lookup_table_name ) ) ) )
tag_ids_in_dispute.update( self._STS( self._Execute( 'SELECT DISTINCT ancestor_tag_id FROM {};'.format( cache_actual_tag_parents_lookup_table_name ) ) ) )
if len( tag_ids_in_dispute ) > 0:
self._CacheTagsSyncTags( tag_service_id, tag_ids_in_dispute )
except Exception as e:
HydrusData.PrintException( e )
message = 'Trying to resync some tag definitions failed! Please let hydrus dev know!'
self.pub_initial_message( message )
try:
domain_manager = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_DOMAIN_MANAGER )
domain_manager.Initialise()
#
domain_manager.OverwriteDefaultParsers( [
'8chan.moe thread api parser',
'e621 file page parser'
] )
#
domain_manager.TryToLinkURLClassesAndParsers()
#
self.modules_serialisable.SetJSONDump( domain_manager )
except Exception as e:
HydrusData.PrintException( e )
message = 'Trying to update some parsers failed! Please let hydrus dev know!'
self.pub_initial_message( message )
if version == 431:
    
    # Migrate the legacy integer 'default_tag_sort' option into the new
    # serialisable ClientTagSorting.TagSort object on the client options.
    # Best-effort: on failure the user is asked to re-set the option manually.
    try:
        
        new_options = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_CLIENT_OPTIONS )
        
        old_options = self._GetOptions()
        
        # the old hardcoded sort enum values, as stored by pre-432 clients
        SORT_BY_LEXICOGRAPHIC_ASC = 8
        SORT_BY_LEXICOGRAPHIC_DESC = 9
        SORT_BY_INCIDENCE_ASC = 10
        SORT_BY_INCIDENCE_DESC = 11
        SORT_BY_LEXICOGRAPHIC_NAMESPACE_ASC = 12
        SORT_BY_LEXICOGRAPHIC_NAMESPACE_DESC = 13
        SORT_BY_INCIDENCE_NAMESPACE_ASC = 14
        SORT_BY_INCIDENCE_NAMESPACE_DESC = 15
        SORT_BY_LEXICOGRAPHIC_IGNORE_NAMESPACE_ASC = 16
        SORT_BY_LEXICOGRAPHIC_IGNORE_NAMESPACE_DESC = 17
        
        old_default_tag_sort = old_options[ 'default_tag_sort' ]
        
        from hydrus.client.metadata import ClientTagSorting
        
        sort_type = ClientTagSorting.SORT_BY_HUMAN_TAG
        
        # FIX: the original tested SORT_BY_LEXICOGRAPHIC_NAMESPACE_ASC twice here
        # and never SORT_BY_LEXICOGRAPHIC_NAMESPACE_DESC. harmless in practice
        # (the fallthrough default is the same sort_type), but corrected to match
        # the obvious intent.
        if old_default_tag_sort in ( SORT_BY_LEXICOGRAPHIC_ASC, SORT_BY_LEXICOGRAPHIC_DESC, SORT_BY_LEXICOGRAPHIC_NAMESPACE_ASC, SORT_BY_LEXICOGRAPHIC_NAMESPACE_DESC ):
            
            sort_type = ClientTagSorting.SORT_BY_HUMAN_TAG
            
        elif old_default_tag_sort in ( SORT_BY_LEXICOGRAPHIC_IGNORE_NAMESPACE_ASC, SORT_BY_LEXICOGRAPHIC_IGNORE_NAMESPACE_DESC ):
            
            sort_type = ClientTagSorting.SORT_BY_HUMAN_SUBTAG
            
        elif old_default_tag_sort in ( SORT_BY_INCIDENCE_ASC, SORT_BY_INCIDENCE_DESC, SORT_BY_INCIDENCE_NAMESPACE_ASC, SORT_BY_INCIDENCE_NAMESPACE_DESC ):
            
            sort_type = ClientTagSorting.SORT_BY_COUNT
            
        
        # every old '_ASC' value maps to ascending, everything else descending
        if old_default_tag_sort in ( SORT_BY_INCIDENCE_ASC, SORT_BY_INCIDENCE_NAMESPACE_ASC, SORT_BY_LEXICOGRAPHIC_ASC, SORT_BY_LEXICOGRAPHIC_IGNORE_NAMESPACE_ASC, SORT_BY_LEXICOGRAPHIC_NAMESPACE_ASC ):
            
            sort_order = CC.SORT_ASC
            
        else:
            
            sort_order = CC.SORT_DESC
            
        
        use_siblings = True
        
        # the old 'namespace' variants grouped results by namespace
        if old_default_tag_sort in ( SORT_BY_INCIDENCE_NAMESPACE_ASC, SORT_BY_INCIDENCE_NAMESPACE_DESC, SORT_BY_LEXICOGRAPHIC_NAMESPACE_ASC, SORT_BY_LEXICOGRAPHIC_NAMESPACE_DESC ):
            
            group_by = ClientTagSorting.GROUP_BY_NAMESPACE
            
        else:
            
            group_by = ClientTagSorting.GROUP_BY_NOTHING
            
        
        tag_sort = ClientTagSorting.TagSort(
            sort_type = sort_type,
            sort_order = sort_order,
            use_siblings = use_siblings,
            group_by = group_by
        )
        
        new_options.SetDefaultTagSort( tag_sort )
        
        self.modules_serialisable.SetJSONDump( new_options )
        
    except Exception as e:
        
        HydrusData.PrintException( e )
        
        message = 'Trying to convert your old default tag sort to the new format failed! Please set it again in the options.'
        
        self.pub_initial_message( message )
        
    
# update from v432: install/refresh the twitter syndication downloader objects
# (GUGs, URL classes, parsers) in the serialised domain manager. best-effort.
if version == 432:
try:
domain_manager = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_DOMAIN_MANAGER )
domain_manager.Initialise()
#
domain_manager.OverwriteDefaultGUGs( [
'twitter syndication profile lookup (limited) (with replies)',
'twitter syndication profile lookup (limited)'
] )
#
domain_manager.OverwriteDefaultURLClasses( [
'twitter syndication api profile',
'twitter syndication api tweet',
'twitter tweet'
] )
#
domain_manager.OverwriteDefaultParsers( [
'twitter syndication api profile parser',
'twitter syndication api tweet parser'
] )
#
# re-match url classes to parsers after the overwrites
domain_manager.TryToLinkURLClassesAndParsers()
#
self.modules_serialisable.SetJSONDump( domain_manager )
except Exception as e:
# non-fatal: log and surface to the user, keep updating
HydrusData.PrintException( e )
message = 'Trying to add the twitter downloader failed! Please let hydrus dev know!'
self.pub_initial_message( message )
# update from v435: regenerate the pending tag mappings cache, then invalidate
# the cached pending/petitioned service-info counts so they recount fresh.
if version == 435:
try:
self._RegenerateTagPendingMappingsCache()
# these cached counts may be stale after the regen; delete to force recount
types_to_delete = (
HC.SERVICE_INFO_NUM_PENDING_MAPPINGS,
HC.SERVICE_INFO_NUM_PENDING_TAG_SIBLINGS,
HC.SERVICE_INFO_NUM_PENDING_TAG_PARENTS,
HC.SERVICE_INFO_NUM_PETITIONED_MAPPINGS,
HC.SERVICE_INFO_NUM_PETITIONED_TAG_SIBLINGS,
HC.SERVICE_INFO_NUM_PETITIONED_TAG_PARENTS,
HC.SERVICE_INFO_NUM_PENDING_FILES,
HC.SERVICE_INFO_NUM_PETITIONED_FILES
)
self._DeleteServiceInfo( types_to_delete = types_to_delete )
except Exception as e:
# non-fatal: worst case is a wrong pending count in the menu
HydrusData.PrintException( e )
message = 'Trying to regenerate the pending tag cache failed! This is not a big deal, but you might still have a bad pending count for your pending menu. Error information has been written to the log. Please let hydrus dev know!'
self.pub_initial_message( message )
# update from v436: add deletion-time columns to deleted_files and fold the
# combined-local and trash records into the 'my files' deletion record.
if version == 436:
# read the table's CREATE statement to detect whether this migration has run
result = self._Execute( 'SELECT sql FROM sqlite_master WHERE name = ?;', ( 'deleted_files', ) ).fetchone()
if result is None:
raise Exception( 'No deleted_files table!!!' )
( s, ) = result
if 'timestamp' not in s:
self._Execute( 'ALTER TABLE deleted_files ADD COLUMN timestamp INTEGER;' )
self._Execute( 'ALTER TABLE deleted_files ADD COLUMN original_timestamp INTEGER;' )
# existing deletion records have unknown times, so set them all to NULL
self._Execute( 'UPDATE deleted_files SET timestamp = ?, original_timestamp = ?;', ( None, None ) )
my_files_service_id = self.modules_services.GetServiceId( CC.LOCAL_FILE_SERVICE_KEY )
# anything deleted from the combined local domain is also deleted from 'my files'
self._Execute( 'INSERT OR IGNORE INTO deleted_files ( service_id, hash_id, timestamp, original_timestamp ) SELECT ?, hash_id, timestamp, original_timestamp FROM deleted_files WHERE service_id = ?;', ( my_files_service_id, self.modules_services.combined_local_file_service_id ) )
# files sitting in the trash count as deleted from 'my files': deletion time is
# unknown (NULL), original_timestamp taken from the current_files record
self._Execute( 'INSERT OR IGNORE INTO deleted_files ( service_id, hash_id, timestamp, original_timestamp ) SELECT ?, hash_id, ?, timestamp FROM current_files WHERE service_id = ?;', ( my_files_service_id, None, self.modules_services.trash_service_id ) )
self._CreateIndex( 'deleted_files', [ 'timestamp' ] )
self._CreateIndex( 'deleted_files', [ 'original_timestamp' ] )
# deleted-file counts are now stale; drop them so they regenerate on demand
self._Execute( 'DELETE FROM service_info WHERE info_type = ?;', ( HC.SERVICE_INFO_NUM_DELETED_FILES, ) )
self.modules_db_maintenance.AnalyzeTable( 'deleted_files' )
# update from v438: refresh the imgur single-media url class. best-effort.
if version == 438:
try:
domain_manager = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_DOMAIN_MANAGER )
domain_manager.Initialise()
#
domain_manager.OverwriteDefaultURLClasses( ( 'imgur single media file url', ) )
#
self.modules_serialisable.SetJSONDump( domain_manager )
except Exception as e:
HydrusData.PrintException( e )
message = 'Trying to update some url classes failed! Please let hydrus dev know!'
self.pub_initial_message( message )
# update from v440: migrate the old 'sort_by' option into the new default
# namespace sorts on the client options, then refresh a pixiv url class.
if version == 440:
try:
old_options = self._GetOptions()
# only migrate if the legacy key is still present
if 'sort_by' in old_options:
old_sort_by = old_options[ 'sort_by' ]
new_options = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_CLIENT_OPTIONS )
# each old entry is ( gumpf, namespaces ); only the namespaces matter now
default_namespace_sorts = [ ClientMedia.MediaSort( sort_type = ( 'namespaces', ( namespaces, ClientTags.TAG_DISPLAY_ACTUAL ) ) ) for ( gumpf, namespaces ) in old_sort_by ]
new_options.SetDefaultNamespaceSorts( default_namespace_sorts )
self.modules_serialisable.SetJSONDump( new_options )
except Exception as e:
HydrusData.PrintException( e )
message = 'Trying to migrate the old default namespace sorts failed! Please let hydrus dev know!'
self.pub_initial_message( message )
try:
domain_manager = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_DOMAIN_MANAGER )
domain_manager.Initialise()
#
domain_manager.OverwriteDefaultURLClasses( ( 'pixiv artist page (new format)', ) )
#
self.modules_serialisable.SetJSONDump( domain_manager )
except Exception as e:
HydrusData.PrintException( e )
message = 'Trying to update some url classes failed! Please let hydrus dev know!'
self.pub_initial_message( message )
# update from v441: convert every legacy GUI session object to the new
# hashed-dump session format (backing up anything unparseable to disk), then
# refresh two parsers. this is the big, order-sensitive v442 session migration.
if version == 441:
# presence of json_dumps_hashed means the conversion already happened
result = self._Execute( 'SELECT 1 FROM sqlite_master WHERE name = ?;', ( 'json_dumps_hashed', ) ).fetchone()
if result is None:
self._controller.frame_splash_status.SetSubtext( 'doing pre-update free space check' )
legacy_dump_type = HydrusSerialisable.SERIALISABLE_TYPE_GUI_SESSION_LEGACY
# estimate conversion space from the total size of stored legacy dumps
result = self._Execute( 'SELECT SUM( LENGTH( dump ) ) FROM json_dumps_named WHERE dump_type = ?;', ( legacy_dump_type, ) ).fetchone()
if result is None or result[0] is None:
raise Exception( 'Hey, for the v442 update step, I am supposed to be converting your sessions to a new object, but it did not seem like there were any! I am not sure what is going on, so the update will now be abandoned. Please roll back to v441 and let hydev know!' )
( space_needed, ) = result
space_needed /= 2 # most sessions will have backups and shared pages will save space in the end
try:
HydrusDBBase.CheckHasSpaceForDBTransaction( self._db_dir, space_needed )
except Exception as e:
# warn the user about low disk space but let them push on if they accept
message = 'Hey, for the v442 update step, I am supposed to be converting your sessions to a new object, but there was a problem. It looks like you have very large sessions, and I do not think you have enough free disk space to perform the conversion safely. If you OK this dialog, it will be attempted anyway, but be warned: you may run out of space mid-update and then have serious problems. I recommend you kill the hydrus process NOW and then free up some space before trying again. Please check the full error:'
message += os.linesep * 2
message += str( e )
BlockingSafeShowMessage( message )
one_worked_ok = False
self._Execute( 'CREATE TABLE IF NOT EXISTS json_dumps_hashed ( hash BLOB_BYTES PRIMARY KEY, dump_type INTEGER, version INTEGER, dump BLOB_BYTES );' )
names_and_timestamps = self._Execute( 'SELECT dump_name, timestamp FROM json_dumps_named WHERE dump_type = ?;', ( legacy_dump_type, ) ).fetchall()
from hydrus.client.gui.pages import ClientGUISessionLegacy
import json
for ( i, ( name, timestamp ) ) in enumerate( names_and_timestamps ):
self._controller.frame_splash_status.SetSubtext( 'converting "{}" "{}"\u2026'.format( name, HydrusData.ConvertTimestampToPrettyTime( timestamp ) ) )
( dump_version, dump ) = self._Execute( 'SELECT version, dump FROM json_dumps_named WHERE dump_type = ? AND dump_name = ? AND timestamp = ?;', ( legacy_dump_type, name, timestamp ) ).fetchone()
# decode and deserialise by hand so each session's failure can be caught
try:
if isinstance( dump, bytes ):
dump = str( dump, 'utf-8' )
serialisable_info = json.loads( dump )
legacy_session = HydrusSerialisable.CreateFromSerialisableTuple( ( legacy_dump_type, name, dump_version, serialisable_info ) )
except Exception as e:
HydrusData.PrintException( e, do_wait = False )
# write the raw dump to a backup file in the db dir, then skip this session
try:
timestamp_string = time.strftime( '%Y-%m-%d %H-%M-%S' )
filename = '({}, {}) at {}.json'.format( name, timestamp, timestamp_string )
path = os.path.join( self._db_dir, filename )
with open( path, 'wb' ) as f:
if isinstance( dump, str ):
dump = bytes( dump, 'utf-8', errors = 'replace' )
f.write( dump )
except Exception as e:
# backup is itself best-effort; nothing more we can do here
pass
message = 'When updating sessions, "{}" at "{}" was non-loadable/convertable! I tried to save a backup of the object to your database directory.'.format( name, HydrusData.ConvertTimestampToPrettyTime( timestamp ) )
HydrusData.Print( message )
self.pub_initial_message( message )
continue
session = ClientGUISessionLegacy.ConvertLegacyToNew( legacy_session )
# store the new session with the legacy record's original timestamp
self.modules_serialisable.SetJSONDump( session, force_timestamp = timestamp )
self._Execute( 'DELETE FROM json_dumps_named WHERE dump_type = ? AND dump_name = ? AND timestamp = ?;', ( legacy_dump_type, name, timestamp ) )
one_worked_ok = True
# if literally nothing converted, abandon so the user can roll back safely
if not one_worked_ok:
raise Exception( 'When trying to update your sessions to the new format, none of them converted correctly! Rather than send you into an empty and potentially non-functional client, the update is now being abandoned. Please roll back to v441 and let hydev know!' )
self._Execute( 'DELETE FROM json_dumps_named WHERE dump_type = ?;', ( legacy_dump_type, ) )
self._controller.frame_splash_status.SetSubtext( 'session converting finished' )
# separate best-effort step: refresh two moebooru parsers
try:
domain_manager = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_DOMAIN_MANAGER )
domain_manager.Initialise()
#
domain_manager.OverwriteDefaultParsers( ( 'yande.re post page parser', 'moebooru file page parser' ) )
#
self.modules_serialisable.SetJSONDump( domain_manager )
except Exception as e:
HydrusData.PrintException( e )
message = 'Trying to update some url classes failed! Please let hydrus dev know!'
self.pub_initial_message( message )
# update from v442: re-link the yande.re file page url class to its parser.
if version == 442:
try:
domain_manager = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_DOMAIN_MANAGER )
domain_manager.Initialise()
#
domain_manager.OverwriteParserLink( 'yande.re file page', 'yande.re post page parser' )
#
self.modules_serialisable.SetJSONDump( domain_manager )
except Exception as e:
HydrusData.PrintException( e )
message = 'Trying to update some url classes failed! Please let hydrus dev know!'
self.pub_initial_message( message )
# update from v446: two jobs. first, replace the stored 'gelbooru md5' file
# lookup script with a fixed version; second, split the monolithic
# current_files/deleted_files/file_transfers/file_petitions tables into
# per-service tables via the files-storage module.
if version == 446:
# NOTE(review): dump_type 32 appears to be the file-lookup-script type -- confirm
result = self._Execute( 'SELECT 1 FROM json_dumps_named WHERE dump_type = ? AND dump_name = ?;', ( 32, 'gelbooru md5' ) ).fetchone()
if result is not None:
try:
self._Execute( 'DELETE FROM json_dumps_named WHERE dump_type = ? AND dump_name = ?;', ( 32, 'gelbooru md5' ) )
# ( dump_type, dump_name, version, timestamp, dump ) -- the dump is the
# script's serialised json, stored verbatim
script_info = ( 32, 'gelbooru md5', 2, HydrusData.GetNow(), '''["http://gelbooru.com/index.php", 0, 1, [55, 1, [[[4, "hex"]], "some hash bytes"]], "md5", {"s": "list", "page": "post"}, [[30, 6, ["we got sent back to main gallery page -- title test", 8, [27, 7, [[26, 1, [[62, 2, [0, "head", {}, 0, null, false, [51, 1, [3, "", null, null, "example string"]]]], [62, 2, [0, "title", {}, 0, null, false, [51, 1, [3, "", null, null, "example string"]]]]]], 1, "", [84, 1, [26, 1, []]]]], [true, [51, 1, [2, "Image List", null, null, "Image List"]]]]], [30, 6, ["", 0, [27, 7, [[26, 1, [[62, 2, [0, "li", {"class": "tag-type-general"}, null, null, false, [51, 1, [3, "", null, null, "example string"]]]], [62, 2, [0, "a", {}, 1, null, false, [51, 1, [3, "", null, null, "example string"]]]]]], 1, "", [84, 1, [26, 1, []]]]], ""]], [30, 6, ["", 0, [27, 7, [[26, 1, [[62, 2, [0, "li", {"class": "tag-type-copyright"}, null, null, false, [51, 1, [3, "", null, null, "example string"]]]], [62, 2, [0, "a", {}, 1, null, false, [51, 1, [3, "", null, null, "example string"]]]]]], 1, "", [84, 1, [26, 1, []]]]], "series"]], [30, 6, ["", 0, [27, 7, [[26, 1, [[62, 2, [0, "li", {"class": "tag-type-artist"}, null, null, false, [51, 1, [3, "", null, null, "example string"]]]], [62, 2, [0, "a", {}, 1, null, false, [51, 1, [3, "", null, null, "example string"]]]]]], 1, "", [84, 1, [26, 1, []]]]], "creator"]], [30, 6, ["", 0, [27, 7, [[26, 1, [[62, 2, [0, "li", {"class": "tag-type-character"}, null, null, false, [51, 1, [3, "", null, null, "example string"]]]], [62, 2, [0, "a", {}, 1, null, false, [51, 1, [3, "", null, null, "example string"]]]]]], 1, "", [84, 1, [26, 1, []]]]], "character"]], [30, 6, ["we got sent back to main gallery page -- page links exist", 8, [27, 7, [[26, 1, [[62, 2, [0, "div", {"id": "paginator"}, null, null, false, [51, 1, [3, "", null, null, "example string"]]]], [62, 2, [0, "a", {}, null, null, false, [51, 1, [3, "", null, null, "example string"]]]]]], 2, "class", [84, 1, 
[26, 1, []]]]], [true, [51, 1, [3, "", null, null, "pagination"]]]]]]]''' )
self._Execute( 'REPLACE INTO json_dumps_named VALUES ( ?, ?, ?, ?, ? );', script_info )
except Exception as e:
HydrusData.PrintException( e )
message = 'Trying to update gelbooru file lookup script failed! Please let hydrus dev know!'
self.pub_initial_message( message )
#
# if the old monolithic current_files table still exists, migrate its rows
result = self._Execute( 'SELECT 1 FROM sqlite_master WHERE name = ?;', ( 'current_files', ) ).fetchone()
if result is not None:
try:
service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES )
for ( i, service_id ) in enumerate( service_ids ):
self._controller.frame_splash_status.SetSubtext( 'reorganising file storage {}'.format( HydrusData.ConvertValueRangeToPrettyString( i + 1, len( service_ids ) ) ) )
# create the four per-service tables, then copy this service's rows over
self.modules_files_storage.GenerateFilesTables( service_id )
( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name ) = ClientDBFilesStorage.GenerateFilesTableNames( service_id )
self._Execute( 'INSERT INTO {} ( hash_id, timestamp ) SELECT hash_id, timestamp FROM current_files WHERE service_id = ?;'.format( current_files_table_name ), ( service_id, ) )
self._Execute( 'INSERT INTO {} ( hash_id, timestamp, original_timestamp ) SELECT hash_id, timestamp, original_timestamp FROM deleted_files WHERE service_id = ?;'.format( deleted_files_table_name ), ( service_id, ) )
self._Execute( 'INSERT INTO {} ( hash_id ) SELECT hash_id FROM file_transfers WHERE service_id = ?;'.format( pending_files_table_name ), ( service_id, ) )
self._Execute( 'INSERT INTO {} ( hash_id, reason_id ) SELECT hash_id, reason_id FROM file_petitions WHERE service_id = ?;'.format( petitioned_files_table_name ), ( service_id, ) )
self.modules_db_maintenance.TouchAnalyzeNewTables()
# migration complete: drop the old monolithic tables
self._Execute( 'DROP TABLE current_files;' )
self._Execute( 'DROP TABLE deleted_files;' )
self._Execute( 'DROP TABLE file_transfers;' )
self._Execute( 'DROP TABLE file_petitions;' )
except Exception as e:
HydrusData.PrintException( e )
# a half-migrated schema is unusable; abort hard and ask for a rollback
raise Exception( 'Unfortunately, hydrus was unable to update your file storage to the new system! The error has been written to your log, please roll back to v446 and let hydev know!' )
#
# refresh the local hash cache against the reorganised storage
self.modules_hashes_local_cache.Repopulate()
# update from v447: queue all current local PSD files for forced thumbnail
# regeneration via the file maintenance system. best-effort.
if version == 447:
try:
self._controller.frame_splash_status.SetSubtext( 'scheduling PSD files for thumbnail regen' )
table_join = self.modules_files_storage.GetTableJoinLimitedByFileDomain( self.modules_services.combined_local_file_service_id, 'files_info', HC.CONTENT_STATUS_CURRENT )
hash_ids = self._STL( self._Execute( 'SELECT hash_id FROM {} WHERE mime = ?;'.format( table_join ), ( HC.APPLICATION_PSD, ) ) )
self.modules_files_maintenance_queue.AddJobs( hash_ids, ClientFiles.REGENERATE_FILE_DATA_JOB_FORCE_THUMBNAIL )
except Exception as e:
HydrusData.PrintException( e )
message = 'Trying to schedule PSD files for thumbnail generation failed! Please let hydrus dev know!'
self.pub_initial_message( message )
# update from v448: restructure each repository's updates table into three
# tables (update list, unregistered updates, per-content-type processed flags),
# then reset sibling/parent processing on every tag repository.
if version == 448:
self._controller.frame_splash_status.SetSubtext( 'updating repository update storage' )
for service_id in self.modules_services.GetServiceIds( HC.REPOSITORIES ):
service_type = self.modules_services.GetService( service_id ).GetServiceType()
( repository_updates_table_name, repository_unregistered_updates_table_name, repository_updates_processed_table_name ) = ClientDBRepositories.GenerateRepositoryUpdatesTableNames( service_id )
# if the unregistered-updates table exists, this service is already migrated
result = self._Execute( 'SELECT 1 FROM sqlite_master WHERE name = ?;', ( repository_unregistered_updates_table_name, ) ).fetchone()
if result is not None:
continue
# snapshot the old rows, then rebuild the table trio
all_data = self._Execute( 'SELECT update_index, hash_id, processed FROM {};'.format( repository_updates_table_name ) ).fetchall()
self._Execute( 'DROP TABLE {};'.format( repository_updates_table_name ) )
#
self._Execute( 'CREATE TABLE IF NOT EXISTS {} ( update_index INTEGER, hash_id INTEGER, PRIMARY KEY ( update_index, hash_id ) );'.format( repository_updates_table_name ) )
self._CreateIndex( repository_updates_table_name, [ 'hash_id' ] )
self._Execute( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER PRIMARY KEY );'.format( repository_unregistered_updates_table_name ) )
self._Execute( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER, content_type INTEGER, processed INTEGER_BOOLEAN, PRIMARY KEY ( hash_id, content_type ) );'.format( repository_updates_processed_table_name ) )
self._CreateIndex( repository_updates_processed_table_name, [ 'content_type' ] )
#
for ( update_index, hash_id, processed ) in all_data:
self._Execute( 'INSERT OR IGNORE INTO {} ( update_index, hash_id ) VALUES ( ?, ? );'.format( repository_updates_table_name ), ( update_index, hash_id ) )
try:
mime = self.modules_files_metadata_basic.GetMime( hash_id )
except HydrusExceptions.DataMissing:
# update file not known locally yet: park it as unregistered and move on
self._Execute( 'INSERT OR IGNORE INTO {} ( hash_id ) VALUES ( ? );'.format( repository_unregistered_updates_table_name ), ( hash_id, ) )
continue
# expand the single old 'processed' flag into one row per content type
if mime == HC.APPLICATION_HYDRUS_UPDATE_DEFINITIONS:
content_types = ( HC.CONTENT_TYPE_DEFINITIONS, )
else:
if service_type == HC.FILE_REPOSITORY:
content_types = ( HC.CONTENT_TYPE_FILES, )
else:
content_types = ( HC.CONTENT_TYPE_MAPPINGS, HC.CONTENT_TYPE_TAG_PARENTS, HC.CONTENT_TYPE_TAG_SIBLINGS )
self._ExecuteMany( 'INSERT OR IGNORE INTO {} ( hash_id, content_type, processed ) VALUES ( ?, ?, ? );'.format( repository_updates_processed_table_name ), ( ( hash_id, content_type, processed ) for content_type in content_types ) )
self.modules_repositories.DoOutstandingUpdateRegistration()
self._controller.frame_splash_status.SetSubtext( 'resetting siblings and parents' )
for service in self.modules_services.GetServices( ( HC.TAG_REPOSITORY, ) ):
service_key = service.GetServiceKey()
self._ResetRepositoryProcessing( service_key, ( HC.CONTENT_TYPE_TAG_PARENTS, HC.CONTENT_TYPE_TAG_SIBLINGS ) )
if version == 450:
    
    # Move the similar-files (perceptual hash) tables out of the external
    # caches db file: hashes and the hash->file map go to external master,
    # the search cache goes to the main db.
    
    # consistency fix: use the _Execute wrapper like every other statement in
    # this update method, rather than reaching for the raw cursor (self._c)
    result = self._Execute( 'SELECT 1 FROM external_caches.sqlite_master WHERE name = ?;', ( 'shape_perceptual_hashes', ) ).fetchone()
    
    if result is not None:
        
        self._controller.frame_splash_status.SetSubtext( 'moving some similar file data around' )
        
        # create destinations, copy everything across, then drop the originals
        self._Execute( 'CREATE TABLE IF NOT EXISTS external_master.shape_perceptual_hashes ( phash_id INTEGER PRIMARY KEY, phash BLOB_BYTES UNIQUE );' )
        self._Execute( 'CREATE TABLE IF NOT EXISTS external_master.shape_perceptual_hash_map ( phash_id INTEGER, hash_id INTEGER, PRIMARY KEY ( phash_id, hash_id ) );' )
        self._Execute( 'CREATE TABLE IF NOT EXISTS shape_search_cache ( hash_id INTEGER PRIMARY KEY, searched_distance INTEGER );' )
        
        self._Execute( 'INSERT OR IGNORE INTO external_master.shape_perceptual_hashes SELECT phash_id, phash FROM external_caches.shape_perceptual_hashes;' )
        self._Execute( 'INSERT OR IGNORE INTO external_master.shape_perceptual_hash_map SELECT phash_id, hash_id FROM external_caches.shape_perceptual_hash_map;' )
        self._Execute( 'INSERT OR IGNORE INTO main.shape_search_cache SELECT hash_id, searched_distance FROM external_caches.shape_search_cache;' )
        
        self._Execute( 'DROP TABLE external_caches.shape_perceptual_hashes;' )
        self._Execute( 'DROP TABLE external_caches.shape_perceptual_hash_map;' )
        self._Execute( 'DROP TABLE external_caches.shape_search_cache;' )
        
        self._CreateIndex( 'external_master.shape_perceptual_hash_map', [ 'hash_id' ] )
        
        self.modules_db_maintenance.TouchAnalyzeNewTables()
        
    
# update from v451: backfill the integer-subtags search cache for every
# ( file service x tag service ) pair, in chunks, with splash-screen feedback.
if version == 451:
self.modules_services.combined_file_service_id = self.modules_services.GetServiceId( CC.COMBINED_FILE_SERVICE_KEY )
file_service_ids = list( self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_TAG_LOOKUP_CACHES ) )
file_service_ids.append( self.modules_services.combined_file_service_id )
tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
for ( file_service_id, tag_service_id ) in itertools.product( file_service_ids, tag_service_ids ):
if file_service_id == self.modules_services.combined_file_service_id:
self._controller.frame_splash_status.SetText( 'working on combined tags cache - {}'.format( tag_service_id ) )
else:
self._controller.frame_splash_status.SetText( 'working on specific tags cache - {} {}'.format( file_service_id, tag_service_id ) )
tags_table_name = self.modules_tag_search.GetTagsTableName( file_service_id, tag_service_id )
integer_subtags_table_name = self.modules_tag_search.GetIntegerSubtagsTableName( file_service_id, tag_service_id )
query = 'SELECT subtag_id FROM {};'.format( tags_table_name )
BLOCK_SIZE = 10000
for ( group_of_subtag_ids, num_done, num_to_do ) in HydrusDB.ReadLargeIdQueryInSeparateChunks( self._c, query, BLOCK_SIZE ):
message = HydrusData.ConvertValueRangeToPrettyString( num_done, num_to_do )
self._controller.frame_splash_status.SetSubtext( message )
with self._MakeTemporaryIntegerTable( group_of_subtag_ids, 'subtag_id' ) as temp_subtag_ids_table_name:
# temp subtag_ids to subtags
subtag_ids_and_subtags = self._Execute( 'SELECT subtag_id, subtag FROM {} CROSS JOIN subtags USING ( subtag_id );'.format( temp_subtag_ids_table_name ) ).fetchall()
for ( subtag_id, subtag ) in subtag_ids_and_subtags:
# all-digit subtags that fit the cacheable integer range get an entry
if subtag.isdecimal():
try:
integer_subtag = int( subtag )
if ClientDBTagSearch.CanCacheInteger( integer_subtag ):
self._Execute( 'INSERT OR IGNORE INTO {} ( subtag_id, integer_subtag ) VALUES ( ?, ? );'.format( integer_subtags_table_name ), ( subtag_id, integer_subtag ) )
except ValueError:
# belt-and-braces: isdecimal is not a hard guarantee int() succeeds
pass
# update from v452: repair holes in the specific mapping caches (files missing
# from them), then drop the obsolete per-pair cache files table.
if version == 452:
file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_TAG_LOOKUP_CACHES )
tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
for ( file_service_id, tag_service_id ) in itertools.product( file_service_ids, tag_service_ids ):
suffix = '{}_{}'.format( file_service_id, tag_service_id )
cache_files_table_name = 'external_caches.specific_files_cache_{}'.format( suffix )
# skip pairs whose legacy cache table was already removed
result = self._Execute( 'SELECT 1 FROM external_caches.sqlite_master WHERE name = ?;', ( cache_files_table_name.split( '.', 1 )[1], ) ).fetchone()
if result is None:
continue
self._controller.frame_splash_status.SetText( 'filling holes in specific tags cache - {} {}'.format( file_service_id, tag_service_id ) )
# it turns out cache_files_table_name was not being populated on service creation/reset, so files imported before a tag service was created were not being stored in specific mapping cache data!
# furthermore, there was confusion whether cache_files_table_name was for mappings (files that have tags) on the tag service or just files on the file service.
# since we now store current files for each file service on a separate table, and the clever mappings intepretation seems expensive and not actually so useful, we are moving to our nice table instead in various joins/filters/etc...
current_files_table_name = ClientDBFilesStorage.GenerateFilesTableName( file_service_id, HC.CONTENT_STATUS_CURRENT )
# files the service holds but the cache missed
query = 'SELECT hash_id FROM {} EXCEPT SELECT hash_id FROM {};'.format( current_files_table_name, cache_files_table_name )
BLOCK_SIZE = 10000
for ( group_of_hash_ids, num_done, num_to_do ) in HydrusDB.ReadLargeIdQueryInSeparateChunks( self._c, query, BLOCK_SIZE ):
message = HydrusData.ConvertValueRangeToPrettyString( num_done, num_to_do )
self._controller.frame_splash_status.SetSubtext( message )
with self._MakeTemporaryIntegerTable( group_of_hash_ids, 'hash_id' ) as temp_hash_ids_table_name:
self._CacheSpecificMappingsAddFiles( file_service_id, tag_service_id, group_of_hash_ids, temp_hash_ids_table_name )
self.modules_mappings_cache_specific_display.AddFiles( file_service_id, tag_service_id, group_of_hash_ids, temp_hash_ids_table_name )
# legacy table no longer needed once the holes are filled
self._Execute( 'DROP TABLE {};'.format( cache_files_table_name ) )
# update from v459: queue clip files for metadata + forced thumbnail regen and
# apng files for metadata regen. best-effort.
if version == 459:
try:
self._controller.frame_splash_status.SetSubtext( 'scheduling clip and apng files for regen' )
table_join = self.modules_files_storage.GetTableJoinLimitedByFileDomain( self.modules_services.combined_local_file_service_id, 'files_info', HC.CONTENT_STATUS_CURRENT )
hash_ids = self._STL( self._Execute( 'SELECT hash_id FROM {} WHERE mime = ?;'.format( table_join ), ( HC.APPLICATION_CLIP, ) ) )
self.modules_files_maintenance_queue.AddJobs( hash_ids, ClientFiles.REGENERATE_FILE_DATA_JOB_FILE_METADATA )
self.modules_files_maintenance_queue.AddJobs( hash_ids, ClientFiles.REGENERATE_FILE_DATA_JOB_FORCE_THUMBNAIL )
hash_ids = self._STL( self._Execute( 'SELECT hash_id FROM {} WHERE mime = ?;'.format( table_join ), ( HC.IMAGE_APNG, ) ) )
self.modules_files_maintenance_queue.AddJobs( hash_ids, ClientFiles.REGENERATE_FILE_DATA_JOB_FILE_METADATA )
except Exception as e:
HydrusData.PrintException( e )
message = 'Trying to schedule clip and apng files for maintenance failed! Please let hydrus dev know!'
self.pub_initial_message( message )
# update from v460: queue clip files for metadata/thumbnail-refit regen, plus a
# metadata recheck of suspicious videos. best-effort.
if version == 460:
try:
self._controller.frame_splash_status.SetSubtext( 'scheduling clip files for regen' )
table_join = self.modules_files_storage.GetTableJoinLimitedByFileDomain( self.modules_services.combined_local_file_service_id, 'files_info', HC.CONTENT_STATUS_CURRENT )
hash_ids = self._STL( self._Execute( 'SELECT hash_id FROM {} WHERE mime = ?;'.format( table_join ), ( HC.APPLICATION_CLIP, ) ) )
self.modules_files_maintenance_queue.AddJobs( hash_ids, ClientFiles.REGENERATE_FILE_DATA_JOB_FILE_METADATA )
self.modules_files_maintenance_queue.AddJobs( hash_ids, ClientFiles.REGENERATE_FILE_DATA_JOB_REFIT_THUMBNAIL )
# files claiming >1 hour duration but <64MB at >=480x360 -- NOTE(review):
# presumably these had mis-parsed durations; rationale inferred, confirm
hash_ids = self._STL( self._Execute( 'SELECT hash_id FROM {} WHERE duration > ? AND size < ? AND width >= ? AND height >= ?;'.format( table_join ), ( 3600 * 1000, 64 * 1048576, 480, 360 ) ) )
self.modules_files_maintenance_queue.AddJobs( hash_ids, ClientFiles.REGENERATE_FILE_DATA_JOB_FILE_METADATA )
except Exception as e:
HydrusData.PrintException( e )
message = 'Trying to schedule clip files for maintenance failed! Please let hydrus dev know!'
self.pub_initial_message( message )
if version == 461:
    
    # Offer the new-client default like/dislike 'favourites' rating service to
    # existing users who have no rating services, then refresh a pixiv parser.
    try:
        
        num_rating_services = len( self.modules_services.GetServiceIds( HC.RATINGS_SERVICES ) )
        
        if num_rating_services == 0:
            
            def ask_what_to_do_ratings_service():
                
                message = 'New clients now start with a simple like/dislike rating service. You are not new, but you have no rating services--would you like to get this default now and try ratings out?'
                
                from hydrus.client.gui import ClientGUIDialogsQuick
                
                result = ClientGUIDialogsQuick.GetYesNo( None, message, title = 'Get rating service?' )
                
                return result == QW.QDialog.Accepted
                
            
            # the dialog must run on the Qt thread; block the update until answered
            add_favourites = self._controller.CallBlockingToQt( None, ask_what_to_do_ratings_service )
            
            if add_favourites:
                
                ( service_key, service_type, name ) = ( CC.DEFAULT_FAVOURITES_RATING_SERVICE_KEY, HC.LOCAL_RATING_LIKE, 'favourites' )
                
                dictionary = ClientServices.GenerateDefaultServiceDictionary( service_type )
                
                from hydrus.client.metadata import ClientRatings
                
                dictionary[ 'shape' ] = ClientRatings.STAR
                
                # per rating state: ( ( outline rgb ), ( fill rgb ) )
                like_colours = {}
                
                like_colours[ ClientRatings.LIKE ] = ( ( 0, 0, 0 ), ( 240, 240, 65 ) )
                like_colours[ ClientRatings.DISLIKE ] = ( ( 0, 0, 0 ), ( 200, 80, 120 ) )
                like_colours[ ClientRatings.NULL ] = ( ( 0, 0, 0 ), ( 191, 191, 191 ) )
                like_colours[ ClientRatings.MIXED ] = ( ( 0, 0, 0 ), ( 95, 95, 95 ) )
                
                dictionary[ 'colours' ] = list( like_colours.items() )
                
                self._AddService( service_key, service_type, name, dictionary )
                
            
        
    except Exception as e:
        
        HydrusData.PrintException( e )
        
        message = 'Trying to add a default favourites service failed. Please let hydrus dev know!'
        
        self.pub_initial_message( message )
        
    
    #
    
    try:
        
        domain_manager = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_DOMAIN_MANAGER )
        
        domain_manager.Initialise()
        
        #
        
        # FIX: the original passed a bare parenthesised string here--( 'x' ) is
        # not a tuple--so OverwriteDefaultParsers received the string itself and
        # would iterate its characters rather than one parser name. the trailing
        # comma makes it a proper one-element tuple.
        domain_manager.OverwriteDefaultParsers( ( 'pixiv artist gallery page api parser new urls', ) )
        
        #
        
        self.modules_serialisable.SetJSONDump( domain_manager )
        
    except Exception as e:
        
        HydrusData.PrintException( e )
        
        message = 'Trying to update some downloader objects failed! Please let hydrus dev know!'
        
        self.pub_initial_message( message )
        
    
if version == 462:
try:
domain_manager = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_DOMAIN_MANAGER )
domain_manager.Initialise()
#
domain_manager.OverwriteDefaultGUGs( ( 'deviant art tag search', ) )
domain_manager.OverwriteDefaultParsers( ( 'deviant gallery page api parser (new cursor)', ) )
domain_manager.OverwriteDefaultURLClasses( ( 'deviant art tag gallery page api (cursor navigation)', ) )
#
domain_manager.TryToLinkURLClassesAndParsers()
#
self.modules_serialisable.SetJSONDump( domain_manager )
except Exception as e:
HydrusData.PrintException( e )
message = 'Trying to update some parsers failed! Please let hydrus dev know!'
self.pub_initial_message( message )
try:
self._controller.frame_splash_status.SetSubtext( 'scheduling ogg files for regen' )
table_join = self.modules_files_storage.GetTableJoinLimitedByFileDomain( self.modules_services.combined_local_file_service_id, 'files_info', HC.CONTENT_STATUS_CURRENT )
hash_ids = self._STL( self._Execute( 'SELECT hash_id FROM {} WHERE mime = ?;'.format( table_join ), ( HC.AUDIO_OGG, ) ) )
self.modules_files_maintenance_queue.AddJobs( hash_ids, ClientFiles.REGENERATE_FILE_DATA_JOB_FILE_METADATA )
self.modules_files_maintenance_queue.AddJobs( hash_ids, ClientFiles.REGENERATE_FILE_DATA_JOB_REFIT_THUMBNAIL )
except Exception as e:
HydrusData.PrintException( e )
message = 'Trying to schedule ogg files for maintenance failed! Please let hydrus dev know!'
self.pub_initial_message( message )
if version == 463:
result = self._Execute( 'SELECT 1 FROM sqlite_master WHERE name = ?;', ( 'deferred_physical_file_deletes', ) ).fetchone()
if result is None:
self._Execute( 'CREATE TABLE IF NOT EXISTS deferred_physical_file_deletes ( hash_id INTEGER PRIMARY KEY );' )
self._Execute( 'CREATE TABLE IF NOT EXISTS deferred_physical_thumbnail_deletes ( hash_id INTEGER PRIMARY KEY );' )
if version == 464:
try:
domain_manager = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_DOMAIN_MANAGER )
domain_manager.Initialise()
#
domain_manager.OverwriteDefaultParsers( ( 'gelbooru 0.2.x gallery page parser', ) )
#
domain_manager.TryToLinkURLClassesAndParsers()
#
self.modules_serialisable.SetJSONDump( domain_manager )
except Exception as e:
HydrusData.PrintException( e )
message = 'Trying to update some parsers failed! Please let hydrus dev know!'
self.pub_initial_message( message )
#
result = self.modules_services.GetServiceIds( ( HC.COMBINED_DELETED_FILE, ) )
if len( result ) == 0:
self._controller.frame_splash_status.SetText( 'creating new tag search data' )
dictionary = ClientServices.GenerateDefaultServiceDictionary( HC.COMBINED_DELETED_FILE )
self._AddService( CC.COMBINED_DELETED_FILE_SERVICE_KEY, HC.COMBINED_DELETED_FILE, 'all deleted files', dictionary )
#
# populate combined deleted files current files table
self.modules_files_storage.DropFilesTables( self.modules_services.combined_deleted_file_service_id )
self.modules_files_storage.GenerateFilesTables( self.modules_services.combined_deleted_file_service_id )
tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
for tag_service_id in tag_service_ids:
# this should make them empty, since no files yet
self.modules_tag_search.Drop( self.modules_services.combined_deleted_file_service_id, tag_service_id )
self.modules_tag_search.Generate( self.modules_services.combined_deleted_file_service_id, tag_service_id )
self._CacheSpecificMappingsDrop( self.modules_services.combined_deleted_file_service_id, tag_service_id )
self._CacheSpecificMappingsGenerate( self.modules_services.combined_deleted_file_service_id, tag_service_id )
combined_deleted_files_current_files_table_name = ClientDBFilesStorage.GenerateFilesTableName( self.modules_services.combined_deleted_file_service_id, HC.CONTENT_STATUS_CURRENT )
file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_COVERED_BY_COMBINED_DELETED_FILE )
for ( i, file_service_id ) in enumerate( file_service_ids ):
deleted_files_table_name = ClientDBFilesStorage.GenerateFilesTableName( file_service_id, HC.CONTENT_STATUS_DELETED )
for ( chunk_of_hash_ids, num_done, num_to_do ) in HydrusDB.ReadLargeIdQueryInSeparateChunks( self._c, 'SELECT hash_id FROM {};'.format( deleted_files_table_name ), 1024 ):
message = 'deleted files cache: service {}, done {}'.format( HydrusData.ConvertValueRangeToPrettyString( i + 1, len( file_service_ids ) ), HydrusData.ConvertValueRangeToPrettyString( num_done, num_to_do ) )
self._controller.frame_splash_status.SetSubtext( message )
for hash_id in chunk_of_hash_ids:
row = self._Execute( 'SELECT hash_id, timestamp FROM {} WHERE hash_id = ?;'.format( deleted_files_table_name ), ( hash_id, ) ).fetchone()
existing_row = self._Execute( 'SELECT hash_id, timestamp FROM {} WHERE hash_id = ?;'.format( combined_deleted_files_current_files_table_name ), ( hash_id, ) ).fetchone()
if existing_row is None:
rows = [ row ]
# this should now populate the tag caches and search cache
self._AddFiles( self.modules_services.combined_deleted_file_service_id, rows )
else:
# it doesn't really matter, but let's try to have the earliest timestamp here to start with, since that'll be roughly 'natural' going forwards
if row[1] is not None and ( existing_row[1] is None or row[1] < existing_row[1] ):
self._Execute( 'UPDATE {} SET timestamp = ? WHERE hash_id = ?;'.format( combined_deleted_files_current_files_table_name ), ( row[1], hash_id ) )
self.modules_db_maintenance.TouchAnalyzeNewTables()
self._cursor_transaction_wrapper.CommitAndBegin()
#
# ipfs is also getting specific caches and tag search too, so we'll do that here
tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
file_service_ids = self.modules_services.GetServiceIds( ( HC.IPFS, ) )
for file_service_id in file_service_ids:
hash_ids = self.modules_files_storage.GetCurrentHashIdsList( file_service_id )
for tag_service_id in tag_service_ids:
time.sleep( 0.01 )
self.modules_tag_search.Drop( file_service_id, tag_service_id )
self.modules_tag_search.Generate( file_service_id, tag_service_id )
self._CacheSpecificMappingsDrop( file_service_id, tag_service_id )
self._CacheSpecificMappingsCreateTables( file_service_id, tag_service_id )
self.modules_mappings_cache_specific_display.Generate( file_service_id, tag_service_id, populate_from_storage = False )
BLOCK_SIZE = 1000
for ( i, block_of_hash_ids ) in enumerate( HydrusData.SplitListIntoChunks( hash_ids, BLOCK_SIZE ) ):
with self._MakeTemporaryIntegerTable( block_of_hash_ids, 'hash_id' ) as temp_hash_id_table_name:
message = 'ipfs: {}_{} - {}'.format( file_service_id, tag_service_id, HydrusData.ConvertValueRangeToPrettyString( i * BLOCK_SIZE, len( hash_ids ) ) )
self._controller.frame_splash_status.SetSubtext( message )
self._CacheSpecificMappingsAddFiles( file_service_id, tag_service_id, block_of_hash_ids, temp_hash_id_table_name )
self.modules_mappings_cache_specific_display.AddFiles( file_service_id, tag_service_id, block_of_hash_ids, temp_hash_id_table_name )
self.modules_db_maintenance.TouchAnalyzeNewTables()
self._cursor_transaction_wrapper.CommitAndBegin()
#
result = self._Execute( 'SELECT 1 FROM sqlite_master WHERE name = ?;', ( 'has_icc_profile', ) ).fetchone()
if result is None:
try:
self._Execute( 'CREATE TABLE IF NOT EXISTS has_icc_profile ( hash_id INTEGER PRIMARY KEY );' )
self._controller.frame_splash_status.SetSubtext( 'scheduling files for icc profile scan' )
table_join = self.modules_files_storage.GetTableJoinLimitedByFileDomain( self.modules_services.combined_local_file_service_id, 'files_info', HC.CONTENT_STATUS_CURRENT )
hash_ids = self._STL( self._Execute( 'SELECT hash_id FROM {} WHERE mime IN {};'.format( table_join, HydrusData.SplayListForDB( HC.FILES_THAT_CAN_HAVE_ICC_PROFILE ) ) ) )
self.modules_files_maintenance_queue.AddJobs( hash_ids, ClientFiles.REGENERATE_FILE_DATA_JOB_FILE_HAS_ICC_PROFILE )
except Exception as e:
HydrusData.PrintException( e )
message = 'Trying to schedule image files for icc maintenance failed! Please let hydrus dev know!'
self.pub_initial_message( message )
#
result = self._Execute( 'SELECT 1 FROM sqlite_master WHERE name = ?;', ( 'pixel_hash_map', ) ).fetchone()
if result is None:
try:
self._Execute( 'CREATE TABLE IF NOT EXISTS pixel_hash_map ( hash_id INTEGER, pixel_hash_id INTEGER, PRIMARY KEY ( hash_id, pixel_hash_id ) );' )
self._CreateIndex( 'pixel_hash_map', [ 'pixel_hash_id' ] )
self._controller.frame_splash_status.SetSubtext( 'scheduling files for pixel hash generation' )
table_join = self.modules_files_storage.GetTableJoinLimitedByFileDomain( self.modules_services.combined_local_file_service_id, 'files_info', HC.CONTENT_STATUS_CURRENT )
hash_ids = self._STL( self._Execute( 'SELECT hash_id FROM {} WHERE mime IN {};'.format( table_join, HydrusData.SplayListForDB( HC.FILES_THAT_CAN_HAVE_PIXEL_HASH ) ) ) )
self.modules_files_maintenance_queue.AddJobs( hash_ids, ClientFiles.REGENERATE_FILE_DATA_JOB_PIXEL_HASH )
except Exception as e:
HydrusData.PrintException( e )
message = 'Trying to schedule image files for pixel hash maintenance failed! Please let hydrus dev know!'
self.pub_initial_message( message )
if version == 467:
try:
self._controller.frame_splash_status.SetSubtext( 'fixing a pixel duplicates storage problem' )
bad_ids = self._STS( self._Execute( 'SELECT hash_id FROM pixel_hash_map WHERE hash_id = pixel_hash_id;' ) )
self.modules_files_maintenance_queue.AddJobs( bad_ids, ClientFiles.REGENERATE_FILE_DATA_JOB_PIXEL_HASH )
self._Execute( 'DELETE FROM pixel_hash_map WHERE hash_id = pixel_hash_id;' )
except Exception as e:
HydrusData.PrintException( e )
message = 'Trying to schedule image files for pixel hash maintenance failed! Please let hydrus dev know!'
self.pub_initial_message( message )
if version == 469:
try:
self._controller.frame_splash_status.SetSubtext( 'scheduling video for better silent audio track check' )
table_join = self.modules_files_storage.GetTableJoinLimitedByFileDomain( self.modules_services.combined_local_file_service_id, 'files_info', HC.CONTENT_STATUS_CURRENT )
hash_ids = self._STL( self._Execute( 'SELECT hash_id FROM {} WHERE mime IN {} AND has_audio = ?;'.format( table_join, HydrusData.SplayListForDB( HC.VIDEO ) ), ( True, ) ) )
self.modules_files_maintenance_queue.AddJobs( hash_ids, ClientFiles.REGENERATE_FILE_DATA_JOB_FILE_METADATA )
except Exception as e:
HydrusData.PrintException( e )
message = 'Trying to schedule audible video files for audio track recheck failed! Please let hydrus dev know!'
self.pub_initial_message( message )
if version == 470:
( result, ) = self._Execute( 'SELECT sql FROM sqlite_master WHERE name = ?;', ( 'file_viewing_stats', ) ).fetchone()
if 'preview_views' in result:
self._controller.frame_splash_status.SetSubtext( 'reworking file viewing stats' )
self._Execute( 'ALTER TABLE file_viewing_stats RENAME TO file_viewing_stats_old;' )
self._Execute( 'CREATE TABLE IF NOT EXISTS file_viewing_stats ( hash_id INTEGER, canvas_type INTEGER, last_viewed_timestamp INTEGER, views INTEGER, viewtime INTEGER, PRIMARY KEY ( hash_id, canvas_type ) );' )
self._CreateIndex( 'file_viewing_stats', [ 'last_viewed_timestamp' ] )
self._CreateIndex( 'file_viewing_stats', [ 'views' ] )
self._CreateIndex( 'file_viewing_stats', [ 'viewtime' ] )
self._Execute( 'INSERT INTO file_viewing_stats SELECT hash_id, ?, ?, preview_views, preview_viewtime FROM file_viewing_stats_old;', ( CC.CANVAS_PREVIEW, None ) )
self._Execute( 'INSERT INTO file_viewing_stats SELECT hash_id, ?, ?, media_views, media_viewtime FROM file_viewing_stats_old;', ( CC.CANVAS_MEDIA_VIEWER, None ) )
self.modules_db_maintenance.AnalyzeTable( 'file_viewing_stats' )
self._Execute( 'DROP TABLE file_viewing_stats_old;' )
if version == 472:
try:
from hydrus.client.gui import ClientGUIShortcuts
main_gui = self.modules_serialisable.GetJSONDumpNamed( HydrusSerialisable.SERIALISABLE_TYPE_SHORTCUT_SET, dump_name = 'main_gui' )
palette_shortcut = ClientGUIShortcuts.Shortcut( ClientGUIShortcuts.SHORTCUT_TYPE_KEYBOARD_CHARACTER, ord( 'P' ), ClientGUIShortcuts.SHORTCUT_PRESS_TYPE_PRESS, [ ClientGUIShortcuts.SHORTCUT_MODIFIER_CTRL ] )
palette_command = CAC.ApplicationCommand.STATICCreateSimpleCommand( CAC.SIMPLE_OPEN_COMMAND_PALETTE )
result = main_gui.GetCommand( palette_shortcut )
if result is None:
main_gui.SetCommand( palette_shortcut, palette_command )
self.modules_serialisable.SetJSONDump( main_gui )
except Exception as e:
HydrusData.PrintException( e )
message = 'The new palette shortcut failed to set! This is not super important, but hydev would be interested in seeing the error that was printed to the log.'
self.pub_initial_message( message )
if version == 473:
result = self._Execute( 'SELECT 1 FROM sqlite_master WHERE name = ?;', ( 'archive_timestamps', ) ).fetchone()
if result is None:
self._Execute( 'CREATE TABLE IF NOT EXISTS archive_timestamps ( hash_id INTEGER PRIMARY KEY, archived_timestamp INTEGER );' )
self._CreateIndex( 'archive_timestamps', [ 'archived_timestamp' ] )
try:
location_context = ClientLocation.LocationContext( current_service_keys = ( CC.COMBINED_LOCAL_FILE_SERVICE_KEY, ) )
db_location_context = self.modules_files_storage.GetDBLocationContext( location_context )
operator = '>'
num_relationships = 0
dupe_type = HC.DUPLICATE_POTENTIAL
dupe_hash_ids = self.modules_files_duplicates.DuplicatesGetHashIdsFromDuplicateCountPredicate( db_location_context, operator, num_relationships, dupe_type )
with self._MakeTemporaryIntegerTable( dupe_hash_ids, 'hash_id' ) as temp_hash_ids_table_name:
hash_ids = self._STS( self._Execute( 'SELECT hash_id FROM {} CROSS JOIN files_info USING ( hash_id ) WHERE mime IN {};'.format( temp_hash_ids_table_name, HydrusData.SplayListForDB( ( HC.IMAGE_GIF, HC.IMAGE_PNG, HC.IMAGE_TIFF ) ) ), ) )
self.modules_files_maintenance_queue.AddJobs( hash_ids, ClientFiles.REGENERATE_FILE_DATA_JOB_PIXEL_HASH )
except Exception as e:
HydrusData.PrintException( e )
message = 'Some pixel hash regen scheduling failed to set! This is not super important, but hydev would be interested in seeing the error that was printed to the log.'
self.pub_initial_message( message )
if version == 474:
try:
# ok we have improved apng detection now, so let's efficiently guess which of our pngs could be apngs for rescan
# IRL data of some 2-frame (i.e. minimal inaccuracy) apngs: 1.16MB @ 908x1,214 and 397KB @ 500x636, which for a single frame calculation is bitrates of 1.08 bits/pixel and 1.28 bits/pixel
# most apngs are going to be above this fake 1-frame bitrate
# as an aside, IRL data of some chunky pngs give about 2.5 bits/pixel, efficient screenshots and monochome tend to be around 0.2
# real apngs divided by number of frames tend to be around 0.05 to 0.2 to 1.0
# so, let's pull all the pngs with bitrate over 0.85 and schedule them for rescan
table_join = self.modules_files_storage.GetTableJoinLimitedByFileDomain( self.modules_services.combined_local_file_service_id, 'files_info', HC.CONTENT_STATUS_CURRENT )
hash_ids = self._STL( self._Execute( 'SELECT hash_id FROM {} WHERE mime = ? AND size / ( width * height ) > ?;'.format( table_join ), ( HC.IMAGE_PNG, 0.85 ) ) )
self.modules_files_maintenance_queue.AddJobs( hash_ids, ClientFiles.REGENERATE_FILE_DATA_JOB_FILE_METADATA )
except Exception as e:
HydrusData.PrintException( e )
message = 'Some apng regen scheduling failed to set! This is not super important, but hydev would be interested in seeing the error that was printed to the log.'
self.pub_initial_message( message )
try:
table_join = self.modules_files_storage.GetTableJoinLimitedByFileDomain( self.modules_services.combined_local_file_service_id, 'files_info', HC.CONTENT_STATUS_CURRENT )
hash_ids = self._STL( self._Execute( 'SELECT hash_id FROM {} WHERE mime = ?;'.format( table_join ), ( HC.AUDIO_M4A, ) ) )
self.modules_files_maintenance_queue.AddJobs( hash_ids, ClientFiles.REGENERATE_FILE_DATA_JOB_FILE_METADATA )
except Exception as e:
HydrusData.PrintException( e )
message = 'Some mp4 regen scheduling failed to set! This is not super important, but hydev would be interested in seeing the error that was printed to the log.'
self.pub_initial_message( message )
try:
domain_manager = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_DOMAIN_MANAGER )
domain_manager.Initialise()
#
domain_manager.OverwriteDefaultParsers( ( 'deviant art file extended_fetch parser', ) )
#
from hydrus.client.networking import ClientNetworkingContexts
sank_network_context = ClientNetworkingContexts.NetworkContext( CC.NETWORK_CONTEXT_DOMAIN, 'sankakucomplex.com' )
network_contexts_to_custom_header_dicts = domain_manager.GetNetworkContextsToCustomHeaderDicts()
if sank_network_context in network_contexts_to_custom_header_dicts:
custom_header_dict = network_contexts_to_custom_header_dicts[ sank_network_context ]
if 'User-Agent' in custom_header_dict:
( header, verified, reason ) = custom_header_dict[ 'User-Agent' ]
if header == 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:56.0) Gecko/20100101 Firefox/56.0':
header = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:97.0) Gecko/20100101 Firefox/97.0'
custom_header_dict[ 'User-Agent' ] = ( header, verified, reason )
domain_manager.SetNetworkContextsToCustomHeaderDicts( network_contexts_to_custom_header_dicts )
#
domain_manager.TryToLinkURLClassesAndParsers()
#
self.modules_serialisable.SetJSONDump( domain_manager )
except Exception as e:
HydrusData.PrintException( e )
message = 'Trying to update some parsers failed! Please let hydrus dev know!'
self.pub_initial_message( message )
if version == 475:
result = self._Execute( 'SELECT 1 FROM sqlite_master WHERE name = ?;', ( 'file_domain_modified_timestamps', ) ).fetchone()
if result is None:
self._Execute( 'CREATE TABLE IF NOT EXISTS file_domain_modified_timestamps ( hash_id INTEGER, domain_id INTEGER, file_modified_timestamp INTEGER, PRIMARY KEY ( hash_id, domain_id ) );' )
self._CreateIndex( 'file_domain_modified_timestamps', [ 'file_modified_timestamp' ] )
if version == 476:
try:
# fixed apng duration calculation
table_join = self.modules_files_storage.GetTableJoinLimitedByFileDomain( self.modules_services.combined_local_file_service_id, 'files_info', HC.CONTENT_STATUS_CURRENT )
hash_ids = self._STL( self._Execute( 'SELECT hash_id FROM {} WHERE mime = ?;'.format( table_join ), ( HC.IMAGE_APNG, ) ) )
self.modules_files_maintenance_queue.AddJobs( hash_ids, ClientFiles.REGENERATE_FILE_DATA_JOB_FILE_METADATA )
except Exception as e:
HydrusData.PrintException( e )
message = 'Some apng regen scheduling failed to set! This is not super important, but hydev would be interested in seeing the error that was printed to the log.'
self.pub_initial_message( message )
try:
domain_manager = self.modules_serialisable.GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_DOMAIN_MANAGER )
domain_manager.Initialise()
#
domain_manager.OverwriteDefaultParsers( ( 'nitter tweet parser', 'nitter tweet parser (video from koto.reisen)' ) )
#
domain_manager.TryToLinkURLClassesAndParsers()
#
self.modules_serialisable.SetJSONDump( domain_manager )
except Exception as e:
HydrusData.PrintException( e )
message = 'Trying to update some parsers failed! Please let hydrus dev know!'
self.pub_initial_message( message )
self._controller.frame_splash_status.SetTitleText( 'updated db to v{}'.format( HydrusData.ToHumanInt( version + 1 ) ) )
self._Execute( 'UPDATE version SET version = ?;', ( version + 1, ) )
def _UpdateMappings( self, tag_service_id, mappings_ids = None, deleted_mappings_ids = None, pending_mappings_ids = None, pending_rescinded_mappings_ids = None, petitioned_mappings_ids = None, petitioned_rescinded_mappings_ids = None ):
    """Apply a batch of mapping content changes to one tag service.
    
    Each of the *_mappings_ids parameters is an iterable of
    ( tag_id, hash_ids ) pairs, except petitioned_mappings_ids, which
    carries ( tag_id, hash_ids, reason_id ) triples. None means 'no
    changes of that type'. This updates the service's storage mappings
    tables, the combined/specific mapping caches, the autocomplete count
    caches, and finally the cached totals in the service_info table.
    """
    
    ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = ClientDBMappingsStorage.GenerateMappingsTableNames( tag_service_id )
    
    if mappings_ids is None: mappings_ids = []
    if deleted_mappings_ids is None: deleted_mappings_ids = []
    if pending_mappings_ids is None: pending_mappings_ids = []
    if pending_rescinded_mappings_ids is None: pending_rescinded_mappings_ids = []
    if petitioned_mappings_ids is None: petitioned_mappings_ids = []
    if petitioned_rescinded_mappings_ids is None: petitioned_rescinded_mappings_ids = []
    
    # strip rows that would be no-ops against current storage; note the two
    # petition lists are deliberately not pre-filtered here
    mappings_ids = self._FilterExistingUpdateMappings( tag_service_id, mappings_ids, HC.CONTENT_UPDATE_ADD )
    deleted_mappings_ids = self._FilterExistingUpdateMappings( tag_service_id, deleted_mappings_ids, HC.CONTENT_UPDATE_DELETE )
    pending_mappings_ids = self._FilterExistingUpdateMappings( tag_service_id, pending_mappings_ids, HC.CONTENT_UPDATE_PEND )
    pending_rescinded_mappings_ids = self._FilterExistingUpdateMappings( tag_service_id, pending_rescinded_mappings_ids, HC.CONTENT_UPDATE_RESCIND_PEND )
    
    # figure out which of the affected tags are 'chained' in the actual display
    # context--those need the extra ...ForChained display-cache updates below
    tag_ids_to_filter_chained = { tag_id for ( tag_id, hash_ids ) in itertools.chain.from_iterable( ( mappings_ids, deleted_mappings_ids, pending_mappings_ids, pending_rescinded_mappings_ids ) ) }
    
    chained_tag_ids = self.modules_tag_display.FilterChained( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, tag_ids_to_filter_chained )
    
    file_service_ids = self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES )
    
    # running deltas, applied to service_info at the end
    change_in_num_mappings = 0
    change_in_num_deleted_mappings = 0
    change_in_num_pending_mappings = 0
    change_in_num_petitioned_mappings = 0
    change_in_num_files = 0
    
    # every hash gaining a current or pending mapping in this batch
    hash_ids_lists = ( hash_ids for ( tag_id, hash_ids ) in itertools.chain.from_iterable( ( mappings_ids, pending_mappings_ids ) ) )
    hash_ids_being_added = { hash_id for hash_id in itertools.chain.from_iterable( hash_ids_lists ) }
    
    # every hash losing a current or pending mapping in this batch
    hash_ids_lists = ( hash_ids for ( tag_id, hash_ids ) in itertools.chain.from_iterable( ( deleted_mappings_ids, pending_rescinded_mappings_ids ) ) )
    hash_ids_being_removed = { hash_id for hash_id in itertools.chain.from_iterable( hash_ids_lists ) }
    
    hash_ids_being_altered = hash_ids_being_added.union( hash_ids_being_removed )
    
    filtered_hashes_generator = self._CacheSpecificMappingsGetFilteredHashesGenerator( file_service_ids, tag_service_id, hash_ids_being_altered )
    
    # stage the altered hash_ids in a temp table so we can diff which hashes
    # have any current mapping before vs after, giving us the num-files delta
    self._Execute( 'CREATE TABLE IF NOT EXISTS mem.temp_hash_ids ( hash_id INTEGER );' )
    
    self._ExecuteMany( 'INSERT INTO temp_hash_ids ( hash_id ) VALUES ( ? );', ( ( hash_id, ) for hash_id in hash_ids_being_altered ) )
    
    pre_existing_hash_ids = self._STS( self._Execute( 'SELECT hash_id FROM temp_hash_ids WHERE EXISTS ( SELECT 1 FROM {} WHERE hash_id = temp_hash_ids.hash_id );'.format( current_mappings_table_name ) ) )
    
    # hashes being added that had no current mappings at all are 'new files'
    num_files_added = len( hash_ids_being_added.difference( pre_existing_hash_ids ) )
    
    change_in_num_files += num_files_added
    
    # BIG NOTE:
    # after testing some situations, it makes nicest logical sense to interleave all cache updates into the loops
    # otherwise, when there are conflicts due to sheer duplication or the display system applying two tags at once with the same implications, we end up relying on an out-of-date/unsynced (in cache terms) specific cache for combined etc...
    # I now extend this to counts, argh. this is not great in overhead terms, but many optimisations rely on a/c counts now, and the fallback is the combined storage ac count cache
    
    if len( mappings_ids ) > 0:
        
        for ( tag_id, hash_ids ) in mappings_ids:
            
            if tag_id in chained_tag_ids:
                
                self._CacheCombinedFilesDisplayMappingsAddMappingsForChained( tag_service_id, tag_id, hash_ids )
                
            
            # an add clears any deleted/pending rows for the same mapping
            self._ExecuteMany( 'DELETE FROM ' + deleted_mappings_table_name + ' WHERE tag_id = ? AND hash_id = ?;', ( ( tag_id, hash_id ) for hash_id in hash_ids ) )
            
            num_deleted_deleted = self._GetRowCount()
            
            self._ExecuteMany( 'DELETE FROM ' + pending_mappings_table_name + ' WHERE tag_id = ? AND hash_id = ?;', ( ( tag_id, hash_id ) for hash_id in hash_ids ) )
            
            num_pending_deleted = self._GetRowCount()
            
            self._ExecuteMany( 'INSERT OR IGNORE INTO ' + current_mappings_table_name + ' VALUES ( ?, ? );', ( ( tag_id, hash_id ) for hash_id in hash_ids ) )
            
            num_current_inserted = self._GetRowCount()
            
            change_in_num_deleted_mappings -= num_deleted_deleted
            change_in_num_pending_mappings -= num_pending_deleted
            change_in_num_mappings += num_current_inserted
            
            self.modules_mappings_counts_update.UpdateCounts( ClientTags.TAG_DISPLAY_STORAGE, self.modules_services.combined_file_service_id, tag_service_id, [ ( tag_id, num_current_inserted, - num_pending_deleted ) ] )
            
            # for unchained tags, actual display counts mirror storage directly;
            # chained tags were already handled by the ForChained call above
            if tag_id not in chained_tag_ids:
                
                self.modules_mappings_counts_update.UpdateCounts( ClientTags.TAG_DISPLAY_ACTUAL, self.modules_services.combined_file_service_id, tag_service_id, [ ( tag_id, num_current_inserted, - num_pending_deleted ) ] )
                
            
            self._CacheSpecificMappingsAddMappings( tag_service_id, tag_id, hash_ids, filtered_hashes_generator )
            
        
    
    if len( deleted_mappings_ids ) > 0:
        
        for ( tag_id, hash_ids ) in deleted_mappings_ids:
            
            if tag_id in chained_tag_ids:
                
                self._CacheCombinedFilesDisplayMappingsDeleteMappingsForChained( tag_service_id, tag_id, hash_ids )
                
            
            # a delete removes the current row and any petition, and records a deleted row
            self._ExecuteMany( 'DELETE FROM ' + current_mappings_table_name + ' WHERE tag_id = ? AND hash_id = ?;', ( ( tag_id, hash_id ) for hash_id in hash_ids ) )
            
            num_current_deleted = self._GetRowCount()
            
            self._ExecuteMany( 'DELETE FROM ' + petitioned_mappings_table_name + ' WHERE tag_id = ? AND hash_id = ?;', ( ( tag_id, hash_id ) for hash_id in hash_ids ) )
            
            num_petitions_deleted = self._GetRowCount()
            
            self._ExecuteMany( 'INSERT OR IGNORE INTO ' + deleted_mappings_table_name + ' VALUES ( ?, ? );', ( ( tag_id, hash_id ) for hash_id in hash_ids ) )
            
            num_deleted_inserted = self._GetRowCount()
            
            change_in_num_mappings -= num_current_deleted
            change_in_num_petitioned_mappings -= num_petitions_deleted
            change_in_num_deleted_mappings += num_deleted_inserted
            
            self.modules_mappings_counts_update.ReduceCounts( ClientTags.TAG_DISPLAY_STORAGE, self.modules_services.combined_file_service_id, tag_service_id, [ ( tag_id, num_current_deleted, 0 ) ] )
            
            if tag_id not in chained_tag_ids:
                
                self.modules_mappings_counts_update.ReduceCounts( ClientTags.TAG_DISPLAY_ACTUAL, self.modules_services.combined_file_service_id, tag_service_id, [ ( tag_id, num_current_deleted, 0 ) ] )
                
            
            self._CacheSpecificMappingsDeleteMappings( tag_service_id, tag_id, hash_ids, filtered_hashes_generator )
            
        
    
    if len( pending_mappings_ids ) > 0:
        
        for ( tag_id, hash_ids ) in pending_mappings_ids:
            
            if tag_id in chained_tag_ids:
                
                self._CacheCombinedFilesDisplayMappingsPendMappingsForChained( tag_service_id, tag_id, hash_ids )
                
            
            self._ExecuteMany( 'INSERT OR IGNORE INTO ' + pending_mappings_table_name + ' VALUES ( ?, ? );', ( ( tag_id, hash_id ) for hash_id in hash_ids ) )
            
            num_pending_inserted = self._GetRowCount()
            
            change_in_num_pending_mappings += num_pending_inserted
            
            self.modules_mappings_counts_update.AddCounts( ClientTags.TAG_DISPLAY_STORAGE, self.modules_services.combined_file_service_id, tag_service_id, [ ( tag_id, 0, num_pending_inserted ) ] )
            
            if tag_id not in chained_tag_ids:
                
                self.modules_mappings_counts_update.AddCounts( ClientTags.TAG_DISPLAY_ACTUAL, self.modules_services.combined_file_service_id, tag_service_id, [ ( tag_id, 0, num_pending_inserted ) ] )
                
            
            self._CacheSpecificMappingsPendMappings( tag_service_id, tag_id, hash_ids, filtered_hashes_generator )
            
        
    
    if len( pending_rescinded_mappings_ids ) > 0:
        
        for ( tag_id, hash_ids ) in pending_rescinded_mappings_ids:
            
            if tag_id in chained_tag_ids:
                
                self._CacheCombinedFilesDisplayMappingsRescindPendingMappingsForChained( tag_service_id, tag_id, hash_ids )
                
            
            self._ExecuteMany( 'DELETE FROM ' + pending_mappings_table_name + ' WHERE tag_id = ? AND hash_id = ?;', ( ( tag_id, hash_id ) for hash_id in hash_ids ) )
            
            num_pending_deleted = self._GetRowCount()
            
            change_in_num_pending_mappings -= num_pending_deleted
            
            self.modules_mappings_counts_update.ReduceCounts( ClientTags.TAG_DISPLAY_STORAGE, self.modules_services.combined_file_service_id, tag_service_id, [ ( tag_id, 0, num_pending_deleted ) ] )
            
            if tag_id not in chained_tag_ids:
                
                self.modules_mappings_counts_update.ReduceCounts( ClientTags.TAG_DISPLAY_ACTUAL, self.modules_services.combined_file_service_id, tag_service_id, [ ( tag_id, 0, num_pending_deleted ) ] )
                
            
            self._CacheSpecificMappingsRescindPendingMappings( tag_service_id, tag_id, hash_ids, filtered_hashes_generator )
            
        
    
    #
    
    # re-check membership after all the changes: hashes that had current
    # mappings before, were in the removal set, and now have none are the
    # 'files removed' delta
    post_existing_hash_ids = self._STS( self._Execute( 'SELECT hash_id FROM temp_hash_ids WHERE EXISTS ( SELECT 1 FROM {} WHERE hash_id = temp_hash_ids.hash_id );'.format( current_mappings_table_name ) ) )
    
    self._Execute( 'DROP TABLE temp_hash_ids;' )
    
    num_files_removed = len( pre_existing_hash_ids.intersection( hash_ids_being_removed ).difference( post_existing_hash_ids ) )
    
    change_in_num_files -= num_files_removed
    
    # petitions only touch the petitioned table; no caches are involved
    for ( tag_id, hash_ids, reason_id ) in petitioned_mappings_ids:
        
        self._ExecuteMany( 'INSERT OR IGNORE INTO ' + petitioned_mappings_table_name + ' VALUES ( ?, ?, ? );', [ ( tag_id, hash_id, reason_id ) for hash_id in hash_ids ] )
        
        num_petitions_inserted = self._GetRowCount()
        
        change_in_num_petitioned_mappings += num_petitions_inserted
        
    
    for ( tag_id, hash_ids ) in petitioned_rescinded_mappings_ids:
        
        self._ExecuteMany( 'DELETE FROM ' + petitioned_mappings_table_name + ' WHERE tag_id = ? AND hash_id = ?;', ( ( tag_id, hash_id ) for hash_id in hash_ids ) )
        
        num_petitions_deleted = self._GetRowCount()
        
        change_in_num_petitioned_mappings -= num_petitions_deleted
        
    
    # finally, push the accumulated deltas into the cached service_info totals
    service_info_updates = []
    
    if change_in_num_mappings != 0: service_info_updates.append( ( change_in_num_mappings, tag_service_id, HC.SERVICE_INFO_NUM_MAPPINGS ) )
    if change_in_num_deleted_mappings != 0: service_info_updates.append( ( change_in_num_deleted_mappings, tag_service_id, HC.SERVICE_INFO_NUM_DELETED_MAPPINGS ) )
    if change_in_num_pending_mappings != 0: service_info_updates.append( ( change_in_num_pending_mappings, tag_service_id, HC.SERVICE_INFO_NUM_PENDING_MAPPINGS ) )
    if change_in_num_petitioned_mappings != 0: service_info_updates.append( ( change_in_num_petitioned_mappings, tag_service_id, HC.SERVICE_INFO_NUM_PETITIONED_MAPPINGS ) )
    if change_in_num_files != 0: service_info_updates.append( ( change_in_num_files, tag_service_id, HC.SERVICE_INFO_NUM_FILES ) )
    
    if len( service_info_updates ) > 0: self._ExecuteMany( 'UPDATE service_info SET info = info + ? WHERE service_id = ? AND info_type = ?;', service_info_updates )
def _UpdateServerServices( self, admin_service_key, serverside_services, service_keys_to_access_keys, deletee_service_keys ):
    """Sync the client's service records against the server's service list.

    Existing services get their address refreshed (preferring the UPnP port
    when the server advertises one), brand-new services are added only when
    an access key was supplied for them, and services in
    *deletee_service_keys* are removed. The admin service provides the host.
    """
    admin_service_id = self.modules_services.GetServiceId( admin_service_key )
    admin_service = self.modules_services.GetService( admin_service_id )
    admin_credentials = admin_service.GetCredentials()
    ( host, admin_port ) = admin_credentials.GetAddress()
    #
    current_service_keys = self.modules_services.GetServiceKeys()
    for serverside_service in serverside_services:
        service_key = serverside_service.GetServiceKey()
        if service_key in current_service_keys:
            # existing service: refresh its credentials' address
            service_id = self.modules_services.GetServiceId( service_key )
            service = self.modules_services.GetService( service_id )
            credentials = service.GetCredentials()
            upnp_port = serverside_service.GetUPnPPort()
            if upnp_port is None:
                port = serverside_service.GetPort()
                credentials.SetAddress( host, port )
            else:
                credentials.SetAddress( host, upnp_port )
            service.SetCredentials( credentials )
            self.modules_services.UpdateService( service )
        else:
            if service_key in service_keys_to_access_keys:
                # new service: only add it if we were handed an access key for it
                service_type = serverside_service.GetServiceType()
                name = serverside_service.GetName()
                service = ClientServices.GenerateService( service_key, service_type, name )
                access_key = service_keys_to_access_keys[ service_key ]
                credentials = service.GetCredentials()
                upnp_port = serverside_service.GetUPnPPort()
                if upnp_port is None:
                    port = serverside_service.GetPort()
                    credentials.SetAddress( host, port )
                else:
                    credentials.SetAddress( host, upnp_port )
                credentials.SetAccessKey( access_key )
                service.SetCredentials( credentials )
                ( service_key, service_type, name, dictionary ) = service.ToTuple()
                self._AddService( service_key, service_type, name, dictionary )
    for service_key in deletee_service_keys:
        try:
            # BUGFIX: the original discarded this lookup's result and then
            # deleted whatever stale service_id was left over from the loop
            # above, so the wrong service could be removed
            service_id = self.modules_services.GetServiceId( service_key )
        except HydrusExceptions.DataMissing:
            continue
        self._DeleteService( service_id )
    self._cursor_transaction_wrapper.pub_after_job( 'notify_account_sync_due' )
    self._cursor_transaction_wrapper.pub_after_job( 'notify_new_services_data' )
    self._cursor_transaction_wrapper.pub_after_job( 'notify_new_services_gui' )
    self._cursor_transaction_wrapper.pub_after_job( 'notify_new_pending' )
def _UpdateServices( self, services ):
    """Make the stored service set match *services* exactly.

    Services missing from the incoming list are deleted, ones we already
    know are updated in place, and the rest are added. Finishes by queueing
    the usual post-commit notifications.
    """
    current_service_keys = self.modules_services.GetServiceKeys()
    future_service_keys = { service.GetServiceKey() for service in services }
    # first drop anything the incoming set no longer mentions
    for existing_key in current_service_keys:
        if existing_key in future_service_keys:
            continue
        self._DeleteService( self.modules_services.GetServiceId( existing_key ) )
    # then update-or-add each incoming service
    for service in services:
        if service.GetServiceKey() in current_service_keys:
            self.modules_services.UpdateService( service )
        else:
            self._AddService( *service.ToTuple() )
    for topic in ( 'notify_account_sync_due', 'notify_new_services_data', 'notify_new_services_gui', 'notify_new_pending' ):
        self._cursor_transaction_wrapper.pub_after_job( topic )
def _Vacuum( self, names: typing.Collection[ str ], maintenance_mode = HC.MAINTENANCE_FORCED, stop_time = None, force_vacuum = False ):
    """Run sqlite VACUUM over the named client database files.

    Pass 1 filters *names* down to files that can currently be vacuumed;
    pass 2 closes the db connection, vacuums each survivor with progress
    reported on a popup job key, and reconnects afterwards.

    NOTE(review): indentation was lost in this copy of the file; the
    try/finally nesting below is reconstructed from statement order. In
    particular, verify the placement of RegisterSuccessfulVacuum against
    upstream -- as written it runs once, in the finally, with the
    last-processed name.
    """
    # pass 1: which dbs can actually be vacuumed right now?
    ok_names = []
    for name in names:
        db_path = os.path.join( self._db_dir, self._db_filenames[ name ] )
        try:
            HydrusDB.CheckCanVacuumCursor( db_path, self._c )
        except Exception as e:
            # complain only once per session about unvacuumable files
            if not self._have_printed_a_cannot_vacuum_message:
                HydrusData.Print( 'Cannot vacuum "{}": {}'.format( db_path, e ) )
                self._have_printed_a_cannot_vacuum_message = True
            continue
        if self._controller.ShouldStopThisWork( maintenance_mode, stop_time = stop_time ):
            return
        ok_names.append( name )
    if len( ok_names ) == 0:
        HydrusData.ShowText( 'A call to vacuum was made, but none of those databases could be vacuumed! Maybe drive free space is tight and/or recently changed?' )
        return
    job_key_pubbed = False
    job_key = ClientThreading.JobKey()
    job_key.SetStatusTitle( 'database maintenance - vacuum' )
    # vacuuming needs the connection closed; it is restored in the finally
    self._CloseDBConnection()
    try:
        for name in ok_names:
            time.sleep( 1 )
            try:
                db_path = os.path.join( self._db_dir, self._db_filenames[ name ] )
                # publish the popup lazily, on the first file actually vacuumed
                if not job_key_pubbed:
                    self._controller.pub( 'modal_message', job_key )
                    job_key_pubbed = True
                self._controller.frame_splash_status.SetText( 'vacuuming ' + name )
                job_key.SetVariable( 'popup_text_1', 'vacuuming ' + name )
                started = HydrusData.GetNowPrecise()
                HydrusDB.VacuumDB( db_path )
                time_took = HydrusData.GetNowPrecise() - started
                HydrusData.Print( 'Vacuumed ' + db_path + ' in ' + HydrusData.TimeDeltaToPrettyTimeDelta( time_took ) )
            except Exception as e:
                # one failure aborts the whole run: tell the user, reconnect, bail
                HydrusData.Print( 'vacuum failed:' )
                HydrusData.ShowException( e )
                text = 'An attempt to vacuum the database failed.'
                text += os.linesep * 2
                text += 'If the error is not obvious, please contact the hydrus developer.'
                HydrusData.ShowText( text )
                self._InitDBConnection()
                return
        job_key.SetVariable( 'popup_text_1', 'cleaning up' )
    finally:
        self._InitDBConnection()
        self.modules_db_maintenance.RegisterSuccessfulVacuum( name )
    job_key.SetVariable( 'popup_text_1', 'done!' )
    job_key.Finish()
    job_key.Delete( 10 )
def _Write( self, action, *args, **kwargs ):
    """Dispatch a named write *action* to its handler.

    This is the single entry point for all db write commands. Most handlers
    return nothing; the few that produce a value (e.g. 'import_file')
    assign it to result, which is returned to the caller. An unknown action
    raises.
    """
    result = None
    # one branch per supported write command, in rough alphabetical order
    if action == 'analyze': self.modules_db_maintenance.AnalyzeDueTables( *args, **kwargs )
    elif action == 'associate_repository_update_hashes': self.modules_repositories.AssociateRepositoryUpdateHashes( *args, **kwargs )
    elif action == 'backup': self._Backup( *args, **kwargs )
    elif action == 'clear_deferred_physical_delete': self.modules_files_storage.ClearDeferredPhysicalDelete( *args, **kwargs )
    elif action == 'clear_false_positive_relations': self.modules_files_duplicates.DuplicatesClearAllFalsePositiveRelationsFromHashes( *args, **kwargs )
    elif action == 'clear_false_positive_relations_between_groups': self.modules_files_duplicates.DuplicatesClearFalsePositiveRelationsBetweenGroupsFromHashes( *args, **kwargs )
    elif action == 'clear_orphan_file_records': self._ClearOrphanFileRecords( *args, **kwargs )
    elif action == 'clear_orphan_tables': self._ClearOrphanTables( *args, **kwargs )
    elif action == 'content_updates': self._ProcessContentUpdates( *args, **kwargs )
    elif action == 'cull_file_viewing_statistics': self._CullFileViewingStatistics( *args, **kwargs )
    elif action == 'db_integrity': self._CheckDBIntegrity( *args, **kwargs )
    elif action == 'delete_imageboard': self.modules_serialisable.DeleteYAMLDump( ClientDBSerialisable.YAML_DUMP_ID_IMAGEBOARD, *args, **kwargs )
    elif action == 'delete_local_booru_share': self.modules_serialisable.DeleteYAMLDump( ClientDBSerialisable.YAML_DUMP_ID_LOCAL_BOORU, *args, **kwargs )
    elif action == 'delete_pending': self._DeletePending( *args, **kwargs )
    elif action == 'delete_serialisable_named': self.modules_serialisable.DeleteJSONDumpNamed( *args, **kwargs )
    elif action == 'delete_service_info': self._DeleteServiceInfo( *args, **kwargs )
    elif action == 'delete_potential_duplicate_pairs': self.modules_files_duplicates.DuplicatesDeleteAllPotentialDuplicatePairs( *args, **kwargs )
    elif action == 'dirty_services': self._SaveDirtyServices( *args, **kwargs )
    elif action == 'dissolve_alternates_group': self.modules_files_duplicates.DuplicatesDissolveAlternatesGroupIdFromHashes( *args, **kwargs )
    elif action == 'dissolve_duplicates_group': self.modules_files_duplicates.DuplicatesDissolveMediaIdFromHashes( *args, **kwargs )
    elif action == 'duplicate_pair_status': self._DuplicatesSetDuplicatePairStatus( *args, **kwargs )
    elif action == 'duplicate_set_king': self.modules_files_duplicates.DuplicatesSetKingFromHash( *args, **kwargs )
    elif action == 'file_maintenance_add_jobs': self.modules_files_maintenance_queue.AddJobs( *args, **kwargs )
    elif action == 'file_maintenance_add_jobs_hashes': self.modules_files_maintenance_queue.AddJobsHashes( *args, **kwargs )
    elif action == 'file_maintenance_cancel_jobs': self.modules_files_maintenance_queue.CancelJobs( *args, **kwargs )
    elif action == 'file_maintenance_clear_jobs': self.modules_files_maintenance.ClearJobs( *args, **kwargs )
    elif action == 'fix_logically_inconsistent_mappings': self._FixLogicallyInconsistentMappings( *args, **kwargs )
    elif action == 'imageboard': self.modules_serialisable.SetYAMLDump( ClientDBSerialisable.YAML_DUMP_ID_IMAGEBOARD, *args, **kwargs )
    elif action == 'ideal_client_files_locations': self._SetIdealClientFilesLocations( *args, **kwargs )
    elif action == 'import_file': result = self._ImportFile( *args, **kwargs )
    elif action == 'import_update': self._ImportUpdate( *args, **kwargs )
    elif action == 'local_booru_share': self.modules_serialisable.SetYAMLDump( ClientDBSerialisable.YAML_DUMP_ID_LOCAL_BOORU, *args, **kwargs )
    elif action == 'maintain_hashed_serialisables': result = self.modules_serialisable.MaintainHashedStorage( *args, **kwargs )
    elif action == 'maintain_similar_files_search_for_potential_duplicates': result = self._PerceptualHashesSearchForPotentialDuplicates( *args, **kwargs )
    elif action == 'maintain_similar_files_tree': self.modules_similar_files.MaintainTree( *args, **kwargs )
    elif action == 'migration_clear_job': self._MigrationClearJob( *args, **kwargs )
    elif action == 'migration_start_mappings_job': self._MigrationStartMappingsJob( *args, **kwargs )
    elif action == 'migration_start_pairs_job': self._MigrationStartPairsJob( *args, **kwargs )
    elif action == 'process_repository_content': result = self._ProcessRepositoryContent( *args, **kwargs )
    elif action == 'process_repository_definitions': result = self.modules_repositories.ProcessRepositoryDefinitions( *args, **kwargs )
    elif action == 'push_recent_tags': self._PushRecentTags( *args, **kwargs )
    elif action == 'regenerate_local_hash_cache': self._RegenerateLocalHashCache( *args, **kwargs )
    elif action == 'regenerate_local_tag_cache': self._RegenerateLocalTagCache( *args, **kwargs )
    elif action == 'regenerate_similar_files': self.modules_similar_files.RegenerateTree( *args, **kwargs )
    elif action == 'regenerate_searchable_subtag_maps': self._RegenerateTagCacheSearchableSubtagMaps( *args, **kwargs )
    elif action == 'regenerate_tag_cache': self._RegenerateTagCache( *args, **kwargs )
    elif action == 'regenerate_tag_display_mappings_cache': self._RegenerateTagDisplayMappingsCache( *args, **kwargs )
    elif action == 'regenerate_tag_display_pending_mappings_cache': self._RegenerateTagDisplayPendingMappingsCache( *args, **kwargs )
    elif action == 'regenerate_tag_mappings_cache': self._RegenerateTagMappingsCache( *args, **kwargs )
    elif action == 'regenerate_tag_parents_cache': self._RegenerateTagParentsCache( *args, **kwargs )
    elif action == 'regenerate_tag_pending_mappings_cache': self._RegenerateTagPendingMappingsCache( *args, **kwargs )
    elif action == 'regenerate_tag_siblings_and_parents_cache': self.modules_tag_display.RegenerateTagSiblingsAndParentsCache( *args, **kwargs )
    elif action == 'register_shutdown_work': self.modules_db_maintenance.RegisterShutdownWork( *args, **kwargs )
    elif action == 'repopulate_mappings_from_cache': self._RepopulateMappingsFromCache( *args, **kwargs )
    elif action == 'repopulate_tag_cache_missing_subtags': self._RepopulateTagCacheMissingSubtags( *args, **kwargs )
    elif action == 'repopulate_tag_display_mappings_cache': self._RepopulateTagDisplayMappingsCache( *args, **kwargs )
    elif action == 'relocate_client_files': self._RelocateClientFiles( *args, **kwargs )
    elif action == 'remove_alternates_member': self.modules_files_duplicates.DuplicatesRemoveAlternateMemberFromHashes( *args, **kwargs )
    elif action == 'remove_duplicates_member': self.modules_files_duplicates.DuplicatesRemoveMediaIdMemberFromHashes( *args, **kwargs )
    elif action == 'remove_potential_pairs': self.modules_files_duplicates.DuplicatesRemovePotentialPairsFromHashes( *args, **kwargs )
    elif action == 'repair_client_files': self._RepairClientFiles( *args, **kwargs )
    elif action == 'repair_invalid_tags': self._RepairInvalidTags( *args, **kwargs )
    elif action == 'reprocess_repository': self.modules_repositories.ReprocessRepository( *args, **kwargs )
    elif action == 'reset_repository': self._ResetRepository( *args, **kwargs )
    elif action == 'reset_repository_processing': self._ResetRepositoryProcessing( *args, **kwargs )
    elif action == 'reset_potential_search_status': self._PerceptualHashesResetSearchFromHashes( *args, **kwargs )
    elif action == 'save_options': self._SaveOptions( *args, **kwargs )
    elif action == 'serialisable': self.modules_serialisable.SetJSONDump( *args, **kwargs )
    elif action == 'serialisable_atomic': self.modules_serialisable.SetJSONComplex( *args, **kwargs )
    elif action == 'serialisable_simple': self.modules_serialisable.SetJSONSimple( *args, **kwargs )
    elif action == 'serialisables_overwrite': self.modules_serialisable.OverwriteJSONDumps( *args, **kwargs )
    elif action == 'set_password': self._SetPassword( *args, **kwargs )
    elif action == 'set_repository_update_hashes': self.modules_repositories.SetRepositoryUpdateHashes( *args, **kwargs )
    elif action == 'schedule_repository_update_file_maintenance': self.modules_repositories.ScheduleRepositoryUpdateFileMaintenance( *args, **kwargs )
    elif action == 'sync_tag_display_maintenance': result = self._CacheTagDisplaySync( *args, **kwargs )
    elif action == 'tag_display_application': self.modules_tag_display.SetApplication( *args, **kwargs )
    elif action == 'update_server_services': self._UpdateServerServices( *args, **kwargs )
    elif action == 'update_services': self._UpdateServices( *args, **kwargs )
    elif action == 'vacuum': self._Vacuum( *args, **kwargs )
    else: raise Exception( 'db received an unknown write command: ' + action )
    return result
def pub_content_updates_after_commit( self, service_keys_to_content_updates ):
    """Buffer a content-update package for publication after the current job commits."""
    pending_jobs = self._after_job_content_update_jobs
    pending_jobs.append( service_keys_to_content_updates )
def pub_initial_message( self, message ):
    """Record a message to be surfaced to the user after boot."""
    messages = self._initial_messages
    messages.append( message )
def pub_service_updates_after_commit( self, service_keys_to_service_updates ):
    """Queue the service updates for both the data and gui channels post-commit."""
    for topic in ( 'service_updates_data', 'service_updates_gui' ):
        self._cursor_transaction_wrapper.pub_after_job( topic, service_keys_to_service_updates )
def publish_status_update( self ):
    """Tell the controller the status bar needs a redraw."""
    controller = self._controller
    controller.pub( 'set_status_bar_dirty' )
def GetInitialMessages( self ):
    """Return the (shared, mutable) list of boot-time messages."""
    messages = self._initial_messages
    return messages
def RestoreBackup( self, path ):
    """Copy a backup's database files and media store from *path* into the live db dir."""
    for db_filename in self._db_filenames.values():
        HG.client_controller.frame_splash_status.SetText( db_filename )
        backup_location = os.path.join( path, db_filename )
        live_location = os.path.join( self._db_dir, db_filename )
        if os.path.exists( backup_location ):
            HydrusPaths.MirrorFile( backup_location, live_location )
        else:
            # a backup made by an older version may not include every db file
            # this version has; rather than deleting the live file (just in
            # case), shunt it aside as '.old'
            HydrusPaths.MergeFile( live_location, live_location + '.old' )
    for extra_filename in self._GetPossibleAdditionalDBFilenames():
        backup_location = os.path.join( path, extra_filename )
        live_location = os.path.join( self._db_dir, extra_filename )
        if os.path.exists( backup_location ):
            HydrusPaths.MirrorFile( backup_location, live_location )
    HG.client_controller.frame_splash_status.SetText( 'media files' )
    client_files_source = os.path.join( path, 'client_files' )
    client_files_default = os.path.join( self._db_dir, 'client_files' )
    if os.path.exists( client_files_source ):
        HydrusPaths.MirrorTree( client_files_source, client_files_default )
| true | true |
f7fcefa6fed7a7d207c29601ec2b5b1a4ec218e6 | 4,561 | py | Python | crawler.py | sebastianorozco/webcrawler | c34630f27505bbf07d2fea41724b9b05889be9b0 | [
"Apache-2.0"
] | null | null | null | crawler.py | sebastianorozco/webcrawler | c34630f27505bbf07d2fea41724b9b05889be9b0 | [
"Apache-2.0"
] | null | null | null | crawler.py | sebastianorozco/webcrawler | c34630f27505bbf07d2fea41724b9b05889be9b0 | [
"Apache-2.0"
] | null | null | null | """
crawler.py
"""
from urlparse import urljoin,urlparse
from collections import deque
import re
import traceback
from locale import getdefaultlocale
import logging
import time
from bs4 import BeautifulSoup
from database import Database
from webPage import WebPage
from threadPool import ThreadPool
log = logging.getLogger('Main.crawler')
class Crawler(object):
def __init__(self, args):
self.depth = args.depth
self.currentDepth = 1
self.keyword = args.keyword.decode(getdefaultlocale()[1])
self.database = Database(args.dbFile)
self.threadPool = ThreadPool(args.threadNum)
self.visitedHrefs = set()
self.unvisitedHrefs = deque()
self.unvisitedHrefs.append(args.url)
self.isCrawling = False
def start(self):
print '\nStart Crawling\n'
if not self._isDatabaseAvaliable():
print 'Error: Unable to open database file.\n'
else:
self.isCrawling = True
self.threadPool.startThreads()
while self.currentDepth < self.depth+1:
self._assignCurrentDepthTasks ()
#self.threadPool.taskJoin()Ctrl-C Interupt
while self.threadPool.getTaskLeft():
time.sleep(8)
print 'Depth %d Finish. Totally visited %d links. \n' % (
self.currentDepth, len(self.visitedHrefs))
log.info('Depth %d Finish. Total visited Links: %d\n' % (
self.currentDepth, len(self.visitedHrefs)))
self.currentDepth += 1
self.stop()
def stop(self):
self.isCrawling = False
self.threadPool.stopThreads()
self.database.close()
def getAlreadyVisitedNum(self):
#visitedHrefstaskQueue
visitedHrefs
return len(self.visitedHrefs) - self.threadPool.getTaskLeft()
def _assignCurrentDepthTasks(self):
while self.unvisitedHrefs:
url = self.unvisitedHrefs.popleft()
self.threadPool.putTask(self._taskHandler, url)
self.visitedHrefs.add(url)
def _taskHandler(self, url):
webPage = WebPage(url)
if webPage.fetch():
self._saveTaskResults(webPage)
self._addUnvisitedHrefs(webPage)
def _saveTaskResults(self, webPage):
url, pageSource = webPage.getDatas()
try:
if self.keyword:
if re.search(self.keyword, pageSource, re.I):
self.database.saveData(url, pageSource, self.keyword)
else:
self.database.saveData(url, pageSource)
except Exception, e:
log.error(' URL: %s ' % url + traceback.format_exc())
def _addUnvisitedHrefs(self, webPage):
url, pageSource = webPage.getDatas()
hrefs = self._getAllHrefsFromPage(url, pageSource)
for href in hrefs:
if self._isHttpOrHttpsProtocol(href):
if not self._isHrefRepeated(href):
self.unvisitedHrefs.append(href)
def _getAllHrefsFromPage(self, url, pageSource):
hrefs = []
soup = BeautifulSoup(pageSource)
results = soup.find_all('a',href=True)
for a in results:
href = a.get('href').encode('utf8')
if not href.startswith('http'):
href = urljoin(url, href)
hrefs.append(href)
return hrefs
def _isHttpOrHttpsProtocol(self, href):
protocal = urlparse(href).scheme
if protocal == 'http' or protocal == 'https':
return True
return False
def _isHrefRepeated(self, href):
if href in self.visitedHrefs or href in self.unvisitedHrefs:
return True
return False
def _isDatabaseAvaliable(self):
if self.database.isConn():
return True
return False
def selfTesting(self, args):
url = 'http://www.baidu.com/'
print '\nVisiting www.baidu.com'
pageSource = WebPage(url).fetch()
if pageSource == None:
print 'Please check your network and make sure it\'s connected.\n'
elif not self._isDatabaseAvaliable():
print 'Please make sure you have the permission to save data: %s\n' % args.dbFile
else:
self._saveTaskResults(url, pageSource)
print 'Create logfile and database Successfully.'
print 'Already save Baidu.com, Please check the database record.'
print 'Seems No Problem!\n'
| 33.785185 | 93 | 0.60513 | """
crawler.py
"""
from urlparse import urljoin,urlparse
from collections import deque
import re
import traceback
from locale import getdefaultlocale
import logging
import time
from bs4 import BeautifulSoup
from database import Database
from webPage import WebPage
from threadPool import ThreadPool
log = logging.getLogger('Main.crawler')
class Crawler(object):
def __init__(self, args):
self.depth = args.depth
self.currentDepth = 1
self.keyword = args.keyword.decode(getdefaultlocale()[1])
self.database = Database(args.dbFile)
self.threadPool = ThreadPool(args.threadNum)
self.visitedHrefs = set()
self.unvisitedHrefs = deque()
self.unvisitedHrefs.append(args.url)
self.isCrawling = False
def start(self):
print '\nStart Crawling\n'
if not self._isDatabaseAvaliable():
print 'Error: Unable to open database file.\n'
else:
self.isCrawling = True
self.threadPool.startThreads()
while self.currentDepth < self.depth+1:
self._assignCurrentDepthTasks ()
while self.threadPool.getTaskLeft():
time.sleep(8)
print 'Depth %d Finish. Totally visited %d links. \n' % (
self.currentDepth, len(self.visitedHrefs))
log.info('Depth %d Finish. Total visited Links: %d\n' % (
self.currentDepth, len(self.visitedHrefs)))
self.currentDepth += 1
self.stop()
def stop(self):
self.isCrawling = False
self.threadPool.stopThreads()
self.database.close()
def getAlreadyVisitedNum(self):
visitedHrefs
return len(self.visitedHrefs) - self.threadPool.getTaskLeft()
def _assignCurrentDepthTasks(self):
while self.unvisitedHrefs:
url = self.unvisitedHrefs.popleft()
self.threadPool.putTask(self._taskHandler, url)
self.visitedHrefs.add(url)
def _taskHandler(self, url):
webPage = WebPage(url)
if webPage.fetch():
self._saveTaskResults(webPage)
self._addUnvisitedHrefs(webPage)
def _saveTaskResults(self, webPage):
url, pageSource = webPage.getDatas()
try:
if self.keyword:
if re.search(self.keyword, pageSource, re.I):
self.database.saveData(url, pageSource, self.keyword)
else:
self.database.saveData(url, pageSource)
except Exception, e:
log.error(' URL: %s ' % url + traceback.format_exc())
def _addUnvisitedHrefs(self, webPage):
url, pageSource = webPage.getDatas()
hrefs = self._getAllHrefsFromPage(url, pageSource)
for href in hrefs:
if self._isHttpOrHttpsProtocol(href):
if not self._isHrefRepeated(href):
self.unvisitedHrefs.append(href)
def _getAllHrefsFromPage(self, url, pageSource):
hrefs = []
soup = BeautifulSoup(pageSource)
results = soup.find_all('a',href=True)
for a in results:
href = a.get('href').encode('utf8')
if not href.startswith('http'):
href = urljoin(url, href)
hrefs.append(href)
return hrefs
def _isHttpOrHttpsProtocol(self, href):
protocal = urlparse(href).scheme
if protocal == 'http' or protocal == 'https':
return True
return False
def _isHrefRepeated(self, href):
if href in self.visitedHrefs or href in self.unvisitedHrefs:
return True
return False
def _isDatabaseAvaliable(self):
if self.database.isConn():
return True
return False
def selfTesting(self, args):
url = 'http://www.baidu.com/'
print '\nVisiting www.baidu.com'
pageSource = WebPage(url).fetch()
if pageSource == None:
print 'Please check your network and make sure it\'s connected.\n'
elif not self._isDatabaseAvaliable():
print 'Please make sure you have the permission to save data: %s\n' % args.dbFile
else:
self._saveTaskResults(url, pageSource)
print 'Create logfile and database Successfully.'
print 'Already save Baidu.com, Please check the database record.'
print 'Seems No Problem!\n'
| false | true |
f7fcefd3c4b14bba418ab004458a2203519d7bb4 | 1,086 | py | Python | openerp/addons/l10n_fr_hr_payroll/__init__.py | ntiufalara/openerp7 | 903800da0644ec0dd9c1dcd34205541f84d45fe4 | [
"MIT"
] | 3 | 2016-01-29T14:39:49.000Z | 2018-12-29T22:42:00.000Z | odoo/addons/l10n_fr_hr_payroll/__init__.py | tuanquanghpvn/odoo8-tutorial | 52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e | [
"MIT"
] | 2 | 2016-03-23T14:29:41.000Z | 2017-02-20T17:11:30.000Z | odoo/addons/l10n_fr_hr_payroll/__init__.py | tuanquanghpvn/odoo8-tutorial | 52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e | [
"MIT"
] | 3 | 2020-10-08T14:42:10.000Z | 2022-01-28T14:12:29.000Z | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import l10n_fr_hr_payroll
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| 43.44 | 78 | 0.624309 | true | true | |
f7fcf216326d902fad809437d9c0215cd4b51837 | 146 | py | Python | datasets/urls.py | gurupratap-matharu/vuln | e577684d72d4d06ad60705cf7faeefe9392ea7f1 | [
"MIT"
] | null | null | null | datasets/urls.py | gurupratap-matharu/vuln | e577684d72d4d06ad60705cf7faeefe9392ea7f1 | [
"MIT"
] | 8 | 2021-03-31T20:22:18.000Z | 2021-12-13T20:55:25.000Z | datasets/urls.py | gurupratap-matharu/vuln | e577684d72d4d06ad60705cf7faeefe9392ea7f1 | [
"MIT"
] | 1 | 2021-01-12T21:39:40.000Z | 2021-01-12T21:39:40.000Z | from django.urls import path
from datasets.views import DatasetListView
urlpatterns = [
    # site root ('') -> list of datasets; reversible as 'home'
    path('', DatasetListView.as_view(), name='home'),
]
| 18.25 | 53 | 0.732877 | from django.urls import path
from datasets.views import DatasetListView
urlpatterns = [
    # site root ('') -> list of datasets; reversible as 'home'
    path('', DatasetListView.as_view(), name='home'),
]
| true | true |
f7fcf2afea0cf43878f91a9140b2757c9eb451d2 | 414 | py | Python | www_slcschools_org/wsgi.py | iamjdcollins/districtwebsite | 89e2aea47ca3d221665bc23586a4374421be5800 | [
"MIT"
] | null | null | null | www_slcschools_org/wsgi.py | iamjdcollins/districtwebsite | 89e2aea47ca3d221665bc23586a4374421be5800 | [
"MIT"
] | null | null | null | www_slcschools_org/wsgi.py | iamjdcollins/districtwebsite | 89e2aea47ca3d221665bc23586a4374421be5800 | [
"MIT"
] | null | null | null | """
WSGI config for www_slcschools_org project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# point Django at the project settings unless the environment already did
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "www_slcschools_org.settings")
# module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import
application = get_wsgi_application()
| 24.352941 | 78 | 0.797101 |
import os
from django.core.wsgi import get_wsgi_application
# point Django at the project settings unless the environment already did
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "www_slcschools_org.settings")
# module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import
application = get_wsgi_application()
| true | true |
f7fcf2b5f855ae88428508dfb58bfdcf87cc078c | 22,805 | py | Python | sctriangulate/metrics.py | alexcwsmith/scTriangulate | ec014a4c575f4fd3270922ee9197493a6ec0846c | [
"MIT"
] | 1 | 2021-10-18T21:44:23.000Z | 2021-10-18T21:44:23.000Z | sctriangulate/metrics.py | alexcwsmith/scTriangulate | ec014a4c575f4fd3270922ee9197493a6ec0846c | [
"MIT"
] | null | null | null | sctriangulate/metrics.py | alexcwsmith/scTriangulate | ec014a4c575f4fd3270922ee9197493a6ec0846c | [
"MIT"
] | null | null | null | import scanpy as sc
import pandas as pd
import numpy as np
import anndata as ad
import matplotlib.pyplot as plt
import seaborn as sns
import sys
import gseapy as gp
import math
import os
def check_filter_single_cluster(adata, key):
    """Drop cells belonging to singleton clusters.

    Clusters under adata.obs[key] that contain exactly one cell are removed
    and the filtered AnnData view is returned.
    """
    cluster_sizes = adata.obs[key].value_counts()
    singleton_clusters = cluster_sizes.loc[cluster_sizes == 1].index
    keep_mask = ~adata.obs[key].isin(singleton_clusters).values
    return adata[keep_mask, :]
def doublet_compute(adata, key):
    """Mean doublet score per cluster.

    Averages adata.obs['doublet_scores'] within each category of
    adata.obs[key] and returns {cluster: mean_score}.
    """
    scores = {}
    for cluster in adata.obs[key].astype('category').cat.categories:
        members = adata[adata.obs[key] == cluster, :]
        scores[cluster] = members.obs['doublet_scores'].values.mean()
    return scores
def compute_combo_score(rank_uns, cluster):
    """Rank a cluster's up-regulated genes by a combined lfc/p-value score.

    rank_uns is a scanpy-style rank_genes_groups mapping with
    'names'/'logfoldchanges'/'pvals' arrays per cluster. Only genes with
    lfc > 0 and p < 0.05 survive; they are ordered by 'combo', the mean of
    the gene's rank by descending lfc and its rank by ascending p-value.
    """
    frame = pd.DataFrame({
        'names': rank_uns['names'][cluster],
        'lfc': rank_uns['logfoldchanges'][cluster],
        'pval': rank_uns['pvals'][cluster],
    })
    # keep up-regulated genes only, then renumber rows 0..n-1
    frame = frame.loc[frame['lfc'] > 0, :]
    frame.set_index(keys=pd.Index(np.arange(frame.shape[0])), inplace=True)
    n_genes = len(frame['pval'].values)

    def invert_order(order):
        # order[i] = row holding rank i  ->  ranks[row] = that row's rank
        ranks = np.empty_like(order)
        ranks[order] = np.arange(n_genes)
        return ranks

    # larger lfc -> better (smaller) rank; smaller pval -> better rank
    rank_by_lfc = invert_order(np.flip(np.argsort(frame['lfc'].values)))
    rank_by_pval = invert_order(np.argsort(frame['pval'].values))
    frame['rank_lfc'] = rank_by_lfc
    frame['rank_pval'] = rank_by_pval
    frame['combo'] = (rank_by_lfc + rank_by_pval) / 2
    frame.sort_values(by='combo', inplace=True)
    frame.set_index(keys=pd.Index(np.arange(frame.shape[0])), inplace=True)
    # finally drop genes that are not significant
    frame = frame.loc[frame['pval'] < 0.05, :]
    frame.set_index(keys=pd.Index(np.arange(frame.shape[0])), inplace=True)
    return frame
def run_enrichr(gene_list,key,name,folder):
    """Score *gene_list* for over-representation of artifact-gene classes.

    Uses gseapy's Enrichr wrapper against the gene sets from the packaged
    artifact_genes.txt (one set per 'class' value). Returns
    {class: -log10(adjusted p)} with 0 for classes that show no enrichment.
    *name* labels the run; results are written under *folder*.
    """
    # run enrichr
    artifact = pd.read_csv(os.path.join(os.path.dirname(os.path.abspath(__file__)),'artifact_genes.txt'),sep='\t')
    artifact_dict = artifact.groupby(by='class')['genes'].apply(lambda x:x.tolist()).to_dict()
    enr2 = gp.enrichr(gene_list=gene_list,
                      description=name,
                      gene_sets=artifact_dict,
                      background=20000,
                      outdir=os.path.join(folder,'scTriangulate_local_mode_enrichr'),
                      cutoff=0.1, # adj-p for plotting
                      verbose=True)
    enrichr_result = enr2.results
    enrichr_dict = {}
    for metric in artifact_dict.keys():
        if enrichr_result.shape[0] == 0: # no enrichment for any of the above terms
            enrichr_dict[metric] = 0
        else:
            try:
                # -log10 adjusted p for this class; IndexError = no row for it
                enrichr_score = -math.log10(enrichr_result.loc[enrichr_result['Term']==metric,:]['Adjusted P-value'].to_list()[0])
            except IndexError:
                enrichr_dict[metric] = 0
            else:
                enrichr_dict[metric] = enrichr_score
    return enrichr_dict
def run_gsea(gene_list,key,name,folder):
    """GSEA-prerank *gene_list* against the artifact-gene classes.

    The ranked input assigns weight 1/(position+1) to each gene, so earlier
    genes dominate. Returns {class: (nes, matched_size)} with (0, 0) for
    classes that are not enriched or when prerank fails entirely. Results
    are written under folder/scTriangulate_local_mode_gsea/key/name.
    """
    artifact = pd.read_csv(os.path.join(os.path.dirname(os.path.abspath(__file__)),'artifact_genes.txt'),sep='\t')
    artifact_dict = artifact.groupby(by='class')['genes'].apply(lambda x:x.tolist()).to_dict()
    # snapshot the keys: gp.prerank mutates artifact_dict (see caveat below)
    artifact_dict_keys = list(artifact_dict.keys())
    df = pd.DataFrame({0: gene_list, 1: 1/(np.arange(len(gene_list))+1)}) # col 1 is for descending rank of gene
    gsea_dict = {}
    try:
        pre_res = gp.prerank(rnk=df, gene_sets=artifact_dict,
                             permutation_num=100,
                             outdir=os.path.join(folder,'scTriangulate_local_mode_gsea/{}/{}'.format(key,name)),
                             min_size=1,
                             max_size=10000,
                             seed=6,
                             verbose=True) # run this will cause artifact dict decreasing !! Caveats!!!
    # NOTE(review): bare except deliberately treats any prerank failure as
    # "no hits"; consider narrowing the exception type
    except: # no hit return, all metrics are zero
        for metric in artifact_dict_keys:
            gsea_dict[metric] = (0,0) # first is nes, second is #hit
    else:
        gsea_result = pre_res.res2d
        metric_get = set(gsea_result.index.tolist())
        for metric in artifact_dict_keys:
            if metric in metric_get:
                gsea_score = gsea_result.loc[gsea_result.index==metric,:]['nes'].to_list()[0]
                gsea_hits = gsea_result.loc[gsea_result.index==metric,:]['matched_size'].to_list()[0]
                gsea_dict[metric] = (gsea_score, gsea_hits)
            else: # not enriched
                gsea_dict[metric] = (0,0)
    return gsea_dict
def read_artifact_genes(species, criterion):
    '''
    Load the packaged artifact_genes.txt for *species* and keep only the
    classes still considered artifacts at the given stringency level:

    criterion1: all will be artifact
    criterion2: all will be artifact except cellcycle
    criterion3: all will be artifact except cellcycle, ribosome
    criterion4: all will be artifact except cellcycle, ribosome, mitochondrial
    criterion5: all will be artifact except cellcycle, ribosome, mitochondrial, antisense
    criterion6: all will be artifact except cellcycle, ribosome, mitochondrial, antisense, predict_gene
    '''
    artifact = pd.read_csv(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'artifact_genes.txt'), sep='\t', index_col=0)
    artifact = artifact.loc[artifact['species'] == species, :]
    # BUGFIX: criteria 4-6 previously tested class=='cellcylce' (typo), so
    # cellcycle genes were never exempted despite the docstring.
    # Unknown criterion values keep the full table, as before.
    exempt_by_criterion = {
        1: [],
        2: ['cellcycle'],
        3: ['cellcycle', 'ribosome'],
        4: ['cellcycle', 'ribosome', 'mitochondrial'],
        5: ['cellcycle', 'ribosome', 'mitochondrial', 'antisense'],
        6: ['cellcycle', 'ribosome', 'mitochondrial', 'antisense', 'predict_gene'],
    }
    exempt = exempt_by_criterion.get(criterion, [])
    if exempt:
        artifact = artifact.loc[~artifact['class'].isin(exempt), :]
    return artifact
def purify_gene(genelist,species,criterion):
    """Filter *genelist* down to the genes that are not artifacts.

    Artifact membership is decided by read_artifact_genes(species, criterion);
    the relative order of the surviving genes is preserved.
    """
    blacklist = set(read_artifact_genes(species,criterion).index.to_list())
    return [gene for gene in genelist if gene not in blacklist]
def marker_gene(adata, key, species, criterion, folder):
    '''
    Derive mutually exclusive marker-gene lists for every cluster under
    adata.obs[key], then annotate each list with artifact enrichment.

    Pipeline: scanpy t-test ranking over ALL genes -> per-cluster combo-score
    tables (compute_combo_score) -> each gene is assigned to the single cluster
    where its combo rank is best -> per-cluster lists are annotated with
    Enrichr and GSEA artifact enrichment (written under `folder`) and a
    purified copy with artifact genes removed.

    Parameters: `adata` with categorical obs[key] cluster labels; `species`
    and `criterion` are forwarded to the artifact-gene lookup; `folder` is the
    output directory for Enrichr/GSEA results.

    Returns a DataFrame indexed by cluster with columns
    ['whole_marker_genes', 'enrichr', 'gsea', 'purify'].
    '''
    # drop any previous rank_genes_groups result so scanpy recomputes cleanly
    if adata.uns.get('rank_genes_groups') != None:
        del adata.uns['rank_genes_groups']
    # rank every gene per cluster with a t-test
    sc.tl.rank_genes_groups(adata, key, method='t-test',n_genes=adata.shape[1])
    all_genes = adata.var_names.values  # ndarray, all the genes
    all_clusters = adata.obs[key].cat.categories  # pd.Index, all the clusters
    cluster2gene = dict()  # {'cluster1': [(gene, rank), ...]} until the sort below
    rank_uns = adata.uns['rank_genes_groups']
    pre_computed_dfs = []
    # one combo-score table per cluster, in all_clusters order
    for cluster in all_clusters:
        cluster2gene[cluster] = []
        df = compute_combo_score(rank_uns, cluster)
        pre_computed_dfs.append(df)
    for gene in all_genes:
        index_store = []
        for i,cluster in enumerate(all_clusters):
            df = pre_computed_dfs[i]
            # rank of this gene within the cluster's combo table
            try:
                index = np.nonzero(df['names'].values == gene)[0][0]
            except IndexError:
                # gene absent from this cluster's table: worst possible rank
                index = len(all_genes)
            index_store.append(index)
        if np.all(np.array(index_store) == len(all_genes)):
            # significant nowhere -> not a marker for any cluster
            continue
        # assign the gene to the cluster where it ranks best (argmin of rank)
        assign = all_clusters[np.argmin(np.array(index_store))]
        cluster2gene[assign].append((gene,np.min(index_store)))
    # within each cluster, order markers by their rank (best first) and
    # drop the rank, leaving plain gene lists
    for key_,value in cluster2gene.items():
        gene = [item[0] for item in value]
        rank = [item[1] for item in value]
        temp = sorted(zip(gene,rank),key=lambda x:x[1])
        cluster2gene[key_] = [item[0] for item in temp]
    result = pd.Series(cluster2gene).to_frame()
    result.columns = ['whole_marker_genes']
    '''
    now the result is a dataframe
                whole_marker_genes
    cluster1      gene_list
    cluster2      gene_list
    '''
    # annotate each marker list: artifact enrichment (Enrichr, GSEA) plus a
    # purified list with artifact genes removed
    col_enrichr = []
    col_gsea = []
    col_purify = []   # genelist that have artifact genes removed
    for cluster in result.index:
        enrichr_dict = run_enrichr(result.loc[cluster,:].to_list()[0],key=key,name=cluster,folder=folder)  # [0]: the row holds [[gene_list]], unwrap once
        gsea_dict = run_gsea(result.loc[cluster,:].to_list()[0],key=key,name=cluster,folder=folder)
        purified = purify_gene(result.loc[cluster,:].to_list()[0],species,criterion)  # same unwrapping as above
        col_enrichr.append(enrichr_dict)
        col_gsea.append(gsea_dict)
        col_purify.append(purified)
    result['enrichr'] = col_enrichr
    result['gsea'] = col_gsea
    result['purify'] = col_purify
    return result
def reassign_score(adata,key,marker,regress_size=False):
    '''
    Reassign (self-projection) score for every cluster under adata.obs[key].

    A gene pool is built from the top 30 purified markers of each cluster;
    the pooled expression is scaled and PCA-reduced, each cluster collapsed
    to its centroid, a KNN classifier trained on the centroids, and every
    cell re-predicted. A cluster's score is the fraction of its cells that
    map back to their own centroid.

    `marker` is the DataFrame produced by marker_gene() (needs the 'purify'
    column). With regress_size=True the cluster-size effect is additionally
    regressed out with a Gaussian GLM.

    Returns (cluster_to_accuracy dict, confusion_reassign DataFrame).
    '''
    # get gene pool: top 30 purified markers per cluster, deduplicated
    num = 30
    pool = []
    for i in range(marker.shape[0]):
        marker_genes = marker.iloc[i]['purify']
        pick = marker_genes[:num]   # slicing never overruns: shorter lists are taken whole
        pool.extend(pick)
    pool = list(set(pool))
    adata_now = adata[:,pool].copy()
    # mean-center and scale to unit variance
    from sklearn.preprocessing import scale
    adata_now.X = scale(adata_now.X,axis=0)
    # reduce dimension before computing centroids
    from sklearn.decomposition import PCA
    reducer = PCA(n_components=30)
    scoring = reducer.fit_transform(X=adata_now.X)
    from sklearn.preprocessing import LabelEncoder
    le = LabelEncoder()
    scoring_y = le.fit_transform(adata_now.obs[key].astype('str'))
    order = le.classes_
    # compute the centroid of each cluster in PCA space
    X = np.empty([len(adata_now.obs[key].cat.categories),scoring.shape[1]])
    y = []
    for i,cluster in enumerate(adata_now.obs[key].cat.categories):
        bool_index = adata_now.obs[key]==cluster
        X[i,:] = np.mean(scoring[bool_index,:],axis=0)
        y.append(cluster)
    y = le.fit_transform(y)
    # train a KNN classifier on the centroids; sklearn raises if n_neighbors
    # exceeds the number of training points, so cap it at the centroid count
    from sklearn.neighbors import KNeighborsClassifier
    from sklearn.metrics import confusion_matrix
    n_neighbors = min(10, X.shape[0])
    model = KNeighborsClassifier(n_neighbors=n_neighbors,weights='distance')
    model.fit(X,y)
    pred = model.predict(scoring)   # (n_samples,)
    mat = confusion_matrix(scoring_y,pred)
    confusion_reassign = pd.DataFrame(data=mat,index=order,columns=order)
    # per-cluster accuracy = confusion-matrix diagonal over row sum
    cluster_to_accuracy = {}
    for i,cluster in enumerate(order):
        cluster_to_accuracy[cluster] = mat[i,i]/np.sum(mat[i,:])
    # optionally regress out the cluster-size effect
    if regress_size:
        # BUGFIX: the bool parameter `regress_size` shadows the module-level
        # function of the same name, so calling the bare name here raised
        # "'bool' object is not callable"; fetch the function via globals().
        key_size_dict = get_size_in_metrics(adata.obs,key)
        df_inspect = pd.concat([pd.Series(cluster_to_accuracy),pd.Series(key_size_dict)],axis=1)   # index cluster, col1 metric, col2 size
        cluster_to_accuracy = globals()['regress_size'](df_inspect,regressor='GLM',to_dict=True)
    return cluster_to_accuracy, confusion_reassign
'''below is the part for regression score'''
def background_normalizer(df,n_neighbors=10,scale=True):
    """Normalize each cluster's metric against its most similar clusters.

    df: two-column frame where column 0 is the metric and column 1 the size,
    indexed by cluster name. Works on a deep copy (the caller's frame is not
    mutated) and returns it with two extra columns: 'order' (original row
    position) and 'normalized'. For every row the n_neighbors rows with the
    closest metric (including the row itself, distance 0) form the background;
    'normalized' is the z-score against that background when scale=True
    (0 when the background std is 0), otherwise the plain mean-centered value.
    """
    from copy import deepcopy
    df = deepcopy(df)
    df['order'] = np.arange(df.shape[0])
    col = []
    for i in range(df.shape[0]):
        # positional access; the old `df[0][i]` relied on the deprecated
        # integer-position fallback of label indexing (removed in pandas 3)
        this_metric = df.iloc[i, 0]
        distance_to_this = (df[0] - this_metric).abs()
        df_tmp = deepcopy(df)
        df_tmp['distance'] = distance_to_this.values
        df_tmp.sort_values(by='distance',inplace=True)
        neighbors_metric = df_tmp.iloc[:n_neighbors, 0].values
        mean_ = neighbors_metric.mean()
        std_ = neighbors_metric.std()
        if scale:
            # guard the degenerate background (all neighbors identical)
            col.append(0 if std_ == 0 else (this_metric-mean_)/std_)
        else:
            col.append(this_metric-mean_)
    df['normalized'] = col
    return df
def regress_size(df_inspect,regressor='background_zscore',n_neighbors=10,to_dict=False):
    """Remove the cluster-size effect from a per-cluster metric.

    df_inspect: index = cluster names, column 0 = metric, column 1 = size.
    regressor selects the correction scheme:
      * 'background_zscore' / 'background_mean' -- neighbourhood normalization
        via background_normalizer (z-scored or mean-centered); the corrected
        values are written back into df_inspect[0].
      * 'GLM' -- Gaussian GLM residuals of metric ~ size (df_inspect untouched).
      * 'Huber' / 'RANSAC' / 'TheilSen' -- residuals of a robust linear fit of
        metric on size; residuals are written back into df_inspect[0].
    Returns the corrected metric as a Series, or as a dict when to_dict=True.
    """
    if regressor == 'background_zscore':
        bg = background_normalizer(df_inspect,n_neighbors,True)
        df_inspect[0] = bg['normalized'].values
        normalized_metric_series = df_inspect[0]
    elif regressor == 'background_mean':
        bg = background_normalizer(df_inspect,n_neighbors,False)
        df_inspect[0] = bg['normalized'].values
        normalized_metric_series = df_inspect[0]
    elif regressor == 'GLM':
        import statsmodels.api as sm
        design = sm.add_constant(df_inspect[1],prepend=True)   # size plus intercept
        fit = sm.GLM(df_inspect[0],design,family=sm.families.Gaussian()).fit()
        normalized_metric_series = fit.resid_response
    elif regressor in ('Huber','RANSAC','TheilSen'):
        # the three robust sklearn regressors share the same fit/predict shape
        from sklearn.linear_model import HuberRegressor, RANSACRegressor, TheilSenRegressor
        estimator_cls = {'Huber': HuberRegressor,
                         'RANSAC': RANSACRegressor,
                         'TheilSen': TheilSenRegressor}[regressor]
        size_2d = df_inspect[1].values.reshape(-1,1)
        fitted = estimator_cls().fit(size_2d,df_inspect[0].values)
        df_inspect[0] = df_inspect[0].values - fitted.predict(size_2d)
        normalized_metric_series = df_inspect[0]
    return normalized_metric_series.to_dict() if to_dict else normalized_metric_series
def tf_idf_bare_compute(df,cluster):
    """TF-IDF-style enrichment score for every gene of one cluster.

    df: cells x genes expression frame plus an extra 'cluster' column.
    tf  = fraction of cells inside `cluster` with nonzero expression per gene;
    idf = -log10(fraction of ALL cells with nonzero expression per gene);
    both fractions receive a 1e-5 pseudocount. Returns ndarray (n_genes,).
    """
    gene_cols = df.columns != 'cluster'
    in_cluster = df.loc[df['cluster'] == cluster, :].loc[:, gene_cols].values
    everywhere = df.loc[:, gene_cols].values
    tf = np.count_nonzero(in_cluster, axis=0) / in_cluster.shape[0] + 1e-5
    global_frac = np.count_nonzero(everywhere, axis=0) / everywhere.shape[0] + 1e-5
    return tf * (-np.log10(global_frac))
def single_size_query(obs,c):
    """Number of cells in *obs* belonging to one cluster.

    c is a single-entry dict {annotation_column: cluster_name}, e.g. {'gs': 'ERP4'}.
    """
    (column, cluster), = c.items()
    return int((obs[column] == cluster).sum())
def get_size_in_metrics(obs,key):
    """Map every cluster under obs[key] to its cell count, e.g. {'ERP1': 54, ...}."""
    return {cluster: single_size_query(obs, {key: cluster})
            for cluster in obs[key].unique()}
def tf_idf10_for_cluster(adata,key,species,criterion,regress_size=False):
    '''
    Tenth-highest TF-IDF score per cluster in adata.obs[key], plus each
    cluster's exclusively-expressed genes.

    Per cluster: compute per-gene TF-IDF scores (tf_idf_bare_compute), drop
    artifact genes, keep the 10th-best score as the cluster metric and the
    full purified score dict as the cluster's exclusive genes. With
    regress_size=True the cluster-size effect is regressed out with a GLM.

    Returns (cluster_to_tfidf10 dict, exclusive_genes Series).
    '''
    df = pd.DataFrame(data=adata.X, index=adata.obs_names, columns=adata.var_names)
    df['cluster'] = adata.obs[key].astype('str').values
    # the artifact table depends only on (species, criterion): load it once
    # here instead of re-reading the file on every loop iteration
    artifact_genes = set(read_artifact_genes(species,criterion).index.to_list())
    cluster_to_tfidf10 = {}   # store tfidf10 score
    cluster_to_exclusive = {} # store exclusively expressed genes
    for item in adata.obs[key].cat.categories:
        scores = pd.Series(data=tf_idf_bare_compute(df,item), index=adata.var_names)
        scores.sort_values(ascending=False, inplace=True)
        scores_pure = scores.loc[~scores.index.isin(artifact_genes)]
        cluster_to_tfidf10[item] = scores_pure.iloc[9]   # 10th best; assumes >= 10 non-artifact genes
        cluster_to_exclusive[item] = scores_pure.to_dict()
    exclusive_genes = pd.Series(cluster_to_exclusive,name='genes')
    # optionally regress out the cluster-size effect
    if regress_size:
        # BUGFIX: the bool parameter `regress_size` shadows the module-level
        # function of the same name; calling the bare name raised
        # "'bool' object is not callable", so fetch the function via globals().
        key_size_dict = get_size_in_metrics(adata.obs,key)
        df_inspect = pd.concat([pd.Series(cluster_to_tfidf10),pd.Series(key_size_dict)],axis=1)   # index cluster, col1 metric, col2 size
        cluster_to_tfidf10 = globals()['regress_size'](df_inspect,regressor='GLM',to_dict=True)
    return cluster_to_tfidf10, exclusive_genes
def tf_idf5_for_cluster(adata,key,species,criterion,regress_size=False):
    '''
    Fifth-highest TF-IDF score per cluster in adata.obs[key].

    Per cluster: compute per-gene TF-IDF scores (tf_idf_bare_compute), drop
    artifact genes, keep the 5th-best score as the cluster metric. With
    regress_size=True the cluster-size effect is regressed out with a GLM.

    Returns {cluster: tfidf5_score}.
    '''
    df = pd.DataFrame(data=adata.X, index=adata.obs_names, columns=adata.var_names)
    df['cluster'] = adata.obs[key].astype('str').values
    # the artifact table depends only on (species, criterion): load it once
    # here instead of re-reading the file on every loop iteration
    artifact_genes = set(read_artifact_genes(species,criterion).index.to_list())
    cluster_to_tfidf5 = {}
    for item in adata.obs[key].cat.categories:
        scores = pd.Series(data=tf_idf_bare_compute(df,item), index=adata.var_names)
        scores.sort_values(ascending=False, inplace=True)
        scores_pure = scores.loc[~scores.index.isin(artifact_genes)]
        cluster_to_tfidf5[item] = scores_pure.iloc[4]   # 5th best; assumes >= 5 non-artifact genes
    # optionally regress out the cluster-size effect
    if regress_size:
        # BUGFIX: the bool parameter `regress_size` shadows the module-level
        # function of the same name; calling the bare name raised
        # "'bool' object is not callable", so fetch the function via globals().
        key_size_dict = get_size_in_metrics(adata.obs,key)
        df_inspect = pd.concat([pd.Series(cluster_to_tfidf5),pd.Series(key_size_dict)],axis=1)   # index cluster, col1 metric, col2 size
        cluster_to_tfidf5 = globals()['regress_size'](df_inspect,regressor='GLM',to_dict=True)
    return cluster_to_tfidf5
def tf_idf1_for_cluster(adata,key,species,criterion,regress_size=False):
    '''
    Highest TF-IDF score per cluster in adata.obs[key].

    Per cluster: compute per-gene TF-IDF scores (tf_idf_bare_compute), drop
    artifact genes, keep the single best score as the cluster metric. With
    regress_size=True the cluster-size effect is regressed out with a GLM.

    Returns {cluster: tfidf1_score}.
    '''
    df = pd.DataFrame(data=adata.X, index=adata.obs_names, columns=adata.var_names)
    df['cluster'] = adata.obs[key].astype('str').values
    # the artifact table depends only on (species, criterion): load it once
    # here instead of re-reading the file on every loop iteration
    artifact_genes = set(read_artifact_genes(species,criterion).index.to_list())
    cluster_to_tfidf1 = {}
    for item in adata.obs[key].cat.categories:
        scores = pd.Series(data=tf_idf_bare_compute(df,item), index=adata.var_names)
        scores.sort_values(ascending=False, inplace=True)
        scores_pure = scores.loc[~scores.index.isin(artifact_genes)]
        cluster_to_tfidf1[item] = scores_pure.iloc[0]   # single best non-artifact score
    # optionally regress out the cluster-size effect
    if regress_size:
        # BUGFIX: the bool parameter `regress_size` shadows the module-level
        # function of the same name; calling the bare name raised
        # "'bool' object is not callable", so fetch the function via globals().
        key_size_dict = get_size_in_metrics(adata.obs,key)
        df_inspect = pd.concat([pd.Series(cluster_to_tfidf1),pd.Series(key_size_dict)],axis=1)   # index cluster, col1 metric, col2 size
        cluster_to_tfidf1 = globals()['regress_size'](df_inspect,regressor='GLM',to_dict=True)
    return cluster_to_tfidf1
def SCCAF_score(adata, key, species, criterion, scale_sccaf,regress_size=False):
    '''
    SCCAF-style self-projection score for every cluster in adata.obs[key].

    Cells are split 50/50 (stratified by cluster), an L1 logistic regression
    is trained on one half and predicts the other; a cluster's score is the
    fraction of its held-out cells predicted back to it (confusion-matrix
    diagonal over row sum).

    scale_sccaf: standardize the gene matrix first; callers disable this for
    very large data since liblinear copes with unscaled input.
    regress_size=True additionally regresses out cluster size with a GLM.

    Returns (cluster_to_SCCAF dict, confusion_sccaf DataFrame).
    '''
    from sklearn.preprocessing import LabelEncoder
    from sklearn.model_selection import StratifiedShuffleSplit
    from sklearn.linear_model import LogisticRegression
    from sklearn.metrics import confusion_matrix
    # define X and Y, removing artifact genes up front
    artifact_genes = set(read_artifact_genes(species,criterion).index.to_list())
    X = adata[:,~adata.var_names.isin(artifact_genes)].X.copy()   # ArrayView -> ndarray
    Y = adata.obs[key].values
    # mean-center and scale to unit variance (optional, see docstring)
    if scale_sccaf:
        from sklearn.preprocessing import scale
        X = scale(X,axis=0)
    # encode cluster labels as integers
    le = LabelEncoder()
    Y = le.fit_transform(Y)
    # single stratified half/half split for self-projection
    sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=0)
    for train_index, test_index in sss.split(X, Y):
        X_train = X[train_index]
        Y_train = Y[train_index]
        X_test = X[test_index]
        Y_test = Y[test_index]
    model = LogisticRegression(penalty='l1', solver='liblinear', max_iter=100000)
    model.fit(X_train, Y_train)
    result = model.predict(X_test)
    m = confusion_matrix(Y_test, result)
    confusion_sccaf = pd.DataFrame(data=m,index=le.classes_,columns=le.classes_)
    # per-cluster reliability = correct predictions / cluster row total
    cluster_to_SCCAF = {}
    for i in range(m.shape[0]):
        cluster_to_SCCAF[le.classes_[i]] = m[i, i] / m[i, :].sum()
    # optionally regress out the cluster-size effect
    if regress_size:
        # BUGFIX: the bool parameter `regress_size` shadows the module-level
        # function of the same name; calling the bare name raised
        # "'bool' object is not callable", so fetch the function via globals().
        key_size_dict = get_size_in_metrics(adata.obs,key)
        df_inspect = pd.concat([pd.Series(cluster_to_SCCAF),pd.Series(key_size_dict)],axis=1)   # index cluster, col1 metric, col2 size
        cluster_to_SCCAF = globals()['regress_size'](df_inspect,regressor='GLM',to_dict=True)
    return cluster_to_SCCAF, confusion_sccaf
| 42.546642 | 208 | 0.665293 | import scanpy as sc
import pandas as pd
import numpy as np
import anndata as ad
import matplotlib.pyplot as plt
import seaborn as sns
import sys
import gseapy as gp
import math
import os
def check_filter_single_cluster(adata,key):
vc = adata.obs[key].value_counts()
exclude_clusters= vc.loc[vc==1].index
truth = np.logical_not(adata.obs[key].isin(exclude_clusters).values)
adata_valid = adata[truth,:]
return adata_valid
def doublet_compute(adata,key):
cluster_to_doublet = {}
for cluster in adata.obs[key].astype('category').cat.categories:
mean_score = adata[adata.obs[key]==cluster,:].obs['doublet_scores'].values.mean()
cluster_to_doublet[cluster] = mean_score
return cluster_to_doublet
def compute_combo_score(rank_uns,cluster):
rank_names = rank_uns['names'][cluster]
rank_lfc = rank_uns['logfoldchanges'][cluster]
rank_pval = rank_uns['pvals'][cluster]
df = pd.DataFrame({'names':rank_names,'lfc':rank_lfc,'pval':rank_pval})
df = df.loc[df['lfc'] > 0, :]
df.set_index(keys=pd.Index(np.arange(df.shape[0])), inplace=True)
temp = np.flip(np.argsort(df['lfc'].values))
ranks_lfc = np.empty_like(temp)
ranks_lfc[temp] = np.arange(len(df['pval'].values))
temp = np.argsort(df['pval'].values)
ranks_pval = np.empty_like(temp)
ranks_pval[temp] = np.arange(len(df['pval'].values))
temp = (ranks_lfc + ranks_pval) / 2
df['rank_lfc'] = ranks_lfc
df['rank_pval'] = ranks_pval
df['combo'] = temp
df.sort_values(by='combo', inplace=True)
df.set_index(keys=pd.Index(np.arange(df.shape[0])), inplace=True)
df = df.loc[df['pval']<0.05,:]
df.set_index(keys=pd.Index(np.arange(df.shape[0])), inplace=True)
return df
def run_enrichr(gene_list,key,name,folder):
artifact = pd.read_csv(os.path.join(os.path.dirname(os.path.abspath(__file__)),'artifact_genes.txt'),sep='\t')
artifact_dict = artifact.groupby(by='class')['genes'].apply(lambda x:x.tolist()).to_dict()
enr2 = gp.enrichr(gene_list=gene_list,
description=name,
gene_sets=artifact_dict,
background=20000,
outdir=os.path.join(folder,'scTriangulate_local_mode_enrichr'),
cutoff=0.1,
verbose=True)
enrichr_result = enr2.results
enrichr_dict = {}
for metric in artifact_dict.keys():
if enrichr_result.shape[0] == 0:
enrichr_dict[metric] = 0
else:
try:
enrichr_score = -math.log10(enrichr_result.loc[enrichr_result['Term']==metric,:]['Adjusted P-value'].to_list()[0])
except IndexError:
enrichr_dict[metric] = 0
else:
enrichr_dict[metric] = enrichr_score
return enrichr_dict
def run_gsea(gene_list,key,name,folder):
artifact = pd.read_csv(os.path.join(os.path.dirname(os.path.abspath(__file__)),'artifact_genes.txt'),sep='\t')
artifact_dict = artifact.groupby(by='class')['genes'].apply(lambda x:x.tolist()).to_dict()
artifact_dict_keys = list(artifact_dict.keys())
df = pd.DataFrame({0: gene_list, 1: 1/(np.arange(len(gene_list))+1)})
gsea_dict = {}
try:
pre_res = gp.prerank(rnk=df, gene_sets=artifact_dict,
permutation_num=100,
outdir=os.path.join(folder,'scTriangulate_local_mode_gsea/{}/{}'.format(key,name)),
min_size=1,
max_size=10000,
seed=6,
verbose=True)
except:
for metric in artifact_dict_keys:
gsea_dict[metric] = (0,0) else:
gsea_result = pre_res.res2d
metric_get = set(gsea_result.index.tolist())
for metric in artifact_dict_keys:
if metric in metric_get:
gsea_score = gsea_result.loc[gsea_result.index==metric,:]['nes'].to_list()[0]
gsea_hits = gsea_result.loc[gsea_result.index==metric,:]['matched_size'].to_list()[0]
gsea_dict[metric] = (gsea_score, gsea_hits)
else:
gsea_dict[metric] = (0,0)
return gsea_dict
def read_artifact_genes(species,criterion):
artifact = pd.read_csv(os.path.join(os.path.dirname(os.path.abspath(__file__)),'artifact_genes.txt'),sep='\t',index_col=0)
artifact = artifact.loc[artifact['species']==species,:]
if criterion == 1:
artifact = artifact
elif criterion == 2:
artifact = artifact.loc[~(artifact['class']=='cellcycle'),:]
elif criterion == 3:
artifact = artifact.loc[~((artifact['class']=='ribosome')|(artifact['class']=='cellcycle')),:]
elif criterion == 4:
artifact = artifact.loc[~((artifact['class']=='ribosome')|(artifact['class']=='cellcylce')|(artifact['class']=='mitochondrial')),:]
elif criterion == 5:
artifact = artifact.loc[~((artifact['class']=='ribosome')|(artifact['class']=='cellcylce')|(artifact['class']=='mitochondrial')|(artifact['class']=='antisense')),:]
elif criterion == 6:
artifact = artifact.loc[~((artifact['class']=='ribosome')|(artifact['class']=='cellcylce')|(artifact['class']=='mitochondrial')|(artifact['class']=='antisense')|(artifact['class']=='predict_gene')),:]
return artifact
def purify_gene(genelist,species,criterion):
result = []
artifact = read_artifact_genes(species,criterion)
artifact_genes = set(artifact.index.to_list())
for gene in genelist:
if gene not in artifact_genes:
result.append(gene)
return result
def marker_gene(adata, key, species, criterion, folder):
if adata.uns.get('rank_genes_groups') != None:
del adata.uns['rank_genes_groups']
sc.tl.rank_genes_groups(adata, key, method='t-test',n_genes=adata.shape[1])
all_genes = adata.var_names.values
all_clusters = adata.obs[key].cat.categories
cluster2gene = dict()
rank_uns = adata.uns['rank_genes_groups']
pre_computed_dfs = []
for cluster in all_clusters:
cluster2gene[cluster] = []
df = compute_combo_score(rank_uns, cluster)
pre_computed_dfs.append(df)
for gene in all_genes:
index_store = []
for i,cluster in enumerate(all_clusters):
df = pre_computed_dfs[i]
try:
index = np.nonzero(df['names'].values == gene)[0][0]
except IndexError:
index = len(all_genes)
index_store.append(index)
if np.all(np.array(index_store) == len(all_genes)):
continue
assign = all_clusters[np.argmin(np.array(index_store))]
cluster2gene[assign].append((gene,np.min(index_store)))
for key_,value in cluster2gene.items():
gene = [item[0] for item in value]
rank = [item[1] for item in value]
temp = sorted(zip(gene,rank),key=lambda x:x[1])
cluster2gene[key_] = [item[0] for item in temp]
result = pd.Series(cluster2gene).to_frame()
result.columns = ['whole_marker_genes']
col_enrichr = []
col_gsea = []
col_purify = [] # genelist that have artifact genes removed
for cluster in result.index:
enrichr_dict = run_enrichr(result.loc[cluster,:].to_list()[0],key=key,name=cluster,folder=folder) # [0] because it is a [[gene_list]],we only need [gene_list]
gsea_dict = run_gsea(result.loc[cluster,:].to_list()[0],key=key,name=cluster,folder=folder)
purified = purify_gene(result.loc[cluster,:].to_list()[0],species,criterion) # the [0] is explained last line
col_enrichr.append(enrichr_dict)
col_gsea.append(gsea_dict)
col_purify.append(purified)
result['enrichr'] = col_enrichr
result['gsea'] = col_gsea
result['purify'] = col_purify
return result
def reassign_score(adata,key,marker,regress_size=False):
# get gene pool, slice the adata
num = 30
pool = []
for i in range(marker.shape[0]):
marker_genes = marker.iloc[i]['purify']
pick = marker_genes[:num] # if the list doesn't have more than 30 markers, it is oK, python will automatically choose all
pool.extend(pick)
pool = list(set(pool))
adata_now = adata[:,pool].copy()
tmp = adata_now.X
from sklearn.preprocessing import scale
tmp_scaled = scale(tmp,axis=0)
adata_now.X = tmp_scaled
from sklearn.decomposition import PCA
reducer = PCA(n_components=30)
scoring = reducer.fit_transform(X=adata_now.X)
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
scoring_y = le.fit_transform(adata_now.obs[key].astype('str'))
order = le.classes_
X = np.empty([len(adata_now.obs[key].cat.categories),scoring.shape[1]])
y = []
for i,cluster in enumerate(adata_now.obs[key].cat.categories):
bool_index = adata_now.obs[key]==cluster
centroid = np.mean(scoring[bool_index,:],axis=0)
X[i,:] = centroid
y.append(cluster)
y = le.fit_transform(y)
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix
n_neighbors = 10
if X.shape[0] < n_neighbors:
n_neighbors = X.shape[0]
model = KNeighborsClassifier(n_neighbors=n_neighbors,weights='distance')
model.fit(X,y)
pred = model.predict(scoring)
mat = confusion_matrix(scoring_y,pred)
confusion_reassign = pd.DataFrame(data=mat,index=order,columns=order)
accuracy = []
for i in range(mat.shape[0]):
accuracy.append(mat[i,i]/np.sum(mat[i,:]))
cluster_to_accuracy = {}
for i,cluster in enumerate(order):
cluster_to_accuracy[cluster] = accuracy[i]
if regress_size:
key_metric_dict = cluster_to_accuracy
key_size_dict = get_size_in_metrics(adata.obs,key)
df_inspect = pd.concat([pd.Series(key_metric_dict),pd.Series(key_size_dict)],axis=1)
cluster_to_accuracy = regress_size(df_inspect,regressor='GLM',to_dict=True)
return cluster_to_accuracy, confusion_reassign
def background_normalizer(df,n_neighbors=10,scale=True):
from copy import deepcopy
df = deepcopy(df)
df['order'] = np.arange(df.shape[0])
col = []
for i in range(df.shape[0]):
this_metric = df[0][i]
distance_to_this = (df[0] - this_metric).abs()
df_tmp = deepcopy(df)
df_tmp['distance'] = distance_to_this.values
df_tmp.sort_values(by='distance',inplace=True)
neighbors_metric = df_tmp.iloc[:,0][:n_neighbors].values
mean_ = neighbors_metric.mean()
std_ = neighbors_metric.std()
if scale:
if std_ == 0:
col.append(0)
else:
col.append((this_metric-mean_)/std_)
else:
col.append(this_metric-mean_)
df['normalized'] = col
return df
def regress_size(df_inspect,regressor='background_zscore',n_neighbors=10,to_dict=False):
if regressor == 'background_zscore':
df_now = background_normalizer(df_inspect,n_neighbors,True)
residual = df_now['normalized'].values
df_inspect[0] = residual
normalized_metric_series = df_inspect[0]
elif regressor == 'background_mean':
df_now = background_normalizer(df_inspect,n_neighbors,False)
residual = df_now['normalized'].values
df_inspect[0] = residual
normalized_metric_series = df_inspect[0]
elif regressor == 'GLM':
endog = df_inspect[0]
exog = df_inspect[1]
import statsmodels.api as sm
exog = sm.add_constant(exog,prepend=True)
model = sm.GLM(endog,exog,family=sm.families.Gaussian())
res = model.fit()
residual = res.resid_response
normalized_metric_series = residual
elif regressor == 'Huber':
endog = df_inspect[0]
exog = df_inspect[1]
from sklearn.linear_model import HuberRegressor
model = HuberRegressor().fit(exog.values.reshape(-1,1),endog.values)
prediction = model.predict(exog.values.reshape(-1,1))
residual = endog.values - prediction
df_inspect[0] = residual
normalized_metric_series = df_inspect[0]
elif regressor == 'RANSAC':
endog = df_inspect[0]
exog = df_inspect[1]
from sklearn.linear_model import RANSACRegressor
model = RANSACRegressor().fit(exog.values.reshape(-1,1),endog.values)
prediction = model.predict(exog.values.reshape(-1,1))
residual = endog.values - prediction
df_inspect[0] = residual
normalized_metric_series = df_inspect[0]
elif regressor == 'TheilSen':
endog = df_inspect[0]
exog = df_inspect[1]
from sklearn.linear_model import TheilSenRegressor
model = TheilSenRegressor().fit(exog.values.reshape(-1,1),endog.values)
prediction = model.predict(exog.values.reshape(-1,1))
residual = endog.values - prediction
df_inspect[0] = residual
normalized_metric_series = df_inspect[0]
if to_dict:
normalized_metric_dict = normalized_metric_series.to_dict()
final = normalized_metric_dict
else:
final = normalized_metric_series
return final
def tf_idf_bare_compute(df,cluster):
tmp1 = df.loc[df['cluster'] == cluster, :].loc[:,df.columns!='cluster'].values
tf = np.count_nonzero(tmp1,axis=0) / tmp1.shape[0]
tf = tf + 1e-5
tmp2 = df.loc[:,df.columns!='cluster'].values
df_ = np.count_nonzero(tmp2,axis=0) / tmp2.shape[0]
df_ = df_ + 1e-5
idf = -np.log10(df_)
tf_idf_ori = tf * idf
return tf_idf_ori
def single_size_query(obs,c):
key = list(c.keys())[0]
cluster = list(c.values())[0]
size = obs.loc[obs[key]==cluster,:].shape[0]
return size
def get_size_in_metrics(obs,key):
key_size_dict = {}
for cluster in obs[key].unique():
size = single_size_query(obs,{key:cluster})
key_size_dict[cluster] = size
return key_size_dict
def tf_idf10_for_cluster(adata,key,species,criterion,regress_size=False):
df = pd.DataFrame(data=adata.X, index=adata.obs_names, columns=adata.var_names)
df['cluster'] = adata.obs[key].astype('str').values
cluster_to_tfidf10 = {}
cluster_to_exclusive = {}
for item in adata.obs[key].cat.categories:
a = tf_idf_bare_compute(df,item)
a_names = adata.var_names
test = pd.Series(data=a, index=a_names)
test.sort_values(ascending=False, inplace=True)
artifact = read_artifact_genes(species,criterion)
artifact_genes = set(artifact.index.to_list())
test_pure = test.loc[~test.index.isin(artifact_genes)]
result10 = test_pure.iloc[9]
cluster_to_tfidf10[item] = result10
cluster_to_exclusive[item] = test_pure.to_dict()
exclusive_genes = pd.Series(cluster_to_exclusive,name='genes')
if regress_size:
key_metric_dict = cluster_to_tfidf10
key_size_dict = get_size_in_metrics(adata.obs,key)
df_inspect = pd.concat([pd.Series(key_metric_dict),pd.Series(key_size_dict)],axis=1)
cluster_to_tfidf10 = regress_size(df_inspect,regressor='GLM',to_dict=True)
return cluster_to_tfidf10, exclusive_genes
def tf_idf5_for_cluster(adata,key,species,criterion,regress_size=False):
df = pd.DataFrame(data=adata.X, index=adata.obs_names, columns=adata.var_names)
df['cluster'] = adata.obs[key].astype('str').values
cluster_to_tfidf5 = {}
for item in adata.obs[key].cat.categories:
a = tf_idf_bare_compute(df,item)
a_names = adata.var_names
test = pd.Series(data=a, index=a_names)
test.sort_values(ascending=False, inplace=True)
artifact = read_artifact_genes(species,criterion)
artifact_genes = set(artifact.index.to_list())
test_pure = test.loc[~test.index.isin(artifact_genes)]
result5 = test_pure.iloc[4]
cluster_to_tfidf5[item] = result5
if regress_size:
key_metric_dict = cluster_to_tfidf5
key_size_dict = get_size_in_metrics(adata.obs,key)
df_inspect = pd.concat([pd.Series(key_metric_dict),pd.Series(key_size_dict)],axis=1)
cluster_to_tfidf5 = regress_size(df_inspect,regressor='GLM',to_dict=True)
return cluster_to_tfidf5
def tf_idf1_for_cluster(adata,key,species,criterion,regress_size=False):
df = pd.DataFrame(data=adata.X, index=adata.obs_names, columns=adata.var_names)
df['cluster'] = adata.obs[key].astype('str').values
cluster_to_tfidf1 = {}
for item in adata.obs[key].cat.categories:
a = tf_idf_bare_compute(df,item)
a_names = adata.var_names
test = pd.Series(data=a, index=a_names)
test.sort_values(ascending=False, inplace=True)
artifact = read_artifact_genes(species,criterion)
artifact_genes = set(artifact.index.to_list())
test_pure = test.loc[~test.index.isin(artifact_genes)]
result1 = test_pure.iloc[0]
cluster_to_tfidf1[item] = result1
if regress_size:
key_metric_dict = cluster_to_tfidf1
key_size_dict = get_size_in_metrics(adata.obs,key)
df_inspect = pd.concat([pd.Series(key_metric_dict),pd.Series(key_size_dict)],axis=1)
cluster_to_tfidf1 = regress_size(df_inspect,regressor='GLM',to_dict=True)
return cluster_to_tfidf1
def SCCAF_score(adata, key, species, criterion, scale_sccaf,regress_size=False):
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
artifact = read_artifact_genes(species,criterion)
artifact_genes = set(artifact.index.to_list())
X = adata[:,~adata.var_names.isin(artifact_genes)].X.copy()
Y = adata.obs[key].values
if scale_sccaf:
tmp = X
from sklearn.preprocessing import scale
tmp_scaled = scale(tmp,axis=0)
X = tmp_scaled
le = LabelEncoder()
Y = le.fit_transform(Y)
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=0)
for train_index, test_index in sss.split(X, Y):
X_train = X[train_index]
Y_train = Y[train_index]
X_test = X[test_index]
Y_test = Y[test_index]
model = LogisticRegression(penalty='l1', solver='liblinear', max_iter=100000)
model.fit(X_train, Y_train)
result = model.predict(X_test)
m = confusion_matrix(Y_test, result)
confusion_sccaf = pd.DataFrame(data=m,index=le.classes_,columns=le.classes_)
numeric2reliable = []
for i in range(m.shape[0]):
numeric2reliable.append(m[i, i] / m[i, :].sum())
cluster_to_SCCAF = {}
for i in range(len(numeric2reliable)):
cluster_to_SCCAF[le.classes_[i]] = numeric2reliable[i]
if regress_size:
key_metric_dict = cluster_to_SCCAF
key_size_dict = get_size_in_metrics(adata.obs,key)
df_inspect = pd.concat([pd.Series(key_metric_dict),pd.Series(key_size_dict)],axis=1)
cluster_to_SCCAF = regress_size(df_inspect,regressor='GLM',to_dict=True)
return cluster_to_SCCAF, confusion_sccaf
| true | true |
f7fcf2b65de1d32b0e36ec91680fa812b0d37d92 | 6,535 | py | Python | scripts/jinja_helpers.py | jherico/OpenXR-Hpp | a582c75853737ee8269770a2732fef517ac1f9ea | [
"Apache-2.0"
] | null | null | null | scripts/jinja_helpers.py | jherico/OpenXR-Hpp | a582c75853737ee8269770a2732fef517ac1f9ea | [
"Apache-2.0"
] | null | null | null | scripts/jinja_helpers.py | jherico/OpenXR-Hpp | a582c75853737ee8269770a2732fef517ac1f9ea | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3 -i
#
# Copyright (c) 2019 The Khronos Group Inc.
# Copyright (c) 2019 Collabora, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides functionality to use Jinja2 when generating C/C++ code, while eliminating the need to import Jinja2 from any other file."""
import os
import re
from pathlib import Path
_ADDED_TO_PATH = False
OPENXR = os.getenv("OPENXR_REPO")
if not OPENXR:
OPENXR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'OpenXR-SDK-Source'))
def _add_to_path():
global _ADDED_TO_PATH
if not _ADDED_TO_PATH:
import sys
# Find Jinja2 in source tree, as last resort.
sys.path.append(
str(
Path(OPENXR).resolve() / "external" / "python"))
_ADDED_TO_PATH = True
_WHITESPACE = re.compile(r"[\s\n]+")
def _undecorate(name):
"""Undecorate a name by removing the leading Xr and making it lowercase."""
lower = name.lower()
assert(lower.startswith('xr'))
return lower[2:]
def _quote_string(s):
return '"{}"'.format(s)
def _base_name(name):
return name[2:]
def _collapse_whitespace(s):
return _WHITESPACE.sub(" ", s)
def _protect_begin(entity, parent=None):
if entity.protect_value:
if parent and parent.protect_string == entity.protect_string:
# No need to double-protect if condition the same
return ""
return "#if {}".format(entity.protect_string)
return ""
def _protect_end(entity, parent=None):
if entity.protect_value:
if parent and parent.protect_string == entity.protect_string:
# No need to double-protect if condition the same
return ""
return "#endif // {}".format(entity.protect_string)
return ""
def make_jinja_environment(file_with_templates_as_sibs=None, search_path=None):
    """Build a Jinja2 environment tuned for generating C/C++ code for Khronos APIs.

    The default Jinja2 delimiters are replaced by comment-friendly ones so that
    templates remain valid-looking C/C++ for editors and tooling:

    - blocks: ``/*% ... %*/`` instead of ``{% ... %}``
    - variables: ``/*{ ... }*/`` instead of ``{{ ... }}``
    - line statements start with ``//#``, line comments with ``//##``

    Autoescaping is off (this is not HTML), trailing newlines are kept, and
    blocks are trimmed for easier markup. The file-system loader searches the
    directory containing *file_with_templates_as_sibs* (pass ``__file__`` if
    your template is a sibling of your source file), followed by every entry of
    *search_path* (an iterable for finer control).

    The environment is pre-loaded with the filters ``quote_string``,
    ``undecorate``, ``base_name`` and ``collapse_whitespace``, and with the
    globals ``protect_begin(entity[, parent])`` / ``protect_end(entity[, parent])``
    which print the appropriate ``#if`` / ``#endif`` for entities annotated with
    ``protect="..."`` in the XML. You may further add globals and filters to the
    returned environment.
    """
    _add_to_path()
    from jinja2 import Environment, FileSystemLoader

    loader_paths = []
    if file_with_templates_as_sibs:
        loader_paths.append(str(Path(file_with_templates_as_sibs).resolve().parent))
    if search_path:
        loader_paths.extend(search_path)

    env = Environment(
        keep_trailing_newline=True,
        trim_blocks=True,
        block_start_string="/*%",
        block_end_string="%*/",
        variable_start_string="/*{",
        variable_end_string="}*/",
        line_statement_prefix="//#",
        line_comment_prefix="//##",
        autoescape=False,
        loader=FileSystemLoader(loader_paths),
    )
    env.filters.update(
        quote_string=_quote_string,
        undecorate=_undecorate,
        base_name=_base_name,
        collapse_whitespace=_collapse_whitespace,
    )
    env.globals.update(protect_begin=_protect_begin, protect_end=_protect_end)
    return env
class JinjaTemplate:
    """Thin wrapper over a parsed Jinja2 template with friendlier error reporting.

    Keeps all Jinja2 imports confined to this module: callers never need to
    import Jinja2 themselves.
    """

    def __init__(self, env, fn):
        """Load and parse template *fn* from environment *env*.

        Create the environment with make_jinja_environment(). Syntax errors are
        printed with their location, then re-raised to stop execution.
        """
        _add_to_path()
        from jinja2 import TemplateSyntaxError

        try:
            self.template = env.get_template(fn)
        except TemplateSyntaxError as exc:
            print("Jinja2 template syntax error during parse: {}:{} error: {}".
                  format(exc.filename, exc.lineno, exc.message))
            raise exc

    def render(self, *args, **kwargs):
        """Render the template, passing all arguments through to Jinja2.

        Syntax errors raised during rendering are printed and converted to
        RuntimeError so Jinja2 need not be imported anywhere but this file.
        """
        _add_to_path()
        from jinja2 import TemplateSyntaxError

        try:
            return self.template.render(*args, **kwargs)
        except TemplateSyntaxError as exc:
            message = "Jinja2 template syntax error during render: {}:{} error: {}".format(
                exc.filename, exc.lineno, exc.message)
            print(message)
            raise RuntimeError(message)
| 36.104972 | 135 | 0.669013 |
import os
import re
from pathlib import Path
# One-shot guard so the sys.path modification below happens at most once.
_ADDED_TO_PATH = False
# OpenXR-SDK-Source checkout root: environment override, else a sibling directory.
OPENXR = os.getenv("OPENXR_REPO")
if not OPENXR:
    OPENXR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'OpenXR-SDK-Source'))
def _add_to_path():
    """Append the OpenXR in-tree copy of Jinja2 to sys.path, only once."""
    global _ADDED_TO_PATH
    if not _ADDED_TO_PATH:
        import sys
        # Last resort: the Jinja2 bundled inside the OpenXR source tree.
        sys.path.append(
            str(
                Path(OPENXR).resolve() / "external" / "python"))
        _ADDED_TO_PATH = True
# Pattern matching any run of whitespace (newlines included).
_WHITESPACE = re.compile(r"[\s\n]+")
def _undecorate(name):
    """Drop the leading "Xr" prefix and lowercase the remainder."""
    lower = name.lower()
    assert(lower.startswith('xr'))
    return lower[2:]
def _quote_string(s):
    """Wrap *s* in double quotes."""
    return '"{}"'.format(s)
def _base_name(name):
    """Strip the two-character API prefix, keeping the original case."""
    return name[2:]
def _collapse_whitespace(s):
    """Replace each whitespace run in *s* with a single space."""
    return _WHITESPACE.sub(" ", s)
def _protect_begin(entity, parent=None):
    """Return the "#if" line guarding *entity*, or "" when none is needed."""
    if entity.protect_value:
        if parent and parent.protect_string == entity.protect_string:
            # The parent already opened an identical guard.
            return ""
        return "#if {}".format(entity.protect_string)
    return ""
def _protect_end(entity, parent=None):
    """Return the matching "#endif" line for *entity*, or "" when none is needed."""
    if entity.protect_value:
        if parent and parent.protect_string == entity.protect_string:
            # The parent owns the guard; no separate #endif here.
            return ""
        return "#endif // {}".format(entity.protect_string)
    return ""
def make_jinja_environment(file_with_templates_as_sibs=None, search_path=None):
    """Create a Jinja2 environment customized for generating C/C++ code.

    Delimiters are replaced by comment-friendly ones (blocks ``/*% %*/``,
    variables ``/*{ }*/``, line statements ``//#``, line comments ``//##``) so
    templates interoperate with C/C++ tooling. Autoescape is off, trailing
    newlines are kept and blocks are trimmed. The loader searches the directory
    of *file_with_templates_as_sibs* followed by *search_path* entries; common
    filters and protect_begin/protect_end globals are pre-registered.
    """
    _add_to_path()
    from jinja2 import Environment, FileSystemLoader
    search_paths = []
    if file_with_templates_as_sibs:
        search_paths.append(
            str(Path(file_with_templates_as_sibs).resolve().parent))
    if search_path:
        search_paths.extend(search_path)
    env = Environment(keep_trailing_newline=True,
                      trim_blocks=True,
                      block_start_string="/*%",
                      block_end_string="%*/",
                      variable_start_string="/*{",
                      variable_end_string="}*/",
                      line_statement_prefix="//#",
                      line_comment_prefix="//##",
                      autoescape=False,
                      loader=FileSystemLoader(search_paths))
    # Register the module's helper filters and the #if/#endif guard globals.
    env.filters['quote_string'] = _quote_string
    env.filters['undecorate'] = _undecorate
    env.filters['base_name'] = _base_name
    env.filters['collapse_whitespace'] = _collapse_whitespace
    env.globals['protect_begin'] = _protect_begin
    env.globals['protect_end'] = _protect_end
    return env
class JinjaTemplate:
    """Wrapper around a parsed Jinja2 template that reports syntax errors."""
    def __init__(self, env, fn):
        """Load template *fn* from *env*; print details and re-raise on syntax errors."""
        _add_to_path()
        from jinja2 import TemplateSyntaxError
        try:
            self.template = env.get_template(fn)
        except TemplateSyntaxError as e:
            print("Jinja2 template syntax error during parse: {}:{} error: {}".
                  format(e.filename, e.lineno, e.message))
            raise e
    def render(self, *args, **kwargs):
        """Render with the provided context; syntax errors become RuntimeError."""
        _add_to_path()
        from jinja2 import TemplateSyntaxError
        try:
            return self.template.render(*args, **kwargs)
        except TemplateSyntaxError as e:
            error_str = "Jinja2 template syntax error during render: {}:{} error: {}".format(
                e.filename, e.lineno, e.message)
            print(error_str)
            raise RuntimeError(error_str)
| true | true |
f7fcf341ee87b22e586a360dfca375e724590cb3 | 5,139 | py | Python | static/data/functional_tests/random_guides.py | joshim5/CRISPR-Library-Designer | 2def1e4351c82056587620f7520ec922761ac8f3 | [
"BSD-3-Clause"
] | 17 | 2017-05-24T18:57:56.000Z | 2021-04-18T05:00:10.000Z | static/data/functional_tests/random_guides.py | joshim5/CRISPR-Library-Designer | 2def1e4351c82056587620f7520ec922761ac8f3 | [
"BSD-3-Clause"
] | 10 | 2017-09-11T09:17:51.000Z | 2022-03-11T23:18:50.000Z | static/data/functional_tests/random_guides.py | joshim5/CRISPR-Library-Designer | 2def1e4351c82056587620f7520ec922761ac8f3 | [
"BSD-3-Clause"
] | 5 | 2017-07-28T23:59:51.000Z | 2022-01-04T19:22:22.000Z | # Python packages
import json
import os.path
from Queue import PriorityQueue
import random
import re
import time
import pickle
import numpy as np
start_time = time.time()  # used below to report how long model loading takes
# CLD code from parent directories
import os
import inspect
# Make the repository root importable so the bundled azimuth package resolves.
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
rootdir = os.path.dirname(os.path.dirname(os.path.dirname(currentdir)))
os.sys.path.insert(0,rootdir)
import azimuth.model_comparison
class GuideRNA():
    """Container for one candidate guide RNA and its associated metadata."""

    def __init__(self, selected, start, seq, PAM, score, exon_ranking, ensembl_gene, gene_name):
        # start: protospacer start position within the searched sequence;
        # seq/PAM: protospacer and adjacent PAM; score: Azimuth on-target score.
        self.start = start
        self.seq = seq
        self.PAM = PAM
        self.score = score
        self.exon_ranking = exon_ranking
        self.ensembl_gene = ensembl_gene
        self.gene_name = gene_name
        self.selected = selected

    def serialize_for_display(self):
        """Return the JSON-ready subset of fields used by the front end."""
        keys = ("score", "start", "seq", "PAM", "selected")
        return {key: getattr(self, key) for key in keys}

    def __cmp__(self, other):
        # Python 2 ordering hook: rank guides by their on-target score.
        return cmp(self.score, other.score)
# Guide-search parameters: SpCas9 "NGG" PAM, 20-nt protospacer read 5' of the
# PAM (prime5=True); quantity caps how many guides are kept per gene.
params = {
    "PAM": "NGG",
    "protospacer_len": 20,
    "prime5": True,
    "use_Doench": True,
    "quantity": 100
}
# Load the pre-trained Azimuth on-target scoring model shipped with the package.
print "loading azimuth models", time.time() - start_time
azimuth_saved_model_dir = os.path.join(os.path.dirname(azimuth.__file__), 'saved_models')
model_name = 'V3_model_full.pickle'
azimuth_model_file = os.path.join(azimuth_saved_model_dir, model_name)
with open(azimuth_model_file, 'rb') as f:
    azimuth_model = pickle.load(f)
# Memoization cache: 30-mer sequence -> Azimuth score.
azimuth_scores = {}
def get_azimuth_score(mer30):
    """Return the Azimuth on-target score of a 30-mer, memoized in azimuth_scores."""
    cached = azimuth_scores.get(mer30)
    if cached is None:
        # Azimuth expects a 4 + 20 + PAM(3) + 3 context around the protospacer.
        cached = azimuth.model_comparison.predict(
            np.array([mer30]), aa_cut=None, percent_peptide=None,
            model=azimuth_model, model_file=azimuth_model_file)[0]
        azimuth_scores[mer30] = cached
    return cached
# Turn the PAM into a regex: each "N" matches any nucleotide.
modPAM = params["PAM"].upper()
modPAM = modPAM.replace('N', '[ATCG]')
params["modPAM"] = modPAM
params["PAM_len"] = len(params["PAM"])
# Reverse complement of a DNA string (N maps to N).
revcompl = lambda x: ''.join([{'A':'T','C':'G','G':'C','T':'A','N':'N'}[B] for B in x][::-1])
def gene_exon_file(gene, exon):
    """Return the sequence text of exon file "<gene>_<exon>", or None if absent."""
    seq_path = os.path.join('../GRCh37_exons/', "{0}_{1}".format(gene, exon))
    if not os.path.isfile(seq_path):
        return None
    with open(seq_path) as handle:
        return handle.read()
def exome(gene):
    """Concatenate the sequences of consecutive exon files of *gene*, in order."""
    pieces = []
    index = 0
    while True:
        chunk = gene_exon_file(gene, index)
        # Stop at the first missing (or empty) exon file.
        if not chunk:
            break
        pieces.append(chunk)
        index += 1
    return "".join(pieces)
def random_guides(gene, library_size):
    """Pick up to library_size random PAM sites in *gene* and score them.

    :param gene: dict with at least "ensembl_id" and "name" keys
    :param library_size: number of randomly chosen guides to generate
    :return: list of serialized GuideRNA dicts (at most params["quantity"])
    """
    seq = exome(gene["ensembl_id"])
    q = PriorityQueue()
    def process_guide(m, selected, max_queue_size, seq):
        # Score the guide at regex match *m* and enqueue it if there is room.
        PAM_start = m.start()
        # Doench score requires the 4 before and 6 after 20-mer (gives 30-mer)
        mer30 = seq[PAM_start-params["protospacer_len"]-4:PAM_start+params["PAM_len"]+3]
        if len(mer30) != 30:
            print "Error! The following guide is not long enough:", seq, mer30, gene["ensembl_id"], gene["name"]
        score = get_azimuth_score(mer30)
        protospacer = ""
        PAM = ""
        if params["prime5"]:
            # Protospacer lies immediately 5' of the PAM.
            protospacer = seq[PAM_start-params["protospacer_len"]:PAM_start]
            PAM = seq[PAM_start:PAM_start+params["PAM_len"]]
        else:
            # Protospacer lies immediately 3' of the PAM.
            protospacer = seq[PAM_start+params["PAM_len"]:PAM_start+params["PAM_len"]+params["protospacer_len"]]
            PAM = seq[PAM_start:PAM_start+params["PAM_len"]]
        potential_gRNA = GuideRNA(selected, PAM_start-params["protospacer_len"], protospacer, PAM, score, -1, gene["ensembl_id"], gene["name"])
        # If there's enough room, add it, no question.
        if q.qsize() < max_queue_size:
            q.put(potential_gRNA)
        # We remove the other case, since we want to add anything that is given to this function.
        #### Otherwise, take higher score
        ###else:
        ###    lowest_gRNA = q.get()
        ###    if potential_gRNA.score > lowest_gRNA.score:
        ###        q.put(potential_gRNA)
        ###    else:
        ###        q.put(lowest_gRNA)
    # Logic continues here, outside process_guide
    # NOTE(review): reverse-strand matches are processed against seq_rc, so their
    # 'start' is a strand-local coordinate — confirm downstream expects this.
    seq_rc = revcompl(seq)
    forward_matches = [('Forward', m) for m in re.finditer(params["modPAM"], seq)]
    reverse_matches = [('Reverse', m) for m in re.finditer(params["modPAM"], seq_rc)]
    all_matches = forward_matches + reverse_matches
    random.shuffle(all_matches)
    # generate library_size number of guides
    required = library_size
    for match in all_matches:
        if required == 0:
            break
        direction, m = match
        # Skip matches whose 30-mer context would run off either sequence end.
        if params["prime5"] and (m.start() < params["protospacer_len"] + 4 or m.start() + params["PAM_len"] + 3 > len(seq)):
            continue
        elif not params["prime5"] and (m.start() + params["PAM_len"] + params["protospacer_len"] > len(seq)):
            continue
        if direction == 'Forward':
            process_guide(m, True, params["quantity"], seq)
        else:
            process_guide(m, True, params["quantity"], seq_rc)
        required -= 1
    # Pop gRNAs into our 'permament' storage, i.e. return them.
    gRNAs = []
    while not q.empty():
        gRNA = q.get()
        gRNAs.append(gRNA.serialize_for_display())
    return gRNAs
| 31.335366 | 153 | 0.678926 |
import json
import os.path
from Queue import PriorityQueue
import random
import re
import time
import pickle
import numpy as np
start_time = time.time()  # used below to report how long model loading takes
import os
import inspect
# Make the repository root importable so the bundled azimuth package resolves.
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
rootdir = os.path.dirname(os.path.dirname(os.path.dirname(currentdir)))
os.sys.path.insert(0,rootdir)
import azimuth.model_comparison
class GuideRNA():
    """Holder of gRNA information"""
    def __init__(self, selected, start, seq, PAM, score, exon_ranking, ensembl_gene, gene_name):
        # start: protospacer start position; seq/PAM: protospacer and adjacent
        # PAM; score: Azimuth on-target score; selected: shown as selected.
        self.start = start
        self.seq = seq
        self.PAM = PAM
        self.score = score
        self.exon_ranking = exon_ranking
        self.ensembl_gene = ensembl_gene
        self.gene_name = gene_name
        self.selected = selected
    def serialize_for_display(self):
        """Serialize for the way we are returning json"""
        return {
            "score": self.score,
            "start": self.start,
            "seq": self.seq,
            "PAM": self.PAM,
            "selected": self.selected,
        }
    def __cmp__(self, other):
        # Python 2 ordering hook: rank guides by their on-target score.
        return cmp(self.score, other.score)
# Guide-search parameters: SpCas9 "NGG" PAM, 20-nt protospacer 5' of the PAM.
params = {
    "PAM": "NGG",
    "protospacer_len": 20,
    "prime5": True,
    "use_Doench": True,
    "quantity": 100
}
# Load the pre-trained Azimuth on-target scoring model shipped with the package.
print "loading azimuth models", time.time() - start_time
azimuth_saved_model_dir = os.path.join(os.path.dirname(azimuth.__file__), 'saved_models')
model_name = 'V3_model_full.pickle'
azimuth_model_file = os.path.join(azimuth_saved_model_dir, model_name)
with open(azimuth_model_file, 'rb') as f:
    azimuth_model = pickle.load(f)
# Memoization cache: 30-mer sequence -> Azimuth score.
azimuth_scores = {}
def get_azimuth_score(mer30):
    """Return the Azimuth on-target score of a 30-mer, caching results."""
    if mer30 in azimuth_scores:
        return azimuth_scores[mer30]
    else:
        score = azimuth.model_comparison.predict(np.array([mer30]), aa_cut=None, percent_peptide=None, model=azimuth_model, model_file=azimuth_model_file)[0]
        azimuth_scores[mer30] = score
        return score
# Turn the PAM into a regex: each "N" matches any nucleotide.
modPAM = params["PAM"].upper()
modPAM = modPAM.replace('N', '[ATCG]')
params["modPAM"] = modPAM
params["PAM_len"] = len(params["PAM"])
# Reverse complement of a DNA string (N maps to N).
revcompl = lambda x: ''.join([{'A':'T','C':'G','G':'C','T':'A','N':'N'}[B] for B in x][::-1])
def gene_exon_file(gene, exon):
    """Return the text of exon file "<gene>_<exon>", or None when it does not exist."""
    filename = gene + "_" + str(exon)
    seq_path = os.path.join('../GRCh37_exons/', filename)
    if os.path.isfile(seq_path):
        with open(seq_path) as infile:
            return infile.read()
    else:
        return None
def exome(gene):
    """Concatenate the sequences of consecutive exon files of *gene*, in order."""
    seq = ""
    exon = 0
    exon_seq = gene_exon_file(gene, exon)
    # Stop at the first missing (or empty) exon file.
    while exon_seq:
        seq += exon_seq
        exon += 1
        exon_seq = gene_exon_file(gene, exon)
    return seq
def random_guides(gene, library_size):
    """Pick up to library_size random PAM sites in *gene* and score them.

    :param gene: dict with at least "ensembl_id" and "name" keys
    :param library_size: number of randomly chosen guides to generate
    :return: list of serialized GuideRNA dicts (at most params["quantity"])
    """
    seq = exome(gene["ensembl_id"])
    q = PriorityQueue()
    def process_guide(m, selected, max_queue_size, seq):
        # Score the guide at regex match *m* and enqueue it if there is room.
        PAM_start = m.start()
        # Azimuth needs the 4 nt before and 6 nt after the 20-mer (30-mer total).
        mer30 = seq[PAM_start-params["protospacer_len"]-4:PAM_start+params["PAM_len"]+3]
        if len(mer30) != 30:
            print "Error! The following guide is not long enough:", seq, mer30, gene["ensembl_id"], gene["name"]
        score = get_azimuth_score(mer30)
        protospacer = ""
        PAM = ""
        if params["prime5"]:
            # Protospacer lies immediately 5' of the PAM.
            protospacer = seq[PAM_start-params["protospacer_len"]:PAM_start]
            PAM = seq[PAM_start:PAM_start+params["PAM_len"]]
        else:
            # Protospacer lies immediately 3' of the PAM.
            protospacer = seq[PAM_start+params["PAM_len"]:PAM_start+params["PAM_len"]+params["protospacer_len"]]
            PAM = seq[PAM_start:PAM_start+params["PAM_len"]]
        potential_gRNA = GuideRNA(selected, PAM_start-params["protospacer_len"], protospacer, PAM, score, -1, gene["ensembl_id"], gene["name"])
        if q.qsize() < max_queue_size:
            q.put(potential_gRNA)
        # We remove the other case, since we want to add anything that is given to this function.
        #### Otherwise, take higher score
        ###else:
        ###    lowest_gRNA = q.get()
        ###    if potential_gRNA.score > lowest_gRNA.score:
        ###        q.put(potential_gRNA)
        ###    else:
        ###        q.put(lowest_gRNA)
    # Logic continues here, outside process_guide
    # NOTE(review): reverse-strand matches are processed against seq_rc, so their
    # 'start' is a strand-local coordinate — confirm downstream expects this.
    seq_rc = revcompl(seq)
    forward_matches = [('Forward', m) for m in re.finditer(params["modPAM"], seq)]
    reverse_matches = [('Reverse', m) for m in re.finditer(params["modPAM"], seq_rc)]
    all_matches = forward_matches + reverse_matches
    random.shuffle(all_matches)
    # generate library_size number of guides
    required = library_size
    for match in all_matches:
        if required == 0:
            break
        direction, m = match
        # Skip matches whose 30-mer context would run off either sequence end.
        if params["prime5"] and (m.start() < params["protospacer_len"] + 4 or m.start() + params["PAM_len"] + 3 > len(seq)):
            continue
        elif not params["prime5"] and (m.start() + params["PAM_len"] + params["protospacer_len"] > len(seq)):
            continue
        if direction == 'Forward':
            process_guide(m, True, params["quantity"], seq)
        else:
            process_guide(m, True, params["quantity"], seq_rc)
        required -= 1
    # Pop gRNAs into our 'permament' storage, i.e. return them.
    gRNAs = []
    while not q.empty():
        gRNA = q.get()
        gRNAs.append(gRNA.serialize_for_display())
    return gRNAs
| false | true |
f7fcf5be5f4ea2467d0ae6c51612ad43b19030c2 | 82 | py | Python | core/arxiv/submission/services/stream/__init__.py | NeolithEra/arxiv-submission-core | d4f20be62a882d2d5f3d1584eda69e7d90ca2c12 | [
"MIT"
] | 14 | 2019-05-26T22:52:17.000Z | 2021-11-05T12:26:46.000Z | core/arxiv/submission/services/stream/__init__.py | NeolithEra/arxiv-submission-core | d4f20be62a882d2d5f3d1584eda69e7d90ca2c12 | [
"MIT"
] | 30 | 2018-01-31T19:16:08.000Z | 2018-12-08T08:41:04.000Z | core/arxiv/submission/services/stream/__init__.py | NeolithEra/arxiv-submission-core | d4f20be62a882d2d5f3d1584eda69e7d90ca2c12 | [
"MIT"
] | 8 | 2019-01-10T22:01:39.000Z | 2021-11-20T21:44:51.000Z | """Emits events to the submission stream."""
from .stream import StreamPublisher
| 20.5 | 44 | 0.768293 |
from .stream import StreamPublisher
| true | true |
f7fcf5eed351ac2824c32556f398df8a9228f07d | 66,833 | py | Python | bcdi/postprocessing/facet_recognition.py | DSimonne/bcdi | 5740a75576d7c3760ac72358acfb51321d51f82b | [
"CECILL-B"
] | 1 | 2021-11-03T08:49:41.000Z | 2021-11-03T08:49:41.000Z | bcdi/postprocessing/facet_recognition.py | DSimonne/bcdi | 5740a75576d7c3760ac72358acfb51321d51f82b | [
"CECILL-B"
] | null | null | null | bcdi/postprocessing/facet_recognition.py | DSimonne/bcdi | 5740a75576d7c3760ac72358acfb51321d51f82b | [
"CECILL-B"
] | null | null | null | # -*- coding: utf-8 -*-
# BCDI: tools for pre(post)-processing Bragg coherent X-ray diffraction imaging data
# (c) 07/2017-06/2019 : CNRS UMR 7344 IM2NP
# (c) 07/2019-05/2021 : DESY PHOTON SCIENCE
# authors:
# Jerome Carnis, carnis_jerome@yahoo.fr
"""Functions related to facet recognition of nanocrystals."""
import sys
from numbers import Real
import numpy as np
from matplotlib import patches
from matplotlib import pyplot as plt
from scipy import ndimage, stats
from scipy.interpolate import RegularGridInterpolator, griddata
from scipy.ndimage.measurements import center_of_mass
from scipy.signal import convolve
from skimage.feature import corner_peaks
from skimage.segmentation import watershed
from bcdi.graph import graph_utils as gu
from bcdi.graph.colormap import ColormapFactory
from bcdi.utils import utilities as util
from bcdi.utils import validation as valid
# Default colormap shared by all plots in this module.
default_cmap = ColormapFactory().cmap
def calc_stereoproj_facet(projection_axis, vectors, radius_mean, stereo_center):
    """
    Calculate the coordinates of normals in the stereographic projection.

    The calculation depends on the reference axis. See: Nanoscale 10, 4833 (2018).

    :param projection_axis: the projection is performed on q plane perpendicular to
     that axis (0, 1 or 2)
    :param vectors: array of vectors to be projected (nb_vectors rows x 3 columns)
    :param radius_mean: q radius from which the projection will be done
    :param stereo_center: offset of the projection plane along the reflection axis,
     in the same unit as radius_mean. If stereo_center = 0, the projection plane
     will be the equator.
    :return: a tuple (stereo_proj, uv_labels) where stereo_proj contains the
     coordinates of the stereographic projection from the South pole (1st and 2nd
     columns) and from the North pole (3rd and 4th columns), rescaled from
     radius_mean to 90 degrees, and uv_labels names the axes corresponding to u
     and v respectively (used in plots)
    """
    if projection_axis not in [0, 1, 2]:
        raise ValueError(
            "reflection_axis should be a basis axis of the reconstructed array"
        )
    vectors = np.asarray(vectors)

    # the in-plane coordinates (u, v) are read from the two basis axes other
    # than the projection axis, in increasing index order
    u_axis, v_axis = (axis for axis in (0, 1, 2) if axis != projection_axis)
    uv_labels = ("axis {}".format(u_axis), "axis {}".format(v_axis))

    # component of each vector along the projection axis
    height = vectors[:, projection_axis]
    denom_south = radius_mean + height - stereo_center  # projection from South pole
    denom_north = radius_mean + stereo_center - height  # projection from North pole

    # columns: u_south, v_south, u_north, v_north (vectorized, replaces the
    # per-row Python loops of the previous implementation)
    stereo_proj = np.empty((vectors.shape[0], 4), dtype=vectors.dtype)
    stereo_proj[:, 0] = radius_mean * vectors[:, u_axis] / denom_south
    stereo_proj[:, 1] = radius_mean * vectors[:, v_axis] / denom_south
    stereo_proj[:, 2] = radius_mean * vectors[:, u_axis] / denom_north
    stereo_proj[:, 3] = radius_mean * vectors[:, v_axis] / denom_north

    stereo_proj = stereo_proj / radius_mean * 90  # rescale from radius_mean to 90
    return stereo_proj, uv_labels
def detect_edges(faces):
    """
    Find indices of vertices defining non-shared edges.

    :param faces: ndarray of m*3 faces
    :return: 1D array of indices of vertices defining non-shared edges
     (near hole...)
    """
    # Each triangle contributes three undirected edges: (v0,v1), (v0,v2), (v1,v2).
    pairs = np.vstack((faces[:, :2], faces[:, ::2], faces[:, 1:]))
    # Canonical ordering within each pair so (a, b) and (b, a) compare equal.
    pairs = np.sort(pairs, axis=1)
    # An edge shared by two triangles appears twice; boundary edges appear once.
    unique_rows, counts = np.unique(pairs, return_counts=True, axis=0)
    return unique_rows[counts == 1].flatten()
def distance_threshold(fit, indices, plane_shape, max_distance=0.90):
    """
    Filter out pixels depending on their distance to a fit plane.

    :param fit: coefficients of the plane (a, b, c, d) such that a*x + b*y + c*z + d = 0
    :param indices: tuple or array of plane indices, x being the 1st tuple element or
     array row, y the 2nd tuple element or array row and z the third tuple element or
     array row
    :param plane_shape: shape of the initial plane array
    :param max_distance: max distance allowed from the fit plane in pixels
    :return: the updated plane, a stop flag (True when no point lies within
     max_distance of the plane)
    """
    indices = np.asarray(indices)
    plane = np.zeros(plane_shape, dtype=int)
    if len(indices[0]) == 0:
        return plane, True

    # remove outsiders based on their distance to the plane, vectorized:
    # |a*x + b*y + c*z + d| / ||(a, b, c)||
    plane_normal = np.array(
        [fit[0], fit[1], fit[2]]
    )  # normal is [a, b, c] if ax+by+cz+d=0
    distances = np.abs(
        fit[0] * indices[0] + fit[1] * indices[1] + fit[2] * indices[2] + fit[3]
    ) / np.linalg.norm(plane_normal)
    within = distances < max_distance
    if not within.any():
        print("Distance_threshold: no points for plane")
        return plane, True
    plane[indices[0, within], indices[1, within], indices[2, within]] = 1
    return plane, False
def equirectangular_proj(
    normals,
    intensity,
    cmap=default_cmap,
    bw_method=0.03,
    min_distance=10,
    background_threshold=-0.35,
    debugging=False,
):
    """
    Detect facets in an object.

    It uses an equirectangular projection of normals to mesh triangles and watershed
    segmentation.

    :param normals: normals array
    :param intensity: intensity array
    :param cmap: colormap used for plotting
    :param bw_method: bw_method of gaussian_kde
    :param min_distance: min_distance of corner_peaks()
    :param background_threshold: threshold for background determination
     (depth of the KDE)
    :param debugging: if True, show plots for debugging
    :return: ndarray of labelled regions and the (longitude, latitude)
     coordinates of each normal
    """
    # check normals for nan (argwhere lists one entry per NaN element; the stride
    # of 3 assumes NaNs occur across whole rows — one entry per column)
    list_nan = np.argwhere(np.isnan(normals))
    normals = np.delete(normals, list_nan[::3, 0], axis=0)
    intensity = np.delete(intensity, list_nan[::3, 0], axis=0)
    # calculate latitude and longitude from xyz,
    # this is equal to the equirectangular flat square projection
    long_lat = np.zeros((normals.shape[0], 2), dtype=normals.dtype)
    for i in range(normals.shape[0]):
        # arctan2(0, 0) is undefined for direction: leave such normals at (0, 0)
        if normals[i, 1] == 0 and normals[i, 0] == 0:
            continue
        long_lat[i, 0] = np.arctan2(normals[i, 1], normals[i, 0])  # longitude
        long_lat[i, 1] = np.arcsin(normals[i, 2])  # latitude
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(long_lat[:, 0], long_lat[:, 1], c=intensity, cmap=cmap)
    ax.set_xlim(-np.pi, np.pi)
    ax.set_ylim(-np.pi / 2, np.pi / 2)
    plt.axis("scaled")
    plt.title("Equirectangular projection of the weighted point densities before KDE")
    plt.pause(0.1)
    # kernel density estimation
    kde = stats.gaussian_kde(long_lat.T, bw_method=bw_method)
    # input should be a 2D array with shape (# of dims, # of data)
    # Create a regular 3D grid
    yi, xi = np.mgrid[
        -np.pi / 2 : np.pi / 2 : 150j, -np.pi : np.pi : 300j
    ]  # vertical, horizontal
    # Evaluate the KDE on a regular grid...
    coords = np.vstack([item.ravel() for item in [xi, yi]])
    # coords is a contiguous flattened array of coordinates of shape (2, size(xi))
    density = -1 * kde(coords).reshape(
        xi.shape
    )  # inverse density for later watershed segmentation
    fig = plt.figure()
    ax = fig.add_subplot(111)
    scatter = ax.scatter(xi, yi, c=density, cmap=cmap, vmin=-1.5, vmax=0)
    ax.set_xlim(-np.pi, np.pi)
    ax.set_ylim(-np.pi / 2, np.pi / 2)
    fig.colorbar(scatter)
    plt.axis("scaled")
    plt.title("Equirectangular projection of the KDE")
    plt.pause(0.1)
    # identification of local minima: shallow KDE values become the background
    density[density > background_threshold] = 0  # define the background
    mask = np.copy(density)
    mask[mask != 0] = 1
    plt.figure()
    plt.imshow(mask, cmap=cmap, interpolation="nearest")
    plt.title("Background mask")
    plt.gca().invert_yaxis()
    fig = plt.figure()
    ax = fig.add_subplot(111)
    scatter = ax.scatter(xi, yi, c=density, cmap=cmap)
    ax.set_xlim(-np.pi, np.pi)
    ax.set_ylim(-np.pi / 2, np.pi / 2)
    fig.colorbar(scatter)
    plt.axis("scaled")
    plt.title("KDE after background definition")
    plt.pause(0.1)
    # Generate the markers as local minima of the distance to the background
    distances = ndimage.distance_transform_edt(density)
    if debugging:
        plt.figure()
        plt.imshow(distances, cmap=cmap, interpolation="nearest")
        plt.title("Distances")
        plt.gca().invert_yaxis()
        plt.pause(0.1)
    # find peaks
    local_maxi = corner_peaks(
        distances, exclude_border=False, min_distance=min_distance, indices=False
    )  # NOTE(review): the `indices` kwarg was removed in scikit-image >= 0.20 — confirm the pinned version
    if debugging:
        plt.figure()
        plt.imshow(local_maxi, interpolation="nearest")
        plt.title("local_maxi")
        plt.gca().invert_yaxis()
        plt.pause(0.1)
    # define markers for each peak
    markers = ndimage.label(local_maxi)[0]
    if debugging:
        plt.figure()
        plt.imshow(markers, interpolation="nearest")
        plt.title("markers")
        plt.colorbar()
        plt.gca().invert_yaxis()
        plt.pause(0.1)
    # watershed segmentation on the inverted distance map, limited to the mask
    labels = watershed(-1 * distances, markers, mask=mask)
    print("There are", str(labels.max()), "facets")  # label 0 is the background
    plt.figure()
    plt.imshow(labels, cmap=cmap, interpolation="nearest")
    plt.title("Separated objects")
    plt.colorbar()
    plt.gca().invert_yaxis()
    plt.pause(0.1)
    return labels, long_lat
def find_facet(
    refplane_indices,
    surf_indices,
    original_shape,
    step_shift,
    plane_label,
    plane_coeffs,
    min_points,
    debugging=False,
):
    """
    Shift a fit plane along its normal until it reaches the surface of a faceted object.

    :param refplane_indices: a tuple of 3 arrays (1D, length N) describing the
     coordinates of the plane voxels, x values being the 1st tuple element, y values
     the 2nd tuple element and z values the 3rd tuple element (output of np.nonzero)
    :param surf_indices: a tuple of 3 arrays (1D, length N) describing the coordinates
     of the surface voxels, x values being the 1st tuple element, y values the 2nd
     tuple element and z values the 3rd tuple element (output of np.nonzero)
    :param original_shape: the shape of the full dataset (amplitude object,
     eventually upsampled)
    :param step_shift: the amplitude of the shift to be applied to the plane
     along its normal
    :param plane_label: the label of the plane, used in comments
    :param plane_coeffs: a tuple of coefficient (a, b, c, d) such that ax+by+cz+d=0
    :param min_points: threshold, minimum number of points that should coincide
     between the fit plane and the object surface
    :param debugging: True to see debugging plots
    :return: the shift that needs to be applied to the fit plane in order to best
     match with the object surface
    """
    if not isinstance(refplane_indices, tuple):
        raise ValueError("refplane_indices should be a tuple of 3 1D ndarrays")
    if not isinstance(surf_indices, tuple):
        raise ValueError("surf_indices should be a tuple of 3 1D ndarrays")

    surf0, surf1, surf2 = surf_indices
    # hash the surface voxels once: the matching step inside the while loop becomes
    # O(nb_plane_points) instead of the former O(nb_plane_points * nb_surf_points)
    # double loop. np.nonzero outputs unique coordinates, so no count is lost.
    surf_set = set(zip(surf0.tolist(), surf1.tolist(), surf2.tolist()))
    plane_normal = np.array(
        [plane_coeffs[0], plane_coeffs[1], plane_coeffs[2]]
    )  # normal is [a, b, c] if ax+by+cz+d=0

    # loop until the surface is crossed or the iteration limit is reached
    common_previous = 0
    found_plane = 0
    nbloop = 1
    crossed_surface = 0
    shift_direction = 0
    while found_plane == 0:
        # shift the plane voxels along the plane normal
        plane_newindices0, plane_newindices1, plane_newindices2 = offset_plane(
            indices=refplane_indices,
            offset=nbloop * step_shift,
            plane_normal=plane_normal,
        )
        # count shifted plane voxels which land exactly on a surface voxel
        # (duplicated plane voxels after rounding are counted once each,
        # as in the original nested-loop implementation)
        common_points = sum(
            point in surf_set
            for point in zip(
                plane_newindices0.tolist(),
                plane_newindices1.tolist(),
                plane_newindices2.tolist(),
            )
        )

        if debugging:
            # signed mean distance of the surface voxels to the shifted plane
            temp_coeff3 = plane_coeffs[3] - nbloop * step_shift
            dist = (
                plane_coeffs[0] * surf0
                + plane_coeffs[1] * surf1
                + plane_coeffs[2] * surf2
                + temp_coeff3
            ) / np.linalg.norm(plane_normal)
            temp_mean_dist = dist.mean()
            plane = np.zeros(original_shape)
            plane[plane_newindices0, plane_newindices1, plane_newindices2] = 1

            # plot plane points overlaid with the support
            gu.scatter_plot_overlaid(
                arrays=(
                    np.concatenate(
                        (
                            plane_newindices0[:, np.newaxis],
                            plane_newindices1[:, np.newaxis],
                            plane_newindices2[:, np.newaxis],
                        ),
                        axis=1,
                    ),
                    np.concatenate(
                        (
                            surf0[:, np.newaxis],
                            surf1[:, np.newaxis],
                            surf2[:, np.newaxis],
                        ),
                        axis=1,
                    ),
                ),
                markersizes=(8, 2),
                markercolors=("b", "r"),
                labels=("axis 0", "axis 1", "axis 2"),
                title="Plane"
                + str(plane_label)
                + " after shifting - iteration"
                + str(nbloop),
            )
            print(
                "(while) iteration ",
                nbloop,
                "- Mean distance of the plane to outer shell = "
                + str("{:.2f}".format(temp_mean_dist))
                + "\n pixels - common_points = ",
                common_points,
            )

        if common_points != 0:  # some plane points are in commun with the surface layer
            # the original code had two identical "keep scanning" branches
            # (common >= previous, or common >= min_points); only the case
            # "fewer points than before AND below the threshold" stops the scan
            if common_points < common_previous and common_points < min_points:
                # try to keep enough points for statistics, half step back
                found_plane = 1
                print(
                    "(while, common_points != 0), "
                    "exiting while loop after threshold reached - ",
                    common_previous,
                    "points belonging to the facet for plane ",
                    plane_label,
                    "- next step common points=",
                    common_points,
                )
            else:
                common_previous = common_points
                print(
                    "(while, common_points != 0), iteration ",
                    nbloop,
                    " - ",
                    common_previous,
                    "points belonging to the facet for plane ",
                    plane_label,
                )
                nbloop = nbloop + 1
                crossed_surface = 1
        else:  # no commun points, the plane is not intersecting the surface layer
            if crossed_surface == 1:  # found the outer shell, which is 1 step before
                found_plane = 1
                print(
                    "(while, common_points = 0), exiting while loop - ",
                    common_previous,
                    "points belonging to the facet for plane ",
                    plane_label,
                    "- next step common points=",
                    common_points,
                )
            elif not shift_direction:
                if nbloop < 5:  # continue to scan
                    print(
                        "(while, common_points = 0), iteration ",
                        nbloop,
                        " - ",
                        common_previous,
                        "points belonging to the facet for plane ",
                        plane_label,
                    )
                    nbloop = nbloop + 1
                else:  # scan in the other direction
                    shift_direction = 1
                    print("Shift scanning direction")
                    step_shift = -1 * step_shift
                    nbloop = 1
            else:  # shift_direction = 1
                if nbloop < 10:
                    print(
                        "(while, common_points = 0), iteration ",
                        nbloop,
                        " - ",
                        common_previous,
                        "points belonging to the facet for plane ",
                        plane_label,
                    )
                    nbloop = nbloop + 1
                else:  # we were already unsuccessfull in the other direction, give up
                    print(
                        "(while, common_points = 0),"
                        " no point from support is intersecting the plane ",
                        plane_label,
                    )
                    break
    return (nbloop - 1) * step_shift
def find_neighbours(vertices, faces):
    """
    Get the list of neighbouring vertices for each vertex.

    :param vertices: ndarray of n*3 vertices
    :param faces: ndarray of m*3 faces (vertex indices)
    :return: list of n sorted lists of unique neighbour indices. A vertex which
     belongs to no face gets an empty list (the previous implementation raised
     TypeError in that case, iterating over a None placeholder).
    """
    neighbors = [set() for _ in range(vertices.shape[0])]
    for vert0, vert1, vert2 in faces:
        # each vertex of a triangle is a neighbour of the two other vertices
        neighbors[vert0].update((vert1, vert2))
        neighbors[vert1].update((vert2, vert0))
        neighbors[vert2].update((vert0, vert1))
    # sets already guarantee the unicity of indices in each sublist
    return [sorted(neighbor) for neighbor in neighbors]
def fit_plane(plane, label, debugging=False):
    """
    Fit a plane to labelled indices using the equation a*x + b*y + c*z + d = 0.

    :param plane: 3D binary array, where the voxels belonging to the plane are set
     to 1 and others are set to 0. Modified in place.
    :param label: int, label of the plane used for the title in plots
    :param debugging: show plots for debugging
    :return: fit parameters (a, b, c, d), plane indices after filtering,
     errors associated, a stop flag
    """
    indices = np.asarray(np.nonzero(plane))
    no_points = False
    if len(indices[0]) == 0:
        no_points = True
        return 0, indices, 0, no_points

    # two passes of outlier filtering before fitting
    for idx in range(2):
        # remove isolated points, which probably do not belong to the plane
        if debugging:
            gu.scatter_plot(
                np.asarray(np.nonzero(plane)).transpose(),
                labels=("axis 0", "axis 1", "axis 2"),
                title="Points before coordination threshold plane "
                + str(label)
                + f"\niteration {idx}",
            )
        for point in range(indices.shape[1]):
            # coordination number of the voxel within a 5x5x5 neighbourhood
            neighbors = plane[
                indices[0, point] - 2 : indices[0, point] + 3,
                indices[1, point] - 2 : indices[1, point] + 3,
                indices[2, point] - 2 : indices[2, point] + 3,
            ].sum()
            if neighbors < 5:  # isolated voxel, discard it
                plane[indices[0, point], indices[1, point], indices[2, point]] = 0
        print(
            "Fit plane",
            label,
            ", ",
            str(indices.shape[1] - plane[plane == 1].sum()),
            "points isolated, ",
            str(plane[plane == 1].sum()),
            "remaining",
        )
        if debugging:
            gu.scatter_plot(
                np.asarray(np.nonzero(plane)).transpose(),
                labels=("axis 0", "axis 1", "axis 2"),
                title="Points after coordination threshold plane "
                + str(label)
                + f"\niteration {idx}",
            )
        # update plane indices
        indices = np.asarray(np.nonzero(plane))
        if len(indices[0]) == 0:
            no_points = True
            return 0, indices, 0, no_points

        # remove also points farther away than the median distance to the COM
        dist = np.zeros(indices.shape[1])
        x_com, y_com, z_com = center_of_mass(plane)
        for point in range(indices.shape[1]):
            dist[point] = np.sqrt(
                (indices[0, point] - x_com) ** 2
                + (indices[1, point] - y_com) ** 2
                + (indices[2, point] - z_com) ** 2
            )
        median_dist = np.median(dist)
        if debugging:
            gu.scatter_plot(
                np.asarray(np.nonzero(plane)).transpose(),
                labels=("axis 0", "axis 1", "axis 2"),
                title="Points before distance threshold plane "
                + str(label)
                + f"\niteration {idx}",
            )
        for point in range(indices.shape[1]):
            if dist[point] > median_dist:
                plane[indices[0, point], indices[1, point], indices[2, point]] = 0
        print(
            "Fit plane",
            label,
            ", ",
            str(indices.shape[1] - plane[plane == 1].sum()),
            "points too far from COM, ",
            str(plane[plane == 1].sum()),
            "remaining",
        )
        if debugging:
            gu.scatter_plot(
                np.asarray(np.nonzero(plane)).transpose(),
                labels=("axis 0", "axis 1", "axis 2"),
                title="Points after distance threshold plane "
                + str(label)
                + f"\niteration {idx}",
            )
        # update plane indices and check if enough points remain
        indices = np.asarray(np.nonzero(plane))
        if len(indices[0]) < 5:
            no_points = True
            return 0, indices, 0, no_points

    # the fit parameters are (a, b, c, d) such that a*x + b*y + c*z + d = 0
    params, std_param, valid_plane = util.plane_fit(
        indices=indices, label=label, threshold=1, debugging=debugging
    )
    if not valid_plane:
        # bug fix: index with a tuple of coordinate arrays so only the listed voxels
        # are cleared; `plane[indices]` with a (3, N) array indexes along axis 0 and
        # would zero entire 2D slices of the array instead
        plane[tuple(indices)] = 0
        no_points = True
    return params, indices, std_param, no_points
def grow_facet(fit, plane, label, support, max_distance=0.90, debugging=True):
    """
    Find voxels of the object which belong to a facet.

    It uses the facet plane equation and the distance to the plane to find such voxels.

    :param fit: coefficients of the plane (a, b, c, d) such that a*x + b*y + c*z + d = 0
    :param plane: 3D binary support of the plane, with shape of the full dataset
    :param label: the label of the plane processed
    :param support: 3D binary support of the reconstructed object,
     with shape of the full dataset
    :param max_distance: in pixels, maximum allowed distance to the facet plane
     of a voxel
    :param debugging: set to True to see plots
    :return: the updated plane, a stop flag
    """
    nbz, nby, nbx = plane.shape
    voxels = np.nonzero(plane)
    if len(voxels[0]) == 0:
        return plane, True

    # bounding box of the plane, enlarged by 20 voxels and clipped to the array shape
    z_lo, z_hi = max(voxels[0].min() - 20, 0), min(voxels[0].max() + 21, nbz)
    y_lo, y_hi = max(voxels[1].min() - 20, 0), min(voxels[1].max() + 21, nby)
    x_lo, x_hi = max(voxels[2].min() - 20, 0), min(voxels[2].max() + 21, nbx)

    # find nearby voxels using the coordination number
    cropped = np.copy(plane[z_lo:z_hi, y_lo:y_hi, x_lo:x_hi])
    coordination = np.rint(
        convolve(cropped, np.ones((3, 3, 3)), mode="same")
    ).astype(int)
    coordination[np.nonzero(coordination)] = 1

    if debugging:
        gu.scatter_plot_overlaid(
            arrays=(
                np.asarray(np.nonzero(coordination)).T,
                np.asarray(np.nonzero(cropped)).T,
            ),
            markersizes=(2, 8),
            markercolors=("b", "r"),
            labels=("x", "y", "z"),
            title="Plane" + str(label) + " before facet growing and coord matrix",
        )

    # grow the plane with the nearby voxels, then drop those outside the support
    candidate = np.copy(plane)
    candidate[z_lo:z_hi, y_lo:y_hi, x_lo:x_hi] = coordination
    candidate[support == 0] = 0

    # keep only voxels close enough to the facet plane
    plane, no_points = distance_threshold(
        fit=fit,
        indices=np.nonzero(candidate),
        plane_shape=candidate.shape,
        max_distance=max_distance,
    )

    # reject voxels whose local surface gradient disagrees with the plane normal
    plane_normal = fit[:-1]  # normal is [a, b, c] if ax+by+cz+d=0
    voxels = np.nonzero(plane)
    gradients = surface_gradient(
        list(zip(voxels[0], voxels[1], voxels[2])), support=support
    )
    count_grad = 0
    for idx in range(len(voxels[0])):
        # 0.85 is too restrictive checked CH4760 S11 plane 1
        if np.dot(plane_normal, gradients[idx]) < 0.75:
            plane[voxels[0][idx], voxels[1][idx], voxels[2][idx]] = 0
            count_grad += 1

    voxels = np.nonzero(plane)
    if debugging and len(voxels[0]) != 0:
        gu.scatter_plot(
            array=np.asarray(voxels).T,
            labels=("x", "y", "z"),
            title="Plane" + str(label) + " after 1 cycle of facet growing",
        )
        print(f"{count_grad} points excluded by gradient filtering")
        print(str(len(voxels[0])) + " after 1 cycle of facet growing")
    return plane, no_points
def offset_plane(indices, offset, plane_normal):
    """
    Shift plane indices by the offset value in order to scan perpendicular to the plane.

    :param indices: tuple of 3 1D ndarrays (array shape = nb_points)
    :param offset: offset to be applied to the indices (offset of the plane)
    :param plane_normal: ndarray of 3 elements, normal to the plane
    :return: tuple of 3 1D ndarrays of int, the offseted indices
    """
    if not isinstance(indices, tuple):
        raise ValueError("indices should be a tuple of 3 1D ndarrays")
    # normalize the normal once; np.dot of a basis vector with the unit normal
    # simply selects the corresponding component, so the three former dot products
    # reduce to indexing (numerically identical, computed once instead of thrice)
    unit_normal = plane_normal / np.linalg.norm(plane_normal)
    return tuple(
        np.rint(indices[axis] + offset * unit_normal[axis]).astype(int)
        for axis in range(3)
    )
def remove_duplicates(vertices, faces, debugging=False):
    """
    Remove duplicates in a list of vertices and faces.

    A face is a triangle made of three vertices.

    :param vertices: a ndarray of vertices, shape (N, 3)
    :param faces: a ndarray of vertex indices, shape (M, 3)
    :param debugging: True to see which vertices are duplicated and how lists are
     modified
    :return: the updated vertices and faces with duplicates removed in place
    """
    # find indices which are duplicated
    uniq_vertices, uniq_inverse = np.unique(vertices, axis=0, return_inverse=True)
    indices, count = np.unique(uniq_inverse, return_counts=True)
    duplicated_indices = indices[count != 1]  # list of vertices which are not unique

    # for each duplicated vertex, build the list of the corresponding identical vertices
    list_duplicated = []
    for value in duplicated_indices:
        # bug fix: a vertex is identical only when ALL three coordinates match.
        # The former elementwise comparison with a [::3] stride miscounted vertices
        # sharing only one or two coordinates with the duplicated one.
        same_vertices = np.flatnonzero(
            (vertices == uniq_vertices[value, :]).all(axis=1)
        )
        list_duplicated.append(list(same_vertices))

    # remove duplicates in vertices
    remove_vertices = [value for sublist in list_duplicated for value in sublist[1:]]
    vertices = np.delete(vertices, remove_vertices, axis=0)
    print(len(remove_vertices), "duplicated vertices removed")

    # remove duplicated_vertices in faces
    # bug fix: iterate by position so that each group sees the index shifts applied
    # by previous groups (the former `enumerate` kept yielding stale sublists from
    # the original list object after `list_duplicated` was rebound)
    for idx in range(len(list_duplicated)):
        temp_array = list_duplicated[idx]
        for idy in range(1, len(temp_array)):
            duplicated_value = temp_array[idy]
            # temp_array[0] is the unique value, others are duplicates
            faces[faces == duplicated_value] = temp_array[0]

            # all indices above duplicated_value have to be decreased by 1
            # to keep the match with the number of vertices
            faces[faces > duplicated_value] = faces[faces > duplicated_value] - 1

            # update accordingly all indices above temp_array[idy]
            if debugging:
                print("temp_array before", temp_array)
                print("list_duplicated before", list_duplicated)
            list_duplicated = [
                [
                    (value - 1) if value > duplicated_value else value
                    for value in sublist
                ]
                for sublist in list_duplicated
            ]
            temp_array = list_duplicated[idx]
            if debugging:
                print("temp_array after", temp_array)
                print("list_duplicated after", list_duplicated)

    # look for faces with 2 identical vertices
    # (cannot define later a normal to these faces)
    remove_faces = []
    for idx in range(faces.shape[0]):
        if np.unique(faces[idx, :], axis=0).shape[0] != faces[idx, :].shape[0]:
            remove_faces.append(idx)
    faces = np.delete(faces, remove_faces, axis=0)
    print(len(remove_faces), "faces with identical vertices removed")
    return vertices, faces
def surface_indices(surface, plane_indices, margin=3):
    """
    Find surface indices potentially belonging to a plane.

    It crops the surface around the plane with a certain margin, and finds the
    corresponding surface indices.

    :param surface: the 3D surface binary array
    :param plane_indices: tuple of 3 1D-arrays of plane indices
    :param margin: margin to include around plane indices, in pixels
    :return: 3*1D arrays of surface indices
    """
    valid.valid_ndarray(surface, ndim=3)
    if not isinstance(plane_indices, tuple):
        plane_indices = tuple(plane_indices)
    # bug fix: clamp the crop start at 0. When min() - margin was negative, the
    # slice start wrapped around to the end of the axis and cropped the wrong
    # region (usually an empty one), losing surface voxels near the array border.
    start0 = max(plane_indices[0].min() - margin, 0)
    start1 = max(plane_indices[1].min() - margin, 0)
    start2 = max(plane_indices[2].min() - margin, 0)
    surf_indices = np.nonzero(
        surface[
            start0 : plane_indices[0].max() + margin,
            start1 : plane_indices[1].max() + margin,
            start2 : plane_indices[2].max() + margin,
        ]
    )
    # shift the cropped indices back to the frame of the full array,
    # using the same (clamped) offsets as the crop
    surf0 = surf_indices[0] + start0
    surf1 = surf_indices[1] + start1
    surf2 = surf_indices[2] + start2
    return surf0, surf1, surf2
def stereographic_proj(
    normals,
    intensity,
    max_angle,
    savedir,
    voxel_size,
    projection_axis,
    min_distance=10,
    background_south=-1000,
    background_north=-1000,
    save_txt=False,
    cmap=default_cmap,
    planes_south=None,
    planes_north=None,
    plot_planes=True,
    scale="linear",
    comment_fig="",
    debugging=False,
):
    """
    Detect facets in an object.

    It uses a stereographic projection of normals to mesh triangles and watershed
    segmentation. NOTE(review): this function is interactive, it blocks on
    ``fig.waitforbuttonpress()`` while the user reads the KDE threshold in the
    console; it is not suitable for headless execution as written.

    :param normals: array of normals to mesh triangles (nb_normals rows x 3 columns)
    :param intensity: array of intensities (nb_normals rows x 1 column)
    :param max_angle: maximum angle in degree of the stereographic projection
     (should be larger than 90)
    :param savedir: directory for saving figures
    :param voxel_size: tuple of three numbers corresponding to the real-space
     voxel size in each dimension
    :param projection_axis: the projection is performed on a plane perpendicular to
     that axis (0, 1 or 2)
    :param min_distance: min_distance of corner_peaks()
    :param background_south: threshold for background determination in the projection
     from South
    :param background_north: threshold for background determination in the projection
     from North
    :param save_txt: if True, will save coordinates in a .txt file
    :param cmap: colormap used for plotting pole figures
    :param planes_south: dictionary of crystallographic planes, e.g.
     {'111':angle_with_reflection}
    :param planes_north: dictionary of crystallographic planes, e.g.
     {'111':angle_with_reflection}
    :param plot_planes: if True, will draw circles corresponding to crystallographic
     planes in the pole figure
    :param scale: 'linear' or 'log', scale for the colorbar of the plot
    :param comment_fig: string, comment for the filename when saving figures
    :param debugging: show plots for debugging
    :return:
     - labels_south and labels_north as 2D arrays for each projection from South and
       North
     - a (Nx4) array: projected coordinates of normals from South (u column 0,
       v column 1) and North (u column2 , v column 3). The coordinates are in
       degrees, not indices.
     - the list of rows to remove
    """

    def mouse_move(event):
        """Write the density value at the position of the mouse pointer."""
        # reads the grids/densities defined later in the enclosing scope; the
        # handler is only connected after they exist
        nonlocal density_south, density_north, u_grid, v_grid, ax0, ax1
        if event.inaxes == ax0:
            index_u = util.find_nearest(u_grid[0, :], event.xdata, width=None)
            index_v = util.find_nearest(v_grid[:, 0], event.ydata, width=None)
            sys.stdout.write(
                "\rKDE South:" + str("{:.0f}".format(density_south[index_v, index_u]))
            )
            sys.stdout.flush()
        elif event.inaxes == ax1:
            index_u = util.find_nearest(u_grid[0, :], event.xdata, width=None)
            index_v = util.find_nearest(v_grid[:, 0], event.ydata, width=None)
            sys.stdout.write(
                "\rKDE North:" + str("{:.0f}".format(density_north[index_v, index_u]))
            )
            sys.stdout.flush()
        else:
            pass

    # normalize the comment used as a prefix in saved figure filenames
    if comment_fig and comment_fig[-1] != "_":
        comment_fig = comment_fig + "_"
    radius_mean = 1  # normals are normalized
    stereo_center = 0  # COM of the weighted point density,
    # where the projection plane intersects the reference axis
    # since the normals have their origin at 0,
    # the projection plane is the equator and stereo_center=0

    # check normals for nan
    # a nan normal produces 3 rows in argwhere (one per component), hence the
    # [::3] stride to recover one row index per normal
    list_nan = np.argwhere(np.isnan(normals))
    normals = np.delete(normals, list_nan[::3, 0], axis=0)
    intensity = np.delete(intensity, list_nan[::3, 0], axis=0)

    # recalculate normals considering the anisotropy of voxel sizes
    # (otherwise angles are wrong)
    # the stereographic projection is in reciprocal space,
    # therefore we need to use the reciprocal voxel sizes
    iso_normals = np.copy(normals)
    iso_normals[:, 0] = iso_normals[:, 0] * 2 * np.pi / voxel_size[0]
    iso_normals[:, 1] = iso_normals[:, 1] * 2 * np.pi / voxel_size[1]
    iso_normals[:, 2] = iso_normals[:, 2] * 2 * np.pi / voxel_size[2]
    # normalize iso_normals
    iso_normals_length = np.sqrt(
        iso_normals[:, 0] ** 2 + iso_normals[:, 1] ** 2 + iso_normals[:, 2] ** 2
    )
    iso_normals = iso_normals / iso_normals_length[:, np.newaxis]

    # calculate the normalized Euclidian metric coordinates u and v from xyz
    stereo_proj, uv_labels = calc_stereoproj_facet(
        projection_axis=projection_axis,
        vectors=iso_normals,
        radius_mean=radius_mean,
        stereo_center=stereo_center,
    )
    # stereo_proj[:, 0] is the euclidian u_south,
    # stereo_proj[:, 1] is the euclidian v_south
    # stereo_proj[:, 2] is the euclidian u_north,
    # stereo_proj[:, 3] is the euclidian v_north

    # remove intensity where stereo_proj is infinite
    list_bad = np.argwhere(
        np.isinf(stereo_proj) | np.isnan(stereo_proj)
    )  # elementwise or
    remove_row = list(set(list_bad[:, 0]))  # remove duplicated row indices
    print(
        "remove_row indices (the stereographic projection is infinite or nan): ",
        remove_row,
        "\n",
    )
    stereo_proj = np.delete(stereo_proj, remove_row, axis=0)
    intensity = np.delete(intensity, remove_row, axis=0)

    # plot and save the projection from the South pole
    fig, _ = gu.contour_stereographic(
        euclidian_u=stereo_proj[:, 0],
        euclidian_v=stereo_proj[:, 1],
        color=intensity,
        radius_mean=radius_mean,
        planes=planes_south,
        max_angle=max_angle,
        scale=scale,
        title="Projection from\nSouth pole",
        plot_planes=plot_planes,
        uv_labels=uv_labels,
        debugging=debugging,
    )
    fig.savefig(savedir + comment_fig + "South pole_" + scale + ".png")
    # plot and save the projection from the North pole
    fig, _ = gu.contour_stereographic(
        euclidian_u=stereo_proj[:, 2],
        euclidian_v=stereo_proj[:, 3],
        color=intensity,
        radius_mean=radius_mean,
        planes=planes_north,
        max_angle=max_angle,
        scale=scale,
        title="Projection from\nNorth pole",
        plot_planes=plot_planes,
        uv_labels=uv_labels,
        debugging=debugging,
    )
    fig.savefig(savedir + comment_fig + "North pole_" + scale + ".png")

    # regrid stereo_proj on a regular (u, v) grid spanning +/- max_angle degrees
    # stereo_proj[:, 0] is the euclidian u_south,
    # stereo_proj[:, 1] is the euclidian v_south
    # stereo_proj[:, 2] is the euclidian u_north,
    # stereo_proj[:, 3] is the euclidian v_north
    nb_points = 4 * max_angle + 1  # grid step of 0.25 degree
    v_grid, u_grid = np.mgrid[
        -max_angle : max_angle : (nb_points * 1j),
        -max_angle : max_angle : (nb_points * 1j),
    ]
    # v_grid changes vertically, u_grid horizontally
    nby, nbx = u_grid.shape
    density_south = griddata(
        (stereo_proj[:, 0], stereo_proj[:, 1]),
        intensity,
        (u_grid, v_grid),
        method="linear",
    )  # S
    density_north = griddata(
        (stereo_proj[:, 2], stereo_proj[:, 3]),
        intensity,
        (u_grid, v_grid),
        method="linear",
    )  # N

    # normalize for plotting
    density_south = density_south / density_south[density_south > 0].max() * 10000
    density_north = density_north / density_north[density_north > 0].max() * 10000

    if save_txt:
        # save metric coordinates in text file
        density_south[np.isnan(density_south)] = 0.0
        density_north[np.isnan(density_north)] = 0.0
        with open(savedir + "CDI_poles.dat", "w") as file:
            for ii in range(len(v_grid)):
                for jj in range(len(u_grid)):
                    file.write(
                        str(v_grid[ii, 0])
                        + "\t"
                        + str(u_grid[0, jj])
                        + "\t"
                        + str(density_south[ii, jj])
                        + "\t"
                        + str(v_grid[ii, 0])
                        + "\t"
                        + str(u_grid[0, jj])
                        + "\t"
                        + str(density_north[ii, jj])
                        + "\n"
                    )

    # inverse densities for watershed segmentation
    density_south = -1 * density_south
    density_north = -1 * density_north

    # interactive figure: the user reads the KDE threshold values in the console
    # (via mouse_move) and clicks to resume the execution
    fig, (ax0, ax1) = plt.subplots(nrows=1, ncols=2, figsize=(12, 9))
    img0 = ax0.scatter(u_grid, v_grid, c=density_south, cmap=cmap)
    ax0.set_xlim(-max_angle, max_angle)
    ax0.set_ylim(-max_angle, max_angle)
    ax0.axis("scaled")
    gu.colorbar(img0)
    ax0.set_title("KDE \nSouth pole")
    img1 = ax1.scatter(u_grid, v_grid, c=density_north, cmap=cmap)
    ax1.set_xlim(-max_angle, max_angle)
    ax1.set_ylim(-max_angle, max_angle)
    ax1.axis("scaled")
    gu.colorbar(img1)
    ax1.set_title("KDE \nNorth pole")
    fig.text(0.32, 0.90, "Read the threshold value in the console", size=16)
    fig.text(0.32, 0.85, "Click on the figure to resume the execution", size=16)
    fig.tight_layout()
    cid = plt.connect("motion_notify_event", mouse_move)
    fig.waitforbuttonpress()
    plt.disconnect(cid)
    print("\n")

    # identification of local minima
    density_south[
        density_south > background_south
    ] = 0  # define the background in the density of normals
    mask_south = np.copy(density_south)
    mask_south[mask_south != 0] = 1

    density_north[
        density_north > background_north
    ] = 0  # define the background in the density of normals
    mask_north = np.copy(density_north)
    mask_north[mask_north != 0] = 1

    fig, ((ax0, ax1), (ax2, ax3)) = plt.subplots(nrows=2, ncols=2, figsize=(12, 9))
    ax0.imshow(mask_south, cmap=cmap, interpolation="nearest")
    ax0.set_title("Background mask South")
    ax0.invert_yaxis()
    img1 = ax1.scatter(u_grid, v_grid, c=density_south, cmap=cmap)
    ax1.set_xlim(-max_angle, max_angle)
    ax1.set_ylim(-max_angle, max_angle)
    ax1.axis("scaled")
    gu.colorbar(img1)
    ax1.set_title("KDE South pole\nafter background definition")
    # draw the 90 degree circle (equator) on the pole figure
    circle = patches.Circle((0, 0), 90, color="w", fill=False, linewidth=1.5)
    ax1.add_artist(circle)
    ax2.imshow(mask_north, cmap=cmap, interpolation="nearest")
    ax2.set_title("Background mask North")
    ax2.invert_yaxis()
    img3 = ax3.scatter(u_grid, v_grid, c=density_north, cmap=cmap)
    ax3.set_xlim(-max_angle, max_angle)
    ax3.set_ylim(-max_angle, max_angle)
    ax3.axis("scaled")
    gu.colorbar(img3)
    ax3.set_title("KDE North pole\nafter background definition")
    circle = patches.Circle((0, 0), 90, color="w", fill=False, linewidth=1.5)
    ax3.add_artist(circle)
    fig.tight_layout()
    plt.pause(0.1)

    ##########################################################################
    # Generate the markers as local maxima of the distance to the background #
    ##########################################################################
    distances_south = ndimage.distance_transform_edt(density_south)
    distances_north = ndimage.distance_transform_edt(density_north)
    if debugging:
        fig, (ax0, ax1) = plt.subplots(nrows=1, ncols=2)
        img0 = ax0.imshow(distances_south, cmap=cmap, interpolation="nearest")
        ax0.set_title("Distances South")
        gu.colorbar(img0)
        ax0.invert_yaxis()
        img1 = ax1.imshow(distances_north, cmap=cmap, interpolation="nearest")
        ax1.set_title("Distances North")
        gu.colorbar(img1)
        ax1.invert_yaxis()
        fig.tight_layout()
        plt.pause(0.1)

    # find peaks in the distance maps, they will seed the watershed
    local_maxi_south = corner_peaks(
        distances_south, exclude_border=False, min_distance=min_distance, indices=False
    )
    local_maxi_north = corner_peaks(
        distances_north, exclude_border=False, min_distance=min_distance, indices=False
    )
    if debugging:
        fig, (ax0, ax1) = plt.subplots(nrows=1, ncols=2)
        ax0.imshow(local_maxi_south, interpolation="nearest")
        ax0.set_title("local_maxi South before filtering")
        ax0.invert_yaxis()
        # 361 pixels = 90 degrees with the 0.25 degree grid step
        circle = patches.Ellipse(
            (nbx // 2, nby // 2), 361, 361, color="r", fill=False, linewidth=1.5
        )
        ax0.add_artist(circle)
        ax1.imshow(local_maxi_north, interpolation="nearest")
        ax1.set_title("local_maxi North before filtering")
        ax1.invert_yaxis()
        circle = patches.Ellipse(
            (nbx // 2, nby // 2), 361, 361, color="r", fill=False, linewidth=1.5
        )
        ax1.add_artist(circle)
        fig.tight_layout()
        plt.pause(0.1)

    # define the marker for each peak
    markers_south = ndimage.label(local_maxi_south)[0]  # range from 0 to nb_peaks
    # define non overlaping markers for the North projection:
    # the first marker value is (markers_south.max()+1)
    markers_north = ndimage.label(local_maxi_north)[0] + markers_south.max(initial=None)
    # markers_north.min() is 0 since it is the background
    markers_north[markers_north == markers_south.max(initial=None)] = 0
    if debugging:
        fig, (ax0, ax1) = plt.subplots(nrows=1, ncols=2)
        ax0.imshow(
            markers_south, interpolation="nearest", cmap="binary", vmin=0, vmax=1
        )
        ax0.set_title("markers South")
        ax0.invert_yaxis()
        circle = patches.Ellipse(
            (nbx // 2, nby // 2), 361, 361, color="r", fill=False, linewidth=1.5
        )
        ax0.add_artist(circle)
        ax1.imshow(
            markers_north, interpolation="nearest", cmap="binary", vmin=0, vmax=1
        )
        ax1.set_title("markers North")
        ax1.invert_yaxis()
        circle = patches.Ellipse(
            (nbx // 2, nby // 2), 361, 361, color="r", fill=False, linewidth=1.5
        )
        ax1.add_artist(circle)
        fig.tight_layout()
        plt.pause(0.1)

    ##########################
    # watershed segmentation #
    ##########################
    labels_south = watershed(-1 * distances_south, markers_south, mask=mask_south)
    labels_north = watershed(-1 * distances_north, markers_north, mask=mask_north)
    fig, (ax0, ax1) = plt.subplots(nrows=1, ncols=2, figsize=(12, 9))
    img0 = ax0.imshow(labels_south, cmap=cmap, interpolation="nearest")
    ax0.set_title("Labels South")
    ax0.invert_yaxis()
    circle = patches.Ellipse(
        (nbx // 2, nby // 2), 361, 361, color="r", fill=False, linewidth=1.5
    )
    ax0.add_artist(circle)
    gu.colorbar(img0, numticks=int(labels_south.max() + 1))
    img1 = ax1.imshow(labels_north, cmap=cmap, interpolation="nearest")
    ax1.set_title("Labels North")
    ax1.invert_yaxis()
    circle = patches.Ellipse(
        (nbx // 2, nby // 2), 361, 361, color="r", fill=False, linewidth=1.5
    )
    ax1.add_artist(circle)
    gu.colorbar(img1, numticks=int(labels_north.max() + 1))
    fig.tight_layout()
    plt.pause(0.1)
    fig.savefig(savedir + comment_fig + "labels.png")
    return labels_south, labels_north, stereo_proj, remove_row
def surface_gradient(points, support, width=2):
    """
    Calculate the support gradient at the given point(s).

    :param points: tuple or list of tuples of 3 integers (z, y, x), position where
     to calculate the gradient vector
    :param support: 3D numpy binary array, being 1 in the crystal and 0 outside
    :param width: half-width of the window where the gradient will be calculated
     (the support gradient is nonzero on a single layer, it avoids missing it)
    :return: a list of normalized vector(s) (array(s) of 3 numbers) oriented
     towards the exterior of the cristal
    """
    # tuple of 3 arrays: gradient along z, y, x respectively
    gradients = np.gradient(support, 1)
    vectors = []
    if not isinstance(points, list):
        points = [points]
    for point in points:
        # round the point to integer numbers
        point = [int(np.rint(point[idx])) for idx in range(3)]
        window = tuple(
            slice(point[axis] - width, point[axis] + width + 1) for axis in range(3)
        )
        # average each gradient component over its nonzero voxels in the window;
        # the former per-axis copy-pasted blocks are collapsed into one loop
        vector = []
        for grad in gradients:
            grad_slice = grad[window]
            nb_nonzero = (grad_slice != 0).sum()
            vector.append(grad_slice.sum() / nb_nonzero if nb_nonzero else 0)
        # support was 1 inside, 0 outside,
        # the vector needs to be flipped to point towards the outside
        outward = [-component for component in vector]
        vectors.append(outward / np.linalg.norm(outward))
    return vectors
def taubin_smooth(
    faces,
    vertices,
    cmap=default_cmap,
    iterations=10,
    lamda=0.33,
    mu=0.34,
    radius=0.1,
    debugging=False,
):
    """
    Perform Taubin's smoothing of a mesh.

    It performs a back and forward Laplacian smoothing "without shrinking" of a
    triangulated mesh, as described by Gabriel Taubin (ICCV '95).

    :param faces: m*3 ndarray of m faces defined by 3 indices of vertices
    :param vertices: n*3 ndarray of n vertices defined by 3 positions
    :param cmap: colormap used for plotting
    :param iterations: number of iterations for smoothing
    :param lamda: smoothing variable 0 < lambda < mu < 1
    :param mu: smoothing variable 0 < lambda < mu < 1
    :param radius: radius around which the normals are integrated in the calculation
     of the density of normals
    :param debugging: show plots for debugging
    :return: smoothened vertices (ndarray n*3), normals to triangle (ndarray m*3),
     areas of the triangles (ndarray m), weighted density of normals, updated faces,
     errors (indices of the triangles where the normal was nan)
    """
    # local import: importing Axes3D registers the "3d" projection used below
    from mpl_toolkits.mplot3d import Axes3D
    plt.ion()
    print("Original number of vertices:", vertices.shape[0])
    print("Original number of faces:", faces.shape[0])
    new_vertices = np.copy(vertices)
    for k in range(iterations):
        # check the unicity of vertices otherwise 0 distance would happen
        if np.unique(new_vertices, axis=0).shape[0] != new_vertices.shape[0]:
            print("\nTaubin smoothing / lambda: duplicated vertices at iteration", k)
            new_vertices, faces = remove_duplicates(vertices=new_vertices, faces=faces)
        vertices = np.copy(new_vertices)
        neighbours = find_neighbours(
            vertices, faces
        )  # get the indices of neighboring vertices for each vertex
        indices_edges = detect_edges(
            faces
        )  # find indices of vertices defining non-shared edges (near hole...)
        # forward pass: move each vertex towards the inverse-distance weighted
        # average of its neighbours by a fraction lamda (shrinking step)
        for i in range(vertices.shape[0]):
            indices = neighbours[i]  # list of indices
            distances = np.sqrt(
                np.sum((vertices[indices, :] - vertices[i, :]) ** 2, axis=1)
            )
            weights = distances ** (-1)
            vectoren = weights[:, np.newaxis] * vertices[indices, :]
            totaldist = sum(weights)
            new_vertices[i, :] = vertices[i, :] + lamda * (
                np.sum(vectoren, axis=0) / totaldist - vertices[i, :]
            )
        # vertices on non-shared edges (mesh boundary) are kept fixed
        if indices_edges.size != 0:
            new_vertices[indices_edges, :] = vertices[indices_edges, :]
        # check the unicity of vertices otherwise 0 distance would happen
        if np.unique(new_vertices, axis=0).shape[0] != new_vertices.shape[0]:
            print("\nTaubin smoothing / mu: duplicated vertices at iteration", k)
            new_vertices, faces = remove_duplicates(vertices=new_vertices, faces=faces)
        vertices = np.copy(new_vertices)
        neighbours = find_neighbours(
            vertices, faces
        )  # get the indices of neighboring vertices for each vertex
        indices_edges = detect_edges(
            faces
        )  # find indices of vertices defining non-shared edges (near hole...)
        # backward pass: move by -mu (with mu > lamda) to compensate the
        # shrinkage introduced by the forward pass
        for i in range(vertices.shape[0]):
            indices = neighbours[i]  # list of indices
            distances = np.sqrt(
                np.sum((vertices[indices, :] - vertices[i, :]) ** 2, axis=1)
            )
            weights = distances ** (-1)
            vectoren = weights[:, np.newaxis] * vertices[indices, :]
            totaldist = sum(weights)
            new_vertices[i, :] = vertices[i, :] - mu * (
                sum(vectoren) / totaldist - vertices[i, :]
            )
        if indices_edges.size != 0:
            new_vertices[indices_edges, :] = vertices[indices_edges, :]
    # check the unicity of vertices otherwise 0 distance would happen
    if np.unique(new_vertices, axis=0).shape[0] != new_vertices.shape[0]:
        print("\nTaubin smoothing / exiting loop: duplicated vertices")
        new_vertices, faces = remove_duplicates(vertices=new_vertices, faces=faces)
    nan_vertices = np.argwhere(np.isnan(new_vertices[:, 0]))
    print(
        "Number of nan in new_vertices:",
        nan_vertices.shape[0],
        "; Total number of vertices:",
        new_vertices.shape[0],
    )
    # Create an indexed view into the vertex array using
    # the array of three indices for triangles
    tris = new_vertices[faces]
    # Calculate the normal for all the triangles,
    # by taking the cross product of the vectors v1-v0,
    # and v2-v0 in each triangle
    normals = np.cross(tris[:, 1] - tris[:, 0], tris[:, 2] - tris[::, 0])
    # the norm of the cross product of two edges is twice the triangle area
    areas = np.array([1 / 2 * np.linalg.norm(normal) for normal in normals])
    normals_length = np.sqrt(
        normals[:, 0] ** 2 + normals[:, 1] ** 2 + normals[:, 2] ** 2
    )
    normals = -1 * normals / normals_length[:, np.newaxis]  # flip and normalize normals
    # n is now an array of normalized normals, one per triangle.
    # calculate the colormap for plotting
    # the weighted point density of normals on a sphere
    intensity = np.zeros(normals.shape[0], dtype=normals.dtype)
    for i in range(normals.shape[0]):
        distances = np.sqrt(
            np.sum((normals - normals[i, :]) ** 2, axis=1)
        )  # ndarray of normals.shape[0]
        intensity[i] = np.multiply(
            areas[distances < radius], distances[distances < radius]
        ).sum()
        # normals are weighted by the area of mesh triangles
    intensity = intensity / max(intensity)
    if debugging:
        fig = plt.figure()
        ax = Axes3D(fig)
        ax.scatter(normals[:, 0], normals[:, 1], normals[:, 2], c=intensity, cmap=cmap)
        ax.set_xlim(-1, 1)
        ax.set_xlabel("z")
        ax.set_ylim(-1, 1)
        ax.set_ylabel("y")
        ax.set_zlim(-1, 1)
        ax.set_zlabel("x")
        plt.title("Weighted point densities before KDE")
        plt.pause(0.1)
    # replace a nan normal by the previous normal in the array
    err_normals = np.argwhere(np.isnan(normals[:, 0]))
    normals[err_normals, :] = normals[err_normals - 1, :]
    plt.ioff()
    # check normals for nan (argwhere returns one row per nan value,
    # i.e. 3 rows per invalid normal)
    list_nan = np.argwhere(np.isnan(normals))
    normals = np.delete(normals, list_nan[::3, 0], axis=0)
    intensity = np.delete(intensity, list_nan[::3, 0], axis=0)
    return new_vertices, normals, areas, intensity, faces, err_normals
def update_logfile(
    support,
    strain_array,
    summary_file,
    allpoints_file,
    label=0,
    angle_plane=np.nan,
    plane_coeffs=(0, 0, 0, 0),
    plane_normal=(0, 0, 0),
):
    """
    Update the log files used in the facet_strain.py script.

    :param support: the 3D binary support defining voxels to be saved in the logfile
    :param strain_array: the 3D strain array
    :param summary_file: the handle for the file summarizing strain statistics
     per facet
    :param allpoints_file: the handle for the file giving the strain and the label
     for each voxel
    :param label: the label of the plane
    :param angle_plane: the angle of the plane with the measurement direction
    :param plane_coeffs: the fit coefficients (a,b,c,d) of the plane such
     that ax+by+cz+d=0
    :param plane_normal: the normal to the plane
    :return: nothing
    """
    if (support.ndim != 3) or (strain_array.ndim != 3):
        raise ValueError("The support and the strain arrays should be 3D arrays")

    def _column(value):
        # left-aligned, 10-character wide column of the tab-separated log layout
        return "{0: <10}".format(str(value))

    ind_z, ind_y, ind_x = np.nonzero(support == 1)
    nb_points = len(ind_z)
    # one line per voxel of the support with a non-zero strain
    for idx in range(nb_points):
        voxel_strain = strain_array[ind_z[idx], ind_y[idx], ind_x[idx]]
        if voxel_strain == 0:
            # skip the artefact from YY reconstructions at the bottom facet
            continue
        allpoints_file.write(
            "\t".join(
                (
                    _column(label),
                    _column("{:.3f}".format(angle_plane)),
                    _column("{:.7f}".format(voxel_strain)),
                    _column(ind_z[idx]),
                    _column(ind_y[idx]),
                    _column(ind_x[idx]),
                )
            )
            + "\n"
        )
    # strain statistics over the support, ignoring the zero-valued artefact voxels
    str_array = strain_array[support == 1]
    str_array[str_array == 0] = np.nan
    valid_strain = str_array[~np.isnan(str_array)]
    support_strain = np.mean(valid_strain)
    support_deviation = np.std(valid_strain)
    summary_file.write(
        "\t".join(
            (
                _column(label),
                _column("{:.3f}".format(angle_plane)),
                _column(nb_points),
                _column("{:.7f}".format(support_strain)),
                _column("{:.7f}".format(support_deviation)),
                _column("{:.5f}".format(plane_coeffs[0])),
                _column("{:.5f}".format(plane_coeffs[1])),
                _column("{:.5f}".format(plane_coeffs[2])),
                _column("{:.5f}".format(plane_coeffs[3])),
                _column("{:.5f}".format(plane_normal[0])),
                _column("{:.5f}".format(plane_normal[1])),
                _column("{:.5f}".format(plane_normal[2])),
            )
        )
        + "\n"
    )
def upsample(array, upsampling_factor, voxelsizes=None, title="", debugging=False):
    """
    Upsample array using a factor of upsampling.

    The object is linearly interpolated on a new grid which has upsampling_factor
    times more points per dimension, the physical extent being unchanged.

    :param array: the real 2D or 3D array to be upsampled
    :param upsampling_factor: int >= 1, the upsampling factor
    :param voxelsizes: list, the voxel sizes of array (one per dimension);
     defaults to 1 in each dimension
    :param title: title for the debugging plot
    :param debugging: True to see plots
    :return: a tuple (upsampled array, list of the new voxel sizes)
    """
    valid.valid_ndarray(array, ndim=(2, 3))
    ndim = array.ndim
    valid.valid_item(
        value=upsampling_factor,
        allowed_types=int,
        min_included=1,
        name="utils.upsample",
    )
    if voxelsizes is None:
        # default to an isotropic voxel size of 1
        voxelsizes = (1,) * ndim
    valid.valid_container(
        voxelsizes,
        container_types=(list, tuple, np.ndarray),
        length=ndim,
        item_types=Real,
        min_excluded=0,
        name="utils.upsample",
    )
    # color scale limits shared by the before/after debugging plots
    vmin, vmax = array.min(), array.max()
    if ndim == 3:
        if debugging:
            gu.multislices_plot(
                array,
                sum_frames=False,
                title=title + " before upsampling",
                vmin=vmin,
                vmax=vmax,
                scale="linear",
                plot_colorbar=True,
                reciprocal_space=False,
                is_orthogonal=True,
            )
        nbz, nby, nbx = array.shape
        numz, numy, numx = (
            nbz * upsampling_factor,
            nby * upsampling_factor,
            nbx * upsampling_factor,
        )
        # upsampling the grid divides the voxel size by the same factor
        newvoxelsizes = [voxsize / upsampling_factor for voxsize in voxelsizes]
        # centered coordinates (in physical units) of the upsampled grid
        newz, newy, newx = np.meshgrid(
            np.arange(-numz // 2, numz // 2, 1) * newvoxelsizes[0],
            np.arange(-numy // 2, numy // 2, 1) * newvoxelsizes[1],
            np.arange(-numx // 2, numx // 2, 1) * newvoxelsizes[2],
            indexing="ij",
        )
        # interpolator defined on the centered coordinates of the original grid;
        # points falling outside of it are set to 0 (fill_value)
        rgi = RegularGridInterpolator(
            (
                np.arange(-nbz // 2, nbz // 2) * voxelsizes[0],
                np.arange(-nby // 2, nby // 2) * voxelsizes[1],
                np.arange(-nbx // 2, nbx // 2) * voxelsizes[2],
            ),
            array,
            method="linear",
            bounds_error=False,
            fill_value=0,
        )
        # evaluate the interpolator on the flattened new grid (all arrays have
        # the same size, hence the reuse of newz.size)
        obj = rgi(
            np.concatenate(
                (
                    newz.reshape((1, newz.size)),
                    newy.reshape((1, newz.size)),
                    newx.reshape((1, newz.size)),
                )
            ).transpose()
        )
        obj = obj.reshape((numz, numy, numx)).astype(array.dtype)
        if debugging:
            gu.multislices_plot(
                obj,
                sum_frames=False,
                title=title + " after upsampling",
                vmin=vmin,
                vmax=vmax,
                scale="linear",
                plot_colorbar=True,
                reciprocal_space=False,
                is_orthogonal=True,
            )
    else:  # 2D case
        if debugging:
            gu.imshow_plot(
                array,
                title=title + " before upsampling",
                vmin=vmin,
                vmax=vmax,
                scale="linear",
                plot_colorbar=True,
                reciprocal_space=False,
                is_orthogonal=True,
            )
        nby, nbx = array.shape
        numy, numx = nby * upsampling_factor, nbx * upsampling_factor
        # upsampling the grid divides the voxel size by the same factor
        newvoxelsizes = [voxsize / upsampling_factor for voxsize in voxelsizes]
        # centered coordinates (in physical units) of the upsampled grid
        newy, newx = np.meshgrid(
            np.arange(-numy // 2, numy // 2, 1) * newvoxelsizes[0],
            np.arange(-numx // 2, numx // 2, 1) * newvoxelsizes[1],
            indexing="ij",
        )
        # interpolator defined on the centered coordinates of the original grid
        rgi = RegularGridInterpolator(
            (
                np.arange(-nby // 2, nby // 2) * voxelsizes[0],
                np.arange(-nbx // 2, nbx // 2) * voxelsizes[1],
            ),
            array,
            method="linear",
            bounds_error=False,
            fill_value=0,
        )
        obj = rgi(
            np.concatenate(
                (newy.reshape((1, newy.size)), newx.reshape((1, newy.size)))
            ).transpose()
        )
        obj = obj.reshape((numy, numx)).astype(array.dtype)
        if debugging:
            gu.imshow_plot(
                obj,
                title=title + " after upsampling",
                vmin=vmin,
                vmax=vmax,
                scale="linear",
                plot_colorbar=True,
                reciprocal_space=False,
                is_orthogonal=True,
            )
    return obj, newvoxelsizes
| 37.504489 | 88 | 0.581479 |
import sys
from numbers import Real
import numpy as np
from matplotlib import patches
from matplotlib import pyplot as plt
from scipy import ndimage, stats
from scipy.interpolate import RegularGridInterpolator, griddata
from scipy.ndimage.measurements import center_of_mass
from scipy.signal import convolve
from skimage.feature import corner_peaks
from skimage.segmentation import watershed
from bcdi.graph import graph_utils as gu
from bcdi.graph.colormap import ColormapFactory
from bcdi.utils import utilities as util
from bcdi.utils import validation as valid
# default colormap used by the plotting helpers of this module
default_cmap = ColormapFactory().cmap
def calc_stereoproj_facet(projection_axis, vectors, radius_mean, stereo_center):
    """
    Calculate the stereographic projection of vectors.

    The projection is calculated from both the South and North poles of a sphere of
    radius radius_mean, the projection plane being normal to projection_axis and
    located at stereo_center along that axis.

    :param projection_axis: the basis axis of the array (0, 1 or 2) along which the
     projection is performed
    :param vectors: (n, 3) ndarray of vectors expressed in the (axis 0, axis 1,
     axis 2) basis
    :param radius_mean: radius of the projection sphere
    :param stereo_center: position of the projection plane along projection_axis
    :return: an (n, 4) ndarray of projected coordinates (u_South, v_South, u_North,
     v_North) rescaled to degrees (the range [-radius_mean, radius_mean] maps to
     [-90, 90]), and the tuple of the two axis labels for (u, v)
    """
    if projection_axis not in [0, 1, 2]:
        raise ValueError(
            "reflection_axis should be a basis axis of the reconstructed array"
        )
    # the (u, v) in-plane axes are the two basis axes other than projection_axis,
    # in increasing order (0 -> (1, 2), 1 -> (0, 2), 2 -> (0, 1))
    axis_u, axis_v = (axis for axis in (0, 1, 2) if axis != projection_axis)
    uv_labels = (f"axis {axis_u}", f"axis {axis_v}")
    # vectorized projection (replaces the former per-vector Python loop, with
    # identical numerics); a zero denominator yields inf/nan values which are
    # filtered out downstream by the caller
    denom_south = radius_mean + vectors[:, projection_axis] - stereo_center
    denom_north = radius_mean + stereo_center - vectors[:, projection_axis]
    stereo_proj = np.zeros((vectors.shape[0], 4), dtype=vectors.dtype)
    stereo_proj[:, 0] = radius_mean * vectors[:, axis_u] / denom_south
    stereo_proj[:, 1] = radius_mean * vectors[:, axis_v] / denom_south
    stereo_proj[:, 2] = radius_mean * vectors[:, axis_u] / denom_north
    stereo_proj[:, 3] = radius_mean * vectors[:, axis_v] / denom_north
    stereo_proj = stereo_proj / radius_mean * 90  # rescale to [-90, 90] degrees
    return stereo_proj, uv_labels
def detect_edges(faces):
    """
    Find the vertices belonging to edges which are not shared between two faces.

    An edge occurring in a single face lies on a boundary of the mesh
    (e.g. near a hole).

    :param faces: (m, 3) ndarray of faces, each defined by 3 vertex indices
    :return: flattened 1D ndarray of the vertex indices of the non-shared edges
    """
    # the three edges of every face, each edge sorted so that (a, b) and (b, a)
    # compare equal
    all_edges = np.sort(
        np.vstack((faces[:, [0, 1]], faces[:, [0, 2]], faces[:, [1, 2]])),
        axis=1,
    )
    # edges occurring exactly once are not shared between faces
    distinct_edges, occurrences = np.unique(all_edges, return_counts=True, axis=0)
    return distinct_edges[occurrences == 1].flatten()
def distance_threshold(fit, indices, plane_shape, max_distance=0.90):
    """
    Filter out the voxels located further than max_distance from the fit plane.

    :param fit: coefficients (a, b, c, d) of the plane such that ax+by+cz+d=0
    :param indices: tuple (or (3, N) array) of the (axis 0, axis 1, axis 2) voxel
     indices of the candidate points
    :param plane_shape: shape of the output 3D binary array
    :param max_distance: distance threshold in pixels
    :return: a 3D binary array of shape plane_shape (1 at points closer than
     max_distance to the plane) and a boolean, True when no point fulfils the
     condition
    """
    indices = np.asarray(indices)
    plane = np.zeros(plane_shape, dtype=int)
    if len(indices[0]) == 0:
        return plane, True
    plane_normal = np.array([fit[0], fit[1], fit[2]])
    # vectorized point-to-plane distance |a*x + b*y + c*z + d| / norm(a, b, c)
    # (replaces the former per-point Python loop, identical numerics)
    dist = np.abs(
        fit[0] * indices[0]
        + fit[1] * indices[1]
        + fit[2] * indices[2]
        + fit[3]
    ) / np.linalg.norm(plane_normal)
    close_enough = dist < max_distance
    plane[
        indices[0, close_enough], indices[1, close_enough], indices[2, close_enough]
    ] = 1
    if plane.sum() == 0:
        print("Distance_threshold: no points for plane")
        return plane, True
    return plane, False
def equirectangular_proj(
    normals,
    intensity,
    cmap=default_cmap,
    bw_method=0.03,
    min_distance=10,
    background_threshold=-0.35,
    debugging=False,
):
    """
    Detect facets in an object using an equirectangular projection of its normals.

    The normals are mapped to (longitude, latitude), a gaussian kernel density
    estimation is evaluated on a regular grid, and the facets are segmented by
    watershed on the distance transform of the thresholded KDE.

    :param normals: (n, 3) ndarray of normals to the mesh triangles
    :param intensity: 1D array of weighted point densities, one value per normal
    :param cmap: colormap used for plotting
    :param bw_method: bandwidth of the gaussian kernel density estimation
    :param min_distance: minimum number of pixels separating peaks when looking
     for the local maxima of the distance map
    :param background_threshold: threshold on the (negative-valued) KDE; values
     above it are treated as background
    :param debugging: True to show intermediate plots
    :return: labels (2D array of labelled facets) and the (longitude, latitude)
     coordinates of the projected normals
    """
    # remove rows where the normal contains a nan (argwhere returns one row per
    # nan value, i.e. 3 rows per invalid normal)
    list_nan = np.argwhere(np.isnan(normals))
    normals = np.delete(normals, list_nan[::3, 0], axis=0)
    intensity = np.delete(intensity, list_nan[::3, 0], axis=0)
    # calculate the longitude and latitude of each normal; normals aligned with
    # axis 2 (arctan2(0, 0) undefined) are left at (0, 0)
    long_lat = np.zeros((normals.shape[0], 2), dtype=normals.dtype)
    for i in range(normals.shape[0]):
        if normals[i, 1] == 0 and normals[i, 0] == 0:
            continue
        long_lat[i, 0] = np.arctan2(normals[i, 1], normals[i, 0])  # longitude
        long_lat[i, 1] = np.arcsin(normals[i, 2])  # latitude
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(long_lat[:, 0], long_lat[:, 1], c=intensity, cmap=cmap)
    ax.set_xlim(-np.pi, np.pi)
    ax.set_ylim(-np.pi / 2, np.pi / 2)
    plt.axis("scaled")
    plt.title("Equirectangular projection of the weighted point densities before KDE")
    plt.pause(0.1)
    # kernel density estimation evaluated on a regular (latitude, longitude) grid
    kde = stats.gaussian_kde(long_lat.T, bw_method=bw_method)
    # BUGFIX: this grid definition line was missing, leaving a dangling slice
    # expression (syntax error); 150 points in latitude, 300 in longitude
    yi, xi = np.mgrid[
        -np.pi / 2 : np.pi / 2 : 150j, -np.pi : np.pi : 300j
    ]
    coords = np.vstack([item.ravel() for item in [xi, yi]])
    # negate the KDE so that high densities become valleys for the watershed
    density = -1 * kde(coords).reshape(
        xi.shape
    )
    fig = plt.figure()
    ax = fig.add_subplot(111)
    scatter = ax.scatter(xi, yi, c=density, cmap=cmap, vmin=-1.5, vmax=0)
    ax.set_xlim(-np.pi, np.pi)
    ax.set_ylim(-np.pi / 2, np.pi / 2)
    fig.colorbar(scatter)
    plt.axis("scaled")
    plt.title("Equirectangular projection of the KDE")
    plt.pause(0.1)
    # define the background: values above the threshold are set to 0
    density[density > background_threshold] = 0
    mask = np.copy(density)
    mask[mask != 0] = 1
    plt.figure()
    plt.imshow(mask, cmap=cmap, interpolation="nearest")
    plt.title("Background mask")
    plt.gca().invert_yaxis()
    fig = plt.figure()
    ax = fig.add_subplot(111)
    scatter = ax.scatter(xi, yi, c=density, cmap=cmap)
    ax.set_xlim(-np.pi, np.pi)
    ax.set_ylim(-np.pi / 2, np.pi / 2)
    fig.colorbar(scatter)
    plt.axis("scaled")
    plt.title("KDE after background definition")
    plt.pause(0.1)
    # distance transform of the foreground, one local maximum expected per facet
    distances = ndimage.distance_transform_edt(density)
    if debugging:
        plt.figure()
        plt.imshow(distances, cmap=cmap, interpolation="nearest")
        plt.title("Distances")
        plt.gca().invert_yaxis()
        plt.pause(0.1)
    # find the local maxima of the distance map, used as watershed markers
    local_maxi = corner_peaks(
        distances, exclude_border=False, min_distance=min_distance, indices=False
    )
    if debugging:
        plt.figure()
        plt.imshow(local_maxi, interpolation="nearest")
        plt.title("local_maxi")
        plt.gca().invert_yaxis()
        plt.pause(0.1)
    markers = ndimage.label(local_maxi)[0]
    if debugging:
        plt.figure()
        plt.imshow(markers, interpolation="nearest")
        plt.title("markers")
        plt.colorbar()
        plt.gca().invert_yaxis()
        plt.pause(0.1)
    # watershed segmentation of the facets
    labels = watershed(-1 * distances, markers, mask=mask)
    print("There are", str(labels.max()), "facets")
    plt.figure()
    plt.imshow(labels, cmap=cmap, interpolation="nearest")
    plt.title("Separated objects")
    plt.colorbar()
    plt.gca().invert_yaxis()
    plt.pause(0.1)
    return labels, long_lat
def find_facet(
    refplane_indices,
    surf_indices,
    original_shape,
    step_shift,
    plane_label,
    plane_coeffs,
    min_points,
    debugging=False,
):
    """
    Shift a fit plane along its normal until it best overlaps the object surface.

    At each iteration the reference plane is shifted by a multiple of step_shift
    along its normal and the number of voxels common with the surface is counted.
    The loop stops when this count drops below min_points after having crossed the
    surface, or when the count falls back to zero. If the surface is not reached
    after a few iterations, the scanning direction is reversed once.

    :param refplane_indices: tuple of 3 1D ndarrays of the (axis 0, axis 1, axis 2)
     voxel indices of the reference plane
    :param surf_indices: tuple of 3 1D ndarrays of the voxel indices of the
     object surface
    :param original_shape: shape of the full 3D array (used for the debugging plot)
    :param step_shift: shift step (in pixels) applied at each iteration along the
     plane normal
    :param plane_label: label of the plane, used in printed messages and plots
    :param plane_coeffs: the fit coefficients (a,b,c,d) of the plane such that
     ax+by+cz+d=0
    :param min_points: threshold on the number of common points below which the
     search stops (once the surface was crossed)
    :param debugging: True to see plots and extra console output
    :return: the total offset (in pixels, signed) to apply to the reference plane,
     i.e. (number of accepted iterations) * step_shift
    """
    if not isinstance(refplane_indices, tuple):
        raise ValueError("refplane_indices should be a tuple of 3 1D ndarrays")
    if not isinstance(surf_indices, tuple):
        raise ValueError("surf_indices should be a tuple of 3 1D ndarrays")
    surf0, surf1, surf2 = surf_indices
    plane_normal = np.array(
        [plane_coeffs[0], plane_coeffs[1], plane_coeffs[2]]
    )
    # number of common points found at the previous iteration
    common_previous = 0
    found_plane = 0
    nbloop = 1
    # set to 1 as soon as at least one shifted plane intersected the surface
    crossed_surface = 0
    # 0 while scanning along +step_shift, 1 after the direction was reversed
    shift_direction = 0
    while found_plane == 0:
        common_points = 0
        nb_points = len(surf0)
        # shift the reference plane by nbloop * step_shift along its normal
        plane_newindices0, plane_newindices1, plane_newindices2 = offset_plane(
            indices=refplane_indices,
            offset=nbloop * step_shift,
            plane_normal=plane_normal,
        )
        nb_newpoints = len(plane_newindices0)
        # count the voxels common between the shifted plane and the surface
        for point in range(nb_newpoints):
            for point2 in range(nb_points):
                if (
                    plane_newindices0[point] == surf0[point2]
                    and plane_newindices1[point] == surf1[point2]
                    and plane_newindices2[point] == surf2[point2]
                ):
                    common_points = common_points + 1
        if debugging:
            # mean signed distance of the surface voxels to the shifted plane
            temp_coeff3 = plane_coeffs[3] - nbloop * step_shift
            dist = np.zeros(nb_points)
            for point in range(nb_points):
                dist[point] = (
                    plane_coeffs[0] * surf0[point]
                    + plane_coeffs[1] * surf1[point]
                    + plane_coeffs[2] * surf2[point]
                    + temp_coeff3
                ) / np.linalg.norm(plane_normal)
            temp_mean_dist = dist.mean()
            plane = np.zeros(original_shape)
            plane[plane_newindices0, plane_newindices1, plane_newindices2] = 1
            gu.scatter_plot_overlaid(
                arrays=(
                    np.concatenate(
                        (
                            plane_newindices0[:, np.newaxis],
                            plane_newindices1[:, np.newaxis],
                            plane_newindices2[:, np.newaxis],
                        ),
                        axis=1,
                    ),
                    np.concatenate(
                        (
                            surf0[:, np.newaxis],
                            surf1[:, np.newaxis],
                            surf2[:, np.newaxis],
                        ),
                        axis=1,
                    ),
                ),
                markersizes=(8, 2),
                markercolors=("b", "r"),
                labels=("axis 0", "axis 1", "axis 2"),
                title="Plane"
                + str(plane_label)
                + " after shifting - iteration"
                + str(nbloop),
            )
            print(
                "(while) iteration ",
                nbloop,
                "- Mean distance of the plane to outer shell = "
                + str("{:.2f}".format(temp_mean_dist))
                + "\n pixels - common_points = ",
                common_points,
            )
        if common_points != 0:
            if common_points >= common_previous:
                # overlap still increasing: keep shifting in the same direction
                found_plane = 0
                common_previous = common_points
                print(
                    "(while, common_points != 0), iteration ",
                    nbloop,
                    " - ",
                    common_previous,
                    "points belonging to the facet for plane ",
                    plane_label,
                )
                nbloop = nbloop + 1
                crossed_surface = 1
            elif (
                common_points < min_points
            ):
                # overlap dropped below the threshold: the facet was passed, stop
                found_plane = 1
                print(
                    "(while, common_points != 0), "
                    "exiting while loop after threshold reached - ",
                    common_previous,
                    "points belonging to the facet for plane ",
                    plane_label,
                    "- next step common points=",
                    common_points,
                )
            else:
                # overlap decreasing but still above min_points: keep shifting
                found_plane = 0
                common_previous = common_points
                print(
                    "(while, common_points != 0), iteration ",
                    nbloop,
                    " - ",
                    common_previous,
                    "points belonging to the facet for plane ",
                    plane_label,
                )
                nbloop = nbloop + 1
                crossed_surface = 1
        else:
            if crossed_surface == 1:
                # no more overlap after having crossed the surface: stop
                found_plane = 1
                print(
                    "(while, common_points = 0), exiting while loop - ",
                    common_previous,
                    "points belonging to the facet for plane ",
                    plane_label,
                    "- next step common points=",
                    common_points,
                )
            elif not shift_direction:
                if nbloop < 5:
                    # surface not yet reached, keep scanning in this direction
                    print(
                        "(while, common_points = 0), iteration ",
                        nbloop,
                        " - ",
                        common_previous,
                        "points belonging to the facet for plane ",
                        plane_label,
                    )
                    nbloop = nbloop + 1
                else:
                    # nothing found after 5 iterations: reverse the scan direction
                    shift_direction = 1
                    print("Shift scanning direction")
                    step_shift = -1 * step_shift
                    nbloop = 1
            else:
                if nbloop < 10:
                    print(
                        "(while, common_points = 0), iteration ",
                        nbloop,
                        " - ",
                        common_previous,
                        "points belonging to the facet for plane ",
                        plane_label,
                    )
                    nbloop = nbloop + 1
                else:
                    # both directions exhausted without intersecting the surface
                    print(
                        "(while, common_points = 0),"
                        " no point from support is intersecting the plane ",
                        plane_label,
                    )
                    break
    return (nbloop - 1) * step_shift
def find_neighbours(vertices, faces):
    """
    List the neighbouring vertices for each vertex of a mesh.

    :param vertices: (n, 3) ndarray of vertices
    :param faces: (m, 3) ndarray of faces, each defined by 3 vertex indices
    :return: list of length n; element i is the list of the unique indices of the
     vertices sharing a face with vertex i (empty if the vertex belongs to no
     face)
    """
    # BUGFIX: the previous implementation initialized entries to None and crashed
    # with a TypeError on any vertex not referenced by a face; sets also make the
    # final de-duplication step unnecessary
    neighbors = [set() for _ in range(vertices.shape[0])]
    for vert_a, vert_b, vert_c in faces:
        # the three vertices of a face are mutual neighbours
        neighbors[vert_a].update((vert_b, vert_c))
        neighbors[vert_b].update((vert_c, vert_a))
        neighbors[vert_c].update((vert_a, vert_b))
    # keep the original interface: a list of lists of unique vertex indices
    return [list(neighbor) for neighbor in neighbors]
def fit_plane(plane, label, debugging=False):
    """
    Fit a plane to a set of voxels after filtering out outliers.

    Two filtering passes are applied (each one twice): voxels with fewer than 5
    neighbours in a 5x5x5 window are considered isolated and removed, then voxels
    further from the center of mass than the median distance are removed. The
    remaining voxels are fitted by util.plane_fit.

    NOTE(review): `plane` is modified in place by the filtering steps.

    :param plane: 3D binary array, 1 at the voxels belonging to the plane
    :param label: the label of the plane, used in printed messages and plot titles
    :param debugging: True to show plots of the points at each filtering step
    :return: a tuple (fit parameters, (3, N) array of the remaining voxel indices,
     standard deviation of the fit parameters, no_points flag); the fit parameters
     and deviation are 0 when no_points is True
    """
    indices = np.asarray(np.nonzero(plane))
    no_points = False
    if len(indices[0]) == 0:
        no_points = True
        return 0, indices, 0, no_points
    # two filtering passes: coordination threshold then distance-to-COM threshold
    for idx in range(2):
        if debugging:
            gu.scatter_plot(
                np.asarray(np.nonzero(plane)).transpose(),
                labels=("axis 0", "axis 1", "axis 2"),
                title="Points before coordination threshold plane "
                + str(label)
                + f"\niteration {idx}",
            )
        # remove isolated points (fewer than 5 points in a 5x5x5 neighbourhood)
        for point in range(indices.shape[1]):
            neighbors = plane[
                indices[0, point] - 2 : indices[0, point] + 3,
                indices[1, point] - 2 : indices[1, point] + 3,
                indices[2, point] - 2 : indices[2, point] + 3,
            ].sum()
            if neighbors < 5:
                plane[indices[0, point], indices[1, point], indices[2, point]] = 0
        print(
            "Fit plane",
            label,
            ", ",
            str(indices.shape[1] - plane[plane == 1].sum()),
            "points isolated, ",
            str(plane[plane == 1].sum()),
            "remaining",
        )
        if debugging:
            gu.scatter_plot(
                np.asarray(np.nonzero(plane)).transpose(),
                labels=("axis 0", "axis 1", "axis 2"),
                title="Points after coordination threshold plane "
                + str(label)
                + f"\niteration {idx}",
            )
        indices = np.asarray(np.nonzero(plane))
        if len(indices[0]) == 0:
            no_points = True
            return 0, indices, 0, no_points
        # remove the points further from the center of mass than the median
        # distance
        dist = np.zeros(indices.shape[1])
        x_com, y_com, z_com = center_of_mass(plane)
        for point in range(indices.shape[1]):
            dist[point] = np.sqrt(
                (indices[0, point] - x_com) ** 2
                + (indices[1, point] - y_com) ** 2
                + (indices[2, point] - z_com) ** 2
            )
        median_dist = np.median(dist)
        if debugging:
            gu.scatter_plot(
                np.asarray(np.nonzero(plane)).transpose(),
                labels=("axis 0", "axis 1", "axis 2"),
                title="Points before distance threshold plane "
                + str(label)
                + f"\niteration {idx}",
            )
        for point in range(indices.shape[1]):
            if dist[point] > median_dist:
                plane[indices[0, point], indices[1, point], indices[2, point]] = 0
        print(
            "Fit plane",
            label,
            ", ",
            str(indices.shape[1] - plane[plane == 1].sum()),
            "points too far from COM, ",
            str(plane[plane == 1].sum()),
            "remaining",
        )
        if debugging:
            gu.scatter_plot(
                np.asarray(np.nonzero(plane)).transpose(),
                labels=("axis 0", "axis 1", "axis 2"),
                title="Points after distance threshold plane "
                + str(label)
                + f"\niteration {idx}",
            )
        indices = np.asarray(np.nonzero(plane))
    # not enough points remaining for a meaningful plane fit
    if len(indices[0]) < 5:
        no_points = True
        return 0, indices, 0, no_points
    params, std_param, valid_plane = util.plane_fit(
        indices=indices, label=label, threshold=1, debugging=debugging
    )
    if not valid_plane:
        plane[indices] = 0
        no_points = True
    return params, indices, std_param, no_points
def grow_facet(fit, plane, label, support, max_distance=0.90, debugging=True):
    """
    Grow a facet plane by one dilation cycle constrained by the object.

    The plane is dilated with a 3x3x3 kernel (within a bounding box extended by
    20 pixels), restricted to the support, then filtered by the distance to the
    fit plane and by the alignment of the local surface gradient with the plane
    normal.

    :param fit: coefficients (a, b, c, d) of the plane such that ax+by+cz+d=0
    :param plane: 3D binary array, 1 at the voxels belonging to the plane
    :param label: the label of the plane, used in printed messages and plot titles
    :param support: 3D binary support of the object
    :param max_distance: distance threshold (in pixels) to the fit plane
    :param debugging: True to show plots of the plane points
    :return: the updated plane and a boolean, True when no point remained
    """
    nbz, nby, nbx = plane.shape
    indices = np.nonzero(plane)
    if len(indices[0]) == 0:
        no_points = True
        return plane, no_points
    kernel = np.ones((3, 3, 3))
    # bounding box of the plane extended by 20 pixels, clipped to the array
    start_z = max(indices[0].min() - 20, 0)
    stop_z = min(indices[0].max() + 21, nbz)
    start_y = max(indices[1].min() - 20, 0)
    stop_y = min(indices[1].max() + 21, nby)
    start_x = max(indices[2].min() - 20, 0)
    stop_x = min(indices[2].max() + 21, nbx)
    # binary dilation of the plane within the bounding box
    obj = np.copy(plane[start_z:stop_z, start_y:stop_y, start_x:stop_x])
    coord = np.rint(convolve(obj, kernel, mode="same"))
    coord = coord.astype(int)
    coord[np.nonzero(coord)] = 1
    if debugging:
        gu.scatter_plot_overlaid(
            arrays=(np.asarray(np.nonzero(coord)).T, np.asarray(np.nonzero(obj)).T),
            markersizes=(2, 8),
            markercolors=("b", "r"),
            labels=("x", "y", "z"),
            title="Plane" + str(label) + " before facet growing and coord matrix",
        )
    # restrict the dilated plane to the support and to the voxels close enough
    # to the fit plane
    temp_plane = np.copy(plane)
    temp_plane[start_z:stop_z, start_y:stop_y, start_x:stop_x] = coord
    temp_plane[support == 0] = 0
    plane, no_points = distance_threshold(
        fit=fit,
        indices=np.nonzero(temp_plane),
        plane_shape=temp_plane.shape,
        max_distance=max_distance,
    )
    # exclude voxels whose local surface gradient is not aligned with the plane
    # normal (dot product below 0.75)
    plane_normal = fit[:-1]
    indices = np.nonzero(plane)
    gradients = surface_gradient(
        list(zip(indices[0], indices[1], indices[2])), support=support
    )
    count_grad = 0
    nb_indices = len(indices[0])
    for idx in range(nb_indices):
        if np.dot(plane_normal, gradients[idx]) < 0.75:
            plane[indices[0][idx], indices[1][idx], indices[2][idx]] = 0
            count_grad += 1
    indices = np.nonzero(plane)
    if debugging and len(indices[0]) != 0:
        gu.scatter_plot(
            array=np.asarray(indices).T,
            labels=("x", "y", "z"),
            title="Plane" + str(label) + " after 1 cycle of facet growing",
        )
        print(f"{count_grad} points excluded by gradient filtering")
        print(str(len(indices[0])) + " after 1 cycle of facet growing")
    return plane, no_points
def offset_plane(indices, offset, plane_normal):
    """
    Shift plane indices by an offset along the plane normal.

    :param indices: tuple of 3 1D ndarrays of the (axis 0, axis 1, axis 2) voxel
     indices of the plane points
    :param offset: offset (in pixels) to apply along the plane normal
    :param plane_normal: coefficients (a, b, c) of the plane normal, for a plane
     defined by ax+by+cz+d=0
    :return: the 3 shifted 1D arrays of indices, rounded to the nearest integer
    """
    if not isinstance(indices, tuple):
        raise ValueError("indices should be a tuple of 3 1D ndarrays")
    # each index array is shifted by the corresponding component of
    # offset * unit_normal (the former np.dot with a basis vector reduced to
    # picking that component; this is numerically identical and cheaper)
    unit_normal = plane_normal / np.linalg.norm(plane_normal)
    new_indices0 = np.rint(indices[0] + offset * unit_normal[0]).astype(int)
    new_indices1 = np.rint(indices[1] + offset * unit_normal[1]).astype(int)
    new_indices2 = np.rint(indices[2] + offset * unit_normal[2]).astype(int)
    return new_indices0, new_indices1, new_indices2
def remove_duplicates(vertices, faces, debugging=False):
    """
    Remove duplicated vertices and update the faces accordingly.

    NOTE(review): `faces` is modified in place by the index remapping below.

    :param vertices: (n, 3) ndarray of vertices
    :param faces: (m, 3) ndarray of faces, each defined by 3 vertex indices
    :param debugging: True to print the duplicated vertices bookkeeping
    :return: the updated vertices and faces, with duplicated vertices and
     degenerate faces removed
    """
    # find the vertices appearing more than once
    uniq_vertices, uniq_inverse = np.unique(vertices, axis=0, return_inverse=True)
    indices, count = np.unique(uniq_inverse, return_counts=True)
    duplicated_indices = indices[count != 1]
    # group the row indices of identical vertices (argwhere returns one row per
    # coordinate, i.e. 3 rows per matching vertex)
    list_duplicated = []
    for idx, value in enumerate(duplicated_indices):
        same_vertices = np.argwhere(vertices == uniq_vertices[value, :])
        list_duplicated.append(list(same_vertices[::3, 0]))
    # keep the first occurrence of each duplicated vertex, delete the others
    remove_vertices = [value for sublist in list_duplicated for value in sublist[1:]]
    vertices = np.delete(vertices, remove_vertices, axis=0)
    print(len(remove_vertices), "duplicated vertices removed")
    # remap the faces: redirect each removed occurrence to the kept one, then
    # shift down every index larger than the removed one; the bookkeeping lists
    # are shifted the same way so later iterations stay consistent
    for idx, temp_array in enumerate(list_duplicated):
        for idy in range(1, len(temp_array)):
            duplicated_value = temp_array[idy]
            faces[faces == duplicated_value] = temp_array[0]
            faces[faces > duplicated_value] = faces[faces > duplicated_value] - 1
            if debugging:
                print("temp_array before", temp_array)
                print("list_duplicated before", list_duplicated)
            temp_array = [
                (value - 1) if value > duplicated_value else value
                for value in temp_array
            ]
            list_duplicated = [
                [
                    (value - 1) if value > duplicated_value else value
                    for value in sublist
                ]
                for sublist in list_duplicated
            ]
            if debugging:
                print("temp_array after", temp_array)
                print("list_duplicated after", list_duplicated)
    # remove degenerate faces (two of their indices collapsed to the same vertex)
    remove_faces = []
    for idx in range(faces.shape[0]):
        if np.unique(faces[idx, :], axis=0).shape[0] != faces[idx, :].shape[0]:
            remove_faces.append(idx)
    faces = np.delete(faces, remove_faces, axis=0)
    print(len(remove_faces), "faces with identical vertices removed")
    return vertices, faces
def surface_indices(surface, plane_indices, margin=3):
    """
    Find the surface voxels in the neighbourhood of a plane.

    The surface array is cropped around the bounding box of the plane (extended
    by a margin) and the indices of the non-zero voxels of the crop are returned,
    expressed in the frame of the full array.

    :param surface: 3D binary array of the shell of the object
    :param plane_indices: tuple of 3 1D arrays of the (axis 0, axis 1, axis 2)
     voxel indices of the plane
    :param margin: margin (in pixels) added around the plane bounding box
    :return: a tuple of 3 1D arrays of the indices of the surface voxels located
     within the cropped region
    """
    valid.valid_ndarray(surface, ndim=3)
    if not isinstance(plane_indices, tuple):
        plane_indices = tuple(plane_indices)
    # BUGFIX: clamp the crop start at 0. A negative start (when the plane is
    # closer than `margin` to the array edge) was interpreted by Python as an
    # index from the end of the array, cropping the wrong region and offsetting
    # the returned indices incorrectly.
    start0 = max(int(plane_indices[0].min()) - margin, 0)
    start1 = max(int(plane_indices[1].min()) - margin, 0)
    start2 = max(int(plane_indices[2].min()) - margin, 0)
    surf_indices = np.nonzero(
        surface[
            start0 : plane_indices[0].max() + margin,
            start1 : plane_indices[1].max() + margin,
            start2 : plane_indices[2].max() + margin,
        ]
    )
    # convert the indices of the crop back to the frame of the full array
    surf0 = surf_indices[0] + start0
    surf1 = surf_indices[1] + start1
    surf2 = surf_indices[2] + start2
    return surf0, surf1, surf2
def stereographic_proj(
normals,
intensity,
max_angle,
savedir,
voxel_size,
projection_axis,
min_distance=10,
background_south=-1000,
background_north=-1000,
save_txt=False,
cmap=default_cmap,
planes_south=None,
planes_north=None,
plot_planes=True,
scale="linear",
comment_fig="",
debugging=False,
):
def mouse_move(event):
nonlocal density_south, density_north, u_grid, v_grid, ax0, ax1
if event.inaxes == ax0:
index_u = util.find_nearest(u_grid[0, :], event.xdata, width=None)
index_v = util.find_nearest(v_grid[:, 0], event.ydata, width=None)
sys.stdout.write(
"\rKDE South:" + str("{:.0f}".format(density_south[index_v, index_u]))
)
sys.stdout.flush()
elif event.inaxes == ax1:
index_u = util.find_nearest(u_grid[0, :], event.xdata, width=None)
index_v = util.find_nearest(v_grid[:, 0], event.ydata, width=None)
sys.stdout.write(
"\rKDE North:" + str("{:.0f}".format(density_north[index_v, index_u]))
)
sys.stdout.flush()
else:
pass
if comment_fig and comment_fig[-1] != "_":
comment_fig = comment_fig + "_"
radius_mean = 1
stereo_center = 0
list_nan = np.argwhere(np.isnan(normals))
normals = np.delete(normals, list_nan[::3, 0], axis=0)
intensity = np.delete(intensity, list_nan[::3, 0], axis=0)
iso_normals = np.copy(normals)
iso_normals[:, 0] = iso_normals[:, 0] * 2 * np.pi / voxel_size[0]
iso_normals[:, 1] = iso_normals[:, 1] * 2 * np.pi / voxel_size[1]
iso_normals[:, 2] = iso_normals[:, 2] * 2 * np.pi / voxel_size[2]
iso_normals_length = np.sqrt(
iso_normals[:, 0] ** 2 + iso_normals[:, 1] ** 2 + iso_normals[:, 2] ** 2
)
iso_normals = iso_normals / iso_normals_length[:, np.newaxis]
stereo_proj, uv_labels = calc_stereoproj_facet(
projection_axis=projection_axis,
vectors=iso_normals,
radius_mean=radius_mean,
stereo_center=stereo_center,
)
list_bad = np.argwhere(
np.isinf(stereo_proj) | np.isnan(stereo_proj)
)
remove_row = list(set(list_bad[:, 0]))
print(
"remove_row indices (the stereographic projection is infinite or nan): ",
remove_row,
"\n",
)
stereo_proj = np.delete(stereo_proj, remove_row, axis=0)
intensity = np.delete(intensity, remove_row, axis=0)
fig, _ = gu.contour_stereographic(
euclidian_u=stereo_proj[:, 0],
euclidian_v=stereo_proj[:, 1],
color=intensity,
radius_mean=radius_mean,
planes=planes_south,
max_angle=max_angle,
scale=scale,
title="Projection from\nSouth pole",
plot_planes=plot_planes,
uv_labels=uv_labels,
debugging=debugging,
)
fig.savefig(savedir + comment_fig + "South pole_" + scale + ".png")
fig, _ = gu.contour_stereographic(
euclidian_u=stereo_proj[:, 2],
euclidian_v=stereo_proj[:, 3],
color=intensity,
radius_mean=radius_mean,
planes=planes_north,
max_angle=max_angle,
scale=scale,
title="Projection from\nNorth pole",
plot_planes=plot_planes,
uv_labels=uv_labels,
debugging=debugging,
)
fig.savefig(savedir + comment_fig + "North pole_" + scale + ".png")
nb_points = 4 * max_angle + 1
v_grid, u_grid = np.mgrid[
-max_angle : max_angle : (nb_points * 1j),
-max_angle : max_angle : (nb_points * 1j),
]
nby, nbx = u_grid.shape
density_south = griddata(
(stereo_proj[:, 0], stereo_proj[:, 1]),
intensity,
(u_grid, v_grid),
method="linear",
)
density_north = griddata(
(stereo_proj[:, 2], stereo_proj[:, 3]),
intensity,
(u_grid, v_grid),
method="linear",
)
density_south = density_south / density_south[density_south > 0].max() * 10000
density_north = density_north / density_north[density_north > 0].max() * 10000
if save_txt:
density_south[np.isnan(density_south)] = 0.0
density_north[np.isnan(density_north)] = 0.0
with open(savedir + "CDI_poles.dat", "w") as file:
for ii in range(len(v_grid)):
for jj in range(len(u_grid)):
file.write(
str(v_grid[ii, 0])
+ "\t"
+ str(u_grid[0, jj])
+ "\t"
+ str(density_south[ii, jj])
+ "\t"
+ str(v_grid[ii, 0])
+ "\t"
+ str(u_grid[0, jj])
+ "\t"
+ str(density_north[ii, jj])
+ "\n"
)
density_south = -1 * density_south
density_north = -1 * density_north
fig, (ax0, ax1) = plt.subplots(nrows=1, ncols=2, figsize=(12, 9))
img0 = ax0.scatter(u_grid, v_grid, c=density_south, cmap=cmap)
ax0.set_xlim(-max_angle, max_angle)
ax0.set_ylim(-max_angle, max_angle)
ax0.axis("scaled")
gu.colorbar(img0)
ax0.set_title("KDE \nSouth pole")
img1 = ax1.scatter(u_grid, v_grid, c=density_north, cmap=cmap)
ax1.set_xlim(-max_angle, max_angle)
ax1.set_ylim(-max_angle, max_angle)
ax1.axis("scaled")
gu.colorbar(img1)
ax1.set_title("KDE \nNorth pole")
fig.text(0.32, 0.90, "Read the threshold value in the console", size=16)
fig.text(0.32, 0.85, "Click on the figure to resume the execution", size=16)
fig.tight_layout()
cid = plt.connect("motion_notify_event", mouse_move)
fig.waitforbuttonpress()
plt.disconnect(cid)
print("\n")
density_south[
density_south > background_south
] = 0
mask_south = np.copy(density_south)
mask_south[mask_south != 0] = 1
density_north[
density_north > background_north
] = 0
mask_north = np.copy(density_north)
mask_north[mask_north != 0] = 1
fig, ((ax0, ax1), (ax2, ax3)) = plt.subplots(nrows=2, ncols=2, figsize=(12, 9))
ax0.imshow(mask_south, cmap=cmap, interpolation="nearest")
ax0.set_title("Background mask South")
ax0.invert_yaxis()
img1 = ax1.scatter(u_grid, v_grid, c=density_south, cmap=cmap)
ax1.set_xlim(-max_angle, max_angle)
ax1.set_ylim(-max_angle, max_angle)
ax1.axis("scaled")
gu.colorbar(img1)
ax1.set_title("KDE South pole\nafter background definition")
circle = patches.Circle((0, 0), 90, color="w", fill=False, linewidth=1.5)
ax1.add_artist(circle)
ax2.imshow(mask_north, cmap=cmap, interpolation="nearest")
ax2.set_title("Background mask North")
ax2.invert_yaxis()
img3 = ax3.scatter(u_grid, v_grid, c=density_north, cmap=cmap)
ax3.set_xlim(-max_angle, max_angle)
ax3.set_ylim(-max_angle, max_angle)
ax3.axis("scaled")
gu.colorbar(img3)
ax3.set_title("KDE North pole\nafter background definition")
circle = patches.Circle((0, 0), 90, color="w", fill=False, linewidth=1.5)
ax3.add_artist(circle)
fig.tight_layout()
plt.pause(0.1)
tect_edges(
faces
)
for i in range(vertices.shape[0]):
indices = neighbours[i]
distances = np.sqrt(
np.sum((vertices[indices, :] - vertices[i, :]) ** 2, axis=1)
)
weights = distances ** (-1)
vectoren = weights[:, np.newaxis] * vertices[indices, :]
totaldist = sum(weights)
new_vertices[i, :] = vertices[i, :] + lamda * (
np.sum(vectoren, axis=0) / totaldist - vertices[i, :]
)
if indices_edges.size != 0:
new_vertices[indices_edges, :] = vertices[indices_edges, :]
if np.unique(new_vertices, axis=0).shape[0] != new_vertices.shape[0]:
print("\nTaubin smoothing / mu: duplicated vertices at iteration", k)
new_vertices, faces = remove_duplicates(vertices=new_vertices, faces=faces)
vertices = np.copy(new_vertices)
neighbours = find_neighbours(
vertices, faces
)
indices_edges = detect_edges(
faces
)
for i in range(vertices.shape[0]):
indices = neighbours[i]
distances = np.sqrt(
np.sum((vertices[indices, :] - vertices[i, :]) ** 2, axis=1)
)
weights = distances ** (-1)
vectoren = weights[:, np.newaxis] * vertices[indices, :]
totaldist = sum(weights)
new_vertices[i, :] = vertices[i, :] - mu * (
sum(vectoren) / totaldist - vertices[i, :]
)
if indices_edges.size != 0:
new_vertices[indices_edges, :] = vertices[indices_edges, :]
if np.unique(new_vertices, axis=0).shape[0] != new_vertices.shape[0]:
print("\nTaubin smoothing / exiting loop: duplicated vertices")
new_vertices, faces = remove_duplicates(vertices=new_vertices, faces=faces)
nan_vertices = np.argwhere(np.isnan(new_vertices[:, 0]))
print(
"Number of nan in new_vertices:",
nan_vertices.shape[0],
"; Total number of vertices:",
new_vertices.shape[0],
)
tris = new_vertices[faces]
normals = np.cross(tris[:, 1] - tris[:, 0], tris[:, 2] - tris[::, 0])
areas = np.array([1 / 2 * np.linalg.norm(normal) for normal in normals])
normals_length = np.sqrt(
normals[:, 0] ** 2 + normals[:, 1] ** 2 + normals[:, 2] ** 2
)
normals = -1 * normals / normals_length[:, np.newaxis]
intensity = np.zeros(normals.shape[0], dtype=normals.dtype)
for i in range(normals.shape[0]):
distances = np.sqrt(
np.sum((normals - normals[i, :]) ** 2, axis=1)
)
intensity[i] = np.multiply(
areas[distances < radius], distances[distances < radius]
).sum()
intensity = intensity / max(intensity)
if debugging:
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(normals[:, 0], normals[:, 1], normals[:, 2], c=intensity, cmap=cmap)
ax.set_xlim(-1, 1)
ax.set_xlabel("z")
ax.set_ylim(-1, 1)
ax.set_ylabel("y")
ax.set_zlim(-1, 1)
ax.set_zlabel("x")
plt.title("Weighted point densities before KDE")
plt.pause(0.1)
err_normals = np.argwhere(np.isnan(normals[:, 0]))
normals[err_normals, :] = normals[err_normals - 1, :]
plt.ioff()
list_nan = np.argwhere(np.isnan(normals))
normals = np.delete(normals, list_nan[::3, 0], axis=0)
intensity = np.delete(intensity, list_nan[::3, 0], axis=0)
return new_vertices, normals, areas, intensity, faces, err_normals
def update_logfile(
    support,
    strain_array,
    summary_file,
    allpoints_file,
    label=0,
    angle_plane=np.nan,
    plane_coeffs=(0, 0, 0, 0),
    plane_normal=(0, 0, 0),
):
    """Write facet statistics to the per-point and summary log files.

    For every voxel of the support with a nonzero strain, one line is
    appended to ``allpoints_file`` (label, plane angle, strain, z, y, x).
    A single line with aggregate statistics (mean/std of the nonzero
    strain values) and the plane parameters is appended to ``summary_file``.

    :param support: 3D binary array defining the facet support
    :param strain_array: 3D array of strain values, same shape as ``support``
    :param summary_file: writable file-like object for the summary line
    :param allpoints_file: writable file-like object for the per-voxel lines
    :param label: integer label of the facet
    :param angle_plane: angle of the plane with the measurement direction
    :param plane_coeffs: (a, b, c, d) coefficients of the fitted plane
    :param plane_normal: (x, y, z) components of the plane normal
    :raises ValueError: if the arrays are not 3D
    """
    if (support.ndim != 3) or (strain_array.ndim != 3):
        raise ValueError("The support and the strain arrays should be 3D arrays")

    def _pad(text):
        # left-justify in a 10-character field, as in the historical format
        return "{0: <10}".format(text)

    ind_z, ind_y, ind_x = np.nonzero(support == 1)
    nb_points = ind_z.size
    for point in range(nb_points):
        piz, piy, pix = ind_z[point], ind_y[point], ind_x[point]
        local_strain = strain_array[piz, piy, pix]
        # voxels with exactly zero strain are considered invalid and skipped
        if local_strain != 0:
            allpoints_file.write(
                "\t".join(
                    (
                        _pad(str(label)),
                        _pad("{:.3f}".format(angle_plane)),
                        _pad("{:.7f}".format(local_strain)),
                        _pad(str(piz)),
                        _pad(str(piy)),
                        _pad(str(pix)),
                    )
                )
                + "\n"
            )

    # aggregate statistics over the nonzero strain values of the support
    str_array = strain_array[support == 1]
    str_array[str_array == 0] = np.nan  # exclude invalid voxels from the stats
    valid_values = str_array[~np.isnan(str_array)]
    support_strain = np.mean(valid_values)
    support_deviation = np.std(valid_values)

    fields = [
        _pad(str(label)),
        _pad("{:.3f}".format(angle_plane)),
        _pad(str(nb_points)),
        _pad("{:.7f}".format(support_strain)),
        _pad("{:.7f}".format(support_deviation)),
    ]
    fields.extend(_pad("{:.5f}".format(coeff)) for coeff in plane_coeffs)
    fields.extend(_pad("{:.5f}".format(component)) for component in plane_normal)
    summary_file.write("\t".join(fields) + "\n")
def upsample(array, upsampling_factor, voxelsizes=None, title="", debugging=False):
    """Upsample a 2D or 3D array by an integer factor via linear interpolation.

    The array is interpolated on a grid ``upsampling_factor`` times finer
    along every axis, centered like the original grid. Points that fall
    outside the original grid are filled with 0.

    :param array: the 2D or 3D array to upsample
    :param upsampling_factor: strictly positive integer, the factor applied
     to each dimension
    :param voxelsizes: voxel size along each axis of ``array``; defaults to
     1 along every axis
    :param title: title used in the debugging plots
    :param debugging: True to plot the array before and after upsampling
    :return: a tuple (upsampled array, list of new voxel sizes)
    """
    valid.valid_ndarray(array, ndim=(2, 3))
    ndim = array.ndim
    valid.valid_item(
        value=upsampling_factor,
        allowed_types=int,
        min_included=1,
        name="utils.upsample",
    )
    if voxelsizes is None:
        voxelsizes = (1,) * ndim
    valid.valid_container(
        voxelsizes,
        container_types=(list, tuple, np.ndarray),
        length=ndim,
        item_types=Real,
        min_excluded=0,
        name="utils.upsample",
    )

    vmin, vmax = array.min(), array.max()
    if debugging:
        # show the array before interpolation (different helper for 2D/3D)
        if ndim == 3:
            gu.multislices_plot(
                array,
                sum_frames=False,
                title=title + " before upsampling",
                vmin=vmin,
                vmax=vmax,
                scale="linear",
                plot_colorbar=True,
                reciprocal_space=False,
                is_orthogonal=True,
            )
        else:
            gu.imshow_plot(
                array,
                title=title + " before upsampling",
                vmin=vmin,
                vmax=vmax,
                scale="linear",
                plot_colorbar=True,
                reciprocal_space=False,
                is_orthogonal=True,
            )

    old_shape = array.shape
    new_shape = tuple(upsampling_factor * nbpix for nbpix in old_shape)
    newvoxelsizes = [voxsize / upsampling_factor for voxsize in voxelsizes]

    # centered coordinate axes of the original grid
    old_axes = tuple(
        np.arange(-nbpix // 2, nbpix // 2) * voxsize
        for nbpix, voxsize in zip(old_shape, voxelsizes)
    )
    # centered coordinates of the target (finer) grid, 'ij' indexing so the
    # axis order matches the array axes
    new_grid = np.meshgrid(
        *(
            np.arange(-nbpix // 2, nbpix // 2, 1) * voxsize
            for nbpix, voxsize in zip(new_shape, newvoxelsizes)
        ),
        indexing="ij",
    )

    rgi = RegularGridInterpolator(
        old_axes,
        array,
        method="linear",
        bounds_error=False,
        fill_value=0,
    )
    # interpolate on the flattened list of new-grid coordinates, then
    # restore the upsampled shape and the original dtype
    obj = rgi(
        np.concatenate(
            tuple(component.reshape((1, component.size)) for component in new_grid)
        ).transpose()
    )
    obj = obj.reshape(new_shape).astype(array.dtype)

    if debugging:
        if ndim == 3:
            gu.multislices_plot(
                obj,
                sum_frames=False,
                title=title + " after upsampling",
                vmin=vmin,
                vmax=vmax,
                scale="linear",
                plot_colorbar=True,
                reciprocal_space=False,
                is_orthogonal=True,
            )
        else:
            gu.imshow_plot(
                obj,
                title=title + " after upsampling",
                vmin=vmin,
                vmax=vmax,
                scale="linear",
                plot_colorbar=True,
                reciprocal_space=False,
                is_orthogonal=True,
            )
    return obj, newvoxelsizes
| true | true |
f7fcf6d6614c0a5eecfa0ac0d46e9ca33e2d8077 | 4,981 | py | Python | tests/test_sorting.py | deepsingh9868/pygorithm | c4aaa6acf15f78630d1fe8866e491ece4fdafef1 | [
"MIT"
] | 2 | 2020-08-26T10:00:46.000Z | 2020-08-27T19:47:57.000Z | tests/test_sorting.py | realChainLife/pygorithm | f72e666ee913836b95f09cced47bcc03ad68a05d | [
"MIT"
] | null | null | null | tests/test_sorting.py | realChainLife/pygorithm | f72e666ee913836b95f09cced47bcc03ad68a05d | [
"MIT"
] | null | null | null | import unittest
import random
from pygorithm.sorting import (
bubble_sort,
insertion_sort,
selection_sort,
merge_sort,
quick_sort,
counting_sort,
bucket_sort,
shell_sort,
heap_sort,
brick_sort,
tim_sort,
cocktail_sort,
gnome_sort
)
class TestSortingAlgorithm:
    """Mixin with shared tests for every sorting implementation.

    Concrete subclasses must also inherit ``unittest.TestCase`` (which
    provides the ``assert*`` methods used here) and must define:

    - ``sort``: a callable applying the algorithm under test to a list,
    - ``inplace``: True if ``sort`` mutates and returns its argument,
    - ``alph_support``: True if ``sort`` accepts characters as items.
    """
    def test_test_setup(self):
        # Guard against a subclass forgetting one of the required attributes.
        self.assertIsNotNone(getattr(self, 'sort', None))
        self.assertIsNotNone(getattr(self, 'inplace', None))
        self.assertIsNotNone(getattr(self, 'alph_support', None))
    def _check_sort_list(self, arr, expected):
        """Sort a copy of ``arr`` and verify result and in-place contract."""
        cp_arr = list(arr)
        sarr = self.sort(cp_arr)
        self.assertTrue(
            isinstance(sarr, list), 'weird result type: ' + str(type(sarr)))
        self.assertEqual(len(sarr), len(arr))
        self.assertEqual(sarr, expected)
        if self.inplace:
            # in-place algorithms must hand back the very same list object
            self.assertTrue(cp_arr is sarr, 'was not inplace')
        else:
            # out-of-place algorithms must leave their input untouched
            self.assertTrue(cp_arr is not sarr, 'was inplace')
            self.assertEqual(cp_arr, arr, 'inplace modified list')
    def _check_sort_alph(self, inp, expected):
        """Like _check_sort_list but for strings; no-op if unsupported."""
        if not self.alph_support:
            return
        self._check_sort_list(list(inp), list(expected))
    def test_sort_empty(self):
        self._check_sort_list([], [])
    def test_sort_single(self):
        self._check_sort_list([5], [5])
    def test_sort_single_alph(self):
        self._check_sort_alph('a', 'a')
    def test_sort_two_inorder(self):
        self._check_sort_list([1, 2], [1, 2])
    def test_sort_two_outoforder(self):
        self._check_sort_list([2, 1], [1, 2])
    def test_sort_5_random_numeric(self):
        arr = list(range(5))
        random.shuffle(arr)
        self._check_sort_list(arr, list(range(5)))
    def test_sort_15_random_numeric(self):
        arr = list(range(15))
        random.shuffle(arr)
        self._check_sort_list(arr, list(range(15)))
    def test_sort_5_random_alph(self):
        arr = ['a', 'b', 'c', 'd', 'e']
        random.shuffle(arr)
        self._check_sort_alph(''.join(arr), 'abcde')
    def test_sort_15_random_alph(self):
        # first 15 lowercase letters, shuffled; expected is the sorted run
        arr = [chr(ord('a') + i) for i in range(15)]
        exp = ''.join(arr)
        random.shuffle(arr)
        self._check_sort_alph(''.join(arr), exp)
class TestBubbleSort(unittest.TestCase, TestSortingAlgorithm):
inplace = True
alph_support = True
@staticmethod
def sort(arr):
return bubble_sort.sort(arr)
class TestInsertionSort(unittest.TestCase, TestSortingAlgorithm):
inplace = True
alph_support = True
@staticmethod
def sort(arr):
return insertion_sort.sort(arr)
class TestSelectionSort(unittest.TestCase, TestSortingAlgorithm):
inplace = True
alph_support = True
@staticmethod
def sort(arr):
return selection_sort.sort(arr)
class TestMergeSort(unittest.TestCase, TestSortingAlgorithm):
inplace = False
alph_support = True
@staticmethod
def sort(arr):
return merge_sort.sort(arr)
class TestQuickSort(unittest.TestCase, TestSortingAlgorithm):
inplace = False
alph_support = True
@staticmethod
def sort(arr):
return quick_sort.sort(arr)
class TestCountingSort(unittest.TestCase, TestSortingAlgorithm):
inplace = True
alph_support = False
@staticmethod
def sort(arr):
return counting_sort.sort(arr)
class TestBucketSort(unittest.TestCase, TestSortingAlgorithm):
inplace = False
alph_support = True
@staticmethod
def sort(arr):
return bucket_sort.sort(arr)
class TestShellSort(unittest.TestCase, TestSortingAlgorithm):
inplace = True
alph_support = True
@staticmethod
def sort(arr):
return shell_sort.sort(arr)
class TestHeapSort(unittest.TestCase, TestSortingAlgorithm):
inplace = True
alph_support = True
@staticmethod
def sort(arr):
return heap_sort.sort(arr)
class TestBrickSort(unittest.TestCase, TestSortingAlgorithm):
inplace = True
alph_support = True
@staticmethod
def sort(arr):
return brick_sort.brick_sort(arr)
class TestTimSort(unittest.TestCase, TestSortingAlgorithm):
inplace = True
alph_support = True
@staticmethod
def sort(arr):
# use a smaller run for testing
return tim_sort.tim_sort(arr, run=4)
class TestCocktailSort(unittest.TestCase, TestSortingAlgorithm):
inplace = True
alph_support = True
@staticmethod
def sort(arr):
return cocktail_sort.cocktail_sort(arr)
class TestGnomeSort(unittest.TestCase, TestSortingAlgorithm):
inplace = True
alph_support = True
@staticmethod
def sort(arr):
return gnome_sort.gnome_sort(arr)
if __name__ == '__main__':
unittest.main()
| 24.536946 | 77 | 0.636017 | import unittest
import random
from pygorithm.sorting import (
bubble_sort,
insertion_sort,
selection_sort,
merge_sort,
quick_sort,
counting_sort,
bucket_sort,
shell_sort,
heap_sort,
brick_sort,
tim_sort,
cocktail_sort,
gnome_sort
)
class TestSortingAlgorithm:
def test_test_setup(self):
self.assertIsNotNone(getattr(self, 'sort', None))
self.assertIsNotNone(getattr(self, 'inplace', None))
self.assertIsNotNone(getattr(self, 'alph_support', None))
def _check_sort_list(self, arr, expected):
cp_arr = list(arr)
sarr = self.sort(cp_arr)
self.assertTrue(
isinstance(sarr, list), 'weird result type: ' + str(type(sarr)))
self.assertEqual(len(sarr), len(arr))
self.assertEqual(sarr, expected)
if self.inplace:
self.assertTrue(cp_arr is sarr, 'was not inplace')
else:
self.assertTrue(cp_arr is not sarr, 'was inplace')
self.assertEqual(cp_arr, arr, 'inplace modified list')
def _check_sort_alph(self, inp, expected):
if not self.alph_support:
return
self._check_sort_list(list(inp), list(expected))
def test_sort_empty(self):
self._check_sort_list([], [])
def test_sort_single(self):
self._check_sort_list([5], [5])
def test_sort_single_alph(self):
self._check_sort_alph('a', 'a')
def test_sort_two_inorder(self):
self._check_sort_list([1, 2], [1, 2])
def test_sort_two_outoforder(self):
self._check_sort_list([2, 1], [1, 2])
def test_sort_5_random_numeric(self):
arr = list(range(5))
random.shuffle(arr)
self._check_sort_list(arr, list(range(5)))
def test_sort_15_random_numeric(self):
arr = list(range(15))
random.shuffle(arr)
self._check_sort_list(arr, list(range(15)))
def test_sort_5_random_alph(self):
arr = ['a', 'b', 'c', 'd', 'e']
random.shuffle(arr)
self._check_sort_alph(''.join(arr), 'abcde')
def test_sort_15_random_alph(self):
arr = [chr(ord('a') + i) for i in range(15)]
exp = ''.join(arr)
random.shuffle(arr)
self._check_sort_alph(''.join(arr), exp)
class TestBubbleSort(unittest.TestCase, TestSortingAlgorithm):
inplace = True
alph_support = True
@staticmethod
def sort(arr):
return bubble_sort.sort(arr)
class TestInsertionSort(unittest.TestCase, TestSortingAlgorithm):
inplace = True
alph_support = True
@staticmethod
def sort(arr):
return insertion_sort.sort(arr)
class TestSelectionSort(unittest.TestCase, TestSortingAlgorithm):
inplace = True
alph_support = True
@staticmethod
def sort(arr):
return selection_sort.sort(arr)
class TestMergeSort(unittest.TestCase, TestSortingAlgorithm):
inplace = False
alph_support = True
@staticmethod
def sort(arr):
return merge_sort.sort(arr)
class TestQuickSort(unittest.TestCase, TestSortingAlgorithm):
inplace = False
alph_support = True
@staticmethod
def sort(arr):
return quick_sort.sort(arr)
class TestCountingSort(unittest.TestCase, TestSortingAlgorithm):
inplace = True
alph_support = False
@staticmethod
def sort(arr):
return counting_sort.sort(arr)
class TestBucketSort(unittest.TestCase, TestSortingAlgorithm):
inplace = False
alph_support = True
@staticmethod
def sort(arr):
return bucket_sort.sort(arr)
class TestShellSort(unittest.TestCase, TestSortingAlgorithm):
inplace = True
alph_support = True
@staticmethod
def sort(arr):
return shell_sort.sort(arr)
class TestHeapSort(unittest.TestCase, TestSortingAlgorithm):
inplace = True
alph_support = True
@staticmethod
def sort(arr):
return heap_sort.sort(arr)
class TestBrickSort(unittest.TestCase, TestSortingAlgorithm):
inplace = True
alph_support = True
@staticmethod
def sort(arr):
return brick_sort.brick_sort(arr)
class TestTimSort(unittest.TestCase, TestSortingAlgorithm):
inplace = True
alph_support = True
@staticmethod
def sort(arr):
return tim_sort.tim_sort(arr, run=4)
class TestCocktailSort(unittest.TestCase, TestSortingAlgorithm):
inplace = True
alph_support = True
@staticmethod
def sort(arr):
return cocktail_sort.cocktail_sort(arr)
class TestGnomeSort(unittest.TestCase, TestSortingAlgorithm):
inplace = True
alph_support = True
@staticmethod
def sort(arr):
return gnome_sort.gnome_sort(arr)
if __name__ == '__main__':
unittest.main()
| true | true |
f7fcf6f5099c3dc6ff35609546fe2c90c186de2a | 6,318 | py | Python | tests/regressiontests/generic_views/views.py | kix/django | 5262a288df07daa050a0e17669c3f103f47a8640 | [
"BSD-3-Clause"
] | 3 | 2015-10-14T09:13:48.000Z | 2021-01-01T06:31:25.000Z | tests/regressiontests/generic_views/views.py | kix/django | 5262a288df07daa050a0e17669c3f103f47a8640 | [
"BSD-3-Clause"
] | 1 | 2016-02-19T00:22:18.000Z | 2016-02-19T00:22:18.000Z | tests/regressiontests/generic_views/views.py | kix/django | 5262a288df07daa050a0e17669c3f103f47a8640 | [
"BSD-3-Clause"
] | 1 | 2019-07-15T02:35:16.000Z | 2019-07-15T02:35:16.000Z | from __future__ import absolute_import
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse
from django.utils.decorators import method_decorator
from django.views import generic
from .forms import AuthorForm
from .models import Artist, Author, Book, Page, BookSigning
class CustomTemplateView(generic.TemplateView):
    """TemplateView fixture that injects an extra ``key`` entry into the context."""
    template_name = 'generic_views/about.html'
    def get_context_data(self, **kwargs):
        # build the default context, then extend it with the test entry
        context = super(CustomTemplateView, self).get_context_data(**kwargs)
        context['key'] = 'value'
        return context
class ObjectDetail(generic.DetailView):
template_name = 'generic_views/detail.html'
def get_object(self):
return {'foo': 'bar'}
class ArtistDetail(generic.DetailView):
queryset = Artist.objects.all()
class AuthorDetail(generic.DetailView):
queryset = Author.objects.all()
class PageDetail(generic.DetailView):
queryset = Page.objects.all()
template_name_field = 'template'
class DictList(generic.ListView):
"""A ListView that doesn't use a model."""
queryset = [
{'first': 'John', 'last': 'Lennon'},
{'first': 'Yoko', 'last': 'Ono'}
]
template_name = 'generic_views/list.html'
class ArtistList(generic.ListView):
template_name = 'generic_views/list.html'
queryset = Artist.objects.all()
class AuthorList(generic.ListView):
queryset = Author.objects.all()
class CustomPaginator(Paginator):
    """Paginator fixture used to check that views honour a custom paginator class.

    NOTE(review): the ``orphans`` argument is accepted but ignored — the value
    is hard-coded to 2 in the super() call. This looks deliberate (it makes the
    custom class observable from the tests), but confirm before reusing.
    """
    def __init__(self, queryset, page_size, orphans=0, allow_empty_first_page=True):
        super(CustomPaginator, self).__init__(
            queryset,
            page_size,
            orphans=2,  # hard-coded; the ``orphans`` parameter above is unused
            allow_empty_first_page=allow_empty_first_page)
class AuthorListCustomPaginator(AuthorList):
paginate_by = 5
def get_paginator(self, queryset, page_size, orphans=0, allow_empty_first_page=True):
return super(AuthorListCustomPaginator, self).get_paginator(
queryset,
page_size,
orphans=2,
allow_empty_first_page=allow_empty_first_page)
class ArtistCreate(generic.CreateView):
model = Artist
class NaiveAuthorCreate(generic.CreateView):
queryset = Author.objects.all()
class AuthorCreate(generic.CreateView):
model = Author
success_url = '/list/authors/'
class SpecializedAuthorCreate(generic.CreateView):
model = Author
form_class = AuthorForm
template_name = 'generic_views/form.html'
context_object_name = 'thingy'
def get_success_url(self):
return reverse('author_detail', args=[self.object.id,])
class AuthorCreateRestricted(AuthorCreate):
post = method_decorator(login_required)(AuthorCreate.post)
class ArtistUpdate(generic.UpdateView):
model = Artist
class NaiveAuthorUpdate(generic.UpdateView):
queryset = Author.objects.all()
class AuthorUpdate(generic.UpdateView):
model = Author
success_url = '/list/authors/'
class OneAuthorUpdate(generic.UpdateView):
success_url = '/list/authors/'
def get_object(self):
return Author.objects.get(pk=1)
class SpecializedAuthorUpdate(generic.UpdateView):
model = Author
form_class = AuthorForm
template_name = 'generic_views/form.html'
context_object_name = 'thingy'
def get_success_url(self):
return reverse('author_detail', args=[self.object.id,])
class NaiveAuthorDelete(generic.DeleteView):
queryset = Author.objects.all()
class AuthorDelete(generic.DeleteView):
model = Author
success_url = '/list/authors/'
class SpecializedAuthorDelete(generic.DeleteView):
queryset = Author.objects.all()
template_name = 'generic_views/confirm_delete.html'
context_object_name = 'thingy'
def get_success_url(self):
return reverse('authors_list')
class BookConfig(object):
queryset = Book.objects.all()
date_field = 'pubdate'
class BookArchive(BookConfig, generic.ArchiveIndexView):
pass
class BookYearArchive(BookConfig, generic.YearArchiveView):
pass
class BookMonthArchive(BookConfig, generic.MonthArchiveView):
pass
class BookWeekArchive(BookConfig, generic.WeekArchiveView):
pass
class BookDayArchive(BookConfig, generic.DayArchiveView):
pass
class BookTodayArchive(BookConfig, generic.TodayArchiveView):
pass
class BookDetail(BookConfig, generic.DateDetailView):
pass
class AuthorGetQuerySetFormView(generic.edit.ModelFormMixin):
def get_queryset(self):
return Author.objects.all()
class BookDetailGetObjectCustomQueryset(BookDetail):
def get_object(self, queryset=None):
return super(BookDetailGetObjectCustomQueryset,self).get_object(
queryset=Book.objects.filter(pk=2))
class CustomContextView(generic.detail.SingleObjectMixin, generic.View):
model = Book
object = Book(name='dummy')
def get_object(self):
return Book(name="dummy")
def get_context_data(self, **kwargs):
context = {'custom_key': 'custom_value'}
context.update(kwargs)
return super(CustomContextView, self).get_context_data(**context)
def get_context_object_name(self, obj):
return "test_name"
class BookSigningConfig(object):
model = BookSigning
date_field = 'event_date'
# use the same templates as for books
def get_template_names(self):
return ['generic_views/book%s.html' % self.template_name_suffix]
class BookSigningArchive(BookSigningConfig, generic.ArchiveIndexView):
pass
class BookSigningYearArchive(BookSigningConfig, generic.YearArchiveView):
pass
class BookSigningMonthArchive(BookSigningConfig, generic.MonthArchiveView):
pass
class BookSigningWeekArchive(BookSigningConfig, generic.WeekArchiveView):
pass
class BookSigningDayArchive(BookSigningConfig, generic.DayArchiveView):
pass
class BookSigningTodayArchive(BookSigningConfig, generic.TodayArchiveView):
pass
class BookSigningDetail(BookSigningConfig, generic.DateDetailView):
context_object_name = 'book'
class NonModel(object):
id = "non_model_1"
_meta = None
class NonModelDetail(generic.DetailView):
template_name = 'generic_views/detail.html'
model = NonModel
def get_object(self, queryset=None):
return NonModel()
| 25.893443 | 89 | 0.731719 | from __future__ import absolute_import
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse
from django.utils.decorators import method_decorator
from django.views import generic
from .forms import AuthorForm
from .models import Artist, Author, Book, Page, BookSigning
class CustomTemplateView(generic.TemplateView):
template_name = 'generic_views/about.html'
def get_context_data(self, **kwargs):
context = super(CustomTemplateView, self).get_context_data(**kwargs)
context.update({'key': 'value'})
return context
class ObjectDetail(generic.DetailView):
template_name = 'generic_views/detail.html'
def get_object(self):
return {'foo': 'bar'}
class ArtistDetail(generic.DetailView):
queryset = Artist.objects.all()
class AuthorDetail(generic.DetailView):
queryset = Author.objects.all()
class PageDetail(generic.DetailView):
queryset = Page.objects.all()
template_name_field = 'template'
class DictList(generic.ListView):
queryset = [
{'first': 'John', 'last': 'Lennon'},
{'first': 'Yoko', 'last': 'Ono'}
]
template_name = 'generic_views/list.html'
class ArtistList(generic.ListView):
template_name = 'generic_views/list.html'
queryset = Artist.objects.all()
class AuthorList(generic.ListView):
queryset = Author.objects.all()
class CustomPaginator(Paginator):
def __init__(self, queryset, page_size, orphans=0, allow_empty_first_page=True):
super(CustomPaginator, self).__init__(
queryset,
page_size,
orphans=2,
allow_empty_first_page=allow_empty_first_page)
class AuthorListCustomPaginator(AuthorList):
paginate_by = 5
def get_paginator(self, queryset, page_size, orphans=0, allow_empty_first_page=True):
return super(AuthorListCustomPaginator, self).get_paginator(
queryset,
page_size,
orphans=2,
allow_empty_first_page=allow_empty_first_page)
class ArtistCreate(generic.CreateView):
model = Artist
class NaiveAuthorCreate(generic.CreateView):
queryset = Author.objects.all()
class AuthorCreate(generic.CreateView):
model = Author
success_url = '/list/authors/'
class SpecializedAuthorCreate(generic.CreateView):
model = Author
form_class = AuthorForm
template_name = 'generic_views/form.html'
context_object_name = 'thingy'
def get_success_url(self):
return reverse('author_detail', args=[self.object.id,])
class AuthorCreateRestricted(AuthorCreate):
post = method_decorator(login_required)(AuthorCreate.post)
class ArtistUpdate(generic.UpdateView):
model = Artist
class NaiveAuthorUpdate(generic.UpdateView):
queryset = Author.objects.all()
class AuthorUpdate(generic.UpdateView):
model = Author
success_url = '/list/authors/'
class OneAuthorUpdate(generic.UpdateView):
success_url = '/list/authors/'
def get_object(self):
return Author.objects.get(pk=1)
class SpecializedAuthorUpdate(generic.UpdateView):
model = Author
form_class = AuthorForm
template_name = 'generic_views/form.html'
context_object_name = 'thingy'
def get_success_url(self):
return reverse('author_detail', args=[self.object.id,])
class NaiveAuthorDelete(generic.DeleteView):
queryset = Author.objects.all()
class AuthorDelete(generic.DeleteView):
model = Author
success_url = '/list/authors/'
class SpecializedAuthorDelete(generic.DeleteView):
queryset = Author.objects.all()
template_name = 'generic_views/confirm_delete.html'
context_object_name = 'thingy'
def get_success_url(self):
return reverse('authors_list')
class BookConfig(object):
queryset = Book.objects.all()
date_field = 'pubdate'
class BookArchive(BookConfig, generic.ArchiveIndexView):
pass
class BookYearArchive(BookConfig, generic.YearArchiveView):
pass
class BookMonthArchive(BookConfig, generic.MonthArchiveView):
pass
class BookWeekArchive(BookConfig, generic.WeekArchiveView):
pass
class BookDayArchive(BookConfig, generic.DayArchiveView):
pass
class BookTodayArchive(BookConfig, generic.TodayArchiveView):
pass
class BookDetail(BookConfig, generic.DateDetailView):
pass
class AuthorGetQuerySetFormView(generic.edit.ModelFormMixin):
    # Supplies the queryset via get_queryset() rather than ``model``/``queryset``.
    def get_queryset(self):
        return Author.objects.all()
class BookDetailGetObjectCustomQueryset(BookDetail):
    def get_object(self, queryset=None):
        # Force a custom queryset (pk=2) regardless of the one passed in.
        return super(BookDetailGetObjectCustomQueryset,self).get_object(
            queryset=Book.objects.filter(pk=2))
class CustomContextView(generic.detail.SingleObjectMixin, generic.View):
    model = Book
    object = Book(name='dummy')
    def get_object(self):
        return Book(name="dummy")
    def get_context_data(self, **kwargs):
        # Merge a fixed custom key with whatever keys the caller supplied.
        context = {'custom_key': 'custom_value'}
        context.update(kwargs)
        return super(CustomContextView, self).get_context_data(**context)
    def get_context_object_name(self, obj):
        return "test_name"
class BookSigningConfig(object):
    # Shared configuration for the BookSigning date-based views below.
    model = BookSigning
    date_field = 'event_date'
    def get_template_names(self):
        # e.g. 'generic_views/book_archive.html' for the archive view.
        return ['generic_views/book%s.html' % self.template_name_suffix]
# One fixture per date-based generic view, all sharing BookSigningConfig.
class BookSigningArchive(BookSigningConfig, generic.ArchiveIndexView):
    pass
class BookSigningYearArchive(BookSigningConfig, generic.YearArchiveView):
    pass
class BookSigningMonthArchive(BookSigningConfig, generic.MonthArchiveView):
    pass
class BookSigningWeekArchive(BookSigningConfig, generic.WeekArchiveView):
    pass
class BookSigningDayArchive(BookSigningConfig, generic.DayArchiveView):
    pass
class BookSigningTodayArchive(BookSigningConfig, generic.TodayArchiveView):
    pass
class BookSigningDetail(BookSigningConfig, generic.DateDetailView):
    context_object_name = 'book'
class NonModel(object):
    # Duck-typed stand-in for a model instance (note _meta is None, not real).
    id = "non_model_1"
    _meta = None
class NonModelDetail(generic.DetailView):
    # DetailView over a non-model object; get_object bypasses any queryset.
    template_name = 'generic_views/detail.html'
    model = NonModel
    def get_object(self, queryset=None):
        return NonModel()
| true | true |
f7fcf6f7e2eba96181cf6d743971d1ed2037837c | 1,605 | py | Python | regression/main.py | vinnamkim/examples | aa6b820964fd903a0b5795a7d40ffd332d83e6d7 | [
"BSD-3-Clause"
] | 20 | 2018-07-27T15:14:44.000Z | 2022-03-10T06:44:46.000Z | regression/main.py | JingshuaiLiu/examples | f1838619141d7f2a2553f7282c642a6f51a4df48 | [
"BSD-3-Clause"
] | 1 | 2020-11-18T22:15:54.000Z | 2020-11-18T22:15:54.000Z | regression/main.py | JingshuaiLiu/examples | f1838619141d7f2a2553f7282c642a6f51a4df48 | [
"BSD-3-Clause"
] | 19 | 2018-07-27T07:42:22.000Z | 2021-05-12T01:36:10.000Z | #!/usr/bin/env python
from __future__ import print_function
from itertools import count
import torch
import torch.nn.functional as F
POLY_DEGREE = 4
W_target = torch.randn(POLY_DEGREE, 1) * 5
b_target = torch.randn(1) * 5
def make_features(x):
    """Builds features i.e. a matrix with columns [x, x^2, x^3, x^4]."""
    column = x.unsqueeze(1)
    powers = [column ** degree for degree in range(1, POLY_DEGREE + 1)]
    return torch.cat(powers, 1)
def f(x):
    """Approximated function."""
    linear_part = x.mm(W_target)
    return linear_part + b_target.item()
def poly_desc(W, b):
    """Creates a string description of a polynomial."""
    pieces = ['y = ']
    degree = len(W)
    for offset, coeff in enumerate(W):
        pieces.append('{:+.2f} x^{} '.format(coeff, degree - offset))
    pieces.append('{:+.2f}'.format(b[0]))
    return ''.join(pieces)
def get_batch(batch_size=32):
    """Builds a batch i.e. (x, f(x)) pair."""
    samples = torch.randn(batch_size)
    features = make_features(samples)
    targets = f(features)
    return features, targets
# Define model: a single linear layer fitting the polynomial coefficients.
fc = torch.nn.Linear(W_target.size(0), 1)
for batch_idx in count(1):
    # Get a fresh random batch
    batch_x, batch_y = get_batch()
    # Reset gradients
    fc.zero_grad()
    # Forward pass
    output = F.smooth_l1_loss(fc(batch_x), batch_y)
    loss = output.item()
    # Backward pass
    output.backward()
    # Apply gradients: plain SGD with a fixed 0.1 learning rate
    for param in fc.parameters():
        param.data.add_(-0.1 * param.grad.data)
    # Stop criterion
    if loss < 1e-3:
        break
print('Loss: {:.6f} after {} batches'.format(loss, batch_idx))
print('==> Learned function:\t' + poly_desc(fc.weight.view(-1), fc.bias))
print('==> Actual function:\t' + poly_desc(W_target.view(-1), b_target))
| 23.26087 | 73 | 0.623053 |
from __future__ import print_function
from itertools import count
import torch
import torch.nn.functional as F
POLY_DEGREE = 4
W_target = torch.randn(POLY_DEGREE, 1) * 5
b_target = torch.randn(1) * 5
def make_features(x):
x = x.unsqueeze(1)
return torch.cat([x ** i for i in range(1, POLY_DEGREE+1)], 1)
def f(x):
return x.mm(W_target) + b_target.item()
def poly_desc(W, b):
result = 'y = '
for i, w in enumerate(W):
result += '{:+.2f} x^{} '.format(w, len(W) - i)
result += '{:+.2f}'.format(b[0])
return result
def get_batch(batch_size=32):
random = torch.randn(batch_size)
x = make_features(random)
y = f(x)
return x, y
fc = torch.nn.Linear(W_target.size(0), 1)
for batch_idx in count(1):
batch_x, batch_y = get_batch()
fc.zero_grad()
output = F.smooth_l1_loss(fc(batch_x), batch_y)
loss = output.item()
output.backward()
for param in fc.parameters():
param.data.add_(-0.1 * param.grad.data)
if loss < 1e-3:
break
print('Loss: {:.6f} after {} batches'.format(loss, batch_idx))
print('==> Learned function:\t' + poly_desc(fc.weight.view(-1), fc.bias))
print('==> Actual function:\t' + poly_desc(W_target.view(-1), b_target))
| true | true |
f7fcf71f4c1ce16d87fdc9952fba51e30607b51d | 6,221 | py | Python | first_strike/controller_helpers.py | WoolleySheep/first-strike | 15f93cccba3cfdb2c2b75524004a08bfef42235a | [
"MIT"
] | null | null | null | first_strike/controller_helpers.py | WoolleySheep/first-strike | 15f93cccba3cfdb2c2b75524004a08bfef42235a | [
"MIT"
] | null | null | null | first_strike/controller_helpers.py | WoolleySheep/first-strike | 15f93cccba3cfdb2c2b75524004a08bfef42235a | [
"MIT"
] | null | null | null | """
Collection of functions that are useful in the construction of both
rocket and turret controllers.
"""
import math
from dataclasses import dataclass
from typing import Optional
from helpers import Helpers
from history import History
from math_helpers import normalise_angle
from parameters import ObstacleParameters, Parameters
from physics import Physics
@dataclass
class ControllerHelpers:
    """Geometry helpers shared by rocket and turret controllers."""

    parameters: Parameters
    history: History
    physics: Physics
    helpers: Helpers

    def firing_angle2hit_rocket(self) -> Optional[float]:
        """
        Calculates the firing angle to hit the rocket.

        If the turret was currently at said angle and fired a projectile, and
        the rocket continued at its current velocity, then the projectile
        would hit the target.  Solves an equation of the form
        a*sin(x) + b*cos(x) = c; see
        https://math.stackexchange.com/questions/213545/solving-trigonometric-equations-of-the-form-a-sin-x-b-cos-x-c

        return:
            firing angle (rad): The angle as described above
            Will return None if no firing angle is possible due to high rocket velocity
        """
        projectile_speed = self.parameters.turret.projectile_speed
        turret_location = self.parameters.turret.location
        rocket_velocity = self.physics.calc_rocket_velocity()
        rocket_location = self.history.rocket.location

        try:
            # Slope of the line joining the rocket and the turret.
            k = (turret_location.y - rocket_location.y) / (
                turret_location.x - rocket_location.x
            )
        except ZeroDivisionError: # k = inf (rocket and turret share an x coordinate)
            try:
                m = math.asin(rocket_velocity.x / projectile_speed)
            except ValueError: # Intercept is not possible due to rocket velocity
                return None
            beta = math.pi / 2
        else:
            try:
                a = -projectile_speed
                b = k * projectile_speed
                c = k * rocket_velocity.x - rocket_velocity.y
                m = math.asin(c / math.sqrt(a ** 2 + b ** 2))
            except ValueError: # Intercept is not possible due to rocket velocity
                return None
            A = a / math.sqrt(a ** 2 + b ** 2)
            B = b / math.sqrt(a ** 2 + b ** 2)
            beta = math.atan2(B, A)

        # asin yields two candidate solutions; return whichever one actually
        # produces an intercept.
        firing_angle = normalise_angle(m - beta)
        if self.will_firing_angle_hit(firing_angle):
            return firing_angle
        return normalise_angle(math.pi - m - beta)

    def will_firing_angle_hit(self, theta: float) -> bool:
        """Checks if the current firing angle will hit the rocket.

        If the turret was a angle theta, and a projectile was fired, and the rocket
        continued at its current velocity, would the projectile intercept the rocket.

        args:
            theta (rad): The firing angle of the turret
        return:
            _: Projectile will intercept rocket
        """
        rocket_location = self.history.rocket.location
        rocket_velocity = self.physics.calc_rocket_velocity()
        turret_location = self.parameters.turret.location
        projectile_speed = self.parameters.turret.projectile_speed

        # Time until the x coordinates coincide; ZeroDivisionError means the x
        # velocities are equal, in which case they only meet if already level.
        try:
            x_intercept_time = (turret_location.x - rocket_location.x) / (
                rocket_velocity.x - projectile_speed * math.cos(theta)
            )
        except ZeroDivisionError:
            x_velocities_equal = True
            x_intercepts = math.isclose(turret_location.x, rocket_location.x)
        else:
            x_velocities_equal = False
            x_intercepts = x_intercept_time >= 0

        # Same reasoning for the y coordinates.
        try:
            y_intercept_time = (turret_location.y - rocket_location.y) / (
                rocket_velocity.y - projectile_speed * math.sin(theta)
            )
        except ZeroDivisionError:
            y_velocities_equal = True
            y_intercepts = math.isclose(turret_location.y, rocket_location.y)
        else:
            y_velocities_equal = False
            y_intercepts = y_intercept_time >= 0

        # Hit iff both axes intercept and either the times agree or exactly one
        # axis is permanently aligned.
        # NOTE(review): if exactly one axis has equal velocities AND is aligned,
        # that axis' *_intercept_time is unbound and math.isclose below would
        # raise NameError before the `or` short-circuits — verify this path.
        return (
            not (x_velocities_equal and y_velocities_equal)
            and x_intercepts
            and y_intercepts
            and (
                math.isclose(x_intercept_time, y_intercept_time)
                or (x_velocities_equal is not y_velocities_equal)
            )
        )

    # Miscellaneous helper functions
    def calc_position_relative2rocket(self, location):
        # Vector from the rocket to ``location``.
        return location - self.history.rocket.location

    def calc_angle2position_relative2rocket(self, location):
        return self.calc_position_relative2rocket(location).angle

    def calc_distance_between_rocket_and_position(self, location):
        return self.calc_position_relative2rocket(location).magnitude

    def calc_turret_position_relative2rocket(self):
        turret_location = self.parameters.turret.location
        return self.calc_position_relative2rocket(turret_location)

    def calc_dist_between_rocket_and_turret(self):
        turret_location = self.parameters.turret.location
        return self.calc_distance_between_rocket_and_position(turret_location)

    def calc_angle2turret_relative2rocket(self):
        turret_location = self.parameters.turret.location
        return self.calc_angle2position_relative2rocket(turret_location)

    def calc_projectile_location_relative2rocket(self, projectile):
        projectile_location = self.helpers.calc_projectile_location(projectile)
        return self.calc_position_relative2rocket(projectile_location)

    def calc_dist_between_rocket_and_projectile(self, projectile):
        return self.calc_projectile_location_relative2rocket(projectile).magnitude

    def calc_angle2projectile_relative2rocket(self, projectile):
        return self.calc_projectile_location_relative2rocket(projectile).angle

    def calc_obstacle_location_relative2rocket(self, obstacle: ObstacleParameters):
        return obstacle.location - self.history.rocket.location

    def calc_dist_between_rocket_and_obstacle(self, obstacle: ObstacleParameters):
        return self.calc_obstacle_location_relative2rocket(obstacle).magnitude

    def calc_angle_from_rocket2obstacle(self, obstacle: ObstacleParameters):
        return self.calc_obstacle_location_relative2rocket(obstacle).angle
| 34.949438 | 123 | 0.678669 |
import math
from dataclasses import dataclass
from typing import Optional
from helpers import Helpers
from history import History
from math_helpers import normalise_angle
from parameters import ObstacleParameters, Parameters
from physics import Physics
@dataclass
class ControllerHelpers:
parameters: Parameters
history: History
physics: Physics
helpers: Helpers
def firing_angle2hit_rocket(self) -> Optional[float]:
projectile_speed = self.parameters.turret.projectile_speed
turret_location = self.parameters.turret.location
rocket_velocity = self.physics.calc_rocket_velocity()
rocket_location = self.history.rocket.location
try:
k = (turret_location.y - rocket_location.y) / (
turret_location.x - rocket_location.x
)
except ZeroDivisionError:
try:
m = math.asin(rocket_velocity.x / projectile_speed)
except ValueError:
return None
beta = math.pi / 2
else:
try:
a = -projectile_speed
b = k * projectile_speed
c = k * rocket_velocity.x - rocket_velocity.y
m = math.asin(c / math.sqrt(a ** 2 + b ** 2))
except ValueError:
return None
A = a / math.sqrt(a ** 2 + b ** 2)
B = b / math.sqrt(a ** 2 + b ** 2)
beta = math.atan2(B, A)
firing_angle = normalise_angle(m - beta)
if self.will_firing_angle_hit(firing_angle):
return firing_angle
return normalise_angle(math.pi - m - beta)
def will_firing_angle_hit(self, theta: float) -> bool:
rocket_location = self.history.rocket.location
rocket_velocity = self.physics.calc_rocket_velocity()
turret_location = self.parameters.turret.location
projectile_speed = self.parameters.turret.projectile_speed
try:
x_intercept_time = (turret_location.x - rocket_location.x) / (
rocket_velocity.x - projectile_speed * math.cos(theta)
)
except ZeroDivisionError:
x_velocities_equal = True
x_intercepts = math.isclose(turret_location.x, rocket_location.x)
else:
x_velocities_equal = False
x_intercepts = x_intercept_time >= 0
try:
y_intercept_time = (turret_location.y - rocket_location.y) / (
rocket_velocity.y - projectile_speed * math.sin(theta)
)
except ZeroDivisionError:
y_velocities_equal = True
y_intercepts = math.isclose(turret_location.y, rocket_location.y)
else:
y_velocities_equal = False
y_intercepts = y_intercept_time >= 0
return (
not (x_velocities_equal and y_velocities_equal)
and x_intercepts
and y_intercepts
and (
math.isclose(x_intercept_time, y_intercept_time)
or (x_velocities_equal is not y_velocities_equal)
)
)
def calc_position_relative2rocket(self, location):
return location - self.history.rocket.location
def calc_angle2position_relative2rocket(self, location):
return self.calc_position_relative2rocket(location).angle
def calc_distance_between_rocket_and_position(self, location):
return self.calc_position_relative2rocket(location).magnitude
def calc_turret_position_relative2rocket(self):
turret_location = self.parameters.turret.location
return self.calc_position_relative2rocket(turret_location)
def calc_dist_between_rocket_and_turret(self):
turret_location = self.parameters.turret.location
return self.calc_distance_between_rocket_and_position(turret_location)
def calc_angle2turret_relative2rocket(self):
turret_location = self.parameters.turret.location
return self.calc_angle2position_relative2rocket(turret_location)
def calc_projectile_location_relative2rocket(self, projectile):
projectile_location = self.helpers.calc_projectile_location(projectile)
return self.calc_position_relative2rocket(projectile_location)
def calc_dist_between_rocket_and_projectile(self, projectile):
return self.calc_projectile_location_relative2rocket(projectile).magnitude
def calc_angle2projectile_relative2rocket(self, projectile):
return self.calc_projectile_location_relative2rocket(projectile).angle
def calc_obstacle_location_relative2rocket(self, obstacle: ObstacleParameters):
return obstacle.location - self.history.rocket.location
def calc_dist_between_rocket_and_obstacle(self, obstacle: ObstacleParameters):
return self.calc_obstacle_location_relative2rocket(obstacle).magnitude
def calc_angle_from_rocket2obstacle(self, obstacle: ObstacleParameters):
return self.calc_obstacle_location_relative2rocket(obstacle).angle
| true | true |
f7fcf7559948b6752dd0ee377be44bd42c092522 | 351 | py | Python | forest_lite/server/lib/palette.py | uk-gov-mirror/MetOffice.forest-lite | 9406b53f7e6a9651eb675e0ac2e5945421b25557 | [
"BSD-3-Clause"
] | 6 | 2020-08-05T16:12:57.000Z | 2022-01-06T01:34:19.000Z | forest_lite/server/lib/palette.py | uk-gov-mirror/MetOffice.forest-lite | 9406b53f7e6a9651eb675e0ac2e5945421b25557 | [
"BSD-3-Clause"
] | 49 | 2020-08-14T13:58:32.000Z | 2021-06-29T11:42:32.000Z | forest_lite/server/lib/palette.py | uk-gov-mirror/MetOffice.forest-lite | 9406b53f7e6a9651eb675e0ac2e5945421b25557 | [
"BSD-3-Clause"
] | 2 | 2020-12-03T09:24:13.000Z | 2021-04-11T06:10:36.000Z | import bokeh.palettes
def all_palettes():
    """Yield every Bokeh palette as a dict of name, size and colour list."""
    registry = bokeh.palettes.all_palettes
    for name, by_size in registry.items():
        for number, colors in by_size.items():
            yield {
                "name": name,
                "number": number,
                "palette": colors
            }
| 27 | 68 | 0.566952 | import bokeh.palettes
def all_palettes():
for name in bokeh.palettes.all_palettes:
for number in bokeh.palettes.all_palettes[name]:
yield {
"name": name,
"number": number,
"palette": bokeh.palettes.all_palettes[name][number]
}
| true | true |
f7fcf941f3cd4af62f3f9eb6cc7c272feecf370e | 841 | py | Python | salt/runners/http.py | dr4Ke/salt | 8ffa4903c9ed10c81e1a6c7b967dc9532f320c0b | [
"Apache-2.0"
] | 1 | 2015-08-20T21:55:17.000Z | 2015-08-20T21:55:17.000Z | salt/runners/http.py | dr4Ke/salt | 8ffa4903c9ed10c81e1a6c7b967dc9532f320c0b | [
"Apache-2.0"
] | null | null | null | salt/runners/http.py | dr4Ke/salt | 8ffa4903c9ed10c81e1a6c7b967dc9532f320c0b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Module for making various web calls. Primarily designed for webhooks and the
like, but also useful for basic http testing.
'''
# Import salt libs
import salt.output
import salt.utils.http
def query(url, output=True, **kwargs):
    '''
    Query a resource, and decode the return data

    CLI Example:

    .. code-block:: bash

        salt-run http.query http://somelink.com/
        salt-run http.query http://somelink.com/ method=POST \
            params='key1=val1&key2=val2'
        salt-run http.query http://somelink.com/ method=POST \
            data='<xml>somecontent</xml>'
    '''
    # Runner context: default to querying from the master.
    kwargs.setdefault('node', 'master')
    ret = salt.utils.http.query(url=url, opts=__opts__, **kwargs)
    if output:
        salt.output.display_output(ret, '', __opts__)
    return ret
| 24.735294 | 76 | 0.631391 |
import salt.output
import salt.utils.http
def query(url, output=True, **kwargs):
if 'node' not in kwargs:
kwargs['node'] = 'master'
ret = salt.utils.http.query(url=url, opts=__opts__, **kwargs)
if output:
salt.output.display_output(ret, '', __opts__)
return ret
| true | true |
f7fcf96d3c14550d50ca53e82769bf24822182de | 2,034 | py | Python | src/z3c/autoinclude/dependency.py | datakurre/z3c.autoinclude | 5e42815357d97d2704bff61f3eb7ed2ddc891907 | [
"ZPL-2.1"
] | null | null | null | src/z3c/autoinclude/dependency.py | datakurre/z3c.autoinclude | 5e42815357d97d2704bff61f3eb7ed2ddc891907 | [
"ZPL-2.1"
] | null | null | null | src/z3c/autoinclude/dependency.py | datakurre/z3c.autoinclude | 5e42815357d97d2704bff61f3eb7ed2ddc891907 | [
"ZPL-2.1"
] | null | null | null | import os
import logging
from zope.dottedname.resolve import resolve
from pkg_resources import resource_exists
from pkg_resources import get_provider
from pkg_resources import get_distribution
from z3c.autoinclude.utils import DistributionManager
from z3c.autoinclude.utils import ZCMLInfo
class DependencyFinder(DistributionManager):
    """Finds includable ZCML files in the requirements of a distribution."""

    def includableInfo(self, zcml_to_look_for):
        """Return the packages in the dependencies which are includable.

        zcml_to_look_for - a list of zcml filenames we are looking for

        Returns a dictionary with the include candidates as keys, and lists
        of dotted names of packages that contain the include candidates as
        values.
        """
        result = ZCMLInfo(zcml_to_look_for)
        for req in self.context.requires():
            dist_manager = DistributionManager(get_provider(req))
            for dotted_name in dist_manager.dottedNames():
                try:
                    module = resolve(dotted_name)
                except ImportError as exc:
                    # Unimportable packages are skipped, not fatal.
                    # ``warning`` (the ``warn`` alias is deprecated); lazy
                    # %-style args avoid formatting when the level is off.
                    logging.getLogger("z3c.autoinclude").warning(
                        "resolve(%r) raised import error: %s", dotted_name, exc)
                    continue
                for candidate in zcml_to_look_for:
                    candidate_path = os.path.join(
                        os.path.dirname(module.__file__), candidate)
                    if os.path.isfile(candidate_path):
                        result[candidate].append(dotted_name)
        return result
def package_includes(project_name, zcml_filenames=None):
    """
    Convenience function for finding zcml to load from requirements for
    a given project. Takes a project name. DistributionNotFound errors
    will be raised for uninstalled projects.
    """
    filenames = zcml_filenames
    if filenames is None:
        filenames = ['meta.zcml', 'configure.zcml', 'overrides.zcml']
    finder = DependencyFinder(get_distribution(project_name))
    return finder.includableInfo(filenames)
| 41.510204 | 83 | 0.67355 | import os
import logging
from zope.dottedname.resolve import resolve
from pkg_resources import resource_exists
from pkg_resources import get_provider
from pkg_resources import get_distribution
from z3c.autoinclude.utils import DistributionManager
from z3c.autoinclude.utils import ZCMLInfo
class DependencyFinder(DistributionManager):
def includableInfo(self, zcml_to_look_for):
result = ZCMLInfo(zcml_to_look_for)
for req in self.context.requires():
dist_manager = DistributionManager(get_provider(req))
for dotted_name in dist_manager.dottedNames():
try:
module = resolve(dotted_name)
except ImportError as exc:
logging.getLogger("z3c.autoinclude").warn(
"resolve(%r) raised import error: %s" % (dotted_name, exc))
continue
for candidate in zcml_to_look_for:
candidate_path = os.path.join(
os.path.dirname(module.__file__), candidate)
if os.path.isfile(candidate_path):
result[candidate].append(dotted_name)
return result
def package_includes(project_name, zcml_filenames=None):
if zcml_filenames is None:
zcml_filenames = ['meta.zcml', 'configure.zcml', 'overrides.zcml']
dist = get_distribution(project_name)
include_finder = DependencyFinder(dist)
return include_finder.includableInfo(zcml_filenames)
| true | true |
f7fcf99f3451a0205655041b775aba1dff36b6fe | 65,510 | py | Python | discord/state.py | rldnyt/discord.pyc | e9d190b79ce6df798f144d1abea19f863e3fdbb9 | [
"MIT"
] | 23 | 2021-08-28T10:14:19.000Z | 2021-12-24T15:10:58.000Z | discord/state.py | Sengolda/discord.py | 5cd31dd6c2fe1a7b5d9538ff949fc371e92ec26b | [
"MIT"
] | 2 | 2021-08-31T08:16:17.000Z | 2021-08-31T15:21:40.000Z | discord/state.py | Sengolda/discord.py | 5cd31dd6c2fe1a7b5d9538ff949fc371e92ec26b | [
"MIT"
] | 3 | 2021-08-31T07:37:40.000Z | 2021-09-14T11:59:47.000Z | """
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import asyncio
from collections import deque, OrderedDict
import copy
import datetime
import itertools
import logging
from typing import (
Dict,
Optional,
TYPE_CHECKING,
Union,
Callable,
Any,
List,
TypeVar,
Coroutine,
Sequence,
Tuple,
Deque,
)
import inspect
import os
from .guild import Guild
from .activity import BaseActivity
from .user import User, ClientUser
from .emoji import Emoji
from .mentions import AllowedMentions
from .partial_emoji import PartialEmoji
from .message import Message
from .channel import *
from .channel import _channel_factory
from .raw_models import *
from .member import Member
from .role import Role
from .enums import ChannelType, try_enum, Status
from . import utils
from .flags import ApplicationFlags, Intents, MemberCacheFlags
from .object import Object
from .invite import Invite
from .integrations import _integration_factory
from .interactions import Interaction
from .ui.view import ViewStore, View
from .stage_instance import StageInstance
from .threads import Thread, ThreadMember
from .sticker import GuildSticker
if TYPE_CHECKING:
from .abc import PrivateChannel
from .message import MessageableChannel
from .guild import GuildChannel, VocalGuildChannel
from .http import HTTPClient
from .voice_client import VoiceProtocol
from .client import Client
from .gateway import DiscordWebSocket
from .types.activity import Activity as ActivityPayload
from .types.channel import DMChannel as DMChannelPayload
from .types.user import User as UserPayload
from .types.emoji import Emoji as EmojiPayload
from .types.sticker import GuildSticker as GuildStickerPayload
from .types.guild import Guild as GuildPayload
from .types.message import Message as MessagePayload
T = TypeVar("T")
CS = TypeVar("CS", bound="ConnectionState")
Channel = Union[GuildChannel, VocalGuildChannel, PrivateChannel, PartialMessageable]
class ChunkRequest:
    """Bookkeeping for one outstanding guild member-chunk request.

    The random ``nonce`` is matched against incoming chunk payloads (see
    ``ConnectionState.process_chunk_requests``) to tie members back to
    this request.
    """

    def __init__(
        self,
        guild_id: int,
        loop: asyncio.AbstractEventLoop,
        resolver: Callable[[int], Any],
        *,
        cache: bool = True,
    ) -> None:
        self.guild_id: int = guild_id
        # resolver maps a guild id to the cached guild object (or None).
        self.resolver: Callable[[int], Any] = resolver
        self.loop: asyncio.AbstractEventLoop = loop
        self.cache: bool = cache
        self.nonce: str = os.urandom(16).hex()
        self.buffer: List[Member] = []
        self.waiters: List[asyncio.Future[List[Member]]] = []

    def add_members(self, members: List[Member]) -> None:
        # Accumulate received members and, when caching is enabled, merge
        # them into the resolved guild's member cache.
        self.buffer.extend(members)
        if self.cache:
            guild = self.resolver(self.guild_id)
            if guild is None:
                return
            for member in members:
                existing = guild.get_member(member.id)
                # Prefer the chunked member when the cached one is missing or
                # only a partial entry (no joined_at).
                if existing is None or existing.joined_at is None:
                    guild._add_member(member)

    async def wait(self) -> List[Member]:
        # Wait for the complete member list; the future is removed again in
        # ``finally`` so a cancelled waiter does not linger.
        future = self.loop.create_future()
        self.waiters.append(future)
        try:
            return await future
        finally:
            self.waiters.remove(future)

    def get_future(self) -> asyncio.Future[List[Member]]:
        # Like wait(), but hands the raw future back to the caller.
        future = self.loop.create_future()
        self.waiters.append(future)
        return future

    def done(self) -> None:
        # Resolve every pending waiter with the accumulated buffer.
        for future in self.waiters:
            if not future.done():
                future.set_result(self.buffer)
_log = logging.getLogger(__name__)


async def logging_coroutine(
    coroutine: Coroutine[Any, Any, T], *, info: str
) -> Optional[T]:
    """Await *coroutine*, logging any exception instead of propagating it.

    Returns the coroutine's result, or ``None`` when it raised.  (The
    declared return type is ``Optional[T]``; previously the awaited result
    was silently discarded, so the value was always ``None``.)
    """
    try:
        return await coroutine
    except Exception:
        _log.exception("Exception occurred during %s", info)
        return None
class ConnectionState:
if TYPE_CHECKING:
_get_websocket: Callable[..., DiscordWebSocket]
_get_client: Callable[..., Client]
_parsers: Dict[str, Callable[[Dict[str, Any]], None]]
    def __init__(
        self,
        *,
        dispatch: Callable,
        handlers: Dict[str, Callable],
        hooks: Dict[str, Callable],
        http: HTTPClient,
        loop: asyncio.AbstractEventLoop,
        **options: Any,
    ) -> None:
        """Validate keyword ``options``, collect event parsers and reset caches."""
        self.loop: asyncio.AbstractEventLoop = loop
        self.http: HTTPClient = http
        # Message cache size; non-positive values fall back to the default.
        self.max_messages: Optional[int] = options.get("max_messages", 1000)
        if self.max_messages is not None and self.max_messages <= 0:
            self.max_messages = 1000
        self.dispatch: Callable = dispatch
        self.handlers: Dict[str, Callable] = handlers
        self.hooks: Dict[str, Callable] = hooks
        self.shard_count: Optional[int] = None
        self._ready_task: Optional[asyncio.Task] = None
        self.application_id: Optional[int] = utils._get_as_snowflake(
            options, "application_id"
        )
        self.heartbeat_timeout: float = options.get("heartbeat_timeout", 60.0)
        self.guild_ready_timeout: float = options.get("guild_ready_timeout", 2.0)
        if self.guild_ready_timeout < 0:
            raise ValueError("guild_ready_timeout cannot be negative")
        allowed_mentions = options.get("allowed_mentions")
        if allowed_mentions is not None and not isinstance(
            allowed_mentions, AllowedMentions
        ):
            raise TypeError("allowed_mentions parameter must be AllowedMentions")
        self.allowed_mentions: Optional[AllowedMentions] = allowed_mentions
        self._chunk_requests: Dict[Union[int, str], ChunkRequest] = {}
        # Normalise the initial presence (activity/status) into payload form.
        activity = options.get("activity", None)
        if activity:
            if not isinstance(activity, BaseActivity):
                raise TypeError("activity parameter must derive from BaseActivity.")
            activity = activity.to_dict()
        status = options.get("status", None)
        if status:
            if status is Status.offline:
                status = "invisible"
            else:
                status = str(status)
        intents = options.get("intents", None)
        if intents is not None:
            if not isinstance(intents, Intents):
                raise TypeError(
                    f"intents parameter must be Intent not {type(intents)!r}"
                )
        else:
            intents = Intents.default()
        if not intents.guilds:
            _log.warning(
                "Guilds intent seems to be disabled. This may cause state related issues."
            )
        self._chunk_guilds: bool = options.get(
            "chunk_guilds_at_startup", intents.members
        )
        # Ensure these two are set properly
        if not intents.members and self._chunk_guilds:
            raise ValueError(
                "Intents.members must be enabled to chunk guilds at startup."
            )
        cache_flags = options.get("member_cache_flags", None)
        if cache_flags is None:
            cache_flags = MemberCacheFlags.from_intents(intents)
        else:
            if not isinstance(cache_flags, MemberCacheFlags):
                raise TypeError(
                    f"member_cache_flags parameter must be MemberCacheFlags not {type(cache_flags)!r}"
                )
            cache_flags._verify_intents(intents)
        self.member_cache_flags: MemberCacheFlags = cache_flags
        self._activity: Optional[ActivityPayload] = activity
        self._status: Optional[str] = status
        self._intents: Intents = intents
        # Without the members intent (or with an empty cache policy), swap in
        # the non-caching user helpers.
        if not intents.members or cache_flags._empty:
            self.store_user = self.create_user  # type: ignore
            self.deref_user = self.deref_user_no_intents  # type: ignore
        # Build the gateway event dispatch table from the parse_* methods.
        self.parsers = parsers = {}
        for attr, func in inspect.getmembers(self):
            if attr.startswith("parse_"):
                parsers[attr[6:].upper()] = func
        self.clear()
    def clear(self, *, views: bool = True) -> None:
        """Reset all cached state; when *views* is False the ViewStore is kept."""
        self.user: Optional[ClientUser] = None
        # Originally, this code used WeakValueDictionary to maintain references to the
        # global user mapping.
        # However, profiling showed that this came with two cons:
        # 1. The __weakref__ slot caused a non-trivial increase in memory
        # 2. The performance of the mapping caused store_user to be a bottleneck.
        # Since this is undesirable, a mapping is now used instead with stored
        # references now using a regular dictionary with eviction being done
        # using __del__. Testing this for memory leaks led to no discernable leaks,
        # though more testing will have to be done.
        self._users: Dict[int, User] = {}
        self._emojis: Dict[int, Emoji] = {}
        self._stickers: Dict[int, GuildSticker] = {}
        self._guilds: Dict[int, Guild] = {}
        if views:
            self._view_store: ViewStore = ViewStore(self)
        self._voice_clients: Dict[int, VoiceProtocol] = {}
        # LRU of max size 128
        self._private_channels: OrderedDict[int, PrivateChannel] = OrderedDict()
        # extra dict to look up private channels by user id
        self._private_channels_by_user: Dict[int, DMChannel] = {}
        if self.max_messages is not None:
            self._messages: Optional[Deque[Message]] = deque(maxlen=self.max_messages)
        else:
            self._messages: Optional[Deque[Message]] = None
def process_chunk_requests(
self, guild_id: int, nonce: Optional[str], members: List[Member], complete: bool
) -> None:
removed = []
for key, request in self._chunk_requests.items():
if request.guild_id == guild_id and request.nonce == nonce:
request.add_members(members)
if complete:
request.done()
removed.append(key)
for key in removed:
del self._chunk_requests[key]
def call_handlers(self, key: str, *args: Any, **kwargs: Any) -> None:
try:
func = self.handlers[key]
except KeyError:
pass
else:
func(*args, **kwargs)
async def call_hooks(self, key: str, *args: Any, **kwargs: Any) -> None:
try:
coro = self.hooks[key]
except KeyError:
pass
else:
await coro(*args, **kwargs)
    @property
    def self_id(self) -> Optional[int]:
        # The client user's id, or None if not logged in yet.
        u = self.user
        return u.id if u else None

    @property
    def intents(self) -> Intents:
        # Return a copy so callers cannot mutate the connection's intents.
        ret = Intents.none()
        ret.value = self._intents.value
        return ret

    @property
    def voice_clients(self) -> List[VoiceProtocol]:
        return list(self._voice_clients.values())

    def _get_voice_client(self, guild_id: Optional[int]) -> Optional[VoiceProtocol]:
        # the keys of self._voice_clients are ints
        return self._voice_clients.get(guild_id) # type: ignore

    def _add_voice_client(self, guild_id: int, voice: VoiceProtocol) -> None:
        self._voice_clients[guild_id] = voice

    def _remove_voice_client(self, guild_id: int) -> None:
        self._voice_clients.pop(guild_id, None)

    def _update_references(self, ws: DiscordWebSocket) -> None:
        # Point existing voice connections at the new websocket.
        for vc in self.voice_clients:
            vc.main_ws = ws # type: ignore
    def store_user(self, data: UserPayload) -> User:
        # Cache-or-create: webhook users (discriminator "0000") are never cached.
        user_id = int(data["id"])
        try:
            return self._users[user_id]
        except KeyError:
            user = User(state=self, data=data)
            if user.discriminator != "0000":
                self._users[user_id] = user
                # Marks the user as evictable via deref_user on __del__.
                user._stored = True
            return user

    def deref_user(self, user_id: int) -> None:
        # Evict a user from the global cache (see the note in clear()).
        self._users.pop(user_id, None)

    def create_user(self, data: UserPayload) -> User:
        # Non-caching replacement for store_user (swapped in by __init__ when
        # the members intent is off or the member cache is empty).
        return User(state=self, data=data)

    def deref_user_no_intents(self, user_id: int) -> None:
        # No-op counterpart of deref_user when nothing is cached.
        return

    def get_user(self, id: Optional[int]) -> Optional[User]:
        # the keys of self._users are ints
        return self._users.get(id) # type: ignore
def store_emoji(self, guild: Guild, data: EmojiPayload) -> Emoji:
# the id will be present here
emoji_id = int(data["id"]) # type: ignore
self._emojis[emoji_id] = emoji = Emoji(guild=guild, state=self, data=data)
return emoji
def store_sticker(self, guild: Guild, data: GuildStickerPayload) -> GuildSticker:
sticker_id = int(data["id"])
self._stickers[sticker_id] = sticker = GuildSticker(state=self, data=data)
return sticker
    def store_view(self, view: View, message_id: Optional[int] = None) -> None:
        """Register a View for component dispatch, optionally bound to a message."""
        self._view_store.add_view(view, message_id)
    def prevent_view_updates_for(self, message_id: int) -> Optional[View]:
        """Stop tracking edits of this message's view; returns the view if tracked."""
        return self._view_store.remove_message_tracking(message_id)
@property
def persistent_views(self) -> Sequence[View]:
return self._view_store.persistent_views
@property
def guilds(self) -> List[Guild]:
return list(self._guilds.values())
def _get_guild(self, guild_id: Optional[int]) -> Optional[Guild]:
# the keys of self._guilds are ints
return self._guilds.get(guild_id) # type: ignore
def _add_guild(self, guild: Guild) -> None:
self._guilds[guild.id] = guild
def _remove_guild(self, guild: Guild) -> None:
self._guilds.pop(guild.id, None)
for emoji in guild.emojis:
self._emojis.pop(emoji.id, None)
for sticker in guild.stickers:
self._stickers.pop(sticker.id, None)
del guild
@property
def emojis(self) -> List[Emoji]:
return list(self._emojis.values())
@property
def stickers(self) -> List[GuildSticker]:
return list(self._stickers.values())
def get_emoji(self, emoji_id: Optional[int]) -> Optional[Emoji]:
# the keys of self._emojis are ints
return self._emojis.get(emoji_id) # type: ignore
def get_sticker(self, sticker_id: Optional[int]) -> Optional[GuildSticker]:
# the keys of self._stickers are ints
return self._stickers.get(sticker_id) # type: ignore
@property
def private_channels(self) -> List[PrivateChannel]:
return list(self._private_channels.values())
    def _get_private_channel(
        self, channel_id: Optional[int]
    ) -> Optional[PrivateChannel]:
        """Return the cached private channel, refreshing its LRU position."""
        try:
            # the keys of self._private_channels are ints
            value = self._private_channels[channel_id]  # type: ignore
        except KeyError:
            return None
        else:
            # the OrderedDict acts as an LRU: a hit moves the entry to the MRU
            # end so eviction in _add_private_channel drops the least recent
            self._private_channels.move_to_end(channel_id)  # type: ignore
            return value
def _get_private_channel_by_user(
self, user_id: Optional[int]
) -> Optional[DMChannel]:
# the keys of self._private_channels are ints
return self._private_channels_by_user.get(user_id) # type: ignore
    def _add_private_channel(self, channel: PrivateChannel) -> None:
        """Cache a private channel, evicting the LRU entry past 128 channels."""
        channel_id = channel.id
        self._private_channels[channel_id] = channel
        if len(self._private_channels) > 128:
            # evict the least-recently-used entry (front of the OrderedDict)
            _, to_remove = self._private_channels.popitem(last=False)
            if isinstance(to_remove, DMChannel) and to_remove.recipient:
                self._private_channels_by_user.pop(to_remove.recipient.id, None)
        # keep the recipient-keyed index in sync for DMs
        if isinstance(channel, DMChannel) and channel.recipient:
            self._private_channels_by_user[channel.recipient.id] = channel
def add_dm_channel(self, data: DMChannelPayload) -> DMChannel:
# self.user is *always* cached when this is called
channel = DMChannel(me=self.user, state=self, data=data) # type: ignore
self._add_private_channel(channel)
return channel
def _remove_private_channel(self, channel: PrivateChannel) -> None:
self._private_channels.pop(channel.id, None)
if isinstance(channel, DMChannel):
recipient = channel.recipient
if recipient is not None:
self._private_channels_by_user.pop(recipient.id, None)
def _get_message(self, msg_id: Optional[int]) -> Optional[Message]:
return (
utils.find(lambda m: m.id == msg_id, reversed(self._messages))
if self._messages
else None
)
def _add_guild_from_data(self, data: GuildPayload) -> Guild:
guild = Guild(data=data, state=self)
self._add_guild(guild)
return guild
def _guild_needs_chunking(self, guild: Guild) -> bool:
# If presences are enabled then we get back the old guild.large behaviour
return (
self._chunk_guilds
and not guild.chunked
and not (self._intents.presences and not guild.large)
)
    def _get_guild_channel(
        self, data: MessagePayload
    ) -> Tuple[Union[Channel, Thread], Optional[Guild]]:
        """Resolve the channel (and guild) a message payload belongs to.

        Falls back to a DM construct when ``guild_id`` is absent, and to a
        :class:`PartialMessageable` when the channel is not cached.
        """
        channel_id = int(data["channel_id"])
        try:
            # a missing "guild_id" key raises KeyError -> DM path
            guild = self._get_guild(int(data["guild_id"]))
        except KeyError:
            channel = DMChannel._from_message(self, channel_id)
            guild = None
        else:
            channel = guild and guild._resolve_channel(channel_id)
        return channel or PartialMessageable(state=self, id=channel_id), guild
    async def chunker(
        self,
        guild_id: int,
        query: str = "",
        limit: int = 0,
        presences: bool = False,
        *,
        nonce: Optional[str] = None,
    ) -> None:
        """Send a Request Guild Members opcode over the shard's websocket."""
        ws = self._get_websocket(guild_id)  # This is ignored upstream
        await ws.request_chunks(
            guild_id, query=query, limit=limit, presences=presences, nonce=nonce
        )
    async def query_members(
        self,
        guild: Guild,
        query: str,
        limit: int,
        user_ids: List[int],
        cache: bool,
        presences: bool,
    ):
        """Query guild members over the gateway and wait for the chunked reply.

        Registers a :class:`ChunkRequest` keyed by a fresh nonce so the
        incoming GUILD_MEMBERS_CHUNK events can be matched back to this call.

        Raises
        ------
        RuntimeError
            No websocket is available for the guild's shard.
        asyncio.TimeoutError
            No complete chunk reply arrived within 30 seconds.
        """
        guild_id = guild.id
        ws = self._get_websocket(guild_id)
        if ws is None:
            raise RuntimeError("Somehow do not have a websocket for this guild_id")
        request = ChunkRequest(guild.id, self.loop, self._get_guild, cache=cache)
        self._chunk_requests[request.nonce] = request
        try:
            # start the query operation
            await ws.request_chunks(
                guild_id,
                query=query,
                limit=limit,
                user_ids=user_ids,
                presences=presences,
                nonce=request.nonce,
            )
            return await asyncio.wait_for(request.wait(), timeout=30.0)
        except asyncio.TimeoutError:
            _log.warning(
                "Timed out waiting for chunks with query %r and limit %d for guild_id %d",
                query,
                limit,
                guild_id,
            )
            raise
    async def _delay_ready(self) -> None:
        """Wait for the initial GUILD_CREATE stream, chunk guilds that need it,
        then dispatch ``ready``.

        Guilds arrive on ``self._ready_state``; the queue is drained until no
        guild arrives within ``guild_ready_timeout`` seconds, which is taken
        to mean the gateway has finished streaming guilds.
        """
        try:
            states = []
            while True:
                # this snippet of code is basically waiting N seconds
                # until the last GUILD_CREATE was sent
                try:
                    guild = await asyncio.wait_for(
                        self._ready_state.get(), timeout=self.guild_ready_timeout
                    )
                except asyncio.TimeoutError:
                    break
                else:
                    if self._guild_needs_chunking(guild):
                        future = await self.chunk_guild(guild, wait=False)
                        states.append((guild, future))
                    else:
                        if guild.unavailable is False:
                            self.dispatch("guild_available", guild)
                        else:
                            self.dispatch("guild_join", guild)
            # wait (briefly) for each outstanding chunk request, then dispatch
            for guild, future in states:
                try:
                    await asyncio.wait_for(future, timeout=5.0)
                except asyncio.TimeoutError:
                    _log.warning(
                        "Shard ID %s timed out waiting for chunks for guild_id %s.",
                        guild.shard_id,
                        guild.id,
                    )
                if guild.unavailable is False:
                    self.dispatch("guild_available", guild)
                else:
                    self.dispatch("guild_join", guild)
            # remove the state
            try:
                del self._ready_state
            except AttributeError:
                pass  # already been deleted somehow
        except asyncio.CancelledError:
            pass
        else:
            # dispatch the event
            self.call_handlers("ready")
            self.dispatch("ready")
        finally:
            self._ready_task = None
    def parse_ready(self, data) -> None:
        """Handle READY: reset state, cache the bot user and initial guilds,
        then start the delayed-ready task."""
        if self._ready_task is not None:
            self._ready_task.cancel()
        self._ready_state = asyncio.Queue()
        # views=False keeps persistent views across reconnects
        self.clear(views=False)
        self.user = ClientUser(state=self, data=data["user"])
        self.store_user(data["user"])
        if self.application_id is None:
            try:
                application = data["application"]
            except KeyError:
                pass
            else:
                self.application_id = utils._get_as_snowflake(application, "id")
                # flags will always be present here
                self.application_flags = ApplicationFlags._from_value(application["flags"])  # type: ignore
        for guild_data in data["guilds"]:
            self._add_guild_from_data(guild_data)
        self.dispatch("connect")
        self._ready_task = asyncio.create_task(self._delay_ready())
    def parse_resumed(self, data) -> None:
        """Handle RESUMED (successful gateway session resume)."""
        self.dispatch("resumed")
    def parse_message_create(self, data) -> None:
        """Handle MESSAGE_CREATE: build the Message, dispatch it, cache it,
        and bump the channel's last_message_id."""
        channel, _ = self._get_guild_channel(data)
        # channel would be the correct type here
        message = Message(channel=channel, data=data, state=self)  # type: ignore
        self.dispatch("message", message)
        if self._messages is not None:
            self._messages.append(message)
        # we ensure that the channel is either a TextChannel or Thread
        if channel and channel.__class__ in (TextChannel, Thread):
            channel.last_message_id = message.id  # type: ignore
    def parse_message_delete(self, data) -> None:
        """Handle MESSAGE_DELETE: dispatch the raw event always, and the rich
        event (plus cache removal) when the message was cached."""
        raw = RawMessageDeleteEvent(data)
        found = self._get_message(raw.message_id)
        raw.cached_message = found
        self.dispatch("raw_message_delete", raw)
        if self._messages is not None and found is not None:
            # dispatch before removal so listeners still see the message
            self.dispatch("message_delete", found)
            self._messages.remove(found)
    def parse_message_delete_bulk(self, data) -> None:
        """Handle MESSAGE_DELETE_BULK: dispatch raw + rich bulk delete events
        and purge cached copies."""
        raw = RawBulkMessageDeleteEvent(data)
        if self._messages:
            found_messages = [
                message for message in self._messages if message.id in raw.message_ids
            ]
        else:
            found_messages = []
        raw.cached_messages = found_messages
        self.dispatch("raw_bulk_message_delete", raw)
        if found_messages:
            self.dispatch("bulk_message_delete", found_messages)
            for msg in found_messages:
                # self._messages won't be None here
                self._messages.remove(msg)  # type: ignore
    def parse_message_update(self, data) -> None:
        """Handle MESSAGE_UPDATE: dispatch the raw edit always, the rich edit
        when the message is cached, and refresh any tracked view components."""
        raw = RawMessageUpdateEvent(data)
        message = self._get_message(raw.message_id)
        if message is not None:
            older_message = copy.copy(message)
            raw.cached_message = older_message
            self.dispatch("raw_message_edit", raw)
            message._update(data)
            # Coerce the `after` parameter to take the new updated Member
            # ref: #5999
            older_message.author = message.author
            self.dispatch("message_edit", older_message, message)
        else:
            self.dispatch("raw_message_edit", raw)
        if "components" in data and self._view_store.is_message_tracked(raw.message_id):
            self._view_store.update_from_message(raw.message_id, data["components"])
    def parse_message_reaction_add(self, data) -> None:
        """Handle MESSAGE_REACTION_ADD: dispatch the raw event, then the rich
        ``reaction_add`` when the message is cached."""
        emoji = data["emoji"]
        emoji_id = utils._get_as_snowflake(emoji, "id")
        emoji = PartialEmoji.with_state(
            self, id=emoji_id, animated=emoji.get("animated", False), name=emoji["name"]
        )
        raw = RawReactionActionEvent(data, emoji, "REACTION_ADD")
        # member data only comes through for guild reactions
        member_data = data.get("member")
        if member_data:
            guild = self._get_guild(raw.guild_id)
            if guild is not None:
                raw.member = Member(data=member_data, guild=guild, state=self)
            else:
                raw.member = None
        else:
            raw.member = None
        self.dispatch("raw_reaction_add", raw)
        # rich interface here
        message = self._get_message(raw.message_id)
        if message is not None:
            emoji = self._upgrade_partial_emoji(emoji)
            reaction = message._add_reaction(data, emoji, raw.user_id)
            user = raw.member or self._get_reaction_user(message.channel, raw.user_id)
            if user:
                self.dispatch("reaction_add", reaction, user)
    def parse_message_reaction_remove_all(self, data) -> None:
        """Handle MESSAGE_REACTION_REMOVE_ALL: dispatch raw + rich clear events
        and wipe the cached message's reactions."""
        raw = RawReactionClearEvent(data)
        self.dispatch("raw_reaction_clear", raw)
        message = self._get_message(raw.message_id)
        if message is not None:
            # keep a copy so listeners can still inspect what was cleared
            old_reactions = message.reactions.copy()
            message.reactions.clear()
            self.dispatch("reaction_clear", message, old_reactions)
    def parse_message_reaction_remove(self, data) -> None:
        """Handle MESSAGE_REACTION_REMOVE: dispatch the raw event, then the
        rich ``reaction_remove`` when the message is cached."""
        emoji = data["emoji"]
        emoji_id = utils._get_as_snowflake(emoji, "id")
        emoji = PartialEmoji.with_state(self, id=emoji_id, name=emoji["name"])
        raw = RawReactionActionEvent(data, emoji, "REACTION_REMOVE")
        self.dispatch("raw_reaction_remove", raw)
        message = self._get_message(raw.message_id)
        if message is not None:
            emoji = self._upgrade_partial_emoji(emoji)
            try:
                reaction = message._remove_reaction(data, emoji, raw.user_id)
            except (AttributeError, ValueError):  # eventual consistency lol
                pass
            else:
                user = self._get_reaction_user(message.channel, raw.user_id)
                if user:
                    self.dispatch("reaction_remove", reaction, user)
    def parse_message_reaction_remove_emoji(self, data) -> None:
        """Handle MESSAGE_REACTION_REMOVE_EMOJI: dispatch the raw event, then
        the rich clear-emoji event when the message is cached."""
        emoji = data["emoji"]
        emoji_id = utils._get_as_snowflake(emoji, "id")
        emoji = PartialEmoji.with_state(self, id=emoji_id, name=emoji["name"])
        raw = RawReactionClearEmojiEvent(data, emoji)
        self.dispatch("raw_reaction_clear_emoji", raw)
        message = self._get_message(raw.message_id)
        if message is not None:
            try:
                reaction = message._clear_emoji(emoji)
            except (AttributeError, ValueError):  # eventual consistency lol
                pass
            else:
                if reaction:
                    self.dispatch("reaction_clear_emoji", reaction)
    def parse_interaction_create(self, data) -> None:
        """Handle INTERACTION_CREATE: route component interactions to their
        registered views, then dispatch the generic interaction event."""
        interaction = Interaction(data=data, state=self)
        if data["type"] == 3:  # interaction component
            custom_id = interaction.data["custom_id"]  # type: ignore
            component_type = interaction.data["component_type"]  # type: ignore
            self._view_store.dispatch(component_type, custom_id, interaction)
        self.dispatch("interaction", interaction)
    def parse_presence_update(self, data) -> None:
        """Handle PRESENCE_UPDATE: update the cached member and dispatch
        ``presence_update`` (and ``user_update`` if the inner user changed)."""
        guild_id = utils._get_as_snowflake(data, "guild_id")
        # guild_id won't be None here
        guild = self._get_guild(guild_id)
        if guild is None:
            _log.debug(
                "PRESENCE_UPDATE referencing an unknown guild ID: %s. Discarding.",
                guild_id,
            )
            return
        user = data["user"]
        member_id = int(user["id"])
        member = guild.get_member(member_id)
        if member is None:
            _log.debug(
                "PRESENCE_UPDATE referencing an unknown member ID: %s. Discarding",
                member_id,
            )
            return
        # snapshot for the `before` argument
        old_member = Member._copy(member)
        user_update = member._presence_update(data=data, user=user)
        if user_update:
            self.dispatch("user_update", user_update[0], user_update[1])
        self.dispatch("presence_update", old_member, member)
def parse_user_update(self, data) -> None:
# self.user is *always* cached when this is called
user: ClientUser = self.user # type: ignore
user._update(data)
ref = self._users.get(user.id)
if ref:
ref._update(data)
def parse_invite_create(self, data) -> None:
invite = Invite.from_gateway(state=self, data=data)
self.dispatch("invite_create", invite)
def parse_invite_delete(self, data) -> None:
invite = Invite.from_gateway(state=self, data=data)
self.dispatch("invite_delete", invite)
def parse_channel_delete(self, data) -> None:
guild = self._get_guild(utils._get_as_snowflake(data, "guild_id"))
channel_id = int(data["id"])
if guild is not None:
channel = guild.get_channel(channel_id)
if channel is not None:
guild._remove_channel(channel)
self.dispatch("guild_channel_delete", channel)
    def parse_channel_update(self, data) -> None:
        """Handle CHANNEL_UPDATE: update group DMs or guild channels in place
        and dispatch the relevant update event with a before-copy."""
        channel_type = try_enum(ChannelType, data.get("type"))
        channel_id = int(data["id"])
        if channel_type is ChannelType.group:
            channel = self._get_private_channel(channel_id)
            old_channel = copy.copy(channel)
            # the channel is a GroupChannel
            channel._update_group(data)  # type: ignore
            self.dispatch("private_channel_update", old_channel, channel)
            return
        guild_id = utils._get_as_snowflake(data, "guild_id")
        guild = self._get_guild(guild_id)
        if guild is not None:
            channel = guild.get_channel(channel_id)
            if channel is not None:
                old_channel = copy.copy(channel)
                channel._update(guild, data)
                self.dispatch("guild_channel_update", old_channel, channel)
            else:
                _log.debug(
                    "CHANNEL_UPDATE referencing an unknown channel ID: %s. Discarding.",
                    channel_id,
                )
        else:
            _log.debug(
                "CHANNEL_UPDATE referencing an unknown guild ID: %s. Discarding.",
                guild_id,
            )
    def parse_channel_create(self, data) -> None:
        """Handle CHANNEL_CREATE: construct the channel via the type factory,
        add it to its guild, and dispatch ``guild_channel_create``."""
        factory, ch_type = _channel_factory(data["type"])
        if factory is None:
            _log.debug(
                "CHANNEL_CREATE referencing an unknown channel type %s. Discarding.",
                data["type"],
            )
            return
        guild_id = utils._get_as_snowflake(data, "guild_id")
        guild = self._get_guild(guild_id)
        if guild is not None:
            # the factory can't be a DMChannel or GroupChannel here
            channel = factory(guild=guild, state=self, data=data)  # type: ignore
            guild._add_channel(channel)  # type: ignore
            self.dispatch("guild_channel_create", channel)
        else:
            _log.debug(
                "CHANNEL_CREATE referencing an unknown guild ID: %s. Discarding.",
                guild_id,
            )
            return
    def parse_channel_pins_update(self, data) -> None:
        """Handle CHANNEL_PINS_UPDATE: dispatch a pins-update event for the
        private or guild channel with the new last-pin timestamp."""
        channel_id = int(data["channel_id"])
        try:
            # a missing "guild_id" key raises KeyError -> private channel path
            guild = self._get_guild(int(data["guild_id"]))
        except KeyError:
            guild = None
            channel = self._get_private_channel(channel_id)
        else:
            channel = guild and guild._resolve_channel(channel_id)
        if channel is None:
            _log.debug(
                "CHANNEL_PINS_UPDATE referencing an unknown channel ID: %s. Discarding.",
                channel_id,
            )
            return
        last_pin = (
            utils.parse_time(data["last_pin_timestamp"])
            if data["last_pin_timestamp"]
            else None
        )
        if guild is None:
            self.dispatch("private_channel_pins_update", channel, last_pin)
        else:
            self.dispatch("guild_channel_pins_update", channel, last_pin)
    def parse_thread_create(self, data) -> None:
        """Handle THREAD_CREATE: cache the thread and dispatch ``thread_join``
        only when it was not already known."""
        guild_id = int(data["guild_id"])
        guild: Optional[Guild] = self._get_guild(guild_id)
        if guild is None:
            _log.debug(
                "THREAD_CREATE referencing an unknown guild ID: %s. Discarding",
                guild_id,
            )
            return
        thread = Thread(guild=guild, state=guild._state, data=data)
        # checked before _add_thread so re-adds don't re-dispatch
        has_thread = guild.get_thread(thread.id)
        guild._add_thread(thread)
        if not has_thread:
            self.dispatch("thread_join", thread)
    def parse_thread_update(self, data) -> None:
        """Handle THREAD_UPDATE: update a cached thread (dispatching
        ``thread_update``) or cache a new one (dispatching ``thread_join``)."""
        guild_id = int(data["guild_id"])
        guild = self._get_guild(guild_id)
        if guild is None:
            _log.debug(
                "THREAD_UPDATE referencing an unknown guild ID: %s. Discarding",
                guild_id,
            )
            return
        thread_id = int(data["id"])
        thread = guild.get_thread(thread_id)
        if thread is not None:
            old = copy.copy(thread)
            thread._update(data)
            self.dispatch("thread_update", old, thread)
        else:
            # first time we see this thread: treat it like a join
            thread = Thread(guild=guild, state=guild._state, data=data)
            guild._add_thread(thread)
            self.dispatch("thread_join", thread)
    def parse_thread_delete(self, data) -> None:
        """Handle THREAD_DELETE: evict the cached thread and dispatch
        ``thread_delete``; unknown guilds/threads are ignored."""
        guild_id = int(data["guild_id"])
        guild = self._get_guild(guild_id)
        if guild is None:
            _log.debug(
                "THREAD_DELETE referencing an unknown guild ID: %s. Discarding",
                guild_id,
            )
            return
        thread_id = int(data["id"])
        thread = guild.get_thread(thread_id)
        if thread is not None:
            guild._remove_thread(thread)  # type: ignore
            self.dispatch("thread_delete", thread)
    def parse_thread_list_sync(self, data) -> None:
        """Handle THREAD_LIST_SYNC: replace thread state for the synced
        channels (or the whole guild), dispatching ``thread_join`` for new
        threads and ``thread_remove`` for ones that disappeared."""
        guild_id = int(data["guild_id"])
        guild: Optional[Guild] = self._get_guild(guild_id)
        if guild is None:
            _log.debug(
                "THREAD_LIST_SYNC referencing an unknown guild ID: %s. Discarding",
                guild_id,
            )
            return
        try:
            channel_ids = set(data["channel_ids"])
        except KeyError:
            # If not provided, then the entire guild is being synced
            # So all previous thread data should be overwritten
            previous_threads = guild._threads.copy()
            guild._clear_threads()
        else:
            previous_threads = guild._filter_threads(channel_ids)
        threads = {d["id"]: guild._store_thread(d) for d in data.get("threads", [])}
        for member in data.get("members", []):
            try:
                # note: member['id'] is the thread_id
                thread = threads[member["id"]]
            except KeyError:
                continue
            else:
                thread._add_member(ThreadMember(thread, member))
        for thread in threads.values():
            old = previous_threads.pop(thread.id, None)
            if old is None:
                self.dispatch("thread_join", thread)
        # anything left in previous_threads was not in the sync payload
        for thread in previous_threads.values():
            self.dispatch("thread_remove", thread)
    def parse_thread_member_update(self, data) -> None:
        """Handle THREAD_MEMBER_UPDATE: refresh the bot's own membership record
        on the thread (``thread.me``)."""
        guild_id = int(data["guild_id"])
        guild: Optional[Guild] = self._get_guild(guild_id)
        if guild is None:
            _log.debug(
                "THREAD_MEMBER_UPDATE referencing an unknown guild ID: %s. Discarding",
                guild_id,
            )
            return
        thread_id = int(data["id"])
        thread: Optional[Thread] = guild.get_thread(thread_id)
        if thread is None:
            _log.debug(
                "THREAD_MEMBER_UPDATE referencing an unknown thread ID: %s. Discarding",
                thread_id,
            )
            return
        member = ThreadMember(thread, data)
        thread.me = member
    def parse_thread_members_update(self, data) -> None:
        """Handle THREAD_MEMBERS_UPDATE: apply member additions/removals to the
        thread, treating the bot's own add/remove as thread join/remove."""
        guild_id = int(data["guild_id"])
        guild: Optional[Guild] = self._get_guild(guild_id)
        if guild is None:
            _log.debug(
                "THREAD_MEMBERS_UPDATE referencing an unknown guild ID: %s. Discarding",
                guild_id,
            )
            return
        thread_id = int(data["id"])
        thread: Optional[Thread] = guild.get_thread(thread_id)
        if thread is None:
            _log.debug(
                "THREAD_MEMBERS_UPDATE referencing an unknown thread ID: %s. Discarding",
                thread_id,
            )
            return
        added_members = [ThreadMember(thread, d) for d in data.get("added_members", [])]
        removed_member_ids = [int(x) for x in data.get("removed_member_ids", [])]
        self_id = self.self_id
        for member in added_members:
            if member.id != self_id:
                thread._add_member(member)
                self.dispatch("thread_member_join", member)
            else:
                # the bot itself was added
                thread.me = member
                self.dispatch("thread_join", thread)
        for member_id in removed_member_ids:
            if member_id != self_id:
                member = thread._pop_member(member_id)
                if member is not None:
                    self.dispatch("thread_member_remove", member)
            else:
                # the bot itself was removed
                self.dispatch("thread_remove", thread)
    def parse_guild_member_add(self, data) -> None:
        """Handle GUILD_MEMBER_ADD: optionally cache the member, bump the
        member count, and dispatch ``member_join``."""
        guild = self._get_guild(int(data["guild_id"]))
        if guild is None:
            _log.debug(
                "GUILD_MEMBER_ADD referencing an unknown guild ID: %s. Discarding.",
                data["guild_id"],
            )
            return
        member = Member(guild=guild, data=data, state=self)
        if self.member_cache_flags.joined:
            guild._add_member(member)
        try:
            # _member_count may be absent for unchunked guilds
            guild._member_count += 1
        except AttributeError:
            pass
        self.dispatch("member_join", member)
    def parse_guild_member_remove(self, data) -> None:
        """Handle GUILD_MEMBER_REMOVE: decrement the member count, evict the
        cached member, and dispatch ``member_remove``."""
        guild = self._get_guild(int(data["guild_id"]))
        if guild is not None:
            try:
                # _member_count may be absent for unchunked guilds
                guild._member_count -= 1
            except AttributeError:
                pass
            user_id = int(data["user"]["id"])
            member = guild.get_member(user_id)
            if member is not None:
                guild._remove_member(member)  # type: ignore
                self.dispatch("member_remove", member)
        else:
            _log.debug(
                "GUILD_MEMBER_REMOVE referencing an unknown guild ID: %s. Discarding.",
                data["guild_id"],
            )
    def parse_guild_member_update(self, data) -> None:
        """Handle GUILD_MEMBER_UPDATE: update the cached member (dispatching
        ``member_update``/``user_update``), or cache a new member if allowed."""
        guild = self._get_guild(int(data["guild_id"]))
        user = data["user"]
        user_id = int(user["id"])
        if guild is None:
            _log.debug(
                "GUILD_MEMBER_UPDATE referencing an unknown guild ID: %s. Discarding.",
                data["guild_id"],
            )
            return
        member = guild.get_member(user_id)
        if member is not None:
            old_member = Member._copy(member)
            member._update(data)
            user_update = member._update_inner_user(user)
            if user_update:
                self.dispatch("user_update", user_update[0], user_update[1])
            self.dispatch("member_update", old_member, member)
        else:
            if self.member_cache_flags.joined:
                member = Member(data=data, guild=guild, state=self)
                # Force an update on the inner user if necessary
                user_update = member._update_inner_user(user)
                if user_update:
                    self.dispatch("user_update", user_update[0], user_update[1])
                guild._add_member(member)
            _log.debug(
                "GUILD_MEMBER_UPDATE referencing an unknown member ID: %s. Discarding.",
                user_id,
            )
    def parse_guild_emojis_update(self, data) -> None:
        """Handle GUILD_EMOJIS_UPDATE: replace the guild's emoji set and
        dispatch with before/after tuples."""
        guild = self._get_guild(int(data["guild_id"]))
        if guild is None:
            _log.debug(
                "GUILD_EMOJIS_UPDATE referencing an unknown guild ID: %s. Discarding.",
                data["guild_id"],
            )
            return
        before_emojis = guild.emojis
        # purge the old set from the global cache before re-storing
        for emoji in before_emojis:
            self._emojis.pop(emoji.id, None)
        # guild won't be None here
        guild.emojis = tuple(map(lambda d: self.store_emoji(guild, d), data["emojis"]))  # type: ignore
        self.dispatch("guild_emojis_update", guild, before_emojis, guild.emojis)
def parse_guild_stickers_update(self, data) -> None:
guild = self._get_guild(int(data["guild_id"]))
if guild is None:
_log.debug(
"GUILD_STICKERS_UPDATE referencing an unknown guild ID: %s. Discarding.",
data["guild_id"],
)
return
before_stickers = guild.stickers
for emoji in before_stickers:
self._stickers.pop(emoji.id, None)
# guild won't be None here
guild.stickers = tuple(map(lambda d: self.store_sticker(guild, d), data["stickers"])) # type: ignore
self.dispatch("guild_stickers_update", guild, before_stickers, guild.stickers)
    def _get_create_guild(self, data):
        """Return the guild for a GUILD_CREATE payload: refresh an existing
        cached guild that just became available, or cache a brand new one."""
        if data.get("unavailable") is False:
            # GUILD_CREATE with unavailable in the response
            # usually means that the guild has become available
            # and is therefore in the cache
            guild = self._get_guild(int(data["id"]))
            if guild is not None:
                guild.unavailable = False
                guild._from_data(data)
                return guild
        return self._add_guild_from_data(data)
def is_guild_evicted(self, guild) -> bool:
return guild.id not in self._guilds
async def chunk_guild(self, guild, *, wait=True, cache=None):
cache = cache or self.member_cache_flags.joined
request = self._chunk_requests.get(guild.id)
if request is None:
self._chunk_requests[guild.id] = request = ChunkRequest(
guild.id, self.loop, self._get_guild, cache=cache
)
await self.chunker(guild.id, nonce=request.nonce)
if wait:
return await request.wait()
return request.get_future()
    async def _chunk_and_dispatch(self, guild, unavailable):
        """Chunk *guild* (best-effort, 60s cap), then dispatch its
        availability/join event regardless of chunk success."""
        try:
            await asyncio.wait_for(self.chunk_guild(guild), timeout=60.0)
        except asyncio.TimeoutError:
            _log.info("Somehow timed out waiting for chunks.")
        if unavailable is False:
            self.dispatch("guild_available", guild)
        else:
            self.dispatch("guild_join", guild)
    def parse_guild_create(self, data) -> None:
        """Handle GUILD_CREATE: cache/refresh the guild, defer to the ready
        machinery when still starting up, otherwise chunk and dispatch."""
        unavailable = data.get("unavailable")
        if unavailable is True:
            # joined a guild with unavailable == True so..
            return
        guild = self._get_create_guild(data)
        try:
            # Notify the on_ready state, if any, that this guild is complete.
            self._ready_state.put_nowait(guild)
        except AttributeError:
            pass
        else:
            # If we're waiting for the event, put the rest on hold
            return
        # check if it requires chunking
        if self._guild_needs_chunking(guild):
            asyncio.create_task(self._chunk_and_dispatch(guild, unavailable))
            return
        # Dispatch available if newly available
        if unavailable is False:
            self.dispatch("guild_available", guild)
        else:
            self.dispatch("guild_join", guild)
def parse_guild_update(self, data) -> None:
guild = self._get_guild(int(data["id"]))
if guild is not None:
old_guild = copy.copy(guild)
guild._from_data(data)
self.dispatch("guild_update", old_guild, guild)
else:
_log.debug(
"GUILD_UPDATE referencing an unknown guild ID: %s. Discarding.",
data["id"],
)
    def parse_guild_delete(self, data) -> None:
        """Handle GUILD_DELETE: either mark the guild unavailable (outage) or
        fully evict it, pruning its messages from the message cache."""
        guild = self._get_guild(int(data["id"]))
        if guild is None:
            _log.debug(
                "GUILD_DELETE referencing an unknown guild ID: %s. Discarding.",
                data["id"],
            )
            return
        if data.get("unavailable", False):
            # GUILD_DELETE with unavailable being True means that the
            # guild that was available is now currently unavailable
            guild.unavailable = True
            self.dispatch("guild_unavailable", guild)
            return
        # do a cleanup of the messages cache
        if self._messages is not None:
            self._messages: Optional[Deque[Message]] = deque(
                (msg for msg in self._messages if msg.guild != guild),
                maxlen=self.max_messages,
            )
        self._remove_guild(guild)
        self.dispatch("guild_remove", guild)
    def parse_guild_ban_add(self, data) -> None:
        """Handle GUILD_BAN_ADD: dispatch ``member_ban`` with the cached
        member when available, else the raw user."""
        # we make the assumption that GUILD_BAN_ADD is done
        # before GUILD_MEMBER_REMOVE is called
        # hence we don't remove it from cache or do anything
        # strange with it, the main purpose of this event
        # is mainly to dispatch to another event worth listening to for logging
        guild = self._get_guild(int(data["guild_id"]))
        if guild is not None:
            try:
                user = User(data=data["user"], state=self)
            except KeyError:
                pass
            else:
                member = guild.get_member(user.id) or user
                self.dispatch("member_ban", guild, member)
def parse_guild_ban_remove(self, data) -> None:
guild = self._get_guild(int(data["guild_id"]))
if guild is not None and "user" in data:
user = self.store_user(data["user"])
self.dispatch("member_unban", guild, user)
def parse_guild_role_create(self, data) -> None:
guild = self._get_guild(int(data["guild_id"]))
if guild is None:
_log.debug(
"GUILD_ROLE_CREATE referencing an unknown guild ID: %s. Discarding.",
data["guild_id"],
)
return
role_data = data["role"]
role = Role(guild=guild, data=role_data, state=self)
guild._add_role(role)
self.dispatch("guild_role_create", role)
    def parse_guild_role_delete(self, data) -> None:
        """Handle GUILD_ROLE_DELETE: evict the role from the guild cache and
        dispatch ``guild_role_delete``; unknown roles are ignored."""
        guild = self._get_guild(int(data["guild_id"]))
        if guild is not None:
            role_id = int(data["role_id"])
            try:
                role = guild._remove_role(role_id)
            except KeyError:
                return
            else:
                self.dispatch("guild_role_delete", role)
        else:
            _log.debug(
                "GUILD_ROLE_DELETE referencing an unknown guild ID: %s. Discarding.",
                data["guild_id"],
            )
    def parse_guild_role_update(self, data) -> None:
        """Handle GUILD_ROLE_UPDATE: update the cached role in place and
        dispatch ``guild_role_update`` with a before-copy."""
        guild = self._get_guild(int(data["guild_id"]))
        if guild is not None:
            role_data = data["role"]
            role_id = int(role_data["id"])
            role = guild.get_role(role_id)
            if role is not None:
                old_role = copy.copy(role)
                role._update(role_data)
                self.dispatch("guild_role_update", old_role, role)
        else:
            _log.debug(
                "GUILD_ROLE_UPDATE referencing an unknown guild ID: %s. Discarding.",
                data["guild_id"],
            )
    def parse_guild_members_chunk(self, data) -> None:
        """Handle GUILD_MEMBERS_CHUNK: build members (applying any bundled
        presences) and feed them to the matching pending chunk request."""
        guild_id = int(data["guild_id"])
        guild = self._get_guild(guild_id)
        presences = data.get("presences", [])
        # the guild won't be None here
        members = [Member(guild=guild, data=member, state=self) for member in data.get("members", [])]  # type: ignore
        _log.debug(
            "Processed a chunk for %s members in guild ID %s.", len(members), guild_id
        )
        if presences:
            # presence payloads key users by string snowflakes
            member_dict = {str(member.id): member for member in members}
            for presence in presences:
                user = presence["user"]
                member_id = user["id"]
                member = member_dict.get(member_id)
                if member is not None:
                    member._presence_update(presence, user)
        # the final chunk has chunk_index == chunk_count - 1
        complete = data.get("chunk_index", 0) + 1 == data.get("chunk_count")
        self.process_chunk_requests(guild_id, data.get("nonce"), members, complete)
def parse_guild_integrations_update(self, data) -> None:
guild = self._get_guild(int(data["guild_id"]))
if guild is not None:
self.dispatch("guild_integrations_update", guild)
else:
_log.debug(
"GUILD_INTEGRATIONS_UPDATE referencing an unknown guild ID: %s. Discarding.",
data["guild_id"],
)
def parse_integration_create(self, data) -> None:
guild_id = int(data.pop("guild_id"))
guild = self._get_guild(guild_id)
if guild is not None:
cls, _ = _integration_factory(data["type"])
integration = cls(data=data, guild=guild)
self.dispatch("integration_create", integration)
else:
_log.debug(
"INTEGRATION_CREATE referencing an unknown guild ID: %s. Discarding.",
guild_id,
)
def parse_integration_update(self, data) -> None:
guild_id = int(data.pop("guild_id"))
guild = self._get_guild(guild_id)
if guild is not None:
cls, _ = _integration_factory(data["type"])
integration = cls(data=data, guild=guild)
self.dispatch("integration_update", integration)
else:
_log.debug(
"INTEGRATION_UPDATE referencing an unknown guild ID: %s. Discarding.",
guild_id,
)
def parse_integration_delete(self, data) -> None:
guild_id = int(data["guild_id"])
guild = self._get_guild(guild_id)
if guild is not None:
raw = RawIntegrationDeleteEvent(data)
self.dispatch("raw_integration_delete", raw)
else:
_log.debug(
"INTEGRATION_DELETE referencing an unknown guild ID: %s. Discarding.",
guild_id,
)
    def parse_webhooks_update(self, data) -> None:
        """Handle WEBHOOKS_UPDATE: dispatch ``webhooks_update`` for the
        affected channel; unknown guilds/channels are logged and dropped."""
        guild = self._get_guild(int(data["guild_id"]))
        if guild is None:
            _log.debug(
                "WEBHOOKS_UPDATE referencing an unknown guild ID: %s. Discarding",
                data["guild_id"],
            )
            return
        channel = guild.get_channel(int(data["channel_id"]))
        if channel is not None:
            self.dispatch("webhooks_update", channel)
        else:
            _log.debug(
                "WEBHOOKS_UPDATE referencing an unknown channel ID: %s. Discarding.",
                data["channel_id"],
            )
    def parse_stage_instance_create(self, data) -> None:
        """Handle STAGE_INSTANCE_CREATE: cache the stage instance on its guild
        and dispatch ``stage_instance_create``."""
        guild = self._get_guild(int(data["guild_id"]))
        if guild is not None:
            stage_instance = StageInstance(guild=guild, state=self, data=data)
            guild._stage_instances[stage_instance.id] = stage_instance
            self.dispatch("stage_instance_create", stage_instance)
        else:
            _log.debug(
                "STAGE_INSTANCE_CREATE referencing unknown guild ID: %s. Discarding.",
                data["guild_id"],
            )
    def parse_stage_instance_update(self, data) -> None:
        """Handle STAGE_INSTANCE_UPDATE: update the cached stage instance and
        dispatch ``stage_instance_update`` with a before-copy."""
        guild = self._get_guild(int(data["guild_id"]))
        if guild is not None:
            stage_instance = guild._stage_instances.get(int(data["id"]))
            if stage_instance is not None:
                old_stage_instance = copy.copy(stage_instance)
                stage_instance._update(data)
                self.dispatch(
                    "stage_instance_update", old_stage_instance, stage_instance
                )
            else:
                _log.debug(
                    "STAGE_INSTANCE_UPDATE referencing unknown stage instance ID: %s. Discarding.",
                    data["id"],
                )
        else:
            _log.debug(
                "STAGE_INSTANCE_UPDATE referencing unknown guild ID: %s. Discarding.",
                data["guild_id"],
            )
    def parse_stage_instance_delete(self, data) -> None:
        """Handle STAGE_INSTANCE_DELETE: evict the cached stage instance and
        dispatch ``stage_instance_delete``; unknown IDs are ignored."""
        guild = self._get_guild(int(data["guild_id"]))
        if guild is not None:
            try:
                stage_instance = guild._stage_instances.pop(int(data["id"]))
            except KeyError:
                pass
            else:
                self.dispatch("stage_instance_delete", stage_instance)
        else:
            _log.debug(
                "STAGE_INSTANCE_DELETE referencing unknown guild ID: %s. Discarding.",
                data["guild_id"],
            )
    def parse_voice_state_update(self, data) -> None:
        """Handle VOICE_STATE_UPDATE: forward our own state to the voice
        client, update the guild's voice state, adjust the member cache per
        the voice cache flag, and dispatch ``voice_state_update``."""
        guild = self._get_guild(utils._get_as_snowflake(data, "guild_id"))
        channel_id = utils._get_as_snowflake(data, "channel_id")
        flags = self.member_cache_flags
        # self.user is *always* cached when this is called
        self_id = self.user.id  # type: ignore
        if guild is not None:
            if int(data["user_id"]) == self_id:
                # our own state: let the connected VoiceProtocol react to it
                voice = self._get_voice_client(guild.id)
                if voice is not None:
                    coro = voice.on_voice_state_update(data)
                    asyncio.create_task(
                        logging_coroutine(
                            coro, info="Voice Protocol voice state update handler"
                        )
                    )
            member, before, after = guild._update_voice_state(data, channel_id)  # type: ignore
            if member is not None:
                if flags.voice:
                    if (
                        channel_id is None
                        and flags._voice_only
                        and member.id != self_id
                    ):
                        # Only remove from cache if we only have the voice flag enabled
                        # Member doesn't meet the Snowflake protocol currently
                        guild._remove_member(member)  # type: ignore
                    elif channel_id is not None:
                        guild._add_member(member)
                self.dispatch("voice_state_update", member, before, after)
            else:
                _log.debug(
                    "VOICE_STATE_UPDATE referencing an unknown member ID: %s. Discarding.",
                    data["user_id"],
                )
    def parse_voice_server_update(self, data) -> None:
        """Handle VOICE_SERVER_UPDATE: hand the new endpoint/token to the voice client."""
        try:
            key_id = int(data["guild_id"])
        except KeyError:
            # No guild_id means this is a private-call voice connection keyed by channel.
            key_id = int(data["channel_id"])
        vc = self._get_voice_client(key_id)
        if vc is not None:
            coro = vc.on_voice_server_update(data)
            asyncio.create_task(
                logging_coroutine(
                    coro, info="Voice Protocol voice server update handler"
                )
            )
    def parse_typing_start(self, data) -> None:
        """Handle TYPING_START: resolve the typing user per channel type and dispatch."""
        channel, guild = self._get_guild_channel(data)
        if channel is not None:
            member = None
            user_id = utils._get_as_snowflake(data, "user_id")
            if isinstance(channel, DMChannel):
                member = channel.recipient
            elif isinstance(channel, (Thread, TextChannel)) and guild is not None:
                # user_id won't be None
                member = guild.get_member(user_id)  # type: ignore
                if member is None:
                    # Fall back to the inline member payload when the cache misses.
                    member_data = data.get("member")
                    if member_data:
                        member = Member(data=member_data, state=self, guild=guild)
            elif isinstance(channel, GroupChannel):
                member = utils.find(lambda x: x.id == user_id, channel.recipients)
            if member is not None:
                # NOTE(review): assumes the gateway always includes "timestamp";
                # a missing key would make fromtimestamp raise — confirm upstream.
                timestamp = datetime.datetime.fromtimestamp(
                    data.get("timestamp"), tz=datetime.timezone.utc
                )
                self.dispatch("typing", channel, member, timestamp)
def _get_reaction_user(
self, channel: MessageableChannel, user_id: int
) -> Optional[Union[User, Member]]:
if isinstance(channel, TextChannel):
return channel.guild.get_member(user_id)
return self.get_user(user_id)
    def get_reaction_emoji(self, data) -> Union[Emoji, PartialEmoji]:
        """Turn a raw reaction emoji payload into a cached Emoji, a PartialEmoji,
        or — for unicode emoji with no ID — the bare name string."""
        emoji_id = utils._get_as_snowflake(data, "id")
        if not emoji_id:
            # Unicode emoji: the name field *is* the emoji.
            return data["name"]
        try:
            return self._emojis[emoji_id]
        except KeyError:
            return PartialEmoji.with_state(
                self,
                animated=data.get("animated", False),
                id=emoji_id,
                name=data["name"],
            )
def _upgrade_partial_emoji(
self, emoji: PartialEmoji
) -> Union[Emoji, PartialEmoji, str]:
emoji_id = emoji.id
if not emoji_id:
return emoji.name
try:
return self._emojis[emoji_id]
except KeyError:
return emoji
def get_channel(self, id: Optional[int]) -> Optional[Union[Channel, Thread]]:
if id is None:
return None
pm = self._get_private_channel(id)
if pm is not None:
return pm
for guild in self.guilds:
channel = guild._resolve_channel(id)
if channel is not None:
return channel
def create_message(
self,
*,
channel: Union[
TextChannel, Thread, DMChannel, GroupChannel, PartialMessageable
],
data: MessagePayload,
) -> Message:
return Message(state=self, channel=channel, data=data)
class AutoShardedConnectionState(ConnectionState):
    """Connection state for sharded clients.

    Extends :class:`ConnectionState` with per-shard readiness tracking,
    shard-aware chunking, and rebinding of cached message references when
    guilds are re-created across shard reconnects.
    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        # Shard IDs this client runs; populated by the sharded client at launch.
        self.shard_ids: Union[List[int], range] = []
        # Set once all shards have been launched; gates _delay_ready().
        self.shards_launched: asyncio.Event = asyncio.Event()

    def _update_message_references(self) -> None:
        """Rebind cached messages' guild/channel references to freshly-created guild objects."""
        # self._messages won't be None when this is called
        for msg in self._messages:  # type: ignore
            if not msg.guild:
                continue
            new_guild = self._get_guild(msg.guild.id)
            if new_guild is not None and new_guild is not msg.guild:
                channel_id = msg.channel.id
                channel = new_guild._resolve_channel(channel_id) or Object(
                    id=channel_id
                )
                # channel will either be a TextChannel, Thread or Object
                msg._rebind_cached_references(new_guild, channel)  # type: ignore

    async def chunker(
        self,
        guild_id: int,
        query: str = "",
        limit: int = 0,
        presences: bool = False,
        *,
        shard_id: Optional[int] = None,
        nonce: Optional[str] = None,
    ) -> None:
        """Request member chunks for *guild_id* over the websocket of *shard_id*."""
        ws = self._get_websocket(guild_id, shard_id=shard_id)
        await ws.request_chunks(
            guild_id, query=query, limit=limit, presences=presences, nonce=nonce
        )

    async def _delay_ready(self) -> None:
        """Drain streamed GUILD_CREATEs, chunk guilds in rate-limited buckets per
        shard, then dispatch guild/shard readiness events and finally ``ready``."""
        await self.shards_launched.wait()
        processed = []
        max_concurrency = len(self.shard_ids) * 2
        current_bucket = []
        while True:
            # this snippet of code is basically waiting N seconds
            # until the last GUILD_CREATE was sent
            try:
                guild = await asyncio.wait_for(
                    self._ready_state.get(), timeout=self.guild_ready_timeout
                )
            except asyncio.TimeoutError:
                break
            else:
                if self._guild_needs_chunking(guild):
                    _log.debug(
                        "Guild ID %d requires chunking, will be done in the background.",
                        guild.id,
                    )
                    if len(current_bucket) >= max_concurrency:
                        try:
                            await utils.sane_wait_for(
                                current_bucket, timeout=max_concurrency * 70.0
                            )
                        except asyncio.TimeoutError:
                            fmt = "Shard ID %s failed to wait for chunks from a sub-bucket with length %d"
                            _log.warning(fmt, guild.shard_id, len(current_bucket))
                        finally:
                            current_bucket = []
                    # Chunk the guild in the background while we wait for GUILD_CREATE streaming
                    future = asyncio.ensure_future(self.chunk_guild(guild))
                    current_bucket.append(future)
                else:
                    # No chunking needed: use an already-resolved future as a placeholder.
                    future = self.loop.create_future()
                    future.set_result([])
                processed.append((guild, future))
        guilds = sorted(processed, key=lambda g: g[0].shard_id)
        for shard_id, info in itertools.groupby(guilds, key=lambda g: g[0].shard_id):
            children, futures = zip(*info)
            # 110 reqs/minute w/ 1 req/guild plus some buffer
            timeout = 61 * (len(children) / 110)
            try:
                await utils.sane_wait_for(futures, timeout=timeout)
            except asyncio.TimeoutError:
                _log.warning(
                    "Shard ID %s failed to wait for chunks (timeout=%.2f) for %d guilds",
                    shard_id,
                    timeout,
                    len(guilds),
                )
            for guild in children:
                if guild.unavailable is False:
                    self.dispatch("guild_available", guild)
                else:
                    self.dispatch("guild_join", guild)
            self.dispatch("shard_ready", shard_id)
        # remove the state
        try:
            del self._ready_state
        except AttributeError:
            pass  # already been deleted somehow
        # regular users cannot shard so we won't worry about it here.
        # clear the current task
        self._ready_task = None
        # dispatch the event
        self.call_handlers("ready")
        self.dispatch("ready")

    def parse_ready(self, data) -> None:
        """Handle READY for one shard: seed user/guild caches and start the
        deferred-ready task on the first shard to arrive."""
        if not hasattr(self, "_ready_state"):
            self._ready_state = asyncio.Queue()
        self.user = user = ClientUser(state=self, data=data["user"])
        # self._users is a list of Users, we're setting a ClientUser
        self._users[user.id] = user  # type: ignore
        if self.application_id is None:
            try:
                application = data["application"]
            except KeyError:
                pass
            else:
                self.application_id = utils._get_as_snowflake(application, "id")
                self.application_flags = ApplicationFlags._from_value(
                    application["flags"]
                )
        for guild_data in data["guilds"]:
            self._add_guild_from_data(guild_data)
        if self._messages:
            self._update_message_references()
        self.dispatch("connect")
        self.dispatch("shard_connect", data["__shard_id__"])
        if self._ready_task is None:
            self._ready_task = asyncio.create_task(self._delay_ready())

    def parse_resumed(self, data) -> None:
        """Handle RESUMED: dispatch both the global and per-shard resume events."""
        self.dispatch("resumed")
        self.dispatch("shard_resumed", data["__shard_id__"])
| 36.844769 | 118 | 0.586002 |
from __future__ import annotations
import asyncio
from collections import deque, OrderedDict
import copy
import datetime
import itertools
import logging
from typing import (
Dict,
Optional,
TYPE_CHECKING,
Union,
Callable,
Any,
List,
TypeVar,
Coroutine,
Sequence,
Tuple,
Deque,
)
import inspect
import os
from .guild import Guild
from .activity import BaseActivity
from .user import User, ClientUser
from .emoji import Emoji
from .mentions import AllowedMentions
from .partial_emoji import PartialEmoji
from .message import Message
from .channel import *
from .channel import _channel_factory
from .raw_models import *
from .member import Member
from .role import Role
from .enums import ChannelType, try_enum, Status
from . import utils
from .flags import ApplicationFlags, Intents, MemberCacheFlags
from .object import Object
from .invite import Invite
from .integrations import _integration_factory
from .interactions import Interaction
from .ui.view import ViewStore, View
from .stage_instance import StageInstance
from .threads import Thread, ThreadMember
from .sticker import GuildSticker
if TYPE_CHECKING:
from .abc import PrivateChannel
from .message import MessageableChannel
from .guild import GuildChannel, VocalGuildChannel
from .http import HTTPClient
from .voice_client import VoiceProtocol
from .client import Client
from .gateway import DiscordWebSocket
from .types.activity import Activity as ActivityPayload
from .types.channel import DMChannel as DMChannelPayload
from .types.user import User as UserPayload
from .types.emoji import Emoji as EmojiPayload
from .types.sticker import GuildSticker as GuildStickerPayload
from .types.guild import Guild as GuildPayload
from .types.message import Message as MessagePayload
# Generic result type used by logging_coroutine.
T = TypeVar("T")
# TypeVar bound for alternate-constructor style methods on ConnectionState.
CS = TypeVar("CS", bound="ConnectionState")
# Any channel-like object the state caches can hand back from a lookup.
Channel = Union[GuildChannel, VocalGuildChannel, PrivateChannel, PartialMessageable]
class ChunkRequest:
    """Tracks a single outstanding guild member-chunk request.

    Member chunks arriving from the gateway are accumulated in ``buffer`` and,
    when ``cache`` is enabled, merged into the guild's member cache. Waiters
    obtain futures that resolve with the full buffer once the final chunk is
    processed (see :meth:`done`). The random ``nonce`` correlates gateway
    responses with this request.
    """

    def __init__(
        self,
        guild_id: int,
        loop: asyncio.AbstractEventLoop,
        resolver: Callable[[int], Any],
        *,
        cache: bool = True,
    ) -> None:
        self.guild_id: int = guild_id
        self.resolver: Callable[[int], Any] = resolver
        self.loop: asyncio.AbstractEventLoop = loop
        self.cache: bool = cache
        self.nonce: str = os.urandom(16).hex()
        self.buffer: List[Member] = []
        self.waiters: List[asyncio.Future[List[Member]]] = []

    def add_members(self, members: List[Member]) -> None:
        """Append a chunk of members, optionally merging them into the guild cache."""
        self.buffer.extend(members)
        if not self.cache:
            return
        guild = self.resolver(self.guild_id)
        if guild is None:
            return
        for member in members:
            current = guild.get_member(member.id)
            # Prefer the chunked member when the cached one is absent or stale
            # (missing joined_at means it came from a partial payload).
            if current is None or current.joined_at is None:
                guild._add_member(member)

    async def wait(self) -> List[Member]:
        """Wait until the request completes and return all buffered members."""
        future = self.get_future()
        try:
            return await future
        finally:
            self.waiters.remove(future)

    def get_future(self) -> asyncio.Future[List[Member]]:
        """Register and return a future resolved with the buffer on completion."""
        future = self.loop.create_future()
        self.waiters.append(future)
        return future

    def done(self) -> None:
        """Resolve every pending waiter with the accumulated member buffer."""
        for waiter in self.waiters:
            if not waiter.done():
                waiter.set_result(self.buffer)
_log = logging.getLogger(__name__)
async def logging_coroutine(
    coroutine: Coroutine[Any, Any, T], *, info: str
) -> Optional[T]:
    """Await *coroutine*, logging (and swallowing) any exception it raises.

    Used for fire-and-forget tasks; the awaited result is discarded, so this
    always returns ``None``.
    """
    try:
        await coroutine
    except Exception:
        # *info* names the operation in the traceback log line.
        _log.exception("Exception occurred during %s", info)
class ConnectionState:
if TYPE_CHECKING:
_get_websocket: Callable[..., DiscordWebSocket]
_get_client: Callable[..., Client]
_parsers: Dict[str, Callable[[Dict[str, Any]], None]]
    def __init__(
        self,
        *,
        dispatch: Callable,
        handlers: Dict[str, Callable],
        hooks: Dict[str, Callable],
        http: HTTPClient,
        loop: asyncio.AbstractEventLoop,
        **options: Any,
    ) -> None:
        """Validate client options, derive caching behaviour from intents, build
        the gateway-event parser table, and initialize all caches via clear()."""
        self.loop: asyncio.AbstractEventLoop = loop
        self.http: HTTPClient = http
        self.max_messages: Optional[int] = options.get("max_messages", 1000)
        if self.max_messages is not None and self.max_messages <= 0:
            # Non-positive values fall back to the default cache size.
            self.max_messages = 1000
        self.dispatch: Callable = dispatch
        self.handlers: Dict[str, Callable] = handlers
        self.hooks: Dict[str, Callable] = hooks
        self.shard_count: Optional[int] = None
        self._ready_task: Optional[asyncio.Task] = None
        self.application_id: Optional[int] = utils._get_as_snowflake(
            options, "application_id"
        )
        self.heartbeat_timeout: float = options.get("heartbeat_timeout", 60.0)
        self.guild_ready_timeout: float = options.get("guild_ready_timeout", 2.0)
        if self.guild_ready_timeout < 0:
            raise ValueError("guild_ready_timeout cannot be negative")
        allowed_mentions = options.get("allowed_mentions")
        if allowed_mentions is not None and not isinstance(
            allowed_mentions, AllowedMentions
        ):
            raise TypeError("allowed_mentions parameter must be AllowedMentions")
        self.allowed_mentions: Optional[AllowedMentions] = allowed_mentions
        self._chunk_requests: Dict[Union[int, str], ChunkRequest] = {}
        activity = options.get("activity", None)
        if activity:
            if not isinstance(activity, BaseActivity):
                raise TypeError("activity parameter must derive from BaseActivity.")
            activity = activity.to_dict()
        status = options.get("status", None)
        if status:
            if status is Status.offline:
                # The gateway expresses "offline" as the invisible presence.
                status = "invisible"
            else:
                status = str(status)
        intents = options.get("intents", None)
        if intents is not None:
            if not isinstance(intents, Intents):
                raise TypeError(
                    f"intents parameter must be Intent not {type(intents)!r}"
                )
        else:
            intents = Intents.default()
        if not intents.guilds:
            _log.warning(
                "Guilds intent seems to be disabled. This may cause state related issues."
            )
        # Guild chunking at startup requires the privileged members intent.
        self._chunk_guilds: bool = options.get(
            "chunk_guilds_at_startup", intents.members
        )
        if not intents.members and self._chunk_guilds:
            raise ValueError(
                "Intents.members must be enabled to chunk guilds at startup."
            )
        cache_flags = options.get("member_cache_flags", None)
        if cache_flags is None:
            cache_flags = MemberCacheFlags.from_intents(intents)
        else:
            if not isinstance(cache_flags, MemberCacheFlags):
                raise TypeError(
                    f"member_cache_flags parameter must be MemberCacheFlags not {type(cache_flags)!r}"
                )
            cache_flags._verify_intents(intents)
        self.member_cache_flags: MemberCacheFlags = cache_flags
        self._activity: Optional[ActivityPayload] = activity
        self._status: Optional[str] = status
        self._intents: Intents = intents
        if not intents.members or cache_flags._empty:
            # Without member caching, store/deref become non-caching variants.
            self.store_user = self.create_user
            self.deref_user = self.deref_user_no_intents
        # Build the gateway dispatch table: parse_foo_bar handles FOO_BAR events.
        self.parsers = parsers = {}
        for attr, func in inspect.getmembers(self):
            if attr.startswith("parse_"):
                parsers[attr[6:].upper()] = func
        self.clear()
    def clear(self, *, views: bool = True) -> None:
        """Reset every cache; when *views* is False the persistent view store survives."""
        self.user: Optional[ClientUser] = None
        self._users: Dict[int, User] = {}
        self._emojis: Dict[int, Emoji] = {}
        self._stickers: Dict[int, GuildSticker] = {}
        self._guilds: Dict[int, Guild] = {}
        if views:
            self._view_store: ViewStore = ViewStore(self)
        self._voice_clients: Dict[int, VoiceProtocol] = {}
        # OrderedDict gives the private-channel cache LRU eviction semantics.
        self._private_channels: OrderedDict[int, PrivateChannel] = OrderedDict()
        self._private_channels_by_user: Dict[int, DMChannel] = {}
        if self.max_messages is not None:
            self._messages: Optional[Deque[Message]] = deque(maxlen=self.max_messages)
        else:
            self._messages: Optional[Deque[Message]] = None
    def process_chunk_requests(
        self, guild_id: int, nonce: Optional[str], members: List[Member], complete: bool
    ) -> None:
        """Route an incoming member chunk to matching requests; drop finished ones."""
        removed = []
        for key, request in self._chunk_requests.items():
            if request.guild_id == guild_id and request.nonce == nonce:
                request.add_members(members)
                if complete:
                    request.done()
                    removed.append(key)
        # Deleted after iteration to avoid mutating the dict while looping.
        for key in removed:
            del self._chunk_requests[key]
    def call_handlers(self, key: str, *args: Any, **kwargs: Any) -> None:
        """Invoke the registered synchronous handler for *key*, if any."""
        try:
            func = self.handlers[key]
        except KeyError:
            pass
        else:
            func(*args, **kwargs)
    async def call_hooks(self, key: str, *args: Any, **kwargs: Any) -> None:
        """Await the registered asynchronous hook for *key*, if any."""
        try:
            coro = self.hooks[key]
        except KeyError:
            pass
        else:
            await coro(*args, **kwargs)
    @property
    def self_id(self) -> Optional[int]:
        """The bot user's ID, or ``None`` before READY has been parsed."""
        u = self.user
        return u.id if u else None
    @property
    def intents(self) -> Intents:
        """A defensive copy of the configured intents (mutating it has no effect)."""
        ret = Intents.none()
        ret.value = self._intents.value
        return ret
    @property
    def voice_clients(self) -> List[VoiceProtocol]:
        """All currently connected voice protocol instances."""
        return list(self._voice_clients.values())
    def _get_voice_client(self, guild_id: Optional[int]) -> Optional[VoiceProtocol]:
        """Return the voice client keyed by *guild_id*, or ``None``."""
        return self._voice_clients.get(guild_id)
    def _add_voice_client(self, guild_id: int, voice: VoiceProtocol) -> None:
        """Register *voice* under *guild_id*, replacing any existing entry."""
        self._voice_clients[guild_id] = voice
    def _remove_voice_client(self, guild_id: int) -> None:
        """Drop the voice client for *guild_id*; no-op if absent."""
        self._voice_clients.pop(guild_id, None)
    def _update_references(self, ws: DiscordWebSocket) -> None:
        """Point every voice client at the new main websocket after a reconnect."""
        for vc in self.voice_clients:
            vc.main_ws = ws
    def store_user(self, data: UserPayload) -> User:
        """Return the cached user for this payload, creating and caching it if new."""
        user_id = int(data["id"])
        try:
            return self._users[user_id]
        except KeyError:
            user = User(state=self, data=data)
            # Discriminator "0000" marks webhook pseudo-users; never cache those.
            if user.discriminator != "0000":
                self._users[user_id] = user
                user._stored = True
            return user
    def deref_user(self, user_id: int) -> None:
        """Evict *user_id* from the user cache; no-op if absent."""
        self._users.pop(user_id, None)
    def create_user(self, data: UserPayload) -> User:
        """Build a user without caching it (used when member caching is disabled)."""
        return User(state=self, data=data)
    def deref_user_no_intents(self, user_id: int) -> None:
        """No-op dereference used when member caching is disabled."""
        return
    def get_user(self, id: Optional[int]) -> Optional[User]:
        """Return the cached user with this ID, or ``None``.

        NOTE: the parameter shadows the ``id`` builtin but is part of the
        public keyword interface and must keep its name.
        """
        return self._users.get(id)
    def store_emoji(self, guild: Guild, data: EmojiPayload) -> Emoji:
        """Create a guild emoji from payload and cache it globally by ID."""
        emoji_id = int(data["id"])
        self._emojis[emoji_id] = emoji = Emoji(guild=guild, state=self, data=data)
        return emoji
    def store_sticker(self, guild: Guild, data: GuildStickerPayload) -> GuildSticker:
        """Create a guild sticker from payload and cache it globally by ID."""
        sticker_id = int(data["id"])
        self._stickers[sticker_id] = sticker = GuildSticker(state=self, data=data)
        return sticker
    def store_view(self, view: View, message_id: Optional[int] = None) -> None:
        """Track a UI view, optionally bound to a specific message ID."""
        self._view_store.add_view(view, message_id)
    def prevent_view_updates_for(self, message_id: int) -> Optional[View]:
        """Stop tracking message edits for this view; returns the detached view."""
        return self._view_store.remove_message_tracking(message_id)
    @property
    def persistent_views(self) -> Sequence[View]:
        """All registered persistent (timeout-less) views."""
        return self._view_store.persistent_views
    @property
    def guilds(self) -> List[Guild]:
        """All cached guilds."""
        return list(self._guilds.values())
    def _get_guild(self, guild_id: Optional[int]) -> Optional[Guild]:
        """Return the cached guild with this ID, or ``None``."""
        return self._guilds.get(guild_id)
    def _add_guild(self, guild: Guild) -> None:
        """Cache *guild* by ID, replacing any existing entry."""
        self._guilds[guild.id] = guild
    def _remove_guild(self, guild: Guild) -> None:
        """Evict a guild and all of its emojis/stickers from the global caches."""
        self._guilds.pop(guild.id, None)
        for emoji in guild.emojis:
            self._emojis.pop(emoji.id, None)
        for sticker in guild.stickers:
            self._stickers.pop(sticker.id, None)
        # Drop our local reference promptly; large guilds hold a lot of state.
        del guild
    @property
    def emojis(self) -> List[Emoji]:
        """All cached custom emojis across every guild."""
        return list(self._emojis.values())
    @property
    def stickers(self) -> List[GuildSticker]:
        """All cached guild stickers across every guild."""
        return list(self._stickers.values())
    def get_emoji(self, emoji_id: Optional[int]) -> Optional[Emoji]:
        """Return the cached custom emoji with this ID, or ``None``."""
        return self._emojis.get(emoji_id)
    def get_sticker(self, sticker_id: Optional[int]) -> Optional[GuildSticker]:
        """Return the cached guild sticker with this ID, or ``None``."""
        return self._stickers.get(sticker_id)
    @property
    def private_channels(self) -> List[PrivateChannel]:
        """All cached DM and group channels."""
        return list(self._private_channels.values())
    def _get_private_channel(
        self, channel_id: Optional[int]
    ) -> Optional[PrivateChannel]:
        """LRU lookup: return the cached private channel and mark it most-recent."""
        try:
            value = self._private_channels[channel_id]
        except KeyError:
            return None
        else:
            # Touch the entry so LRU eviction in _add_private_channel skips it.
            self._private_channels.move_to_end(channel_id)
            return value
    def _get_private_channel_by_user(
        self, user_id: Optional[int]
    ) -> Optional[DMChannel]:
        """Return the cached DM channel whose recipient is *user_id*, or ``None``."""
        return self._private_channels_by_user.get(user_id)
    def _add_private_channel(self, channel: PrivateChannel) -> None:
        """Cache a private channel, evicting the least-recently-used past 128 entries."""
        channel_id = channel.id
        self._private_channels[channel_id] = channel
        if len(self._private_channels) > 128:
            _, to_remove = self._private_channels.popitem(last=False)
            if isinstance(to_remove, DMChannel) and to_remove.recipient:
                # Keep the by-user index consistent with the LRU eviction.
                self._private_channels_by_user.pop(to_remove.recipient.id, None)
        if isinstance(channel, DMChannel) and channel.recipient:
            self._private_channels_by_user[channel.recipient.id] = channel
    def add_dm_channel(self, data: DMChannelPayload) -> DMChannel:
        """Create a DM channel from payload and insert it into the private caches."""
        channel = DMChannel(me=self.user, state=self, data=data)
        self._add_private_channel(channel)
        return channel
    def _remove_private_channel(self, channel: PrivateChannel) -> None:
        """Evict a private channel from both the ID cache and the by-user index."""
        self._private_channels.pop(channel.id, None)
        if isinstance(channel, DMChannel):
            recipient = channel.recipient
            if recipient is not None:
                self._private_channels_by_user.pop(recipient.id, None)
    def _get_message(self, msg_id: Optional[int]) -> Optional[Message]:
        """Linear search of the message cache, newest first (recent messages are
        the likeliest lookup targets)."""
        return (
            utils.find(lambda m: m.id == msg_id, reversed(self._messages))
            if self._messages
            else None
        )
    def _add_guild_from_data(self, data: GuildPayload) -> Guild:
        """Create a guild from a gateway payload and cache it."""
        guild = Guild(data=data, state=self)
        self._add_guild(guild)
        return guild
    def _guild_needs_chunking(self, guild: Guild) -> bool:
        """Whether startup chunking should request members for *guild*.

        Small guilds with the presences intent already deliver full member
        lists in GUILD_CREATE, so they are excluded.
        """
        return (
            self._chunk_guilds
            and not guild.chunked
            and not (self._intents.presences and not guild.large)
        )
    def _get_guild_channel(
        self, data: MessagePayload
    ) -> Tuple[Union[Channel, Thread], Optional[Guild]]:
        """Resolve a message payload's channel (and guild), falling back to a
        DM stub or PartialMessageable when nothing is cached."""
        channel_id = int(data["channel_id"])
        try:
            guild = self._get_guild(int(data["guild_id"]))
        except KeyError:
            # No guild_id key: this message is from a DM.
            channel = DMChannel._from_message(self, channel_id)
            guild = None
        else:
            channel = guild and guild._resolve_channel(channel_id)
        return channel or PartialMessageable(state=self, id=channel_id), guild
    async def chunker(
        self,
        guild_id: int,
        query: str = "",
        limit: int = 0,
        presences: bool = False,
        *,
        nonce: Optional[str] = None,
    ) -> None:
        """Send a REQUEST_GUILD_MEMBERS op over this guild's websocket."""
        ws = self._get_websocket(guild_id)
        await ws.request_chunks(
            guild_id, query=query, limit=limit, presences=presences, nonce=nonce
        )
    async def query_members(
        self,
        guild: Guild,
        query: str,
        limit: int,
        user_ids: List[int],
        cache: bool,
        presences: bool,
    ):
        """Request guild members over the gateway and wait (up to 30s) for the
        chunked responses; returns the collected members."""
        guild_id = guild.id
        ws = self._get_websocket(guild_id)
        if ws is None:
            raise RuntimeError("Somehow do not have a websocket for this guild_id")
        request = ChunkRequest(guild.id, self.loop, self._get_guild, cache=cache)
        # The nonce ties GUILD_MEMBERS_CHUNK responses back to this request.
        self._chunk_requests[request.nonce] = request
        try:
            await ws.request_chunks(
                guild_id,
                query=query,
                limit=limit,
                user_ids=user_ids,
                presences=presences,
                nonce=request.nonce,
            )
            return await asyncio.wait_for(request.wait(), timeout=30.0)
        except asyncio.TimeoutError:
            _log.warning(
                "Timed out waiting for chunks with query %r and limit %d for guild_id %d",
                query,
                limit,
                guild_id,
            )
            raise
    async def _delay_ready(self) -> None:
        """Consume streamed GUILD_CREATEs until they stop arriving, chunk guilds
        that need it, dispatch availability events, then fire ``ready``."""
        try:
            states = []
            while True:
                # A timeout here means GUILD_CREATE streaming has finished.
                try:
                    guild = await asyncio.wait_for(
                        self._ready_state.get(), timeout=self.guild_ready_timeout
                    )
                except asyncio.TimeoutError:
                    break
                else:
                    if self._guild_needs_chunking(guild):
                        future = await self.chunk_guild(guild, wait=False)
                        states.append((guild, future))
                    else:
                        if guild.unavailable is False:
                            self.dispatch("guild_available", guild)
                        else:
                            self.dispatch("guild_join", guild)
            for guild, future in states:
                try:
                    await asyncio.wait_for(future, timeout=5.0)
                except asyncio.TimeoutError:
                    _log.warning(
                        "Shard ID %s timed out waiting for chunks for guild_id %s.",
                        guild.shard_id,
                        guild.id,
                    )
                if guild.unavailable is False:
                    self.dispatch("guild_available", guild)
                else:
                    self.dispatch("guild_join", guild)
            # remove the state
            try:
                del self._ready_state
            except AttributeError:
                pass  # already been deleted somehow
        except asyncio.CancelledError:
            pass
        else:
            # dispatch the event
            self.call_handlers("ready")
            self.dispatch("ready")
        finally:
            self._ready_task = None
    def parse_ready(self, data) -> None:
        """Handle READY: reset caches, seed the client user / guilds, and start
        the deferred-ready task that waits for guild streaming."""
        if self._ready_task is not None:
            self._ready_task.cancel()
        self._ready_state = asyncio.Queue()
        # Keep registered views across reconnects; everything else is rebuilt.
        self.clear(views=False)
        self.user = ClientUser(state=self, data=data["user"])
        self.store_user(data["user"])
        if self.application_id is None:
            try:
                application = data["application"]
            except KeyError:
                pass
            else:
                self.application_id = utils._get_as_snowflake(application, "id")
                self.application_flags = ApplicationFlags._from_value(application["flags"])
        for guild_data in data["guilds"]:
            self._add_guild_from_data(guild_data)
        self.dispatch("connect")
        self._ready_task = asyncio.create_task(self._delay_ready())
    def parse_resumed(self, data) -> None:
        """Handle RESUMED: signal that the gateway session resumed successfully."""
        self.dispatch("resumed")
    def parse_message_create(self, data) -> None:
        """Handle MESSAGE_CREATE: build the message, cache it, track last_message_id."""
        channel, _ = self._get_guild_channel(data)
        message = Message(channel=channel, data=data, state=self)
        self.dispatch("message", message)
        if self._messages is not None:
            self._messages.append(message)
        # Exact class check: only these channel types track last_message_id.
        if channel and channel.__class__ in (TextChannel, Thread):
            channel.last_message_id = message.id
    def parse_message_delete(self, data) -> None:
        """Handle MESSAGE_DELETE: always dispatch raw; rich event only on cache hit."""
        raw = RawMessageDeleteEvent(data)
        found = self._get_message(raw.message_id)
        raw.cached_message = found
        self.dispatch("raw_message_delete", raw)
        if self._messages is not None and found is not None:
            # Dispatch before eviction so listeners still see the cached object.
            self.dispatch("message_delete", found)
            self._messages.remove(found)
    def parse_message_delete_bulk(self, data) -> None:
        """Handle MESSAGE_DELETE_BULK: raw event always; rich event for cached hits."""
        raw = RawBulkMessageDeleteEvent(data)
        if self._messages:
            found_messages = [
                message for message in self._messages if message.id in raw.message_ids
            ]
        else:
            found_messages = []
        raw.cached_messages = found_messages
        self.dispatch("raw_bulk_message_delete", raw)
        if found_messages:
            self.dispatch("bulk_message_delete", found_messages)
            for msg in found_messages:
                self._messages.remove(msg)  # type: ignore
    def parse_message_update(self, data) -> None:
        """Handle MESSAGE_UPDATE: update the cached message, dispatch raw + rich
        edit events, and refresh any tracked component view."""
        raw = RawMessageUpdateEvent(data)
        message = self._get_message(raw.message_id)
        if message is not None:
            older_message = copy.copy(message)
            raw.cached_message = older_message
            self.dispatch("raw_message_edit", raw)
            message._update(data)
            # Coerce the `after` parameter to take the new updated Member
            # ref: #5999
            older_message.author = message.author
            self.dispatch("message_edit", older_message, message)
        else:
            self.dispatch("raw_message_edit", raw)
        if "components" in data and self._view_store.is_message_tracked(raw.message_id):
            self._view_store.update_from_message(raw.message_id, data["components"])
    def parse_message_reaction_add(self, data) -> None:
        """Handle MESSAGE_REACTION_ADD: raw event always; rich reaction event when
        the message is cached and the reacting user can be resolved."""
        emoji = data["emoji"]
        emoji_id = utils._get_as_snowflake(emoji, "id")
        emoji = PartialEmoji.with_state(
            self, id=emoji_id, animated=emoji.get("animated", False), name=emoji["name"]
        )
        raw = RawReactionActionEvent(data, emoji, "REACTION_ADD")
        member_data = data.get("member")
        if member_data:
            guild = self._get_guild(raw.guild_id)
            if guild is not None:
                raw.member = Member(data=member_data, guild=guild, state=self)
            else:
                raw.member = None
        else:
            raw.member = None
        self.dispatch("raw_reaction_add", raw)
        # rich interface here
        message = self._get_message(raw.message_id)
        if message is not None:
            emoji = self._upgrade_partial_emoji(emoji)
            reaction = message._add_reaction(data, emoji, raw.user_id)
            user = raw.member or self._get_reaction_user(message.channel, raw.user_id)
            if user:
                self.dispatch("reaction_add", reaction, user)
    def parse_message_reaction_remove_all(self, data) -> None:
        """Handle MESSAGE_REACTION_REMOVE_ALL: clear reactions on the cached message."""
        raw = RawReactionClearEvent(data)
        self.dispatch("raw_reaction_clear", raw)
        message = self._get_message(raw.message_id)
        if message is not None:
            # Snapshot before clearing so listeners receive what was removed.
            old_reactions = message.reactions.copy()
            message.reactions.clear()
            self.dispatch("reaction_clear", message, old_reactions)
    def parse_message_reaction_remove(self, data) -> None:
        """Handle MESSAGE_REACTION_REMOVE: raw event always; rich event on cache hit."""
        emoji = data["emoji"]
        emoji_id = utils._get_as_snowflake(emoji, "id")
        emoji = PartialEmoji.with_state(self, id=emoji_id, name=emoji["name"])
        raw = RawReactionActionEvent(data, emoji, "REACTION_REMOVE")
        self.dispatch("raw_reaction_remove", raw)
        message = self._get_message(raw.message_id)
        if message is not None:
            emoji = self._upgrade_partial_emoji(emoji)
            try:
                reaction = message._remove_reaction(data, emoji, raw.user_id)
            except (AttributeError, ValueError):  # eventual consistency lol
                pass
            else:
                user = self._get_reaction_user(message.channel, raw.user_id)
                if user:
                    self.dispatch("reaction_remove", reaction, user)
    def parse_message_reaction_remove_emoji(self, data) -> None:
        """Handle MESSAGE_REACTION_REMOVE_EMOJI: drop one emoji's reactions entirely."""
        emoji = data["emoji"]
        emoji_id = utils._get_as_snowflake(emoji, "id")
        emoji = PartialEmoji.with_state(self, id=emoji_id, name=emoji["name"])
        raw = RawReactionClearEmojiEvent(data, emoji)
        self.dispatch("raw_reaction_clear_emoji", raw)
        message = self._get_message(raw.message_id)
        if message is not None:
            try:
                reaction = message._clear_emoji(emoji)
            except (AttributeError, ValueError):  # eventual consistency lol
                pass
            else:
                if reaction:
                    self.dispatch("reaction_clear_emoji", reaction)
    def parse_interaction_create(self, data) -> None:
        """Handle INTERACTION_CREATE: route component interactions to the view
        store, then dispatch the generic interaction event."""
        interaction = Interaction(data=data, state=self)
        if data["type"] == 3:  # interaction component
            custom_id = interaction.data["custom_id"]  # type: ignore
            component_type = interaction.data["component_type"]  # type: ignore
            self._view_store.dispatch(component_type, custom_id, interaction)
        self.dispatch("interaction", interaction)
    def parse_presence_update(self, data) -> None:
        """Handle PRESENCE_UPDATE: update the cached member's presence/user data
        and dispatch user_update (if profile changed) plus presence_update."""
        guild_id = utils._get_as_snowflake(data, "guild_id")
        # guild_id won't be None here
        guild = self._get_guild(guild_id)
        if guild is None:
            _log.debug(
                "PRESENCE_UPDATE referencing an unknown guild ID: %s. Discarding.",
                guild_id,
            )
            return
        user = data["user"]
        member_id = int(user["id"])
        member = guild.get_member(member_id)
        if member is None:
            _log.debug(
                "PRESENCE_UPDATE referencing an unknown member ID: %s. Discarding",
                member_id,
            )
            return
        # Copy the pre-update member so listeners can diff old vs. new.
        old_member = Member._copy(member)
        user_update = member._presence_update(data=data, user=user)
        if user_update:
            self.dispatch("user_update", user_update[0], user_update[1])
        self.dispatch("presence_update", old_member, member)
    def parse_user_update(self, data) -> None:
        """Handle USER_UPDATE: refresh the client user and its cached twin.

        NOTE(review): assumes READY already set ``self.user`` — confirm the
        gateway never sends USER_UPDATE before READY.
        """
        user: ClientUser = self.user
        user._update(data)
        ref = self._users.get(user.id)
        if ref:
            ref._update(data)
    def parse_invite_create(self, data) -> None:
        """Handle INVITE_CREATE: dispatch the newly created invite."""
        invite = Invite.from_gateway(state=self, data=data)
        self.dispatch("invite_create", invite)
    def parse_invite_delete(self, data) -> None:
        """Handle INVITE_DELETE: dispatch the deleted invite."""
        invite = Invite.from_gateway(state=self, data=data)
        self.dispatch("invite_delete", invite)
    def parse_channel_delete(self, data) -> None:
        """Handle CHANNEL_DELETE: evict the guild channel and dispatch its removal."""
        guild = self._get_guild(utils._get_as_snowflake(data, "guild_id"))
        channel_id = int(data["id"])
        if guild is not None:
            channel = guild.get_channel(channel_id)
            if channel is not None:
                guild._remove_channel(channel)
                self.dispatch("guild_channel_delete", channel)
def parse_channel_update(self, data) -> None:
channel_type = try_enum(ChannelType, data.get("type"))
channel_id = int(data["id"])
if channel_type is ChannelType.group:
channel = self._get_private_channel(channel_id)
old_channel = copy.copy(channel)
channel._update_group(data)
self.dispatch("private_channel_update", old_channel, channel)
return
guild_id = utils._get_as_snowflake(data, "guild_id")
guild = self._get_guild(guild_id)
if guild is not None:
channel = guild.get_channel(channel_id)
if channel is not None:
old_channel = copy.copy(channel)
channel._update(guild, data)
self.dispatch("guild_channel_update", old_channel, channel)
else:
_log.debug(
"CHANNEL_UPDATE referencing an unknown channel ID: %s. Discarding.",
channel_id,
)
else:
_log.debug(
"CHANNEL_UPDATE referencing an unknown guild ID: %s. Discarding.",
guild_id,
)
    def parse_channel_create(self, data) -> None:
        """Handle CHANNEL_CREATE: instantiate via the channel factory, cache on the
        guild, and dispatch creation."""
        factory, ch_type = _channel_factory(data["type"])
        if factory is None:
            _log.debug(
                "CHANNEL_CREATE referencing an unknown channel type %s. Discarding.",
                data["type"],
            )
            return
        guild_id = utils._get_as_snowflake(data, "guild_id")
        guild = self._get_guild(guild_id)
        if guild is not None:
            # the factory can't be a DMChannel or GroupChannel here
            channel = factory(guild=guild, state=self, data=data)  # type: ignore
            guild._add_channel(channel)  # type: ignore
            self.dispatch("guild_channel_create", channel)
        else:
            _log.debug(
                "CHANNEL_CREATE referencing an unknown guild ID: %s. Discarding.",
                guild_id,
            )
            return
def parse_channel_pins_update(self, data) -> None:
channel_id = int(data["channel_id"])
try:
guild = self._get_guild(int(data["guild_id"]))
except KeyError:
guild = None
channel = self._get_private_channel(channel_id)
else:
channel = guild and guild._resolve_channel(channel_id)
if channel is None:
_log.debug(
"CHANNEL_PINS_UPDATE referencing an unknown channel ID: %s. Discarding.",
channel_id,
)
return
last_pin = (
utils.parse_time(data["last_pin_timestamp"])
if data["last_pin_timestamp"]
else None
)
if guild is None:
self.dispatch("private_channel_pins_update", channel, last_pin)
else:
self.dispatch("guild_channel_pins_update", channel, last_pin)
    def parse_thread_create(self, data) -> None:
        """Handle THREAD_CREATE: cache the thread; dispatch thread_join if it's new."""
        guild_id = int(data["guild_id"])
        guild: Optional[Guild] = self._get_guild(guild_id)
        if guild is None:
            _log.debug(
                "THREAD_CREATE referencing an unknown guild ID: %s. Discarding",
                guild_id,
            )
            return
        thread = Thread(guild=guild, state=guild._state, data=data)
        has_thread = guild.get_thread(thread.id)
        guild._add_thread(thread)
        if not has_thread:
            self.dispatch("thread_join", thread)
    def parse_thread_update(self, data) -> None:
        """Handle THREAD_UPDATE: update the cached thread, or treat an unknown
        thread as a late join."""
        guild_id = int(data["guild_id"])
        guild = self._get_guild(guild_id)
        if guild is None:
            _log.debug(
                "THREAD_UPDATE referencing an unknown guild ID: %s. Discarding",
                guild_id,
            )
            return
        thread_id = int(data["id"])
        thread = guild.get_thread(thread_id)
        if thread is not None:
            old = copy.copy(thread)
            thread._update(data)
            self.dispatch("thread_update", old, thread)
        else:
            thread = Thread(guild=guild, state=guild._state, data=data)
            guild._add_thread(thread)
            self.dispatch("thread_join", thread)
    def parse_thread_delete(self, data) -> None:
        """Handle THREAD_DELETE: evict the cached thread and dispatch its removal."""
        guild_id = int(data["guild_id"])
        guild = self._get_guild(guild_id)
        if guild is None:
            _log.debug(
                "THREAD_DELETE referencing an unknown guild ID: %s. Discarding",
                guild_id,
            )
            return
        thread_id = int(data["id"])
        thread = guild.get_thread(thread_id)
        if thread is not None:
            guild._remove_thread(thread)  # type: ignore
            self.dispatch("thread_delete", thread)
    def parse_thread_list_sync(self, data) -> None:
        """Handle THREAD_LIST_SYNC: replace the synced subset (or all) of a
        guild's thread cache, dispatching joins for new threads and removals
        for threads no longer present."""
        guild_id = int(data["guild_id"])
        guild: Optional[Guild] = self._get_guild(guild_id)
        if guild is None:
            _log.debug(
                "THREAD_LIST_SYNC referencing an unknown guild ID: %s. Discarding",
                guild_id,
            )
            return
        try:
            channel_ids = set(data["channel_ids"])
        except KeyError:
            # If not provided, then the entire guild is being synced
            # So all previous thread data should be overwritten
            previous_threads = guild._threads.copy()
            guild._clear_threads()
        else:
            previous_threads = guild._filter_threads(channel_ids)
        threads = {d["id"]: guild._store_thread(d) for d in data.get("threads", [])}
        for member in data.get("members", []):
            try:
                # note: member['id'] is the thread_id
                thread = threads[member["id"]]
            except KeyError:
                continue
            else:
                thread._add_member(ThreadMember(thread, member))
        for thread in threads.values():
            old = previous_threads.pop(thread.id, None)
            if old is None:
                self.dispatch("thread_join", thread)
        for thread in previous_threads.values():
            self.dispatch("thread_remove", thread)
def parse_thread_member_update(self, data) -> None:
    """Handle THREAD_MEMBER_UPDATE: refresh our own membership object on the
    thread (this event only fires for the current user)."""
    guild_id = int(data["guild_id"])
    guild: Optional[Guild] = self._get_guild(guild_id)
    if guild is None:
        _log.debug(
            "THREAD_MEMBER_UPDATE referencing an unknown guild ID: %s. Discarding",
            guild_id,
        )
        return
    thread_id = int(data["id"])
    thread: Optional[Thread] = guild.get_thread(thread_id)
    if thread is None:
        _log.debug(
            "THREAD_MEMBER_UPDATE referencing an unknown thread ID: %s. Discarding",
            thread_id,
        )
        return
    member = ThreadMember(thread, data)
    # NOTE(review): assumes the payload always refers to the current user —
    # matches how thread.me is used elsewhere; confirm against the gateway docs.
    thread.me = member
def parse_thread_members_update(self, data) -> None:
    """Handle THREAD_MEMBERS_UPDATE: apply member additions/removals to the
    thread, dispatching join/remove events.

    The current user is special-cased: being added dispatches
    ``thread_join`` and being removed dispatches ``thread_remove``.
    """
    guild_id = int(data["guild_id"])
    guild: Optional[Guild] = self._get_guild(guild_id)
    if guild is None:
        _log.debug(
            "THREAD_MEMBERS_UPDATE referencing an unknown guild ID: %s. Discarding",
            guild_id,
        )
        return
    thread_id = int(data["id"])
    thread: Optional[Thread] = guild.get_thread(thread_id)
    if thread is None:
        _log.debug(
            "THREAD_MEMBERS_UPDATE referencing an unknown thread ID: %s. Discarding",
            thread_id,
        )
        return
    added_members = [ThreadMember(thread, d) for d in data.get("added_members", [])]
    removed_member_ids = [int(x) for x in data.get("removed_member_ids", [])]
    self_id = self.self_id
    for member in added_members:
        if member.id != self_id:
            thread._add_member(member)
            self.dispatch("thread_member_join", member)
        else:
            thread.me = member
            self.dispatch("thread_join", thread)
    for member_id in removed_member_ids:
        if member_id != self_id:
            member = thread._pop_member(member_id)
            # Only dispatch if the member was actually cached on the thread.
            if member is not None:
                self.dispatch("thread_member_remove", member)
        else:
            self.dispatch("thread_remove", thread)
def parse_guild_member_add(self, data) -> None:
    """Handle GUILD_MEMBER_ADD: build the member, optionally cache it, bump
    the guild member count, and dispatch ``member_join``."""
    guild = self._get_guild(int(data["guild_id"]))
    if guild is None:
        _log.debug(
            "GUILD_MEMBER_ADD referencing an unknown guild ID: %s. Discarding.",
            data["guild_id"],
        )
        return
    member = Member(guild=guild, data=data, state=self)
    # Only cache when the member-cache flags allow caching on join.
    if self.member_cache_flags.joined:
        guild._add_member(member)
    try:
        # _member_count may not exist yet if the guild was never fully loaded.
        guild._member_count += 1
    except AttributeError:
        pass
    self.dispatch("member_join", member)
def parse_guild_member_remove(self, data) -> None:
    """Handle GUILD_MEMBER_REMOVE: decrement the member count, evict the
    cached member (if any), and dispatch ``member_remove``."""
    guild = self._get_guild(int(data["guild_id"]))
    if guild is not None:
        try:
            # _member_count may be absent on partially-loaded guilds.
            guild._member_count -= 1
        except AttributeError:
            pass
        user_id = int(data["user"]["id"])
        member = guild.get_member(user_id)
        # Only dispatch when the member was cached; uncached removals are silent.
        if member is not None:
            guild._remove_member(member)  # type: ignore
            self.dispatch("member_remove", member)
    else:
        _log.debug(
            "GUILD_MEMBER_REMOVE referencing an unknown guild ID: %s. Discarding.",
            data["guild_id"],
        )
def parse_guild_member_update(self, data) -> None:
    """Handle GUILD_MEMBER_UPDATE.

    For cached members this dispatches ``member_update`` (and
    ``user_update`` when the inner user changed). Uncached members are
    cached when the flags allow it, but ``member_update`` is not dispatched
    since there is no "before" state.
    """
    guild = self._get_guild(int(data["guild_id"]))
    user = data["user"]
    user_id = int(user["id"])
    if guild is None:
        _log.debug(
            "GUILD_MEMBER_UPDATE referencing an unknown guild ID: %s. Discarding.",
            data["guild_id"],
        )
        return
    member = guild.get_member(user_id)
    if member is not None:
        old_member = Member._copy(member)
        member._update(data)
        # _update_inner_user returns a (before, after) pair when the nested
        # user object actually changed.
        user_update = member._update_inner_user(user)
        if user_update:
            self.dispatch("user_update", user_update[0], user_update[1])
        self.dispatch("member_update", old_member, member)
    else:
        if self.member_cache_flags.joined:
            member = Member(data=data, guild=guild, state=self)
            # Force an update on the inner user if necessary
            user_update = member._update_inner_user(user)
            if user_update:
                self.dispatch("user_update", user_update[0], user_update[1])
            guild._add_member(member)
        # NOTE(review): this debug line runs even when the member was just
        # cached above — appears intentional (no prior state to diff), but
        # the "Discarding" wording is misleading in that case; confirm.
        _log.debug(
            "GUILD_MEMBER_UPDATE referencing an unknown member ID: %s. Discarding.",
            user_id,
        )
def parse_guild_emojis_update(self, data) -> None:
    """Handle GUILD_EMOJIS_UPDATE: rebuild the guild's emoji cache and
    dispatch ``guild_emojis_update`` with the before/after tuples."""
    guild = self._get_guild(int(data["guild_id"]))
    if guild is None:
        _log.debug(
            "GUILD_EMOJIS_UPDATE referencing an unknown guild ID: %s. Discarding.",
            data["guild_id"],
        )
        return
    previous = guild.emojis
    # Evict the outgoing emojis from the global cache before re-populating.
    for stale in previous:
        self._emojis.pop(stale.id, None)
    guild.emojis = tuple(self.store_emoji(guild, payload) for payload in data["emojis"])
    self.dispatch("guild_emojis_update", guild, previous, guild.emojis)
def parse_guild_stickers_update(self, data) -> None:
    """Handle GUILD_STICKERS_UPDATE: rebuild the guild's sticker cache and
    dispatch ``guild_stickers_update`` with the before/after tuples."""
    guild = self._get_guild(int(data["guild_id"]))
    if guild is None:
        _log.debug(
            "GUILD_STICKERS_UPDATE referencing an unknown guild ID: %s. Discarding.",
            data["guild_id"],
        )
        return
    previous = guild.stickers
    # Evict the outgoing stickers from the global cache before re-populating.
    for stale in previous:
        self._stickers.pop(stale.id, None)
    guild.stickers = tuple(self.store_sticker(guild, payload) for payload in data["stickers"])  # type: ignore
    self.dispatch("guild_stickers_update", guild, previous, guild.stickers)
def _get_create_guild(self, data):
    """Return the guild object for a GUILD_CREATE payload.

    A GUILD_CREATE with ``unavailable == False`` for a cached guild means
    it just became available again; the cached object is refreshed and
    returned. Otherwise a new guild is built from the payload.
    """
    if data.get("unavailable") is False:
        # GUILD_CREATE with unavailable in the response
        # usually means that the guild has become available
        # and is therefore in the cache
        guild = self._get_guild(int(data["id"]))
        if guild is not None:
            guild.unavailable = False
            guild._from_data(data)
            return guild
    return self._add_guild_from_data(data)
def is_guild_evicted(self, guild) -> bool:
    """Return True when the guild is no longer in the internal guild cache."""
    still_cached = guild.id in self._guilds
    return not still_cached
async def chunk_guild(self, guild, *, wait=True, cache=None):
    """Request the full member list for *guild* via gateway chunking.

    Reuses an in-flight :class:`ChunkRequest` for the same guild if one
    exists. When ``wait`` is True the resolved member list is awaited and
    returned; otherwise a future is returned. ``cache`` defaults to the
    joined member-cache flag.
    """
    cache = cache or self.member_cache_flags.joined
    request = self._chunk_requests.get(guild.id)
    if request is None:
        self._chunk_requests[guild.id] = request = ChunkRequest(
            guild.id, self.loop, self._get_guild, cache=cache
        )
        # Only send the gateway request for a brand-new ChunkRequest;
        # duplicates piggyback on the existing nonce.
        await self.chunker(guild.id, nonce=request.nonce)
    if wait:
        return await request.wait()
    return request.get_future()
async def _chunk_and_dispatch(self, guild, unavailable):
    """Chunk the guild's members (up to 60s), then dispatch the appropriate
    join/available event.

    ``guild_available`` is used when the guild was previously unavailable
    (``unavailable is False`` in the payload); ``guild_join`` otherwise.
    """
    try:
        await asyncio.wait_for(self.chunk_guild(guild), timeout=60.0)
    except asyncio.TimeoutError:
        _log.info("Somehow timed out waiting for chunks.")
    if unavailable is False:
        self.dispatch("guild_available", guild)
    else:
        self.dispatch("guild_join", guild)
def parse_guild_create(self, data) -> None:
    """Handle GUILD_CREATE.

    During startup the guild is queued for the ready-state machinery
    instead of being dispatched immediately. Guilds needing member
    chunking are chunked in a background task before dispatch.
    """
    unavailable = data.get("unavailable")
    if unavailable is True:
        # joined a guild with unavailable == True so..
        return
    guild = self._get_create_guild(data)
    try:
        # Notify the on_ready state, if any, that this guild is complete.
        self._ready_state.put_nowait(guild)
    except AttributeError:
        pass
    else:
        # If we're waiting for the event, put the rest on hold
        return
    if self._guild_needs_chunking(guild):
        asyncio.create_task(self._chunk_and_dispatch(guild, unavailable))
        return
    if unavailable is False:
        self.dispatch("guild_available", guild)
    else:
        self.dispatch("guild_join", guild)
def parse_guild_update(self, data) -> None:
    """Handle GUILD_UPDATE: refresh the cached guild and dispatch
    ``guild_update`` with the pre-update snapshot."""
    guild = self._get_guild(int(data["id"]))
    if guild is None:
        _log.debug(
            "GUILD_UPDATE referencing an unknown guild ID: %s. Discarding.",
            data["id"],
        )
        return
    snapshot = copy.copy(guild)
    guild._from_data(data)
    self.dispatch("guild_update", snapshot, guild)
def parse_guild_delete(self, data) -> None:
    """Handle GUILD_DELETE.

    An ``unavailable`` payload marks the guild unavailable (outage) and
    dispatches ``guild_unavailable``; otherwise the guild and its cached
    messages are purged and ``guild_remove`` is dispatched.
    """
    guild = self._get_guild(int(data["id"]))
    if guild is None:
        _log.debug(
            "GUILD_DELETE referencing an unknown guild ID: %s. Discarding.",
            data["id"],
        )
        return
    if data.get("unavailable", False):
        # Outage, not an actual removal.
        guild.unavailable = True
        self.dispatch("guild_unavailable", guild)
        return
    if self._messages is not None:
        # Drop cached messages belonging to the removed guild.
        self._messages: Optional[Deque[Message]] = deque(
            (msg for msg in self._messages if msg.guild != guild),
            maxlen=self.max_messages,
        )
    self._remove_guild(guild)
    self.dispatch("guild_remove", guild)
def parse_guild_ban_add(self, data) -> None:
    """Handle GUILD_BAN_ADD: dispatch ``member_ban`` with the cached member
    when available, falling back to a bare User."""
    # strange with it, the main purpose of this event
    # is mainly to dispatch to another event worth listening to for logging
    guild = self._get_guild(int(data["guild_id"]))
    if guild is not None:
        try:
            user = User(data=data["user"], state=self)
        except KeyError:
            # Payload without a "user" key: nothing to dispatch.
            pass
        else:
            member = guild.get_member(user.id) or user
            self.dispatch("member_ban", guild, member)
def parse_guild_ban_remove(self, data) -> None:
    """Handle GUILD_BAN_REMOVE: dispatch ``member_unban`` for known guilds
    whose payload carries a user object."""
    guild = self._get_guild(int(data["guild_id"]))
    if guild is None or "user" not in data:
        return
    unbanned = self.store_user(data["user"])
    self.dispatch("member_unban", guild, unbanned)
def parse_guild_role_create(self, data) -> None:
    """Handle GUILD_ROLE_CREATE: cache the new role and dispatch
    ``guild_role_create``."""
    guild = self._get_guild(int(data["guild_id"]))
    if guild is None:
        _log.debug(
            "GUILD_ROLE_CREATE referencing an unknown guild ID: %s. Discarding.",
            data["guild_id"],
        )
        return
    role_data = data["role"]
    role = Role(guild=guild, data=role_data, state=self)
    guild._add_role(role)
    self.dispatch("guild_role_create", role)
def parse_guild_role_delete(self, data) -> None:
    """Handle GUILD_ROLE_DELETE: evict the role and dispatch
    ``guild_role_delete`` (silently ignored if the role wasn't cached)."""
    guild = self._get_guild(int(data["guild_id"]))
    if guild is not None:
        role_id = int(data["role_id"])
        try:
            role = guild._remove_role(role_id)
        except KeyError:
            # Role was never cached; nothing to dispatch.
            return
        else:
            self.dispatch("guild_role_delete", role)
    else:
        _log.debug(
            "GUILD_ROLE_DELETE referencing an unknown guild ID: %s. Discarding.",
            data["guild_id"],
        )
def parse_guild_role_update(self, data) -> None:
    """Handle GUILD_ROLE_UPDATE: refresh the cached role and dispatch
    ``guild_role_update`` with the pre-update copy."""
    guild = self._get_guild(int(data["guild_id"]))
    if guild is not None:
        role_data = data["role"]
        role_id = int(role_data["id"])
        role = guild.get_role(role_id)
        if role is not None:
            old_role = copy.copy(role)
            role._update(role_data)
            self.dispatch("guild_role_update", old_role, role)
    else:
        _log.debug(
            "GUILD_ROLE_UPDATE referencing an unknown guild ID: %s. Discarding.",
            data["guild_id"],
        )
def parse_guild_members_chunk(self, data) -> None:
    """Handle GUILD_MEMBERS_CHUNK: build Member objects, apply bundled
    presences, and feed the chunk into any pending chunk requests."""
    guild_id = int(data["guild_id"])
    guild = self._get_guild(guild_id)
    presences = data.get("presences", [])
    # the guild won't be None here
    members = [Member(guild=guild, data=member, state=self) for member in data.get("members", [])]
    _log.debug(
        "Processed a chunk for %s members in guild ID %s.", len(members), guild_id
    )
    if presences:
        # Presence payloads key members by stringified user ID.
        member_dict = {str(member.id): member for member in members}
        for presence in presences:
            user = presence["user"]
            member_id = user["id"]
            member = member_dict.get(member_id)
            if member is not None:
                member._presence_update(presence, user)
    # The final chunk has chunk_index == chunk_count - 1.
    complete = data.get("chunk_index", 0) + 1 == data.get("chunk_count")
    self.process_chunk_requests(guild_id, data.get("nonce"), members, complete)
def parse_guild_integrations_update(self, data) -> None:
    """Handle GUILD_INTEGRATIONS_UPDATE: dispatch the event for known guilds."""
    guild = self._get_guild(int(data["guild_id"]))
    if guild is None:
        _log.debug(
            "GUILD_INTEGRATIONS_UPDATE referencing an unknown guild ID: %s. Discarding.",
            data["guild_id"],
        )
        return
    self.dispatch("guild_integrations_update", guild)
def parse_integration_create(self, data) -> None:
    """Handle INTEGRATION_CREATE: build the typed integration object and
    dispatch ``integration_create``."""
    # guild_id is popped so the remaining payload matches the integration schema.
    guild_id = int(data.pop("guild_id"))
    guild = self._get_guild(guild_id)
    if guild is not None:
        cls, _ = _integration_factory(data["type"])
        integration = cls(data=data, guild=guild)
        self.dispatch("integration_create", integration)
    else:
        _log.debug(
            "INTEGRATION_CREATE referencing an unknown guild ID: %s. Discarding.",
            guild_id,
        )
def parse_integration_update(self, data) -> None:
    """Handle INTEGRATION_UPDATE: build the typed integration object and
    dispatch ``integration_update``."""
    # guild_id is popped so the remaining payload matches the integration schema.
    guild_id = int(data.pop("guild_id"))
    guild = self._get_guild(guild_id)
    if guild is None:
        _log.debug(
            "INTEGRATION_UPDATE referencing an unknown guild ID: %s. Discarding.",
            guild_id,
        )
        return
    integration_cls, _ = _integration_factory(data["type"])
    self.dispatch("integration_update", integration_cls(data=data, guild=guild))
def parse_integration_delete(self, data) -> None:
    """Handle INTEGRATION_DELETE: dispatch the raw deletion event for known
    guilds (there is no cached integration object to remove)."""
    guild_id = int(data["guild_id"])
    guild = self._get_guild(guild_id)
    if guild is None:
        _log.debug(
            "INTEGRATION_DELETE referencing an unknown guild ID: %s. Discarding.",
            guild_id,
        )
        return
    self.dispatch("raw_integration_delete", RawIntegrationDeleteEvent(data))
def parse_webhooks_update(self, data) -> None:
    """Handle WEBHOOKS_UPDATE: dispatch ``webhooks_update`` with the channel
    whose webhooks changed."""
    guild = self._get_guild(int(data["guild_id"]))
    if guild is None:
        _log.debug(
            "WEBHOOKS_UPDATE referencing an unknown guild ID: %s. Discarding",
            data["guild_id"],
        )
        return
    channel = guild.get_channel(int(data["channel_id"]))
    if channel is not None:
        self.dispatch("webhooks_update", channel)
    else:
        _log.debug(
            "WEBHOOKS_UPDATE referencing an unknown channel ID: %s. Discarding.",
            data["channel_id"],
        )
def parse_stage_instance_create(self, data) -> None:
    """Handle STAGE_INSTANCE_CREATE: cache the stage instance on the guild
    and dispatch ``stage_instance_create``."""
    guild = self._get_guild(int(data["guild_id"]))
    if guild is None:
        _log.debug(
            "STAGE_INSTANCE_CREATE referencing unknown guild ID: %s. Discarding.",
            data["guild_id"],
        )
        return
    instance = StageInstance(guild=guild, state=self, data=data)
    guild._stage_instances[instance.id] = instance
    self.dispatch("stage_instance_create", instance)
def parse_stage_instance_update(self, data) -> None:
    """Handle STAGE_INSTANCE_UPDATE: refresh the cached stage instance and
    dispatch ``stage_instance_update`` with the pre-update copy."""
    guild = self._get_guild(int(data["guild_id"]))
    if guild is not None:
        stage_instance = guild._stage_instances.get(int(data["id"]))
        if stage_instance is not None:
            old_stage_instance = copy.copy(stage_instance)
            stage_instance._update(data)
            self.dispatch(
                "stage_instance_update", old_stage_instance, stage_instance
            )
        else:
            _log.debug(
                "STAGE_INSTANCE_UPDATE referencing unknown stage instance ID: %s. Discarding.",
                data["id"],
            )
    else:
        _log.debug(
            "STAGE_INSTANCE_UPDATE referencing unknown guild ID: %s. Discarding.",
            data["guild_id"],
        )
def parse_stage_instance_delete(self, data) -> None:
    """Handle STAGE_INSTANCE_DELETE: evict the cached stage instance and
    dispatch ``stage_instance_delete`` (uncached instances are ignored)."""
    guild = self._get_guild(int(data["guild_id"]))
    if guild is not None:
        try:
            stage_instance = guild._stage_instances.pop(int(data["id"]))
        except KeyError:
            pass
        else:
            self.dispatch("stage_instance_delete", stage_instance)
    else:
        _log.debug(
            "STAGE_INSTANCE_DELETE referencing unknown guild ID: %s. Discarding.",
            data["guild_id"],
        )
def parse_voice_state_update(self, data) -> None:
    """Handle VOICE_STATE_UPDATE.

    Forwards our own voice-state changes to the active voice client, keeps
    the member cache in sync with the voice-only cache flag, and dispatches
    ``voice_state_update`` (member, before, after).
    """
    guild = self._get_guild(utils._get_as_snowflake(data, "guild_id"))
    channel_id = utils._get_as_snowflake(data, "channel_id")
    flags = self.member_cache_flags
    self_id = self.user.id
    if guild is not None:
        if int(data["user_id"]) == self_id:
            # Our own state changed: let the voice protocol react (task is
            # fire-and-forget; errors are logged by logging_coroutine).
            voice = self._get_voice_client(guild.id)
            if voice is not None:
                coro = voice.on_voice_state_update(data)
                asyncio.create_task(
                    logging_coroutine(
                        coro, info="Voice Protocol voice state update handler"
                    )
                )
        member, before, after = guild._update_voice_state(data, channel_id)
        if member is not None:
            if flags.voice:
                # With voice-only caching, members are evicted when they
                # leave voice and (re)cached when they join a channel.
                if (
                    channel_id is None
                    and flags._voice_only
                    and member.id != self_id
                ):
                    guild._remove_member(member)  # type: ignore
                elif channel_id is not None:
                    guild._add_member(member)
            self.dispatch("voice_state_update", member, before, after)
        else:
            _log.debug(
                "VOICE_STATE_UPDATE referencing an unknown member ID: %s. Discarding.",
                data["user_id"],
            )
def parse_voice_server_update(self, data) -> None:
    """Handle VOICE_SERVER_UPDATE: hand the payload to the matching voice
    client (keyed by guild ID, or channel ID for private calls)."""
    try:
        key_id = int(data["guild_id"])
    except KeyError:
        # No guild_id means a private (DM/group) call keyed by channel.
        key_id = int(data["channel_id"])
    vc = self._get_voice_client(key_id)
    if vc is not None:
        coro = vc.on_voice_server_update(data)
        asyncio.create_task(
            logging_coroutine(
                coro, info="Voice Protocol voice server update handler"
            )
        )
def parse_typing_start(self, data) -> None:
    """Handle TYPING_START: resolve the typing user for the channel kind and
    dispatch ``typing`` with an aware UTC timestamp."""
    channel, guild = self._get_guild_channel(data)
    if channel is not None:
        member = None
        user_id = utils._get_as_snowflake(data, "user_id")
        if isinstance(channel, DMChannel):
            member = channel.recipient
        elif isinstance(channel, (Thread, TextChannel)) and guild is not None:
            # user_id won't be None
            member = guild.get_member(user_id)
            if member is None:
                # Fall back to the member payload bundled with the event.
                member_data = data.get("member")
                if member_data:
                    member = Member(data=member_data, state=self, guild=guild)
        elif isinstance(channel, GroupChannel):
            member = utils.find(lambda x: x.id == user_id, channel.recipients)
        if member is not None:
            # NOTE(review): assumes "timestamp" is always present (gateway
            # contract); data.get(...) returning None would raise here.
            timestamp = datetime.datetime.fromtimestamp(
                data.get("timestamp"), tz=datetime.timezone.utc
            )
            self.dispatch("typing", channel, member, timestamp)
def _get_reaction_user(
    self, channel: MessageableChannel, user_id: int
) -> Optional[Union[User, Member]]:
    """Resolve who reacted, preferring the guild member object for guild
    text channels and falling back to the global user cache."""
    if not isinstance(channel, TextChannel):
        return self.get_user(user_id)
    return channel.guild.get_member(user_id)
def get_reaction_emoji(self, data) -> Union[Emoji, PartialEmoji, str]:
    """Resolve a raw reaction-emoji payload.

    Returns the bare unicode emoji string when the payload carries no ID,
    the cached custom :class:`Emoji` when known, and a freshly built
    :class:`PartialEmoji` otherwise.

    Fix: the return annotation previously omitted ``str`` even though the
    no-ID branch returns ``data["name"]``; the sibling
    ``_upgrade_partial_emoji`` already declares the full union.
    """
    emoji_id = utils._get_as_snowflake(data, "id")
    if not emoji_id:
        # Unicode emoji: Discord sends only the literal character(s).
        return data["name"]
    try:
        return self._emojis[emoji_id]
    except KeyError:
        return PartialEmoji.with_state(
            self,
            animated=data.get("animated", False),
            id=emoji_id,
            name=data["name"],
        )
def _upgrade_partial_emoji(
    self, emoji: PartialEmoji
) -> Union[Emoji, PartialEmoji, str]:
    """Swap a partial emoji for the cached full Emoji when possible.

    Unicode emojis (no ID) collapse to their name string; unknown custom
    emojis are returned unchanged.
    """
    if not emoji.id:
        return emoji.name
    return self._emojis.get(emoji.id, emoji)
def get_channel(self, id: Optional[int]) -> Optional[Union[Channel, Thread]]:
    """Look up a channel or thread by ID: private channels first, then every
    cached guild. Returns None when nothing matches."""
    if id is None:
        return None
    private = self._get_private_channel(id)
    if private is not None:
        return private
    for guild in self.guilds:
        resolved = guild._resolve_channel(id)
        if resolved is not None:
            return resolved
    return None
def create_message(
    self,
    *,
    channel: Union[
        TextChannel, Thread, DMChannel, GroupChannel, PartialMessageable
    ],
    data: MessagePayload,
) -> Message:
    """Construct a Message bound to this state and the given channel."""
    return Message(state=self, channel=channel, data=data)
class AutoShardedConnectionState(ConnectionState):
    """Connection state for sharded clients.

    Extends :class:`ConnectionState` with per-shard READY handling,
    websocket-aware member chunking, and a delayed-ready task that chunks
    guilds in bounded batches before dispatching ``ready``.
    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        # Shard IDs managed by this client; populated during shard setup.
        self.shard_ids: Union[List[int], range] = []
        # Set once every shard has launched; gates _delay_ready().
        self.shards_launched: asyncio.Event = asyncio.Event()

    def _update_message_references(self) -> None:
        """Re-bind cached messages to the freshly created guild/channel
        objects after a re-identify replaced the guild cache."""
        for msg in self._messages:  # type: ignore
            if not msg.guild:
                continue
            new_guild = self._get_guild(msg.guild.id)
            if new_guild is not None and new_guild is not msg.guild:
                channel_id = msg.channel.id
                channel = new_guild._resolve_channel(channel_id) or Object(
                    id=channel_id
                )
                # channel will either be a TextChannel, Thread or Object
                msg._rebind_cached_references(new_guild, channel)  # type: ignore

    async def chunker(
        self,
        guild_id: int,
        query: str = "",
        limit: int = 0,
        presences: bool = False,
        *,
        shard_id: Optional[int] = None,
        nonce: Optional[str] = None,
    ) -> None:
        """Request member chunks over the websocket that owns the guild's shard."""
        ws = self._get_websocket(guild_id, shard_id=shard_id)
        await ws.request_chunks(
            guild_id, query=query, limit=limit, presences=presences, nonce=nonce
        )

    async def _delay_ready(self) -> None:
        """Drain streamed GUILD_CREATEs, chunk guilds in bounded buckets, then
        dispatch per-shard ``shard_ready`` events followed by ``ready``."""
        await self.shards_launched.wait()
        processed = []
        # At most 2 concurrent chunk requests per shard.
        max_concurrency = len(self.shard_ids) * 2
        current_bucket = []
        while True:
            # this snippet of code is basically waiting N seconds
            # until the last GUILD_CREATE was sent
            try:
                guild = await asyncio.wait_for(
                    self._ready_state.get(), timeout=self.guild_ready_timeout
                )
            except asyncio.TimeoutError:
                break
            else:
                if self._guild_needs_chunking(guild):
                    _log.debug(
                        "Guild ID %d requires chunking, will be done in the background.",
                        guild.id,
                    )
                    if len(current_bucket) >= max_concurrency:
                        # Bucket full: wait for it to drain before queueing more.
                        try:
                            await utils.sane_wait_for(
                                current_bucket, timeout=max_concurrency * 70.0
                            )
                        except asyncio.TimeoutError:
                            fmt = "Shard ID %s failed to wait for chunks from a sub-bucket with length %d"
                            _log.warning(fmt, guild.shard_id, len(current_bucket))
                        finally:
                            current_bucket = []
                    # Chunk the guild in the background while we wait for GUILD_CREATE streaming
                    future = asyncio.ensure_future(self.chunk_guild(guild))
                    current_bucket.append(future)
                else:
                    # No chunking needed: use an already-resolved future.
                    future = self.loop.create_future()
                    future.set_result([])
                processed.append((guild, future))
        guilds = sorted(processed, key=lambda g: g[0].shard_id)
        for shard_id, info in itertools.groupby(guilds, key=lambda g: g[0].shard_id):
            children, futures = zip(*info)
            # 110 reqs/minute w/ 1 req/guild plus some buffer
            timeout = 61 * (len(children) / 110)
            try:
                await utils.sane_wait_for(futures, timeout=timeout)
            except asyncio.TimeoutError:
                _log.warning(
                    "Shard ID %s failed to wait for chunks (timeout=%.2f) for %d guilds",
                    shard_id,
                    timeout,
                    len(guilds),
                )
            for guild in children:
                if guild.unavailable is False:
                    self.dispatch("guild_available", guild)
                else:
                    self.dispatch("guild_join", guild)
            self.dispatch("shard_ready", shard_id)
        # remove the state
        try:
            del self._ready_state
        except AttributeError:
            pass  # already been deleted somehow
        # regular users cannot shard so we won't worry about it here.
        self._ready_task = None
        self.call_handlers("ready")
        self.dispatch("ready")

    def parse_ready(self, data) -> None:
        """Handle a shard's READY: cache the client user and application info,
        rebuild the guild cache, and start the delayed-ready task (once)."""
        if not hasattr(self, "_ready_state"):
            self._ready_state = asyncio.Queue()
        self.user = user = ClientUser(state=self, data=data["user"])
        self._users[user.id] = user  # type: ignore
        if self.application_id is None:
            try:
                application = data["application"]
            except KeyError:
                pass
            else:
                self.application_id = utils._get_as_snowflake(application, "id")
                self.application_flags = ApplicationFlags._from_value(
                    application["flags"]
                )
        for guild_data in data["guilds"]:
            self._add_guild_from_data(guild_data)
        if self._messages:
            # Re-point cached messages at the rebuilt guild objects.
            self._update_message_references()
        self.dispatch("connect")
        self.dispatch("shard_connect", data["__shard_id__"])
        if self._ready_task is None:
            self._ready_task = asyncio.create_task(self._delay_ready())

    def parse_resumed(self, data) -> None:
        """Handle RESUMED: dispatch both the global and per-shard events."""
        self.dispatch("resumed")
        self.dispatch("shard_resumed", data["__shard_id__"])
| true | true |
f7fcfa53832ebec3a80de6aa8f5f22eb81c7f930 | 4,802 | py | Python | AppDB/test/unit/test_cassandra_backup.py | Honcharov12/appscale | be1cf90fcd24f1a5a88848f7eb73331b6e4e66d9 | [
"Apache-2.0"
] | null | null | null | AppDB/test/unit/test_cassandra_backup.py | Honcharov12/appscale | be1cf90fcd24f1a5a88848f7eb73331b6e4e66d9 | [
"Apache-2.0"
] | null | null | null | AppDB/test/unit/test_cassandra_backup.py | Honcharov12/appscale | be1cf90fcd24f1a5a88848f7eb73331b6e4e66d9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import re
import subprocess
import sys
import time
import unittest
from flexmock import flexmock
from appscale.common import appscale_info
from appscale.common import appscale_utils
from appscale.common.unpackaged import INFRASTRUCTURE_MANAGER_DIR
from appscale.datastore.backup import backup_exceptions
from appscale.datastore.backup import cassandra_backup
from appscale.datastore.cassandra_env import rebalance
from appscale.datastore.cassandra_env.cassandra_interface import NODE_TOOL
sys.path.append(INFRASTRUCTURE_MANAGER_DIR)
from utils import utils
class TestCassandraBackup(unittest.TestCase):
    """ A set of test cases for the Cassandra backup. """

    def test_clear_old_snapshots(self):
        """clearsnapshot must be invoked via nodetool exactly once."""
        flexmock(subprocess).should_receive('check_call').with_args([NODE_TOOL,
            'clearsnapshot']).and_return().times(1)
        cassandra_backup.clear_old_snapshots()

    def test_create_snapshot(self):
        """snapshot must be invoked via nodetool exactly once."""
        flexmock(subprocess).should_receive('check_call').with_args([NODE_TOOL,
            'snapshot']).and_return().times(1)
        cassandra_backup.create_snapshot()

    def test_remove_old_data(self):
        # Placeholder: behavior not yet covered.
        pass

    def test_restore_snapshots(self):
        # Placeholder: behavior not yet covered.
        pass

    def test_backup_data(self):
        """backup_data must abort when a DB node lacks disk space and
        proceed (tar on every node) once all nodes have room."""
        db_ips = ['192.168.33.10', '192.168.33.11']
        keyname = 'key1'
        path = '~/cassandra_backup.tar'
        flexmock(appscale_info).should_receive('get_db_ips').and_return(db_ips)
        flexmock(appscale_utils).should_receive('ssh').with_args(
            re.compile('^192.*'), keyname, re.compile('.*snapshot$'))
        flexmock(appscale_utils).should_receive('ssh').with_args(
            db_ips[0], keyname, re.compile('.*du -s.*'),
            method=subprocess.check_output).and_return('200 file1\n500 file2\n')
        flexmock(appscale_utils).should_receive('ssh').with_args(
            db_ips[1], keyname, re.compile('.*du -s.*'),
            method=subprocess.check_output).and_return('900 file1\n100 file2\n')

        # Assume first DB machine does not have enough space.
        flexmock(appscale_utils).should_receive('ssh').with_args(
            db_ips[0], keyname, re.compile('^df .*'),
            method=subprocess.check_output).\
            and_return('headers\ndisk blocks used 100 etc')
        self.assertRaises(backup_exceptions.BRException,
                          cassandra_backup.backup_data, path, keyname)

        # With sufficient space on both nodes the backup proceeds.
        flexmock(appscale_utils).should_receive('ssh').with_args(
            db_ips[0], keyname, re.compile('^df .*'),
            method=subprocess.check_output).\
            and_return('headers\ndisk blocks used 2000 etc')
        flexmock(appscale_utils).should_receive('ssh').with_args(
            db_ips[1], keyname, re.compile('^df .*'),
            method=subprocess.check_output).\
            and_return('headers\ndisk blocks used 3000 etc')
        flexmock(appscale_utils).should_receive('ssh').with_args(
            re.compile('^192.*'), keyname, re.compile('.*tar --transform.*'))
        cassandra_backup.backup_data(path, keyname)

    def test_restore_data(self):
        """restore_data must stop Cassandra, unpack the archive, restart the
        service, and wait for all nodes to report Up/Normal."""
        db_ips = ['192.168.33.10', '192.168.33.11']
        keyname = 'key1'
        path = '~/cassandra_backup.tar'
        flexmock(appscale_info).should_receive('get_db_ips').and_return(db_ips)
        flexmock(appscale_utils).should_receive('ssh').with_args(
            re.compile('^192.*'), keyname, 'ls {}'.format(path),
            method=subprocess.call).and_return(0)
        flexmock(appscale_utils).should_receive('ssh').with_args(
            re.compile('^192.*'), keyname, 'monit summary',
            method=subprocess.check_output).and_return('summary output')
        # First poll: stopped on all nodes; second poll: running on all nodes.
        status_outputs = (['Not monitored'] * len(db_ips)) +\
            (['Running'] * len(db_ips))
        flexmock(utils).should_receive('monit_status').\
            and_return(*status_outputs).one_by_one()
        flexmock(appscale_utils).should_receive('ssh').with_args(
            re.compile('^192.*'), keyname, re.compile('^find.* -exec rm .*'))
        flexmock(appscale_utils).should_receive('ssh').with_args(
            re.compile('^192.*'), keyname, re.compile('^tar xf .*'))
        flexmock(appscale_utils).should_receive('ssh').with_args(
            re.compile('^192.*'), keyname, re.compile('^appscale-start-service .*'),
            subprocess.call)
        flexmock(appscale_utils).should_receive('ssh').with_args(
            re.compile('^192.*'), keyname, re.compile('^appscale-start-service .*'))
        flexmock(appscale_utils).should_receive('ssh').with_args(
            re.compile('^192.*'), keyname, re.compile('^chown -R cassandra /opt/.*'))
        flexmock(rebalance).should_receive('get_status').and_return(
            [{'state': 'UN'} for _ in db_ips])
        flexmock(time).should_receive('sleep')
        flexmock(appscale_utils).should_receive('ssh').with_args(
            re.compile('^192.*'), keyname, re.compile('.*nodetool status'),
            method=subprocess.check_output).\
            and_return('UN 192.168.33.10\nUN 192.168.33.11')
        cassandra_backup.restore_data(path, keyname)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| 38.725806 | 79 | 0.708247 |
import re
import subprocess
import sys
import time
import unittest
from flexmock import flexmock
from appscale.common import appscale_info
from appscale.common import appscale_utils
from appscale.common.unpackaged import INFRASTRUCTURE_MANAGER_DIR
from appscale.datastore.backup import backup_exceptions
from appscale.datastore.backup import cassandra_backup
from appscale.datastore.cassandra_env import rebalance
from appscale.datastore.cassandra_env.cassandra_interface import NODE_TOOL
sys.path.append(INFRASTRUCTURE_MANAGER_DIR)
from utils import utils
class TestCassandraBackup(unittest.TestCase):
    """A set of test cases for the Cassandra backup (flexmock-based)."""

    def test_clear_old_snapshots(self):
        """clearsnapshot must be invoked via nodetool exactly once."""
        flexmock(subprocess).should_receive('check_call').with_args([NODE_TOOL,
            'clearsnapshot']).and_return().times(1)
        cassandra_backup.clear_old_snapshots()

    def test_create_snapshot(self):
        """snapshot must be invoked via nodetool exactly once."""
        flexmock(subprocess).should_receive('check_call').with_args([NODE_TOOL,
            'snapshot']).and_return().times(1)
        cassandra_backup.create_snapshot()

    def test_remove_old_data(self):
        # Placeholder: behavior not yet covered.
        pass

    def test_restore_snapshots(self):
        # Placeholder: behavior not yet covered.
        pass

    def test_backup_data(self):
        """backup_data must abort on insufficient disk space and proceed
        once every node has room."""
        db_ips = ['192.168.33.10', '192.168.33.11']
        keyname = 'key1'
        path = '~/cassandra_backup.tar'
        flexmock(appscale_info).should_receive('get_db_ips').and_return(db_ips)
        flexmock(appscale_utils).should_receive('ssh').with_args(
            re.compile('^192.*'), keyname, re.compile('.*snapshot$'))
        flexmock(appscale_utils).should_receive('ssh').with_args(
            db_ips[0], keyname, re.compile('.*du -s.*'),
            method=subprocess.check_output).and_return('200 file1\n500 file2\n')
        flexmock(appscale_utils).should_receive('ssh').with_args(
            db_ips[1], keyname, re.compile('.*du -s.*'),
            method=subprocess.check_output).and_return('900 file1\n100 file2\n')
        # First DB machine reports too little free space: expect BRException.
        flexmock(appscale_utils).should_receive('ssh').with_args(
            db_ips[0], keyname, re.compile('^df .*'),
            method=subprocess.check_output).\
            and_return('headers\ndisk blocks used 100 etc')
        self.assertRaises(backup_exceptions.BRException,
                          cassandra_backup.backup_data, path, keyname)
        # With sufficient space on both nodes the backup proceeds.
        flexmock(appscale_utils).should_receive('ssh').with_args(
            db_ips[0], keyname, re.compile('^df .*'),
            method=subprocess.check_output).\
            and_return('headers\ndisk blocks used 2000 etc')
        flexmock(appscale_utils).should_receive('ssh').with_args(
            db_ips[1], keyname, re.compile('^df .*'),
            method=subprocess.check_output).\
            and_return('headers\ndisk blocks used 3000 etc')
        flexmock(appscale_utils).should_receive('ssh').with_args(
            re.compile('^192.*'), keyname, re.compile('.*tar --transform.*'))
        cassandra_backup.backup_data(path, keyname)

    def test_restore_data(self):
        """restore_data must stop Cassandra, unpack the archive, restart the
        service, and wait for all nodes to report Up/Normal."""
        db_ips = ['192.168.33.10', '192.168.33.11']
        keyname = 'key1'
        path = '~/cassandra_backup.tar'
        flexmock(appscale_info).should_receive('get_db_ips').and_return(db_ips)
        flexmock(appscale_utils).should_receive('ssh').with_args(
            re.compile('^192.*'), keyname, 'ls {}'.format(path),
            method=subprocess.call).and_return(0)
        flexmock(appscale_utils).should_receive('ssh').with_args(
            re.compile('^192.*'), keyname, 'monit summary',
            method=subprocess.check_output).and_return('summary output')
        # First poll: stopped on all nodes; second poll: running on all nodes.
        status_outputs = (['Not monitored'] * len(db_ips)) +\
            (['Running'] * len(db_ips))
        flexmock(utils).should_receive('monit_status').\
            and_return(*status_outputs).one_by_one()
        flexmock(appscale_utils).should_receive('ssh').with_args(
            re.compile('^192.*'), keyname, re.compile('^find.* -exec rm .*'))
        flexmock(appscale_utils).should_receive('ssh').with_args(
            re.compile('^192.*'), keyname, re.compile('^tar xf .*'))
        flexmock(appscale_utils).should_receive('ssh').with_args(
            re.compile('^192.*'), keyname, re.compile('^appscale-start-service .*'),
            subprocess.call)
        flexmock(appscale_utils).should_receive('ssh').with_args(
            re.compile('^192.*'), keyname, re.compile('^appscale-start-service .*'))
        flexmock(appscale_utils).should_receive('ssh').with_args(
            re.compile('^192.*'), keyname, re.compile('^chown -R cassandra /opt/.*'))
        flexmock(rebalance).should_receive('get_status').and_return(
            [{'state': 'UN'} for _ in db_ips])
        flexmock(time).should_receive('sleep')
        flexmock(appscale_utils).should_receive('ssh').with_args(
            re.compile('^192.*'), keyname, re.compile('.*nodetool status'),
            method=subprocess.check_output).\
            and_return('UN 192.168.33.10\nUN 192.168.33.11')
        cassandra_backup.restore_data(path, keyname)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| true | true |
f7fcfaa7231e0891f3c44841a657fb8765e8b0e6 | 24,355 | py | Python | tokenio/proto/security_pb2.py | overcat/token-io | f1b14fef9fd64989b9c9e5c076272f72253b39ed | [
"MIT"
] | null | null | null | tokenio/proto/security_pb2.py | overcat/token-io | f1b14fef9fd64989b9c9e5c076272f72253b39ed | [
"MIT"
] | 2 | 2019-02-26T08:15:50.000Z | 2019-08-06T12:58:21.000Z | tokenio/proto/security_pb2.py | overcat/token-io | f1b14fef9fd64989b9c9e5c076272f72253b39ed | [
"MIT"
] | 1 | 2019-08-05T12:36:22.000Z | 2019-08-05T12:36:22.000Z | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: security.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tokenio.proto.extensions import field_pb2 as extensions_dot_field__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='security.proto',
package='io.token.proto.common.security',
syntax='proto3',
serialized_options=_b('B\016SecurityProtos\252\002#Tokenio.Proto.Common.SecurityProtos'),
serialized_pb=_b('\n\x0esecurity.proto\x12\x1eio.token.proto.common.security\x1a\x16\x65xtensions/field.proto\"\xc9\x02\n\x03Key\x12\n\n\x02id\x18\x01 \x01(\t\x12\x12\n\npublic_key\x18\x02 \x01(\t\x12\x38\n\x05level\x18\x03 \x01(\x0e\x32).io.token.proto.common.security.Key.Level\x12@\n\talgorithm\x18\x04 \x01(\x0e\x32-.io.token.proto.common.security.Key.Algorithm\x12\x15\n\rexpires_at_ms\x18\x05 \x01(\x03\"L\n\tAlgorithm\x12\x15\n\x11INVALID_ALGORITHM\x10\x00\x12\x0b\n\x07\x45\x44\x32\x35\x35\x31\x39\x10\x01\x12\x10\n\x0c\x45\x43\x44SA_SHA256\x10\x02\x12\t\n\x05RS256\x10\x03\"A\n\x05Level\x12\x11\n\rINVALID_LEVEL\x10\x00\x12\x0e\n\nPRIVILEGED\x10\x01\x12\x0c\n\x08STANDARD\x10\x02\x12\x07\n\x03LOW\x10\x03\"\xaf\x01\n\nPrivateKey\x12\n\n\x02id\x18\x01 \x01(\t\x12\x19\n\x0bprivate_key\x18\x02 \x01(\tB\x04\x80\xb5\x18\x01\x12\x38\n\x05level\x18\x03 \x01(\x0e\x32).io.token.proto.common.security.Key.Level\x12@\n\talgorithm\x18\x04 \x01(\x0e\x32-.io.token.proto.common.security.Key.Algorithm\"A\n\tSignature\x12\x11\n\tmember_id\x18\x01 \x01(\t\x12\x0e\n\x06key_id\x18\x02 \x01(\t\x12\x11\n\tsignature\x18\x03 \x01(\t\"\x94\x04\n\rSealedMessage\x12\x12\n\nciphertext\x18\x01 \x01(\t\x12H\n\x04noop\x18\x04 \x01(\x0b\x32\x38.io.token.proto.common.security.SealedMessage.NoopMethodH\x00\x12\x46\n\x03rsa\x18\x06 \x01(\x0b\x32\x37.io.token.proto.common.security.SealedMessage.RsaMethodH\x00\x12M\n\x07rsa_aes\x18\x07 \x01(\x0b\x32:.io.token.proto.common.security.SealedMessage.RsaAesMethodH\x00\x1a\x0c\n\nNoopMethod\x1a[\n\tRsaMethod\x12\x0e\n\x06key_id\x18\x01 \x01(\t\x12\x11\n\talgorithm\x18\x02 \x01(\t\x12\x11\n\tsignature\x18\x03 \x01(\t\x12\x18\n\x10signature_key_id\x18\x04 \x01(\t\x1a\x98\x01\n\x0cRsaAesMethod\x12\x12\n\nrsa_key_id\x18\x01 \x01(\t\x12\x15\n\rrsa_algorithm\x18\x02 \x01(\t\x12\x15\n\raes_algorithm\x18\x03 \x01(\t\x12\x19\n\x11\x65ncrypted_aes_key\x18\x05 \x01(\t\x12\x11\n\tsignature\x18\x06 \x01(\t\x12\x18\n\x10signature_key_id\x18\x07 
\x01(\tB\x08\n\x06method\"X\n\x10SecurityMetadata\x12\x12\n\nip_address\x18\x01 \x01(\t\x12\x14\n\x0cgeo_location\x18\x02 \x01(\t\x12\x1a\n\x12\x64\x65vice_fingerprint\x18\x03 \x01(\tB6B\x0eSecurityProtos\xaa\x02#Tokenio.Proto.Common.SecurityProtosb\x06proto3')
,
dependencies=[extensions_dot_field__pb2.DESCRIPTOR,])
_KEY_ALGORITHM = _descriptor.EnumDescriptor(
name='Algorithm',
full_name='io.token.proto.common.security.Key.Algorithm',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='INVALID_ALGORITHM', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ED25519', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ECDSA_SHA256', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RS256', index=3, number=3,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=261,
serialized_end=337,
)
_sym_db.RegisterEnumDescriptor(_KEY_ALGORITHM)
_KEY_LEVEL = _descriptor.EnumDescriptor(
name='Level',
full_name='io.token.proto.common.security.Key.Level',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='INVALID_LEVEL', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PRIVILEGED', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STANDARD', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LOW', index=3, number=3,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=339,
serialized_end=404,
)
_sym_db.RegisterEnumDescriptor(_KEY_LEVEL)
_KEY = _descriptor.Descriptor(
name='Key',
full_name='io.token.proto.common.security.Key',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='io.token.proto.common.security.Key.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='public_key', full_name='io.token.proto.common.security.Key.public_key', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='level', full_name='io.token.proto.common.security.Key.level', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='algorithm', full_name='io.token.proto.common.security.Key.algorithm', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='expires_at_ms', full_name='io.token.proto.common.security.Key.expires_at_ms', index=4,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_KEY_ALGORITHM,
_KEY_LEVEL,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=75,
serialized_end=404,
)
_PRIVATEKEY = _descriptor.Descriptor(
name='PrivateKey',
full_name='io.token.proto.common.security.PrivateKey',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='io.token.proto.common.security.PrivateKey.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='private_key', full_name='io.token.proto.common.security.PrivateKey.private_key', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\200\265\030\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='level', full_name='io.token.proto.common.security.PrivateKey.level', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='algorithm', full_name='io.token.proto.common.security.PrivateKey.algorithm', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=407,
serialized_end=582,
)
_SIGNATURE = _descriptor.Descriptor(
name='Signature',
full_name='io.token.proto.common.security.Signature',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='member_id', full_name='io.token.proto.common.security.Signature.member_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='key_id', full_name='io.token.proto.common.security.Signature.key_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='signature', full_name='io.token.proto.common.security.Signature.signature', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=584,
serialized_end=649,
)
_SEALEDMESSAGE_NOOPMETHOD = _descriptor.Descriptor(
name='NoopMethod',
full_name='io.token.proto.common.security.SealedMessage.NoopMethod',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=914,
serialized_end=926,
)
_SEALEDMESSAGE_RSAMETHOD = _descriptor.Descriptor(
name='RsaMethod',
full_name='io.token.proto.common.security.SealedMessage.RsaMethod',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key_id', full_name='io.token.proto.common.security.SealedMessage.RsaMethod.key_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='algorithm', full_name='io.token.proto.common.security.SealedMessage.RsaMethod.algorithm', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='signature', full_name='io.token.proto.common.security.SealedMessage.RsaMethod.signature', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='signature_key_id', full_name='io.token.proto.common.security.SealedMessage.RsaMethod.signature_key_id', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=928,
serialized_end=1019,
)
_SEALEDMESSAGE_RSAAESMETHOD = _descriptor.Descriptor(
name='RsaAesMethod',
full_name='io.token.proto.common.security.SealedMessage.RsaAesMethod',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='rsa_key_id', full_name='io.token.proto.common.security.SealedMessage.RsaAesMethod.rsa_key_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rsa_algorithm', full_name='io.token.proto.common.security.SealedMessage.RsaAesMethod.rsa_algorithm', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='aes_algorithm', full_name='io.token.proto.common.security.SealedMessage.RsaAesMethod.aes_algorithm', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='encrypted_aes_key', full_name='io.token.proto.common.security.SealedMessage.RsaAesMethod.encrypted_aes_key', index=3,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='signature', full_name='io.token.proto.common.security.SealedMessage.RsaAesMethod.signature', index=4,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='signature_key_id', full_name='io.token.proto.common.security.SealedMessage.RsaAesMethod.signature_key_id', index=5,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1022,
serialized_end=1174,
)
_SEALEDMESSAGE = _descriptor.Descriptor(
name='SealedMessage',
full_name='io.token.proto.common.security.SealedMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ciphertext', full_name='io.token.proto.common.security.SealedMessage.ciphertext', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='noop', full_name='io.token.proto.common.security.SealedMessage.noop', index=1,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rsa', full_name='io.token.proto.common.security.SealedMessage.rsa', index=2,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rsa_aes', full_name='io.token.proto.common.security.SealedMessage.rsa_aes', index=3,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_SEALEDMESSAGE_NOOPMETHOD, _SEALEDMESSAGE_RSAMETHOD, _SEALEDMESSAGE_RSAAESMETHOD, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='method', full_name='io.token.proto.common.security.SealedMessage.method',
index=0, containing_type=None, fields=[]),
],
serialized_start=652,
serialized_end=1184,
)
_SECURITYMETADATA = _descriptor.Descriptor(
name='SecurityMetadata',
full_name='io.token.proto.common.security.SecurityMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ip_address', full_name='io.token.proto.common.security.SecurityMetadata.ip_address', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='geo_location', full_name='io.token.proto.common.security.SecurityMetadata.geo_location', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='device_fingerprint', full_name='io.token.proto.common.security.SecurityMetadata.device_fingerprint', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1186,
serialized_end=1274,
)
_KEY.fields_by_name['level'].enum_type = _KEY_LEVEL
_KEY.fields_by_name['algorithm'].enum_type = _KEY_ALGORITHM
_KEY_ALGORITHM.containing_type = _KEY
_KEY_LEVEL.containing_type = _KEY
_PRIVATEKEY.fields_by_name['level'].enum_type = _KEY_LEVEL
_PRIVATEKEY.fields_by_name['algorithm'].enum_type = _KEY_ALGORITHM
_SEALEDMESSAGE_NOOPMETHOD.containing_type = _SEALEDMESSAGE
_SEALEDMESSAGE_RSAMETHOD.containing_type = _SEALEDMESSAGE
_SEALEDMESSAGE_RSAAESMETHOD.containing_type = _SEALEDMESSAGE
_SEALEDMESSAGE.fields_by_name['noop'].message_type = _SEALEDMESSAGE_NOOPMETHOD
_SEALEDMESSAGE.fields_by_name['rsa'].message_type = _SEALEDMESSAGE_RSAMETHOD
_SEALEDMESSAGE.fields_by_name['rsa_aes'].message_type = _SEALEDMESSAGE_RSAAESMETHOD
_SEALEDMESSAGE.oneofs_by_name['method'].fields.append(
_SEALEDMESSAGE.fields_by_name['noop'])
_SEALEDMESSAGE.fields_by_name['noop'].containing_oneof = _SEALEDMESSAGE.oneofs_by_name['method']
_SEALEDMESSAGE.oneofs_by_name['method'].fields.append(
_SEALEDMESSAGE.fields_by_name['rsa'])
_SEALEDMESSAGE.fields_by_name['rsa'].containing_oneof = _SEALEDMESSAGE.oneofs_by_name['method']
_SEALEDMESSAGE.oneofs_by_name['method'].fields.append(
_SEALEDMESSAGE.fields_by_name['rsa_aes'])
_SEALEDMESSAGE.fields_by_name['rsa_aes'].containing_oneof = _SEALEDMESSAGE.oneofs_by_name['method']
DESCRIPTOR.message_types_by_name['Key'] = _KEY
DESCRIPTOR.message_types_by_name['PrivateKey'] = _PRIVATEKEY
DESCRIPTOR.message_types_by_name['Signature'] = _SIGNATURE
DESCRIPTOR.message_types_by_name['SealedMessage'] = _SEALEDMESSAGE
DESCRIPTOR.message_types_by_name['SecurityMetadata'] = _SECURITYMETADATA
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Key = _reflection.GeneratedProtocolMessageType('Key', (_message.Message,), dict(
DESCRIPTOR = _KEY,
__module__ = 'security_pb2'
# @@protoc_insertion_point(class_scope:io.token.proto.common.security.Key)
))
_sym_db.RegisterMessage(Key)
PrivateKey = _reflection.GeneratedProtocolMessageType('PrivateKey', (_message.Message,), dict(
DESCRIPTOR = _PRIVATEKEY,
__module__ = 'security_pb2'
# @@protoc_insertion_point(class_scope:io.token.proto.common.security.PrivateKey)
))
_sym_db.RegisterMessage(PrivateKey)
Signature = _reflection.GeneratedProtocolMessageType('Signature', (_message.Message,), dict(
DESCRIPTOR = _SIGNATURE,
__module__ = 'security_pb2'
# @@protoc_insertion_point(class_scope:io.token.proto.common.security.Signature)
))
_sym_db.RegisterMessage(Signature)
SealedMessage = _reflection.GeneratedProtocolMessageType('SealedMessage', (_message.Message,), dict(
NoopMethod = _reflection.GeneratedProtocolMessageType('NoopMethod', (_message.Message,), dict(
DESCRIPTOR = _SEALEDMESSAGE_NOOPMETHOD,
__module__ = 'security_pb2'
# @@protoc_insertion_point(class_scope:io.token.proto.common.security.SealedMessage.NoopMethod)
))
,
RsaMethod = _reflection.GeneratedProtocolMessageType('RsaMethod', (_message.Message,), dict(
DESCRIPTOR = _SEALEDMESSAGE_RSAMETHOD,
__module__ = 'security_pb2'
# @@protoc_insertion_point(class_scope:io.token.proto.common.security.SealedMessage.RsaMethod)
))
,
RsaAesMethod = _reflection.GeneratedProtocolMessageType('RsaAesMethod', (_message.Message,), dict(
DESCRIPTOR = _SEALEDMESSAGE_RSAAESMETHOD,
__module__ = 'security_pb2'
# @@protoc_insertion_point(class_scope:io.token.proto.common.security.SealedMessage.RsaAesMethod)
))
,
DESCRIPTOR = _SEALEDMESSAGE,
__module__ = 'security_pb2'
# @@protoc_insertion_point(class_scope:io.token.proto.common.security.SealedMessage)
))
_sym_db.RegisterMessage(SealedMessage)
_sym_db.RegisterMessage(SealedMessage.NoopMethod)
_sym_db.RegisterMessage(SealedMessage.RsaMethod)
_sym_db.RegisterMessage(SealedMessage.RsaAesMethod)
SecurityMetadata = _reflection.GeneratedProtocolMessageType('SecurityMetadata', (_message.Message,), dict(
DESCRIPTOR = _SECURITYMETADATA,
__module__ = 'security_pb2'
# @@protoc_insertion_point(class_scope:io.token.proto.common.security.SecurityMetadata)
))
_sym_db.RegisterMessage(SecurityMetadata)
DESCRIPTOR._options = None
_PRIVATEKEY.fields_by_name['private_key']._options = None
# @@protoc_insertion_point(module_scope)
| 42.209705 | 2,233 | 0.746377 |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from tokenio.proto.extensions import field_pb2 as extensions_dot_field__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='security.proto',
package='io.token.proto.common.security',
syntax='proto3',
serialized_options=_b('B\016SecurityProtos\252\002#Tokenio.Proto.Common.SecurityProtos'),
serialized_pb=_b('\n\x0esecurity.proto\x12\x1eio.token.proto.common.security\x1a\x16\x65xtensions/field.proto\"\xc9\x02\n\x03Key\x12\n\n\x02id\x18\x01 \x01(\t\x12\x12\n\npublic_key\x18\x02 \x01(\t\x12\x38\n\x05level\x18\x03 \x01(\x0e\x32).io.token.proto.common.security.Key.Level\x12@\n\talgorithm\x18\x04 \x01(\x0e\x32-.io.token.proto.common.security.Key.Algorithm\x12\x15\n\rexpires_at_ms\x18\x05 \x01(\x03\"L\n\tAlgorithm\x12\x15\n\x11INVALID_ALGORITHM\x10\x00\x12\x0b\n\x07\x45\x44\x32\x35\x35\x31\x39\x10\x01\x12\x10\n\x0c\x45\x43\x44SA_SHA256\x10\x02\x12\t\n\x05RS256\x10\x03\"A\n\x05Level\x12\x11\n\rINVALID_LEVEL\x10\x00\x12\x0e\n\nPRIVILEGED\x10\x01\x12\x0c\n\x08STANDARD\x10\x02\x12\x07\n\x03LOW\x10\x03\"\xaf\x01\n\nPrivateKey\x12\n\n\x02id\x18\x01 \x01(\t\x12\x19\n\x0bprivate_key\x18\x02 \x01(\tB\x04\x80\xb5\x18\x01\x12\x38\n\x05level\x18\x03 \x01(\x0e\x32).io.token.proto.common.security.Key.Level\x12@\n\talgorithm\x18\x04 \x01(\x0e\x32-.io.token.proto.common.security.Key.Algorithm\"A\n\tSignature\x12\x11\n\tmember_id\x18\x01 \x01(\t\x12\x0e\n\x06key_id\x18\x02 \x01(\t\x12\x11\n\tsignature\x18\x03 \x01(\t\"\x94\x04\n\rSealedMessage\x12\x12\n\nciphertext\x18\x01 \x01(\t\x12H\n\x04noop\x18\x04 \x01(\x0b\x32\x38.io.token.proto.common.security.SealedMessage.NoopMethodH\x00\x12\x46\n\x03rsa\x18\x06 \x01(\x0b\x32\x37.io.token.proto.common.security.SealedMessage.RsaMethodH\x00\x12M\n\x07rsa_aes\x18\x07 \x01(\x0b\x32:.io.token.proto.common.security.SealedMessage.RsaAesMethodH\x00\x1a\x0c\n\nNoopMethod\x1a[\n\tRsaMethod\x12\x0e\n\x06key_id\x18\x01 \x01(\t\x12\x11\n\talgorithm\x18\x02 \x01(\t\x12\x11\n\tsignature\x18\x03 \x01(\t\x12\x18\n\x10signature_key_id\x18\x04 \x01(\t\x1a\x98\x01\n\x0cRsaAesMethod\x12\x12\n\nrsa_key_id\x18\x01 \x01(\t\x12\x15\n\rrsa_algorithm\x18\x02 \x01(\t\x12\x15\n\raes_algorithm\x18\x03 \x01(\t\x12\x19\n\x11\x65ncrypted_aes_key\x18\x05 \x01(\t\x12\x11\n\tsignature\x18\x06 \x01(\t\x12\x18\n\x10signature_key_id\x18\x07 
\x01(\tB\x08\n\x06method\"X\n\x10SecurityMetadata\x12\x12\n\nip_address\x18\x01 \x01(\t\x12\x14\n\x0cgeo_location\x18\x02 \x01(\t\x12\x1a\n\x12\x64\x65vice_fingerprint\x18\x03 \x01(\tB6B\x0eSecurityProtos\xaa\x02#Tokenio.Proto.Common.SecurityProtosb\x06proto3')
,
dependencies=[extensions_dot_field__pb2.DESCRIPTOR,])
_KEY_ALGORITHM = _descriptor.EnumDescriptor(
name='Algorithm',
full_name='io.token.proto.common.security.Key.Algorithm',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='INVALID_ALGORITHM', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ED25519', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ECDSA_SHA256', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RS256', index=3, number=3,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=261,
serialized_end=337,
)
_sym_db.RegisterEnumDescriptor(_KEY_ALGORITHM)
_KEY_LEVEL = _descriptor.EnumDescriptor(
name='Level',
full_name='io.token.proto.common.security.Key.Level',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='INVALID_LEVEL', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PRIVILEGED', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STANDARD', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LOW', index=3, number=3,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=339,
serialized_end=404,
)
_sym_db.RegisterEnumDescriptor(_KEY_LEVEL)
_KEY = _descriptor.Descriptor(
name='Key',
full_name='io.token.proto.common.security.Key',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='io.token.proto.common.security.Key.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='public_key', full_name='io.token.proto.common.security.Key.public_key', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='level', full_name='io.token.proto.common.security.Key.level', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='algorithm', full_name='io.token.proto.common.security.Key.algorithm', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='expires_at_ms', full_name='io.token.proto.common.security.Key.expires_at_ms', index=4,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_KEY_ALGORITHM,
_KEY_LEVEL,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=75,
serialized_end=404,
)
_PRIVATEKEY = _descriptor.Descriptor(
name='PrivateKey',
full_name='io.token.proto.common.security.PrivateKey',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='io.token.proto.common.security.PrivateKey.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='private_key', full_name='io.token.proto.common.security.PrivateKey.private_key', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\200\265\030\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='level', full_name='io.token.proto.common.security.PrivateKey.level', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='algorithm', full_name='io.token.proto.common.security.PrivateKey.algorithm', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=407,
serialized_end=582,
)
_SIGNATURE = _descriptor.Descriptor(
name='Signature',
full_name='io.token.proto.common.security.Signature',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='member_id', full_name='io.token.proto.common.security.Signature.member_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='key_id', full_name='io.token.proto.common.security.Signature.key_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='signature', full_name='io.token.proto.common.security.Signature.signature', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=584,
serialized_end=649,
)
_SEALEDMESSAGE_NOOPMETHOD = _descriptor.Descriptor(
name='NoopMethod',
full_name='io.token.proto.common.security.SealedMessage.NoopMethod',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=914,
serialized_end=926,
)
_SEALEDMESSAGE_RSAMETHOD = _descriptor.Descriptor(
name='RsaMethod',
full_name='io.token.proto.common.security.SealedMessage.RsaMethod',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key_id', full_name='io.token.proto.common.security.SealedMessage.RsaMethod.key_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='algorithm', full_name='io.token.proto.common.security.SealedMessage.RsaMethod.algorithm', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='signature', full_name='io.token.proto.common.security.SealedMessage.RsaMethod.signature', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='signature_key_id', full_name='io.token.proto.common.security.SealedMessage.RsaMethod.signature_key_id', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=928,
serialized_end=1019,
)
_SEALEDMESSAGE_RSAAESMETHOD = _descriptor.Descriptor(
name='RsaAesMethod',
full_name='io.token.proto.common.security.SealedMessage.RsaAesMethod',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='rsa_key_id', full_name='io.token.proto.common.security.SealedMessage.RsaAesMethod.rsa_key_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rsa_algorithm', full_name='io.token.proto.common.security.SealedMessage.RsaAesMethod.rsa_algorithm', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='aes_algorithm', full_name='io.token.proto.common.security.SealedMessage.RsaAesMethod.aes_algorithm', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='encrypted_aes_key', full_name='io.token.proto.common.security.SealedMessage.RsaAesMethod.encrypted_aes_key', index=3,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='signature', full_name='io.token.proto.common.security.SealedMessage.RsaAesMethod.signature', index=4,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='signature_key_id', full_name='io.token.proto.common.security.SealedMessage.RsaAesMethod.signature_key_id', index=5,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1022,
serialized_end=1174,
)
_SEALEDMESSAGE = _descriptor.Descriptor(
name='SealedMessage',
full_name='io.token.proto.common.security.SealedMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ciphertext', full_name='io.token.proto.common.security.SealedMessage.ciphertext', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='noop', full_name='io.token.proto.common.security.SealedMessage.noop', index=1,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rsa', full_name='io.token.proto.common.security.SealedMessage.rsa', index=2,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rsa_aes', full_name='io.token.proto.common.security.SealedMessage.rsa_aes', index=3,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_SEALEDMESSAGE_NOOPMETHOD, _SEALEDMESSAGE_RSAMETHOD, _SEALEDMESSAGE_RSAAESMETHOD, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='method', full_name='io.token.proto.common.security.SealedMessage.method',
index=0, containing_type=None, fields=[]),
],
serialized_start=652,
serialized_end=1184,
)
_SECURITYMETADATA = _descriptor.Descriptor(
name='SecurityMetadata',
full_name='io.token.proto.common.security.SecurityMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ip_address', full_name='io.token.proto.common.security.SecurityMetadata.ip_address', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='geo_location', full_name='io.token.proto.common.security.SecurityMetadata.geo_location', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='device_fingerprint', full_name='io.token.proto.common.security.SecurityMetadata.device_fingerprint', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1186,
serialized_end=1274,
)
_KEY.fields_by_name['level'].enum_type = _KEY_LEVEL
_KEY.fields_by_name['algorithm'].enum_type = _KEY_ALGORITHM
_KEY_ALGORITHM.containing_type = _KEY
_KEY_LEVEL.containing_type = _KEY
_PRIVATEKEY.fields_by_name['level'].enum_type = _KEY_LEVEL
_PRIVATEKEY.fields_by_name['algorithm'].enum_type = _KEY_ALGORITHM
_SEALEDMESSAGE_NOOPMETHOD.containing_type = _SEALEDMESSAGE
_SEALEDMESSAGE_RSAMETHOD.containing_type = _SEALEDMESSAGE
_SEALEDMESSAGE_RSAAESMETHOD.containing_type = _SEALEDMESSAGE
_SEALEDMESSAGE.fields_by_name['noop'].message_type = _SEALEDMESSAGE_NOOPMETHOD
_SEALEDMESSAGE.fields_by_name['rsa'].message_type = _SEALEDMESSAGE_RSAMETHOD
_SEALEDMESSAGE.fields_by_name['rsa_aes'].message_type = _SEALEDMESSAGE_RSAAESMETHOD
_SEALEDMESSAGE.oneofs_by_name['method'].fields.append(
_SEALEDMESSAGE.fields_by_name['noop'])
_SEALEDMESSAGE.fields_by_name['noop'].containing_oneof = _SEALEDMESSAGE.oneofs_by_name['method']
_SEALEDMESSAGE.oneofs_by_name['method'].fields.append(
_SEALEDMESSAGE.fields_by_name['rsa'])
_SEALEDMESSAGE.fields_by_name['rsa'].containing_oneof = _SEALEDMESSAGE.oneofs_by_name['method']
_SEALEDMESSAGE.oneofs_by_name['method'].fields.append(
_SEALEDMESSAGE.fields_by_name['rsa_aes'])
_SEALEDMESSAGE.fields_by_name['rsa_aes'].containing_oneof = _SEALEDMESSAGE.oneofs_by_name['method']
DESCRIPTOR.message_types_by_name['Key'] = _KEY
DESCRIPTOR.message_types_by_name['PrivateKey'] = _PRIVATEKEY
DESCRIPTOR.message_types_by_name['Signature'] = _SIGNATURE
DESCRIPTOR.message_types_by_name['SealedMessage'] = _SEALEDMESSAGE
DESCRIPTOR.message_types_by_name['SecurityMetadata'] = _SECURITYMETADATA
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Key = _reflection.GeneratedProtocolMessageType('Key', (_message.Message,), dict(
DESCRIPTOR = _KEY,
__module__ = 'security_pb2'
# @@protoc_insertion_point(class_scope:io.token.proto.common.security.Key)
))
_sym_db.RegisterMessage(Key)
PrivateKey = _reflection.GeneratedProtocolMessageType('PrivateKey', (_message.Message,), dict(
DESCRIPTOR = _PRIVATEKEY,
__module__ = 'security_pb2'
# @@protoc_insertion_point(class_scope:io.token.proto.common.security.PrivateKey)
))
_sym_db.RegisterMessage(PrivateKey)
Signature = _reflection.GeneratedProtocolMessageType('Signature', (_message.Message,), dict(
DESCRIPTOR = _SIGNATURE,
__module__ = 'security_pb2'
# @@protoc_insertion_point(class_scope:io.token.proto.common.security.Signature)
))
_sym_db.RegisterMessage(Signature)
SealedMessage = _reflection.GeneratedProtocolMessageType('SealedMessage', (_message.Message,), dict(
NoopMethod = _reflection.GeneratedProtocolMessageType('NoopMethod', (_message.Message,), dict(
DESCRIPTOR = _SEALEDMESSAGE_NOOPMETHOD,
__module__ = 'security_pb2'
# @@protoc_insertion_point(class_scope:io.token.proto.common.security.SealedMessage.NoopMethod)
))
,
RsaMethod = _reflection.GeneratedProtocolMessageType('RsaMethod', (_message.Message,), dict(
DESCRIPTOR = _SEALEDMESSAGE_RSAMETHOD,
__module__ = 'security_pb2'
# @@protoc_insertion_point(class_scope:io.token.proto.common.security.SealedMessage.RsaMethod)
))
,
RsaAesMethod = _reflection.GeneratedProtocolMessageType('RsaAesMethod', (_message.Message,), dict(
DESCRIPTOR = _SEALEDMESSAGE_RSAAESMETHOD,
__module__ = 'security_pb2'
# @@protoc_insertion_point(class_scope:io.token.proto.common.security.SealedMessage.RsaAesMethod)
))
,
DESCRIPTOR = _SEALEDMESSAGE,
__module__ = 'security_pb2'
# @@protoc_insertion_point(class_scope:io.token.proto.common.security.SealedMessage)
))
_sym_db.RegisterMessage(SealedMessage)
_sym_db.RegisterMessage(SealedMessage.NoopMethod)
_sym_db.RegisterMessage(SealedMessage.RsaMethod)
_sym_db.RegisterMessage(SealedMessage.RsaAesMethod)
SecurityMetadata = _reflection.GeneratedProtocolMessageType('SecurityMetadata', (_message.Message,), dict(
DESCRIPTOR = _SECURITYMETADATA,
__module__ = 'security_pb2'
# @@protoc_insertion_point(class_scope:io.token.proto.common.security.SecurityMetadata)
))
_sym_db.RegisterMessage(SecurityMetadata)
DESCRIPTOR._options = None
_PRIVATEKEY.fields_by_name['private_key']._options = None
# @@protoc_insertion_point(module_scope)
| true | true |
f7fcfbcd61db1c5d878dc58959a5ca91df076e2e | 5,327 | py | Python | docs/source/conf.py | pierfra-ro/allesfitter | a6a885aaeb3253fec0d924ef3b45e8b7c473b181 | [
"MIT"
] | null | null | null | docs/source/conf.py | pierfra-ro/allesfitter | a6a885aaeb3253fec0d924ef3b45e8b7c473b181 | [
"MIT"
] | null | null | null | docs/source/conf.py | pierfra-ro/allesfitter | a6a885aaeb3253fec0d924ef3b45e8b7c473b181 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'allesfitter'
copyright = '2019, Maximilian N. Guenther, Tansu Daylan'
author = 'Maximilian N. Guenther, Tansu Daylan'
# The short X.Y version
version = '1.1.2'
# The full version, including alpha/beta/rc tags
release = '1.1.2'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'allesfitterdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'allesfitter.tex', 'allesfitter Documentation',
'Maximilian N. Guenther, Tansu Daylan', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'allesfitter', 'allesfitter Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'allesfitter', 'allesfitter Documentation',
author, 'allesfitter', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
| 29.926966 | 79 | 0.650648 |
project = 'allesfitter'
copyright = '2019, Maximilian N. Guenther, Tansu Daylan'
author = 'Maximilian N. Guenther, Tansu Daylan'
version = '1.1.2'
release = '1.1.2'
extensions = [
'sphinx.ext.autodoc',
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
language = None
exclude_patterns = []
pygments_style = None
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'allesfitterdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'allesfitter.tex', 'allesfitter Documentation',
'Maximilian N. Guenther, Tansu Daylan', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'allesfitter', 'allesfitter Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'allesfitter', 'allesfitter Documentation',
author, 'allesfitter', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
| true | true |
f7fcfcc1fae130bab9a576a40d64638e126925d4 | 16,547 | py | Python | eth/db/chain.py | mneedham91/py-evm | 512b6809ce9c33ebed088702e6e22128bf6183af | [
"MIT"
] | null | null | null | eth/db/chain.py | mneedham91/py-evm | 512b6809ce9c33ebed088702e6e22128bf6183af | [
"MIT"
] | null | null | null | eth/db/chain.py | mneedham91/py-evm | 512b6809ce9c33ebed088702e6e22128bf6183af | [
"MIT"
] | null | null | null | import functools
import itertools
from abc import (
abstractmethod
)
from typing import (
Dict,
Iterable,
List,
Tuple,
Type,
TYPE_CHECKING,
)
import rlp
from trie import (
HexaryTrie,
)
from eth_typing import (
BlockNumber,
Hash32
)
from eth_utils import (
to_list,
to_tuple,
)
from eth_hash.auto import keccak
from eth.constants import (
EMPTY_UNCLE_HASH,
GENESIS_PARENT_HASH,
)
from eth.exceptions import (
CanonicalHeadNotFound,
HeaderNotFound,
ParentNotFound,
TransactionNotFound,
ValidationError,
)
from eth.db.header import BaseHeaderDB, HeaderDB
from eth.db.backends.base import (
BaseDB
)
from eth.db.schema import SchemaV1
from eth.rlp.headers import (
BlockHeader,
)
from eth.rlp.receipts import (
Receipt
)
from eth.utils.hexadecimal import (
encode_hex,
)
from eth.validation import (
validate_word,
)
if TYPE_CHECKING:
from eth.rlp.blocks import ( # noqa: F401
BaseBlock
)
from eth.rlp.transactions import ( # noqa: F401
BaseTransaction
)
class TransactionKey(rlp.Serializable):
fields = [
('block_number', rlp.sedes.big_endian_int),
('index', rlp.sedes.big_endian_int),
]
class BaseChainDB(BaseHeaderDB):
db = None # type: BaseDB
@abstractmethod
def __init__(self, db: BaseDB) -> None:
raise NotImplementedError("ChainDB classes must implement this method")
#
# Header API
#
@abstractmethod
def get_block_uncles(self, uncles_hash: Hash32) -> List[BlockHeader]:
raise NotImplementedError("ChainDB classes must implement this method")
#
# Block API
#
@abstractmethod
def persist_block(self, block: 'BaseBlock') -> None:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def persist_uncles(self, uncles: Tuple[BlockHeader]) -> Hash32:
raise NotImplementedError("ChainDB classes must implement this method")
#
# Transaction API
#
@abstractmethod
def add_receipt(self,
block_header: BlockHeader,
index_key: int, receipt: Receipt) -> Hash32:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def add_transaction(self,
block_header: BlockHeader,
index_key: int, transaction: 'BaseTransaction') -> Hash32:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_block_transactions(
self,
block_header: BlockHeader,
transaction_class: Type['BaseTransaction']) -> Iterable['BaseTransaction']:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_block_transaction_hashes(self, block_header: BlockHeader) -> Iterable[Hash32]:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_receipts(self,
header: BlockHeader,
receipt_class: Type[Receipt]) -> Iterable[Receipt]:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_transaction_by_index(
self,
block_number: BlockNumber,
transaction_index: int,
transaction_class: Type['BaseTransaction']) -> 'BaseTransaction':
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_transaction_index(self, transaction_hash: Hash32) -> Tuple[BlockNumber, int]:
raise NotImplementedError("ChainDB classes must implement this method")
#
# Raw Database API
#
@abstractmethod
def exists(self, key: bytes) -> bool:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get(self, key: bytes) -> bytes:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def persist_trie_data_dict(self, trie_data_dict: Dict[bytes, bytes]) -> None:
raise NotImplementedError("ChainDB classes must implement this method")
class ChainDB(HeaderDB, BaseChainDB):
def __init__(self, db: BaseDB) -> None:
self.db = db
#
# Header API
#
def get_block_uncles(self, uncles_hash: Hash32) -> List[BlockHeader]:
"""
Returns an iterable of uncle headers specified by the given uncles_hash
"""
validate_word(uncles_hash, title="Uncles Hash")
if uncles_hash == EMPTY_UNCLE_HASH:
return []
try:
encoded_uncles = self.db[uncles_hash]
except KeyError:
raise HeaderNotFound(
"No uncles found for hash {0}".format(uncles_hash)
)
else:
return rlp.decode(encoded_uncles, sedes=rlp.sedes.CountableList(BlockHeader))
# TODO: This method should take a chain of headers as that's the most common use case
# and it'd be much faster than inserting each header individually.
def persist_header(self, header: BlockHeader) -> Tuple[BlockHeader, ...]:
"""
Returns iterable of headers newly on the canonical chain
"""
is_genesis = header.parent_hash == GENESIS_PARENT_HASH
if not is_genesis and not self.header_exists(header.parent_hash):
raise ParentNotFound(
"Cannot persist block header ({}) with unknown parent ({})".format(
encode_hex(header.hash), encode_hex(header.parent_hash)))
self.db.set(
header.hash,
rlp.encode(header),
)
if is_genesis:
score = header.difficulty
else:
score = self.get_score(header.parent_hash) + header.difficulty
self.db.set(
SchemaV1.make_block_hash_to_score_lookup_key(header.hash),
rlp.encode(score, sedes=rlp.sedes.big_endian_int),
)
try:
head_score = self.get_score(self.get_canonical_head().hash)
except CanonicalHeadNotFound:
new_headers = self._set_as_canonical_chain_head(header)
else:
if score > head_score:
new_headers = self._set_as_canonical_chain_head(header)
else:
new_headers = tuple()
return new_headers
# TODO: update this to take a `hash` rather than a full header object.
def _set_as_canonical_chain_head(self, header: BlockHeader) -> Tuple[BlockHeader, ...]:
"""
Returns iterable of headers newly on the canonical head
"""
try:
self.get_block_header_by_hash(header.hash)
except HeaderNotFound:
raise ValueError("Cannot use unknown block hash as canonical head: {}".format(
header.hash))
new_canonical_headers = tuple(reversed(self._find_new_ancestors(header)))
# remove transaction lookups for blocks that are no longer canonical
for h in new_canonical_headers:
try:
old_hash = self.get_canonical_block_hash(h.block_number)
except HeaderNotFound:
# no old block, and no more possible
break
else:
old_header = self.get_block_header_by_hash(old_hash)
for transaction_hash in self.get_block_transaction_hashes(old_header):
self._remove_transaction_from_canonical_chain(transaction_hash)
# TODO re-add txn to internal pending pool (only if local sender)
pass
for h in new_canonical_headers:
self._add_block_number_to_hash_lookup(h)
self.db.set(SchemaV1.make_canonical_head_hash_lookup_key(), header.hash)
return new_canonical_headers
#
# Block API
#
def persist_block(self, block: 'BaseBlock') -> None:
'''
Persist the given block's header and uncles.
Assumes all block transactions have been persisted already.
'''
new_canonical_headers = self.persist_header(block.header)
for header in new_canonical_headers:
for index, transaction_hash in enumerate(self.get_block_transaction_hashes(header)):
self._add_transaction_to_canonical_chain(transaction_hash, header, index)
if block.uncles:
uncles_hash = self.persist_uncles(block.uncles)
else:
uncles_hash = EMPTY_UNCLE_HASH
if uncles_hash != block.header.uncles_hash:
raise ValidationError(
"Block's uncles_hash (%s) does not match actual uncles' hash (%s)",
block.header.uncles_hash, uncles_hash)
def persist_uncles(self, uncles: Tuple[BlockHeader]) -> Hash32:
"""
Persists the list of uncles to the database.
Returns the uncles hash.
"""
uncles_hash = keccak(rlp.encode(uncles))
self.db.set(
uncles_hash,
rlp.encode(uncles, sedes=rlp.sedes.CountableList(BlockHeader)))
return uncles_hash
#
# Transaction API
#
def add_receipt(self, block_header: BlockHeader, index_key: int, receipt: Receipt) -> Hash32:
"""
Adds the given receipt to the provide block header.
Returns the updated `receipts_root` for updated block header.
"""
receipt_db = HexaryTrie(db=self.db, root_hash=block_header.receipt_root)
receipt_db[index_key] = rlp.encode(receipt)
return receipt_db.root_hash
def add_transaction(self,
block_header: BlockHeader,
index_key: int,
transaction: 'BaseTransaction') -> Hash32:
"""
Adds the given transaction to the provide block header.
Returns the updated `transactions_root` for updated block header.
"""
transaction_db = HexaryTrie(self.db, root_hash=block_header.transaction_root)
transaction_db[index_key] = rlp.encode(transaction)
return transaction_db.root_hash
def get_block_transactions(
self,
header: BlockHeader,
transaction_class: Type['BaseTransaction']) -> Iterable['BaseTransaction']:
"""
Returns an iterable of transactions for the block speficied by the
given block header.
"""
return self._get_block_transactions(header.transaction_root, transaction_class)
@to_list
def get_block_transaction_hashes(self, block_header: BlockHeader) -> Iterable[Hash32]:
"""
Returns an iterable of the transaction hashes from th block specified
by the given block header.
"""
all_encoded_transactions = self._get_block_transaction_data(
block_header.transaction_root,
)
for encoded_transaction in all_encoded_transactions:
yield keccak(encoded_transaction)
@to_tuple
def get_receipts(self,
header: BlockHeader,
receipt_class: Type[Receipt]) -> Iterable[Receipt]:
"""
Returns an iterable of receipts for the block specified by the given
block header.
"""
receipt_db = HexaryTrie(db=self.db, root_hash=header.receipt_root)
for receipt_idx in itertools.count():
receipt_key = rlp.encode(receipt_idx)
if receipt_key in receipt_db:
receipt_data = receipt_db[receipt_key]
yield rlp.decode(receipt_data, sedes=receipt_class)
else:
break
def get_transaction_by_index(
self,
block_number: BlockNumber,
transaction_index: int,
transaction_class: Type['BaseTransaction']) -> 'BaseTransaction':
"""
Returns the transaction at the specified `transaction_index` from the
block specified by `block_number` from the canonical chain.
Raises TransactionNotFound if no block
"""
try:
block_header = self.get_canonical_block_header_by_number(block_number)
except HeaderNotFound:
raise TransactionNotFound("Block {} is not in the canonical chain".format(block_number))
transaction_db = HexaryTrie(self.db, root_hash=block_header.transaction_root)
encoded_index = rlp.encode(transaction_index)
if encoded_index in transaction_db:
encoded_transaction = transaction_db[encoded_index]
return rlp.decode(encoded_transaction, sedes=transaction_class)
else:
raise TransactionNotFound(
"No transaction is at index {} of block {}".format(transaction_index, block_number))
def get_transaction_index(self, transaction_hash: Hash32) -> Tuple[BlockNumber, int]:
"""
Returns a 2-tuple of (block_number, transaction_index) indicating which
block the given transaction can be found in and at what index in the
block transactions.
Raises TransactionNotFound if the transaction_hash is not found in the
canonical chain.
"""
key = SchemaV1.make_transaction_hash_to_block_lookup_key(transaction_hash)
try:
encoded_key = self.db[key]
except KeyError:
raise TransactionNotFound(
"Transaction {} not found in canonical chain".format(encode_hex(transaction_hash)))
transaction_key = rlp.decode(encoded_key, sedes=TransactionKey)
return (transaction_key.block_number, transaction_key.index)
def _get_block_transaction_data(self, transaction_root: Hash32) -> Iterable[Hash32]:
    """
    Yield the raw RLP-encoded transactions stored under the given trie root.

    Trie keys are RLP-encoded transaction indices; iteration stops at the
    first index with no entry, which marks the end of the block body.
    """
    trie = HexaryTrie(self.db, root_hash=transaction_root)
    index = 0
    while True:
        trie_key = rlp.encode(index)
        if trie_key not in trie:
            break
        yield trie[trie_key]
        index += 1
# `to_list` materializes the generator into a concrete list, which is what
# `lru_cache` then memoizes; the cache key includes `self` as well as the
# explicit arguments.
# NOTE(review): lru_cache on an instance method keeps every `self` alive for
# the cache's lifetime -- presumably acceptable for a ChainDB singleton, but
# confirm.
@functools.lru_cache(maxsize=32)
@to_list
def _get_block_transactions(
        self,
        transaction_root: Hash32,
        transaction_class: Type['BaseTransaction']) -> Iterable['BaseTransaction']:
    """
    Memoizable version of `get_block_transactions`

    Decodes each raw transaction fetched from the trie rooted at
    `transaction_root` using the given `transaction_class` sedes.
    """
    for encoded_transaction in self._get_block_transaction_data(transaction_root):
        yield rlp.decode(encoded_transaction, sedes=transaction_class)
def _remove_transaction_from_canonical_chain(self, transaction_hash: Hash32) -> None:
    """
    Drop the canonical-chain lookup entry for the given transaction hash.

    Only the hash -> (block, index) mapping is deleted here; nothing else
    is touched in the database.
    """
    lookup_key = SchemaV1.make_transaction_hash_to_block_lookup_key(transaction_hash)
    self.db.delete(lookup_key)
def _add_transaction_to_canonical_chain(self,
                                        transaction_hash: Hash32,
                                        block_header: BlockHeader,
                                        index: int) -> None:
    """
    Record where on the canonical chain a transaction can be found.

    Stores a lookup entry mapping ``transaction_hash`` to the
    ``(block_number, index)`` pair that identifies the transaction's
    position in the block described by ``block_header``.

    :param transaction_hash: the hash of the transaction to add the lookup for
    :param block_header: The header of the block with the txn that is in the canonical chain
    :param index: the position of the transaction in the block
    """
    location = TransactionKey(block_header.block_number, index)
    self.db.set(
        SchemaV1.make_transaction_hash_to_block_lookup_key(transaction_hash),
        rlp.encode(location),
    )
#
# Raw Database API
#
def exists(self, key: bytes) -> bool:
    """
    Returns True if the given key exists in the database.
    """
    # Thin pass-through to the backing BaseDB implementation.
    return self.db.exists(key)
def get(self, key: bytes) -> bytes:
    """
    Return the value for the given key or a KeyError if it doesn't exist in the database.
    """
    # Direct item access so the backend's KeyError propagates unchanged.
    return self.db[key]
def persist_trie_data_dict(self, trie_data_dict: Dict[bytes, bytes]) -> None:
    """
    Write every (key, value) pair of raw trie data straight into the db.

    :param trie_data_dict: mapping of raw trie node keys to encoded values
    """
    for node_key, node_value in trie_data_dict.items():
        self.db[node_key] = node_value
| 34.983087 | 100 | 0.642533 | import functools
import itertools
from abc import (
abstractmethod
)
from typing import (
Dict,
Iterable,
List,
Tuple,
Type,
TYPE_CHECKING,
)
import rlp
from trie import (
HexaryTrie,
)
from eth_typing import (
BlockNumber,
Hash32
)
from eth_utils import (
to_list,
to_tuple,
)
from eth_hash.auto import keccak
from eth.constants import (
EMPTY_UNCLE_HASH,
GENESIS_PARENT_HASH,
)
from eth.exceptions import (
CanonicalHeadNotFound,
HeaderNotFound,
ParentNotFound,
TransactionNotFound,
ValidationError,
)
from eth.db.header import BaseHeaderDB, HeaderDB
from eth.db.backends.base import (
BaseDB
)
from eth.db.schema import SchemaV1
from eth.rlp.headers import (
BlockHeader,
)
from eth.rlp.receipts import (
Receipt
)
from eth.utils.hexadecimal import (
encode_hex,
)
from eth.validation import (
validate_word,
)
if TYPE_CHECKING:
from eth.rlp.blocks import (
BaseBlock
)
from eth.rlp.transactions import (
BaseTransaction
)
class TransactionKey(rlp.Serializable):
fields = [
('block_number', rlp.sedes.big_endian_int),
('index', rlp.sedes.big_endian_int),
]
class BaseChainDB(BaseHeaderDB):
db = None
@abstractmethod
def __init__(self, db: BaseDB) -> None:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_block_uncles(self, uncles_hash: Hash32) -> List[BlockHeader]:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def persist_block(self, block: 'BaseBlock') -> None:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def persist_uncles(self, uncles: Tuple[BlockHeader]) -> Hash32:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def add_receipt(self,
block_header: BlockHeader,
index_key: int, receipt: Receipt) -> Hash32:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def add_transaction(self,
block_header: BlockHeader,
index_key: int, transaction: 'BaseTransaction') -> Hash32:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_block_transactions(
self,
block_header: BlockHeader,
transaction_class: Type['BaseTransaction']) -> Iterable['BaseTransaction']:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_block_transaction_hashes(self, block_header: BlockHeader) -> Iterable[Hash32]:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_receipts(self,
header: BlockHeader,
receipt_class: Type[Receipt]) -> Iterable[Receipt]:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_transaction_by_index(
self,
block_number: BlockNumber,
transaction_index: int,
transaction_class: Type['BaseTransaction']) -> 'BaseTransaction':
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_transaction_index(self, transaction_hash: Hash32) -> Tuple[BlockNumber, int]:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def exists(self, key: bytes) -> bool:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get(self, key: bytes) -> bytes:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def persist_trie_data_dict(self, trie_data_dict: Dict[bytes, bytes]) -> None:
raise NotImplementedError("ChainDB classes must implement this method")
class ChainDB(HeaderDB, BaseChainDB):
def __init__(self, db: BaseDB) -> None:
self.db = db
def get_block_uncles(self, uncles_hash: Hash32) -> List[BlockHeader]:
validate_word(uncles_hash, title="Uncles Hash")
if uncles_hash == EMPTY_UNCLE_HASH:
return []
try:
encoded_uncles = self.db[uncles_hash]
except KeyError:
raise HeaderNotFound(
"No uncles found for hash {0}".format(uncles_hash)
)
else:
return rlp.decode(encoded_uncles, sedes=rlp.sedes.CountableList(BlockHeader))
# and it'd be much faster than inserting each header individually.
def persist_header(self, header: BlockHeader) -> Tuple[BlockHeader, ...]:
    """
    Store ``header`` and promote it to canonical head when its chain now
    has the highest accumulated score.

    :param header: the block header to persist
    :return: the headers that became canonical as a result (empty tuple if
        the canonical head did not change)
    :raise ParentNotFound: if the header is not genesis and its parent is
        not already stored
    """
    # A genesis header has no stored parent, so skip the parent check.
    is_genesis = header.parent_hash == GENESIS_PARENT_HASH
    if not is_genesis and not self.header_exists(header.parent_hash):
        raise ParentNotFound(
            "Cannot persist block header ({}) with unknown parent ({})".format(
                encode_hex(header.hash), encode_hex(header.parent_hash)))
    self.db.set(
        header.hash,
        rlp.encode(header),
    )
    # Chain score: genesis starts the tally with its own difficulty; any
    # other header extends its parent's accumulated score.
    if is_genesis:
        score = header.difficulty
    else:
        score = self.get_score(header.parent_hash) + header.difficulty
    self.db.set(
        SchemaV1.make_block_hash_to_score_lookup_key(header.hash),
        rlp.encode(score, sedes=rlp.sedes.big_endian_int),
    )
    # No canonical head yet (fresh database) -> this header becomes it.
    # Otherwise promote only if this chain outscores the current head.
    try:
        head_score = self.get_score(self.get_canonical_head().hash)
    except CanonicalHeadNotFound:
        new_headers = self._set_as_canonical_chain_head(header)
    else:
        if score > head_score:
            new_headers = self._set_as_canonical_chain_head(header)
        else:
            new_headers = tuple()
    return new_headers
def _set_as_canonical_chain_head(self, header: BlockHeader) -> Tuple[BlockHeader, ...]:
try:
self.get_block_header_by_hash(header.hash)
except HeaderNotFound:
raise ValueError("Cannot use unknown block hash as canonical head: {}".format(
header.hash))
new_canonical_headers = tuple(reversed(self._find_new_ancestors(header)))
for h in new_canonical_headers:
try:
old_hash = self.get_canonical_block_hash(h.block_number)
except HeaderNotFound:
break
else:
old_header = self.get_block_header_by_hash(old_hash)
for transaction_hash in self.get_block_transaction_hashes(old_header):
self._remove_transaction_from_canonical_chain(transaction_hash)
pass
for h in new_canonical_headers:
self._add_block_number_to_hash_lookup(h)
self.db.set(SchemaV1.make_canonical_head_hash_lookup_key(), header.hash)
return new_canonical_headers
def persist_block(self, block: 'BaseBlock') -> None:
new_canonical_headers = self.persist_header(block.header)
for header in new_canonical_headers:
for index, transaction_hash in enumerate(self.get_block_transaction_hashes(header)):
self._add_transaction_to_canonical_chain(transaction_hash, header, index)
if block.uncles:
uncles_hash = self.persist_uncles(block.uncles)
else:
uncles_hash = EMPTY_UNCLE_HASH
if uncles_hash != block.header.uncles_hash:
raise ValidationError(
"Block's uncles_hash (%s) does not match actual uncles' hash (%s)",
block.header.uncles_hash, uncles_hash)
def persist_uncles(self, uncles: Tuple[BlockHeader]) -> Hash32:
uncles_hash = keccak(rlp.encode(uncles))
self.db.set(
uncles_hash,
rlp.encode(uncles, sedes=rlp.sedes.CountableList(BlockHeader)))
return uncles_hash
def add_receipt(self, block_header: BlockHeader, index_key: int, receipt: Receipt) -> Hash32:
receipt_db = HexaryTrie(db=self.db, root_hash=block_header.receipt_root)
receipt_db[index_key] = rlp.encode(receipt)
return receipt_db.root_hash
def add_transaction(self,
block_header: BlockHeader,
index_key: int,
transaction: 'BaseTransaction') -> Hash32:
transaction_db = HexaryTrie(self.db, root_hash=block_header.transaction_root)
transaction_db[index_key] = rlp.encode(transaction)
return transaction_db.root_hash
def get_block_transactions(
self,
header: BlockHeader,
transaction_class: Type['BaseTransaction']) -> Iterable['BaseTransaction']:
return self._get_block_transactions(header.transaction_root, transaction_class)
@to_list
def get_block_transaction_hashes(self, block_header: BlockHeader) -> Iterable[Hash32]:
all_encoded_transactions = self._get_block_transaction_data(
block_header.transaction_root,
)
for encoded_transaction in all_encoded_transactions:
yield keccak(encoded_transaction)
@to_tuple
def get_receipts(self,
header: BlockHeader,
receipt_class: Type[Receipt]) -> Iterable[Receipt]:
receipt_db = HexaryTrie(db=self.db, root_hash=header.receipt_root)
for receipt_idx in itertools.count():
receipt_key = rlp.encode(receipt_idx)
if receipt_key in receipt_db:
receipt_data = receipt_db[receipt_key]
yield rlp.decode(receipt_data, sedes=receipt_class)
else:
break
def get_transaction_by_index(
self,
block_number: BlockNumber,
transaction_index: int,
transaction_class: Type['BaseTransaction']) -> 'BaseTransaction':
try:
block_header = self.get_canonical_block_header_by_number(block_number)
except HeaderNotFound:
raise TransactionNotFound("Block {} is not in the canonical chain".format(block_number))
transaction_db = HexaryTrie(self.db, root_hash=block_header.transaction_root)
encoded_index = rlp.encode(transaction_index)
if encoded_index in transaction_db:
encoded_transaction = transaction_db[encoded_index]
return rlp.decode(encoded_transaction, sedes=transaction_class)
else:
raise TransactionNotFound(
"No transaction is at index {} of block {}".format(transaction_index, block_number))
def get_transaction_index(self, transaction_hash: Hash32) -> Tuple[BlockNumber, int]:
key = SchemaV1.make_transaction_hash_to_block_lookup_key(transaction_hash)
try:
encoded_key = self.db[key]
except KeyError:
raise TransactionNotFound(
"Transaction {} not found in canonical chain".format(encode_hex(transaction_hash)))
transaction_key = rlp.decode(encoded_key, sedes=TransactionKey)
return (transaction_key.block_number, transaction_key.index)
def _get_block_transaction_data(self, transaction_root: Hash32) -> Iterable[Hash32]:
transaction_db = HexaryTrie(self.db, root_hash=transaction_root)
for transaction_idx in itertools.count():
transaction_key = rlp.encode(transaction_idx)
if transaction_key in transaction_db:
yield transaction_db[transaction_key]
else:
break
@functools.lru_cache(maxsize=32)
@to_list
def _get_block_transactions(
self,
transaction_root: Hash32,
transaction_class: Type['BaseTransaction']) -> Iterable['BaseTransaction']:
for encoded_transaction in self._get_block_transaction_data(transaction_root):
yield rlp.decode(encoded_transaction, sedes=transaction_class)
def _remove_transaction_from_canonical_chain(self, transaction_hash: Hash32) -> None:
self.db.delete(SchemaV1.make_transaction_hash_to_block_lookup_key(transaction_hash))
def _add_transaction_to_canonical_chain(self,
transaction_hash: Hash32,
block_header: BlockHeader,
index: int) -> None:
transaction_key = TransactionKey(block_header.block_number, index)
self.db.set(
SchemaV1.make_transaction_hash_to_block_lookup_key(transaction_hash),
rlp.encode(transaction_key),
)
def exists(self, key: bytes) -> bool:
return self.db.exists(key)
def get(self, key: bytes) -> bytes:
return self.db[key]
def persist_trie_data_dict(self, trie_data_dict: Dict[bytes, bytes]) -> None:
for key, value in trie_data_dict.items():
self.db[key] = value
| true | true |
f7fcfe5e24cdaa982f988067061dd23deefbc33b | 3,092 | py | Python | tests/test_tutorial/test_query_params_str_validations/test_tutorial012.py | jfunez/fastapi | 7372f6ba11abb515a7f11814dba52a1d1c0925f0 | [
"MIT"
] | 2 | 2020-04-09T07:11:28.000Z | 2020-12-12T14:04:35.000Z | tests/test_tutorial/test_query_params_str_validations/test_tutorial012.py | jfunez/fastapi | 7372f6ba11abb515a7f11814dba52a1d1c0925f0 | [
"MIT"
] | 1 | 2021-03-27T18:37:32.000Z | 2021-05-25T15:08:24.000Z | tests/test_tutorial/test_query_params_str_validations/test_tutorial012.py | jfunez/fastapi | 7372f6ba11abb515a7f11814dba52a1d1c0925f0 | [
"MIT"
] | 1 | 2021-02-03T00:43:04.000Z | 2021-02-03T00:43:04.000Z | from fastapi.testclient import TestClient
from query_params_str_validations.tutorial012 import app
client = TestClient(app)
openapi_schema = {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/items/": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Read Items",
"operationId": "read_items_items__get",
"parameters": [
{
"required": False,
"schema": {
"title": "Q",
"type": "array",
"items": {"type": "string"},
"default": ["foo", "bar"],
},
"name": "q",
"in": "query",
}
],
}
}
},
"components": {
"schemas": {
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {"type": "string"},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
}
},
}
def test_openapi_schema():
    """The generated OpenAPI document matches the expected schema exactly."""
    resp = client.get("/openapi.json")
    assert resp.status_code == 200
    assert resp.json() == openapi_schema
def test_default_query_values():
    """Omitting ``q`` falls back to the declared default list."""
    resp = client.get("/items/")
    assert resp.status_code == 200
    assert resp.json() == {"q": ["foo", "bar"]}
def test_multi_query_values():
    """Repeated ``q`` parameters are collected into a list, in order."""
    resp = client.get("/items/?q=baz&q=foobar")
    assert resp.status_code == 200
    assert resp.json() == {"q": ["baz", "foobar"]}
| 31.876289 | 86 | 0.362225 | from fastapi.testclient import TestClient
from query_params_str_validations.tutorial012 import app
client = TestClient(app)
openapi_schema = {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/items/": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Read Items",
"operationId": "read_items_items__get",
"parameters": [
{
"required": False,
"schema": {
"title": "Q",
"type": "array",
"items": {"type": "string"},
"default": ["foo", "bar"],
},
"name": "q",
"in": "query",
}
],
}
}
},
"components": {
"schemas": {
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {"type": "string"},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
}
},
}
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200
assert response.json() == openapi_schema
def test_default_query_values():
url = "/items/"
response = client.get(url)
assert response.status_code == 200
assert response.json() == {"q": ["foo", "bar"]}
def test_multi_query_values():
url = "/items/?q=baz&q=foobar"
response = client.get(url)
assert response.status_code == 200
assert response.json() == {"q": ["baz", "foobar"]}
| true | true |
f7fcff5ac53a3ca1369e4f24a20c47901f44dd52 | 2,473 | py | Python | homework/Testing with Examples (Data Format)/impl-name05.py | rvprasad/software-testing-course | 3803851dcf9f7bbd0f0b89fca6c9c5e3a48f22e0 | [
"CC-BY-4.0"
] | 11 | 2018-02-08T05:23:28.000Z | 2021-05-24T13:23:56.000Z | homework/Testing with Examples (Data Format)/impl-name05.py | rvprasad/software-testing-course | 3803851dcf9f7bbd0f0b89fca6c9c5e3a48f22e0 | [
"CC-BY-4.0"
] | null | null | null | homework/Testing with Examples (Data Format)/impl-name05.py | rvprasad/software-testing-course | 3803851dcf9f7bbd0f0b89fca6c9c5e3a48f22e0 | [
"CC-BY-4.0"
] | 2 | 2020-09-15T08:51:22.000Z | 2021-01-26T12:07:18.000Z | import re
class PhysicalInfo(object):
    """Validated record of a person's physical measurements.

    Each setter validates its argument and raises ValueError with a
    descriptive message on bad input; only valid values are stored.
    """

    def set_date(self, date):
        """Store a date given as an ``MM-DD-YYYY`` string.

        Validates format, year range (1900-2100), month, and day --
        including leap-year handling for February.

        :raise ValueError: on any malformed or out-of-range component
        """
        if not isinstance(date, str):
            raise ValueError("date should be a string")
        t = date.split("-")
        if len(t) != 3:
            raise ValueError("date should be in MM-DD-YYYY format")
        if re.search(r'[^0-9\-]', date):
            raise ValueError("date should contain only numbers and -")
        year = int(t[2])
        if year < 1900 or year > 2100:
            raise ValueError("invalid year {0}".format(year))
        # Gregorian rule: divisible by 4, except centuries not divisible by 400.
        is_leap = year % 4 == 0 and (year % 400 == 0 or year % 100 != 0)
        month = int(t[0])
        if month < 1 or month > 12:
            raise ValueError("invalid month {0}".format(month))
        day_limit = 31
        # BUG FIX: July (7) was wrongly listed as a 30-day month; the 30-day
        # months are April, June, September, and November.
        if month in [4, 6, 9, 11]:
            day_limit = 30
        elif month == 2:
            day_limit = 29 if is_leap else 28
        day = int(t[1])
        if day < 1 or day > day_limit:
            raise ValueError("invalid day {0}".format(day))
        self.date = date

    def set_name(self, name):
        """Store a name: letters, digits, hyphens, and spaces only.

        Must contain at least one letter and at least two non-hyphen
        characters.
        """
        if not isinstance(name, str):
            raise ValueError("name should be a string")
        tmp1 = name.lower()
        if re.search(r'[^a-z0-9 -]', tmp1):
            raise ValueError("name should contain letters, numbers, -, and space")
        # Hyphens don't count toward the minimum length.
        if len(tmp1.replace("-", '')) < 2:
            raise ValueError("name should be at least two characters long")
        if not re.search(r'[a-z]', tmp1):
            raise ValueError("name should contain at least one character")
        self.name = name

    def set_gender(self, gender):
        """Store the gender; only the literals 'M' and 'F' are accepted."""
        if gender != 'M' and gender != 'F':
            raise ValueError("gender should be either M or F")
        self.gender = gender

    def set_height(self, height):
        """Store the height, an integer in inches between 17 and 84."""
        if not isinstance(height, int):
            raise ValueError("height should be an integer")
        if height < 17 or height > 84:
            raise ValueError("height should be an integer between 17 and 84")
        self.height = height

    def set_temperature(self, temperature):
        """Store the body temperature, a float between 95 and 104.

        NOTE(review): ints are deliberately rejected (isinstance check), so
        ``set_temperature(98)`` raises -- presumably per spec; confirm.
        """
        if not isinstance(temperature, float):
            raise ValueError("temperature should be a float")
        if temperature < 95 or temperature > 104:
            raise ValueError("temperature should be a float between 95 and 104")
        self.temperature = temperature
| 36.910448 | 82 | 0.545896 | import re
class PhysicalInfo(object):
def set_date(self, date):
if not isinstance(date, str):
raise ValueError("date should be a string")
t = date.split("-")
if len(t) != 3:
raise ValueError("date should be in MM-DD-YYYY format")
if re.search(r'[^0-9\-]', date):
raise ValueError("date should contain only numbers and -")
year = int(t[2])
if year < 1900 or year > 2100:
raise ValueError("invalid year {0}".format(year))
is_leap = year % 4 == 0 and (year % 400 == 0 or year % 100 != 0)
month = int(t[0])
if month < 1 or month > 12:
raise ValueError("invalid month {0}".format(month))
day_limit = 31
if month in [4, 6, 7, 9, 11]:
day_limit = 30
elif month == 2:
if is_leap:
day_limit = 29
else:
day_limit = 28
day = int(t[1])
if day < 1 or day > day_limit:
raise ValueError("invalid day {0}".format(day))
self.date = date
def set_name(self, name):
if not isinstance(name, str):
raise ValueError("name should be a string")
tmp1 = name.lower()
if re.search(r'[^a-z0-9 -]', tmp1):
raise ValueError("name should contain letters, numbers, -, and space")
if len(tmp1.replace("-", '')) < 2:
raise ValueError("name should be at least two characters long")
if not re.search(r'[a-z]', tmp1):
raise ValueError("name should contain at least one character")
self.name = name
def set_gender(self, gender):
if gender != 'M' and gender != 'F':
raise ValueError("gender should be either M or F")
self.gender = gender
def set_height(self, height):
if not isinstance(height, int):
raise ValueError("height should be an integer")
if height < 17 or height > 84:
raise ValueError("height should be an integer between 17 and 84")
self.height = height
def set_temperature(self, temperature):
if not isinstance(temperature, float):
raise ValueError("temperature should be a float")
if temperature < 95 or temperature > 104:
raise ValueError("temperature should be a float between 95 and 104")
self.temperature = temperature
| true | true |
f7fd00376c4dd032a2ad19860a436906fd08f432 | 3,804 | py | Python | frcnn/viewer.py | skmatz/frcnn | eae9d42f964a5883f72dc294984c019b3c75e837 | [
"MIT"
] | null | null | null | frcnn/viewer.py | skmatz/frcnn | eae9d42f964a5883f72dc294984c019b3c75e837 | [
"MIT"
] | null | null | null | frcnn/viewer.py | skmatz/frcnn | eae9d42f964a5883f72dc294984c019b3c75e837 | [
"MIT"
] | null | null | null | """Module for (demo) viewer."""
import os
from dataclasses import dataclass
from glob import glob
from logging import getLogger
from os.path import basename, join
from typing import List, Optional, Tuple
import cv2
import numpy as np
import seaborn as sns
import torch
import torch.cuda
import torchvision
from hydra.utils import to_absolute_path
from frcnn.labels import COCO91
from frcnn.models import FasterRCNN, fasterrcnn_resnet50_fpn
__all__ = ["ImageViewer"]
logger = getLogger(__name__)
ColorType = Tuple[int, int, int]
@dataclass
class BasicConfig:
    """Run-wide options shared by the viewer."""

    gpu: bool               # prefer CUDA when it is available
    conf: float             # detections scoring below this threshold are dropped
    display: bool           # show each annotated frame in an OpenCV window
    weights: Optional[str]  # path to model weights; None -> pretrained model
@dataclass
class ImageConfig:
    """Locations for image input and annotated output."""

    root: str     # directory scanned (non-recursively) for input images
    outputs: str  # directory where annotated copies are written
@dataclass
class Config:
    """Aggregate configuration passed to ImageViewer."""

    basic: BasicConfig
    image: ImageConfig
@dataclass
class FasterRCNNOutput:
    """Typed view of one detection dict returned by the Faster R-CNN model."""

    boxes: torch.Tensor   # per-detection boxes, used as (x1, y1, x2, y2) corners
    labels: torch.Tensor  # integer class ids (indexes into COCO91)
    scores: torch.Tensor  # per-detection scores, compared against the conf threshold
class ImageViewer:
    """Runs Faster R-CNN over a directory of images and saves annotated copies."""

    # One BGR color tuple per COCO class id, derived from the seaborn palette.
    COLORS: List[ColorType] = [
        tuple(int(c * 255) for c in color) for color in sns.color_palette(n_colors=len(COCO91))  # type: ignore
    ]

    def __init__(self, cfg: Config):
        self._cfg = cfg
        self._model = self._load_model(cfg.basic.weights)
        # Non-recursive listing; sorted for a deterministic processing order.
        self._paths = sorted(glob(join(to_absolute_path(cfg.image.root), "*")))
        self._device = "cuda" if cfg.basic.gpu and torch.cuda.is_available() else "cpu"
        os.makedirs(cfg.image.outputs, exist_ok=True)

    @torch.no_grad()
    def run(self):
        """Detect objects in every input image and write annotated outputs."""
        self._model = self._model.to(self._device).eval()
        for i, path in enumerate(self._paths):
            image_bgr: np.ndarray = cv2.imread(path)
            image_rgb: np.ndarray = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
            image_tensor: torch.Tensor = torchvision.transforms.functional.to_tensor(image_rgb).to(self._device)

            # Only the first element because we feed a single image.
            output = FasterRCNNOutput(**self._model([image_tensor])[0])

            boxes = output.boxes.cpu().numpy()
            labels = output.labels.cpu().numpy()
            scores = output.scores.cpu().numpy()

            logger.debug(
                f"[{i + 1}/{len(self._paths)}] Detect {len([s for s in scores if s >= self._cfg.basic.conf]):2d} "
                + f"objects in {path}",
            )

            image_bgr = self._draw_results(image_bgr, boxes, labels, scores)

            if self._cfg.basic.display:
                cv2.imshow("", image_bgr)
                cv2.waitKey(1)

            cv2.imwrite(join(self._cfg.image.outputs, basename(path)), image_bgr)

    @staticmethod
    def _load_model(weights: Optional[str]) -> FasterRCNN:
        """Build the detector; load custom weights when a path is given."""
        logger.debug(f"Load weights: {weights}")
        if weights is None:
            model = fasterrcnn_resnet50_fpn(pretrained=True)
        else:
            model = fasterrcnn_resnet50_fpn(pretrained=False)
            # BUG FIX: load_state_dict mutates the model in place and returns
            # a missing/unexpected-keys result object, not the model -- the
            # old code rebound `model` to that result and returned it.
            model.load_state_dict(torch.load(weights))
        return model

    def _draw_results(self, image: np.ndarray, boxes: np.ndarray, labels: np.ndarray, scores: np.ndarray) -> np.ndarray:
        """Draw class labels and bounding boxes onto the image (BGR), in place-ish.

        Detections below the configured confidence threshold are skipped.
        """
        for box, label, score in zip(boxes, labels, scores):
            if score < self._cfg.basic.conf:
                continue  # drop low-confidence detections

            image = cv2.putText(
                image,
                COCO91[label],
                (round(box[0]), round(box[1])),
                fontFace=cv2.FONT_HERSHEY_DUPLEX,
                fontScale=1,
                color=self.COLORS[label],
                thickness=2,
            )
            image = cv2.rectangle(
                image,
                (round(box[0]), round(box[1])),
                (round(box[2]), round(box[3])),
                color=self.COLORS[label],
                thickness=2,
            )

        return image
| 28.177778 | 120 | 0.610147 |
import os
from dataclasses import dataclass
from glob import glob
from logging import getLogger
from os.path import basename, join
from typing import List, Optional, Tuple
import cv2
import numpy as np
import seaborn as sns
import torch
import torch.cuda
import torchvision
from hydra.utils import to_absolute_path
from frcnn.labels import COCO91
from frcnn.models import FasterRCNN, fasterrcnn_resnet50_fpn
__all__ = ["ImageViewer"]
logger = getLogger(__name__)
ColorType = Tuple[int, int, int]
@dataclass
class BasicConfig:
gpu: bool
conf: float
display: bool
weights: Optional[str]
@dataclass
class ImageConfig:
root: str
outputs: str
@dataclass
class Config:
basic: BasicConfig
image: ImageConfig
@dataclass
class FasterRCNNOutput:
boxes: torch.Tensor
labels: torch.Tensor
scores: torch.Tensor
class ImageViewer:
COLORS: List[ColorType] = [
tuple(int(c * 255) for c in color) for color in sns.color_palette(n_colors=len(COCO91))
]
def __init__(self, cfg: Config):
self._cfg = cfg
self._model = self._load_model(cfg.basic.weights)
self._paths = sorted(glob(join(to_absolute_path(cfg.image.root), "*")))
self._device = "cuda" if cfg.basic.gpu and torch.cuda.is_available() else "cpu"
os.makedirs(cfg.image.outputs, exist_ok=True)
@torch.no_grad()
def run(self):
self._model = self._model.to(self._device).eval()
for i, path in enumerate(self._paths):
image_bgr: np.ndarray = cv2.imread(path)
image_rgb: np.ndarray = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
image_tensor: torch.Tensor = torchvision.transforms.functional.to_tensor(image_rgb).to(self._device)
output = FasterRCNNOutput(**self._model([image_tensor])[0])
boxes = output.boxes.cpu().numpy()
labels = output.labels.cpu().numpy()
scores = output.scores.cpu().numpy()
logger.debug(
f"[{i + 1}/{len(self._paths)}] Detect {len([s for s in scores if s >= self._cfg.basic.conf]):2d} "
+ f"objects in {path}",
)
image_bgr = self._draw_results(image_bgr, boxes, labels, scores)
if self._cfg.basic.display:
cv2.imshow("", image_bgr)
cv2.waitKey(1)
cv2.imwrite(join(self._cfg.image.outputs, basename(path)), image_bgr)
@staticmethod
def _load_model(weights: Optional[str]) -> FasterRCNN:
logger.debug(f"Load weights: {weights}")
if weights is None:
model = fasterrcnn_resnet50_fpn(pretrained=True)
else:
model = fasterrcnn_resnet50_fpn(pretrained=False)
model = model.load_state_dict(torch.load(weights))
return model
def _draw_results(self, image: np.ndarray, boxes: np.ndarray, labels: np.ndarray, scores: np.ndarray) -> np.ndarray:
for box, label, score in zip(boxes, labels, scores):
if score < self._cfg.basic.conf:
continue
image = cv2.putText(
image,
COCO91[label],
(round(box[0]), round(box[1])),
fontFace=cv2.FONT_HERSHEY_DUPLEX,
fontScale=1,
color=self.COLORS[label],
thickness=2,
)
image = cv2.rectangle(
image,
(round(box[0]), round(box[1])),
(round(box[2]), round(box[3])),
color=self.COLORS[label],
thickness=2,
)
return image
| true | true |
f7fd02249303ea1ba2fc488eea425a12ce8bf85a | 1,195 | py | Python | src/GameTools/Tools/Messaging.py | spidertyler2005/GameTools | f64a8ec0ebb4a357e04b90317136853d831d3553 | [
"MIT"
] | 1 | 2021-06-07T18:33:09.000Z | 2021-06-07T18:33:09.000Z | src/GameTools/Tools/Messaging.py | spidertyler2005/GameTools | f64a8ec0ebb4a357e04b90317136853d831d3553 | [
"MIT"
] | null | null | null | src/GameTools/Tools/Messaging.py | spidertyler2005/GameTools | f64a8ec0ebb4a357e04b90317136853d831d3553 | [
"MIT"
] | null | null | null | global messageDict
messageDict={}
def send_Message(messageName, index=None, *args,**kwargs):
'''
tells functions that are listening to this message to run with whatever args are specified
You can also specify and index if there are multiple function in that list.
usage:
send_Message("Default Message",*args,**Kwargs)
'''
global messageDict
if index==None:
for func in messageDict[messageName]:
func(*args,**kwargs)
#func(*args)
else:
messageDict[messageName][index](*args)
def recv_Message(messageName):
'''
This is a decorator that can be used to specify that
your function is a listener for a specific message
usage:
@recv_Message("Message Name")
def myListener(myArg, myKwarg = None):
pass
'''
global messageDict
def inner(function,*args,**kwargs):
global messageDict
if not messageName in messageDict.keys():
messageDict[messageName]=[]
if not function in messageDict[messageName]:
messageDict[messageName].append(function)
function
return inner
@recv_Message("Default Message")
def DefaultHandler(TestKwarg="TestKwarg"):
print("message works")
print(TestKwarg) | 29.146341 | 95 | 0.700418 | global messageDict
messageDict={}
def send_Message(messageName, index=None, *args,**kwargs):
global messageDict
if index==None:
for func in messageDict[messageName]:
func(*args,**kwargs)
else:
messageDict[messageName][index](*args)
def recv_Message(messageName):
global messageDict
def inner(function,*args,**kwargs):
global messageDict
if not messageName in messageDict.keys():
messageDict[messageName]=[]
if not function in messageDict[messageName]:
messageDict[messageName].append(function)
function
return inner
@recv_Message("Default Message")
def DefaultHandler(TestKwarg="TestKwarg"):
print("message works")
print(TestKwarg) | true | true |
f7fd02406db0cdbed19282f63a503c5fbd99b5d4 | 5,413 | py | Python | tests/data/packages/fetchai/protocols/t_protocol_no_ct/dialogues.py | valory-xyz/agents-aea | 8f38efa96041b0156ed1ae328178e395dbabf2fc | [
"Apache-2.0"
] | null | null | null | tests/data/packages/fetchai/protocols/t_protocol_no_ct/dialogues.py | valory-xyz/agents-aea | 8f38efa96041b0156ed1ae328178e395dbabf2fc | [
"Apache-2.0"
] | null | null | null | tests/data/packages/fetchai/protocols/t_protocol_no_ct/dialogues.py | valory-xyz/agents-aea | 8f38efa96041b0156ed1ae328178e395dbabf2fc | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2022 fetchai
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""
This module contains the classes required for t_protocol_no_ct dialogue management.
- TProtocolNoCtDialogue: The dialogue class maintains state of a dialogue and manages it.
- TProtocolNoCtDialogues: The dialogues class keeps track of all dialogues.
"""
from abc import ABC
from typing import Callable, Dict, FrozenSet, Type, cast
from aea.common import Address
from aea.protocols.base import Message
from aea.protocols.dialogue.base import Dialogue, DialogueLabel, Dialogues
from tests.data.packages.fetchai.protocols.t_protocol_no_ct.message import (
TProtocolNoCtMessage,
)
class TProtocolNoCtDialogue(Dialogue):
    """The t_protocol_no_ct dialogue class maintains state of a dialogue and manages it."""

    # Performatives with which a new dialogue may be opened.
    INITIAL_PERFORMATIVES: FrozenSet[Message.Performative] = frozenset(
        {TProtocolNoCtMessage.Performative.PERFORMATIVE_PT}
    )
    # Performatives that terminate a dialogue; they have no valid replies.
    TERMINAL_PERFORMATIVES: FrozenSet[Message.Performative] = frozenset(
        {
            TProtocolNoCtMessage.Performative.PERFORMATIVE_MT,
            TProtocolNoCtMessage.Performative.PERFORMATIVE_O,
        }
    )
    # For every performative, the set of performatives that are valid replies.
    VALID_REPLIES: Dict[Message.Performative, FrozenSet[Message.Performative]] = {
        TProtocolNoCtMessage.Performative.PERFORMATIVE_EMPTY_CONTENTS: frozenset(
            {TProtocolNoCtMessage.Performative.PERFORMATIVE_EMPTY_CONTENTS}
        ),
        TProtocolNoCtMessage.Performative.PERFORMATIVE_MT: frozenset(),
        TProtocolNoCtMessage.Performative.PERFORMATIVE_O: frozenset(),
        TProtocolNoCtMessage.Performative.PERFORMATIVE_PCT: frozenset(
            {
                TProtocolNoCtMessage.Performative.PERFORMATIVE_MT,
                TProtocolNoCtMessage.Performative.PERFORMATIVE_O,
            }
        ),
        TProtocolNoCtMessage.Performative.PERFORMATIVE_PMT: frozenset(
            {
                TProtocolNoCtMessage.Performative.PERFORMATIVE_MT,
                TProtocolNoCtMessage.Performative.PERFORMATIVE_O,
            }
        ),
        TProtocolNoCtMessage.Performative.PERFORMATIVE_PT: frozenset(
            {
                TProtocolNoCtMessage.Performative.PERFORMATIVE_PT,
                TProtocolNoCtMessage.Performative.PERFORMATIVE_PCT,
                TProtocolNoCtMessage.Performative.PERFORMATIVE_PMT,
            }
        ),
    }

    class Role(Dialogue.Role):
        """This class defines the agent's role in a t_protocol_no_ct dialogue."""

        ROLE_1 = "role_1"
        ROLE_2 = "role_2"

    class EndState(Dialogue.EndState):
        """This class defines the end states of a t_protocol_no_ct dialogue."""

        END_STATE_1 = 0
        END_STATE_2 = 1
        END_STATE_3 = 2

    def __init__(
        self,
        dialogue_label: DialogueLabel,
        self_address: Address,
        role: Dialogue.Role,
        message_class: Type[TProtocolNoCtMessage] = TProtocolNoCtMessage,
    ) -> None:
        """
        Initialize a dialogue.

        :param dialogue_label: the identifier of the dialogue
        :param self_address: the address of the entity for whom this dialogue is maintained
        :param role: the role of the agent this dialogue is maintained for
        :param message_class: the message class used
        """
        Dialogue.__init__(
            self,
            dialogue_label=dialogue_label,
            message_class=message_class,
            self_address=self_address,
            role=role,
        )
class TProtocolNoCtDialogues(Dialogues, ABC):
    """This class keeps track of all t_protocol_no_ct dialogues."""

    # All end states a completed dialogue may be in.
    END_STATES = frozenset(
        {
            TProtocolNoCtDialogue.EndState.END_STATE_1,
            TProtocolNoCtDialogue.EndState.END_STATE_2,
            TProtocolNoCtDialogue.EndState.END_STATE_3,
        }
    )

    # Dialogues that reached a terminal state are retained, not discarded.
    _keep_terminal_state_dialogues = True

    def __init__(
        self,
        self_address: Address,
        role_from_first_message: Callable[[Message, Address], Dialogue.Role],
        dialogue_class: Type[TProtocolNoCtDialogue] = TProtocolNoCtDialogue,
    ) -> None:
        """
        Initialize dialogues.

        :param self_address: the address of the entity for whom dialogues are maintained
        :param dialogue_class: the dialogue class used
        :param role_from_first_message: the callable determining role from first message
        """
        Dialogues.__init__(
            self,
            self_address=self_address,
            end_states=cast(FrozenSet[Dialogue.EndState], self.END_STATES),
            message_class=TProtocolNoCtMessage,
            dialogue_class=dialogue_class,
            role_from_first_message=role_from_first_message,
        )
| 36.328859 | 91 | 0.663403 |
from abc import ABC
from typing import Callable, Dict, FrozenSet, Type, cast
from aea.common import Address
from aea.protocols.base import Message
from aea.protocols.dialogue.base import Dialogue, DialogueLabel, Dialogues
from tests.data.packages.fetchai.protocols.t_protocol_no_ct.message import (
TProtocolNoCtMessage,
)
class TProtocolNoCtDialogue(Dialogue):
INITIAL_PERFORMATIVES: FrozenSet[Message.Performative] = frozenset(
{TProtocolNoCtMessage.Performative.PERFORMATIVE_PT}
)
TERMINAL_PERFORMATIVES: FrozenSet[Message.Performative] = frozenset(
{
TProtocolNoCtMessage.Performative.PERFORMATIVE_MT,
TProtocolNoCtMessage.Performative.PERFORMATIVE_O,
}
)
VALID_REPLIES: Dict[Message.Performative, FrozenSet[Message.Performative]] = {
TProtocolNoCtMessage.Performative.PERFORMATIVE_EMPTY_CONTENTS: frozenset(
{TProtocolNoCtMessage.Performative.PERFORMATIVE_EMPTY_CONTENTS}
),
TProtocolNoCtMessage.Performative.PERFORMATIVE_MT: frozenset(),
TProtocolNoCtMessage.Performative.PERFORMATIVE_O: frozenset(),
TProtocolNoCtMessage.Performative.PERFORMATIVE_PCT: frozenset(
{
TProtocolNoCtMessage.Performative.PERFORMATIVE_MT,
TProtocolNoCtMessage.Performative.PERFORMATIVE_O,
}
),
TProtocolNoCtMessage.Performative.PERFORMATIVE_PMT: frozenset(
{
TProtocolNoCtMessage.Performative.PERFORMATIVE_MT,
TProtocolNoCtMessage.Performative.PERFORMATIVE_O,
}
),
TProtocolNoCtMessage.Performative.PERFORMATIVE_PT: frozenset(
{
TProtocolNoCtMessage.Performative.PERFORMATIVE_PT,
TProtocolNoCtMessage.Performative.PERFORMATIVE_PCT,
TProtocolNoCtMessage.Performative.PERFORMATIVE_PMT,
}
),
}
class Role(Dialogue.Role):
ROLE_1 = "role_1"
ROLE_2 = "role_2"
class EndState(Dialogue.EndState):
END_STATE_1 = 0
END_STATE_2 = 1
END_STATE_3 = 2
def __init__(
self,
dialogue_label: DialogueLabel,
self_address: Address,
role: Dialogue.Role,
message_class: Type[TProtocolNoCtMessage] = TProtocolNoCtMessage,
) -> None:
Dialogue.__init__(
self,
dialogue_label=dialogue_label,
message_class=message_class,
self_address=self_address,
role=role,
)
class TProtocolNoCtDialogues(Dialogues, ABC):
END_STATES = frozenset(
{
TProtocolNoCtDialogue.EndState.END_STATE_1,
TProtocolNoCtDialogue.EndState.END_STATE_2,
TProtocolNoCtDialogue.EndState.END_STATE_3,
}
)
_keep_terminal_state_dialogues = True
def __init__(
self,
self_address: Address,
role_from_first_message: Callable[[Message, Address], Dialogue.Role],
dialogue_class: Type[TProtocolNoCtDialogue] = TProtocolNoCtDialogue,
) -> None:
Dialogues.__init__(
self,
self_address=self_address,
end_states=cast(FrozenSet[Dialogue.EndState], self.END_STATES),
message_class=TProtocolNoCtMessage,
dialogue_class=dialogue_class,
role_from_first_message=role_from_first_message,
)
| true | true |
f7fd026958d367e8ee9648bd479d624f5a249641 | 22,426 | py | Python | wang/trainModel.py | zhangqx/movie_recommender | 93eddb89f7ac2a8358bbe5c91b26e7e2b4184c38 | [
"MIT"
] | 1 | 2019-12-03T16:20:01.000Z | 2019-12-03T16:20:01.000Z | wang/trainModel.py | zhangqx/movie_recommender | 93eddb89f7ac2a8358bbe5c91b26e7e2b4184c38 | [
"MIT"
] | null | null | null | wang/trainModel.py | zhangqx/movie_recommender | 93eddb89f7ac2a8358bbe5c91b26e7e2b4184c38 | [
"MIT"
] | 3 | 2019-06-24T15:59:42.000Z | 2019-12-03T16:20:02.000Z | import os
#
#
# % matplotlib inline
# % config InlineBackend.figure_format = 'retina'
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import time
import datetime
import numpy as np
from sklearn.model_selection import train_test_split
import pandas as pd
import pickle
import tensorflow as tf
# from wang.buildModel import *
# os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
#
# from ml-1m import *
# 从本地读取数据
title_count, title_set, genres2int, features, targets_values, ratings, users, movies, data, movies_orig, users_orig = pickle.load(open('preprocess.p', mode='rb'))
def save_params(params):
    """
    Save parameters to the local file ``params.p``.

    Fix: the original left the file handle to be closed by garbage
    collection; a ``with`` block closes it deterministically.

    :param params: any picklable object (here: the model save path)
    """
    with open('params.p', 'wb') as f:
        pickle.dump(params, f)
def load_params():
    """
    Load previously saved parameters from ``params.p``.

    Counterpart of ``save_params``. Fix: the original left the file
    handle to be closed by garbage collection; a ``with`` block closes
    it deterministically.

    :return: the unpickled object (here: the model save path)
    """
    with open('params.p', 'rb') as f:
        return pickle.load(f)
# ---- Model hyper-parameters (derived from the preprocessed data) ----

# Dimensionality of the embedding vectors
embed_dim = 32
# Number of distinct user IDs
uid_max = max(features.take(0,1)) + 1  # 6040
# Number of genders
gender_max = max(features.take(2,1)) + 1  # 1 + 1 = 2
# Number of age buckets
age_max = max(features.take(3,1)) + 1  # 6 + 1 = 7
# Number of occupations
job_max = max(features.take(4,1)) + 1  # 20 + 1 = 21
# Number of distinct movie IDs
movie_id_max = max(features.take(1,1)) + 1  # 3952
# Number of movie genres
movie_categories_max = max(genres2int.values()) + 1  # 18 + 1 = 19
# Number of distinct words appearing in movie titles
movie_title_max = len(title_set)  # 5216

# How the multiple genre embedding vectors are combined: "sum"
# (a "mean" combiner was considered but never implemented)
combiner = "sum"

# Movie-title length in words (fixed by preprocessing)
sentences_size = title_count  # = 15
# Text-CNN sliding-window sizes: 2, 3, 4 and 5 words
window_sizes = {2, 3, 4, 5}
# Number of convolution filters per window size
filter_num = 8

# Map movie ID -> row index; dataset movie IDs are not contiguous, so
# e.g. row 5 does not necessarily hold movie ID 5.
movieid2idx = {val[0]:i for i, val in enumerate(movies.values)}

# ---- Training hyper-parameters ----
# Number of Epochs
num_epochs = 5
# Batch Size
batch_size = 256
dropout_keep = 0.5
# Learning Rate
learning_rate = 0.0001
# Show stats for every n number of batches
show_every_n_batches = 20

# Checkpoint path prefix for tf.train.Saver
save_dir = './save'
# ---- Input placeholders ----
def get_inputs():
    """Create and return all input placeholders of the model.

    Returns the user fields (uid, gender, age, job), the movie fields
    (id, 18 genre slots, 15 title-word slots), the target rating, the
    learning rate and the dropout keep probability.
    """
    uid = tf.placeholder(tf.int32, [None, 1], name="uid")
    user_gender = tf.placeholder(tf.int32, [None, 1], name="user_gender")
    user_age = tf.placeholder(tf.int32, [None, 1], name="user_age")
    user_job = tf.placeholder(tf.int32, [None, 1], name="user_job")

    movie_id = tf.placeholder(tf.int32, [None, 1], name="movie_id")
    movie_categories = tf.placeholder(tf.int32, [None, 18], name="movie_categories")
    movie_titles = tf.placeholder(tf.int32, [None, 15], name="movie_titles")
    targets = tf.placeholder(tf.int32, [None, 1], name="targets")
    LearningRate = tf.placeholder(tf.float32, name="LearningRate")
    dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
    return uid, user_gender, user_age, user_job, movie_id, movie_categories, movie_titles, targets, LearningRate, dropout_keep_prob
# ---- Build the neural network ----

# User embedding matrices
def get_user_embedding(uid, user_gender, user_age, user_job):
    """Look up and return the embedding vectors for the four user fields.

    uid uses a full-width (embed_dim) embedding; gender, age and job use
    half-width (embed_dim // 2) embeddings.
    """
    with tf.name_scope("user_embedding"):
        uid_embed_matrix = tf.Variable(tf.random_uniform([uid_max, embed_dim], -1, 1), name="uid_embed_matrix")
        uid_embed_layer = tf.nn.embedding_lookup(uid_embed_matrix, uid, name="uid_embed_layer")

        gender_embed_matrix = tf.Variable(tf.random_uniform([gender_max, embed_dim // 2], -1, 1),
                                          name="gender_embed_matrix")
        gender_embed_layer = tf.nn.embedding_lookup(gender_embed_matrix, user_gender, name="gender_embed_layer")

        age_embed_matrix = tf.Variable(tf.random_uniform([age_max, embed_dim // 2], -1, 1), name="age_embed_matrix")
        age_embed_layer = tf.nn.embedding_lookup(age_embed_matrix, user_age, name="age_embed_layer")

        job_embed_matrix = tf.Variable(tf.random_uniform([job_max, embed_dim // 2], -1, 1), name="job_embed_matrix")
        job_embed_layer = tf.nn.embedding_lookup(job_embed_matrix, user_job, name="job_embed_layer")
    return uid_embed_layer, gender_embed_layer, age_embed_layer, job_embed_layer
# Fully connect the user embedding vectors together to form the user feature
def get_user_feature_layer(uid_embed_layer, gender_embed_layer, age_embed_layer, job_embed_layer):
    """Combine the four user embeddings into a 200-dim user feature vector."""
    with tf.name_scope("user_fc"):
        # First fully-connected layer (one per user field)
        uid_fc_layer = tf.layers.dense(uid_embed_layer, embed_dim, name="uid_fc_layer", activation=tf.nn.relu)
        gender_fc_layer = tf.layers.dense(gender_embed_layer, embed_dim, name="gender_fc_layer", activation=tf.nn.relu)
        age_fc_layer = tf.layers.dense(age_embed_layer, embed_dim, name="age_fc_layer", activation=tf.nn.relu)
        job_fc_layer = tf.layers.dense(job_embed_layer, embed_dim, name="job_fc_layer", activation=tf.nn.relu)

        # Second fully-connected layer over the concatenated fields
        user_combine_layer = tf.concat([uid_fc_layer, gender_fc_layer, age_fc_layer, job_fc_layer], 2)  # (?, 1, 128)
        user_combine_layer = tf.contrib.layers.fully_connected(user_combine_layer, 200, tf.tanh)  # (?, 1, 200)

        user_combine_layer_flat = tf.reshape(user_combine_layer, [-1, 200])
    return user_combine_layer, user_combine_layer_flat
# Embedding matrix for the movie ID
def get_movie_id_embed_layer(movie_id):
    """Look up and return the embedding vector for a movie ID."""
    with tf.name_scope("movie_embedding"):
        movie_id_embed_matrix = tf.Variable(tf.random_uniform([movie_id_max, embed_dim], -1, 1), name = "movie_id_embed_matrix")
        movie_id_embed_layer = tf.nn.embedding_lookup(movie_id_embed_matrix, movie_id, name = "movie_id_embed_layer")
    return movie_id_embed_layer
# Sum the embedding vectors of a movie's multiple genres into one vector
def get_movie_categories_layers(movie_categories):
    """Embed each genre ID and combine them into a single vector (sum)."""
    with tf.name_scope("movie_categories_layers"):
        movie_categories_embed_matrix = tf.Variable(tf.random_uniform([movie_categories_max, embed_dim], -1, 1), name = "movie_categories_embed_matrix")
        movie_categories_embed_layer = tf.nn.embedding_lookup(movie_categories_embed_matrix, movie_categories, name = "movie_categories_embed_layer")
        if combiner == "sum":
            # NOTE(review): keep_dims is the pre-TF-1.5 spelling (now keepdims);
            # kept as-is for compatibility with the TF version this was written for.
            movie_categories_embed_layer = tf.reduce_sum(movie_categories_embed_layer, axis=1, keep_dims=True)
        # elif combiner == "mean":  (a mean combiner was considered but never implemented)
    return movie_categories_embed_layer
# Text-CNN over the movie title
def get_movie_cnn_layer(movie_titles):
    """Run a text CNN over the movie title and return its pooled feature.

    Returns ``(pool_layer_flat, dropout_layer)``; the dropout output is
    what feeds the movie feature layer.
    """
    # Look up the embedding vector of every word in the title
    with tf.name_scope("movie_embedding"):
        movie_title_embed_matrix = tf.Variable(tf.random_uniform([movie_title_max, embed_dim], -1, 1),
                                               name="movie_title_embed_matrix")
        movie_title_embed_layer = tf.nn.embedding_lookup(movie_title_embed_matrix, movie_titles,
                                                         name="movie_title_embed_layer")
        # Add a channel dimension so conv2d can be applied
        movie_title_embed_layer_expand = tf.expand_dims(movie_title_embed_layer, -1)

    # Convolution + max-pooling over the text embedding, one branch per kernel size
    pool_layer_lst = []
    for window_size in window_sizes:
        with tf.name_scope("movie_txt_conv_maxpool_{}".format(window_size)):
            filter_weights = tf.Variable(tf.truncated_normal([window_size, embed_dim, 1, filter_num], stddev=0.1),
                                         name="filter_weights")
            filter_bias = tf.Variable(tf.constant(0.1, shape=[filter_num]), name="filter_bias")

            conv_layer = tf.nn.conv2d(movie_title_embed_layer_expand, filter_weights, [1, 1, 1, 1], padding="VALID",
                                      name="conv_layer")
            relu_layer = tf.nn.relu(tf.nn.bias_add(conv_layer, filter_bias), name="relu_layer")

            # Max-pool over the full remaining sequence length
            maxpool_layer = tf.nn.max_pool(relu_layer, [1, sentences_size - window_size + 1, 1, 1], [1, 1, 1, 1],
                                           padding="VALID", name="maxpool_layer")
            pool_layer_lst.append(maxpool_layer)

    # Dropout layer.  NOTE: dropout_keep_prob is the module-level placeholder
    # created by get_inputs(), referenced here as a free variable.
    with tf.name_scope("pool_dropout"):
        pool_layer = tf.concat(pool_layer_lst, 3, name="pool_layer")
        max_num = len(window_sizes) * filter_num
        pool_layer_flat = tf.reshape(pool_layer, [-1, 1, max_num], name="pool_layer_flat")

        dropout_layer = tf.nn.dropout(pool_layer_flat, dropout_keep_prob, name="dropout_layer")
    return pool_layer_flat, dropout_layer
# Fully connect all movie layers together
def get_movie_feature_layer(movie_id_embed_layer, movie_categories_embed_layer, dropout_layer):
    """Combine movie-ID, genre and title features into a 200-dim movie feature."""
    with tf.name_scope("movie_fc"):
        # First fully-connected layer
        movie_id_fc_layer = tf.layers.dense(movie_id_embed_layer, embed_dim, name="movie_id_fc_layer",
                                            activation=tf.nn.relu)
        movie_categories_fc_layer = tf.layers.dense(movie_categories_embed_layer, embed_dim,
                                                    name="movie_categories_fc_layer", activation=tf.nn.relu)

        # Second fully-connected layer over the concatenated movie features
        movie_combine_layer = tf.concat([movie_id_fc_layer, movie_categories_fc_layer, dropout_layer], 2)  # (?, 1, 96)
        movie_combine_layer = tf.contrib.layers.fully_connected(movie_combine_layer, 200, tf.tanh)  # (?, 1, 200)

        movie_combine_layer_flat = tf.reshape(movie_combine_layer, [-1, 200])
    return movie_combine_layer, movie_combine_layer_flat
# ---- Build the computation graph ----
tf.reset_default_graph()
train_graph = tf.Graph()
with train_graph.as_default():
    # Input placeholders
    uid, user_gender, user_age, user_job, movie_id, movie_categories, movie_titles, targets, lr, dropout_keep_prob = get_inputs()
    # The four user embedding vectors
    uid_embed_layer, gender_embed_layer, age_embed_layer, job_embed_layer = get_user_embedding(uid, user_gender, user_age, user_job)
    # User feature
    user_combine_layer, user_combine_layer_flat = get_user_feature_layer(uid_embed_layer, gender_embed_layer, age_embed_layer, job_embed_layer)
    # Movie-ID embedding vector
    movie_id_embed_layer = get_movie_id_embed_layer(movie_id)
    # Movie-genre embedding vector
    movie_categories_embed_layer = get_movie_categories_layers(movie_categories)
    # Movie-title feature vector (text CNN)
    pool_layer_flat, dropout_layer = get_movie_cnn_layer(movie_titles)
    # Movie feature
    movie_combine_layer, movie_combine_layer_flat = get_movie_feature_layer(movie_id_embed_layer,
                                                                           movie_categories_embed_layer,
                                                                           dropout_layer)
    # Compute the predicted rating.  NOTE: the two alternative schemes give the
    # "inference" tensor different names; get_tensors() later fetches the tensor
    # by name, so the name used there must match the active scheme.
    with tf.name_scope("inference"):
        # Scheme 1 (disabled): concatenate user and movie features and pass them
        # through a fully-connected layer producing a single value.
        # inference_layer = tf.concat([user_combine_layer_flat, movie_combine_layer_flat], 1)  #(?, 200)
        # inference = tf.layers.dense(inference_layer, 1,
        #                             kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
        #                             kernel_regularizer=tf.nn.l2_loss, name="inference")
        # Scheme 2 (active): predict the rating as the dot product of the user
        # feature and the movie feature.
        # inference = tf.matmul(user_combine_layer_flat, tf.transpose(movie_combine_layer_flat))
        inference = tf.reduce_sum(user_combine_layer_flat * movie_combine_layer_flat, axis=1)
        inference = tf.expand_dims(inference, axis=1)

    with tf.name_scope("loss"):
        # MSE loss, regressing the prediction to the target rating
        cost = tf.losses.mean_squared_error(targets, inference )
        loss = tf.reduce_mean(cost)

    # Optimize the loss
    # train_op = tf.train.AdamOptimizer(lr).minimize(loss)  #cost
    global_step = tf.Variable(0, name="global_step", trainable=False)
    optimizer = tf.train.AdamOptimizer(lr)
    gradients = optimizer.compute_gradients(loss)  #cost
    train_op = optimizer.apply_gradients(gradients, global_step=global_step)
# Mini-batch generator
def get_batches(Xs, ys, batch_size):
    """Yield successive ``(features, targets)`` mini-batches.

    Each batch holds at most ``batch_size`` rows; the final batch may be
    shorter when ``len(Xs)`` is not a multiple of ``batch_size``.
    """
    n_rows = len(Xs)
    for begin in range(0, n_rows, batch_size):
        stop = min(begin + batch_size, n_rows)
        yield Xs[begin:stop], ys[begin:stop]
# ---- Train the network ----
losses = {'train': [], 'test': []}

with tf.Session(graph=train_graph) as sess:

    # Collect data for TensorBoard
    # Keep track of gradient values and sparsity
    grad_summaries = []
    for g, v in gradients:
        if g is not None:
            grad_hist_summary = tf.summary.histogram("{}/grad/hist".format(v.name.replace(':', '_')), g)
            sparsity_summary = tf.summary.scalar("{}/grad/sparsity".format(v.name.replace(':', '_')),
                                                 tf.nn.zero_fraction(g))
            grad_summaries.append(grad_hist_summary)
            grad_summaries.append(sparsity_summary)
    grad_summaries_merged = tf.summary.merge(grad_summaries)

    # Output directory for models and summaries
    timestamp = str(int(time.time()))
    out_dir = os.path.abspath(os.path.join(os.path.curdir, "runs", timestamp))
    print("Writing to {}\n".format(out_dir))

    # Summaries for loss and accuracy
    loss_summary = tf.summary.scalar("loss", loss)

    # Train Summaries
    train_summary_op = tf.summary.merge([loss_summary, grad_summaries_merged])
    train_summary_dir = os.path.join(out_dir, "summaries", "train")
    train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)

    # Inference summaries
    inference_summary_op = tf.summary.merge([loss_summary])
    inference_summary_dir = os.path.join(out_dir, "summaries", "inference")
    inference_summary_writer = tf.summary.FileWriter(inference_summary_dir, sess.graph)

    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    for epoch_i in range(num_epochs):

        # Split the data into a training and a test set.
        # NOTE(review): the original comment claimed the seed is not fixed,
        # but random_state=0 makes this split deterministic (and identical
        # every epoch).
        train_X, test_X, train_y, test_y = train_test_split(features,
                                                            targets_values,
                                                            test_size=0.2,
                                                            random_state=0)

        train_batches = get_batches(train_X, train_y, batch_size)
        test_batches = get_batches(test_X, test_y, batch_size)

        # Training iterations; record the training loss
        for batch_i in range(len(train_X) // batch_size):
            x, y = next(train_batches)

            # Expand the per-row genre lists into a dense (batch, 18) array
            categories = np.zeros([batch_size, 18])
            for i in range(batch_size):
                categories[i] = x.take(6, 1)[i]

            # Expand the per-row title-word lists into (batch, sentences_size)
            titles = np.zeros([batch_size, sentences_size])
            for i in range(batch_size):
                titles[i] = x.take(5, 1)[i]

            feed = {
                uid: np.reshape(x.take(0, 1), [batch_size, 1]),
                user_gender: np.reshape(x.take(2, 1), [batch_size, 1]),
                user_age: np.reshape(x.take(3, 1), [batch_size, 1]),
                user_job: np.reshape(x.take(4, 1), [batch_size, 1]),
                movie_id: np.reshape(x.take(1, 1), [batch_size, 1]),
                movie_categories: categories,  # x.take(6,1)
                movie_titles: titles,  # x.take(5,1)
                targets: np.reshape(y, [batch_size, 1]),
                dropout_keep_prob: dropout_keep,  # dropout enabled during training
                lr: learning_rate}

            step, train_loss, summaries, _ = sess.run([global_step, loss, train_summary_op, train_op], feed)  # cost
            losses['train'].append(train_loss)
            train_summary_writer.add_summary(summaries, step)  #

            # Show every <show_every_n_batches> batches
            if (epoch_i * (len(train_X) // batch_size) + batch_i) % show_every_n_batches == 0:
                time_str = datetime.datetime.now().isoformat()
                print('{}: Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
                    time_str,
                    epoch_i,
                    batch_i,
                    (len(train_X) // batch_size),
                    train_loss))

        # Evaluation iterations over the test data
        for batch_i in range(len(test_X) // batch_size):
            x, y = next(test_batches)

            categories = np.zeros([batch_size, 18])
            for i in range(batch_size):
                categories[i] = x.take(6, 1)[i]

            titles = np.zeros([batch_size, sentences_size])
            for i in range(batch_size):
                titles[i] = x.take(5, 1)[i]

            feed = {
                uid: np.reshape(x.take(0, 1), [batch_size, 1]),
                user_gender: np.reshape(x.take(2, 1), [batch_size, 1]),
                user_age: np.reshape(x.take(3, 1), [batch_size, 1]),
                user_job: np.reshape(x.take(4, 1), [batch_size, 1]),
                movie_id: np.reshape(x.take(1, 1), [batch_size, 1]),
                movie_categories: categories,  # x.take(6,1)
                movie_titles: titles,  # x.take(5,1)
                targets: np.reshape(y, [batch_size, 1]),
                dropout_keep_prob: 1,
                lr: learning_rate}

            step, test_loss, summaries = sess.run([global_step, loss, inference_summary_op], feed)  # cost

            # Record the test loss
            losses['test'].append(test_loss)
            inference_summary_writer.add_summary(summaries, step)  #
            time_str = datetime.datetime.now().isoformat()

            if (epoch_i * (len(test_X) // batch_size) + batch_i) % show_every_n_batches == 0:
                print('{}: Epoch {:>3} Batch {:>4}/{} test_loss = {:.3f}'.format(
                    time_str,
                    epoch_i,
                    batch_i,
                    (len(test_X) // batch_size),
                    test_loss))

    # Save Model
    saver.save(sess, save_dir)  # , global_step=epoch_i
    print('Model Trained and Saved')

# Persist the checkpoint path so the recommendation code can find the model
save_params((save_dir))
load_dir = load_params()
print('load_dir: ', load_dir)

# Plot the training loss
plt.plot(losses['train'], label='Training loss')
plt.legend()
_ = plt.ylim()

# Plot the test loss
plt.plot(losses['test'], label='Test loss')
plt.legend()
_ = plt.ylim()
plt.show()
# Fetch the tensors needed later for recommendation from the restored graph
# via get_tensor_by_name().
def get_tensors(loaded_graph):
    """Return the input/output tensors of the saved model, fetched by name."""
    uid = loaded_graph.get_tensor_by_name("uid:0")
    user_gender = loaded_graph.get_tensor_by_name("user_gender:0")
    user_age = loaded_graph.get_tensor_by_name("user_age:0")
    user_job = loaded_graph.get_tensor_by_name("user_job:0")
    movie_id = loaded_graph.get_tensor_by_name("movie_id:0")
    movie_categories = loaded_graph.get_tensor_by_name("movie_categories:0")
    movie_titles = loaded_graph.get_tensor_by_name("movie_titles:0")
    targets = loaded_graph.get_tensor_by_name("targets:0")
    dropout_keep_prob = loaded_graph.get_tensor_by_name("dropout_keep_prob:0")
    lr = loaded_graph.get_tensor_by_name("LearningRate:0")
    # The two rating schemes expose the "inference" tensor under different
    # names; use the name matching the active scheme in the graph build.
    # inference = loaded_graph.get_tensor_by_name("inference/inference/BiasAdd:0")
    inference = loaded_graph.get_tensor_by_name("inference/ExpandDims:0")  # was "MatMul:0"; renamed after the inference code changed (credit: user @QingGe)
    movie_combine_layer_flat = loaded_graph.get_tensor_by_name("movie_fc/Reshape:0")
    user_combine_layer_flat = loaded_graph.get_tensor_by_name("user_fc/Reshape:0")
    return uid, user_gender, user_age, user_job, movie_id, movie_categories, movie_titles, targets, lr, dropout_keep_prob, inference, movie_combine_layer_flat, user_combine_layer_flat
# Predict the rating a given user would give a given movie: a single forward
# pass through the restored network.
def rating_movie(user_id_val, movie_id_val):
    """Restore the saved model and return the predicted rating for (user, movie)."""
    loaded_graph = tf.Graph()  #
    with tf.Session(graph=loaded_graph) as sess:  #
        # Load saved model
        loader = tf.train.import_meta_graph(load_dir + '.meta')
        loader.restore(sess, load_dir)

        # Get Tensors from loaded model
        uid, user_gender, user_age, user_job, movie_id, movie_categories, movie_titles, targets, lr, dropout_keep_prob, inference, _, __ = get_tensors(
            loaded_graph)  # loaded_graph

        # Dense genre / title-word arrays for the single requested movie
        categories = np.zeros([1, 18])
        categories[0] = movies.values[movieid2idx[movie_id_val]][2]

        titles = np.zeros([1, sentences_size])
        titles[0] = movies.values[movieid2idx[movie_id_val]][1]

        # NOTE(review): user rows are indexed by user_id - 1, i.e. this assumes
        # user IDs are contiguous starting at 1 — confirm against preprocessing.
        feed = {
            uid: np.reshape(users.values[user_id_val - 1][0], [1, 1]),
            user_gender: np.reshape(users.values[user_id_val - 1][1], [1, 1]),
            user_age: np.reshape(users.values[user_id_val - 1][2], [1, 1]),
            user_job: np.reshape(users.values[user_id_val - 1][3], [1, 1]),
            movie_id: np.reshape(movies.values[movieid2idx[movie_id_val]][0], [1, 1]),
            movie_categories: categories,  # x.take(6,1)
            movie_titles: titles,  # x.take(5,1)
            dropout_keep_prob: 1}

        # Get Prediction
        inference_val = sess.run([inference], feed)

        return (inference_val)
# Sample prediction: user 234's predicted rating for movie 1401
rating_movie(234, 1401)

# Generate the movie feature matrix: run every movie through the trained
# movie sub-network and save the resulting 200-dim features locally.
loaded_graph = tf.Graph()  #
movie_matrics = []
with tf.Session(graph=loaded_graph) as sess:  #
    # Load saved model
    loader = tf.train.import_meta_graph(load_dir + '.meta')
    loader.restore(sess, load_dir)

    # Get Tensors from loaded model
    uid, user_gender, user_age, user_job, movie_id, movie_categories, movie_titles, targets, lr, dropout_keep_prob, _, movie_combine_layer_flat, __ = get_tensors(loaded_graph)  #loaded_graph

    for item in movies.values:
        categories = np.zeros([1, 18])
        categories[0] = item.take(2)

        titles = np.zeros([1, sentences_size])
        titles[0] = item.take(1)

        feed = {
            movie_id: np.reshape(item.take(0), [1, 1]),
            movie_categories: categories,  #x.take(6,1)
            movie_titles: titles,  #x.take(5,1)
            dropout_keep_prob: 1}

        movie_combine_layer_flat_val = sess.run([movie_combine_layer_flat], feed)
        movie_matrics.append(movie_combine_layer_flat_val)

# Persist the (num_movies, 200) feature matrix, then reload it
pickle.dump((np.array(movie_matrics).reshape(-1, 200)), open('movie_matrics.p', 'wb'))
movie_matrics = pickle.load(open('movie_matrics.p', mode='rb'))
# movie_matrics = pickle.load(open('movie_matrics.p', mode='rb'))
# Generate the user feature matrix: run every user through the trained user
# sub-network and save the resulting 200-dim features locally.
loaded_graph = tf.Graph()  #
users_matrics = []
with tf.Session(graph=loaded_graph) as sess:  #
    # Load saved model
    loader = tf.train.import_meta_graph(load_dir + '.meta')
    loader.restore(sess, load_dir)

    # Get Tensors from loaded model
    uid, user_gender, user_age, user_job, movie_id, movie_categories, movie_titles, targets, lr, dropout_keep_prob, _, __,user_combine_layer_flat = get_tensors(loaded_graph)  #loaded_graph

    for item in users.values:
        feed = {
            uid: np.reshape(item.take(0), [1, 1]),
            user_gender: np.reshape(item.take(1), [1, 1]),
            user_age: np.reshape(item.take(2), [1, 1]),
            user_job: np.reshape(item.take(3), [1, 1]),
            dropout_keep_prob: 1}

        user_combine_layer_flat_val = sess.run([user_combine_layer_flat], feed)
        users_matrics.append(user_combine_layer_flat_val)

# Persist the (num_users, 200) feature matrix
pickle.dump((np.array(users_matrics).reshape(-1, 200)), open('users_matrics.p', 'wb'))
# users_matrics = pickle.load(open('users_matrics.p', mode='rb'))
# users_matrics = pickle.load(open('users_matrics.p', mode='rb'))
| 40.189964 | 190 | 0.65919 | import os
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import time
import datetime
import numpy as np
from sklearn.model_selection import train_test_split
import pandas as pd
import pickle
import tensorflow as tf
title_count, title_set, genres2int, features, targets_values, ratings, users, movies, data, movies_orig, users_orig = pickle.load(open('preprocess.p', mode='rb'))
def save_params(params):
pickle.dump(params, open('params.p', 'wb'))
def load_params():
return pickle.load(open('params.p', mode='rb'))
embed_dim = 32
uid_max = max(features.take(0,1)) + 1
gender_max = max(features.take(2,1)) + 1
age_max = max(features.take(3,1)) + 1
job_max = max(features.take(4,1)) + 1
movie_id_max = max(features.take(1,1)) + 1
movie_categories_max = max(genres2int.values()) + 1
movie_title_max = len(title_set)
combiner = "sum"
sentences_size = title_count
window_sizes = {2, 3, 4, 5}
filter_num = 8
movieid2idx = {val[0]:i for i, val in enumerate(movies.values)}
num_epochs = 5
batch_size = 256
dropout_keep = 0.5
learning_rate = 0.0001
show_every_n_batches = 20
save_dir = './save'
def get_inputs():
uid = tf.placeholder(tf.int32, [None, 1], name="uid")
user_gender = tf.placeholder(tf.int32, [None, 1], name="user_gender")
user_age = tf.placeholder(tf.int32, [None, 1], name="user_age")
user_job = tf.placeholder(tf.int32, [None, 1], name="user_job")
movie_id = tf.placeholder(tf.int32, [None, 1], name="movie_id")
movie_categories = tf.placeholder(tf.int32, [None, 18], name="movie_categories")
movie_titles = tf.placeholder(tf.int32, [None, 15], name="movie_titles")
targets = tf.placeholder(tf.int32, [None, 1], name="targets")
LearningRate = tf.placeholder(tf.float32, name="LearningRate")
dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
return uid, user_gender, user_age, user_job, movie_id, movie_categories, movie_titles, targets, LearningRate, dropout_keep_prob
def get_user_embedding(uid, user_gender, user_age, user_job):
with tf.name_scope("user_embedding"):
uid_embed_matrix = tf.Variable(tf.random_uniform([uid_max, embed_dim], -1, 1), name="uid_embed_matrix")
uid_embed_layer = tf.nn.embedding_lookup(uid_embed_matrix, uid, name="uid_embed_layer")
gender_embed_matrix = tf.Variable(tf.random_uniform([gender_max, embed_dim // 2], -1, 1),
name="gender_embed_matrix")
gender_embed_layer = tf.nn.embedding_lookup(gender_embed_matrix, user_gender, name="gender_embed_layer")
age_embed_matrix = tf.Variable(tf.random_uniform([age_max, embed_dim // 2], -1, 1), name="age_embed_matrix")
age_embed_layer = tf.nn.embedding_lookup(age_embed_matrix, user_age, name="age_embed_layer")
job_embed_matrix = tf.Variable(tf.random_uniform([job_max, embed_dim // 2], -1, 1), name="job_embed_matrix")
job_embed_layer = tf.nn.embedding_lookup(job_embed_matrix, user_job, name="job_embed_layer")
return uid_embed_layer, gender_embed_layer, age_embed_layer, job_embed_layer
def get_user_feature_layer(uid_embed_layer, gender_embed_layer, age_embed_layer, job_embed_layer):
with tf.name_scope("user_fc"):
uid_fc_layer = tf.layers.dense(uid_embed_layer, embed_dim, name="uid_fc_layer", activation=tf.nn.relu)
gender_fc_layer = tf.layers.dense(gender_embed_layer, embed_dim, name="gender_fc_layer", activation=tf.nn.relu)
age_fc_layer = tf.layers.dense(age_embed_layer, embed_dim, name="age_fc_layer", activation=tf.nn.relu)
job_fc_layer = tf.layers.dense(job_embed_layer, embed_dim, name="job_fc_layer", activation=tf.nn.relu)
user_combine_layer = tf.concat([uid_fc_layer, gender_fc_layer, age_fc_layer, job_fc_layer], 2)
user_combine_layer = tf.contrib.layers.fully_connected(user_combine_layer, 200, tf.tanh)
user_combine_layer_flat = tf.reshape(user_combine_layer, [-1, 200])
return user_combine_layer, user_combine_layer_flat
def get_movie_id_embed_layer(movie_id):
with tf.name_scope("movie_embedding"):
movie_id_embed_matrix = tf.Variable(tf.random_uniform([movie_id_max, embed_dim], -1, 1), name = "movie_id_embed_matrix")
movie_id_embed_layer = tf.nn.embedding_lookup(movie_id_embed_matrix, movie_id, name = "movie_id_embed_layer")
return movie_id_embed_layer
def get_movie_categories_layers(movie_categories):
with tf.name_scope("movie_categories_layers"):
movie_categories_embed_matrix = tf.Variable(tf.random_uniform([movie_categories_max, embed_dim], -1, 1), name = "movie_categories_embed_matrix")
movie_categories_embed_layer = tf.nn.embedding_lookup(movie_categories_embed_matrix, movie_categories, name = "movie_categories_embed_layer")
if combiner == "sum":
movie_categories_embed_layer = tf.reduce_sum(movie_categories_embed_layer, axis=1, keep_dims=True)
return movie_categories_embed_layer
def get_movie_cnn_layer(movie_titles):
with tf.name_scope("movie_embedding"):
movie_title_embed_matrix = tf.Variable(tf.random_uniform([movie_title_max, embed_dim], -1, 1),
name="movie_title_embed_matrix")
movie_title_embed_layer = tf.nn.embedding_lookup(movie_title_embed_matrix, movie_titles,
name="movie_title_embed_layer")
movie_title_embed_layer_expand = tf.expand_dims(movie_title_embed_layer, -1)
pool_layer_lst = []
for window_size in window_sizes:
with tf.name_scope("movie_txt_conv_maxpool_{}".format(window_size)):
filter_weights = tf.Variable(tf.truncated_normal([window_size, embed_dim, 1, filter_num], stddev=0.1),
name="filter_weights")
filter_bias = tf.Variable(tf.constant(0.1, shape=[filter_num]), name="filter_bias")
conv_layer = tf.nn.conv2d(movie_title_embed_layer_expand, filter_weights, [1, 1, 1, 1], padding="VALID",
name="conv_layer")
relu_layer = tf.nn.relu(tf.nn.bias_add(conv_layer, filter_bias), name="relu_layer")
maxpool_layer = tf.nn.max_pool(relu_layer, [1, sentences_size - window_size + 1, 1, 1], [1, 1, 1, 1],
padding="VALID", name="maxpool_layer")
pool_layer_lst.append(maxpool_layer)
with tf.name_scope("pool_dropout"):
pool_layer = tf.concat(pool_layer_lst, 3, name="pool_layer")
max_num = len(window_sizes) * filter_num
pool_layer_flat = tf.reshape(pool_layer, [-1, 1, max_num], name="pool_layer_flat")
dropout_layer = tf.nn.dropout(pool_layer_flat, dropout_keep_prob, name="dropout_layer")
return pool_layer_flat, dropout_layer
def get_movie_feature_layer(movie_id_embed_layer, movie_categories_embed_layer, dropout_layer):
    """Combine the three movie feature branches into one 200-d vector.

    Projects the id and category embeddings through ReLU dense layers,
    concatenates them with the title-CNN dropout features, and passes the
    result through a tanh fully-connected layer. Returns both the combined
    layer and its ``[-1, 200]`` flattened view.
    """
    with tf.name_scope("movie_fc"):
        movie_id_fc_layer = tf.layers.dense(movie_id_embed_layer, embed_dim, name="movie_id_fc_layer",
                                            activation=tf.nn.relu)
        movie_categories_fc_layer = tf.layers.dense(movie_categories_embed_layer, embed_dim,
                                                    name="movie_categories_fc_layer", activation=tf.nn.relu)
        # Concatenate along the feature axis (axis 2).
        movie_combine_layer = tf.concat([movie_id_fc_layer, movie_categories_fc_layer, dropout_layer], 2)
        movie_combine_layer = tf.contrib.layers.fully_connected(movie_combine_layer, 200, tf.tanh)
        movie_combine_layer_flat = tf.reshape(movie_combine_layer, [-1, 200])
        return movie_combine_layer, movie_combine_layer_flat
# Build the full training graph: user tower, movie tower, dot-product rating.
tf.reset_default_graph()
train_graph = tf.Graph()
with train_graph.as_default():
    # Placeholders come from get_inputs() (defined elsewhere in this script).
    uid, user_gender, user_age, user_job, movie_id, movie_categories, movie_titles, targets, lr, dropout_keep_prob = get_inputs()
    # User tower: four embeddings -> fully-connected feature vector.
    uid_embed_layer, gender_embed_layer, age_embed_layer, job_embed_layer = get_user_embedding(uid, user_gender, user_age, user_job)
    user_combine_layer, user_combine_layer_flat = get_user_feature_layer(uid_embed_layer, gender_embed_layer, age_embed_layer, job_embed_layer)
    # Movie tower: id embedding + category embedding + title text-CNN.
    movie_id_embed_layer = get_movie_id_embed_layer(movie_id)
    movie_categories_embed_layer = get_movie_categories_layers(movie_categories)
    pool_layer_flat, dropout_layer = get_movie_cnn_layer(movie_titles)
    movie_combine_layer, movie_combine_layer_flat = get_movie_feature_layer(movie_id_embed_layer,
                                                                            movie_categories_embed_layer,
                                                                            dropout_layer)
    with tf.name_scope("inference"):
        # Predicted rating = dot product of the user and movie feature vectors.
        inference = tf.reduce_sum(user_combine_layer_flat * movie_combine_layer_flat, axis=1)
        inference = tf.expand_dims(inference, axis=1)
    with tf.name_scope("loss"):
        # MSE regression against the true ratings.
        cost = tf.losses.mean_squared_error(targets, inference )
        loss = tf.reduce_mean(cost)
    # Optimizer kept in compute/apply form so gradients can be summarised below.
    global_step = tf.Variable(0, name="global_step", trainable=False)
    optimizer = tf.train.AdamOptimizer(lr)
    gradients = optimizer.compute_gradients(loss)
    train_op = optimizer.apply_gradients(gradients, global_step=global_step)
def get_batches(Xs, ys, batch_size):
    """Yield successive (features, targets) mini-batches of at most *batch_size*.

    The final batch is smaller when len(Xs) is not a multiple of batch_size;
    slicing clamps automatically at the end of the sequence.
    """
    for offset in range(0, len(Xs), batch_size):
        yield Xs[offset:offset + batch_size], ys[offset:offset + batch_size]
# Run training: log gradient/loss summaries, iterate epochs over a fixed
# train/test split, and save the final checkpoint.
losses = {'train': [], 'test': []}
with tf.Session(graph=train_graph) as sess:
    # --- TensorBoard summaries: histogram + sparsity for every gradient ---
    grad_summaries = []
    for g, v in gradients:
        if g is not None:
            grad_hist_summary = tf.summary.histogram("{}/grad/hist".format(v.name.replace(':', '_')), g)
            sparsity_summary = tf.summary.scalar("{}/grad/sparsity".format(v.name.replace(':', '_')),
                                                 tf.nn.zero_fraction(g))
            grad_summaries.append(grad_hist_summary)
            grad_summaries.append(sparsity_summary)
    grad_summaries_merged = tf.summary.merge(grad_summaries)
    # Per-run output directory keyed by wall-clock timestamp.
    timestamp = str(int(time.time()))
    out_dir = os.path.abspath(os.path.join(os.path.curdir, "runs", timestamp))
    print("Writing to {}\n".format(out_dir))
    loss_summary = tf.summary.scalar("loss", loss)
    train_summary_op = tf.summary.merge([loss_summary, grad_summaries_merged])
    train_summary_dir = os.path.join(out_dir, "summaries", "train")
    train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)
    inference_summary_op = tf.summary.merge([loss_summary])
    inference_summary_dir = os.path.join(out_dir, "summaries", "inference")
    inference_summary_writer = tf.summary.FileWriter(inference_summary_dir, sess.graph)
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    for epoch_i in range(num_epochs):
        # NOTE(review): random_state is fixed, so the split is identical
        # every epoch -- presumably intentional; confirm.
        train_X, test_X, train_y, test_y = train_test_split(features,
                                                            targets_values,
                                                            test_size=0.2,
                                                            random_state=0)
        train_batches = get_batches(train_X, train_y, batch_size)
        test_batches = get_batches(test_X, test_y, batch_size)
        # Integer division drops the final partial batch.
        for batch_i in range(len(train_X) // batch_size):
            x, y = next(train_batches)
            # Column 6 of each row holds the fixed-width (18) category ids;
            # column 5 the padded title word ids -- inferred from x.take
            # usage, TODO confirm against the feature-building code.
            categories = np.zeros([batch_size, 18])
            for i in range(batch_size):
                categories[i] = x.take(6, 1)[i]
            titles = np.zeros([batch_size, sentences_size])
            for i in range(batch_size):
                titles[i] = x.take(5, 1)[i]
            feed = {
                uid: np.reshape(x.take(0, 1), [batch_size, 1]),
                user_gender: np.reshape(x.take(2, 1), [batch_size, 1]),
                user_age: np.reshape(x.take(3, 1), [batch_size, 1]),
                user_job: np.reshape(x.take(4, 1), [batch_size, 1]),
                movie_id: np.reshape(x.take(1, 1), [batch_size, 1]),
                movie_categories: categories,
                movie_titles: titles,
                targets: np.reshape(y, [batch_size, 1]),
                dropout_keep_prob: dropout_keep,
                lr: learning_rate}
            step, train_loss, summaries, _ = sess.run([global_step, loss, train_summary_op, train_op], feed)
            losses['train'].append(train_loss)
            train_summary_writer.add_summary(summaries, step)
            if (epoch_i * (len(train_X) // batch_size) + batch_i) % show_every_n_batches == 0:
                time_str = datetime.datetime.now().isoformat()
                print('{}: Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
                    time_str,
                    epoch_i,
                    batch_i,
                    (len(train_X) // batch_size),
                    train_loss))
        # --- Evaluation pass over the held-out split (dropout disabled) ---
        for batch_i in range(len(test_X) // batch_size):
            x, y = next(test_batches)
            categories = np.zeros([batch_size, 18])
            for i in range(batch_size):
                categories[i] = x.take(6, 1)[i]
            titles = np.zeros([batch_size, sentences_size])
            for i in range(batch_size):
                titles[i] = x.take(5, 1)[i]
            feed = {
                uid: np.reshape(x.take(0, 1), [batch_size, 1]),
                user_gender: np.reshape(x.take(2, 1), [batch_size, 1]),
                user_age: np.reshape(x.take(3, 1), [batch_size, 1]),
                user_job: np.reshape(x.take(4, 1), [batch_size, 1]),
                movie_id: np.reshape(x.take(1, 1), [batch_size, 1]),
                movie_categories: categories,
                movie_titles: titles,
                targets: np.reshape(y, [batch_size, 1]),
                dropout_keep_prob: 1,
                lr: learning_rate}
            step, test_loss, summaries = sess.run([global_step, loss, inference_summary_op], feed)
            losses['test'].append(test_loss)
            inference_summary_writer.add_summary(summaries, step)
            time_str = datetime.datetime.now().isoformat()
            if (epoch_i * (len(test_X) // batch_size) + batch_i) % show_every_n_batches == 0:
                print('{}: Epoch {:>3} Batch {:>4}/{} test_loss = {:.3f}'.format(
                    time_str,
                    epoch_i,
                    batch_i,
                    (len(test_X) // batch_size),
                    test_loss))
    # Checkpoint is written once, after all epochs finish.
    saver.save(sess, save_dir)
    print('Model Trained and Saved')
    save_params((save_dir))
load_dir = load_params()
print('load_dir: ', load_dir)
# Plot training and test loss curves on the same figure.
plt.plot(losses['train'], label='Training loss')
plt.legend()
_ = plt.ylim()
plt.plot(losses['test'], label='Test loss')
plt.legend()
_ = plt.ylim()
plt.show()
def get_tensors(loaded_graph):
    """Fetch the input placeholders and key output tensors from a restored graph.

    Returns a 13-tuple: the eight feature/target placeholders, the learning
    rate and dropout placeholders, the rating inference tensor, and the
    flattened movie and user feature vectors.
    """
    def _tensor(name):
        return loaded_graph.get_tensor_by_name(name)

    placeholder_names = ("uid", "user_gender", "user_age", "user_job",
                         "movie_id", "movie_categories", "movie_titles",
                         "targets")
    (uid, user_gender, user_age, user_job, movie_id, movie_categories,
     movie_titles, targets) = (_tensor(name + ":0") for name in placeholder_names)
    lr = _tensor("LearningRate:0")
    dropout_keep_prob = _tensor("dropout_keep_prob:0")
    inference = _tensor("inference/ExpandDims:0")
    movie_combine_layer_flat = _tensor("movie_fc/Reshape:0")
    user_combine_layer_flat = _tensor("user_fc/Reshape:0")
    return (uid, user_gender, user_age, user_job, movie_id, movie_categories,
            movie_titles, targets, lr, dropout_keep_prob, inference,
            movie_combine_layer_flat, user_combine_layer_flat)
def rating_movie(user_id_val, movie_id_val):
    """Predict the rating user *user_id_val* would give movie *movie_id_val*.

    Restores the trained graph from the module-level ``load_dir`` and runs a
    single forward pass. Relies on the global ``users``/``movies`` tables and
    the ``movieid2idx`` mapping -- assumed to be loaded earlier (TODO confirm).
    """
    loaded_graph = tf.Graph()
    with tf.Session(graph=loaded_graph) as sess:
        loader = tf.train.import_meta_graph(load_dir + '.meta')
        loader.restore(sess, load_dir)
        uid, user_gender, user_age, user_job, movie_id, movie_categories, movie_titles, targets, lr, dropout_keep_prob, inference, _, __ = get_tensors(
            loaded_graph)
        # Fixed-width genre (18) and padded-title feature rows for one movie.
        categories = np.zeros([1, 18])
        categories[0] = movies.values[movieid2idx[movie_id_val]][2]
        titles = np.zeros([1, sentences_size])
        titles[0] = movies.values[movieid2idx[movie_id_val]][1]
        # user_id_val - 1 indexes users.values -- presumably user ids are
        # 1-based and rows are id-ordered; verify against the data prep.
        feed = {
            uid: np.reshape(users.values[user_id_val - 1][0], [1, 1]),
            user_gender: np.reshape(users.values[user_id_val - 1][1], [1, 1]),
            user_age: np.reshape(users.values[user_id_val - 1][2], [1, 1]),
            user_job: np.reshape(users.values[user_id_val - 1][3], [1, 1]),
            movie_id: np.reshape(movies.values[movieid2idx[movie_id_val]][0], [1, 1]),
            movie_categories: categories,
            movie_titles: titles,
            dropout_keep_prob: 1}
        # Dropout disabled (keep prob 1) at inference time.
        inference_val = sess.run([inference], feed)
        return (inference_val)
# Smoke-test the restored model on one (user, movie) pair.
rating_movie(234, 1401)
# --- Precompute the 200-d feature vector for every movie and pickle it ---
loaded_graph = tf.Graph()
movie_matrics = []
with tf.Session(graph=loaded_graph) as sess:
    loader = tf.train.import_meta_graph(load_dir + '.meta')
    loader.restore(sess, load_dir)
    uid, user_gender, user_age, user_job, movie_id, movie_categories, movie_titles, targets, lr, dropout_keep_prob, _, movie_combine_layer_flat, __ = get_tensors(loaded_graph)
    for item in movies.values:
        categories = np.zeros([1, 18])
        categories[0] = item.take(2)
        titles = np.zeros([1, sentences_size])
        titles[0] = item.take(1)
        feed = {
            movie_id: np.reshape(item.take(0), [1, 1]),
            movie_categories: categories,
            movie_titles: titles,
            dropout_keep_prob: 1}
        movie_combine_layer_flat_val = sess.run([movie_combine_layer_flat], feed)
        movie_matrics.append(movie_combine_layer_flat_val)
# NOTE(review): file handles passed to pickle.dump/load below are never
# closed -- consider `with open(...)`; left unchanged here.
pickle.dump((np.array(movie_matrics).reshape(-1, 200)), open('movie_matrics.p', 'wb'))
movie_matrics = pickle.load(open('movie_matrics.p', mode='rb'))
# --- Precompute the 200-d feature vector for every user and pickle it ---
loaded_graph = tf.Graph()
users_matrics = []
with tf.Session(graph=loaded_graph) as sess:
    loader = tf.train.import_meta_graph(load_dir + '.meta')
    loader.restore(sess, load_dir)
    uid, user_gender, user_age, user_job, movie_id, movie_categories, movie_titles, targets, lr, dropout_keep_prob, _, __,user_combine_layer_flat = get_tensors(loaded_graph)
    for item in users.values:
        feed = {
            uid: np.reshape(item.take(0), [1, 1]),
            user_gender: np.reshape(item.take(1), [1, 1]),
            user_age: np.reshape(item.take(2), [1, 1]),
            user_job: np.reshape(item.take(3), [1, 1]),
            dropout_keep_prob: 1}
        user_combine_layer_flat_val = sess.run([user_combine_layer_flat], feed)
        users_matrics.append(user_combine_layer_flat_val)
pickle.dump((np.array(users_matrics).reshape(-1, 200)), open('users_matrics.p', 'wb'))
| true | true |
f7fd031b3050ebbc13593e25c2968f68853a9554 | 6,401 | py | Python | ogs6py/classes/processes.py | bilke/ogs6py | b6d9364bf7275f65b324a466e38a2bbef9ca076b | [
"BSD-3-Clause"
] | null | null | null | ogs6py/classes/processes.py | bilke/ogs6py | b6d9364bf7275f65b324a466e38a2bbef9ca076b | [
"BSD-3-Clause"
] | null | null | null | ogs6py/classes/processes.py | bilke/ogs6py | b6d9364bf7275f65b324a466e38a2bbef9ca076b | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Copyright (c) 2012-2021, OpenGeoSys Community (http://www.opengeosys.org)
Distributed under a Modified BSD License.
See accompanying file LICENSE or
http://www.opengeosys.org/project/license
"""
# pylint: disable=C0103, R0902, R0914, R0913
from ogs6py.classes import build_tree
class Processes(build_tree.BuildTree):
    """
    Class for managing the processes section in the project file.
    """
    @staticmethod
    def _new_node(tag, text='', attr=None):
        """Return a fresh project-tree node dictionary for the given tag."""
        return {
            'tag': tag,
            'text': text,
            'attr': attr if attr is not None else {},
            'children': {}
        }

    def __init__(self):
        # Root <processes> element with a single <process> child.
        self.tree = {'processes': self._new_node('processes')}
        self.tree['processes']['children']['process'] = self._new_node('process')
        # Sub-trees that are attached lazily by the setter methods below.
        self.constreltree = self._new_node('constitutive_relation')
        self.proc_vartree = self._new_node('process_variables')
        self.sec_vartree = self._new_node('secondary_variables')
        self.sflux_vartree = self._new_node('calculatesurfaceflux')

    def add_process_variable(self, **args):
        """
        Adds a process variable.

        Parameters
        ----------
        process_variable : `str`
        process_variable_name : `str`
        secondary_variable : `str`
        output_name : `str`
        """
        self._convertargs(args)
        process_children = self.tree['processes']['children']['process']['children']
        if "process_variable" in args:
            if "process_variable_name" not in args:
                raise KeyError("process_variable_name missing.")
            process_children['process_variables'] = self.proc_vartree
            self.proc_vartree['children'][args['process_variable']] = self._new_node(
                args['process_variable'], text=args['process_variable_name'])
        elif "secondary_variable" in args:
            if "output_name" not in args:
                raise KeyError("No output_name given.")
            process_children['secondary_variables'] = self.sec_vartree
            self.sec_vartree['children'][args['output_name']] = self._new_node(
                'secondary_variable',
                attr={
                    'internal_name': args['secondary_variable'],
                    'output_name': args['output_name']
                })
        else:
            raise KeyError("No process_variable/secondary_variable given.")

    def set_process(self, **args):
        """
        Set basic process properties.

        Parameters
        ----------
        name : `str`
        type : `str`
        integration_order : `str`
        darcy_gravity : `list` or `tuple`
                        holding darcy acceleration as vector
        any pair tag="value" translates to
        <tag>value</tag> in process section
        """
        self._convertargs(args)
        # Required keys, each with its historical error message.
        required = {
            "name": "No process name given.",
            "type": "type missing.",
            "integration_order": "integration_order missing."
        }
        for key, message in required.items():
            if key not in args:
                raise KeyError(message)
        process_children = self.tree['processes']['children']['process']['children']
        if "darcy_gravity" in args:
            # Each nonzero component overwrites the darcy_gravity node, so
            # effectively the last nonzero axis/value pair is kept.
            for axis, g_value in enumerate(args["darcy_gravity"]):
                if g_value != 0.0:
                    process_children['darcy_gravity'] = self.populate_tree('darcy_gravity')
                    darcy_node = process_children['darcy_gravity']
                    darcy_node['children']['axis'] = self.populate_tree('axis_id', text=str(axis))
                    darcy_node['children']['g'] = self.populate_tree('g', text=str(g_value))
        # Every plain string kwarg becomes a <tag>value</tag> child.
        for tag, value in args.items():
            if isinstance(value, str):
                process_children[tag] = self.populate_tree(tag, text=value)

    def set_constitutive_relation(self, **args):
        """
        Sets constitutive relation

        Parameters
        ----------
        any pair tag="value" translates to
        <tag>value</tag> in process section
        """
        self._convertargs(args)
        self.tree['processes']['children']['process']['children'][
            'constitutive_relation'] = self.constreltree
        for tag, value in args.items():
            self.constreltree['children'][tag] = self._new_node(tag, text=value)

    def add_surfaceflux(self, **args):
        """
        Add SurfaceFlux

        Parameters
        ----------
        mesh : `str`
        property_name : `str`

        Raises
        ------
        KeyError
            If mesh or property_name is missing.

        Returns
        -------
        None.
        """
        self._convertargs(args)
        if "mesh" not in args:
            raise KeyError("No surface mesh for flux analysis assigned")
        if "property_name" not in args:
            raise KeyError("No property name, e.g specific_flux, assigned")
        self.tree['processes']['children']['process']['children'][
            'calculatesurfaceflux'] = self.sflux_vartree
        self.sflux_vartree['children']['mesh'] = self._new_node('mesh', text=args['mesh'])
        self.sflux_vartree['children']['property_name'] = self._new_node(
            'property_name', text=args['property_name'])
| 32.994845 | 94 | 0.471333 |
from ogs6py.classes import build_tree
class Processes(build_tree.BuildTree):
def __init__(self):
self.tree = {
'processes': {
'tag': 'processes',
'text': '',
'attr': {},
'children': {}
}
}
self.tree['processes']['children'] = {
'process': {
'tag': 'process',
'text': '',
'attr': {},
'children': {}
}
}
self.constreltree = {
'tag': 'constitutive_relation',
'text': '',
'attr': {},
'children': {}
}
self.proc_vartree = {
'tag': 'process_variables',
'text': '',
'attr': {},
'children': {}
}
self.sec_vartree = {
'tag': 'secondary_variables',
'text': '',
'attr': {},
'children': {}
}
self.sflux_vartree = {
'tag': 'calculatesurfaceflux',
'text': '',
'attr': {},
'children': {}
}
def add_process_variable(self, **args):
self._convertargs(args)
if "process_variable" in args:
if "process_variable_name" not in args:
raise KeyError("process_variable_name missing.")
self.tree['processes']['children']['process']['children'][
'process_variables'] = self.proc_vartree
self.proc_vartree['children'][args['process_variable']] = {
'tag': args['process_variable'],
'text': args['process_variable_name'],
'attr': {},
'children': {}
}
elif "secondary_variable" in args:
if "output_name" not in args:
raise KeyError("No output_name given.")
self.tree['processes']['children']['process']['children'][
'secondary_variables'] = self.sec_vartree
self.sec_vartree['children'][args['output_name']] = {
'tag': 'secondary_variable',
'text': '',
'attr': {
'internal_name': args['secondary_variable'],
'output_name': args['output_name']
},
'children': {}
}
else:
raise KeyError("No process_variable/secondary_variable given.")
def set_process(self, **args):
self._convertargs(args)
if "name" not in args:
raise KeyError("No process name given.")
if "type" not in args:
raise KeyError("type missing.")
if "integration_order" not in args:
raise KeyError("integration_order missing.")
if "darcy_gravity" in args:
for i, entry in enumerate(args["darcy_gravity"]):
if entry != 0.0:
self.tree['processes']['children']['process'][
'children']['darcy_gravity'] = self.populate_tree('darcy_gravity')
darcy_vel = self.tree['processes']['children']['process'][
'children']['darcy_gravity']
darcy_vel['children']['axis'] = self.populate_tree('axis_id', text=str(i))
darcy_vel['children']['g'] = self.populate_tree('g', text=str(entry))
for key, value in args.items():
if isinstance(value, str):
self.tree['processes']['children']['process'][
'children'][key] = self.populate_tree(key, text=args[key])
def set_constitutive_relation(self, **args):
self._convertargs(args)
self.tree['processes']['children']['process']['children'][
'constitutive_relation'] = self.constreltree
for key in args:
self.constreltree['children'][key] = {
'tag': key,
'text': args[key],
'attr': {},
'children': {}
}
def add_surfaceflux(self,**args):
self._convertargs(args)
if "mesh" not in args:
raise KeyError("No surface mesh for flux analysis assigned")
if "property_name" not in args:
raise KeyError("No property name, e.g specific_flux, assigned")
self.tree['processes']['children']['process']['children'][
'calculatesurfaceflux'] = self.sflux_vartree
self.sflux_vartree['children']['mesh'] = {
'tag': 'mesh',
'text': args['mesh'],
'attr': {},
'children': {}
}
self.sflux_vartree['children']['property_name'] = {
'tag': 'property_name',
'text': args['property_name'],
'attr': {},
'children': {}
}
| true | true |
f7fd0323381cfecd8177a419dced559b61c4db17 | 4,975 | py | Python | acme/agents/jax/dqn/agent.py | Tsaousis/acme | 14278693bcc5fef0839ac60792d452d3d80acfd7 | [
"Apache-2.0"
] | 2,650 | 2020-06-01T16:31:25.000Z | 2022-03-31T07:32:41.000Z | acme/agents/jax/dqn/agent.py | Tsaousis/acme | 14278693bcc5fef0839ac60792d452d3d80acfd7 | [
"Apache-2.0"
] | 199 | 2020-06-02T01:09:09.000Z | 2022-03-31T17:11:20.000Z | acme/agents/jax/dqn/agent.py | Tsaousis/acme | 14278693bcc5fef0839ac60792d452d3d80acfd7 | [
"Apache-2.0"
] | 344 | 2020-06-01T16:45:21.000Z | 2022-03-30T11:15:09.000Z | # python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DQN agent implementation."""
from acme import specs
from acme.agents import agent
from acme.agents import replay
from acme.agents.jax import actor_core as actor_core_lib
from acme.agents.jax import actors
from acme.agents.jax.dqn import config as dqn_config
from acme.agents.jax.dqn import learning_lib
from acme.agents.jax.dqn import losses
from acme.jax import networks as networks_lib
from acme.jax import variable_utils
import jax
import jax.numpy as jnp
import optax
import rlax
class DQNFromConfig(agent.Agent):
  """DQN agent.

  This implements a single-process DQN agent. This is a simple Q-learning
  algorithm that inserts N-step transitions into a replay buffer, and
  periodically updates its policy by sampling these transitions using
  prioritization.
  """

  def __init__(
      self,
      environment_spec: specs.EnvironmentSpec,
      network: networks_lib.FeedForwardNetwork,
      config: dqn_config.DQNConfig,
  ):
    """Initialize the agent.

    Args:
      environment_spec: description of the actions, observations, etc.
      config: hyperparameters bundle (replay sizes, learning rate, epsilon,
        n_step, etc.) consumed by replay, learner and actor construction.
    """
    # Data is communicated via reverb replay.
    reverb_replay = replay.make_reverb_prioritized_nstep_replay(
        environment_spec=environment_spec,
        n_step=config.n_step,
        batch_size=config.batch_size,
        max_replay_size=config.max_replay_size,
        min_replay_size=config.min_replay_size,
        priority_exponent=config.priority_exponent,
        discount=config.discount,
    )
    # Keep a reference so the in-process reverb server stays alive.
    self._server = reverb_replay.server

    # Gradient clipping is applied before the Adam update.
    optimizer = optax.chain(
        optax.clip_by_global_norm(config.max_gradient_norm),
        optax.adam(config.learning_rate),
    )
    key_learner, key_actor = jax.random.split(jax.random.PRNGKey(config.seed))
    # The learner updates the parameters (and initializes them).
    loss_fn = losses.PrioritizedDoubleQLearning(
        discount=config.discount,
        importance_sampling_exponent=config.importance_sampling_exponent,
    )
    learner = learning_lib.SGDLearner(
        network=network,
        loss_fn=loss_fn,
        data_iterator=reverb_replay.data_iterator,
        optimizer=optimizer,
        target_update_period=config.target_update_period,
        random_key=key_learner,
        replay_client=reverb_replay.client,
    )

    # The actor selects actions according to the policy.
    def policy(params: networks_lib.Params, key: jnp.ndarray,
               observation: jnp.ndarray) -> jnp.ndarray:
      # Epsilon-greedy over the network's action values.
      action_values = network.apply(params, observation)
      return rlax.epsilon_greedy(config.epsilon).sample(key, action_values)

    actor_core = actor_core_lib.batched_feed_forward_to_actor_core(policy)
    variable_client = variable_utils.VariableClient(learner, '')
    actor = actors.GenericActor(
        actor_core, key_actor, variable_client, reverb_replay.adder)

    super().__init__(
        actor=actor,
        learner=learner,
        min_observations=max(config.batch_size, config.min_replay_size),
        observations_per_step=config.batch_size / config.samples_per_insert,
    )
class DQN(DQNFromConfig):
  """DQN agent.

  Backwards-compatible convenience wrapper: accepts the individual
  hyperparameters as keyword arguments and forwards them to
  ``DQNFromConfig`` bundled into a single ``DQNConfig``. Maintained while
  agent configuration migrates to the more modular style.
  """

  def __init__(
      self,
      environment_spec: specs.EnvironmentSpec,
      network: networks_lib.FeedForwardNetwork,
      batch_size: int = 256,
      prefetch_size: int = 4,
      target_update_period: int = 100,
      samples_per_insert: float = 0.5,
      min_replay_size: int = 1000,
      max_replay_size: int = 1000000,
      importance_sampling_exponent: float = 0.2,
      priority_exponent: float = 0.6,
      n_step: int = 5,
      epsilon: float = 0.05,
      learning_rate: float = 1e-3,
      discount: float = 0.99,
      seed: int = 1,
  ):
    # Bundle the scalar hyperparameters into a single config object.
    hyperparameters = dict(
        batch_size=batch_size,
        prefetch_size=prefetch_size,
        target_update_period=target_update_period,
        samples_per_insert=samples_per_insert,
        min_replay_size=min_replay_size,
        max_replay_size=max_replay_size,
        importance_sampling_exponent=importance_sampling_exponent,
        priority_exponent=priority_exponent,
        n_step=n_step,
        epsilon=epsilon,
        learning_rate=learning_rate,
        discount=discount,
        seed=seed,
    )
    super().__init__(
        environment_spec=environment_spec,
        network=network,
        config=dqn_config.DQNConfig(**hyperparameters),
    )
| 34.310345 | 80 | 0.721005 |
from acme import specs
from acme.agents import agent
from acme.agents import replay
from acme.agents.jax import actor_core as actor_core_lib
from acme.agents.jax import actors
from acme.agents.jax.dqn import config as dqn_config
from acme.agents.jax.dqn import learning_lib
from acme.agents.jax.dqn import losses
from acme.jax import networks as networks_lib
from acme.jax import variable_utils
import jax
import jax.numpy as jnp
import optax
import rlax
class DQNFromConfig(agent.Agent):
def __init__(
self,
environment_spec: specs.EnvironmentSpec,
network: networks_lib.FeedForwardNetwork,
config: dqn_config.DQNConfig,
):
reverb_replay = replay.make_reverb_prioritized_nstep_replay(
environment_spec=environment_spec,
n_step=config.n_step,
batch_size=config.batch_size,
max_replay_size=config.max_replay_size,
min_replay_size=config.min_replay_size,
priority_exponent=config.priority_exponent,
discount=config.discount,
)
self._server = reverb_replay.server
optimizer = optax.chain(
optax.clip_by_global_norm(config.max_gradient_norm),
optax.adam(config.learning_rate),
)
key_learner, key_actor = jax.random.split(jax.random.PRNGKey(config.seed))
loss_fn = losses.PrioritizedDoubleQLearning(
discount=config.discount,
importance_sampling_exponent=config.importance_sampling_exponent,
)
learner = learning_lib.SGDLearner(
network=network,
loss_fn=loss_fn,
data_iterator=reverb_replay.data_iterator,
optimizer=optimizer,
target_update_period=config.target_update_period,
random_key=key_learner,
replay_client=reverb_replay.client,
)
def policy(params: networks_lib.Params, key: jnp.ndarray,
observation: jnp.ndarray) -> jnp.ndarray:
action_values = network.apply(params, observation)
return rlax.epsilon_greedy(config.epsilon).sample(key, action_values)
actor_core = actor_core_lib.batched_feed_forward_to_actor_core(policy)
variable_client = variable_utils.VariableClient(learner, '')
actor = actors.GenericActor(
actor_core, key_actor, variable_client, reverb_replay.adder)
super().__init__(
actor=actor,
learner=learner,
min_observations=max(config.batch_size, config.min_replay_size),
observations_per_step=config.batch_size / config.samples_per_insert,
)
class DQN(DQNFromConfig):
def __init__(
self,
environment_spec: specs.EnvironmentSpec,
network: networks_lib.FeedForwardNetwork,
batch_size: int = 256,
prefetch_size: int = 4,
target_update_period: int = 100,
samples_per_insert: float = 0.5,
min_replay_size: int = 1000,
max_replay_size: int = 1000000,
importance_sampling_exponent: float = 0.2,
priority_exponent: float = 0.6,
n_step: int = 5,
epsilon: float = 0.05,
learning_rate: float = 1e-3,
discount: float = 0.99,
seed: int = 1,
):
config = dqn_config.DQNConfig(
batch_size=batch_size,
prefetch_size=prefetch_size,
target_update_period=target_update_period,
samples_per_insert=samples_per_insert,
min_replay_size=min_replay_size,
max_replay_size=max_replay_size,
importance_sampling_exponent=importance_sampling_exponent,
priority_exponent=priority_exponent,
n_step=n_step,
epsilon=epsilon,
learning_rate=learning_rate,
discount=discount,
seed=seed,
)
super().__init__(
environment_spec=environment_spec,
network=network,
config=config,
)
| true | true |
f7fd039edd681255dcaaa15a917813ccf078b23f | 1,830 | py | Python | common/tests.py | jyothishankit/flask-example | 15739bcf9aa7ac469184927becb17fe98c962307 | [
"MIT"
] | 68 | 2015-02-27T17:22:20.000Z | 2021-02-07T15:06:10.000Z | common/tests.py | jyothishankit/flask-example | 15739bcf9aa7ac469184927becb17fe98c962307 | [
"MIT"
] | 3 | 2015-04-12T02:11:34.000Z | 2018-01-08T04:57:22.000Z | common/tests.py | jyothishankit/flask-example | 15739bcf9aa7ac469184927becb17fe98c962307 | [
"MIT"
] | 53 | 2015-04-03T10:39:32.000Z | 2021-11-09T17:27:07.000Z | # -*- coding: utf-8 -*-
from flask import template_rendered, url_for
from contextlib import contextmanager
import unittest
from application import create_app
class BaseTestCase(unittest.TestCase):
    """Base test case that spins up the Flask app with test settings.

    Provides a test client, a pushed request context (so ``url_for`` works
    inside tests), and login/logout helpers.
    """

    def __call__(self, result=None):
        # Wrap every test run with app-context setup/teardown.
        self._pre_setup()
        super(BaseTestCase, self).__call__(result)
        self._post_teardown()

    def _pre_setup(self):
        """Create the app and test client, and push a request context."""
        self.app = create_app('settings_test')
        self.client = self.app.test_client()
        self.ctx = self.app.test_request_context()
        self.ctx.push()

    def _post_teardown(self):
        """Pop the request context pushed in :meth:`_pre_setup`."""
        self.ctx.pop()

    def assertRedirects(self, resp, location):
        """Assert *resp* is a 301/302 redirect to *location*."""
        self.assertTrue(resp.status_code in (301, 302))
        self.assertEqual(resp.location, 'http://localhost' + location)

    def assertStatus(self, resp, status_code):
        """Assert *resp* has the given HTTP status code."""
        self.assertEqual(resp.status_code, status_code)

    def login(self, username, password, client=None):
        """POST the given credentials to the accounts login view.

        Bug fix: the *username*/*password* arguments are now actually sent;
        previously the credentials were hard-coded to ``user1``/``123456``.
        """
        client = client or self.client
        resp = client.post(
            url_for('accounts_app.login'),
            data=dict(username=username, password=password),
            follow_redirects=True
        )
        return resp

    def logout(self, client=None):
        """GET the accounts logout view with the given (or default) client."""
        client = client or self.client
        resp = client.get(
            url_for('accounts_app.logout'),
            follow_redirects=True
        )
        return resp

    @contextmanager
    def captured_templates(self, app):
        """Collect ``(template, context)`` pairs rendered by *app*."""
        recorded = []

        def record(sender, template, context, **extra):
            recorded.append((template, context))

        template_rendered.connect(record, app)
        try:
            yield recorded
        finally:
            template_rendered.disconnect(record, app)
| 26.521739 | 70 | 0.609836 |
from flask import template_rendered, url_for
from contextlib import contextmanager
import unittest
from application import create_app
class BaseTestCase(unittest.TestCase):
def __call__(self, result=None):
self._pre_setup()
super(BaseTestCase, self).__call__(result)
self._post_teardown()
def _pre_setup(self):
self.app = create_app('settings_test')
self.client = self.app.test_client()
self.ctx = self.app.test_request_context()
self.ctx.push()
def _post_teardown(self):
self.ctx.pop()
def assertRedirects(self, resp, location):
self.assertTrue(resp.status_code in (301, 302))
self.assertEqual(resp.location, 'http://localhost' + location)
def assertStatus(self, resp, status_code):
self.assertEqual(resp.status_code, status_code)
def login(self, username, password, client=None):
if client:
client = client
else:
client = self.client
resp = client.post(
url_for('accounts_app.login'),
data=dict(username='user1', password='123456'),
follow_redirects=True
)
return resp
def logout(self, client=None):
if client:
client = client
else:
client = self.client
resp = client.get(
url_for('accounts_app.logout'),
follow_redirects=True
)
return resp
@contextmanager
def captured_templates(self, app):
recorded = []
def record(sender, template, context, **extra):
recorded.append((template, context))
template_rendered.connect(record, app)
try:
yield recorded
finally:
template_rendered.disconnect(record, app)
| true | true |
f7fd043933dd2ed0f75a42886a9a752fe1af9843 | 13,278 | py | Python | airflow/gcp/operators/kubernetes_engine.py | InigoSJ/airflow | 8b97a387dc30d8c88390d500ec99333798c20f1c | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | airflow/gcp/operators/kubernetes_engine.py | InigoSJ/airflow | 8b97a387dc30d8c88390d500ec99333798c20f1c | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2017-05-11T22:57:49.000Z | 2017-05-11T22:57:49.000Z | airflow/gcp/operators/kubernetes_engine.py | InigoSJ/airflow | 8b97a387dc30d8c88390d500ec99333798c20f1c | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2020-11-16T09:03:58.000Z | 2020-11-16T09:03:58.000Z | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
This module contains Google Kubernetes Engine operators.
"""
import os
import subprocess
import tempfile
from typing import Union, Dict, Optional
from google.auth.environment_vars import CREDENTIALS
from google.cloud.container_v1.types import Cluster
from airflow import AirflowException
from airflow.gcp.hooks.kubernetes_engine import GKEClusterHook
from airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class GKEClusterDeleteOperator(BaseOperator):
    """
    Deletes a Google Kubernetes Engine cluster, including the Kubernetes
    endpoint and all worker nodes.

    To delete a certain cluster, you must specify the ``project_id``, the
    ``name`` of the cluster, the ``location`` that the cluster is in, and
    the ``task_id``.

    .. seealso::
        https://google-cloud-python.readthedocs.io/en/latest/container/gapic/v1/api.html#google.cloud.container_v1.ClusterManagerClient.delete_cluster

    :param name: The name of the resource to delete, in this case cluster name
    :type name: str
    :param location: The name of the Google Compute Engine zone in which the
        cluster resides.
    :type location: str
    :param project_id: The Google Developers Console [project ID or project number]
    :type project_id: str
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
    :type gcp_conn_id: str
    :param api_version: The api version to use
    :type api_version: str
    """
    template_fields = ['project_id', 'gcp_conn_id', 'name', 'location', 'api_version']

    @apply_defaults
    def __init__(self,
                 name: str,
                 location: str,
                 project_id: Optional[str] = None,  # fixed annotation: default is None
                 gcp_conn_id: str = 'google_cloud_default',
                 api_version: str = 'v2',
                 *args,
                 **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.project_id = project_id
        self.gcp_conn_id = gcp_conn_id
        self.location = location
        self.api_version = api_version
        self.name = name
        self._check_input()

    def _check_input(self):
        # Fail fast at DAG-construction time rather than when the task runs.
        if not all([self.project_id, self.name, self.location]):
            self.log.error(
                'One of (project_id, name, location) is missing or incorrect')
            raise AirflowException('Operator has incorrect or missing input.')

    def execute(self, context):
        hook = GKEClusterHook(gcp_conn_id=self.gcp_conn_id, location=self.location)
        delete_result = hook.delete_cluster(name=self.name, project_id=self.project_id)
        return delete_result
class GKEClusterCreateOperator(BaseOperator):
    """
    Create a Google Kubernetes Engine Cluster of specified dimensions.
    The operator will wait until the cluster is created.

    The **minimum** required to define a cluster to create is a
    ``dict`` such as ``{'name': 'my-cluster-name', 'initial_node_count': 1}``
    or a ``google.cloud.container_v1.types.Cluster`` proto with the same
    fields populated.

    .. seealso::
        :class:`google.cloud.container_v1.types.Cluster`

    :param location: The name of the Google Compute Engine zone in which the
        cluster resides.
    :type location: str
    :param body: The Cluster definition to create, can be protobuf or python
        dict; if dict it must match protobuf message Cluster.
    :type body: dict or google.cloud.container_v1.types.Cluster
    :param project_id: The Google Developers Console [project ID or project number]
    :type project_id: str
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
    :type gcp_conn_id: str
    :param api_version: The api version to use
    :type api_version: str
    """
    template_fields = ['project_id', 'gcp_conn_id', 'location', 'api_version', 'body']

    @apply_defaults
    def __init__(self,
                 location: str,
                 body: Optional[Union[Dict, Cluster]],
                 project_id: Optional[str] = None,  # fixed annotation: default is None
                 gcp_conn_id: str = 'google_cloud_default',
                 api_version: str = 'v2',
                 *args,
                 **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.project_id = project_id
        self.gcp_conn_id = gcp_conn_id
        self.location = location
        self.api_version = api_version
        self.body = body
        self._check_input()

    def _check_input(self):
        # The body must carry at least `name` and `initial_node_count`,
        # whether it is a plain dict or a Cluster proto.
        if not all([self.project_id, self.location, self.body]) or not (
            (isinstance(self.body, dict) and "name" in self.body and "initial_node_count" in self.body) or
            (getattr(self.body, "name", None) and getattr(self.body, "initial_node_count", None))
        ):
            self.log.error(
                "One of (project_id, location, body, body['name'], "
                "body['initial_node_count']) is missing or incorrect"
            )
            raise AirflowException("Operator has incorrect or missing input.")

    def execute(self, context):
        hook = GKEClusterHook(gcp_conn_id=self.gcp_conn_id, location=self.location)
        create_op = hook.create_cluster(cluster=self.body, project_id=self.project_id)
        return create_op
# Environment variable that gcloud/kubectl read to locate the kubeconfig file.
KUBE_CONFIG_ENV_VAR = "KUBECONFIG"
class GKEPodOperator(KubernetesPodOperator):
    """
    Executes a task in a Kubernetes pod in the specified Google Kubernetes
    Engine cluster.

    This Operator assumes that the system has gcloud installed and either
    has working default application credentials or has configured a
    connection id with a service account.

    .. seealso::
        https://cloud.google.com/docs/authentication/production#providing_credentials_to_your_application

    :param project_id: The Google Developers Console project id
    :type project_id: str
    :param location: The name of the Google Kubernetes Engine zone in which the
        cluster resides, e.g. 'us-central1-a'
    :type location: str
    :param cluster_name: The name of the Google Kubernetes Engine cluster the pod
        should be spawned in
    :type cluster_name: str
    :param gcp_conn_id: The google cloud connection id to use. This allows for
        users to specify a service account.
    :type gcp_conn_id: str
    """
    template_fields = ('project_id', 'location',
                       'cluster_name') + KubernetesPodOperator.template_fields

    @apply_defaults
    def __init__(self,
                 project_id: str,
                 location: str,
                 cluster_name: str,
                 gcp_conn_id: str = 'google_cloud_default',
                 *args,
                 **kwargs):
        super().__init__(*args, **kwargs)
        self.project_id = project_id
        self.location = location
        self.cluster_name = cluster_name
        self.gcp_conn_id = gcp_conn_id

    def execute(self, context):
        # Specifying a service account file allows the user to use non-default
        # authentication for creating a Kubernetes Pod. This is done by setting
        # the environment variable `GOOGLE_APPLICATION_CREDENTIALS` that gcloud
        # looks at.
        key_file = None

        # If gcp_conn_id is not specified gcloud will use the default
        # service account credentials.
        if self.gcp_conn_id:
            from airflow.hooks.base_hook import BaseHook
            # extras is a deserialized json object
            extras = BaseHook.get_connection(self.gcp_conn_id).extra_dejson
            # key_file only gets set if a JSON key was written to a temp file
            # from the connection's `keyfile_dict`; otherwise None.
            key_file = self._set_env_from_extras(extras=extras)

        # Write config to a temp file and set the environment variable to point
        # to it. This is to avoid race conditions of reading/writing one file.
        with tempfile.NamedTemporaryFile() as conf_file:
            os.environ[KUBE_CONFIG_ENV_VAR] = conf_file.name
            # We call gcloud directly instead of using google-cloud-python api
            # because there is no way to write a kubernetes config to a file,
            # which is required by KubernetesPodOperator.
            # gcloud honours the `KUBECONFIG` env variable for where to save
            # the kubernetes config file.
            subprocess.check_call(
                ["gcloud", "container", "clusters", "get-credentials",
                 self.cluster_name,
                 "--zone", self.location,
                 "--project", self.project_id])

            # The key file was created with delete=False (so gcloud could read
            # it by path), therefore closing it does NOT remove it. Delete it
            # explicitly so the service-account key is not leaked on disk.
            if key_file:
                key_file.close()
                if os.path.exists(key_file.name):
                    os.remove(key_file.name)

            # Tell `KubernetesPodOperator` where the config file is located
            self.config_file = os.environ[KUBE_CONFIG_ENV_VAR]
            return super().execute(context)

    def _set_env_from_extras(self, extras):
        """
        Sets the environment variable `GOOGLE_APPLICATION_CREDENTIALS` with either:

        - The path to the keyfile from the specified connection id
        - A generated file's path if the user specified JSON in the connection
          id. The caller is responsible for deleting this file after use
          (see ``execute``), since it is created with ``delete=False``.

        The environment variable is used inside the gcloud command to determine
        the correct service account to use.
        """
        key_path = self._get_field(extras, 'key_path', False)
        keyfile_json_str = self._get_field(extras, 'keyfile_dict', False)

        if not key_path and not keyfile_json_str:
            self.log.info('Using gcloud with application default credentials.')
            return None
        elif key_path:
            os.environ[CREDENTIALS] = key_path
            return None
        else:
            # Write service account JSON to a secure file for gcloud to reference.
            # delete=False keeps the file readable by path for the subprocess;
            # execute() removes it after the gcloud call.
            service_key = tempfile.NamedTemporaryFile(delete=False)
            service_key.write(keyfile_json_str.encode('utf-8'))
            os.environ[CREDENTIALS] = service_key.name
            # Return the file object so the caller can close and delete it.
            return service_key

    def _get_field(self, extras, field, default=None):
        """
        Fetches a field from extras, and returns it. This is some Airflow
        magic. The google_cloud_platform hook type adds custom UI elements
        to the hook page, which allow admins to specify service_account,
        key_path, etc. They get formatted as shown below.
        """
        long_f = 'extra__google_cloud_platform__{}'.format(field)
        if long_f in extras:
            return extras[long_f]
        else:
            self.log.info('Field %s not found in extras.', field)
            return default
| 40.855385 | 150 | 0.646483 |
import os
import subprocess
import tempfile
from typing import Union, Dict, Optional
from google.auth.environment_vars import CREDENTIALS
from google.cloud.container_v1.types import Cluster
from airflow import AirflowException
from airflow.gcp.hooks.kubernetes_engine import GKEClusterHook
from airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class GKEClusterDeleteOperator(BaseOperator):
template_fields = ['project_id', 'gcp_conn_id', 'name', 'location', 'api_version']
@apply_defaults
def __init__(self,
name: str,
location: str,
project_id: str = None,
gcp_conn_id: str = 'google_cloud_default',
api_version: str = 'v2',
*args,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.location = location
self.api_version = api_version
self.name = name
self._check_input()
def _check_input(self):
if not all([self.project_id, self.name, self.location]):
self.log.error(
'One of (project_id, name, location) is missing or incorrect')
raise AirflowException('Operator has incorrect or missing input.')
def execute(self, context):
hook = GKEClusterHook(gcp_conn_id=self.gcp_conn_id, location=self.location)
delete_result = hook.delete_cluster(name=self.name, project_id=self.project_id)
return delete_result
class GKEClusterCreateOperator(BaseOperator):
template_fields = ['project_id', 'gcp_conn_id', 'location', 'api_version', 'body']
@apply_defaults
def __init__(self,
location: str,
body: Optional[Union[Dict, Cluster]],
project_id: str = None,
gcp_conn_id: str = 'google_cloud_default',
api_version: str = 'v2',
*args,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.location = location
self.api_version = api_version
self.body = body
self._check_input()
def _check_input(self):
if not all([self.project_id, self.location, self.body]) or not (
(isinstance(self.body, dict) and "name" in self.body and "initial_node_count" in self.body) or
(getattr(self.body, "name", None) and getattr(self.body, "initial_node_count", None))
):
self.log.error(
"One of (project_id, location, body, body['name'], "
"body['initial_node_count']) is missing or incorrect"
)
raise AirflowException("Operator has incorrect or missing input.")
def execute(self, context):
hook = GKEClusterHook(gcp_conn_id=self.gcp_conn_id, location=self.location)
create_op = hook.create_cluster(cluster=self.body, project_id=self.project_id)
return create_op
KUBE_CONFIG_ENV_VAR = "KUBECONFIG"
class GKEPodOperator(KubernetesPodOperator):
template_fields = ('project_id', 'location',
'cluster_name') + KubernetesPodOperator.template_fields
@apply_defaults
def __init__(self,
project_id: str,
location: str,
cluster_name: str,
gcp_conn_id: str = 'google_cloud_default',
*args,
**kwargs):
super().__init__(*args, **kwargs)
self.project_id = project_id
self.location = location
self.cluster_name = cluster_name
self.gcp_conn_id = gcp_conn_id
def execute(self, context):
key_file = None
if self.gcp_conn_id:
from airflow.hooks.base_hook import BaseHook
extras = BaseHook.get_connection(self.gcp_conn_id).extra_dejson
key_file = self._set_env_from_extras(extras=extras)
with tempfile.NamedTemporaryFile() as conf_file:
os.environ[KUBE_CONFIG_ENV_VAR] = conf_file.name
subprocess.check_call(
["gcloud", "container", "clusters", "get-credentials",
self.cluster_name,
"--zone", self.location,
"--project", self.project_id])
if key_file:
key_file.close()
# Tell `KubernetesPodOperator` where the config file is located
self.config_file = os.environ[KUBE_CONFIG_ENV_VAR]
return super().execute(context)
def _set_env_from_extras(self, extras):
key_path = self._get_field(extras, 'key_path', False)
keyfile_json_str = self._get_field(extras, 'keyfile_dict', False)
if not key_path and not keyfile_json_str:
self.log.info('Using gcloud with application default credentials.')
return None
elif key_path:
os.environ[CREDENTIALS] = key_path
return None
else:
# Write service account JSON to secure file for gcloud to reference
service_key = tempfile.NamedTemporaryFile(delete=False)
service_key.write(keyfile_json_str.encode('utf-8'))
os.environ[CREDENTIALS] = service_key.name
# Return file object to have a pointer to close after use,
# thus deleting from file system.
return service_key
def _get_field(self, extras, field, default=None):
long_f = 'extra__google_cloud_platform__{}'.format(field)
if long_f in extras:
return extras[long_f]
else:
self.log.info('Field %s not found in extras.', field)
return default
| true | true |
f7fd05639b438cdae4d7aae8cad7a4e0733f299b | 14,563 | py | Python | dnn_utils.py | santamm/DeepNet | fd05804200eb1bd62fb3a80a793b22794e4ec7d2 | [
"MIT"
] | null | null | null | dnn_utils.py | santamm/DeepNet | fd05804200eb1bd62fb3a80a793b22794e4ec7d2 | [
"MIT"
] | null | null | null | dnn_utils.py | santamm/DeepNet | fd05804200eb1bd62fb3a80a793b22794e4ec7d2 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import h5py
def sigmoid(Z):
"""
Implements the sigmoid activation in numpy
Arguments:
Z -- numpy array of any shape
Returns:
A -- output of sigmoid(z), same shape as Z
cache -- returns Z as well, useful during backpropagation
"""
A = 1/(1+np.exp(-Z))
cache = Z
return A, cache
def relu(Z):
    """
    Apply the rectified linear unit element-wise: max(0, z).

    Arguments:
    Z -- output of the linear layer, any shape

    Returns:
    A -- ReLU(Z), same shape as Z
    cache -- Z itself, kept for the backward pass
    """
    A = np.where(Z > 0, Z, 0)
    assert A.shape == Z.shape
    return A, Z
def relu_backward(dA, cache):
    """
    Backward pass for a single ReLU unit.

    Arguments:
    dA -- post-activation gradient, any shape
    cache -- Z from the forward pass

    Returns:
    dZ -- gradient of the cost with respect to Z (dA where Z > 0, else 0)
    """
    Z = cache
    # ReLU passes the gradient through only where the pre-activation was positive.
    dZ = np.where(Z > 0, dA, 0)
    assert dZ.shape == Z.shape
    return dZ
def sigmoid_backward(dA, cache):
    """
    Backward pass for a single sigmoid unit.

    Arguments:
    dA -- post-activation gradient, any shape
    cache -- Z from the forward pass

    Returns:
    dZ -- gradient of the cost with respect to Z, i.e. dA * s * (1 - s)
          where s = sigmoid(Z)
    """
    sig = 1.0 / (1.0 + np.exp(-cache))
    dZ = dA * sig * (1.0 - sig)
    assert dZ.shape == cache.shape
    return dZ
def load_data(train_dataset, test_dataset):
    """
    Load the train/test cat-vs-non-cat datasets from two HDF5 files.

    Arguments:
    train_dataset -- path to the training-set HDF5 file
    test_dataset -- path to the test-set HDF5 file

    Returns:
    train_set_x, train_set_y, test_set_x, test_set_y, classes
    where the label arrays are reshaped to row vectors of shape (1, m).
    """
    train_h5 = h5py.File(train_dataset, "r")
    test_h5 = h5py.File(test_dataset, "r")

    train_x = np.array(train_h5["train_set_x"][:])   # training features
    train_y = np.array(train_h5["train_set_y"][:]).reshape(1, -1)
    test_x = np.array(test_h5["test_set_x"][:])      # test features
    test_y = np.array(test_h5["test_set_y"][:]).reshape(1, -1)
    classes = np.array(test_h5["list_classes"][:])   # class-name bytes

    return train_x, train_y, test_x, test_y, classes
def initialize_parameters(n_x, n_h, n_y):
    """
    Initialize weights for a 2-layer network.

    Arguments:
    n_x -- size of the input layer
    n_h -- size of the hidden layer
    n_y -- size of the output layer

    Returns:
    parameters -- dict with W1 (n_h, n_x), b1 (n_h, 1), W2 (n_y, n_h), b2 (n_y, 1);
                  weights are small Gaussians, biases start at zero.
    """
    np.random.seed(1)  # fixed seed for reproducibility
    # NOTE: dict-literal evaluation order preserves the randn call sequence.
    return {
        "W1": np.random.randn(n_h, n_x) * 0.01,
        "b1": np.zeros((n_h, 1)),
        "W2": np.random.randn(n_y, n_h) * 0.01,
        "b2": np.zeros((n_y, 1)),
    }
def initialize_parameters_deep(layer_dims):
    """
    Initialize weights for an L-layer network.

    Arguments:
    layer_dims -- list with the size of each layer (including the input layer)

    Returns:
    parameters -- dict with "W1".."WL" and "b1".."bL";
                  Wl has shape (layer_dims[l], layer_dims[l-1]) and is scaled
                  by 1/sqrt(fan_in) (He-style scaling), bl is zeros.
    """
    np.random.seed(1)  # fixed seed for reproducibility
    parameters = {}
    for layer in range(1, len(layer_dims)):
        fan_in = layer_dims[layer - 1]
        parameters["W" + str(layer)] = (
            np.random.randn(layer_dims[layer], fan_in) / np.sqrt(fan_in))
        parameters["b" + str(layer)] = np.zeros((layer_dims[layer], 1))
    return parameters
def linear_forward(A, W, b):
    """
    Linear part of a layer's forward propagation: Z = W A + b.

    Arguments:
    A -- activations from the previous layer, shape (n_prev, m)
    W -- weight matrix, shape (n_curr, n_prev)
    b -- bias column vector, shape (n_curr, 1)

    Returns:
    Z -- pre-activation values, shape (n_curr, m)
    cache -- (A, W, b) for the backward pass
    """
    Z = np.dot(W, A) + b
    assert Z.shape == (W.shape[0], A.shape[1])
    return Z, (A, W, b)
def linear_activation_forward(A_prev, W, b, activation):
    """
    Forward propagation for one LINEAR->ACTIVATION layer.

    Arguments:
    A_prev -- activations from the previous layer, shape (n_prev, m)
    W -- weight matrix, shape (n_curr, n_prev)
    b -- bias vector, shape (n_curr, 1)
    activation -- "sigmoid" or "relu"

    Returns:
    A -- post-activation values
    cache -- (linear_cache, activation_cache) for the backward pass
    """
    # The linear step is identical for both activations, so compute it once.
    Z, linear_cache = linear_forward(A_prev, W, b)
    if activation == "relu":
        A, activation_cache = relu(Z)
    elif activation == "sigmoid":
        A, activation_cache = sigmoid(Z)
    assert A.shape == (W.shape[0], A_prev.shape[1])
    return A, (linear_cache, activation_cache)
def L_model_forward(X, parameters):
    """
    Forward propagation for [LINEAR->RELU]*(L-1) -> LINEAR->SIGMOID.

    Arguments:
    X -- input data, shape (input size, m)
    parameters -- output of initialize_parameters_deep()

    Returns:
    AL -- final (sigmoid) activations, shape (1, m)
    caches -- list of per-layer caches, indexed 0..L-1
    """
    caches = []
    A = X
    num_layers = len(parameters) // 2  # each layer has a W and a b entry

    # Hidden layers: LINEAR -> RELU.
    for layer in range(1, num_layers):
        A, cache = linear_activation_forward(
            A, parameters["W" + str(layer)], parameters["b" + str(layer)],
            activation="relu")
        caches.append(cache)

    # Output layer: LINEAR -> SIGMOID.
    AL, cache = linear_activation_forward(
        A, parameters["W" + str(num_layers)], parameters["b" + str(num_layers)],
        activation="sigmoid")
    caches.append(cache)

    assert AL.shape == (1, X.shape[1])
    return AL, caches
def compute_cost(AL, Y):
    """
    Cross-entropy cost over m examples.

    Arguments:
    AL -- predicted probabilities, shape (1, m)
    Y -- true labels (0/1), shape (1, m)

    Returns:
    cost -- scalar cross-entropy cost
    """
    m = Y.shape[1]
    # Average negative log-likelihood of the correct labels.
    log_likelihood = Y * np.log(AL) + (1 - Y) * np.log(1 - AL)
    cost = -np.sum(log_likelihood) / m
    assert cost.shape == ()
    return cost
def linear_backward(dZ, cache):
    """
    Linear portion of backward propagation for a single layer.

    Arguments:
    dZ -- gradient of the cost w.r.t. the layer's linear output
    cache -- (A_prev, W, b) from the forward pass

    Returns:
    dA_prev -- gradient w.r.t. the previous activations, same shape as A_prev
    dW -- gradient w.r.t. W, same shape as W
    db -- gradient w.r.t. b, same shape as b
    """
    A_prev, W, b = cache
    m = A_prev.shape[1]

    dW = 1. / m * np.dot(dZ, A_prev.T)
    db = 1. / m * np.sum(dZ, axis=1, keepdims=True)
    dA_prev = np.dot(W.T, dZ)

    assert dA_prev.shape == A_prev.shape
    assert dW.shape == W.shape
    assert db.shape == b.shape
    return dA_prev, dW, db
def linear_activation_backward(dA, cache, activation):
    """
    Backward propagation for one LINEAR->ACTIVATION layer.

    Arguments:
    dA -- post-activation gradient for the current layer
    cache -- (linear_cache, activation_cache) from the forward pass
    activation -- "sigmoid" or "relu"

    Returns:
    dA_prev, dW, db -- gradients w.r.t. the previous activation, W, and b
    """
    linear_cache, activation_cache = cache

    # First undo the activation, then the linear step.
    if activation == "sigmoid":
        dZ = sigmoid_backward(dA, activation_cache)
        dA_prev, dW, db = linear_backward(dZ, linear_cache)
    elif activation == "relu":
        dZ = relu_backward(dA, activation_cache)
        dA_prev, dW, db = linear_backward(dZ, linear_cache)

    return dA_prev, dW, db
def L_model_backward(AL, Y, caches):
    """
    Backward propagation for [LINEAR->RELU]*(L-1) -> LINEAR->SIGMOID.

    Arguments:
    AL -- output of the forward pass, shape (1, m)
    Y -- true labels (0/1), same logical shape as AL
    caches -- list of per-layer caches from L_model_forward

    Returns:
    grads -- dict with "dA<l>", "dW<l>", "db<l>" entries
    """
    grads = {}
    num_layers = len(caches)
    Y = Y.reshape(AL.shape)  # ensure Y broadcasts exactly against AL

    # Derivative of the cross-entropy cost w.r.t. AL.
    dAL = -(np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))

    # Output layer: SIGMOID -> LINEAR.
    (grads["dA" + str(num_layers - 1)],
     grads["dW" + str(num_layers)],
     grads["db" + str(num_layers)]) = linear_activation_backward(
        dAL, caches[num_layers - 1], activation="sigmoid")

    # Hidden layers, walking backwards: RELU -> LINEAR.
    for layer in range(num_layers - 2, -1, -1):
        dA_prev, dW, db = linear_activation_backward(
            grads["dA" + str(layer + 1)], caches[layer], activation="relu")
        grads["dA" + str(layer)] = dA_prev
        grads["dW" + str(layer + 1)] = dW
        grads["db" + str(layer + 1)] = db

    return grads
def update_parameters(parameters, grads, learning_rate):
    """
    One gradient-descent step on every W and b.

    Arguments:
    parameters -- dict of "W<l>" / "b<l>" arrays
    grads -- dict of "dW<l>" / "db<l>" arrays from L_model_backward
    learning_rate -- step size

    Returns:
    parameters -- the same dict with updated (re-bound) W and b arrays
    """
    num_layers = len(parameters) // 2  # each layer contributes a W and a b

    for layer in range(1, num_layers + 1):
        w_key, b_key = "W" + str(layer), "b" + str(layer)
        # Re-bind to new arrays (no in-place mutation of the old ones).
        parameters[w_key] = parameters[w_key] - learning_rate * grads["d" + w_key]
        parameters[b_key] = parameters[b_key] - learning_rate * grads["d" + b_key]

    return parameters
def predict(X, y, parameters):
    """
    Predict binary labels with a trained L-layer network and score accuracy.

    Arguments:
    X -- examples to label, shape (input size, m)
    y -- true labels, shape (1, m)
    parameters -- trained parameters dict

    Returns:
    p -- 0/1 predictions, shape (1, m)
    accuracy -- fraction of predictions matching y
    """
    m = X.shape[1]

    # Forward pass, then threshold the output probabilities at 0.5.
    probas, _ = L_model_forward(X, parameters)
    p = (probas > 0.5).astype(float)

    accuracy = np.sum((p == y) / m)
    return p, accuracy
def print_mislabeled_images(classes, X, y, p):
    """
    Plots images where predictions and truth were different.

    classes -- array of class-name byte strings, indexed by label
    X -- dataset, one flattened 64x64x3 image per column
    y -- true labels (0/1), shape (1, m)
    p -- predictions (0/1), shape (1, m)
    """
    # p + y == 1 exactly when prediction and label disagree (one is 0, one is 1).
    a = p + y
    mislabeled_indices = np.asarray(np.where(a == 1))
    plt.rcParams['figure.figsize'] = (40.0, 40.0) # set default size of plots
    num_images = len(mislabeled_indices[0])
    plt.figure(figsize=(10,10))
    for i in range(num_images):
        # Column 1 of np.where's output holds the example index.
        index = mislabeled_indices[1][i]

        plt.subplot(2, num_images, i + 1)
        # Columns are flattened images; restore the 64x64 RGB shape to display.
        plt.imshow(X[:,index].reshape(64,64,3), interpolation='nearest')
        plt.axis('off')
        plt.title("Prediction: " + classes[int(p[0,index])].decode("utf-8") + " \n Class: " + classes[y[0,index]].decode("utf-8"))
    plt.show()
| 32.947964 | 143 | 0.628373 | import numpy as np
import matplotlib.pyplot as plt
import h5py
def sigmoid(Z):
A = 1/(1+np.exp(-Z))
cache = Z
return A, cache
def relu(Z):
A = np.maximum(0,Z)
assert(A.shape == Z.shape)
cache = Z
return A, cache
def relu_backward(dA, cache):
Z = cache
dZ = np.array(dA, copy=True)
dZ[Z <= 0] = 0
assert (dZ.shape == Z.shape)
return dZ
def sigmoid_backward(dA, cache):
Z = cache
s = 1/(1+np.exp(-Z))
dZ = dA * s * (1-s)
assert (dZ.shape == Z.shape)
return dZ
def load_data(train_dataset, test_dataset):
train_dataset = h5py.File(train_dataset, "r")
train_set_x_orig = np.array(train_dataset["train_set_x"][:])
train_set_y_orig = np.array(train_dataset["train_set_y"][:])
test_dataset = h5py.File(test_dataset, "r")
test_set_x_orig = np.array(test_dataset["test_set_x"][:])
test_set_y_orig = np.array(test_dataset["test_set_y"][:])
classes = np.array(test_dataset["list_classes"][:])
train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes
def initialize_parameters(n_x, n_h, n_y):
np.random.seed(1)
W1 = np.random.randn(n_h, n_x)*0.01
b1 = np.zeros((n_h, 1))
W2 = np.random.randn(n_y, n_h)*0.01
b2 = np.zeros((n_y, 1))
assert(W1.shape == (n_h, n_x))
assert(b1.shape == (n_h, 1))
assert(W2.shape == (n_y, n_h))
assert(b2.shape == (n_y, 1))
parameters = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2}
return parameters
def initialize_parameters_deep(layer_dims):
np.random.seed(1)
parameters = {}
L = len(layer_dims)
for l in range(1, L):
parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l-1]) / np.sqrt(layer_dims[l-1])
parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))
assert(parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l-1]))
assert(parameters['b' + str(l)].shape == (layer_dims[l], 1))
return parameters
def linear_forward(A, W, b):
Z = W.dot(A) + b
assert(Z.shape == (W.shape[0], A.shape[1]))
cache = (A, W, b)
return Z, cache
def linear_activation_forward(A_prev, W, b, activation):
if activation == "sigmoid":
Z, linear_cache = linear_forward(A_prev, W, b)
A, activation_cache = sigmoid(Z)
elif activation == "relu":
Z, linear_cache = linear_forward(A_prev, W, b)
A, activation_cache = relu(Z)
assert (A.shape == (W.shape[0], A_prev.shape[1]))
cache = (linear_cache, activation_cache)
return A, cache
def L_model_forward(X, parameters):
caches = []
A = X
L = len(parameters) // 2
for l in range(1, L):
A_prev = A
A, cache = linear_activation_forward(A_prev, parameters['W' + str(l)], parameters['b' + str(l)], activation = "relu")
caches.append(cache)
AL, cache = linear_activation_forward(A, parameters['W' + str(L)], parameters['b' + str(L)], activation = "sigmoid")
caches.append(cache)
assert(AL.shape == (1,X.shape[1]))
return AL, caches
def compute_cost(AL, Y):
m = Y.shape[1]
cost = (1./m) * (-np.dot(Y,np.log(AL).T) - np.dot(1-Y, np.log(1-AL).T))
cost = np.squeeze(cost)
assert(cost.shape == ())
return cost
def linear_backward(dZ, cache):
A_prev, W, b = cache
m = A_prev.shape[1]
dW = 1./m * np.dot(dZ,A_prev.T)
db = 1./m * np.sum(dZ, axis = 1, keepdims = True)
dA_prev = np.dot(W.T,dZ)
assert (dA_prev.shape == A_prev.shape)
assert (dW.shape == W.shape)
assert (db.shape == b.shape)
return dA_prev, dW, db
def linear_activation_backward(dA, cache, activation):
linear_cache, activation_cache = cache
if activation == "relu":
dZ = relu_backward(dA, activation_cache)
dA_prev, dW, db = linear_backward(dZ, linear_cache)
elif activation == "sigmoid":
dZ = sigmoid_backward(dA, activation_cache)
dA_prev, dW, db = linear_backward(dZ, linear_cache)
return dA_prev, dW, db
def L_model_backward(AL, Y, caches):
grads = {}
L = len(caches) # the number of layers
m = AL.shape[1]
Y = Y.reshape(AL.shape) # after this line, Y is the same shape as AL
# Initializing the backpropagation
dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))
# Lth layer (SIGMOID -> LINEAR) gradients. Inputs: "AL, Y, caches". Outputs: "grads["dAL"], grads["dWL"], grads["dbL"]
current_cache = caches[L-1]
grads["dA" + str(L-1)], grads["dW" + str(L)], grads["db" + str(L)] = linear_activation_backward(dAL, current_cache, activation = "sigmoid")
for l in reversed(range(L-1)):
# lth layer: (RELU -> LINEAR) gradients.
current_cache = caches[l]
dA_prev_temp, dW_temp, db_temp = linear_activation_backward(grads["dA" + str(l + 1)], current_cache, activation = "relu")
grads["dA" + str(l)] = dA_prev_temp
grads["dW" + str(l + 1)] = dW_temp
grads["db" + str(l + 1)] = db_temp
return grads
def update_parameters(parameters, grads, learning_rate):
    """
    Apply one gradient-descent step to every parameter.

    parameters    -- dict with keys "W1", "b1", ..., "WL", "bL"
    grads         -- dict with the matching "dW{l}" / "db{l}" gradients
    learning_rate -- step size of the descent

    Returns ``parameters`` (the same dict, updated in place).
    """
    num_layers = len(parameters) // 2  # two entries (W and b) per layer
    for layer in range(1, num_layers + 1):
        for prefix in ("W", "b"):
            key = prefix + str(layer)
            parameters[key] = parameters[key] - learning_rate * grads["d" + key]
    return parameters
def predict(X, y, parameters):
    """
    Predict 0/1 labels with the trained L-layer network and compute accuracy.

    X          -- input data, one example per column
    y          -- true 0/1 labels, shape (1, m)
    parameters -- trained network parameters ("W1", "b1", ...)

    Returns ``(p, accuracy)`` where p is the (1, m) array of predictions.
    """
    m = X.shape[1]
    # NOTE(review): n is computed but never used.
    n = len(parameters) // 2 # number of layers in the neural network
    p = np.zeros((1,m))
    # Forward propagation
    probas, caches = L_model_forward(X, parameters)
    # convert probas to 0/1 predictions by thresholding the sigmoid
    # output at 0.5
    for i in range(0, probas.shape[1]):
        if probas[0,i] > 0.5:
            p[0,i] = 1
        else:
            p[0,i] = 0
    #print results
    #print ("predictions: " + str(p))
    #print ("true labels: " + str(y))
    # Fraction of columns where prediction equals label.
    accuracy = np.sum((p == y)/m)
    #print("Accuracy: " + str(accuracy))
    return p, accuracy
def print_mislabeled_images(classes, X, y, p):
    """
    Plot every image the model mislabeled.

    classes -- array of class-name byte strings, indexed by label value
    X       -- flattened images, one column per example
    y       -- true 0/1 labels, shape (1, m)
    p       -- predicted 0/1 labels, shape (1, m)
    """
    # With 0/1 labels, p + y == 1 exactly where prediction and truth disagree.
    a = p + y
    mislabeled_indices = np.asarray(np.where(a == 1))
    plt.rcParams['figure.figsize'] = (40.0, 40.0) # set default size of plots
    num_images = len(mislabeled_indices[0])
    plt.figure(figsize=(10,10))
    for i in range(num_images):
        index = mislabeled_indices[1][i]
        plt.subplot(2, num_images, i + 1)
        # assumes each column of X is a flattened 64x64 RGB image -- TODO confirm
        plt.imshow(X[:,index].reshape(64,64,3), interpolation='nearest')
        plt.axis('off')
        plt.title("Prediction: " + classes[int(p[0,index])].decode("utf-8") + " \n Class: " + classes[y[0,index]].decode("utf-8"))
    plt.show()
| true | true |
f7fd059a13914c97c85675154d9a8ef45a5e7a71 | 807 | py | Python | analyse.py | lrobidou/kbf | 443da37c9c9c450292bb43bd149c2a32fde1acf0 | [
"BSD-2-Clause"
] | null | null | null | analyse.py | lrobidou/kbf | 443da37c9c9c450292bb43bd149c2a32fde1acf0 | [
"BSD-2-Clause"
] | null | null | null | analyse.py | lrobidou/kbf | 443da37c9c9c450292bb43bd149c2a32fde1acf0 | [
"BSD-2-Clause"
] | null | null | null | # don't mind me, I'm a quick and dirty python script
# I extract false positive rate from the output files of kbf
def lire(filemane):
    """
    Tally confusion-matrix cells from a kbf output file.

    Each line is whitespace separated; column index 1 holds the true
    label ("0" or not) and column index 2 the predicted label.

    Returns the tuple ``(TP, TN, FP, FN)``.
    """
    counts = {"TP": 0, "TN": 0, "FP": 0, "FN": 0}
    with open(filemane, "r") as handle:
        for line in handle:
            fields = line.split()
            truth_is_zero = fields[1] == "0"
            pred_is_zero = fields[2] == "0"
            if truth_is_zero and pred_is_zero:
                counts["TN"] += 1
            elif truth_is_zero:
                counts["FN"] += 1
            elif pred_is_zero:
                counts["FP"] += 1
            else:
                counts["TP"] += 1
    return counts["TP"], counts["TN"], counts["FP"], counts["FN"]
def analyser(filename):
    """
    Compute the false-positive rate (percent) of one kbf result file.

    Returns the tuple ``(filename, fp_rate_percent)``.
    """
    TP, TN, FP, FN = lire(filename)
    fp_rate = (FP / (FP + TN)) * 100
    return filename, fp_rate
def main():
    """Print the false-positive rate of every filter variant under test."""
    for result_file in ("test_classic.txt", "test_kbf1.txt", "test_kbf2.txt"):
        print(analyser(result_file))
| 25.21875 | 60 | 0.469641 |
def lire(filemane):
    """
    Read a kbf result file and count confusion-matrix cells.

    Column index 1 of each whitespace-separated line is the true label,
    column index 2 the predicted label; "0" means negative.

    Returns the tuple ``(TP, TN, FP, FN)``.
    """
    TP, TN, FP, FN = 0, 0, 0, 0
    with open(filemane, "r") as fichier:
        for ligne in fichier:
            cols = ligne.split()
            # True label is negative.
            if cols[1] == "0":
                if cols[2] == "0":
                    TN += 1
                else:
                    FN += 1
            # True label is positive.
            else:
                if cols[2] == "0":
                    FP += 1
                else:
                    TP += 1
    return TP, TN, FP, FN
def analyser(filename):
    """Return ``(filename, false-positive rate in percent)`` for one file."""
    TP, TN, FP, FN = lire(filename)
    return filename, (FP / (FP + TN)) * 100
def main():
    """Print the false-positive rate of each filter variant's output file."""
    print(analyser("test_classic.txt"))
    print(analyser("test_kbf1.txt"))
    print(analyser("test_kbf2.txt"))
| true | true |
f7fd05e739992e4c1f58efd11d8a7dfa9ef68ff8 | 65,714 | py | Python | env/lib/python3.8/site-packages/attr/_make.py | avdhari/enigma | b7e965a91ca5f0e929c4c719d695f15ccb8b5a2c | [
"MIT"
] | 445 | 2019-01-26T13:50:26.000Z | 2022-03-18T05:17:38.000Z | env/lib/python3.8/site-packages/attr/_make.py | avdhari/enigma | b7e965a91ca5f0e929c4c719d695f15ccb8b5a2c | [
"MIT"
] | 242 | 2019-01-29T15:48:27.000Z | 2022-03-31T22:09:21.000Z | env/lib/python3.8/site-packages/attr/_make.py | avdhari/enigma | b7e965a91ca5f0e929c4c719d695f15ccb8b5a2c | [
"MIT"
] | 31 | 2019-03-10T09:51:27.000Z | 2022-02-14T23:11:12.000Z | from __future__ import absolute_import, division, print_function
import copy
import hashlib
import linecache
import sys
import threading
import warnings
from operator import itemgetter
from . import _config
from ._compat import (
PY2,
isclass,
iteritems,
metadata_proxy,
ordered_dict,
set_closure_cell,
)
from .exceptions import (
DefaultAlreadySetError,
FrozenInstanceError,
NotAnAttrsClassError,
PythonTooOldError,
UnannotatedAttributeError,
)
# This is used at least twice, so cache it here.
_obj_setattr = object.__setattr__
_init_converter_pat = "__attr_converter_{}"
_init_factory_pat = "__attr_factory_{}"
_tuple_property_pat = (
" {attr_name} = _attrs_property(_attrs_itemgetter({index}))"
)
_classvar_prefixes = ("typing.ClassVar", "t.ClassVar", "ClassVar")
# we don't use a double-underscore prefix because that triggers
# name mangling when trying to create a slot for the field
# (when slots=True)
_hash_cache_field = "_attrs_cached_hash"
_empty_metadata_singleton = metadata_proxy({})
class _Nothing(object):
"""
Sentinel class to indicate the lack of a value when ``None`` is ambiguous.
``_Nothing`` is a singleton. There is only ever one of it.
"""
_singleton = None
def __new__(cls):
if _Nothing._singleton is None:
_Nothing._singleton = super(_Nothing, cls).__new__(cls)
return _Nothing._singleton
def __repr__(self):
return "NOTHING"
NOTHING = _Nothing()
"""
Sentinel to indicate the lack of a value when ``None`` is ambiguous.
"""
def attrib(
    default=NOTHING,
    validator=None,
    repr=True,
    cmp=True,
    hash=None,
    init=True,
    convert=None,
    metadata=None,
    type=None,
    converter=None,
    factory=None,
    kw_only=False,
):
    """
    Create a new attribute on a class.
    .. warning::
        Does *not* do anything unless the class is also decorated with
        :func:`attr.s`!
    :param default: A value that is used if an ``attrs``-generated ``__init__``
        is used and no value is passed while instantiating or the attribute is
        excluded using ``init=False``.
        If the value is an instance of :class:`Factory`, its callable will be
        used to construct a new value (useful for mutable data types like lists
        or dicts).
        If a default is not set (or set manually to ``attr.NOTHING``), a value
        *must* be supplied when instantiating; otherwise a :exc:`TypeError`
        will be raised.
        The default can also be set using decorator notation as shown below.
    :type default: Any value.
    :param callable factory: Syntactic sugar for
        ``default=attr.Factory(callable)``.
    :param validator: :func:`callable` that is called by ``attrs``-generated
        ``__init__`` methods after the instance has been initialized. They
        receive the initialized instance, the :class:`Attribute`, and the
        passed value.
        The return value is *not* inspected so the validator has to throw an
        exception itself.
        If a ``list`` is passed, its items are treated as validators and must
        all pass.
        Validators can be globally disabled and re-enabled using
        :func:`get_run_validators`.
        The validator can also be set using decorator notation as shown below.
    :type validator: ``callable`` or a ``list`` of ``callable``\\ s.
    :param bool repr: Include this attribute in the generated ``__repr__``
        method.
    :param bool cmp: Include this attribute in the generated comparison methods
        (``__eq__`` et al).
    :param hash: Include this attribute in the generated ``__hash__``
        method. If ``None`` (default), mirror *cmp*'s value. This is the
        correct behavior according the Python spec. Setting this value to
        anything else than ``None`` is *discouraged*.
    :type hash: ``bool`` or ``None``
    :param bool init: Include this attribute in the generated ``__init__``
        method. It is possible to set this to ``False`` and set a default
        value. In that case this attributed is unconditionally initialized
        with the specified default value or factory.
    :param callable converter: :func:`callable` that is called by
        ``attrs``-generated ``__init__`` methods to converter attribute's value
        to the desired format. It is given the passed-in value, and the
        returned value will be used as the new value of the attribute. The
        value is converted before being passed to the validator, if any.
    :param metadata: An arbitrary mapping, to be used by third-party
        components. See :ref:`extending_metadata`.
    :param type: The type of the attribute. In Python 3.6 or greater, the
        preferred method to specify the type is using a variable annotation
        (see `PEP 526 <https://www.python.org/dev/peps/pep-0526/>`_).
        This argument is provided for backward compatibility.
        Regardless of the approach used, the type will be stored on
        ``Attribute.type``.
        Please note that ``attrs`` doesn't do anything with this metadata by
        itself. You can use it as part of your own code or for
        :doc:`static type checking <types>`.
    :param kw_only: Make this attribute keyword-only (Python 3+)
        in the generated ``__init__`` (if ``init`` is ``False``, this
        parameter is ignored).
    .. versionadded:: 15.2.0 *convert*
    .. versionadded:: 16.3.0 *metadata*
    .. versionchanged:: 17.1.0 *validator* can be a ``list`` now.
    .. versionchanged:: 17.1.0
        *hash* is ``None`` and therefore mirrors *cmp* by default.
    .. versionadded:: 17.3.0 *type*
    .. deprecated:: 17.4.0 *convert*
    .. versionadded:: 17.4.0 *converter* as a replacement for the deprecated
        *convert* to achieve consistency with other noun-based arguments.
    .. versionadded:: 18.1.0
        ``factory=f`` is syntactic sugar for ``default=attr.Factory(f)``.
    .. versionadded:: 18.2.0 *kw_only*
    """
    # *hash* is tri-state (True/False/None); reject truthy non-bool values
    # like 1 explicitly, which "hash in (True, False, None)" would accept.
    if hash is not None and hash is not True and hash is not False:
        raise TypeError(
            "Invalid value for hash.  Must be True, False, or None."
        )
    # *convert* is the deprecated spelling of *converter*; accept it with a
    # warning but forbid passing both.
    if convert is not None:
        if converter is not None:
            raise RuntimeError(
                "Can't pass both `convert` and `converter`.  "
                "Please use `converter` only."
            )
        warnings.warn(
            "The `convert` argument is deprecated in favor of `converter`. "
            "It will be removed after 2019/01.",
            DeprecationWarning,
            stacklevel=2,
        )
        converter = convert
    # *factory* is sugar for default=Factory(factory).
    if factory is not None:
        if default is not NOTHING:
            raise ValueError(
                "The `default` and `factory` arguments are mutually "
                "exclusive."
            )
        if not callable(factory):
            raise ValueError("The `factory` argument must be a callable.")
        default = Factory(factory)
    if metadata is None:
        metadata = {}
    # The returned intermediate object is turned into an Attribute by
    # _transform_attrs when the class is processed by @attr.s.
    return _CountingAttr(
        default=default,
        validator=validator,
        repr=repr,
        cmp=cmp,
        hash=hash,
        init=init,
        converter=converter,
        metadata=metadata,
        type=type,
        kw_only=kw_only,
    )
def _make_attr_tuple_class(cls_name, attr_names):
"""
Create a tuple subclass to hold `Attribute`s for an `attrs` class.
The subclass is a bare tuple with properties for names.
class MyClassAttributes(tuple):
__slots__ = ()
x = property(itemgetter(0))
"""
attr_class_name = "{}Attributes".format(cls_name)
attr_class_template = [
"class {}(tuple):".format(attr_class_name),
" __slots__ = ()",
]
if attr_names:
for i, attr_name in enumerate(attr_names):
attr_class_template.append(
_tuple_property_pat.format(index=i, attr_name=attr_name)
)
else:
attr_class_template.append(" pass")
globs = {"_attrs_itemgetter": itemgetter, "_attrs_property": property}
eval(compile("\n".join(attr_class_template), "", "exec"), globs)
return globs[attr_class_name]
# Tuple class for extracted attributes from a class definition.
# `base_attrs` is a subset of `attrs`.
# Built with the same machinery used for per-class attribute tuples so the
# three fields can be accessed by name as well as by index.
_Attributes = _make_attr_tuple_class(
    "_Attributes",
    [
        # all attributes to build dunder methods for
        "attrs",
        # attributes that have been inherited
        "base_attrs",
        # map inherited attributes to their originating classes
        "base_attrs_map",
    ],
)
def _is_class_var(annot):
    """
    Check whether *annot* is a typing.ClassVar.
    The string comparison hack is used to avoid evaluating all string
    annotations which would put attrs-based classes at a performance
    disadvantage compared to plain old classes.
    """
    annotation_text = str(annot)
    return annotation_text.startswith(_classvar_prefixes)
def _get_annotations(cls):
"""
Get annotations for *cls*.
"""
anns = getattr(cls, "__annotations__", None)
if anns is None:
return {}
# Verify that the annotations aren't merely inherited.
for base_cls in cls.__mro__[1:]:
if anns is getattr(base_cls, "__annotations__", None):
return {}
return anns
def _counter_getter(e):
"""
Key function for sorting to avoid re-creating a lambda for every class.
"""
return e[1].counter
def _transform_attrs(cls, these, auto_attribs, kw_only):
    """
    Transform all `_CountingAttr`s on a class into `Attribute`s.
    If *these* is passed, use that and don't look for them on the class.
    If *kw_only* is true, force every attribute to be keyword-only.
    Return an `_Attributes`.
    """
    cd = cls.__dict__
    anns = _get_annotations(cls)
    # Phase 1: collect (name, _CountingAttr) pairs in definition order.
    if these is not None:
        ca_list = [(name, ca) for name, ca in iteritems(these)]
        if not isinstance(these, ordered_dict):
            ca_list.sort(key=_counter_getter)
    elif auto_attribs is True:
        # Annotated fields drive collection; a bare annotation becomes an
        # attrib(), an assigned value becomes its default.
        ca_names = {
            name
            for name, attr in cd.items()
            if isinstance(attr, _CountingAttr)
        }
        ca_list = []
        annot_names = set()
        # NOTE: `type` shadows the builtin here -- it's the annotation value.
        for attr_name, type in anns.items():
            if _is_class_var(type):
                continue
            annot_names.add(attr_name)
            a = cd.get(attr_name, NOTHING)
            if not isinstance(a, _CountingAttr):
                if a is NOTHING:
                    a = attrib()
                else:
                    a = attrib(default=a)
            ca_list.append((attr_name, a))
        # Any attr.ib() without an annotation is an error in auto mode.
        unannotated = ca_names - annot_names
        if len(unannotated) > 0:
            raise UnannotatedAttributeError(
                "The following `attr.ib`s lack a type annotation: "
                + ", ".join(
                    sorted(unannotated, key=lambda n: cd.get(n).counter)
                )
                + "."
            )
    else:
        ca_list = sorted(
            (
                (name, attr)
                for name, attr in cd.items()
                if isinstance(attr, _CountingAttr)
            ),
            key=lambda e: e[1].counter,
        )
    own_attrs = [
        Attribute.from_counting_attr(
            name=attr_name, ca=ca, type=anns.get(attr_name)
        )
        for attr_name, ca in ca_list
    ]
    # Phase 2: pull in attributes inherited from attrs-decorated bases.
    base_attrs = []
    base_attr_map = {}  # A dictionary of base attrs to their classes.
    taken_attr_names = {a.name: a for a in own_attrs}
    # Traverse the MRO and collect attributes.
    for base_cls in cls.__mro__[1:-1]:
        sub_attrs = getattr(base_cls, "__attrs_attrs__", None)
        if sub_attrs is not None:
            for a in sub_attrs:
                prev_a = taken_attr_names.get(a.name)
                # Only add an attribute if it hasn't been defined before.  This
                # allows for overwriting attribute definitions by subclassing.
                if prev_a is None:
                    base_attrs.append(a)
                    taken_attr_names[a.name] = a
                    base_attr_map[a.name] = base_cls
    attr_names = [a.name for a in base_attrs + own_attrs]
    AttrsClass = _make_attr_tuple_class(cls.__name__, attr_names)
    if kw_only:
        own_attrs = [a._assoc(kw_only=True) for a in own_attrs]
        base_attrs = [a._assoc(kw_only=True) for a in base_attrs]
    attrs = AttrsClass(base_attrs + own_attrs)
    # Phase 3: validate ordering constraints of the would-be __init__
    # signature (defaults before mandatory args, kw-only placement).
    had_default = False
    was_kw_only = False
    for a in attrs:
        if (
            was_kw_only is False
            and had_default is True
            and a.default is NOTHING
            and a.init is True
            and a.kw_only is False
        ):
            raise ValueError(
                "No mandatory attributes allowed after an attribute with a "
                "default value or factory.  Attribute in question: %r" % (a,)
            )
        elif (
            had_default is False
            and a.default is not NOTHING
            and a.init is not False
            and
            # Keyword-only attributes without defaults can be specified
            # after keyword-only attributes with defaults.
            a.kw_only is False
        ):
            had_default = True
        if was_kw_only is True and a.kw_only is False:
            raise ValueError(
                "Non keyword-only attributes are not allowed after a "
                "keyword-only attribute.  Attribute in question: {a!r}".format(
                    a=a
                )
            )
        if was_kw_only is False and a.init is True and a.kw_only is True:
            was_kw_only = True
    return _Attributes((attrs, base_attrs, base_attr_map))
def _frozen_setattrs(self, name, value):
    """
    Attached to frozen classes as __setattr__.
    Unconditionally raises :exc:`FrozenInstanceError`; *name* and *value*
    are ignored.
    """
    raise FrozenInstanceError()
def _frozen_delattrs(self, name):
    """
    Attached to frozen classes as __delattr__.
    Unconditionally raises :exc:`FrozenInstanceError`; *name* is ignored.
    """
    raise FrozenInstanceError()
class _ClassBuilder(object):
    """
    Iteratively build *one* class.
    The ``add_*`` methods accumulate generated dunder methods in
    ``_cls_dict``; :meth:`build_class` then either patches them onto the
    original class or builds a fresh ``__slots__`` class from them.
    """
    __slots__ = (
        "_cls",
        "_cls_dict",
        "_attrs",
        "_base_names",
        "_attr_names",
        "_slots",
        "_frozen",
        "_weakref_slot",
        "_cache_hash",
        "_has_post_init",
        "_delete_attribs",
        "_base_attr_map",
    )
    def __init__(
        self,
        cls,
        these,
        slots,
        frozen,
        weakref_slot,
        auto_attribs,
        kw_only,
        cache_hash,
    ):
        """
        Collect the class's attributes and record the build configuration.
        """
        attrs, base_attrs, base_map = _transform_attrs(
            cls, these, auto_attribs, kw_only
        )
        self._cls = cls
        # For slots classes we rebuild from the full namespace; for regular
        # classes we only collect the methods to set on the original.
        self._cls_dict = dict(cls.__dict__) if slots else {}
        self._attrs = attrs
        self._base_names = set(a.name for a in base_attrs)
        self._base_attr_map = base_map
        self._attr_names = tuple(a.name for a in attrs)
        self._slots = slots
        # A frozen ancestor makes the subclass effectively frozen too.
        self._frozen = frozen or _has_frozen_base_class(cls)
        self._weakref_slot = weakref_slot
        self._cache_hash = cache_hash
        self._has_post_init = bool(getattr(cls, "__attrs_post_init__", False))
        # When *these* is used we must not delete the class-body attr.ib()s.
        self._delete_attribs = not bool(these)
        self._cls_dict["__attrs_attrs__"] = self._attrs
        if frozen:
            self._cls_dict["__setattr__"] = _frozen_setattrs
            self._cls_dict["__delattr__"] = _frozen_delattrs
    def __repr__(self):
        return "<_ClassBuilder(cls={cls})>".format(cls=self._cls.__name__)
    def build_class(self):
        """
        Finalize class based on the accumulated configuration.
        Builder cannot be used after calling this method.
        """
        if self._slots is True:
            return self._create_slots_class()
        else:
            return self._patch_original_class()
    def _patch_original_class(self):
        """
        Apply accumulated methods and return the class.
        """
        cls = self._cls
        base_names = self._base_names
        # Clean class of attribute definitions (`attr.ib()`s).
        if self._delete_attribs:
            for name in self._attr_names:
                if (
                    name not in base_names
                    and getattr(cls, name, None) is not None
                ):
                    try:
                        delattr(cls, name)
                    except AttributeError:
                        # This can happen if a base class defines a class
                        # variable and we want to set an attribute with the
                        # same name by using only a type annotation.
                        pass
        # Attach our dunder methods.
        for name, value in self._cls_dict.items():
            setattr(cls, name, value)
        return cls
    def _create_slots_class(self):
        """
        Build and return a new class with a `__slots__` attribute.
        """
        base_names = self._base_names
        # Copy the namespace minus the attribute definitions themselves and
        # the members that must not exist on a slots class.
        cd = {
            k: v
            for k, v in iteritems(self._cls_dict)
            if k not in tuple(self._attr_names) + ("__dict__", "__weakref__")
        }
        weakref_inherited = False
        # Traverse the MRO to check for an existing __weakref__.
        for base_cls in self._cls.__mro__[1:-1]:
            if "__weakref__" in getattr(base_cls, "__dict__", ()):
                weakref_inherited = True
                break
        names = self._attr_names
        # Add a __weakref__ slot only if no class in the hierarchy already
        # provides one (two __weakref__ slots would be a TypeError).
        if (
            self._weakref_slot
            and "__weakref__" not in getattr(self._cls, "__slots__", ())
            and "__weakref__" not in names
            and not weakref_inherited
        ):
            names += ("__weakref__",)
        # We only add the names of attributes that aren't inherited.
        # Settings __slots__ to inherited attributes wastes memory.
        slot_names = [name for name in names if name not in base_names]
        if self._cache_hash:
            slot_names.append(_hash_cache_field)
        cd["__slots__"] = tuple(slot_names)
        qualname = getattr(self._cls, "__qualname__", None)
        if qualname is not None:
            cd["__qualname__"] = qualname
        # __weakref__ is not writable.
        state_attr_names = tuple(
            an for an in self._attr_names if an != "__weakref__"
        )
        def slots_getstate(self):
            """
            Automatically created by attrs.
            """
            return tuple(getattr(self, name) for name in state_attr_names)
        def slots_setstate(self, state):
            """
            Automatically created by attrs.
            """
            __bound_setattr = _obj_setattr.__get__(self, Attribute)
            for name, value in zip(state_attr_names, state):
                __bound_setattr(name, value)
        # slots and frozen require __getstate__/__setstate__ to work
        cd["__getstate__"] = slots_getstate
        cd["__setstate__"] = slots_setstate
        # Create new class based on old class and our methods.
        cls = type(self._cls)(self._cls.__name__, self._cls.__bases__, cd)
        # The following is a fix for
        # https://github.com/python-attrs/attrs/issues/102.  On Python 3,
        # if a method mentions `__class__` or uses the no-arg super(), the
        # compiler will bake a reference to the class in the method itself
        # as `method.__closure__`.  Since we replace the class with a
        # clone, we rewrite these references so it keeps working.
        for item in cls.__dict__.values():
            if isinstance(item, (classmethod, staticmethod)):
                # Class- and staticmethods hide their functions inside.
                # These might need to be rewritten as well.
                closure_cells = getattr(item.__func__, "__closure__", None)
            else:
                closure_cells = getattr(item, "__closure__", None)
            if not closure_cells:  # Catch None or the empty list.
                continue
            for cell in closure_cells:
                if cell.cell_contents is self._cls:
                    set_closure_cell(cell, cls)
        return cls
    def add_repr(self, ns):
        """Queue a generated ``__repr__`` (with optional namespace *ns*)."""
        self._cls_dict["__repr__"] = self._add_method_dunders(
            _make_repr(self._attrs, ns=ns)
        )
        return self
    def add_str(self):
        """Queue a ``__str__`` aliasing ``__repr__`` (which must exist)."""
        repr = self._cls_dict.get("__repr__")
        if repr is None:
            raise ValueError(
                "__str__ can only be generated if a __repr__ exists."
            )
        def __str__(self):
            return self.__repr__()
        self._cls_dict["__str__"] = self._add_method_dunders(__str__)
        return self
    def make_unhashable(self):
        """Mark the class unhashable by setting ``__hash__`` to None."""
        self._cls_dict["__hash__"] = None
        return self
    def add_hash(self):
        """Queue a generated ``__hash__``."""
        self._cls_dict["__hash__"] = self._add_method_dunders(
            _make_hash(
                self._attrs, frozen=self._frozen, cache_hash=self._cache_hash
            )
        )
        return self
    def add_init(self):
        """Queue a generated ``__init__``."""
        self._cls_dict["__init__"] = self._add_method_dunders(
            _make_init(
                self._attrs,
                self._has_post_init,
                self._frozen,
                self._slots,
                self._cache_hash,
                self._base_attr_map,
            )
        )
        return self
    def add_cmp(self):
        """Queue the six generated comparison methods."""
        cd = self._cls_dict
        cd["__eq__"], cd["__ne__"], cd["__lt__"], cd["__le__"], cd[
            "__gt__"
        ], cd["__ge__"] = (
            self._add_method_dunders(meth) for meth in _make_cmp(self._attrs)
        )
        return self
    def _add_method_dunders(self, method):
        """
        Add __module__ and __qualname__ to a *method* if possible.
        """
        try:
            method.__module__ = self._cls.__module__
        except AttributeError:
            pass
        try:
            method.__qualname__ = ".".join(
                (self._cls.__qualname__, method.__name__)
            )
        except AttributeError:
            pass
        return method
def attrs(
    maybe_cls=None,
    these=None,
    repr_ns=None,
    repr=True,
    cmp=True,
    hash=None,
    init=True,
    slots=False,
    frozen=False,
    weakref_slot=True,
    str=False,
    auto_attribs=False,
    kw_only=False,
    cache_hash=False,
):
    r"""
    A class decorator that adds `dunder
    <https://wiki.python.org/moin/DunderAlias>`_\ -methods according to the
    specified attributes using :func:`attr.ib` or the *these* argument.
    :param these: A dictionary of name to :func:`attr.ib` mappings. This is
        useful to avoid the definition of your attributes within the class body
        because you can't (e.g. if you want to add ``__repr__`` methods to
        Django models) or don't want to.
        If *these* is not ``None``, ``attrs`` will *not* search the class body
        for attributes and will *not* remove any attributes from it.
        If *these* is an ordered dict (:class:`dict` on Python 3.6+,
        :class:`collections.OrderedDict` otherwise), the order is deduced from
        the order of the attributes inside *these*. Otherwise the order
        of the definition of the attributes is used.
    :type these: :class:`dict` of :class:`str` to :func:`attr.ib`
    :param str repr_ns: When using nested classes, there's no way in Python 2
        to automatically detect that. Therefore it's possible to set the
        namespace explicitly for a more meaningful ``repr`` output.
    :param bool repr: Create a ``__repr__`` method with a human readable
        representation of ``attrs`` attributes..
    :param bool str: Create a ``__str__`` method that is identical to
        ``__repr__``. This is usually not necessary except for
        :class:`Exception`\ s.
    :param bool cmp: Create ``__eq__``, ``__ne__``, ``__lt__``, ``__le__``,
        ``__gt__``, and ``__ge__`` methods that compare the class as if it were
        a tuple of its ``attrs`` attributes. But the attributes are *only*
        compared, if the types of both classes are *identical*!
    :param hash: If ``None`` (default), the ``__hash__`` method is generated
        according how *cmp* and *frozen* are set.
        1. If *both* are True, ``attrs`` will generate a ``__hash__`` for you.
        2. If *cmp* is True and *frozen* is False, ``__hash__`` will be set to
           None, marking it unhashable (which it is).
        3. If *cmp* is False, ``__hash__`` will be left untouched meaning the
           ``__hash__`` method of the base class will be used (if base class is
           ``object``, this means it will fall back to id-based hashing.).
        Although not recommended, you can decide for yourself and force
        ``attrs`` to create one (e.g. if the class is immutable even though you
        didn't freeze it programmatically) by passing ``True`` or not. Both of
        these cases are rather special and should be used carefully.
        See the `Python documentation \
        <https://docs.python.org/3/reference/datamodel.html#object.__hash__>`_
        and the `GitHub issue that led to the default behavior \
        <https://github.com/python-attrs/attrs/issues/136>`_ for more details.
    :type hash: ``bool`` or ``None``
    :param bool init: Create a ``__init__`` method that initializes the
        ``attrs`` attributes. Leading underscores are stripped for the
        argument name. If a ``__attrs_post_init__`` method exists on the
        class, it will be called after the class is fully initialized.
    :param bool slots: Create a slots_-style class that's more
        memory-efficient. See :ref:`slots` for further ramifications.
    :param bool frozen: Make instances immutable after initialization. If
        someone attempts to modify a frozen instance,
        :exc:`attr.exceptions.FrozenInstanceError` is raised.
        Please note:
            1. This is achieved by installing a custom ``__setattr__`` method
               on your class so you can't implement an own one.
            2. True immutability is impossible in Python.
            3. This *does* have a minor a runtime performance :ref:`impact
               <how-frozen>` when initializing new instances. In other words:
               ``__init__`` is slightly slower with ``frozen=True``.
            4. If a class is frozen, you cannot modify ``self`` in
               ``__attrs_post_init__`` or a self-written ``__init__``. You can
               circumvent that limitation by using
               ``object.__setattr__(self, "attribute_name", value)``.
        .. _slots: https://docs.python.org/3/reference/datamodel.html#slots
    :param bool weakref_slot: Make instances weak-referenceable. This has no
        effect unless ``slots`` is also enabled.
    :param bool auto_attribs: If True, collect `PEP 526`_-annotated attributes
        (Python 3.6 and later only) from the class body.
        In this case, you **must** annotate every field. If ``attrs``
        encounters a field that is set to an :func:`attr.ib` but lacks a type
        annotation, an :exc:`attr.exceptions.UnannotatedAttributeError` is
        raised. Use ``field_name: typing.Any = attr.ib(...)`` if you don't
        want to set a type.
        If you assign a value to those attributes (e.g. ``x: int = 42``), that
        value becomes the default value like if it were passed using
        ``attr.ib(default=42)``. Passing an instance of :class:`Factory` also
        works as expected.
        Attributes annotated as :data:`typing.ClassVar` are **ignored**.
        .. _`PEP 526`: https://www.python.org/dev/peps/pep-0526/
    :param bool kw_only: Make all attributes keyword-only (Python 3+)
        in the generated ``__init__`` (if ``init`` is ``False``, this
        parameter is ignored).
    :param bool cache_hash: Ensure that the object's hash code is computed
        only once and stored on the object. If this is set to ``True``,
        hashing must be either explicitly or implicitly enabled for this
        class. If the hash code is cached, then no attributes of this
        class which participate in hash code computation may be mutated
        after object creation.
    .. versionadded:: 16.0.0 *slots*
    .. versionadded:: 16.1.0 *frozen*
    .. versionadded:: 16.3.0 *str*
    .. versionadded:: 16.3.0 Support for ``__attrs_post_init__``.
    .. versionchanged:: 17.1.0
        *hash* supports ``None`` as value which is also the default now.
    .. versionadded:: 17.3.0 *auto_attribs*
    .. versionchanged:: 18.1.0
        If *these* is passed, no attributes are deleted from the class body.
    .. versionchanged:: 18.1.0 If *these* is ordered, the order is retained.
    .. versionadded:: 18.2.0 *weakref_slot*
    .. deprecated:: 18.2.0
        ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now raise a
        :class:`DeprecationWarning` if the classes compared are subclasses of
        each other. ``__eq`` and ``__ne__`` never tried to compared subclasses
        to each other.
    .. versionadded:: 18.2.0 *kw_only*
    .. versionadded:: 18.2.0 *cache_hash*
    """
    def wrap(cls):
        # Old-style classes (Python 2) have no __class__ attribute.
        if getattr(cls, "__class__", None) is None:
            raise TypeError("attrs only works with new-style classes.")
        builder = _ClassBuilder(
            cls,
            these,
            slots,
            frozen,
            weakref_slot,
            auto_attribs,
            kw_only,
            cache_hash,
        )
        if repr is True:
            builder.add_repr(repr_ns)
        if str is True:
            builder.add_str()
        if cmp is True:
            builder.add_cmp()
        # Resolve the tri-state *hash* flag (see the docstring above).
        if hash is not True and hash is not False and hash is not None:
            # Can't use `hash in` because 1 == True for example.
            raise TypeError(
                "Invalid value for hash.  Must be True, False, or None."
            )
        elif hash is False or (hash is None and cmp is False):
            if cache_hash:
                raise TypeError(
                    "Invalid value for cache_hash.  To use hash caching,"
                    " hashing must be either explicitly or implicitly "
                    "enabled."
                )
        elif hash is True or (hash is None and cmp is True and frozen is True):
            builder.add_hash()
        else:
            if cache_hash:
                raise TypeError(
                    "Invalid value for cache_hash.  To use hash caching,"
                    " hashing must be either explicitly or implicitly "
                    "enabled."
                )
            builder.make_unhashable()
        if init is True:
            builder.add_init()
        else:
            if cache_hash:
                raise TypeError(
                    "Invalid value for cache_hash.  To use hash caching,"
                    " init must be True."
                )
        return builder.build_class()
    # maybe_cls's type depends on the usage of the decorator.  It's a class
    # if it's used as `@attrs` but ``None`` if used as `@attrs()`.
    if maybe_cls is None:
        return wrap
    else:
        return wrap(maybe_cls)
_attrs = attrs
"""
Internal alias so we can use it in functions that take an argument called
*attrs*.
"""
if PY2:
    def _has_frozen_base_class(cls):
        """
        Check whether *cls* has a frozen ancestor by looking at its
        __setattr__.
        """
        # On Python 2, unbound methods make a direct identity/equality
        # comparison unreliable, so compare by module and name instead.
        return (
            getattr(cls.__setattr__, "__module__", None)
            == _frozen_setattrs.__module__
            and cls.__setattr__.__name__ == _frozen_setattrs.__name__
        )
else:
    def _has_frozen_base_class(cls):
        """
        Check whether *cls* has a frozen ancestor by looking at its
        __setattr__.
        """
        # On Python 3, the plain function inherited through the MRO compares
        # equal to the module-level _frozen_setattrs directly.
        return cls.__setattr__ == _frozen_setattrs
def _attrs_to_tuple(obj, attrs):
"""
Create a tuple of all values of *obj*'s *attrs*.
"""
return tuple(getattr(obj, a.name) for a in attrs)
def _make_hash(attrs, frozen, cache_hash):
    """
    Generate and return a ``__hash__`` method hashing the given *attrs*.
    If *cache_hash* is true, the computed value is stored on the instance
    (via object.__setattr__ when *frozen*) and reused on later calls.
    """
    # Only attributes with hash=True, or hash=None while cmp=True, take part.
    attrs = tuple(
        a
        for a in attrs
        if a.hash is True or (a.hash is None and a.cmp is True)
    )
    tab = "        "
    # We cache the generated hash methods for the same kinds of attributes.
    sha1 = hashlib.sha1()
    sha1.update(repr(attrs).encode("utf-8"))
    unique_filename = "<attrs generated hash %s>" % (sha1.hexdigest(),)
    # Mixing the per-shape filename hash in distinguishes classes whose
    # attribute values would otherwise hash identically.
    type_hash = hash(unique_filename)
    method_lines = ["def __hash__(self):"]
    def append_hash_computation_lines(prefix, indent):
        """
        Generate the code for actually computing the hash code.
        Below this will either be returned directly or used to compute
        a value which is then cached, depending on the value of cache_hash
        """
        method_lines.extend(
            [indent + prefix + "hash((", indent + "        %d," % (type_hash,)]
        )
        for a in attrs:
            method_lines.append(indent + "        self.%s," % a.name)
        method_lines.append(indent + "    ))")
    if cache_hash:
        method_lines.append(tab + "if self.%s is None:" % _hash_cache_field)
        if frozen:
            append_hash_computation_lines(
                "object.__setattr__(self, '%s', " % _hash_cache_field, tab * 2
            )
            method_lines.append(tab * 2 + ")")  # close __setattr__
        else:
            append_hash_computation_lines(
                "self.%s = " % _hash_cache_field, tab * 2
            )
        method_lines.append(tab + "return self.%s" % _hash_cache_field)
    else:
        append_hash_computation_lines("return ", tab)
    script = "\n".join(method_lines)
    globs = {}
    locs = {}
    bytecode = compile(script, unique_filename, "exec")
    eval(bytecode, globs, locs)
    # In order of debuggers like PDB being able to step through the code,
    # we add a fake linecache entry.
    linecache.cache[unique_filename] = (
        len(script),
        None,
        script.splitlines(True),
        unique_filename,
    )
    return locs["__hash__"]
def _add_hash(cls, attrs):
    """
    Add a hash method to *cls*.
    :param cls: class to mutate.
    :param attrs: attributes to include in the generated ``__hash__``.
    Returns the mutated *cls*.
    """
    cls.__hash__ = _make_hash(attrs, frozen=False, cache_hash=False)
    return cls
def __ne__(self, other):
"""
Check equality and either forward a NotImplemented or return the result
negated.
"""
result = self.__eq__(other)
if result is NotImplemented:
return NotImplemented
return not result
# %-template for the DeprecationWarning emitted by the generated ordering
# methods below when instances of different (sub)classes are compared.
# NOTE(review): "Comparision" is a typo, but it is part of the emitted
# warning text, so it is left untouched here.
WARNING_CMP_ISINSTANCE = (
    "Comparision of subclasses using __%s__ is deprecated and will be removed "
    "in 2019."
)
def _make_cmp(attrs):
    """
    Generate and return the six comparison methods
    ``(__eq__, __ne__, __lt__, __le__, __gt__, __ge__)`` comparing the
    given *attrs* as a tuple.  ``__eq__`` is built via exec'd source so
    debuggers can step into it; the ordering methods are closures.
    """
    attrs = [a for a in attrs if a.cmp]
    # We cache the generated eq methods for the same kinds of attributes.
    sha1 = hashlib.sha1()
    sha1.update(repr(attrs).encode("utf-8"))
    unique_filename = "<attrs generated eq %s>" % (sha1.hexdigest(),)
    lines = [
        "def __eq__(self, other):",
        "    if other.__class__ is not self.__class__:",
        "        return NotImplemented",
    ]
    # We can't just do a big self.x = other.x and... clause due to
    # irregularities like nan == nan is false but (nan,) == (nan,) is true.
    if attrs:
        lines.append("    return  (")
        others = ["    ) == ("]
        for a in attrs:
            lines.append("        self.%s," % (a.name,))
            others.append("        other.%s," % (a.name,))
        lines += others + ["    )"]
    else:
        lines.append("    return True")
    script = "\n".join(lines)
    globs = {}
    locs = {}
    bytecode = compile(script, unique_filename, "exec")
    eval(bytecode, globs, locs)
    # In order of debuggers like PDB being able to step through the code,
    # we add a fake linecache entry.
    linecache.cache[unique_filename] = (
        len(script),
        None,
        script.splitlines(True),
        unique_filename,
    )
    eq = locs["__eq__"]
    ne = __ne__
    def attrs_to_tuple(obj):
        """
        Save us some typing.
        """
        return _attrs_to_tuple(obj, attrs)
    def __lt__(self, other):
        """
        Automatically created by attrs.
        """
        if isinstance(other, self.__class__):
            if other.__class__ is not self.__class__:
                warnings.warn(
                    WARNING_CMP_ISINSTANCE % ("lt",), DeprecationWarning
                )
            return attrs_to_tuple(self) < attrs_to_tuple(other)
        else:
            return NotImplemented
    def __le__(self, other):
        """
        Automatically created by attrs.
        """
        if isinstance(other, self.__class__):
            if other.__class__ is not self.__class__:
                warnings.warn(
                    WARNING_CMP_ISINSTANCE % ("le",), DeprecationWarning
                )
            return attrs_to_tuple(self) <= attrs_to_tuple(other)
        else:
            return NotImplemented
    def __gt__(self, other):
        """
        Automatically created by attrs.
        """
        if isinstance(other, self.__class__):
            if other.__class__ is not self.__class__:
                warnings.warn(
                    WARNING_CMP_ISINSTANCE % ("gt",), DeprecationWarning
                )
            return attrs_to_tuple(self) > attrs_to_tuple(other)
        else:
            return NotImplemented
    def __ge__(self, other):
        """
        Automatically created by attrs.
        """
        if isinstance(other, self.__class__):
            if other.__class__ is not self.__class__:
                warnings.warn(
                    WARNING_CMP_ISINSTANCE % ("ge",), DeprecationWarning
                )
            return attrs_to_tuple(self) >= attrs_to_tuple(other)
        else:
            return NotImplemented
    return eq, ne, __lt__, __le__, __gt__, __ge__
def _add_cmp(cls, attrs=None):
    """
    Attach all six generated comparison methods to *cls* and return it.

    When *attrs* is None, the class's own ``__attrs_attrs__`` is used.
    """
    if attrs is None:
        attrs = cls.__attrs_attrs__
    eq, ne, lt, le, gt, ge = _make_cmp(attrs)
    cls.__eq__ = eq
    cls.__ne__ = ne
    cls.__lt__ = lt
    cls.__le__ = le
    cls.__gt__ = gt
    cls.__ge__ = ge
    return cls
_already_repring = threading.local()
def _make_repr(attrs, ns):
    """
    Make a repr method for *attr_names* adding *ns* to the full name.
    """
    attr_names = tuple(a.name for a in attrs if a.repr)

    def __repr__(self):
        """
        Automatically created by attrs.
        """
        try:
            working_set = _already_repring.working_set
        except AttributeError:
            working_set = set()
            _already_repring.working_set = working_set
        # This instance is already being repr'd further up the stack:
        # short-circuit instead of recursing forever.
        if id(self) in working_set:
            return "..."
        real_cls = self.__class__
        if ns is None:
            qualname = getattr(real_cls, "__qualname__", None)
            if qualname is not None:
                # Keep only the part after a possible "<locals>." prefix.
                class_name = qualname.rsplit(">.", 1)[-1]
            else:
                class_name = real_cls.__name__
        else:
            class_name = ns + "." + real_cls.__name__
        # Since 'self' remains on the stack (i.e.: strongly referenced) for the
        # duration of this call, it's safe to depend on id(...) stability, and
        # not need to track the instance and therefore worry about properties
        # like weakref- or hash-ability.
        working_set.add(id(self))
        try:
            result = [class_name, "("]
            first = True
            for name in attr_names:
                if first:
                    first = False
                else:
                    result.append(", ")
                result.extend((name, "=", repr(getattr(self, name, NOTHING))))
            return "".join(result) + ")"
        finally:
            working_set.remove(id(self))

    return __repr__
def _add_repr(cls, ns=None, attrs=None):
    """
    Attach a generated ``__repr__`` to *cls* and return the class.

    Falls back to the class's own ``__attrs_attrs__`` when *attrs* is None.
    """
    repr_method = _make_repr(
        cls.__attrs_attrs__ if attrs is None else attrs, ns
    )
    cls.__repr__ = repr_method
    return cls
def _make_init(attrs, post_init, frozen, slots, cache_hash, base_attr_map):
    """
    Build and return a ``__init__`` method for *attrs*.

    The body is produced by _attrs_to_init_script(), compiled, and given a
    fake ``linecache`` entry so debuggers can step through it.
    """
    # Attributes with init=False but no default need no handling at all.
    attrs = [a for a in attrs if a.init or a.default is not NOTHING]
    # We cache the generated init methods for the same kinds of attributes.
    sha1 = hashlib.sha1()
    sha1.update(repr(attrs).encode("utf-8"))
    unique_filename = "<attrs generated init {0}>".format(sha1.hexdigest())
    script, globs, annotations = _attrs_to_init_script(
        attrs, frozen, slots, post_init, cache_hash, base_attr_map
    )
    locs = {}
    bytecode = compile(script, unique_filename, "exec")
    attr_dict = dict((a.name, a) for a in attrs)
    # Names the generated script expects to find in its globals.
    globs.update({"NOTHING": NOTHING, "attr_dict": attr_dict})
    if frozen is True:
        # Save the lookup overhead in __init__ if we need to circumvent
        # immutability.
        globs["_cached_setattr"] = _obj_setattr
    eval(bytecode, globs, locs)
    # In order of debuggers like PDB being able to step through the code,
    # we add a fake linecache entry.
    linecache.cache[unique_filename] = (
        len(script),
        None,
        script.splitlines(True),
        unique_filename,
    )
    __init__ = locs["__init__"]
    __init__.__annotations__ = annotations
    return __init__
def _add_init(cls, frozen):
    """
    Add a __init__ method to *cls*.  If *frozen* is True, make it immutable.
    """
    post_init = getattr(cls, "__attrs_post_init__", False)
    init_method = _make_init(
        cls.__attrs_attrs__,
        post_init,
        frozen,
        _is_slot_cls(cls),
        cache_hash=False,
        base_attr_map={},
    )
    cls.__init__ = init_method
    return cls
def fields(cls):
    """
    Return the tuple of ``attrs`` attributes for a class.

    The tuple also allows accessing the fields by their names (see below for
    examples).

    :param type cls: Class to introspect.

    :raise TypeError: If *cls* is not a class.
    :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
        class.

    :rtype: tuple (with name accessors) of :class:`attr.Attribute`

    ..  versionchanged:: 16.2.0 Returned tuple allows accessing the fields
        by name.
    """
    if not isclass(cls):
        raise TypeError("Passed object must be a class.")
    class_attrs = getattr(cls, "__attrs_attrs__", None)
    if class_attrs is not None:
        return class_attrs
    raise NotAnAttrsClassError(
        "{cls!r} is not an attrs-decorated class.".format(cls=cls)
    )
def fields_dict(cls):
    """
    Return an ordered dictionary of ``attrs`` attributes for a class, whose
    keys are the attribute names.

    :param type cls: Class to introspect.

    :raise TypeError: If *cls* is not a class.
    :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
        class.

    :rtype: an ordered dict where keys are attribute names and values are
        :class:`attr.Attribute`\\ s. This will be a :class:`dict` if it's
        naturally ordered like on Python 3.6+ or an
        :class:`~collections.OrderedDict` otherwise.

    .. versionadded:: 18.1.0
    """
    if not isclass(cls):
        raise TypeError("Passed object must be a class.")
    class_attrs = getattr(cls, "__attrs_attrs__", None)
    if class_attrs is None:
        raise NotAnAttrsClassError(
            "{cls!r} is not an attrs-decorated class.".format(cls=cls)
        )
    return ordered_dict([(a.name, a) for a in class_attrs])
def validate(inst):
    """
    Run every attribute validator on *inst*.

    Exceptions raised by individual validators propagate unchanged.

    :param inst: Instance of a class with ``attrs`` attributes.
    """
    # Validators may be disabled globally via attr.set_run_validators().
    if _config._run_validators is False:
        return
    for attribute in fields(inst.__class__):
        validator = attribute.validator
        if validator is not None:
            validator(inst, attribute, getattr(inst, attribute.name))
def _is_slot_cls(cls):
return "__slots__" in cls.__dict__
def _is_slot_attr(a_name, base_attr_map):
"""
Check if the attribute name comes from a slot class.
"""
return a_name in base_attr_map and _is_slot_cls(base_attr_map[a_name])
def _attrs_to_init_script(
    attrs, frozen, slots, post_init, cache_hash, base_attr_map
):
    """
    Return a script of an initializer for *attrs* and a dict of globals.

    The globals are expected by the generated script.

    If *frozen* is True, we cannot set the attributes directly so we use
    a cached ``object.__setattr__``.
    """
    lines = []
    any_slot_ancestors = any(
        _is_slot_attr(a.name, base_attr_map) for a in attrs
    )
    # Pick the strategy for writing attributes; fmt_setter* return one line
    # of generated source for a single assignment.
    if frozen is True:
        if slots is True:
            lines.append(
                # Circumvent the __setattr__ descriptor to save one lookup per
                # assignment.
                # Note _setattr will be used again below if cache_hash is True
                "_setattr = _cached_setattr.__get__(self, self.__class__)"
            )

            def fmt_setter(attr_name, value_var):
                return "_setattr('%(attr_name)s', %(value_var)s)" % {
                    "attr_name": attr_name,
                    "value_var": value_var,
                }

            def fmt_setter_with_converter(attr_name, value_var):
                conv_name = _init_converter_pat.format(attr_name)
                return "_setattr('%(attr_name)s', %(conv)s(%(value_var)s))" % {
                    "attr_name": attr_name,
                    "value_var": value_var,
                    "conv": conv_name,
                }

        else:
            # Dict frozen classes assign directly to __dict__.
            # But only if the attribute doesn't come from an ancestor slot
            # class.
            # Note _inst_dict will be used again below if cache_hash is True
            lines.append("_inst_dict = self.__dict__")
            if any_slot_ancestors:
                lines.append(
                    # Circumvent the __setattr__ descriptor to save one lookup
                    # per assignment.
                    "_setattr = _cached_setattr.__get__(self, self.__class__)"
                )

            def fmt_setter(attr_name, value_var):
                if _is_slot_attr(attr_name, base_attr_map):
                    res = "_setattr('%(attr_name)s', %(value_var)s)" % {
                        "attr_name": attr_name,
                        "value_var": value_var,
                    }
                else:
                    res = "_inst_dict['%(attr_name)s'] = %(value_var)s" % {
                        "attr_name": attr_name,
                        "value_var": value_var,
                    }
                return res

            def fmt_setter_with_converter(attr_name, value_var):
                conv_name = _init_converter_pat.format(attr_name)
                if _is_slot_attr(attr_name, base_attr_map):
                    tmpl = "_setattr('%(attr_name)s', %(c)s(%(value_var)s))"
                else:
                    tmpl = "_inst_dict['%(attr_name)s'] = %(c)s(%(value_var)s)"
                return tmpl % {
                    "attr_name": attr_name,
                    "value_var": value_var,
                    "c": conv_name,
                }

    else:
        # Not frozen.
        def fmt_setter(attr_name, value):
            return "self.%(attr_name)s = %(value)s" % {
                "attr_name": attr_name,
                "value": value,
            }

        def fmt_setter_with_converter(attr_name, value_var):
            conv_name = _init_converter_pat.format(attr_name)
            return "self.%(attr_name)s = %(conv)s(%(value_var)s)" % {
                "attr_name": attr_name,
                "value_var": value_var,
                "conv": conv_name,
            }

    args = []
    kw_only_args = []
    attrs_to_validate = []
    # This is a dictionary of names to validator and converter callables.
    # Injecting this into __init__ globals lets us avoid lookups.
    names_for_globals = {}
    annotations = {"return": None}
    # Emit one assignment (and possibly one signature argument) per attribute.
    for a in attrs:
        if a.validator:
            attrs_to_validate.append(a)
        attr_name = a.name
        arg_name = a.name.lstrip("_")
        has_factory = isinstance(a.default, Factory)
        if has_factory and a.default.takes_self:
            maybe_self = "self"
        else:
            maybe_self = ""
        if a.init is False:
            # No argument in the signature: value comes from the
            # factory/default only.
            if has_factory:
                init_factory_name = _init_factory_pat.format(a.name)
                if a.converter is not None:
                    lines.append(
                        fmt_setter_with_converter(
                            attr_name,
                            init_factory_name + "({0})".format(maybe_self),
                        )
                    )
                    conv_name = _init_converter_pat.format(a.name)
                    names_for_globals[conv_name] = a.converter
                else:
                    lines.append(
                        fmt_setter(
                            attr_name,
                            init_factory_name + "({0})".format(maybe_self),
                        )
                    )
                names_for_globals[init_factory_name] = a.default.factory
            else:
                if a.converter is not None:
                    lines.append(
                        fmt_setter_with_converter(
                            attr_name,
                            "attr_dict['{attr_name}'].default".format(
                                attr_name=attr_name
                            ),
                        )
                    )
                    conv_name = _init_converter_pat.format(a.name)
                    names_for_globals[conv_name] = a.converter
                else:
                    lines.append(
                        fmt_setter(
                            attr_name,
                            "attr_dict['{attr_name}'].default".format(
                                attr_name=attr_name
                            ),
                        )
                    )
        # Plain default value: becomes a defaulted argument.
        elif a.default is not NOTHING and not has_factory:
            arg = "{arg_name}=attr_dict['{attr_name}'].default".format(
                arg_name=arg_name, attr_name=attr_name
            )
            if a.kw_only:
                kw_only_args.append(arg)
            else:
                args.append(arg)
            if a.converter is not None:
                lines.append(fmt_setter_with_converter(attr_name, arg_name))
                names_for_globals[
                    _init_converter_pat.format(a.name)
                ] = a.converter
            else:
                lines.append(fmt_setter(attr_name, arg_name))
        # Factory default: NOTHING sentinel tells apart "omitted" from a
        # passed value.
        elif has_factory:
            arg = "{arg_name}=NOTHING".format(arg_name=arg_name)
            if a.kw_only:
                kw_only_args.append(arg)
            else:
                args.append(arg)
            lines.append(
                "if {arg_name} is not NOTHING:".format(arg_name=arg_name)
            )
            init_factory_name = _init_factory_pat.format(a.name)
            if a.converter is not None:
                lines.append(
                    "    " + fmt_setter_with_converter(attr_name, arg_name)
                )
                lines.append("else:")
                lines.append(
                    "    "
                    + fmt_setter_with_converter(
                        attr_name,
                        init_factory_name + "({0})".format(maybe_self),
                    )
                )
                names_for_globals[
                    _init_converter_pat.format(a.name)
                ] = a.converter
            else:
                lines.append("    " + fmt_setter(attr_name, arg_name))
                lines.append("else:")
                lines.append(
                    "    "
                    + fmt_setter(
                        attr_name,
                        init_factory_name + "({0})".format(maybe_self),
                    )
                )
            names_for_globals[init_factory_name] = a.default.factory
        # Mandatory argument without any default.
        else:
            if a.kw_only:
                kw_only_args.append(arg_name)
            else:
                args.append(arg_name)
            if a.converter is not None:
                lines.append(fmt_setter_with_converter(attr_name, arg_name))
                names_for_globals[
                    _init_converter_pat.format(a.name)
                ] = a.converter
            else:
                lines.append(fmt_setter(attr_name, arg_name))
        if a.init is True and a.converter is None and a.type is not None:
            annotations[arg_name] = a.type
    if attrs_to_validate:  # we can skip this if there are no validators.
        names_for_globals["_config"] = _config
        lines.append("if _config._run_validators is True:")
        for a in attrs_to_validate:
            val_name = "__attr_validator_{}".format(a.name)
            attr_name = "__attr_{}".format(a.name)
            lines.append(
                "    {}(self, {}, self.{})".format(val_name, attr_name, a.name)
            )
            names_for_globals[val_name] = a.validator
            names_for_globals[attr_name] = a
    if post_init:
        lines.append("self.__attrs_post_init__()")
    # because this is set only after __attrs_post_init is called, a crash
    # will result if post-init tries to access the hash code.  This seemed
    # preferable to setting this beforehand, in which case alteration to
    # field values during post-init combined with post-init accessing the
    # hash code would result in silent bugs.
    if cache_hash:
        if frozen:
            if slots:
                # if frozen and slots, then _setattr defined above
                init_hash_cache = "_setattr('%s', %s)"
            else:
                # if frozen and not slots, then _inst_dict defined above
                init_hash_cache = "_inst_dict['%s'] = %s"
        else:
            init_hash_cache = "self.%s = %s"
        lines.append(init_hash_cache % (_hash_cache_field, "None"))
    args = ", ".join(args)
    if kw_only_args:
        if PY2:
            raise PythonTooOldError(
                "Keyword-only arguments only work on Python 3 and later."
            )
        args += "{leading_comma}*, {kw_only_args}".format(
            leading_comma=", " if args else "",
            kw_only_args=", ".join(kw_only_args),
        )
    return (
        """\
def __init__(self, {args}):
    {lines}
""".format(
            args=args, lines="\n    ".join(lines) if lines else "pass"
        ),
        names_for_globals,
        annotations,
    )
class Attribute(object):
    """
    *Read-only* representation of an attribute.

    :attribute name: The name of the attribute.

    Plus *all* arguments of :func:`attr.ib`.

    For the version history of the fields, see :func:`attr.ib`.
    """

    # Instances are slotted and frozen; all writes go through
    # object.__setattr__ (see __init__ and _setattrs).
    __slots__ = (
        "name",
        "default",
        "validator",
        "repr",
        "cmp",
        "hash",
        "init",
        "metadata",
        "type",
        "converter",
        "kw_only",
    )

    def __init__(
        self,
        name,
        default,
        validator,
        repr,
        cmp,
        hash,
        init,
        convert=None,
        metadata=None,
        type=None,
        converter=None,
        kw_only=False,
    ):
        # Cache this descriptor here to speed things up later.
        bound_setattr = _obj_setattr.__get__(self, Attribute)
        # Despite the big red warning, people *do* instantiate `Attribute`
        # themselves.
        if convert is not None:
            if converter is not None:
                raise RuntimeError(
                    "Can't pass both `convert` and `converter`.  "
                    "Please use `converter` only."
                )
            warnings.warn(
                "The `convert` argument is deprecated in favor of `converter`."
                "  It will be removed after 2019/01.",
                DeprecationWarning,
                stacklevel=2,
            )
            converter = convert
        bound_setattr("name", name)
        bound_setattr("default", default)
        bound_setattr("validator", validator)
        bound_setattr("repr", repr)
        bound_setattr("cmp", cmp)
        bound_setattr("hash", hash)
        bound_setattr("init", init)
        bound_setattr("converter", converter)
        bound_setattr(
            "metadata",
            (
                metadata_proxy(metadata)
                if metadata
                else _empty_metadata_singleton
            ),
        )
        bound_setattr("type", type)
        bound_setattr("kw_only", kw_only)

    def __setattr__(self, name, value):
        # Attribute instances are immutable.
        raise FrozenInstanceError()

    @property
    def convert(self):
        """
        Deprecated alias for ``converter``.
        """
        warnings.warn(
            "The `convert` attribute is deprecated in favor of `converter`.  "
            "It will be removed after 2019/01.",
            DeprecationWarning,
            stacklevel=2,
        )
        return self.converter

    @classmethod
    def from_counting_attr(cls, name, ca, type=None):
        """
        Build an Attribute from the intermediate _CountingAttr *ca*.
        """
        # type holds the annotated value. deal with conflicts:
        if type is None:
            type = ca.type
        elif ca.type is not None:
            raise ValueError(
                "Type annotation and type argument cannot both be present"
            )
        inst_dict = {
            k: getattr(ca, k)
            for k in Attribute.__slots__
            if k
            not in (
                "name",
                "validator",
                "default",
                "type",
                "convert",
            )  # exclude methods and deprecated alias
        }
        return cls(
            name=name,
            validator=ca._validator,
            default=ca._default,
            type=type,
            **inst_dict
        )

    # Don't use attr.assoc since fields(Attribute) doesn't work
    def _assoc(self, **changes):
        """
        Copy *self* and apply *changes*.
        """
        new = copy.copy(self)
        new._setattrs(changes.items())
        return new

    # Don't use _add_pickle since fields(Attribute) doesn't work
    def __getstate__(self):
        """
        Play nice with pickle.
        """
        return tuple(
            getattr(self, name) if name != "metadata" else dict(self.metadata)
            for name in self.__slots__
        )

    def __setstate__(self, state):
        """
        Play nice with pickle.
        """
        self._setattrs(zip(self.__slots__, state))

    def _setattrs(self, name_values_pairs):
        """
        Write attributes on this frozen instance via object.__setattr__.
        """
        bound_setattr = _obj_setattr.__get__(self, Attribute)
        for name, value in name_values_pairs:
            if name != "metadata":
                bound_setattr(name, value)
            else:
                bound_setattr(
                    name,
                    metadata_proxy(value)
                    if value
                    else _empty_metadata_singleton,
                )
# Bootstrap: describe Attribute's own fields as Attribute instances so the
# class itself can be equipped with attrs-generated dunder methods below.
_a = [
    Attribute(
        name=name,
        default=NOTHING,
        validator=None,
        repr=True,
        cmp=True,
        hash=(name != "metadata"),
        init=True,
    )
    for name in Attribute.__slots__
    if name != "convert"  # XXX: remove once `convert` is gone
]
Attribute = _add_hash(
    _add_cmp(_add_repr(Attribute, attrs=_a), attrs=_a),
    # Every field except `metadata` (hash=False above) takes part in the hash.
    attrs=[a for a in _a if a.hash],
)
class _CountingAttr(object):
    """
    Intermediate representation of attributes that uses a counter to preserve
    the order in which the attributes have been defined.

    *Internal* data structure of the attrs library.  Running into is most
    likely the result of a bug like a forgotten `@attr.s` decorator.
    """

    __slots__ = (
        "counter",
        "_default",
        "repr",
        "cmp",
        "hash",
        "init",
        "metadata",
        "_validator",
        "converter",
        "type",
        "kw_only",
    )
    __attrs_attrs__ = tuple(
        Attribute(
            name=name,
            default=NOTHING,
            validator=None,
            repr=True,
            cmp=True,
            hash=True,
            init=True,
            kw_only=False,
        )
        for name in ("counter", "_default", "repr", "cmp", "hash", "init")
    ) + (
        Attribute(
            name="metadata",
            default=None,
            validator=None,
            repr=True,
            cmp=True,
            hash=False,
            init=True,
            kw_only=False,
        ),
    )
    # Class-wide counter; every instantiation increments it so definition
    # order can be reconstructed later.
    cls_counter = 0

    def __init__(
        self,
        default,
        validator,
        repr,
        cmp,
        hash,
        init,
        converter,
        metadata,
        type,
        kw_only,
    ):
        _CountingAttr.cls_counter += 1
        self.counter = _CountingAttr.cls_counter
        self._default = default
        # If validator is a list/tuple, wrap it using helper validator.
        if validator and isinstance(validator, (list, tuple)):
            self._validator = and_(*validator)
        else:
            self._validator = validator
        self.repr = repr
        self.cmp = cmp
        self.hash = hash
        self.init = init
        self.converter = converter
        self.metadata = metadata
        self.type = type
        self.kw_only = kw_only

    def validator(self, meth):
        """
        Decorator that adds *meth* to the list of validators.

        Returns *meth* unchanged.

        .. versionadded:: 17.1.0
        """
        if self._validator is None:
            self._validator = meth
        else:
            self._validator = and_(self._validator, meth)
        return meth

    def default(self, meth):
        """
        Decorator that allows to set the default for an attribute.

        Returns *meth* unchanged.

        :raises DefaultAlreadySetError: If default has been set before.

        .. versionadded:: 17.1.0
        """
        if self._default is not NOTHING:
            raise DefaultAlreadySetError()
        self._default = Factory(meth, takes_self=True)
        return meth
_CountingAttr = _add_cmp(_add_repr(_CountingAttr))
# init=False: Factory is part of the default machinery itself, so the
# default for `takes_self` is implemented by the handwritten __init__ below.
@attrs(slots=True, init=False, hash=True)
class Factory(object):
    """
    Stores a factory callable.

    If passed as the default value to :func:`attr.ib`, the factory is used to
    generate a new value.

    :param callable factory: A callable that takes either none or exactly one
        mandatory positional argument depending on *takes_self*.
    :param bool takes_self: Pass the partially initialized instance that is
        being initialized as a positional argument.

    .. versionadded:: 17.1.0  *takes_self*
    """

    factory = attrib()
    takes_self = attrib()

    def __init__(self, factory, takes_self=False):
        """
        `Factory` is part of the default machinery so if we want a default
        value here, we have to implement it ourselves.
        """
        self.factory = factory
        self.takes_self = takes_self
def make_class(name, attrs, bases=(object,), **attributes_arguments):
    """
    A quick way to create a new class called *name* with *attrs*.

    :param name: The name for the new class.
    :type name: str

    :param attrs: A list of names or a dictionary of mappings of names to
        attributes.

        If *attrs* is a list or an ordered dict (:class:`dict` on Python 3.6+,
        :class:`collections.OrderedDict` otherwise), the order is deduced from
        the order of the names or attributes inside *attrs*.  Otherwise the
        order of the definition of the attributes is used.
    :type attrs: :class:`list` or :class:`dict`

    :param tuple bases: Classes that the new class will subclass.

    :param attributes_arguments: Passed unmodified to :func:`attr.s`.

    :return: A new class with *attrs*.
    :rtype: type

    .. versionadded:: 17.1.0 *bases*
    .. versionchanged:: 18.1.0 If *attrs* is ordered, the order is retained.
    """
    if isinstance(attrs, dict):
        cls_dict = attrs
    elif isinstance(attrs, (list, tuple)):
        # A bare name list means every attribute uses a plain attrib().
        cls_dict = dict((a, attrib()) for a in attrs)
    else:
        raise TypeError("attrs argument must be a dict or a list.")
    # The post-init hook must live on the class itself, not in the attribute
    # dict handed to attrs, so pull it out before creating the type.
    post_init = cls_dict.pop("__attrs_post_init__", None)
    type_ = type(
        name,
        bases,
        {} if post_init is None else {"__attrs_post_init__": post_init},
    )
    # For pickling to work, the __module__ variable needs to be set to the
    # frame where the class is created.  Bypass this step in environments where
    # sys._getframe is not defined (Jython for example) or sys._getframe is not
    # defined for arguments greater than 0 (IronPython).
    try:
        type_.__module__ = sys._getframe(1).f_globals.get(
            "__name__", "__main__"
        )
    except (AttributeError, ValueError):
        pass
    return _attrs(these=cls_dict, **attributes_arguments)(type_)
# These are required within this module, so we define them here and merely
# import them into .validators.
@attrs(slots=True, hash=True)
class _AndValidator(object):
    """
    Compose many validators to a single one.
    """

    # The wrapped validator callables, run in order on every call.
    _validators = attrib()

    def __call__(self, inst, attr, value):
        for v in self._validators:
            v(inst, attr, value)
def and_(*validators):
    """
    A validator that composes multiple validators into one.

    When called on a value, it runs all wrapped validators.  Nested
    ``_AndValidator``s are flattened so the result never nests.

    :param validators: Arbitrary number of validators.
    :type validators: callables

    .. versionadded:: 17.1.0
    """
    flattened = []
    for v in validators:
        if isinstance(v, _AndValidator):
            flattened.extend(v._validators)
        else:
            flattened.append(v)
    return _AndValidator(tuple(flattened))
| 32.291892 | 95 | 0.576772 | from __future__ import absolute_import, division, print_function
import copy
import hashlib
import linecache
import sys
import threading
import warnings
from operator import itemgetter
from . import _config
from ._compat import (
PY2,
isclass,
iteritems,
metadata_proxy,
ordered_dict,
set_closure_cell,
)
from .exceptions import (
DefaultAlreadySetError,
FrozenInstanceError,
NotAnAttrsClassError,
PythonTooOldError,
UnannotatedAttributeError,
)
_obj_setattr = object.__setattr__
_init_converter_pat = "__attr_converter_{}"
_init_factory_pat = "__attr_factory_{}"
_tuple_property_pat = (
" {attr_name} = _attrs_property(_attrs_itemgetter({index}))"
)
_classvar_prefixes = ("typing.ClassVar", "t.ClassVar", "ClassVar")
# name mangling when trying to create a slot for the field
# (when slots=True)
_hash_cache_field = "_attrs_cached_hash"
_empty_metadata_singleton = metadata_proxy({})
class _Nothing(object):
_singleton = None
def __new__(cls):
if _Nothing._singleton is None:
_Nothing._singleton = super(_Nothing, cls).__new__(cls)
return _Nothing._singleton
def __repr__(self):
return "NOTHING"
NOTHING = _Nothing()
def attrib(
    default=NOTHING,
    validator=None,
    repr=True,
    cmp=True,
    hash=None,
    init=True,
    convert=None,
    metadata=None,
    type=None,
    converter=None,
    factory=None,
    kw_only=False,
):
    """
    Create a new attribute on a class.

    .. warning::

        Does *not* do anything unless the class is also decorated with
        :func:`attr.s`!

    :param default: Value used by the generated ``__init__`` when the
        attribute is not passed; a :class:`Factory` default calls its
        callable instead.
    :param validator: Callable(s) run on the value during ``__init__``; a
        list or tuple is wrapped with :func:`and_`.
    :param repr, cmp, init: Include the attribute in the respective generated
        methods.
    :param hash: True/False/None — None means "follow *cmp*".
    :param convert: Deprecated alias for *converter* (removal after 2019/01).
    :param converter: Callable used to convert the value set for the
        attribute.
    :param metadata: Arbitrary read-only mapping attached to the attribute.
    :param type: The attribute's type, if not taken from an annotation.
    :param factory: Syntactic sugar for ``default=attr.Factory(factory)``;
        mutually exclusive with *default*.
    :param kw_only: Make the attribute keyword-only in ``__init__``.
    """
    if hash is not None and hash is not True and hash is not False:
        raise TypeError(
            "Invalid value for hash.  Must be True, False, or None."
        )
    # Honor the deprecated `convert` spelling but steer users to `converter`.
    if convert is not None:
        if converter is not None:
            raise RuntimeError(
                "Can't pass both `convert` and `converter`.  "
                "Please use `converter` only."
            )
        warnings.warn(
            "The `convert` argument is deprecated in favor of `converter`.  "
            "It will be removed after 2019/01.",
            DeprecationWarning,
            stacklevel=2,
        )
        converter = convert
    if factory is not None:
        if default is not NOTHING:
            raise ValueError(
                "The `default` and `factory` arguments are mutually "
                "exclusive."
            )
        if not callable(factory):
            raise ValueError("The `factory` argument must be a callable.")
        default = Factory(factory)
    if metadata is None:
        metadata = {}
    return _CountingAttr(
        default=default,
        validator=validator,
        repr=repr,
        cmp=cmp,
        hash=hash,
        init=init,
        converter=converter,
        metadata=metadata,
        type=type,
        kw_only=kw_only,
    )
def _make_attr_tuple_class(cls_name, attr_names):
attr_class_name = "{}Attributes".format(cls_name)
attr_class_template = [
"class {}(tuple):".format(attr_class_name),
" __slots__ = ()",
]
if attr_names:
for i, attr_name in enumerate(attr_names):
attr_class_template.append(
_tuple_property_pat.format(index=i, attr_name=attr_name)
)
else:
attr_class_template.append(" pass")
globs = {"_attrs_itemgetter": itemgetter, "_attrs_property": property}
eval(compile("\n".join(attr_class_template), "", "exec"), globs)
return globs[attr_class_name]
# Tuple subclass holding the result of _transform_attrs(); the three fields
# are reachable both by index and by name.
_Attributes = _make_attr_tuple_class(
    "_Attributes",
    [
        # all collected attributes, inherited first
        "attrs",
        # attributes that were inherited from base classes
        "base_attrs",
        # map of inherited attribute names to their originating classes
        "base_attrs_map",
    ],
)
def _is_class_var(annot):
    """
    Check whether *annot* is a typing.ClassVar annotation.  The match is
    textual so the ``typing`` module never has to be imported.
    """
    return str(annot).startswith(_classvar_prefixes)
def _get_annotations(cls):
anns = getattr(cls, "__annotations__", None)
if anns is None:
return {}
for base_cls in cls.__mro__[1:]:
if anns is getattr(base_cls, "__annotations__", None):
return {}
return anns
def _counter_getter(e):
return e[1].counter
def _transform_attrs(cls, these, auto_attribs, kw_only):
    """
    Transform all `_CountingAttr`s on a class into `Attribute`s.

    If *these* is passed, use that and don't look for them on the class.

    Return an `_Attributes`.
    """
    cd = cls.__dict__
    anns = _get_annotations(cls)
    if these is not None:
        # Explicit attribute dict: sort by counter unless already ordered.
        ca_list = [(name, ca) for name, ca in iteritems(these)]
        if not isinstance(these, ordered_dict):
            ca_list.sort(key=_counter_getter)
    elif auto_attribs is True:
        # Annotation-driven collection; every annotated name becomes an
        # attribute, ClassVars excluded.
        ca_names = {
            name
            for name, attr in cd.items()
            if isinstance(attr, _CountingAttr)
        }
        ca_list = []
        annot_names = set()
        for attr_name, type in anns.items():
            if _is_class_var(type):
                continue
            annot_names.add(attr_name)
            a = cd.get(attr_name, NOTHING)
            if not isinstance(a, _CountingAttr):
                # A plain value on the class becomes the default.
                if a is NOTHING:
                    a = attrib()
                else:
                    a = attrib(default=a)
            ca_list.append((attr_name, a))
        # attr.ib()s without an annotation are an error in auto_attribs mode.
        unannotated = ca_names - annot_names
        if len(unannotated) > 0:
            raise UnannotatedAttributeError(
                "The following `attr.ib`s lack a type annotation: "
                + ", ".join(
                    sorted(unannotated, key=lambda n: cd.get(n).counter)
                )
                + "."
            )
    else:
        # Default mode: collect attr.ib()s in definition order.
        ca_list = sorted(
            (
                (name, attr)
                for name, attr in cd.items()
                if isinstance(attr, _CountingAttr)
            ),
            key=lambda e: e[1].counter,
        )
    own_attrs = [
        Attribute.from_counting_attr(
            name=attr_name, ca=ca, type=anns.get(attr_name)
        )
        for attr_name, ca in ca_list
    ]
    base_attrs = []
    base_attr_map = {}  # A dictionary of base attrs to their classes.
    taken_attr_names = {a.name: a for a in own_attrs}
    # Traverse the MRO and collect attributes.
    for base_cls in cls.__mro__[1:-1]:
        sub_attrs = getattr(base_cls, "__attrs_attrs__", None)
        if sub_attrs is not None:
            for a in sub_attrs:
                prev_a = taken_attr_names.get(a.name)
                # Only add an attribute if it hasn't been defined before.  This
                # allows for overwriting attribute definitions by subclassing.
                if prev_a is None:
                    base_attrs.append(a)
                    taken_attr_names[a.name] = a
                    base_attr_map[a.name] = base_cls
    attr_names = [a.name for a in base_attrs + own_attrs]
    AttrsClass = _make_attr_tuple_class(cls.__name__, attr_names)
    if kw_only:
        own_attrs = [a._assoc(kw_only=True) for a in own_attrs]
        base_attrs = [a._assoc(kw_only=True) for a in base_attrs]
    attrs = AttrsClass(base_attrs + own_attrs)
    # Enforce argument-ordering rules for the generated __init__.
    had_default = False
    was_kw_only = False
    for a in attrs:
        if (
            was_kw_only is False
            and had_default is True
            and a.default is NOTHING
            and a.init is True
            and a.kw_only is False
        ):
            raise ValueError(
                "No mandatory attributes allowed after an attribute with a "
                "default value or factory.  Attribute in question: %r" % (a,)
            )
        elif (
            had_default is False
            and a.default is not NOTHING
            and a.init is not False
            and
            # Keyword-only attributes without defaults can be specified
            # after keyword-only attributes with defaults.
            a.kw_only is False
        ):
            had_default = True
        if was_kw_only is True and a.kw_only is False:
            raise ValueError(
                "Non keyword-only attributes are not allowed after a "
                "keyword-only attribute.  Attribute in question: {a!r}".format(
                    a=a
                )
            )
        if was_kw_only is False and a.init is True and a.kw_only is True:
            was_kw_only = True
    return _Attributes((attrs, base_attrs, base_attr_map))
def _frozen_setattrs(self, name, value):
    """
    Attached to frozen classes as __setattr__.
    """
    raise FrozenInstanceError()


def _frozen_delattrs(self, name):
    """
    Attached to frozen classes as __delattr__.
    """
    raise FrozenInstanceError()
class _ClassBuilder(object):
    """
    Iteratively build *one* class.
    """

    __slots__ = (
        "_cls",
        "_cls_dict",
        "_attrs",
        "_base_names",
        "_attr_names",
        "_slots",
        "_frozen",
        "_weakref_slot",
        "_cache_hash",
        "_has_post_init",
        "_delete_attribs",
        "_base_attr_map",
    )

    def __init__(
        self,
        cls,
        these,
        slots,
        frozen,
        weakref_slot,
        auto_attribs,
        kw_only,
        cache_hash,
    ):
        attrs, base_attrs, base_map = _transform_attrs(
            cls, these, auto_attribs, kw_only
        )
        self._cls = cls
        # For slotted classes the whole class dict is copied because a new
        # type is created in _create_slots_class().
        self._cls_dict = dict(cls.__dict__) if slots else {}
        self._attrs = attrs
        self._base_names = set(a.name for a in base_attrs)
        self._base_attr_map = base_map
        self._attr_names = tuple(a.name for a in attrs)
        self._slots = slots
        self._frozen = frozen or _has_frozen_base_class(cls)
        self._weakref_slot = weakref_slot
        self._cache_hash = cache_hash
        self._has_post_init = bool(getattr(cls, "__attrs_post_init__", False))
        self._delete_attribs = not bool(these)
        self._cls_dict["__attrs_attrs__"] = self._attrs
        if frozen:
            self._cls_dict["__setattr__"] = _frozen_setattrs
            self._cls_dict["__delattr__"] = _frozen_delattrs

    def __repr__(self):
        return "<_ClassBuilder(cls={cls})>".format(cls=self._cls.__name__)

    def build_class(self):
        """
        Finalize class based on the accumulated configuration.

        Builder cannot be used anymore after calling this method.
        """
        if self._slots is True:
            return self._create_slots_class()
        else:
            return self._patch_original_class()

    def _patch_original_class(self):
        """
        Apply the accumulated methods to the original class and return it.
        """
        cls = self._cls
        base_names = self._base_names
        # Clean class of attribute definitions (`attr.ib()`s).
        if self._delete_attribs:
            for name in self._attr_names:
                if (
                    name not in base_names
                    and getattr(cls, name, None) is not None
                ):
                    try:
                        delattr(cls, name)
                    except AttributeError:
                        # This can happen if a base class defines a class
                        # variable and we want to set an attribute with the
                        # same name by using only a type annotation.
                        pass
        # Attach our dunder methods.
        for name, value in self._cls_dict.items():
            setattr(cls, name, value)
        return cls

    def _create_slots_class(self):
        """
        Build and return a new class with a ``__slots__`` attribute.
        """
        base_names = self._base_names
        cd = {
            k: v
            for k, v in iteritems(self._cls_dict)
            if k not in tuple(self._attr_names) + ("__dict__", "__weakref__")
        }
        weakref_inherited = False
        # Traverse the MRO to check for an existing __weakref__ slot.
        for base_cls in self._cls.__mro__[1:-1]:
            if "__weakref__" in getattr(base_cls, "__dict__", ()):
                weakref_inherited = True
                break
        names = self._attr_names
        if (
            self._weakref_slot
            and "__weakref__" not in getattr(self._cls, "__slots__", ())
            and "__weakref__" not in names
            and not weakref_inherited
        ):
            names += ("__weakref__",)
        # We only add the names of attributes that aren't inherited.
        # Settings __slots__ to inherited attributes wastes memory.
        slot_names = [name for name in names if name not in base_names]
        if self._cache_hash:
            slot_names.append(_hash_cache_field)
        cd["__slots__"] = tuple(slot_names)
        qualname = getattr(self._cls, "__qualname__", None)
        if qualname is not None:
            cd["__qualname__"] = qualname
        # __weakref__ is not writable.
        state_attr_names = tuple(
            an for an in self._attr_names if an != "__weakref__"
        )

        def slots_getstate(self):
            """
            Automatically created by attrs.
            """
            return tuple(getattr(self, name) for name in state_attr_names)

        def slots_setstate(self, state):
            """
            Automatically created by attrs.
            """
            __bound_setattr = _obj_setattr.__get__(self, Attribute)
            for name, value in zip(state_attr_names, state):
                __bound_setattr(name, value)

        # slots and frozen require __getstate__/__setstate__ to work
        cd["__getstate__"] = slots_getstate
        cd["__setstate__"] = slots_setstate
        # Create new class based on old class and our methods.
        cls = type(self._cls)(self._cls.__name__, self._cls.__bases__, cd)
        # The following is a fix for
        # https://github.com/python-attrs/attrs/issues/102.  On Python 3,
        # if a method mentions `__class__` or uses the no-arg super(), the
        # compiler will bake a reference to the class in the method itself
        # as `method.__closure__`.  Since we replace the class with a
        # clone, we rewrite these references so it keeps working.
        for item in cls.__dict__.values():
            if isinstance(item, (classmethod, staticmethod)):
                # Class- and staticmethods hide their functions inside.
                # These might need to be rewritten as well.
                closure_cells = getattr(item.__func__, "__closure__", None)
            else:
                closure_cells = getattr(item, "__closure__", None)
            if not closure_cells:  # Catch None or the empty list.
                continue
            for cell in closure_cells:
                if cell.cell_contents is self._cls:
                    set_closure_cell(cell, cls)
        return cls

    def add_repr(self, ns):
        self._cls_dict["__repr__"] = self._add_method_dunders(
            _make_repr(self._attrs, ns=ns)
        )
        return self

    def add_str(self):
        repr = self._cls_dict.get("__repr__")
        if repr is None:
            raise ValueError(
                "__str__ can only be generated if a __repr__ exists."
            )

        def __str__(self):
            return self.__repr__()

        self._cls_dict["__str__"] = self._add_method_dunders(__str__)
        return self

    def make_unhashable(self):
        self._cls_dict["__hash__"] = None
        return self

    def add_hash(self):
        self._cls_dict["__hash__"] = self._add_method_dunders(
            _make_hash(
                self._attrs, frozen=self._frozen, cache_hash=self._cache_hash
            )
        )
        return self

    def add_init(self):
        self._cls_dict["__init__"] = self._add_method_dunders(
            _make_init(
                self._attrs,
                self._has_post_init,
                self._frozen,
                self._slots,
                self._cache_hash,
                self._base_attr_map,
            )
        )
        return self

    def add_cmp(self):
        cd = self._cls_dict
        cd["__eq__"], cd["__ne__"], cd["__lt__"], cd["__le__"], cd[
            "__gt__"
        ], cd["__ge__"] = (
            self._add_method_dunders(meth) for meth in _make_cmp(self._attrs)
        )
        return self

    def _add_method_dunders(self, method):
        """
        Add __module__ and __qualname__ to a *method* if possible.
        """
        try:
            # __module__ is not possible for all cases.
            method.__module__ = self._cls.__module__
        except AttributeError:
            pass
        try:
            method.__qualname__ = ".".join(
                (self._cls.__qualname__, method.__name__)
            )
        except AttributeError:
            pass
        return method
def attrs(
    maybe_cls=None,
    these=None,
    repr_ns=None,
    repr=True,
    cmp=True,
    hash=None,
    init=True,
    slots=False,
    frozen=False,
    weakref_slot=True,
    str=False,
    auto_attribs=False,
    kw_only=False,
    cache_hash=False,
):
    """
    Class decorator that adds generated dunder methods according to the
    attributes defined with ``attrib``.

    Works both bare (``@attrs``) and with arguments (``@attrs(...)``):
    *maybe_cls* is the decorated class in the former case and ``None`` in
    the latter, in which case the actual decorator (``wrap``) is returned.

    *hash* is a tri-state switch: ``True``/``False`` force hashing on/off;
    ``None`` (the default) enables it only for frozen classes with
    ``cmp=True`` and otherwise marks the class unhashable.  *cache_hash*
    is only valid when hashing (and ``init``) are enabled.
    """

    def wrap(cls):
        # Old-style (Python 2) classes have no __class__ attribute.
        if getattr(cls, "__class__", None) is None:
            raise TypeError("attrs only works with new-style classes.")
        builder = _ClassBuilder(
            cls,
            these,
            slots,
            frozen,
            weakref_slot,
            auto_attribs,
            kw_only,
            cache_hash,
        )
        if repr is True:
            builder.add_repr(repr_ns)
        if str is True:
            builder.add_str()
        if cmp is True:
            builder.add_cmp()
        if hash is not True and hash is not False and hash is not None:
            # Can't use `hash in` because 1 == True for example.
            raise TypeError(
                "Invalid value for hash. Must be True, False, or None."
            )
        elif hash is False or (hash is None and cmp is False):
            # Hashing disabled -- caching a hash would be meaningless.
            if cache_hash:
                raise TypeError(
                    "Invalid value for cache_hash. To use hash caching,"
                    " hashing must be either explicitly or implicitly "
                    "enabled."
                )
        elif hash is True or (hash is None and cmp is True and frozen is True):
            builder.add_hash()
        else:
            if cache_hash:
                raise TypeError(
                    "Invalid value for cache_hash. To use hash caching,"
                    " hashing must be either explicitly or implicitly "
                    "enabled."
                )
            builder.make_unhashable()
        if init is True:
            builder.add_init()
        else:
            if cache_hash:
                raise TypeError(
                    "Invalid value for cache_hash. To use hash caching,"
                    " init must be True."
                )
        return builder.build_class()

    if maybe_cls is None:
        return wrap
    else:
        return wrap(maybe_cls)


# Internal alias so module code keeps working even where `attrs` is shadowed.
_attrs = attrs
if PY2:

    def _has_frozen_base_class(cls):
        """
        Check whether *cls* has a frozen ancestor by looking at its
        __setattr__.  On Python 2 unbound methods differ per class, so
        compare by module and name instead of identity.
        """
        return (
            getattr(cls.__setattr__, "__module__", None)
            == _frozen_setattrs.__module__
            and cls.__setattr__.__name__ == _frozen_setattrs.__name__
        )


else:

    def _has_frozen_base_class(cls):
        """
        Check whether *cls* has a frozen ancestor by looking at its
        __setattr__.
        """
        return cls.__setattr__ == _frozen_setattrs
def _attrs_to_tuple(obj, attrs):
    """Read every attribute named in *attrs* off *obj*, in order."""
    values = []
    for a in attrs:
        values.append(getattr(obj, a.name))
    return tuple(values)
def _make_hash(attrs, frozen, cache_hash):
    """
    Build and return a ``__hash__`` implementation for *attrs*.

    The method body is generated as source text and compiled; it hashes a
    tuple of ``type_hash`` (derived from the per-layout filename, so equal
    values in different classes hash differently) plus every attribute
    whose ``hash`` flag is True (or unset with ``cmp=True``).  When
    *cache_hash* is true the result is memoized on the instance in
    ``_hash_cache_field``; *frozen* decides how that cache is written.
    """
    attrs = tuple(
        a
        for a in attrs
        if a.hash is True or (a.hash is None and a.cmp is True)
    )
    tab = " "
    # We cache the generated hash methods for the same kinds of attributes.
    sha1 = hashlib.sha1()
    sha1.update(repr(attrs).encode("utf-8"))
    unique_filename = "<attrs generated hash %s>" % (sha1.hexdigest(),)
    type_hash = hash(unique_filename)
    method_lines = ["def __hash__(self):"]

    def append_hash_computation_lines(prefix, indent):
        # Emit "<prefix>hash((type_hash, self.a, self.b, ...))" lines.
        method_lines.extend(
            [indent + prefix + "hash((", indent + " %d," % (type_hash,)]
        )
        for a in attrs:
            method_lines.append(indent + " self.%s," % a.name)
        method_lines.append(indent + " ))")

    if cache_hash:
        method_lines.append(tab + "if self.%s is None:" % _hash_cache_field)
        if frozen:
            append_hash_computation_lines(
                "object.__setattr__(self, '%s', " % _hash_cache_field, tab * 2
            )
            method_lines.append(tab * 2 + ")")  # close __setattr__
        else:
            append_hash_computation_lines(
                "self.%s = " % _hash_cache_field, tab * 2
            )
        method_lines.append(tab + "return self.%s" % _hash_cache_field)
    else:
        append_hash_computation_lines("return ", tab)
    script = "\n".join(method_lines)
    globs = {}
    locs = {}
    bytecode = compile(script, unique_filename, "exec")
    eval(bytecode, globs, locs)
    # In order for debuggers like PDB to be able to step through the code,
    # we add a fake linecache entry.
    linecache.cache[unique_filename] = (
        len(script),
        None,
        script.splitlines(True),
        unique_filename,
    )
    return locs["__hash__"]
def _add_hash(cls, attrs):
    """Attach a freshly generated ``__hash__`` to *cls* and return it."""
    hash_method = _make_hash(attrs, frozen=False, cache_hash=False)
    cls.__hash__ = hash_method
    return cls
def __ne__(self, other):
    """Inequality in terms of ``__eq__``, propagating ``NotImplemented``."""
    eq_result = self.__eq__(other)
    if eq_result is NotImplemented:
        return NotImplemented
    return not eq_result
# Template for the DeprecationWarning emitted by the generated ordering
# methods when invoked across subclasses; %s is the dunder stem
# ("lt", "le", "gt", "ge").  Fixes the "Comparision" spelling.
WARNING_CMP_ISINSTANCE = (
    "Comparison of subclasses using __%s__ is deprecated and will be removed "
    "in 2019."
)
def _make_cmp(attrs):
    """
    Build the six rich-comparison methods for *attrs* (only attributes
    with ``cmp=True`` participate).

    ``__eq__`` is generated as source and compiled; it compares attribute
    tuples only for instances of exactly the same class.  The ordering
    methods compare attribute tuples and emit a DeprecationWarning when
    used across subclasses.  Returns ``(eq, ne, lt, le, gt, ge)``.
    """
    attrs = [a for a in attrs if a.cmp]
    # We cache the generated eq methods for the same kinds of attributes.
    sha1 = hashlib.sha1()
    sha1.update(repr(attrs).encode("utf-8"))
    unique_filename = "<attrs generated eq %s>" % (sha1.hexdigest(),)
    lines = [
        "def __eq__(self, other):",
        " if other.__class__ is not self.__class__:",
        " return NotImplemented",
    ]
    # We can't just do a big self.x = other.x and... clause due to
    # irregularities like nan == nan is false but (nan,) == (nan,) is true.
    if attrs:
        lines.append(" return (")
        others = [" ) == ("]
        for a in attrs:
            lines.append(" self.%s," % (a.name,))
            others.append(" other.%s," % (a.name,))
        lines += others + [" )"]
    else:
        lines.append(" return True")
    script = "\n".join(lines)
    globs = {}
    locs = {}
    bytecode = compile(script, unique_filename, "exec")
    eval(bytecode, globs, locs)
    # Fake linecache entry so debuggers can step through the generated code.
    linecache.cache[unique_filename] = (
        len(script),
        None,
        script.splitlines(True),
        unique_filename,
    )
    eq = locs["__eq__"]
    ne = __ne__

    def attrs_to_tuple(obj):
        return _attrs_to_tuple(obj, attrs)

    def __lt__(self, other):
        if isinstance(other, self.__class__):
            if other.__class__ is not self.__class__:
                warnings.warn(
                    WARNING_CMP_ISINSTANCE % ("lt",), DeprecationWarning
                )
            return attrs_to_tuple(self) < attrs_to_tuple(other)
        else:
            return NotImplemented

    def __le__(self, other):
        if isinstance(other, self.__class__):
            if other.__class__ is not self.__class__:
                warnings.warn(
                    WARNING_CMP_ISINSTANCE % ("le",), DeprecationWarning
                )
            return attrs_to_tuple(self) <= attrs_to_tuple(other)
        else:
            return NotImplemented

    def __gt__(self, other):
        if isinstance(other, self.__class__):
            if other.__class__ is not self.__class__:
                warnings.warn(
                    WARNING_CMP_ISINSTANCE % ("gt",), DeprecationWarning
                )
            return attrs_to_tuple(self) > attrs_to_tuple(other)
        else:
            return NotImplemented

    def __ge__(self, other):
        if isinstance(other, self.__class__):
            if other.__class__ is not self.__class__:
                warnings.warn(
                    WARNING_CMP_ISINSTANCE % ("ge",), DeprecationWarning
                )
            return attrs_to_tuple(self) >= attrs_to_tuple(other)
        else:
            return NotImplemented

    return eq, ne, __lt__, __le__, __gt__, __ge__
def _add_cmp(cls, attrs=None):
    """Attach the six generated comparison methods to *cls* and return it."""
    if attrs is None:
        attrs = cls.__attrs_attrs__
    eq, ne, lt, le, gt, ge = _make_cmp(attrs)
    cls.__eq__ = eq
    cls.__ne__ = ne
    cls.__lt__ = lt
    cls.__le__ = le
    cls.__gt__ = gt
    cls.__ge__ = ge
    return cls
# Thread-local cycle guard for generated __repr__s: holds the ids of the
# instances currently being repr'd on this thread.
_already_repring = threading.local()


def _make_repr(attrs, ns):
    """
    Build a ``__repr__`` honoring each attribute's ``repr`` flag.

    *ns* optionally overrides the namespace prefix of the class name;
    reference cycles render as ``...``.
    """
    attr_names = tuple(a.name for a in attrs if a.repr)

    def __repr__(self):
        try:
            working_set = _already_repring.working_set
        except AttributeError:
            working_set = set()
            _already_repring.working_set = working_set
        if id(self) in working_set:
            return "..."
        real_cls = self.__class__
        if ns is None:
            qualname = getattr(real_cls, "__qualname__", None)
            if qualname is not None:
                # Strip any "<locals>." prefix for nested classes.
                class_name = qualname.rsplit(">.", 1)[-1]
            else:
                class_name = real_cls.__name__
        else:
            class_name = ns + "." + real_cls.__name__
        # Track ids instead of the instances themselves so that we do
        # not need to track the instance and therefore worry about properties
        # like weakref- or hash-ability.
        working_set.add(id(self))
        try:
            result = [class_name, "("]
            first = True
            for name in attr_names:
                if first:
                    first = False
                else:
                    result.append(", ")
                result.extend((name, "=", repr(getattr(self, name, NOTHING))))
            return "".join(result) + ")"
        finally:
            working_set.remove(id(self))

    return __repr__
def _add_repr(cls, ns=None, attrs=None):
    """Attach a generated ``__repr__`` to *cls* and return it."""
    if attrs is None:
        attrs = cls.__attrs_attrs__
    repr_method = _make_repr(attrs, ns)
    cls.__repr__ = repr_method
    return cls
def _make_init(attrs, post_init, frozen, slots, cache_hash, base_attr_map):
    """
    Build an ``__init__`` for *attrs* by generating source text with
    `_attrs_to_init_script` and compiling it.  Attributes with
    ``init=False`` and no default are skipped entirely.
    """
    attrs = [a for a in attrs if a.init or a.default is not NOTHING]
    # We cache the generated init methods for the same kinds of attributes.
    sha1 = hashlib.sha1()
    sha1.update(repr(attrs).encode("utf-8"))
    unique_filename = "<attrs generated init {0}>".format(sha1.hexdigest())
    script, globs, annotations = _attrs_to_init_script(
        attrs, frozen, slots, post_init, cache_hash, base_attr_map
    )
    locs = {}
    bytecode = compile(script, unique_filename, "exec")
    attr_dict = dict((a.name, a) for a in attrs)
    globs.update({"NOTHING": NOTHING, "attr_dict": attr_dict})
    if frozen is True:
        # Save the lookup overhead in __init__ if we need to circumvent
        # immutability.
        globs["_cached_setattr"] = _obj_setattr
    eval(bytecode, globs, locs)
    # In order for debuggers like PDB to be able to step through the code,
    # we add a fake linecache entry.
    linecache.cache[unique_filename] = (
        len(script),
        None,
        script.splitlines(True),
        unique_filename,
    )
    __init__ = locs["__init__"]
    __init__.__annotations__ = annotations
    return __init__
def _add_init(cls, frozen):
    """Attach a generated ``__init__`` to *cls* and return it."""
    post_init = getattr(cls, "__attrs_post_init__", False)
    cls.__init__ = _make_init(
        cls.__attrs_attrs__,
        post_init,
        frozen,
        _is_slot_cls(cls),
        cache_hash=False,
        base_attr_map={},
    )
    return cls
def fields(cls):
    """
    Return the tuple of attrs attributes for *cls*.

    :raises TypeError: if *cls* is not a class.
    :raises NotAnAttrsClassError: if *cls* is not an attrs class.
    """
    if not isclass(cls):
        raise TypeError("Passed object must be a class.")
    found = getattr(cls, "__attrs_attrs__", None)
    if found is None:
        msg = "{cls!r} is not an attrs-decorated class.".format(cls=cls)
        raise NotAnAttrsClassError(msg)
    return found
def fields_dict(cls):
    """
    Return an ordered name -> Attribute mapping for *cls*.

    :raises TypeError: if *cls* is not a class.
    :raises NotAnAttrsClassError: if *cls* is not an attrs class.
    """
    if not isclass(cls):
        raise TypeError("Passed object must be a class.")
    found = getattr(cls, "__attrs_attrs__", None)
    if found is None:
        msg = "{cls!r} is not an attrs-decorated class.".format(cls=cls)
        raise NotAnAttrsClassError(msg)
    return ordered_dict(((a.name, a) for a in found))
def validate(inst):
    """
    Run *inst*'s attribute validators; a no-op while validators are
    globally disabled via ``_config``.
    """
    if _config._run_validators is False:
        return
    for a in fields(inst.__class__):
        validator = a.validator
        if validator is not None:
            validator(inst, a, getattr(inst, a.name))
def _is_slot_cls(cls):
    """True iff *cls* itself (not a base class) defines ``__slots__``."""
    return "__slots__" in vars(cls)
def _is_slot_attr(a_name, base_attr_map):
    """True iff attribute *a_name* is defined on a slotted base class."""
    if a_name not in base_attr_map:
        return False
    return _is_slot_cls(base_attr_map[a_name])
def _attrs_to_init_script(
    attrs, frozen, slots, post_init, cache_hash, base_attr_map
):
    """
    Return the source of an ``__init__`` for *attrs*, together with the
    globals dict it needs and the annotation dict to attach to it.

    The generated body assigns every attribute (handling plain defaults,
    `Factory` defaults, converters, and keyword-only arguments), then runs
    the validators, ``__attrs_post_init__``, and hash-cache
    initialization.  How an attribute is *written* depends on
    frozen/slots: frozen slotted classes use a cached ``object.__setattr__``;
    frozen dict classes write ``__dict__`` directly (except for attributes
    inherited from slotted ancestors); mutable classes use ``self.x = ...``.
    """
    lines = []
    any_slot_ancestors = any(
        _is_slot_attr(a.name, base_attr_map) for a in attrs
    )
    if frozen is True:
        if slots is True:
            lines.append(
                # Circumvent the __setattr__ descriptor to save one lookup per
                # assignment.
                # Note _setattr will be used again below if cache_hash is True
                "_setattr = _cached_setattr.__get__(self, self.__class__)"
            )

            def fmt_setter(attr_name, value_var):
                return "_setattr('%(attr_name)s', %(value_var)s)" % {
                    "attr_name": attr_name,
                    "value_var": value_var,
                }

            def fmt_setter_with_converter(attr_name, value_var):
                conv_name = _init_converter_pat.format(attr_name)
                return "_setattr('%(attr_name)s', %(conv)s(%(value_var)s))" % {
                    "attr_name": attr_name,
                    "value_var": value_var,
                    "conv": conv_name,
                }

        else:
            # Dict frozen classes assign directly to __dict__.
            # But only if the attribute doesn't come from an ancestor slot
            # class.
            lines.append("_inst_dict = self.__dict__")
            if any_slot_ancestors:
                lines.append(
                    "_setattr = _cached_setattr.__get__(self, self.__class__)"
                )

            def fmt_setter(attr_name, value_var):
                if _is_slot_attr(attr_name, base_attr_map):
                    res = "_setattr('%(attr_name)s', %(value_var)s)" % {
                        "attr_name": attr_name,
                        "value_var": value_var,
                    }
                else:
                    res = "_inst_dict['%(attr_name)s'] = %(value_var)s" % {
                        "attr_name": attr_name,
                        "value_var": value_var,
                    }
                return res

            def fmt_setter_with_converter(attr_name, value_var):
                conv_name = _init_converter_pat.format(attr_name)
                if _is_slot_attr(attr_name, base_attr_map):
                    tmpl = "_setattr('%(attr_name)s', %(c)s(%(value_var)s))"
                else:
                    tmpl = "_inst_dict['%(attr_name)s'] = %(c)s(%(value_var)s)"
                return tmpl % {
                    "attr_name": attr_name,
                    "value_var": value_var,
                    "c": conv_name,
                }

    else:
        # Not frozen: plain attribute assignment.
        def fmt_setter(attr_name, value):
            return "self.%(attr_name)s = %(value)s" % {
                "attr_name": attr_name,
                "value": value,
            }

        def fmt_setter_with_converter(attr_name, value_var):
            conv_name = _init_converter_pat.format(attr_name)
            return "self.%(attr_name)s = %(conv)s(%(value_var)s)" % {
                "attr_name": attr_name,
                "value_var": value_var,
                "conv": conv_name,
            }

    args = []
    kw_only_args = []
    attrs_to_validate = []
    # Names the generated source must be able to resolve at run time.
    names_for_globals = {}
    annotations = {"return": None}
    for a in attrs:
        if a.validator:
            attrs_to_validate.append(a)
        attr_name = a.name
        # Private attributes get public argument names.
        arg_name = a.name.lstrip("_")
        has_factory = isinstance(a.default, Factory)
        if has_factory and a.default.takes_self:
            maybe_self = "self"
        else:
            maybe_self = ""
        if a.init is False:
            # init=False attributes are always assigned their default.
            if has_factory:
                init_factory_name = _init_factory_pat.format(a.name)
                if a.converter is not None:
                    lines.append(
                        fmt_setter_with_converter(
                            attr_name,
                            init_factory_name + "({0})".format(maybe_self),
                        )
                    )
                    conv_name = _init_converter_pat.format(a.name)
                    names_for_globals[conv_name] = a.converter
                else:
                    lines.append(
                        fmt_setter(
                            attr_name,
                            init_factory_name + "({0})".format(maybe_self),
                        )
                    )
                names_for_globals[init_factory_name] = a.default.factory
            else:
                if a.converter is not None:
                    lines.append(
                        fmt_setter_with_converter(
                            attr_name,
                            "attr_dict['{attr_name}'].default".format(
                                attr_name=attr_name
                            ),
                        )
                    )
                    conv_name = _init_converter_pat.format(a.name)
                    names_for_globals[conv_name] = a.converter
                else:
                    lines.append(
                        fmt_setter(
                            attr_name,
                            "attr_dict['{attr_name}'].default".format(
                                attr_name=attr_name
                            ),
                        )
                    )
        elif a.default is not NOTHING and not has_factory:
            # Plain default: expose it through attr_dict in the signature.
            arg = "{arg_name}=attr_dict['{attr_name}'].default".format(
                arg_name=arg_name, attr_name=attr_name
            )
            if a.kw_only:
                kw_only_args.append(arg)
            else:
                args.append(arg)
            if a.converter is not None:
                lines.append(fmt_setter_with_converter(attr_name, arg_name))
                names_for_globals[
                    _init_converter_pat.format(a.name)
                ] = a.converter
            else:
                lines.append(fmt_setter(attr_name, arg_name))
        elif has_factory:
            # Factory default: NOTHING is the "not passed" sentinel.
            arg = "{arg_name}=NOTHING".format(arg_name=arg_name)
            if a.kw_only:
                kw_only_args.append(arg)
            else:
                args.append(arg)
            lines.append(
                "if {arg_name} is not NOTHING:".format(arg_name=arg_name)
            )
            init_factory_name = _init_factory_pat.format(a.name)
            if a.converter is not None:
                lines.append(
                    " " + fmt_setter_with_converter(attr_name, arg_name)
                )
                lines.append("else:")
                lines.append(
                    " "
                    + fmt_setter_with_converter(
                        attr_name,
                        init_factory_name + "({0})".format(maybe_self),
                    )
                )
                names_for_globals[
                    _init_converter_pat.format(a.name)
                ] = a.converter
            else:
                lines.append(" " + fmt_setter(attr_name, arg_name))
                lines.append("else:")
                lines.append(
                    " "
                    + fmt_setter(
                        attr_name,
                        init_factory_name + "({0})".format(maybe_self),
                    )
                )
                names_for_globals[init_factory_name] = a.default.factory
        else:
            # Mandatory argument: no default at all.
            if a.kw_only:
                kw_only_args.append(arg_name)
            else:
                args.append(arg_name)
            if a.converter is not None:
                lines.append(fmt_setter_with_converter(attr_name, arg_name))
                names_for_globals[
                    _init_converter_pat.format(a.name)
                ] = a.converter
            else:
                lines.append(fmt_setter(attr_name, arg_name))
        # Converters make the effective type of the argument unknowable.
        if a.init is True and a.converter is None and a.type is not None:
            annotations[arg_name] = a.type
    if attrs_to_validate:
        names_for_globals["_config"] = _config
        lines.append("if _config._run_validators is True:")
        for a in attrs_to_validate:
            val_name = "__attr_validator_{}".format(a.name)
            attr_name = "__attr_{}".format(a.name)
            lines.append(
                " {}(self, {}, self.{})".format(val_name, attr_name, a.name)
            )
            names_for_globals[val_name] = a.validator
            names_for_globals[attr_name] = a
    if post_init:
        lines.append("self.__attrs_post_init__()")
    # Initialize the hash cache last so __attrs_post_init__ can't see a
    # stale value; the write style must match the chosen setter strategy.
    if cache_hash:
        if frozen:
            if slots:
                init_hash_cache = "_setattr('%s', %s)"
            else:
                init_hash_cache = "_inst_dict['%s'] = %s"
        else:
            init_hash_cache = "self.%s = %s"
        lines.append(init_hash_cache % (_hash_cache_field, "None"))
    args = ", ".join(args)
    if kw_only_args:
        if PY2:
            raise PythonTooOldError(
                "Keyword-only arguments only work on Python 3 and later."
            )
        args += "{leading_comma}*, {kw_only_args}".format(
            leading_comma=", " if args else "",
            kw_only_args=", ".join(kw_only_args),
        )
    return (
        """\
def __init__(self, {args}):
 {lines}
""".format(
            args=args, lines="\n ".join(lines) if lines else "pass"
        ),
        names_for_globals,
        annotations,
    )
class Attribute(object):
    """
    *Read-only* representation of an attribute.

    Instances are frozen: plain attribute assignment raises
    `FrozenInstanceError`; internal writes go through ``object.__setattr__``.
    """

    __slots__ = (
        "name",
        "default",
        "validator",
        "repr",
        "cmp",
        "hash",
        "init",
        "metadata",
        "type",
        "converter",
        "kw_only",
    )

    def __init__(
        self,
        name,
        default,
        validator,
        repr,
        cmp,
        hash,
        init,
        convert=None,
        metadata=None,
        type=None,
        converter=None,
        kw_only=False,
    ):
        # All writes must bypass our frozen __setattr__.
        bound_setattr = _obj_setattr.__get__(self, Attribute)
        # `convert` is the deprecated spelling of `converter`.
        if convert is not None:
            if converter is not None:
                raise RuntimeError(
                    "Can't pass both `convert` and `converter`. "
                    "Please use `converter` only."
                )
            warnings.warn(
                "The `convert` argument is deprecated in favor of `converter`."
                " It will be removed after 2019/01.",
                DeprecationWarning,
                stacklevel=2,
            )
            converter = convert
        bound_setattr("name", name)
        bound_setattr("default", default)
        bound_setattr("validator", validator)
        bound_setattr("repr", repr)
        bound_setattr("cmp", cmp)
        bound_setattr("hash", hash)
        bound_setattr("init", init)
        bound_setattr("converter", converter)
        bound_setattr(
            "metadata",
            (
                metadata_proxy(metadata)
                if metadata
                else _empty_metadata_singleton
            ),
        )
        bound_setattr("type", type)
        bound_setattr("kw_only", kw_only)

    def __setattr__(self, name, value):
        # Attributes are immutable once constructed.
        raise FrozenInstanceError()

    @property
    def convert(self):
        """Deprecated alias for `converter`."""
        warnings.warn(
            "The `convert` attribute is deprecated in favor of `converter`. "
            "It will be removed after 2019/01.",
            DeprecationWarning,
            stacklevel=2,
        )
        return self.converter

    @classmethod
    def from_counting_attr(cls, name, ca, type=None):
        """Build an `Attribute` from a `_CountingAttr` collected off a class."""
        # type holds the annotated value. deal with conflicts:
        if type is None:
            type = ca.type
        elif ca.type is not None:
            raise ValueError(
                "Type annotation and type argument cannot both be present"
            )
        inst_dict = {
            k: getattr(ca, k)
            for k in Attribute.__slots__
            if k
            not in (
                "name",
                "validator",
                "default",
                "type",
                "convert",
            ) # exclude methods and deprecated alias
        }
        return cls(
            name=name,
            validator=ca._validator,
            default=ca._default,
            type=type,
            **inst_dict
        )

    # Don't use attr.assoc since fields(Attribute) doesn't work
    def _assoc(self, **changes):
        """Return a shallow copy with *changes* applied."""
        new = copy.copy(self)
        new._setattrs(changes.items())
        return new

    # Don't use _add_pickle since fields(Attribute) doesn't work
    def __getstate__(self):
        # metadata is a read-only proxy; pickle a plain dict instead.
        return tuple(
            getattr(self, name) if name != "metadata" else dict(self.metadata)
            for name in self.__slots__
        )

    def __setstate__(self, state):
        self._setattrs(zip(self.__slots__, state))

    def _setattrs(self, name_values_pairs):
        # Bulk-set attributes, re-wrapping metadata in a read-only proxy.
        bound_setattr = _obj_setattr.__get__(self, Attribute)
        for name, value in name_values_pairs:
            if name != "metadata":
                bound_setattr(name, value)
            else:
                bound_setattr(
                    name,
                    metadata_proxy(value)
                    if value
                    else _empty_metadata_singleton,
                )
# Bootstrap Attribute's own attribute metadata by hand -- we can't use
# @attrs on the class the machinery itself depends on.
_a = [
    Attribute(
        name=name,
        default=NOTHING,
        validator=None,
        repr=True,
        cmp=True,
        hash=(name != "metadata"),
        init=True,
    )
    for name in Attribute.__slots__
    if name != "convert"  # XXX: remove once `convert` is gone
]
# Finish bootstrapping: give Attribute itself generated __hash__, __repr__,
# and comparison methods (metadata is excluded from hashing above).
Attribute = _add_hash(
    _add_cmp(_add_repr(Attribute, attrs=_a), attrs=_a),
    attrs=[a for a in _a if a.hash],
)
class _CountingAttr(object):
    """
    Intermediate representation of attributes created by ``attrib``.

    The class-level ``cls_counter`` increases with every instantiation, so
    ``counter`` preserves the definition order of attributes within a
    class body.
    """

    __slots__ = (
        "counter",
        "_default",
        "repr",
        "cmp",
        "hash",
        "init",
        "metadata",
        "_validator",
        "converter",
        "type",
        "kw_only",
    )
    # Hand-built metadata (same bootstrap constraint as Attribute).
    __attrs_attrs__ = tuple(
        Attribute(
            name=name,
            default=NOTHING,
            validator=None,
            repr=True,
            cmp=True,
            hash=True,
            init=True,
            kw_only=False,
        )
        for name in ("counter", "_default", "repr", "cmp", "hash", "init")
    ) + (
        Attribute(
            name="metadata",
            default=None,
            validator=None,
            repr=True,
            cmp=True,
            hash=False,
            init=True,
            kw_only=False,
        ),
    )
    cls_counter = 0

    def __init__(
        self,
        default,
        validator,
        repr,
        cmp,
        hash,
        init,
        converter,
        metadata,
        type,
        kw_only,
    ):
        _CountingAttr.cls_counter += 1
        self.counter = _CountingAttr.cls_counter
        self._default = default
        # If validator is a list/tuple, wrap it using helper validator.
        if validator and isinstance(validator, (list, tuple)):
            self._validator = and_(*validator)
        else:
            self._validator = validator
        self.repr = repr
        self.cmp = cmp
        self.hash = hash
        self.init = init
        self.converter = converter
        self.metadata = metadata
        self.type = type
        self.kw_only = kw_only

    def validator(self, meth):
        """
        Decorator: AND *meth* onto the attribute's validators and return
        *meth* unchanged.
        """
        if self._validator is None:
            self._validator = meth
        else:
            self._validator = and_(self._validator, meth)
        return meth

    def default(self, meth):
        """
        Decorator: register *meth* as a ``takes_self`` default factory.

        Raises DefaultAlreadySetError if a default was already set.
        """
        if self._default is not NOTHING:
            raise DefaultAlreadySetError()
        self._default = Factory(meth, takes_self=True)
        return meth


# Give _CountingAttr its own generated __repr__ and comparison methods.
_CountingAttr = _add_cmp(_add_repr(_CountingAttr))
@attrs(slots=True, init=False, hash=True)
class Factory(object):
    """
    Stores a factory callable for a default value.

    If *takes_self* is true the factory is called with the (partially
    initialized) instance as its only argument.  ``__init__`` is written
    by hand (``init=False``) so *takes_self* can default to ``False``.
    """

    factory = attrib()
    takes_self = attrib()

    def __init__(self, factory, takes_self=False):
        self.factory = factory
        self.takes_self = takes_self
def make_class(name, attrs, bases=(object,), **attributes_arguments):
    """
    Programmatically create a new attrs class called *name*.

    *attrs* is either a dict mapping names to `_CountingAttr`s or a
    list/tuple of names (which receive bare ``attrib()``s).  Any extra
    keyword arguments are passed through to the `attrs` decorator.
    """
    if isinstance(attrs, dict):
        cls_dict = attrs
    elif isinstance(attrs, (list, tuple)):
        cls_dict = dict((a, attrib()) for a in attrs)
    else:
        raise TypeError("attrs argument must be a dict or a list.")
    # __attrs_post_init__ must live on the class, not be treated as an
    # attribute definition.
    post_init = cls_dict.pop("__attrs_post_init__", None)
    type_ = type(
        name,
        bases,
        {} if post_init is None else {"__attrs_post_init__": post_init},
    )
    # For pickling to work, the __module__ variable needs to be set to the
    # frame where the class is created. Bypass this step in environments where
    # sys._getframe is not defined (Jython for example) or sys._getframe is not
    # defined for arguments greater than 0 (IronPython).
    try:
        type_.__module__ = sys._getframe(1).f_globals.get(
            "__name__", "__main__"
        )
    except (AttributeError, ValueError):
        pass
    return _attrs(these=cls_dict, **attributes_arguments)(type_)
# These are required within this module, so we define them here and merely
# import them into .validators.
@attrs(slots=True, hash=True)
class _AndValidator(object):
    """Composed validator: runs each wrapped validator in order."""

    _validators = attrib()

    def __call__(self, inst, attr, value):
        for v in self._validators:
            v(inst, attr, value)
def and_(*validators):
    """
    Return a validator that runs all *validators* in order.

    Nested `_AndValidator`s are flattened into one level.
    """
    flattened = []
    for validator in validators:
        if isinstance(validator, _AndValidator):
            flattened.extend(validator._validators)
        else:
            flattened.append(validator)
    return _AndValidator(tuple(flattened))
| true | true |
f7fd0651dececa1cf9a855ddc9e39003857e7617 | 1,943 | py | Python | Chapter12/object_detection.py | poshan0126/TensorFlow-Machine-Learning-Projects | 5b8d7988b86e39ffdd8babec2b8e3c791b5e853e | [
"MIT"
] | 74 | 2019-01-19T23:37:26.000Z | 2022-03-30T12:59:34.000Z | Chapter12/object_detection.py | poshan0126/TensorFlow-Machine-Learning-Projects | 5b8d7988b86e39ffdd8babec2b8e3c791b5e853e | [
"MIT"
] | 46 | 2019-01-23T05:02:43.000Z | 2022-03-12T01:04:37.000Z | Chapter12/object_detection.py | poshan0126/TensorFlow-Machine-Learning-Projects | 5b8d7988b86e39ffdd8babec2b8e3c791b5e853e | [
"MIT"
] | 54 | 2018-08-07T00:56:48.000Z | 2022-03-22T18:06:37.000Z | # To allow Python to find Spark driver
# Locate the Spark installation before any pyspark import.
import findspark
findspark.init('/home/ubuntu/spark-2.4.0-bin-hadoop2.7')

import os

# Pull in the spark-deep-learning package when the JVM is launched.
SUBMIT_ARGS = "--packages databricks:spark-deep-learning:1.3.0-spark2.4-s_2.11 pyspark-shell"
os.environ["PYSPARK_SUBMIT_ARGS"] = SUBMIT_ARGS

from pyspark.sql import SparkSession

# Session with generous on-/off-heap memory for image featurization.
spark = (
    SparkSession.builder
    .appName("ImageClassification")
    .config("spark.executor.memory", "70g")
    .config("spark.driver.memory", "50g")
    .config("spark.memory.offHeap.enabled", True)
    .config("spark.memory.offHeap.size", "16g")
    .getOrCreate()
)

import pyspark.sql.functions as f
import sparkdl as dl
from pyspark.ml.image import ImageSchema

# Label buses as 0 and cars as 1.
dfbuses = ImageSchema.readImages('data/buses/').withColumn('label', f.lit(0))
dfcars = ImageSchema.readImages('data/cars/').withColumn('label', f.lit(1))
dfbuses.show(5)
dfcars.show(5)

# 60/40 train/test split per class; fixed seeds for reproducibility.
trainDFbuses, testDFbuses = dfbuses.randomSplit([0.60, 0.40], seed=123)
trainDFcars, testDFcars = dfcars.randomSplit([0.60, 0.40], seed=122)
trainDF = trainDFbuses.unionAll(trainDFcars)
testDF = testDFbuses.unionAll(testDFcars)

from pyspark.ml.classification import LogisticRegression
from pyspark.ml import Pipeline

# Pretrained InceptionV3 features feeding a logistic-regression head.
vectorizer = dl.DeepImageFeaturizer(inputCol="image", outputCol="features",
                                    modelName="InceptionV3")
logreg = LogisticRegression(maxIter=30, labelCol="label")
pipeline = Pipeline(stages=[vectorizer, logreg])
pipeline_model = pipeline.fit(trainDF)

# Score the held-out set and show the confusion table.
predictDF = pipeline_model.transform(testDF)
predictDF.select('prediction', 'label').show(n=testDF.toPandas().shape[0], truncate=False)
predictDF.crosstab('prediction', 'label').show()

from pyspark.ml.evaluation import MulticlassClassificationEvaluator

scoring = predictDF.select("prediction", "label")
accuracy_score = MulticlassClassificationEvaluator(metricName="accuracy")
rate = accuracy_score.evaluate(scoring) * 100
print("accuracy: {}%" .format(round(rate, 2)))
| 35.327273 | 93 | 0.750901 |
# Make the Spark driver discoverable before importing pyspark.
import findspark
findspark.init('/home/ubuntu/spark-2.4.0-bin-hadoop2.7')
import os
# Request the spark-deep-learning package at JVM launch.
SUBMIT_ARGS = "--packages databricks:spark-deep-learning:1.3.0-spark2.4-s_2.11 pyspark-shell"
os.environ["PYSPARK_SUBMIT_ARGS"] = SUBMIT_ARGS
from pyspark.sql import SparkSession
# Build a session with generous on-/off-heap memory for image featurization.
spark = SparkSession.builder \
    .appName("ImageClassification") \
    .config("spark.executor.memory", "70g") \
    .config("spark.driver.memory", "50g") \
    .config("spark.memory.offHeap.enabled",True) \
    .config("spark.memory.offHeap.size","16g") \
    .getOrCreate()
import pyspark.sql.functions as f
import sparkdl as dl
from pyspark.ml.image import ImageSchema
# Label buses 0, cars 1.
dfbuses = ImageSchema.readImages('data/buses/').withColumn('label', f.lit(0))
dfcars = ImageSchema.readImages('data/cars/').withColumn('label', f.lit(1))
dfbuses.show(5)
dfcars.show(5)
# 60/40 split per class with fixed seeds.
trainDFbuses, testDFbuses = dfbuses.randomSplit([0.60,0.40], seed = 123)
trainDFcars, testDFcars = dfcars.randomSplit([0.60,0.40], seed = 122)
trainDF = trainDFbuses.unionAll(trainDFcars)
testDF = testDFbuses.unionAll(testDFcars)
from pyspark.ml.classification import LogisticRegression
from pyspark.ml import Pipeline
# Pretrained InceptionV3 features feeding a logistic-regression head.
vectorizer = dl.DeepImageFeaturizer(inputCol="image", outputCol="features",
                            modelName="InceptionV3")
logreg = LogisticRegression(maxIter=30, labelCol="label")
pipeline = Pipeline(stages=[vectorizer, logreg])
pipeline_model = pipeline.fit(trainDF)
# Score the held-out set and show the confusion table.
predictDF = pipeline_model.transform(testDF)
predictDF.select('prediction', 'label').show(n = testDF.toPandas().shape[0], truncate=False)
predictDF.crosstab('prediction', 'label').show()
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
scoring = predictDF.select("prediction", "label")
accuracy_score = MulticlassClassificationEvaluator(metricName="accuracy")
rate = accuracy_score.evaluate(scoring)*100
print("accuracy: {}%" .format(round(rate,2)))
| true | true |
f7fd0758dd89f675c47e152e462219a5f97f6d03 | 310 | py | Python | setup.py | ShuzhiLiu/MaskRCNN | 02602c20f17d583621e1baff6d81901c0869098e | [
"MIT"
] | 9 | 2020-02-16T04:05:58.000Z | 2022-02-08T05:23:45.000Z | setup.py | ShuzhiLiu/MaskRCNN | 02602c20f17d583621e1baff6d81901c0869098e | [
"MIT"
] | 1 | 2020-04-03T11:41:48.000Z | 2020-04-04T07:03:59.000Z | setup.py | ShuzhiLiu/MaskRCNN | 02602c20f17d583621e1baff6d81901c0869098e | [
"MIT"
] | 2 | 2020-04-03T12:36:02.000Z | 2020-06-28T07:52:36.000Z | import os
# Output directories the application expects at runtime.
dir_list = ['SavedModels',
            'SavedDebugImages',
            'TensorboardLogs']

# Create each directory unless it is already present.
for directory in dir_list:
    if os.path.isdir(directory):
        continue
    print(f"directory: {directory} doesn't exist, create now")
    os.mkdir(directory)
    print(f"created directory: {directory}")
| 25.833333 | 66 | 0.632258 | import os
# Output directories the application expects at runtime.
dir_list = ['SavedModels',
            'SavedDebugImages',
            'TensorboardLogs']
# Create any directory that does not exist yet.
for directory in dir_list:
    if not os.path.isdir(directory):
        print(f"directory: {directory} doesn't exist, create now")
        os.mkdir(directory)
        print(f"created directory: {directory}")
| true | true |
f7fd0765402602733631ae03e2147097f87c6cdc | 376 | py | Python | rapidsms/contrib/messagelog/tables.py | catalpainternational/rapidsms | eb7234d04ceb31e4d57187f2d6ba2806d0c54e15 | [
"BSD-3-Clause"
] | 330 | 2015-01-11T03:00:14.000Z | 2022-03-21T11:34:23.000Z | rapidsms/contrib/messagelog/tables.py | catalpainternational/rapidsms | eb7234d04ceb31e4d57187f2d6ba2806d0c54e15 | [
"BSD-3-Clause"
] | 45 | 2015-01-06T16:14:19.000Z | 2022-03-16T13:12:53.000Z | rapidsms/contrib/messagelog/tables.py | catalpainternational/rapidsms | eb7234d04ceb31e4d57187f2d6ba2806d0c54e15 | [
"BSD-3-Clause"
] | 166 | 2015-01-30T19:53:38.000Z | 2021-11-09T18:44:44.000Z | #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import django_tables2 as tables
from rapidsms.contrib.messagelog.models import Message
class MessageTable(tables.Table):
    """django-tables2 rendering of logged messages, newest first."""

    class Meta:
        model = Message
        order_by = ('-date', )
        exclude = ('id', )
        attrs = {
            'class': 'table table-striped table-bordered table-condensed'
        }
| 22.117647 | 73 | 0.617021 |
import django_tables2 as tables
from rapidsms.contrib.messagelog.models import Message
class MessageTable(tables.Table):
    """django-tables2 table configuration for logged messages."""

    class Meta:
        model = Message
        # Hide the surrogate primary-key column.
        exclude = ('id', )
        # Newest messages first.
        order_by = ('-date', )
        # Bootstrap styling for the rendered <table>.
        attrs = {
            'class': 'table table-striped table-bordered table-condensed'
        }
| true | true |
f7fd08828e31e14203c94603aec1e4a1bc2ec669 | 12,054 | py | Python | python/ccxt/async_support/base/exchange.py | invao/ccxt | a255988654c0052adb2ccc99cff54e4e0c28028d | [
"MIT"
] | null | null | null | python/ccxt/async_support/base/exchange.py | invao/ccxt | a255988654c0052adb2ccc99cff54e4e0c28028d | [
"MIT"
] | null | null | null | python/ccxt/async_support/base/exchange.py | invao/ccxt | a255988654c0052adb2ccc99cff54e4e0c28028d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
__version__ = '1.18.472'
# -----------------------------------------------------------------------------
import asyncio
import concurrent
import socket
import time
import math
import random
import certifi
import aiohttp
import ssl
import sys
import yarl
# -----------------------------------------------------------------------------
from ccxt.async_support.base.throttle import throttle
# -----------------------------------------------------------------------------
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import RequestTimeout
from ccxt.base.errors import NotSupported
# -----------------------------------------------------------------------------
from ccxt.base.exchange import Exchange as BaseExchange
# -----------------------------------------------------------------------------
__all__ = [
'BaseExchange',
'Exchange',
]
# -----------------------------------------------------------------------------
class Exchange(BaseExchange):
    def __init__(self, config={}):
        """
        Create the async exchange.

        *config* may carry an ``asyncio_loop`` and an aiohttp ``session``;
        when no session is supplied, one is created here and owned (and
        later closed) by this instance.  NOTE(review): the mutable default
        ``config={}`` is shared across calls -- it is only read here, but
        verify callers never mutate it.
        """
        if 'asyncio_loop' in config:
            self.asyncio_loop = config['asyncio_loop']
        self.asyncio_loop = self.asyncio_loop or asyncio.get_event_loop()
        # A caller-provided session must not be closed by us.
        self.own_session = 'session' not in config
        self.cafile = config.get('cafile', certifi.where())
        self.open()
        super(Exchange, self).__init__(config)
        self.init_rest_rate_limiter()
def init_rest_rate_limiter(self):
self.throttle = throttle(self.extend({
'loop': self.asyncio_loop,
}, self.tokenBucket))
    def __del__(self):
        # A session still open at GC time means the user never awaited
        # close(); warn loudly instead of leaking the connector silently.
        if self.session is not None:
            self.logger.warning(self.id + " requires to release all resources with an explicit call to the .close() coroutine. If you are using the exchange instance with async coroutines, add exchange.close() to your code into a place when you're done with the exchange and don't need the exchange instance anymore (at the end of your async coroutine).")
    # Async context-manager support (``async with`` needs Python >= 3.5).
    if sys.version_info >= (3, 5):
        async def __aenter__(self):
            self.open()
            return self

        async def __aexit__(self, exc_type, exc, tb):
            await self.close()
def open(self):
if self.own_session and self.session is None:
# Create our SSL context object with our CA cert file
context = ssl.create_default_context(cafile=self.cafile)
# Pass this SSL context to aiohttp and create a TCPConnector
connector = aiohttp.TCPConnector(ssl=context, loop=self.asyncio_loop)
self.session = aiohttp.ClientSession(loop=self.asyncio_loop, connector=connector, trust_env=self.aiohttp_trust_env)
async def close(self):
if self.session is not None:
if self.own_session:
await self.session.close()
self.session = None
async def wait_for_token(self):
while self.rateLimitTokens <= 1:
# if self.verbose:
# print('Waiting for tokens: Exchange: {0}'.format(self.id))
self.add_new_tokens()
seconds_delays = [0.001, 0.005, 0.022, 0.106, 0.5]
delay = random.choice(seconds_delays)
await asyncio.sleep(delay)
self.rateLimitTokens -= 1
def add_new_tokens(self):
# if self.verbose:
# print('Adding new tokens: Exchange: {0}'.format(self.id))
now = time.monotonic()
time_since_update = now - self.rateLimitUpdateTime
new_tokens = math.floor((0.8 * 1000.0 * time_since_update) / self.rateLimit)
if new_tokens > 1:
self.rateLimitTokens = min(self.rateLimitTokens + new_tokens, self.rateLimitMaxTokens)
self.rateLimitUpdateTime = now
async def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None):
"""A better wrapper over request for deferred signing"""
if self.enableRateLimit:
await self.throttle()
self.lastRestRequestTimestamp = self.milliseconds()
request = self.sign(path, api, method, params, headers, body)
return await self.fetch(request['url'], request['method'], request['headers'], request['body'])
async def fetch(self, url, method='GET', headers=None, body=None):
"""Perform a HTTP request and return decoded JSON data"""
request_headers = self.prepare_request_headers(headers)
url = self.proxy + url
if self.verbose:
print("\nRequest:", method, url, headers, body)
self.logger.debug("%s %s, Request: %s %s", method, url, headers, body)
encoded_body = body.encode() if body else None
session_method = getattr(self.session, method.lower())
response = None
http_response = None
json_response = None
try:
async with session_method(yarl.URL(url, encoded=True),
data=encoded_body,
headers=request_headers,
timeout=(self.timeout / 1000),
proxy=self.aiohttp_proxy) as response:
http_response = await response.text()
json_response = self.parse_json(http_response) if self.is_json_encoded_object(http_response) else None
headers = response.headers
if self.enableLastHttpResponse:
self.last_http_response = http_response
if self.enableLastResponseHeaders:
self.last_response_headers = headers
if self.enableLastJsonResponse:
self.last_json_response = json_response
if self.verbose:
print("\nResponse:", method, url, response.status, headers, http_response)
self.logger.debug("%s %s, Response: %s %s %s", method, url, response.status, headers, http_response)
except socket.gaierror as e:
self.raise_error(ExchangeNotAvailable, url, method, e, None)
except concurrent.futures._base.TimeoutError as e:
self.raise_error(RequestTimeout, method, url, e, None)
except aiohttp.client_exceptions.ClientConnectionError as e:
self.raise_error(ExchangeNotAvailable, url, method, e, None)
except aiohttp.client_exceptions.ClientError as e: # base exception class
self.raise_error(ExchangeError, url, method, e, None)
self.handle_errors(response.status, response.reason, url, method, headers, http_response, json_response)
self.handle_rest_errors(None, response.status, http_response, url, method)
self.handle_rest_response(http_response, json_response, url, method, headers, body)
if json_response is not None:
return json_response
return http_response
async def load_markets(self, reload=False, params={}):
if not reload:
if self.markets:
if not self.markets_by_id:
return self.set_markets(self.markets)
return self.markets
currencies = None
if self.has['fetchCurrencies']:
currencies = await self.fetch_currencies()
markets = await self.fetch_markets(params)
return self.set_markets(markets, currencies)
async def fetch_fees(self):
trading = {}
funding = {}
if self.has['fetchTradingFees']:
trading = await self.fetch_trading_fees()
if self.has['fetchFundingFees']:
funding = await self.fetch_funding_fees()
return {
'trading': trading,
'funding': funding,
}
async def load_fees(self, reload=False):
if not reload:
if self.loaded_fees != Exchange.loaded_fees:
return self.loaded_fees
self.loaded_fees = self.deep_extend(self.loaded_fees, await self.fetch_fees())
return self.loaded_fees
async def fetch_markets(self, params={}):
# markets are returned as a list
# currencies are returned as a dict
# this is for historical reasons
# and may be changed for consistency later
return self.to_array(self.markets)
async def fetch_currencies(self, params={}):
# markets are returned as a list
# currencies are returned as a dict
# this is for historical reasons
# and may be changed for consistency later
return self.currencies
async def fetch_order_status(self, id, symbol=None, params={}):
order = await self.fetch_order(id, symbol, params)
return order['status']
async def fetch_partial_balance(self, part, params={}):
balance = await self.fetch_balance(params)
return balance[part]
async def fetch_l2_order_book(self, symbol, limit=None, params={}):
orderbook = await self.fetch_order_book(symbol, limit, params)
return self.extend(orderbook, {
'bids': self.sort_by(self.aggregate(orderbook['bids']), 0, True),
'asks': self.sort_by(self.aggregate(orderbook['asks']), 0),
})
async def perform_order_book_request(self, market, limit=None, params={}):
raise NotSupported(self.id + ' performOrderBookRequest not supported yet')
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
orderbook = await self.perform_order_book_request(market, limit, params)
return self.parse_order_book(orderbook, market, limit, params)
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
if not self.has['fetchTrades']:
self.raise_error(NotSupported, details='fetch_ohlcv() not implemented yet')
await self.load_markets()
trades = await self.fetch_trades(symbol, since, limit, params)
return self.build_ohlcv(trades, timeframe, since, limit)
async def fetchOHLCV(self, symbol, timeframe='1m', since=None, limit=None, params={}):
return await self.fetch_ohlcv(symbol, timeframe, since, limit, params)
async def fetch_full_tickers(self, symbols=None, params={}):
return await self.fetch_tickers(symbols, params)
async def edit_order(self, id, symbol, *args):
if not self.enableRateLimit:
self.raise_error(ExchangeError, details='updateOrder() requires enableRateLimit = true')
await self.cancel_order(id, symbol)
return await self.create_order(symbol, *args)
async def fetch_trading_fees(self, params={}):
self.raise_error(NotSupported, details='fetch_trading_fees() not supported yet')
async def fetch_trading_fee(self, symbol, params={}):
if not self.has['fetchTradingFees']:
self.raise_error(NotSupported, details='fetch_trading_fee() not supported yet')
return await self.fetch_trading_fees(params)
async def load_trading_limits(self, symbols=None, reload=False, params={}):
if self.has['fetchTradingLimits']:
if reload or not('limitsLoaded' in list(self.options.keys())):
response = await self.fetch_trading_limits(symbols)
for i in range(0, len(symbols)):
symbol = symbols[i]
self.markets[symbol] = self.deep_extend(self.markets[symbol], response[symbol])
self.options['limitsLoaded'] = self.milliseconds()
return self.markets
async def load_accounts(self, reload=False, params={}):
if reload:
self.accounts = await self.fetch_accounts(params)
else:
if self.accounts:
return self.accounts
else:
self.accounts = await self.fetch_accounts(params)
self.accountsById = self.index_by(self.accounts, 'id')
return self.accounts
| 42.146853 | 355 | 0.611581 |
__version__ = '1.18.472'
import asyncio
import concurrent
import socket
import time
import math
import random
import certifi
import aiohttp
import ssl
import sys
import yarl
from ccxt.async_support.base.throttle import throttle
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import RequestTimeout
from ccxt.base.errors import NotSupported
from ccxt.base.exchange import Exchange as BaseExchange
__all__ = [
'BaseExchange',
'Exchange',
]
class Exchange(BaseExchange):
def __init__(self, config={}):
if 'asyncio_loop' in config:
self.asyncio_loop = config['asyncio_loop']
self.asyncio_loop = self.asyncio_loop or asyncio.get_event_loop()
self.own_session = 'session' not in config
self.cafile = config.get('cafile', certifi.where())
self.open()
super(Exchange, self).__init__(config)
self.init_rest_rate_limiter()
def init_rest_rate_limiter(self):
self.throttle = throttle(self.extend({
'loop': self.asyncio_loop,
}, self.tokenBucket))
def __del__(self):
if self.session is not None:
self.logger.warning(self.id + " requires to release all resources with an explicit call to the .close() coroutine. If you are using the exchange instance with async coroutines, add exchange.close() to your code into a place when you're done with the exchange and don't need the exchange instance anymore (at the end of your async coroutine).")
if sys.version_info >= (3, 5):
async def __aenter__(self):
self.open()
return self
async def __aexit__(self, exc_type, exc, tb):
await self.close()
def open(self):
if self.own_session and self.session is None:
context = ssl.create_default_context(cafile=self.cafile)
connector = aiohttp.TCPConnector(ssl=context, loop=self.asyncio_loop)
self.session = aiohttp.ClientSession(loop=self.asyncio_loop, connector=connector, trust_env=self.aiohttp_trust_env)
async def close(self):
if self.session is not None:
if self.own_session:
await self.session.close()
self.session = None
async def wait_for_token(self):
while self.rateLimitTokens <= 1:
self.add_new_tokens()
seconds_delays = [0.001, 0.005, 0.022, 0.106, 0.5]
delay = random.choice(seconds_delays)
await asyncio.sleep(delay)
self.rateLimitTokens -= 1
def add_new_tokens(self):
now = time.monotonic()
time_since_update = now - self.rateLimitUpdateTime
new_tokens = math.floor((0.8 * 1000.0 * time_since_update) / self.rateLimit)
if new_tokens > 1:
self.rateLimitTokens = min(self.rateLimitTokens + new_tokens, self.rateLimitMaxTokens)
self.rateLimitUpdateTime = now
async def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None):
if self.enableRateLimit:
await self.throttle()
self.lastRestRequestTimestamp = self.milliseconds()
request = self.sign(path, api, method, params, headers, body)
return await self.fetch(request['url'], request['method'], request['headers'], request['body'])
async def fetch(self, url, method='GET', headers=None, body=None):
request_headers = self.prepare_request_headers(headers)
url = self.proxy + url
if self.verbose:
print("\nRequest:", method, url, headers, body)
self.logger.debug("%s %s, Request: %s %s", method, url, headers, body)
encoded_body = body.encode() if body else None
session_method = getattr(self.session, method.lower())
response = None
http_response = None
json_response = None
try:
async with session_method(yarl.URL(url, encoded=True),
data=encoded_body,
headers=request_headers,
timeout=(self.timeout / 1000),
proxy=self.aiohttp_proxy) as response:
http_response = await response.text()
json_response = self.parse_json(http_response) if self.is_json_encoded_object(http_response) else None
headers = response.headers
if self.enableLastHttpResponse:
self.last_http_response = http_response
if self.enableLastResponseHeaders:
self.last_response_headers = headers
if self.enableLastJsonResponse:
self.last_json_response = json_response
if self.verbose:
print("\nResponse:", method, url, response.status, headers, http_response)
self.logger.debug("%s %s, Response: %s %s %s", method, url, response.status, headers, http_response)
except socket.gaierror as e:
self.raise_error(ExchangeNotAvailable, url, method, e, None)
except concurrent.futures._base.TimeoutError as e:
self.raise_error(RequestTimeout, method, url, e, None)
except aiohttp.client_exceptions.ClientConnectionError as e:
self.raise_error(ExchangeNotAvailable, url, method, e, None)
except aiohttp.client_exceptions.ClientError as e:
self.raise_error(ExchangeError, url, method, e, None)
self.handle_errors(response.status, response.reason, url, method, headers, http_response, json_response)
self.handle_rest_errors(None, response.status, http_response, url, method)
self.handle_rest_response(http_response, json_response, url, method, headers, body)
if json_response is not None:
return json_response
return http_response
async def load_markets(self, reload=False, params={}):
if not reload:
if self.markets:
if not self.markets_by_id:
return self.set_markets(self.markets)
return self.markets
currencies = None
if self.has['fetchCurrencies']:
currencies = await self.fetch_currencies()
markets = await self.fetch_markets(params)
return self.set_markets(markets, currencies)
async def fetch_fees(self):
trading = {}
funding = {}
if self.has['fetchTradingFees']:
trading = await self.fetch_trading_fees()
if self.has['fetchFundingFees']:
funding = await self.fetch_funding_fees()
return {
'trading': trading,
'funding': funding,
}
async def load_fees(self, reload=False):
if not reload:
if self.loaded_fees != Exchange.loaded_fees:
return self.loaded_fees
self.loaded_fees = self.deep_extend(self.loaded_fees, await self.fetch_fees())
return self.loaded_fees
async def fetch_markets(self, params={}):
return self.to_array(self.markets)
async def fetch_currencies(self, params={}):
return self.currencies
async def fetch_order_status(self, id, symbol=None, params={}):
order = await self.fetch_order(id, symbol, params)
return order['status']
async def fetch_partial_balance(self, part, params={}):
balance = await self.fetch_balance(params)
return balance[part]
async def fetch_l2_order_book(self, symbol, limit=None, params={}):
orderbook = await self.fetch_order_book(symbol, limit, params)
return self.extend(orderbook, {
'bids': self.sort_by(self.aggregate(orderbook['bids']), 0, True),
'asks': self.sort_by(self.aggregate(orderbook['asks']), 0),
})
async def perform_order_book_request(self, market, limit=None, params={}):
raise NotSupported(self.id + ' performOrderBookRequest not supported yet')
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
orderbook = await self.perform_order_book_request(market, limit, params)
return self.parse_order_book(orderbook, market, limit, params)
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
if not self.has['fetchTrades']:
self.raise_error(NotSupported, details='fetch_ohlcv() not implemented yet')
await self.load_markets()
trades = await self.fetch_trades(symbol, since, limit, params)
return self.build_ohlcv(trades, timeframe, since, limit)
async def fetchOHLCV(self, symbol, timeframe='1m', since=None, limit=None, params={}):
return await self.fetch_ohlcv(symbol, timeframe, since, limit, params)
async def fetch_full_tickers(self, symbols=None, params={}):
return await self.fetch_tickers(symbols, params)
async def edit_order(self, id, symbol, *args):
if not self.enableRateLimit:
self.raise_error(ExchangeError, details='updateOrder() requires enableRateLimit = true')
await self.cancel_order(id, symbol)
return await self.create_order(symbol, *args)
async def fetch_trading_fees(self, params={}):
self.raise_error(NotSupported, details='fetch_trading_fees() not supported yet')
async def fetch_trading_fee(self, symbol, params={}):
if not self.has['fetchTradingFees']:
self.raise_error(NotSupported, details='fetch_trading_fee() not supported yet')
return await self.fetch_trading_fees(params)
async def load_trading_limits(self, symbols=None, reload=False, params={}):
if self.has['fetchTradingLimits']:
if reload or not('limitsLoaded' in list(self.options.keys())):
response = await self.fetch_trading_limits(symbols)
for i in range(0, len(symbols)):
symbol = symbols[i]
self.markets[symbol] = self.deep_extend(self.markets[symbol], response[symbol])
self.options['limitsLoaded'] = self.milliseconds()
return self.markets
async def load_accounts(self, reload=False, params={}):
if reload:
self.accounts = await self.fetch_accounts(params)
else:
if self.accounts:
return self.accounts
else:
self.accounts = await self.fetch_accounts(params)
self.accountsById = self.index_by(self.accounts, 'id')
return self.accounts
| true | true |
f7fd099ffcd67116bb077ce15304ec63da9d0195 | 5,923 | py | Python | pymathics/setup.py | Jankyboy/Mathics | d79a0af13d21c10f5e8d1df399d1ac13399b479a | [
"Apache-2.0"
] | null | null | null | pymathics/setup.py | Jankyboy/Mathics | d79a0af13d21c10f5e8d1df399d1ac13399b479a | [
"Apache-2.0"
] | 2 | 2020-10-12T23:17:05.000Z | 2021-08-07T22:37:06.000Z | pymathics/setup.py | Jankyboy/Mathics | d79a0af13d21c10f5e8d1df399d1ac13399b479a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
"""Setuptools based setup script for Mathics.
For the easiest installation just type the following command (you'll probably
need root privileges):
python setup.py install
This will install the library in the default location. For instructions on
how to customize the install procedure read the output of:
python setup.py --help install
In addition, there are some other commands:
python setup.py clean -> will clean all trash (*.pyc and stuff)
To get a full list of available commands, read the output of:
python setup.py --help-commands
Or, if all else fails, feel free to write to the mathics users list at
mathics-users@googlegroups.com and ask for help.
"""
import sys
import platform
import os
from setuptools import setup, Command, Extension
# Ensure user has the correct Python version
if sys.version_info < (3, 6):
print("Mathics support Python 3.6 and above; you have %d.%d" % sys.version_info[:2])
sys.exit(-1)
# stores __version__ in the current namespace
exec(compile(open("../mathics/version.py").read(), "../mathics/version.py", "exec"))
is_PyPy = platform.python_implementation() == "PyPy"
INSTALL_REQUIRES = []
DEPENDENCY_LINKS = []
try:
if is_PyPy:
raise ImportError
from Cython.Distutils import build_ext
except ImportError:
EXTENSIONS = []
CMDCLASS = {}
else:
EXTENSIONS = {
"core": ["expression", "numbers", "rules", "pattern"],
"builtin": ["arithmetic", "numeric", "patterns", "graphics"],
}
EXTENSIONS = [
Extension(
"mathics.%s.%s" % (parent, module), ["mathics/%s/%s.py" % (parent, module)]
)
for parent, modules in EXTENSIONS.items()
for module in modules
]
CMDCLASS = {"build_ext": build_ext}
INSTALL_REQUIRES += ["cython>=0.15.1"]
# General Requirements
INSTALL_REQUIRES += [
"sympy>=1.6, < 1.7",
"django >= 1.8, < 1.12",
"mpmath>=1.1.0",
"python-dateutil",
"colorama",
]
def subdirs(root, file="*.*", depth=10):
    """Yield glob patterns matching *file* at each directory level under *root*.

    Produces ``root + ("*/" * k) + file`` for k = 0 .. depth-1, e.g.
    ``subdirs("a/", "f", 2)`` yields ``"a/f"`` then ``"a/*/f"``.
    """
    for level in range(depth):
        yield "{}{}{}".format(root, "*/" * level, file)
class initialize(Command):
    """
    Manually create the Django database used by the web notebook
    """

    description = "manually create the Django database used by the web notebook"
    user_options = []  # distutils complains if this is not here.

    def __init__(self, *args):
        self.args = args[0]  # so we can pass it to other classes
        Command.__init__(self, *args)

    def initialize_options(self):  # distutils wants this
        pass

    def finalize_options(self):  # this too
        pass

    def run(self):
        """Create the data directory and run the Django migrations."""
        import os
        import subprocess
        # Execute settings.py in an isolated namespace to read
        # DATABASES / DATA_DIR without importing the whole Django app.
        settings = {}
        exec(
            compile(open("mathics/settings.py").read(), "mathics/settings.py", "exec"),
            settings,
        )
        database_file = settings["DATABASES"]["default"]["NAME"]
        print("Creating data directory %s" % settings["DATA_DIR"])
        if not os.path.exists(settings["DATA_DIR"]):
            os.makedirs(settings["DATA_DIR"])
        print("Creating database %s" % database_file)
        try:
            # Run migrations with the same interpreter that runs setup.py.
            subprocess.check_call(
                [sys.executable, "mathics/manage.py", "migrate", "--noinput"]
            )
            print("")
            print("Database created successfully.")
        except subprocess.CalledProcessError:
            print("Error: failed to create database")
            sys.exit(1)
class test(Command):
    """
    Run the unittests
    """

    description = "run the unittests"
    user_options = []  # distutils complains if this is not here.

    def __init__(self, *args):
        self.args = args[0]  # so we can pass it to other classes
        Command.__init__(self, *args)

    def initialize_options(self):  # distutils wants this
        pass

    def finalize_options(self):  # this too
        pass

    def run(self):
        """Discover and run everything under test/, exiting nonzero on
        failure so CI can detect it."""
        import unittest
        test_loader = unittest.defaultTestLoader
        test_runner = unittest.TextTestRunner(verbosity=3)
        test_suite = test_loader.discover("test/")
        test_result = test_runner.run(test_suite)
        if not test_result.wasSuccessful():
            sys.exit(1)
CMDCLASS["initialize"] = initialize
CMDCLASS["test"] = test
mathjax_files = list(subdirs("media/js/mathjax/"))
setup(
name="PyMathics",
cmdclass=CMDCLASS,
ext_modules=EXTENSIONS,
version=__version__,
packages=[
"pymathics.natlang",
"pymathics.testpythaticsmodule",
],
install_requires=INSTALL_REQUIRES,
dependency_links=DEPENDENCY_LINKS,
# don't pack Mathics in egg because of media files, etc.
zip_safe=False,
# metadata for upload to PyPI
author="Angus Griffith",
author_email="mathics@angusgriffith.com",
description="A general-purpose computer algebra system.",
license="GPL",
url="https://mathics.github.io/",
download_url="https://github.com/mathics/Mathics/tarball/v0.9",
keywords=["Mathematica", "Wolfram", "Interpreter", "Shell", "Math", "CAS"],
classifiers=[
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Mathematics",
"Topic :: Scientific/Engineering :: Physics",
"Topic :: Software Development :: Interpreters",
],
# TODO: could also include long_description, download_url,
)
| 29.17734 | 88 | 0.638528 |
from __future__ import print_function
from __future__ import absolute_import
import sys
import platform
import os
from setuptools import setup, Command, Extension
if sys.version_info < (3, 6):
print("Mathics support Python 3.6 and above; you have %d.%d" % sys.version_info[:2])
sys.exit(-1)
exec(compile(open("../mathics/version.py").read(), "../mathics/version.py", "exec"))
is_PyPy = platform.python_implementation() == "PyPy"
INSTALL_REQUIRES = []
DEPENDENCY_LINKS = []
try:
if is_PyPy:
raise ImportError
from Cython.Distutils import build_ext
except ImportError:
EXTENSIONS = []
CMDCLASS = {}
else:
EXTENSIONS = {
"core": ["expression", "numbers", "rules", "pattern"],
"builtin": ["arithmetic", "numeric", "patterns", "graphics"],
}
EXTENSIONS = [
Extension(
"mathics.%s.%s" % (parent, module), ["mathics/%s/%s.py" % (parent, module)]
)
for parent, modules in EXTENSIONS.items()
for module in modules
]
CMDCLASS = {"build_ext": build_ext}
INSTALL_REQUIRES += ["cython>=0.15.1"]
INSTALL_REQUIRES += [
"sympy>=1.6, < 1.7",
"django >= 1.8, < 1.12",
"mpmath>=1.1.0",
"python-dateutil",
"colorama",
]
def subdirs(root, file="*.*", depth=10):
for k in range(depth):
yield root + "*/" * k + file
class initialize(Command):
description = "manually create the Django database used by the web notebook"
user_options = []
def __init__(self, *args):
self.args = args[0]
Command.__init__(self, *args)
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import os
import subprocess
settings = {}
exec(
compile(open("mathics/settings.py").read(), "mathics/settings.py", "exec"),
settings,
)
database_file = settings["DATABASES"]["default"]["NAME"]
print("Creating data directory %s" % settings["DATA_DIR"])
if not os.path.exists(settings["DATA_DIR"]):
os.makedirs(settings["DATA_DIR"])
print("Creating database %s" % database_file)
try:
subprocess.check_call(
[sys.executable, "mathics/manage.py", "migrate", "--noinput"]
)
print("")
print("Database created successfully.")
except subprocess.CalledProcessError:
print("Error: failed to create database")
sys.exit(1)
class test(Command):
description = "run the unittests"
user_options = []
def __init__(self, *args):
self.args = args[0]
Command.__init__(self, *args)
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import unittest
test_loader = unittest.defaultTestLoader
test_runner = unittest.TextTestRunner(verbosity=3)
test_suite = test_loader.discover("test/")
test_result = test_runner.run(test_suite)
if not test_result.wasSuccessful():
sys.exit(1)
CMDCLASS["initialize"] = initialize
CMDCLASS["test"] = test
mathjax_files = list(subdirs("media/js/mathjax/"))
setup(
name="PyMathics",
cmdclass=CMDCLASS,
ext_modules=EXTENSIONS,
version=__version__,
packages=[
"pymathics.natlang",
"pymathics.testpythaticsmodule",
],
install_requires=INSTALL_REQUIRES,
dependency_links=DEPENDENCY_LINKS,
zip_safe=False,
# metadata for upload to PyPI
author="Angus Griffith",
author_email="mathics@angusgriffith.com",
description="A general-purpose computer algebra system.",
license="GPL",
url="https://mathics.github.io/",
download_url="https://github.com/mathics/Mathics/tarball/v0.9",
keywords=["Mathematica", "Wolfram", "Interpreter", "Shell", "Math", "CAS"],
classifiers=[
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Mathematics",
"Topic :: Scientific/Engineering :: Physics",
"Topic :: Software Development :: Interpreters",
],
# TODO: could also include long_description, download_url,
)
| true | true |
f7fd0bb02f6429066eb47f9dbcc6f8d27b28445a | 428 | py | Python | extract.py | akusumoto/sample_dash | 431d4b8a0e524ed8eb6f616594afbb67f5dcd428 | [
"BSD-3-Clause"
] | null | null | null | extract.py | akusumoto/sample_dash | 431d4b8a0e524ed8eb6f616594afbb67f5dcd428 | [
"BSD-3-Clause"
] | null | null | null | extract.py | akusumoto/sample_dash | 431d4b8a0e524ed8eb6f616594afbb67f5dcd428 | [
"BSD-3-Clause"
] | 1 | 2021-11-03T07:48:25.000Z | 2021-11-03T07:48:25.000Z | import numpy as np
def random(texture, num):
    """Pick ``num`` rows of ``texture`` uniformly at random, with replacement.

    Sampling with replacement keeps the call stable even for small row
    pools; a ``replace=False`` variant would forbid duplicates but raises
    when ``num`` exceeds the number of available rows.
    """
    rows = np.random.choice(texture.shape[0], num)
    return texture[rows]
def stat(texture, num):
    """Placeholder for the 'STAT' extraction strategy; not implemented yet.

    Registered in the ``method`` dispatch table below; currently returns
    None.
    """
    pass
def hybrid(texture, num):
    """Placeholder for the 'HybridIA' extraction strategy; not implemented yet.

    Registered in the ``method`` dispatch table below; currently returns
    None.
    """
    pass
method = {'random': random, 'STAT': stat, 'HybridIA': hybrid}
| 22.526316 | 113 | 0.670561 | import numpy as np
def random(texture, num):
um)
return texture[idx]
def stat(texture, num):
pass
def hybrid(texture, num):
pass
method = {'random': random, 'STAT': stat, 'HybridIA': hybrid}
| true | true |
f7fd0bb889bd2a74fc08867527386634f53f6475 | 2,682 | py | Python | quorra/methods.py | bprinty/quorra-python | 82b6583992c6db4f16ee0d74ea7b1fe5e84cf03c | [
"Apache-2.0"
] | null | null | null | quorra/methods.py | bprinty/quorra-python | 82b6583992c6db4f16ee0d74ea7b1fe5e84cf03c | [
"Apache-2.0"
] | null | null | null | quorra/methods.py | bprinty/quorra-python | 82b6583992c6db4f16ee0d74ea7b1fe5e84cf03c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Methods for interacting with quorra plots.
#
# @author <bprinty@gmail.com>
# ------------------------------------------------
# imports
# -------
import os
# config
# ------
_open = None
_app = None
__src__ = os.path.dirname(os.path.realpath(__file__))
__cwd__ = os.getcwd()
__templates__ = os.path.join(__src__, 'tmpl')
# methods
# -------
def export(plot, filename, width=800, height=600):
    """
    Export plot to file.

    Renders the plot in a temporary HTML page via headless PhantomJS and
    saves a screenshot of it as a PNG.

    Args:
        plot (quorra.Plot): Quorra plot object to export.
        filename (str): Filename to export to ('.png' is always appended).
        width (int): Width for plot (pixels).
        height (int): Height for plot (pixels).
    """
    import signal
    import uuid
    from selenium.webdriver import PhantomJS
    global __templates__, __cwd__
    phantom = PhantomJS(service_log_path=os.path.devnull)
    tmpl = os.path.join(__templates__, 'export.html')
    # Unique scratch file so concurrent exports don't collide.
    exp = os.path.join(__cwd__, '.' + str(uuid.uuid1()) + '.html')
    try:
        with open(tmpl, 'r') as fi, open(exp, 'w') as fo:
            dat = fi.read()
            # Inject the plot definition and requested dimensions into the
            # template's placeholders.
            dat = dat.replace('var plot = undefined;', 'var plot = {};'.format(str(plot)))
            dat = dat.replace('width: 800px;', 'width: {}px;'.format(width))
            dat = dat.replace('height: 500px;', 'height: {}px;'.format(height))
            fo.write(dat)
        phantom.get('file://' + exp)
        phantom.save_screenshot(filename.replace('.png', '') + '.png')
    finally:
        # Always tear down the browser and remove the scratch file.
        phantom.service.process.send_signal(signal.SIGTERM)
        phantom.quit()
        if os.path.exists(exp):
            os.remove(exp)
    return
def render(plot, width=800, height=600, append=False):
    """
    Update current view with new plot.

    Writes the plot into an HTML page and opens it in the default web
    browser.

    Args:
        plot (quorra.Plot): Quorra plot object to render.
        width (int): Width for plot (pixels).
        height (int): Height for plot (pixels).
        append (bool): Whether or not to append the plot
            to the current view.
            NOTE(review): currently unused by the implementation.
    """
    import webbrowser
    global _open, __templates__
    if _open is None:
        # this should change later to an actual server that
        # waits for data and can support multiple plots at
        # the same time
        _open = os.path.join('/tmp/quorra-server.html')
    tmpl = os.path.join(__templates__, 'render.html')
    with open(tmpl, 'r') as fi, open(_open, 'w') as fo:
        dat = fi.read()
        # Inject the plot definition and requested dimensions into the
        # template's placeholders.
        dat = dat.replace('var plot = undefined;', 'var plot = {};'.format(str(plot)))
        dat = dat.replace('width: 800px;', 'width: {}px;'.format(width))
        dat = dat.replace('height: 500px;', 'height: {}px;'.format(height))
        fo.write(dat)
    webbrowser.open(_open)
    return
| 31.186047 | 90 | 0.585011 |
import os
_open = None
_app = None
__src__ = os.path.dirname(os.path.realpath(__file__))
__cwd__ = os.getcwd()
__templates__ = os.path.join(__src__, 'tmpl')
def export(plot, filename, width=800, height=600):
import signal
import uuid
from selenium.webdriver import PhantomJS
global __templates__, __cwd__
phantom = PhantomJS(service_log_path=os.path.devnull)
tmpl = os.path.join(__templates__, 'export.html')
exp = os.path.join(__cwd__, '.' + str(uuid.uuid1()) + '.html')
try:
with open(tmpl, 'r') as fi, open(exp, 'w') as fo:
dat = fi.read()
dat = dat.replace('var plot = undefined;', 'var plot = {};'.format(str(plot)))
dat = dat.replace('width: 800px;', 'width: {}px;'.format(width))
dat = dat.replace('height: 500px;', 'height: {}px;'.format(height))
fo.write(dat)
phantom.get('file://' + exp)
phantom.save_screenshot(filename.replace('.png', '') + '.png')
finally:
phantom.service.process.send_signal(signal.SIGTERM)
phantom.quit()
if os.path.exists(exp):
os.remove(exp)
return
def render(plot, width=800, height=600, append=False):
import webbrowser
global _open, __templates__
if _open is None:
_open = os.path.join('/tmp/quorra-server.html')
tmpl = os.path.join(__templates__, 'render.html')
with open(tmpl, 'r') as fi, open(_open, 'w') as fo:
dat = fi.read()
dat = dat.replace('var plot = undefined;', 'var plot = {};'.format(str(plot)))
dat = dat.replace('width: 800px;', 'width: {}px;'.format(width))
dat = dat.replace('height: 500px;', 'height: {}px;'.format(height))
fo.write(dat)
webbrowser.open(_open)
return
| true | true |
f7fd0c6cb84faeeb14bf0efb52b65decb817042e | 3,495 | py | Python | piano_keys/generate_keys.py | rebpdx/blender-scripts | 7557fc6029da250b53d7e677169b83b80773e62d | [
"MIT"
] | 1 | 2021-12-16T20:26:57.000Z | 2021-12-16T20:26:57.000Z | piano_keys/generate_keys.py | rebpdx/blender-scripts | 7557fc6029da250b53d7e677169b83b80773e62d | [
"MIT"
] | null | null | null | piano_keys/generate_keys.py | rebpdx/blender-scripts | 7557fc6029da250b53d7e677169b83b80773e62d | [
"MIT"
] | null | null | null | """
This script generates a set of basic rectangular keys so that a full piano
model does not need to be rendered when testing keyboard actuation
"""
__author__ = "Robert Brown"
__copyright__ = "Copyright 2018"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Rob Brown"
__email__ = "hi@robbrown.io"
__status__ = "Prototype"
# General notes:
# Piano Key numbering: https://en.wikipedia.org/wiki/Piano_key_frequencies
from math import floor
import bpy
PADDING = 0.05
NUMBER_OF_OCTIVES = 8
STARTING_KEY = 4
NUMBER_OF_WKEYS = NUMBER_OF_OCTIVES * 7
NUMBER_OF_BKEYS = NUMBER_OF_OCTIVES * 5
TOTAL_KEYS = NUMBER_OF_OCTIVES * 12
class GenerateKeys():
    """Build simple rectangular stand-in meshes for white and black piano keys."""

    @staticmethod
    def white_key_number(range_number):
        """Return the piano key number of the *range_number*-th white key.

        Mapping from http://mathforum.org/kb/message.jspa?messageID=7365626
        guarantees that black-key numbers are never produced.
        """
        return floor((12 * range_number + 5) / 7)

    @staticmethod
    def black_key_skip(range_number):
        """Return 1 when no black key exists at this position (a gap), else 0."""
        has_gap = range_number % 5 == 0 or (range_number + 2) % 5 == 0
        return 1 if has_gap else 0

    @staticmethod
    def name_key_and_mesh(blender_object, key_number):
        """Give *blender_object* and its mesh the canonical zero-padded name."""
        blender_object.name = f'Key ({key_number:0>2d})'
        blender_object.data.name = f'Mesh ({key_number:0>2d})'

    def run(self):
        """Create, place, and name every white and black key mesh."""
        # Stretch the pre-selected cube into a white-key sized bar.
        bpy.ops.transform.resize(value=(2, 0.5, 0.5))
        bpy.ops.transform.translate(value=(0, -(NUMBER_OF_WKEYS)/2, 0.5))

        white_keys = [self.white_key_number(n) + STARTING_KEY
                      for n in range(NUMBER_OF_WKEYS)]

        # Duplicate and name each white key in turn.
        for key_number in white_keys:
            self.name_key_and_mesh(bpy.context.object, key_number)
            bpy.ops.object.duplicate_move(TRANSFORM_OT_translate=({"value": (0, 1 + PADDING, 0)}))

        # Reshape the surplus duplicate into the first black key.
        bpy.ops.transform.resize(value=(0.6, 0.75, 1))
        bpy.ops.transform.translate(value=(-0.8, -(1.5 + PADDING), 0.5))

        # Black keys are every key number that is not a white key.
        every_key = range(STARTING_KEY, TOTAL_KEYS + STARTING_KEY)
        black_keys = sorted(set(every_key) - set(white_keys), reverse=True)
        gap_flags = [self.black_key_skip(n)
                     for n in range(1, len(black_keys) + 1)]

        # Duplicate and name each black key, skipping an extra slot at gaps.
        for key_number, gap in zip(black_keys[:-1], gap_flags[:-1]):
            self.name_key_and_mesh(bpy.context.object, key_number)
            bpy.ops.object.duplicate_move(TRANSFORM_OT_translate=(
                {"value": (0, -((1 + PADDING) * (1 + gap)), 0)}))

        # The final duplicate only needs a name.
        self.name_key_and_mesh(bpy.context.object, black_keys[-1])
# Script entry point: run inside Blender (bpy must be importable).
if __name__ == '__main__':
    GenerateKeys().run()
| 32.663551 | 98 | 0.630901 |
__author__ = "Robert Brown"
__copyright__ = "Copyright 2018"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Rob Brown"
__email__ = "hi@robbrown.io"
__status__ = "Prototype"
from math import floor
import bpy
PADDING = 0.05
NUMBER_OF_OCTIVES = 8
STARTING_KEY = 4
NUMBER_OF_WKEYS = NUMBER_OF_OCTIVES * 7
NUMBER_OF_BKEYS = NUMBER_OF_OCTIVES * 5
TOTAL_KEYS = NUMBER_OF_OCTIVES * 12
class GenerateKeys():
    """Build simple rectangular stand-in meshes for white and black piano keys."""
    @staticmethod
    def white_key_number(range_number):
        """Return the piano key number of the *range_number*-th white key."""
        return floor((12 * range_number + 5) / 7)
    @staticmethod
    def black_key_skip(range_number):
        """Return 1 when no black key exists at this position (a gap), else 0."""
        if (range_number % 5 == 0) or ((range_number + 2) % 5 == 0):
            return 1
        return 0
    @staticmethod
    def name_key_and_mesh(blender_object, key_number):
        """Give *blender_object* and its mesh the canonical zero-padded name."""
        blender_object.name = 'Key ({0:0>2d})'.format(key_number)
        blender_object.data.name = 'Mesh ({0:0>2d})'.format(key_number)
    def run(self):
        """Create, place, and name every white and black key mesh."""
        # Stretch the pre-selected cube into a white-key sized bar.
        bpy.ops.transform.resize(value=(2, 0.5, 0.5))
        bpy.ops.transform.translate(value=(0, -(NUMBER_OF_WKEYS)/2, 0.5))
        # Piano key numbers of all white keys, offset by the starting key.
        wkey_list = list(map(lambda x: self.white_key_number(x) + STARTING_KEY,
                             range(0, NUMBER_OF_WKEYS)))
        # Duplicate and name each white key in turn.
        for key_num in wkey_list:
            current = bpy.context.object
            self.name_key_and_mesh(current, key_num)
            bpy.ops.object.duplicate_move(TRANSFORM_OT_translate=({"value": (0, 1 + PADDING, 0)}))
        # Reshape the surplus duplicate into the first black key.
        bpy.ops.transform.resize(value=(0.6, 0.75, 1))
        bpy.ops.transform.translate(value=(-0.8, -(1.5 + PADDING), 0.5))
        # Black keys are every key number that is not a white key.
        all_keys_list = range(STARTING_KEY, TOTAL_KEYS + STARTING_KEY)
        bkey_list = list(set(all_keys_list) - set(wkey_list))
        bkey_list.sort(reverse=True)
        skip_list = list(map(self.black_key_skip,
                             range(1, len(bkey_list) + 1)))
        # Duplicate and name each black key, skipping an extra slot at gaps.
        for bkey_num, skip in zip(bkey_list[:-1], skip_list[:-1]):
            current = bpy.context.object
            self.name_key_and_mesh(current, bkey_num)
            bpy.ops.object.duplicate_move(TRANSFORM_OT_translate=(
                {"value": (0, -((1 + PADDING) * (1 + skip)), 0)}))
        # Name the last key
        current = bpy.context.object
        self.name_key_and_mesh(current, bkey_list[-1])
# Script entry point: run inside Blender (bpy must be importable).
if __name__ == '__main__':
    GenerateKeys().run()
| true | true |
f7fd0c731492d0af481cee2954e3b25724e77f07 | 7,114 | py | Python | docs/report/cloudmesh-openapi/tests/generator-natural-lang/test_generator_natural_language.py | rickotten/cybertraining-dsc.github.io | c8ea59be4f09fd543040ba0908af118df5820a70 | [
"Apache-2.0"
] | 7 | 2020-02-29T14:53:19.000Z | 2021-01-17T17:08:44.000Z | docs/report/cloudmesh-openapi/tests/generator-natural-lang/test_generator_natural_language.py | rickotten/cybertraining-dsc.github.io | c8ea59be4f09fd543040ba0908af118df5820a70 | [
"Apache-2.0"
] | 27 | 2020-02-29T13:38:11.000Z | 2020-09-02T20:24:59.000Z | docs/report/cloudmesh-openapi/tests/generator-natural-lang/test_generator_natural_language.py | rickotten/cybertraining-dsc.github.io | c8ea59be4f09fd543040ba0908af118df5820a70 | [
"Apache-2.0"
] | 6 | 2020-03-02T17:09:14.000Z | 2020-10-30T22:48:01.000Z | ###############################################################
# pytest -v --capture=no ./tests/generator-natural-lang/test_generator_natural_language.py
# pytest -v ./tests/generator-natural-lang/test_generator_natural_language.py
# pytest -v --capture=no ./tests/generator-natural-lang/test_generator_natural_language.py::TestGenerator::<METHODNAME>
###############################################################
import time, os, pytest, requests
from cloudmesh.common.Shell import Shell
from cloudmesh.common.debug import VERBOSE
from cloudmesh.common.util import HEADING
from cloudmesh.common.variables import Variables
from cloudmesh.common.Benchmark import Benchmark
from cloudmesh.compute.vm.Provider import Provider
from cloudmesh.configuration.Config import Config
from cloudmesh.management.configuration.name import Name
import sys
sys.path.append("./tests/lib")
from generator_test import GeneratorBaseTest, ServerBaseTest
sys.path.append("./tests/lib")
# ---------------------------------------------------------------------------
# Module-level test configuration. NOTE: everything below runs at *import*
# time, so merely collecting this test module requires a configured
# cloudmesh installation with the 'cloud' CLI variable set.
# ---------------------------------------------------------------------------
filename="./tests/generator-natural-lang/natural-lang-analysis.py"
all_functions=True
import_class=False
#Replace test_dir with location of your cloudmesh install
test_dir = "/Users/andrewgoldfarb/e516-spring/cm/cloudmesh-openapi/tests/generator-natural-lang/"
func_filename = "natural-lang-analysis.py"
yaml_filename = "natural-lang-analysis.yaml"
sample_text_file = "bladerunner-neg.txt"
# Hard-coded per-developer paths; adjust for your machine.
example_text_directory="/Users/andrewgoldfarb/.cloudmesh/text-cache/"
ssh_keys = "/Users/andrewgoldfarb/Desktop/project-keys.txt"
func_path = test_dir + func_filename
yaml_path = test_dir + yaml_filename
service = 'openapi'
# Pull the user name and CLI variables from the cloudmesh configuration.
user = Config()["cloudmesh.profile.user"]
variables = Variables()
VERBOSE(variables.dict())
pub_key = variables['pub_key']
priv_key = variables['priv_key']
google_sa = variables['google_sa_path']
cloud = variables.parameter('cloud')
vm_info = {}
print(f"Test run for {cloud}")
if cloud is None:
    # NOTE(review): the message contains a doubled "not"; left unchanged
    # here because it is a runtime string.
    raise ValueError("cloud is not not set")
# VM names take the form "test-<user>-vm-<counter>".
name_generator = Name()
name_generator.set(f"test-{user}-vm-" + "{counter}")
name = str(name_generator)
provider = Provider(name=cloud)
username = "andrewgoldfarb"
startup_script = "/Users/andrewgoldfarb/e516-spring/cm/get/ubuntu19.10/index.html"
vm_location_script = "text-analysis-startup-script.sh"
Benchmark.debug()
# "incremental": once one test in the class fails, the remaining tests in
# the class are expected to fail and are skipped by the conftest hook.
@pytest.mark.incremental
class TestGenerator():
    """End-to-end test of the OpenAPI generator: copy the analysis module,
    generate its spec, start the service, and exercise the /analyze endpoint
    against Google and Azure backends."""
    @pytest.fixture(scope="module")
    def generatorBaseTestFixture(self):
        # One generator harness shared by all tests in this module.
        gen = GeneratorBaseTest(filename, all_functions, import_class)
        return gen
    @pytest.fixture(scope="module")
    def serverBaseTestFixture(self):
        # One server harness shared by all tests in this module.
        server = ServerBaseTest()
        return server
    def test_copy_file(self, generatorBaseTestFixture):
        # Stage the analysis module where the generator expects it.
        generatorBaseTestFixture.copy_py_file()
    def test_generate(self, generatorBaseTestFixture):
        """
        Generate the OpenAPI spec and validate its paths information.
        """
        generatorBaseTestFixture.generate()
    def test_read_spec(self, generatorBaseTestFixture):
        generatorBaseTestFixture.read_spec()
    # def test_validate_function(self, generatorBaseTestFixture):
    #    generatorBaseTestFixture.validate_function()
    def test_start_service(self, serverBaseTestFixture):
        serverBaseTestFixture.start_service()
    def test_run_analyze_google(self):
        HEADING()
        Benchmark.Start()
        res_code = ""
        # NOTE(review): this polling loop has no retry cap or delay; if the
        # service never returns 200 the test hangs while hammering the server.
        while res_code != "200":
            response_google = requests.get(
                f"http://localhost:8080/cloudmesh/natural-lang-analysis/analyze?filename={sample_text_file}&cloud=google")
            res_code = str(response_google.status_code)
        Benchmark.Stop()
        assert res_code == "200"
    def test_run_analyze_azure(self):
        HEADING()
        Benchmark.Start()
        res_code = ""
        # NOTE(review): same unbounded polling pattern as the google variant.
        while res_code != "200":
            response_azure = requests.get(
                f"http://localhost:8080/cloudmesh/natural-lang-analysis/analyze?filename={sample_text_file}&cloud=azure")
            res_code = str(response_azure.status_code)
        Benchmark.Stop()
        assert res_code == "200"
    # def test_run_translate_google(self):
    #     HEADING()
    #     Benchmark.Start()
    #     res_code = ""
    #     text = "Testing"
    #     lang = "it"
    #
    #     while res_code != "200":
    #         response_google = requests.get(
    #             f"http://127.0.0.1:8080/cloudmesh/natural-lang-analysis/translate_text?cloud=google&text={text}&lang={lang}")
    #         print(response_google)
    #         res_code = str(response_google.status_code)
    #
    #     Benchmark.Stop()
    #
    #     assert res_code == "200"
    #
    # def test_run_translate_azure(self):
    #     HEADING()
    #     Benchmark.Start()
    #     res_code = ""
    #     text = "I am testing for cloudmesh on Azure"
    #     lang = "it"
    #
    #     while res_code != "200":
    #         response_azure = requests.get(
    #             f"http://127.0.0.1:8080/cloudmesh/natural-lang-analysis/translate_text?cloud=azure&text={text}&lang={lang}")
    #         print(response_azure)
    #         res_code = str(response_azure.status_code)
    #
    #     Benchmark.Stop()
    #
    #     assert res_code == "200"
    def test_stop_server(self, serverBaseTestFixture):
        serverBaseTestFixture.stop_server()
    def test_delete_build_file(self, generatorBaseTestFixture):
        generatorBaseTestFixture.delete_file()
    def test_benchmark(self):
        # Emit the timing summary collected by Benchmark.Start/Stop above.
        Benchmark.print(sysinfo=True, csv=True, tag=service)
#
@pytest.mark.incremental
class TestVM:
    """Provision a VM on the configured cloud and push the test fixtures,
    service-account file, and ssh keys onto it. Runs real cloud operations."""
    def test_provider_vm_create(self):
        HEADING()
        os.system(f"cms vm list --cloud={cloud}")
        # replace with provider.list
        name_generator.incr()
        Benchmark.Start()
        data = provider.create(key=pub_key)
        # provider.wait()
        Benchmark.Stop()
        # print(data)
        VERBOSE(data)
        name = str(Name())
        status = provider.status(name=name)[0]
        print(f'status: {str(status)}')
        # Oracle reports a different status vocabulary than the other clouds.
        if cloud == 'oracle':
            assert status["cm.status"] in ['STARTING', 'RUNNING', 'STOPPING',
                                           'STOPPED']
        else:
            assert status["cm.status"] in ['ACTIVE', 'RUNNING', 'BOOTING',
                                           'TERMINATED', 'STOPPED']
        external_IP = data[0]['ip_public']
        vm_instance = data[0]['name']
        #
        #
        # Copy the cached text fixtures and the Google service account to the
        # new VM, then publish ssh keys via gcloud project metadata.
        command = f'scp -r -i {priv_key} {example_text_directory} {username}@{external_IP}:./.cloudmesh/'
        upload_service_account = f'scp -i {priv_key} {google_sa} {username}@{external_IP}:.'
        google_sa_vm = "./cloudmesh-final-project-53d3e59c4d15.json"
        add_ssh_keys = f'gcloud compute project-info add-metadata --metadata-from-file ssh-keys={ssh_keys}'
        register_google =f"cms register update --kind=google --service=compute --filename={google_sa_vm}"
        Shell.run(command)
        Shell.run(upload_service_account)
        Shell.run(add_ssh_keys)
        # NOTE(review): this pipes local ssh output into a local command; it
        # looks like the register command was meant to run *on* the VM —
        # confirm the intended shell syntax.
        command_2 = f'ssh -i {priv_key} {username}@{external_IP} | {register_google}'
        Shell.run(command_2)
| 32.336364 | 127 | 0.6535 |
    def test_provider_vm_create(self):
        """Create a VM on the configured cloud, verify its status, and push
        fixtures / credentials onto it. Executes real cloud operations."""
        HEADING()
        os.system(f"cms vm list --cloud={cloud}")
        name_generator.incr()
        Benchmark.Start()
        data = provider.create(key=pub_key)
        Benchmark.Stop()
        VERBOSE(data)
        name = str(Name())
        status = provider.status(name=name)[0]
        print(f'status: {str(status)}')
        # Oracle reports a different status vocabulary than the other clouds.
        if cloud == 'oracle':
            assert status["cm.status"] in ['STARTING', 'RUNNING', 'STOPPING',
                                           'STOPPED']
        else:
            assert status["cm.status"] in ['ACTIVE', 'RUNNING', 'BOOTING',
                                           'TERMINATED', 'STOPPED']
        external_IP = data[0]['ip_public']
        vm_instance = data[0]['name']
        # Copy fixtures and the Google service account to the VM, then
        # publish ssh keys via gcloud project metadata.
        command = f'scp -r -i {priv_key} {example_text_directory} {username}@{external_IP}:./.cloudmesh/'
        upload_service_account = f'scp -i {priv_key} {google_sa} {username}@{external_IP}:.'
        google_sa_vm = "./cloudmesh-final-project-53d3e59c4d15.json"
        add_ssh_keys = f'gcloud compute project-info add-metadata --metadata-from-file ssh-keys={ssh_keys}'
        register_google =f"cms register update --kind=google --service=compute --filename={google_sa_vm}"
        Shell.run(command)
        Shell.run(upload_service_account)
        Shell.run(add_ssh_keys)
        # NOTE(review): pipes local ssh output into a local command — confirm
        # whether the register command was meant to run on the VM instead.
        command_2 = f'ssh -i {priv_key} {username}@{external_IP} | {register_google}'
        Shell.run(command_2)
| true | true |
f7fd0d5c8afcedae5b6b321b8947b40cc1efca58 | 5,155 | py | Python | analysis/src/python/evaluation/inspectors/diffs_between_df.py | eartser/hyperstyle-analyze | 58e2d361662e73e1e047919f57ab840055783b7a | [
"Apache-2.0"
] | 1 | 2022-03-15T09:46:06.000Z | 2022-03-15T09:46:06.000Z | analysis/src/python/evaluation/inspectors/diffs_between_df.py | eartser/hyperstyle-analyze | 58e2d361662e73e1e047919f57ab840055783b7a | [
"Apache-2.0"
] | 1 | 2022-02-14T13:53:38.000Z | 2022-02-14T13:53:38.000Z | analysis/src/python/evaluation/inspectors/diffs_between_df.py | eartser/hyperstyle-analyze | 58e2d361662e73e1e047919f57ab840055783b7a | [
"Apache-2.0"
] | 3 | 2022-02-13T16:49:53.000Z | 2022-02-17T13:53:07.000Z | import argparse
from pathlib import Path
import pandas as pd
from hyperstyle.src.python.review.quality.model import QualityType
from analysis.src.python.evaluation.common.pandas_util import (
get_inconsistent_positions, get_issues_by_row, get_solutions_df, get_solutions_df_by_file_path,
)
from analysis.src.python.evaluation.common.args_util import EvaluationRunToolArgument
from analysis.src.python.evaluation.common.csv_util import ColumnName
from analysis.src.python.evaluation.common.file_util import AnalysisExtension, get_parent_folder, \
get_restricted_extension, \
serialize_data_and_write_to_file
def configure_arguments(parser: argparse.ArgumentParser) -> None:
    """Register the old/new graded-solutions-file positional arguments."""
    argument = EvaluationRunToolArgument.SOLUTIONS_FILE_PATH.value
    # Both arguments share the same description text.
    graded_help = (f'{argument.description}'
                   f'\nAll code fragments from this file must be graded '
                   f'(file contains grade and traceback (optional) columns)')
    for suffix in ('old', 'new'):
        parser.add_argument(f'{argument.long_name}_{suffix}',
                            type=lambda value: Path(value).absolute(),
                            help=graded_help)
def find_diffs(old_df: pd.DataFrame, new_df: pd.DataFrame) -> dict:
    """Return the differences between two graded dataframes.

    The result dict contains:
      - grade: fragment ids whose quality *increased* in ``new_df``
        (unexpected — indicates the algorithm should be checked)
      - decreased_grade: fragment ids whose quality decreased in ``new_df``
      - user: number of unique users in ``new_df`` (0 if no user column)
      - traceback: {fragment_id: set of issues present only in ``new_df``}
      - penalty: {fragment_id: set of new issues with
        influence_on_penalty > 0}

    NOTE: mutates ``new_df`` in place by dropping its history column.

    :raises ValueError: if a fragment has fewer issues in ``new_df`` than
        in ``old_df``.
    """
    if ColumnName.HISTORY.value in new_df.columns:
        del new_df[ColumnName.HISTORY.value]
    # Align column order so positional comparison is meaningful.
    new_df = new_df.reindex(columns=old_df.columns)
    inconsistent_positions = get_inconsistent_positions(old_df, new_df)
    diffs = {
        ColumnName.GRADE.value: [],
        ColumnName.DECREASED_GRADE.value: [],
        ColumnName.TRACEBACK.value: {},
        ColumnName.PENALTY.value: {},
    }
    if ColumnName.USER.value in new_df.columns:
        diffs[ColumnName.USER.value] = len(new_df[ColumnName.USER.value].unique())
    else:
        diffs[ColumnName.USER.value] = 0
    # Keep only diffs in the TRACEBACK column
    for row, _ in filter(lambda t: t[1] == ColumnName.TRACEBACK.value, inconsistent_positions.index):
        old_value = old_df.iloc[row][ColumnName.GRADE.value]
        new_value = new_df.iloc[row][ColumnName.GRADE.value]
        old_quality = QualityType(old_value).to_number()
        new_quality = QualityType(new_value).to_number()
        fragment_id = old_df.iloc[row][ColumnName.ID.value]
        if new_quality > old_quality:
            # It is an unexpected key, we should check the algorithm
            diffs[ColumnName.GRADE.value].append(fragment_id)
        else:
            if new_quality < old_quality:
                diffs[ColumnName.DECREASED_GRADE.value].append(fragment_id)
            old_issues = get_issues_by_row(old_df, row)
            new_issues = get_issues_by_row(new_df, row)
            if len(old_issues) > len(new_issues):
                # BUG FIX: the message previously interpolated the builtin
                # `id` function instead of the fragment id.
                raise ValueError(f'New dataframe contains less issues than old for fragment {fragment_id}')
            # Issues that appeared only in the new dataframe.
            difference = set(new_issues) - set(old_issues)
            if len(difference) > 0:
                diffs[ColumnName.TRACEBACK.value][fragment_id] = difference
            # Issues with a non-zero influence_on_penalty coefficient.
            penalty = set(filter(lambda i: i.influence_on_penalty > 0, new_issues))
            if len(penalty) > 0:
                diffs[ColumnName.PENALTY.value][fragment_id] = penalty
    return diffs
def main() -> None:
    """Parse CLI args, diff the two graded datasets, and pickle the result."""
    arg_parser = argparse.ArgumentParser()
    configure_arguments(arg_parser)
    parsed = arg_parser.parse_args()

    old_path = parsed.solutions_file_path_old
    # The old file may be either xlsx or csv; detect and load accordingly.
    old_ext = get_restricted_extension(old_path, [AnalysisExtension.XLSX, AnalysisExtension.CSV])
    old_df = get_solutions_df(old_ext, old_path)

    new_df = get_solutions_df_by_file_path(parsed.solutions_file_path_new)

    diffs = find_diffs(old_df, new_df)
    # The pickled diffs land next to the old solutions file.
    output_path = get_parent_folder(Path(old_path)) / f'diffs{AnalysisExtension.PICKLE.value}'
    serialize_data_and_write_to_file(output_path, diffs)
# CLI entry point.
if __name__ == '__main__':
    main()
| 46.863636 | 115 | 0.704559 | import argparse
from pathlib import Path
import pandas as pd
from hyperstyle.src.python.review.quality.model import QualityType
from analysis.src.python.evaluation.common.pandas_util import (
get_inconsistent_positions, get_issues_by_row, get_solutions_df, get_solutions_df_by_file_path,
)
from analysis.src.python.evaluation.common.args_util import EvaluationRunToolArgument
from analysis.src.python.evaluation.common.csv_util import ColumnName
from analysis.src.python.evaluation.common.file_util import AnalysisExtension, get_parent_folder, \
get_restricted_extension, \
serialize_data_and_write_to_file
def configure_arguments(parser: argparse.ArgumentParser) -> None:
    """Register the old/new graded-solutions-file positional arguments."""
    parser.add_argument(f'{EvaluationRunToolArgument.SOLUTIONS_FILE_PATH.value.long_name}_old',
                        type=lambda value: Path(value).absolute(),
                        help=f'{EvaluationRunToolArgument.SOLUTIONS_FILE_PATH.value.description}'
                             f'\nAll code fragments from this file must be graded '
                             f'(file contains grade and traceback (optional) columns)')
    parser.add_argument(f'{EvaluationRunToolArgument.SOLUTIONS_FILE_PATH.value.long_name}_new',
                        type=lambda value: Path(value).absolute(),
                        help=f'{EvaluationRunToolArgument.SOLUTIONS_FILE_PATH.value.description}'
                             f'\nAll code fragments from this file must be graded '
                             f'(file contains grade and traceback (optional) columns)')
def find_diffs(old_df: pd.DataFrame, new_df: pd.DataFrame) -> dict:
    """Return the differences between two graded dataframes.

    The result dict contains:
      - grade: fragment ids whose quality *increased* in ``new_df``
        (unexpected — indicates the algorithm should be checked)
      - decreased_grade: fragment ids whose quality decreased in ``new_df``
      - user: number of unique users in ``new_df`` (0 if no user column)
      - traceback: {fragment_id: set of issues present only in ``new_df``}
      - penalty: {fragment_id: set of new issues with
        influence_on_penalty > 0}

    NOTE: mutates ``new_df`` in place by dropping its history column.

    :raises ValueError: if a fragment has fewer issues in ``new_df`` than
        in ``old_df``.
    """
    if ColumnName.HISTORY.value in new_df.columns:
        del new_df[ColumnName.HISTORY.value]
    # Align column order so positional comparison is meaningful.
    new_df = new_df.reindex(columns=old_df.columns)
    inconsistent_positions = get_inconsistent_positions(old_df, new_df)
    diffs = {
        ColumnName.GRADE.value: [],
        ColumnName.DECREASED_GRADE.value: [],
        ColumnName.TRACEBACK.value: {},
        ColumnName.PENALTY.value: {},
    }
    if ColumnName.USER.value in new_df.columns:
        diffs[ColumnName.USER.value] = len(new_df[ColumnName.USER.value].unique())
    else:
        diffs[ColumnName.USER.value] = 0
    # Only positions that differ in the traceback column are of interest.
    for row, _ in filter(lambda t: t[1] == ColumnName.TRACEBACK.value, inconsistent_positions.index):
        old_value = old_df.iloc[row][ColumnName.GRADE.value]
        new_value = new_df.iloc[row][ColumnName.GRADE.value]
        old_quality = QualityType(old_value).to_number()
        new_quality = QualityType(new_value).to_number()
        fragment_id = old_df.iloc[row][ColumnName.ID.value]
        if new_quality > old_quality:
            # Unexpected: quality should not increase; check the algorithm.
            diffs[ColumnName.GRADE.value].append(fragment_id)
        else:
            if new_quality < old_quality:
                diffs[ColumnName.DECREASED_GRADE.value].append(fragment_id)
            old_issues = get_issues_by_row(old_df, row)
            new_issues = get_issues_by_row(new_df, row)
            if len(old_issues) > len(new_issues):
                # BUG FIX: the message previously interpolated the builtin
                # `id` function instead of the fragment id.
                raise ValueError(f'New dataframe contains less issues than old for fragment {fragment_id}')
            # Issues that appeared only in the new dataframe.
            difference = set(new_issues) - set(old_issues)
            if len(difference) > 0:
                diffs[ColumnName.TRACEBACK.value][fragment_id] = difference
            # Issues with a non-zero influence_on_penalty coefficient.
            penalty = set(filter(lambda i: i.influence_on_penalty > 0, new_issues))
            if len(penalty) > 0:
                diffs[ColumnName.PENALTY.value][fragment_id] = penalty
    return diffs
def main() -> None:
    """Parse CLI args, diff the two graded datasets, and pickle the result."""
    parser = argparse.ArgumentParser()
    configure_arguments(parser)
    args = parser.parse_args()
    old_solutions_file_path = args.solutions_file_path_old
    # The old file may be either xlsx or csv; detect and load accordingly.
    output_ext = get_restricted_extension(old_solutions_file_path, [AnalysisExtension.XLSX, AnalysisExtension.CSV])
    old_solutions_df = get_solutions_df(output_ext, old_solutions_file_path)
    new_solutions_file_path = args.solutions_file_path_new
    new_solutions_df = get_solutions_df_by_file_path(new_solutions_file_path)
    diffs = find_diffs(old_solutions_df, new_solutions_df)
    # The pickled diffs land next to the old solutions file.
    output_path = get_parent_folder(Path(old_solutions_file_path)) / f'diffs{AnalysisExtension.PICKLE.value}'
    serialize_data_and_write_to_file(output_path, diffs)
main()
| true | true |
f7fd0e264e2bcd977dbd22a91f28692ff3e6303c | 90 | py | Python | test/__init__.py | sjbrownrigg/mediafile | b851e032869462ecb79e83bffa6a71350229c9a1 | [
"MIT"
] | 79 | 2016-06-15T09:18:38.000Z | 2022-03-19T16:30:05.000Z | test/__init__.py | sjbrownrigg/mediafile | b851e032869462ecb79e83bffa6a71350229c9a1 | [
"MIT"
] | 56 | 2016-07-03T02:41:55.000Z | 2022-03-30T11:44:08.000Z | test/__init__.py | sjbrownrigg/mediafile | b851e032869462ecb79e83bffa6a71350229c9a1 | [
"MIT"
] | 27 | 2016-07-03T02:29:51.000Z | 2022-01-16T08:42:27.000Z | # -*- coding: utf-8 -*-
from __future__ import division, absolute_import, print_function
| 22.5 | 64 | 0.744444 |
from __future__ import division, absolute_import, print_function
| true | true |
f7fd0ef23d781430f2fcb7377e4252960dfdb48b | 6,287 | py | Python | sdk/python/pulumi_azure_nextgen/compute/v20190701/gallery.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_nextgen/compute/v20190701/gallery.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_nextgen/compute/v20190701/gallery.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = ['Gallery']
class Gallery(pulumi.CustomResource):
    # NOTE: this class is emitted by the Pulumi SDK generator (see the file
    # header warning); hand edits are normally overwritten on regeneration.
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 gallery_name: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        Specifies information about the Shared Image Gallery that you want to create or update.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] description: The description of this Shared Image Gallery resource. This property is updatable.
        :param pulumi.Input[str] gallery_name: The name of the Shared Image Gallery. The allowed characters are alphabets and numbers with dots and periods allowed in the middle. The maximum length is 80 characters.
        :param pulumi.Input[str] location: Resource location
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
        """
        # Legacy positional-style arguments are accepted but deprecated.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # Without an explicit id this is a create; build the property bag.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            __props__['description'] = description
            __props__['gallery_name'] = gallery_name
            __props__['location'] = location
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            __props__['tags'] = tags
            # Output-only properties start as None and are resolved by the engine.
            __props__['identifier'] = None
            __props__['name'] = None
            __props__['provisioning_state'] = None
            __props__['type'] = None
        # Aliases keep state continuity across API-version renames of this type.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:compute:Gallery"), pulumi.Alias(type_="azure-nextgen:compute/latest:Gallery"), pulumi.Alias(type_="azure-nextgen:compute/v20180601:Gallery"), pulumi.Alias(type_="azure-nextgen:compute/v20190301:Gallery"), pulumi.Alias(type_="azure-nextgen:compute/v20191201:Gallery"), pulumi.Alias(type_="azure-nextgen:compute/v20200930:Gallery")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(Gallery, __self__).__init__(
            'azure-nextgen:compute/v20190701:Gallery',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'Gallery':
        """
        Get an existing Gallery resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # Properties are resolved by the engine from the looked-up state.
        __props__ = dict()
        return Gallery(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """
        The description of this Shared Image Gallery resource. This property is updatable.
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter
    def identifier(self) -> pulumi.Output[Optional['outputs.GalleryIdentifierResponse']]:
        """
        Describes the gallery unique name.
        """
        return pulumi.get(self, "identifier")

    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """
        Resource location
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Resource name
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """
        The provisioning state, which only appears in the response.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Resource tags
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Resource type
        """
        return pulumi.get(self, "type")

    # Map between the provider's camelCase names and Python snake_case.
    def translate_output_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 40.824675 | 418 | 0.642914 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = ['Gallery']
class Gallery(pulumi.CustomResource):
    """A Shared Image Gallery resource (Azure compute, API 2019-07-01).

    NOTE: generated by the Pulumi SDK generator; hand edits are normally
    overwritten on regeneration.
    """
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 gallery_name: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """Create or update a Shared Image Gallery.

        ``resource_group_name`` is required; the dunder parameters are
        deprecated legacy aliases for ``resource_name``/``opts``.
        """
        # Legacy positional-style arguments are accepted but deprecated.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # Without an explicit id this is a create; build the property bag.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            __props__['description'] = description
            __props__['gallery_name'] = gallery_name
            __props__['location'] = location
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            __props__['tags'] = tags
            # Output-only properties start as None and are resolved by the engine.
            __props__['identifier'] = None
            __props__['name'] = None
            __props__['provisioning_state'] = None
            __props__['type'] = None
        # Aliases keep state continuity across API-version renames of this type.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:compute:Gallery"), pulumi.Alias(type_="azure-nextgen:compute/latest:Gallery"), pulumi.Alias(type_="azure-nextgen:compute/v20180601:Gallery"), pulumi.Alias(type_="azure-nextgen:compute/v20190301:Gallery"), pulumi.Alias(type_="azure-nextgen:compute/v20191201:Gallery"), pulumi.Alias(type_="azure-nextgen:compute/v20200930:Gallery")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(Gallery, __self__).__init__(
            'azure-nextgen:compute/v20190701:Gallery',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'Gallery':
        """Look up an existing Gallery by provider id; outputs resolve from state."""
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = dict()
        return Gallery(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """The description of this Shared Image Gallery resource."""
        return pulumi.get(self, "description")
    @property
    @pulumi.getter
    def identifier(self) -> pulumi.Output[Optional['outputs.GalleryIdentifierResponse']]:
        """Describes the gallery unique name."""
        return pulumi.get(self, "identifier")
    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """Resource location."""
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """Resource name."""
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """The provisioning state, which only appears in the response."""
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """Resource tags."""
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """Resource type."""
        return pulumi.get(self, "type")
    # Map between the provider's camelCase names and Python snake_case.
    def translate_output_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| true | true |
f7fd0fb94825ccf6cf45cea8de36bcaf49154ed7 | 1,944 | py | Python | words_stat.py | grinya007/streaming_rnn | 71ad0c1640c5e482da1b74e28625025c6c7b3d5a | [
"MIT"
] | null | null | null | words_stat.py | grinya007/streaming_rnn | 71ad0c1640c5e482da1b74e28625025c6c7b3d5a | [
"MIT"
] | null | null | null | words_stat.py | grinya007/streaming_rnn | 71ad0c1640c5e482da1b74e28625025c6c7b3d5a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import argparse
from data import read_texts_csv, strip_words
from plot import save_plot
from static_vocabulary import StaticVocabulary
from dynamic_vocabulary_2q import DynamicVocabulary2Q
# Experiment parameters (counts of stream items processed per phase).
VOCABSIZE = 110000  # capacity of each vocabulary being compared
FILLVOCAB = 10000000  # size of the stream prefix used to warm up the vocabularies
RUNTESTON = 100000000  # upper bound on items processed in the measurement phase
PLOTEVERY = 2000000  # number of words accumulated per plotted data point
def plot_unknown_words_ratio(input_csv):
    """Compare unknown-word ratios of a static vs. a dynamic (2Q) vocabulary.

    Phase 1 fills both vocabularies from the start of the text stream; phase 2
    keeps streaming and records, every PLOTEVERY words, the percentage of words
    each vocabulary cannot resolve, then saves the curves to 'uwr.png'.

    :param input_csv: path to a csv file with a 'content' text column
    """
    # NOTE(review): indentation was reconstructed; the counters below are
    # assumed to advance per *word* — confirm against the upstream repository.
    static = StaticVocabulary(VOCABSIZE)
    dynamic = DynamicVocabulary2Q(VOCABSIZE)
    # Phase 1: feeding vocabularies with the first FILLVOCAB words.
    i = 0
    words = []
    text_gen = read_texts_csv(input_csv, 'content')
    for text in text_gen:
        if i >= FILLVOCAB:
            break
        for word in strip_words(text):
            words.append(word)
            dynamic.word2idx(word)
            i += 1
    static.fill(words)
    # Phase 2: measure how often each vocabulary fails to resolve a word.
    i = 0
    count = 0
    static_unknowns = 0
    dynamic_unknowns = 0
    stat = {
        'StaticVocabulary': [],
        'DynamicVocabulary2Q': []
    }
    for text in text_gen:
        if i >= RUNTESTON:
            break
        for word in strip_words(text):
            # NOTE index 0 means that a word is unknown
            static_unknowns += 1 if static.word2idx(word) == 0 else 0
            dynamic_unknowns += 1 if dynamic.word2idx(word) == 0 else 0
            if count == PLOTEVERY:
                # Emit one data point (percentage of unknowns) per window,
                # then reset the window counters.
                stat['StaticVocabulary'].append(100*static_unknowns/count)
                stat['DynamicVocabulary2Q'].append(100*dynamic_unknowns/count)
                static_unknowns = 0
                dynamic_unknowns = 0
                count = 0
            count += 1
            i += 1
    save_plot('Unknown word ratio', 'word count', '%', stat, PLOTEVERY, 'uwr.png')
def main():
    """Parse the command line and run the vocabulary comparison."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        'input_csv',
        type=str,
        help="Path to the input csv file",
    )
    parsed = arg_parser.parse_args()
    plot_unknown_words_ratio(parsed.input_csv)
# Run the experiment only when executed as a script.
if __name__ == '__main__':
    main()
| 24.607595 | 82 | 0.610082 |
import argparse
from data import read_texts_csv, strip_words
from plot import save_plot
from static_vocabulary import StaticVocabulary
from dynamic_vocabulary_2q import DynamicVocabulary2Q
# Tunable sizes for the experiment phases.
VOCABSIZE = 110000  # slots in each vocabulary under test
FILLVOCAB = 10000000  # warm-up portion of the stream
RUNTESTON = 100000000  # cap on the measurement portion of the stream
PLOTEVERY = 2000000  # words per plotted measurement window
def plot_unknown_words_ratio(input_csv):
    """Plot the unknown-word ratio of a static vs. a dynamic 2Q vocabulary.

    First both vocabularies are populated from the beginning of the text
    stream; then the rest of the stream is scanned and the share of
    unresolvable words is plotted every PLOTEVERY words to 'uwr.png'.

    :param input_csv: csv file with a 'content' column of texts
    """
    # NOTE(review): original indentation was lost in this dump; counters are
    # reconstructed as per-word — verify against the source repository.
    static = StaticVocabulary(VOCABSIZE)
    dynamic = DynamicVocabulary2Q(VOCABSIZE)
    # Warm-up phase: feed the first FILLVOCAB words to both vocabularies.
    i = 0
    words = []
    text_gen = read_texts_csv(input_csv, 'content')
    for text in text_gen:
        if i >= FILLVOCAB:
            break
        for word in strip_words(text):
            words.append(word)
            dynamic.word2idx(word)
            i += 1
    static.fill(words)
    # Measurement phase: count lookups that return index 0 (= unknown word).
    i = 0
    count = 0
    static_unknowns = 0
    dynamic_unknowns = 0
    stat = {
        'StaticVocabulary': [],
        'DynamicVocabulary2Q': []
    }
    for text in text_gen:
        if i >= RUNTESTON:
            break
        for word in strip_words(text):
            static_unknowns += 1 if static.word2idx(word) == 0 else 0
            dynamic_unknowns += 1 if dynamic.word2idx(word) == 0 else 0
            if count == PLOTEVERY:
                # One data point per PLOTEVERY-word window, then reset.
                stat['StaticVocabulary'].append(100*static_unknowns/count)
                stat['DynamicVocabulary2Q'].append(100*dynamic_unknowns/count)
                static_unknowns = 0
                dynamic_unknowns = 0
                count = 0
            count += 1
            i += 1
    save_plot('Unknown word ratio', 'word count', '%', stat, PLOTEVERY, 'uwr.png')
def main():
    """Entry point: read the input csv path from argv and run the comparison."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'input_csv',
        type=str,
        help="Path to the input csv file",
    )
    args = parser.parse_args()
    plot_unknown_words_ratio(args.input_csv)
# Script entry point guard.
if __name__ == '__main__':
    main()
| true | true |
f7fd0ff6e498597ff0bf1f48967a9c926ad4a86e | 216 | py | Python | t1.py | paulc/streamlit-hello | f41623efe049db982ac63af725ee2316477360e3 | [
"MIT"
] | null | null | null | t1.py | paulc/streamlit-hello | f41623efe049db982ac63af725ee2316477360e3 | [
"MIT"
] | null | null | null | t1.py | paulc/streamlit-hello | f41623efe049db982ac63af725ee2316477360e3 | [
"MIT"
] | null | null | null |
import streamlit as st
import pandas as pd
# Streamlit demo: show a caption, then render a small pandas table.
st.write("Here's our first attempt at using data to create a table:")
demo_table = pd.DataFrame({
    'first column': [1, 2, 3, 4],
    'second column': [10, 20, 30, 40]
})
st.write(demo_table)
| 19.636364 | 69 | 0.648148 |
import streamlit as st
import pandas as pd
# Streamlit demo: caption followed by a small DataFrame rendered as a table.
st.write("Here's our first attempt at using data to create a table:")
st.write(pd.DataFrame({
    'first column': [1, 2, 3, 4],
    'second column': [10, 20, 30, 40]
}))
| true | true |
f7fd10e30d6ec15ffd4a350ad33fe5232cf11eda | 3,174 | py | Python | src/reading_stimuli.py | aslansd/DNNforVPL | 7cda3eb327050f98b0867a4eca4cadb813d2c466 | [
"MIT"
] | null | null | null | src/reading_stimuli.py | aslansd/DNNforVPL | 7cda3eb327050f98b0867a4eca4cadb813d2c466 | [
"MIT"
] | null | null | null | src/reading_stimuli.py | aslansd/DNNforVPL | 7cda3eb327050f98b0867a4eca4cadb813d2c466 | [
"MIT"
] | null | null | null | """
Created by Aslan Satary Dizaji (a.satarydizaji@eni-g.de)
"""
import gc
import glob
import numpy as np
import os
import random
import torch
import torchvision.transforms as transforms
from PIL import Image
def reading_stimuli(file_names, file_name_paths, orientation, spatial_frequency):
    """Load grating stimuli, preprocess them and build labelled tensors.

    BUGFIX: the original ``def`` line was missing its trailing colon, which is
    a SyntaxError.

    File names are expected to start (before the first '_') with the
    concatenation '<spatial_frequency><orientation><phase>'; only odd phases
    are kept (inferred from the parsing below — confirm with the stimulus
    generator).

    :param file_names: stimulus file names to parse for SF/orientation/phase
    :param file_name_paths: matching full paths used to open the images
    :param orientation: orientations; first half labelled 0, second half 1
    :param spatial_frequency: spatial frequencies encoded in the names
    :return: tuple ``(x_val, y_val, z_val, x_tensor, y_tensor)`` where x_val
        holds raw 224x224x3 images, y_val binary labels, z_val the index of
        each (SF, orientation, phase) slot, and x_tensor/y_tensor the
        normalized torch inputs and label tensors.
    """
    # One slot per (spatial frequency, orientation, odd phase) combination.
    x_val = np.zeros((len(spatial_frequency) * len(orientation) * 180, 224, 224, 3), dtype=np.float32)
    y_val = np.zeros((len(spatial_frequency) * len(orientation) * 180, 1), dtype=np.int64)
    z_val = np.zeros((len(spatial_frequency), len(orientation), 180), dtype=np.int64)

    x_tensor = []
    y_tensor = []
    counter = -1

    for i in range(len(file_names)):
        # The leading name segment encodes SF, orientation and phase.
        name_digits = file_names[i].split('_')
        flag_image_name = False
        for j in range(len(spatial_frequency)):
            for k in range(len(orientation)):
                # NOTE(review): substring matching may be ambiguous if one
                # SF/orientation string is a prefix of another — verify inputs.
                SFplusOri = str(spatial_frequency[j]) + str(orientation[k])
                if SFplusOri in name_digits[0]:
                    Phase = int(name_digits[0].replace(SFplusOri, ''))
                    if Phase % 2 == 1:  # keep only odd phases
                        counter = counter + 1
                        flag_image_name = True
                        # First half of the orientations -> class 0, rest -> 1.
                        if k <= int(len(orientation) / 2 - 1):
                            y_val[counter] = 0
                        else:
                            y_val[counter] = 1
                        z_val[j][k][((Phase + 1) // 2) - 1] = counter
        if flag_image_name:
            # Load image as RGB.
            img = Image.open(file_name_paths[i]).convert('RGB')
            # Resize so the shorter side becomes 256 pixels.
            width, height = img.size
            new_width = width * 256 // min(img.size)
            new_height = height * 256 // min(img.size)
            img = img.resize((new_width, new_height), Image.BILINEAR)
            # Center-crop to 224x224.
            width, height = img.size
            startx = width // 2 - (224 // 2)
            starty = height // 2 - (224 // 2)
            img = np.asarray(img).reshape(height, width, 3)
            img = img[starty:starty + 224, startx:startx + 224]
            assert img.shape[0] == 224 and img.shape[1] == 224, (img.shape, height, width)
            # Save raw image into the preallocated array.
            x_val[counter, :, :, :] = img[:, :, :]
            # HWC -> CHW tensor, normalized with the ImageNet statistics.
            x_temp = torch.from_numpy(np.transpose(x_val[counter, :, :, :], (2, 0, 1)))
            normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
            x_tensor.append(normalize(x_temp))
            # Matching label tensor.
            y_tensor.append(torch.from_numpy(y_val[counter]))

    x_tensor = torch.stack(x_tensor)
    y_tensor = torch.stack(y_tensor)

    return x_val, y_val, z_val, x_tensor, y_tensor
Created by Aslan Satary Dizaji (a.satarydizaji@eni-g.de)
"""
import gc
import glob
import numpy as np
import os
import random
import torch
import torchvision.transforms as transforms
from PIL import Image
def reading_stimuli(file_names, file_name_paths, orientation, spatial_frequency):
    """Load grating stimuli and build labelled image/label tensors.

    BUGFIX: the ``def`` line was missing its trailing colon (SyntaxError).

    Names are parsed as '<spatial_frequency><orientation><phase>' before the
    first '_' (inferred from the parsing below); only odd phases are kept.

    :return: ``(x_val, y_val, z_val, x_tensor, y_tensor)`` — raw images,
        binary labels, slot indices, and normalized torch tensors.
    """
    x_val = np.zeros((len(spatial_frequency) * len(orientation) * 180, 224, 224, 3), dtype=np.float32)
    y_val = np.zeros((len(spatial_frequency) * len(orientation) * 180, 1), dtype=np.int64)
    z_val = np.zeros((len(spatial_frequency), len(orientation), 180), dtype=np.int64)

    x_tensor = []
    y_tensor = []
    counter = -1

    for i in range(len(file_names)):
        name_digits = file_names[i].split('_')
        flag_image_name = False
        for j in range(len(spatial_frequency)):
            for k in range(len(orientation)):
                SFplusOri = str(spatial_frequency[j]) + str(orientation[k])
                if SFplusOri in name_digits[0]:
                    Phase = int(name_digits[0].replace(SFplusOri, ''))
                    if Phase % 2 == 1:  # odd phases only
                        counter = counter + 1
                        flag_image_name = True
                        # First half of orientations -> label 0, rest -> 1.
                        if k <= int(len(orientation) / 2 - 1):
                            y_val[counter] = 0
                        else:
                            y_val[counter] = 1
                        z_val[j][k][((Phase + 1) // 2) - 1] = counter
        if flag_image_name:
            # Load, shortest-side resize to 256, then center-crop to 224x224.
            img = Image.open(file_name_paths[i]).convert('RGB')
            width, height = img.size
            new_width = width * 256 // min(img.size)
            new_height = height * 256 // min(img.size)
            img = img.resize((new_width, new_height), Image.BILINEAR)
            width, height = img.size
            startx = width // 2 - (224 // 2)
            starty = height // 2 - (224 // 2)
            img = np.asarray(img).reshape(height, width, 3)
            img = img[starty:starty + 224, startx:startx + 224]
            assert img.shape[0] == 224 and img.shape[1] == 224, (img.shape, height, width)
            x_val[counter, :, :, :] = img[:, :, :]
            # HWC -> CHW and ImageNet normalization.
            x_temp = torch.from_numpy(np.transpose(x_val[counter, :, :, :], (2, 0, 1)))
            normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
            x_tensor.append(normalize(x_temp))
            y_tensor.append(torch.from_numpy(y_val[counter]))

    x_tensor = torch.stack(x_tensor)
    y_tensor = torch.stack(y_tensor)

    return x_val, y_val, z_val, x_tensor, y_tensor
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.