content stringlengths 27 928k | path stringlengths 4 230 | size int64 27 928k | nl_text stringlengths 21 396k | nl_size int64 21 396k | nl_language stringlengths 2 3 | nl_language_score float64 0.04 1 |
|---|---|---|---|---|---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###################################################################
# Author: Mu yanru
# Date : 2018.5
# Email : muyanru345@163.com
###################################################################
from dayu_widgets.item_model import MSortFilterModel, MTableModel
from dayu_widgets.item_view import MTableView, MTreeView, MBigView, MListView
from dayu_widgets.line_edit import MLineEdit
from dayu_widgets.tool_button import MToolButton
from dayu_widgets.qt import QWidget, QModelIndex, Signal, QVBoxLayout, QApplication, Qt, Slot, QHBoxLayout
class MItemViewSet(QWidget):
    """Composite widget bundling an item view with search/filter models.

    A ``MTableModel`` source feeds a ``MSortFilterModel`` proxy which in turn
    feeds the view created from ``view_type`` (table view by default).
    Double clicks are re-emitted as-is; left-button presses are re-emitted
    with their index mapped back to the source model.  A search line edit is
    built but kept hidden until :meth:`searchable` is called.
    """

    sig_double_clicked = Signal(QModelIndex)
    sig_left_clicked = Signal(QModelIndex)
    TableViewType = MTableView
    BigViewType = MBigView
    TreeViewType = MTreeView
    ListViewType = MListView

    def __init__(self, view_type=None, parent=None):
        super(MItemViewSet, self).__init__(parent)
        # Model chain: source model -> sort/filter proxy -> view.
        self.sort_filter_model = MSortFilterModel()
        self.source_model = MTableModel()
        self.sort_filter_model.setSourceModel(self.source_model)
        chosen_view_class = view_type if view_type else MItemViewSet.TableViewType
        self.item_view = chosen_view_class()
        self.item_view.doubleClicked.connect(self.sig_double_clicked)
        self.item_view.pressed.connect(self.slot_left_clicked)
        self.item_view.setModel(self.sort_filter_model)
        # Search bar: hidden by default, enabled through searchable().
        self._search_attr_button = MToolButton().icon_only().svg('down_fill.svg').small()
        self._search_line_edit = MLineEdit().search().small()
        self._search_line_edit.set_prefix_widget(self._search_attr_button)
        self._search_line_edit.textChanged.connect(self.sort_filter_model.set_search_pattern)
        self._search_line_edit.setVisible(False)
        search_lay = QHBoxLayout()
        search_lay.setContentsMargins(0, 0, 0, 0)
        search_lay.addStretch()
        search_lay.addWidget(self._search_line_edit)
        # Main layout: search row on top, the item view below it.
        self._main_lay = QVBoxLayout()
        self._main_lay.setSpacing(5)
        self._main_lay.setContentsMargins(0, 0, 0, 0)
        self._main_lay.addLayout(search_lay)
        self._main_lay.addWidget(self.item_view)
        self.setLayout(self._main_lay)

    @Slot(QModelIndex)
    def slot_left_clicked(self, start_index):
        """Re-emit a left-button press with its index mapped to the source model."""
        if QApplication.mouseButtons() == Qt.LeftButton:
            self.sig_left_clicked.emit(self.sort_filter_model.mapToSource(start_index))

    def set_header_list(self, header_list):
        """Apply ``header_list`` to the source model, the proxy and the view."""
        self.source_model.set_header_list(header_list)
        self.sort_filter_model.set_header_list(header_list)
        self.sort_filter_model.setSourceModel(self.source_model)
        self.item_view.set_header_list(header_list)

    @Slot()
    def setup_data(self, data_list):
        """Clear the source model, then load ``data_list`` into it (if any)."""
        self.source_model.clear()
        if data_list:
            self.source_model.set_data_list(data_list)

    def get_data(self):
        """Return the data list held by the source model."""
        return self.source_model.get_data_list()

    def searchable(self):
        """Enable search line edit visible."""
        self._search_line_edit.setVisible(True)
        return self
| dayu_widgets/item_view_set.py | 3,120 | Enable search line edit visible.
!/usr/bin/env python -*- coding: utf-8 -*- Author: Mu yanru Date : 2018.5 Email : muyanru345@163.com | 135 | en | 0.263348 |
# Generated by Django 2.2 on 2019-05-08 20:45
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates a custom ``User`` model that
    # logs in by unique email (no username field) and is wired into Django's
    # auth Group/Permission machinery.  Generated code -- do not hand-edit
    # field definitions; create a new migration instead.

    initial = True

    dependencies = [
        # Requires the auth app so the Group/Permission M2M targets exist.
        ('auth', '0011_update_proxy_permissions'),
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('email', models.EmailField(max_length=255, unique=True)),
                ('name', models.CharField(max_length=255)),
                ('is_active', models.BooleanField(default=True)),
                ('is_staff', models.BooleanField(default=False)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| app/core/migrations/0001_initial.py | 1,699 | Generated by Django 2.2 on 2019-05-08 20:45 | 43 | en | 0.559877 |
import time
import datetime
import json
import hashlib
from .env import Env
from .server import Server
from .hardware import Hardware
class Metric(object):
    """Container for a single benchmark run's report data."""

    def __init__(self):
        # format of report data
        self._version = '0.1'
        self._type = 'metric'
        self.run_id = None
        self.mode = None
        self.server = Server()
        self.hardware = Hardware()
        self.env = Env()
        self.status = "INIT"
        self.err_message = ""
        self.collection = {}
        self.index = {}
        self.search = {}
        self.run_params = {}
        # "value" starts as None and is lazily turned into a dict by
        # update_result() on first use.
        self.metrics = {
            "type": "",
            "value": None,
        }
        self.datetime = str(datetime.datetime.now())

    def set_run_id(self):
        # Get current time as run id, which uniquely identifies this test
        self.run_id = int(time.time())

    def set_mode(self, mode):
        # Set the deployment mode of milvus
        self.mode = mode

    # including: metric, suite_metric
    def set_case_metric_type(self):
        self._type = "case"

    def json_md5(self):
        """Return the md5 hex digest of this object's JSON-serialized state."""
        json_str = json.dumps(vars(self), sort_keys=True)
        return hashlib.md5(json_str.encode('utf-8')).hexdigest()

    def update_status(self, status):
        # Set the final result of the test run: RUN_SUCC or RUN_FAILED
        self.status = status

    def update_result(self, result):
        """Merge ``result`` into the metric value dict.

        Fix: ``metrics["value"]`` is initialized to ``None``, so the previous
        unconditional ``.update()`` raised AttributeError on the first call.
        """
        if self.metrics.get("value") is None:
            self.metrics["value"] = {}
        self.metrics["value"].update(result)

    def update_message(self, err_message):
        """Record the error message for a failed run."""
        self.err_message = err_message
from credentials import credentials
import unittest
import pyperclip
class TestUser(unittest.TestCase):
    '''
    Test that defines test cases for the User class
    Args:
        unitest.Testcase: Testcase that helps in creating test cases for class User.
    '''

    def setUp(self):
        '''
        Set up method to run before each test case
        '''
        self.new_user = credentials("Paul", "123")

    def test__init__(self):
        '''
        test__init__ test case to test if the object is initialized properly
        '''
        self.assertEqual(self.new_user.user_name, "Paul")
        self.assertEqual(self.new_user.password, "123")

    def test__save_user(self):
        '''
        test to see if the user is saved
        '''
        # Fix: setUp defines ``self.new_user`` -- the original referenced the
        # undefined attribute ``self.new_credentials`` and raised
        # AttributeError instead of testing anything.
        self.new_user.save_credentials()
        self.assertEqual(len(credentials.user_list), 1)
# Run this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| credentials_test.py | 918 | Test that defines test cases for the User class
Args:
unitest.Testcase: Testcase that helps in creating test cases for class User.
Set up method to run before each test case
test__init__ test case to test if the object is initialized properly
test to see if the user is saved | 279 | en | 0.783039 |
import inspect
import sys
from enum import IntEnum
from pathlib import Path
from time import time
from logging import getLevelName
from typing import Tuple, Union, Any, List, Iterable, TextIO, Optional
from . import logging
from .logging import _set_log_level, _set_log_file, RootLogger
# Maps verbosity names to logging level names; the loop below additionally
# maps each name's integer position (0..4), so the dict accepts both
# 'warning' and 1 as keys.
_VERBOSITY_TO_LOGLEVEL = {
    'error': 'ERROR',
    'warning': 'WARNING',
    'info': 'INFO',
    'hint': 'HINT',
    'debug': 'DEBUG',
}
# Python 3.7 ensures iteration order
for v, level in enumerate(list(_VERBOSITY_TO_LOGLEVEL.values())):
    _VERBOSITY_TO_LOGLEVEL[v] = level
class Verbosity(IntEnum):
    """Verbosity levels; a higher level includes all lower-level messages."""

    error = 0
    warn = 1
    info = 2
    hint = 3
    debug = 4

    @property
    def level(self) -> int:
        # getLevelName(str) returns the int level…
        return getLevelName(_VERBOSITY_TO_LOGLEVEL[self])
def _type_check(var: Any, varname: str, types: Union[type, Tuple[type, ...]]):
if isinstance(var, types):
return
if isinstance(types, type):
possible_types_str = types.__name__
else:
type_names = [t.__name__ for t in types]
possible_types_str = "{} or {}".format(
", ".join(type_names[:-1]), type_names[-1]
)
raise TypeError(f"{varname} must be of type {possible_types_str}")
class ScanpyConfig:
    """Config manager for scanpy.
    """

    def __init__(
        self,
        *,
        verbosity: str = "warning",
        plot_suffix: str = "",
        file_format_data: str = "h5ad",
        file_format_figs: str = "pdf",
        autosave: bool = False,
        autoshow: bool = True,
        writedir: Union[str, Path] = "./write/",
        cachedir: Union[str, Path] = "./cache/",
        datasetdir: Union[str, Path] = "./data/",
        figdir: Union[str, Path] = "./figures/",
        max_memory=15,
        n_jobs=1,
        logfile: Union[str, Path, None] = None,
        categories_to_ignore: Iterable[str] = ("N/A", "dontknow", "no_gate", "?"),
        _frameon: bool = True,
        _vector_friendly: bool = False,
        _low_resolution_warning: bool = True,
    ):
        # logging
        # NOTE: assignment order matters here -- the ``logfile`` and
        # ``verbosity`` property setters configure the root logger created
        # on the previous line.
        self._root_logger = RootLogger(logging.INFO)  # level will be replaced
        self.logfile = logfile
        self.verbosity = verbosity
        # rest
        # All remaining assignments go through the validating property
        # setters defined below.
        self.plot_suffix = plot_suffix
        self.file_format_data = file_format_data
        self.file_format_figs = file_format_figs
        self.autosave = autosave
        self.autoshow = autoshow
        self.writedir = writedir
        self.cachedir = cachedir
        self.datasetdir = datasetdir
        self.figdir = figdir
        self.max_memory = max_memory
        self.n_jobs = n_jobs
        self.categories_to_ignore = categories_to_ignore
        self._frameon = _frameon
        """bool: See set_figure_params."""
        self._vector_friendly = _vector_friendly
        """Set to true if you want to include pngs in svgs and pdfs."""
        self._low_resolution_warning = _low_resolution_warning
        """Print warning when saving a figure with low resolution."""
        self._start = time()
        """Time when the settings module is first imported."""
        self._previous_time = self._start
        """Variable for timing program parts."""
        self._previous_memory_usage = -1
        """Stores the previous memory usage."""

    @property
    def verbosity(self) -> Verbosity:
        """
        Verbosity level (default `warning`)
        Level 0: only show 'error' messages.
        Level 1: also show 'warning' messages.
        Level 2: also show 'info' messages.
        Level 3: also show 'hint' messages.
        Level 4: also show very detailed progress for 'debug'ging.
        """
        return self._verbosity

    @verbosity.setter
    def verbosity(self, verbosity: Union[Verbosity, int, str]):
        # _VERBOSITY_TO_LOGLEVEL holds both str and int keys; only the
        # string names are valid user-facing spellings here.
        verbosity_str_options = [
            v for v in _VERBOSITY_TO_LOGLEVEL
            if isinstance(v, str)
        ]
        if isinstance(verbosity, Verbosity):
            self._verbosity = verbosity
        elif isinstance(verbosity, int):
            self._verbosity = Verbosity(verbosity)
        elif isinstance(verbosity, str):
            verbosity = verbosity.lower()
            if verbosity not in verbosity_str_options:
                raise ValueError(
                    f"Cannot set verbosity to {verbosity}. "
                    f"Accepted string values are: {verbosity_str_options}"
                )
            else:
                self._verbosity = Verbosity(verbosity_str_options.index(verbosity))
        else:
            # Raises TypeError; nothing after this line runs for bad input.
            _type_check(verbosity, "verbosity", (str, int))
        _set_log_level(self, _VERBOSITY_TO_LOGLEVEL[self._verbosity])

    @property
    def plot_suffix(self) -> str:
        """Global suffix that is appended to figure filenames.
        """
        return self._plot_suffix

    @plot_suffix.setter
    def plot_suffix(self, plot_suffix: str):
        _type_check(plot_suffix, "plot_suffix", str)
        self._plot_suffix = plot_suffix

    @property
    def file_format_data(self) -> str:
        """File format for saving AnnData objects.
        Allowed are 'txt', 'csv' (comma separated value file) for exporting and 'h5ad'
        (hdf5) for lossless saving.
        """
        return self._file_format_data

    @file_format_data.setter
    def file_format_data(self, file_format: str):
        _type_check(file_format, "file_format_data", str)
        file_format_options = {"txt", "csv", "h5ad"}
        if file_format not in file_format_options:
            raise ValueError(
                f"Cannot set file_format_data to {file_format}. "
                f"Must be one of {file_format_options}"
            )
        self._file_format_data = file_format

    @property
    def file_format_figs(self) -> str:
        """File format for saving figures.
        For example 'png', 'pdf' or 'svg'. Many other formats work as well (see
        `matplotlib.pyplot.savefig`).
        """
        return self._file_format_figs

    @file_format_figs.setter
    def file_format_figs(self, figure_format: str):
        # NOTE(review): varname label says "figure_format_data" -- likely
        # meant "file_format_figs"; affects the error message only.
        _type_check(figure_format, "figure_format_data", str)
        self._file_format_figs = figure_format

    @property
    def autosave(self) -> bool:
        """\
        Automatically save figures in :attr:`~scanpy._settings.ScanpyConfig.figdir` (default `False`).
        Do not show plots/figures interactively.
        """
        return self._autosave

    @autosave.setter
    def autosave(self, autosave: bool):
        _type_check(autosave, "autosave", bool)
        self._autosave = autosave

    @property
    def autoshow(self) -> bool:
        """\
        Automatically show figures if `autosave == False` (default `True`).
        There is no need to call the matplotlib pl.show() in this case.
        """
        return self._autoshow

    @autoshow.setter
    def autoshow(self, autoshow: bool):
        _type_check(autoshow, "autoshow", bool)
        self._autoshow = autoshow

    @property
    def writedir(self) -> Path:
        """\
        Directory where the function scanpy.write writes to by default.
        """
        return self._writedir

    @writedir.setter
    def writedir(self, writedir: Union[str, Path]):
        _type_check(writedir, "writedir", (str, Path))
        self._writedir = Path(writedir)

    @property
    def cachedir(self) -> Path:
        """\
        Directory for cache files (default `'./cache/'`).
        """
        return self._cachedir

    @cachedir.setter
    def cachedir(self, cachedir: Union[str, Path]):
        _type_check(cachedir, "cachedir", (str, Path))
        self._cachedir = Path(cachedir)

    @property
    def datasetdir(self) -> Path:
        """\
        Directory for example :mod:`~scanpy.datasets` (default `'./data/'`).
        """
        return self._datasetdir

    @datasetdir.setter
    def datasetdir(self, datasetdir: Union[str, Path]):
        _type_check(datasetdir, "datasetdir", (str, Path))
        # Unlike the other dir properties, this one is resolved to an
        # absolute path.
        self._datasetdir = Path(datasetdir).resolve()

    @property
    def figdir(self) -> Path:
        """\
        Directory for saving figures (default `'./figures/'`).
        """
        return self._figdir

    @figdir.setter
    def figdir(self, figdir: Union[str, Path]):
        _type_check(figdir, "figdir", (str, Path))
        self._figdir = Path(figdir)

    @property
    def max_memory(self) -> Union[int, float]:
        """\
        Maximal memory usage in Gigabyte.
        Is currently not well respected....
        """
        return self._max_memory

    @max_memory.setter
    def max_memory(self, max_memory: Union[int, float]):
        _type_check(max_memory, "max_memory", (int, float))
        self._max_memory = max_memory

    @property
    def n_jobs(self) -> int:
        """\
        Default number of jobs/ CPUs to use for parallel computing.
        """
        return self._n_jobs

    @n_jobs.setter
    def n_jobs(self, n_jobs: int):
        _type_check(n_jobs, "n_jobs", int)
        self._n_jobs = n_jobs

    @property
    def logpath(self) -> Optional[Path]:
        """\
        The file path `logfile` was set to.
        """
        return self._logpath

    @logpath.setter
    def logpath(self, logpath: Union[str, Path, None]):
        _type_check(logpath, "logfile", (str, Path))
        # set via “file object” branch of logfile.setter
        self.logfile = Path(logpath).open('a')
        self._logpath = Path(logpath)

    @property
    def logfile(self) -> TextIO:
        """\
        The open file to write logs to.
        Set it to a :class:`~pathlib.Path` or :class:`str` to open a new one.
        The default `None` corresponds to :obj:`sys.stdout` in jupyter notebooks
        and to :obj:`sys.stderr` otherwise.
        For backwards compatibility, setting it to `''` behaves like setting it to `None`.
        """
        return self._logfile

    @logfile.setter
    def logfile(self, logfile: Union[str, Path, TextIO, None]):
        # Paths/strings are routed through the logpath setter, which re-enters
        # this setter with an open file object.
        if not hasattr(logfile, 'write') and logfile:
            self.logpath = logfile
        else:  # file object
            if not logfile:  # None or ''
                logfile = sys.stdout if self._is_run_from_ipython() else sys.stderr
            self._logfile = logfile
            self._logpath = None
            _set_log_file(self)

    @property
    def categories_to_ignore(self) -> List[str]:
        """\
        Categories that are omitted in plotting etc.
        """
        return self._categories_to_ignore

    @categories_to_ignore.setter
    def categories_to_ignore(self, categories_to_ignore: Iterable[str]):
        categories_to_ignore = list(categories_to_ignore)
        for i, cat in enumerate(categories_to_ignore):
            _type_check(cat, f"categories_to_ignore[{i}]", str)
        self._categories_to_ignore = categories_to_ignore

    # --------------------------------------------------------------------------------
    # Functions
    # --------------------------------------------------------------------------------

    def set_figure_params(
        self,
        scanpy: bool = True,
        dpi: int = 80,
        dpi_save: int = 150,
        frameon: bool = True,
        vector_friendly: bool = True,
        fontsize: int = 14,
        color_map: Optional[str] = None,
        format: Union[str, Iterable[str]] = "pdf",
        transparent: bool = False,
        ipython_format: str = "png2x",
    ):
        """\
        Set resolution/size, styling and format of figures.
        Parameters
        ----------
        scanpy
            Init default values for :obj:`matplotlib.rcParams` suited for Scanpy.
        dpi
            Resolution of rendered figures - this influences the size of figures in notebooks.
        dpi_save
            Resolution of saved figures. This should typically be higher to achieve
            publication quality.
        frameon
            Add frames and axes labels to scatter plots.
        vector_friendly
            Plot scatter plots using `png` backend even when exporting as `pdf` or `svg`.
        fontsize
            Set the fontsize for several `rcParams` entries. Ignored if `scanpy=False`.
        color_map
            Convenience method for setting the default color map. Ignored if `scanpy=False`.
        format: {`'png'`, `'pdf'`, `'svg'`, etc.}, optional (default: `'pdf'`)
            This sets the default format for saving figures: `file_format_figs`.
        transparent
            Save figures with transparent back ground. Sets
            `rcParams['savefig.transparent']`.
        ipython_format
            Only concerns the notebook/IPython environment; see
            :func:`~IPython.display.set_matplotlib_formats` for details.
        """
        # Best-effort: silently skip the IPython display setup outside of
        # IPython environments.
        try:
            import IPython
            if isinstance(ipython_format, str):
                ipython_format = [ipython_format]
            IPython.display.set_matplotlib_formats(*ipython_format)
        except Exception:
            pass
        from matplotlib import rcParams
        self._vector_friendly = vector_friendly
        self.file_format_figs = format
        if dpi is not None:
            rcParams["figure.dpi"] = dpi
        if dpi_save is not None:
            rcParams["savefig.dpi"] = dpi_save
        if transparent is not None:
            rcParams["savefig.transparent"] = transparent
        if scanpy:
            from .plotting._rcmod import set_rcParams_scanpy
            set_rcParams_scanpy(fontsize=fontsize, color_map=color_map)
        self._frameon = frameon

    @staticmethod
    def _is_run_from_ipython():
        """Determines whether run from Ipython.
        Only affects progress bars.
        """
        try:
            __IPYTHON__
            return True
        except NameError:
            return False

    def __str__(self) -> str:
        # One "name = value" line per public attribute, for debugging.
        return '\n'.join(
            f'{k} = {v!r}'
            for k, v in inspect.getmembers(self)
            if not k.startswith("_") and not k == 'getdoc'
        )
# Module-level singleton holding the global scanpy settings.
settings = ScanpyConfig()
| scanpy/_settings.py | 14,034 | Config manager for scanpy.
Determines whether run from Ipython.
Only affects progress bars.
Automatically save figures in :attr:`~scanpy._settings.ScanpyConfig.figdir` (default `False`).
Do not show plots/figures interactively.
Automatically show figures if `autosave == False` (default `True`).
There is no need to call the matplotlib pl.show() in this case.
Directory for cache files (default `'./cache/'`).
Categories that are omitted in plotting etc.
Directory for example :mod:`~scanpy.datasets` (default `'./data/'`).
Directory for saving figures (default `'./figures/'`).
File format for saving AnnData objects.
Allowed are 'txt', 'csv' (comma separated value file) for exporting and 'h5ad'
(hdf5) for lossless saving.
File format for saving figures.
For example 'png', 'pdf' or 'svg'. Many other formats work as well (see
`matplotlib.pyplot.savefig`).
The open file to write logs to.
Set it to a :class:`~pathlib.Path` or :class:`str` to open a new one.
The default `None` corresponds to :obj:`sys.stdout` in jupyter notebooks
and to :obj:`sys.stderr` otherwise.
For backwards compatibility, setting it to `''` behaves like setting it to `None`.
The file path `logfile` was set to.
Maximal memory usage in Gigabyte.
Is currently not well respected....
Default number of jobs/ CPUs to use for parallel computing.
Global suffix that is appended to figure filenames.
Set resolution/size, styling and format of figures.
Parameters
----------
scanpy
Init default values for :obj:`matplotlib.rcParams` suited for Scanpy.
dpi
Resolution of rendered figures - this influences the size of figures in notebooks.
dpi_save
Resolution of saved figures. This should typically be higher to achieve
publication quality.
frameon
Add frames and axes labels to scatter plots.
vector_friendly
Plot scatter plots using `png` backend even when exporting as `pdf` or `svg`.
fontsize
Set the fontsize for several `rcParams` entries. Ignored if `scanpy=False`.
color_map
Convenience method for setting the default color map. Ignored if `scanpy=False`.
format: {`'png'`, `'pdf'`, `'svg'`, etc.}, optional (default: `'pdf'`)
This sets the default format for saving figures: `file_format_figs`.
transparent
Save figures with transparent back ground. Sets
`rcParams['savefig.transparent']`.
ipython_format
Only concerns the notebook/IPython environment; see
:func:`~IPython.display.set_matplotlib_formats` for details.
Verbosity level (default `warning`)
Level 0: only show 'error' messages.
Level 1: also show 'warning' messages.
Level 2: also show 'info' messages.
Level 3: also show 'hint' messages.
Level 4: also show very detailed progress for 'debug'ging.
Directory where the function scanpy.write writes to by default.
Python 3.7 ensures iteration order getLevelName(str) returns the int level… logging level will be replaced rest set via “file object” branch of logfile.setter file object None or '' -------------------------------------------------------------------------------- Functions -------------------------------------------------------------------------------- | 3,199 | en | 0.574073 |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../../'))

# -- Project information -----------------------------------------------------

project = 'DeepCTR'
copyright = '2017-present, Weichen Shen'
author = 'Weichen Shen'

# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '0.7.4'

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.mathjax',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
    'sphinx.ext.githubpages',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = ['.rst', '.md']

# The master toctree document.
master_doc = 'index'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# Fix: ``html_theme`` was previously assigned twice ('alabaster' here and
# 'sphinx_rtd_theme' at the bottom of the file); only the last assignment
# took effect, so the dead first one has been removed.
html_theme = 'sphinx_rtd_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}

# -- Options for HTMLHelp output ---------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'DeepCTRdoc'

# -- Options for LaTeX output ------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'DeepCTR.tex', 'DeepCTR Documentation',
     'Weichen Shen', 'manual'),
]

# -- Options for manual page output ------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'deepctr', 'DeepCTR Documentation',
     [author], 1)
]

# -- Options for Texinfo output ----------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'DeepCTR', 'DeepCTR Documentation',
     author, 'DeepCTR', 'One line description of project.',
     'Miscellaneous'),
]

# -- Extension configuration -------------------------------------------------

todo_include_todos = False

# Markdown support via recommonmark's CommonMark parser.
source_parsers = {
    '.md': 'recommonmark.parser.CommonMarkParser',
}
| docs/source/conf.py | 5,046 | -*- coding: utf-8 -*- Configuration file for the Sphinx documentation builder. This file does only contain a selection of the most common options. For a full list see the documentation: http://www.sphinx-doc.org/en/master/config -- Path setup -------------------------------------------------------------- If extensions (or modules to document with autodoc) are in another directory, add these directories to sys.path here. If the directory is relative to the documentation root, use os.path.abspath to make it absolute, like shown here. -- Project information ----------------------------------------------------- The short X.Y version The full version, including alpha/beta/rc tags -- General configuration --------------------------------------------------- If your documentation needs a minimal Sphinx version, state it here. needs_sphinx = '1.0' Add any Sphinx extension module names here, as strings. They can be extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. Add any paths that contain templates here, relative to this directory. The suffix(es) of source filenames. You can specify multiple suffix as a list of string:source_suffix = '.rst' The master toctree document. The language for content autogenerated by Sphinx. Refer to documentation for a list of supported languages. This is also used if you do content translation via gettext catalogs. Usually you set "language" from the command line for these cases. List of patterns, relative to source directory, that match files and directories to ignore when looking for source files. This pattern also affects html_static_path and html_extra_path . The name of the Pygments (syntax highlighting) style to use. -- Options for HTML output ------------------------------------------------- The theme to use for HTML and HTML Help pages. See the documentation for a list of builtin themes. Theme options are theme-specific and customize the look and feel of a theme further. 
For a list of options available for each theme, see the documentation. html_theme_options = {} Add any paths that contain custom static files (such as style sheets) here, relative to this directory. They are copied after the builtin static files, so a file named "default.css" will overwrite the builtin "default.css". Custom sidebar templates, must be a dictionary that maps document names to template names. The default sidebars (for documents that don't match any pattern) are defined by theme itself. Builtin themes are using these templates by default: ``['localtoc.html', 'relations.html', 'sourcelink.html', 'searchbox.html']``. html_sidebars = {} -- Options for HTMLHelp output --------------------------------------------- Output file base name for HTML help builder. -- Options for LaTeX output ------------------------------------------------ The paper size ('letterpaper' or 'a4paper'). 'papersize': 'letterpaper', The font size ('10pt', '11pt' or '12pt'). 'pointsize': '10pt', Additional stuff for the LaTeX preamble. 'preamble': '', Latex figure (float) alignment 'figure_align': 'htbp', Grouping the document tree into LaTeX files. List of tuples (source start file, target name, title, author, documentclass [howto, manual, or own class]). -- Options for manual page output ------------------------------------------ One entry per manual page. List of tuples (source start file, name, description, authors, manual section). -- Options for Texinfo output ---------------------------------------------- Grouping the document tree into Texinfo files. List of tuples (source start file, target name, title, author, dir menu entry, description, category) -- Extension configuration ------------------------------------------------- | 3,704 | en | 0.621105 |
from typing import Optional
import torch
from torch import Tensor
# TorchScript overload stub: ``ratio`` given as a plain float.
@torch.jit._overload  # noqa
def fps(src, batch=None, ratio=None, random_start=True):  # noqa
    # type: (Tensor, Optional[Tensor], Optional[float], bool) -> Tensor
    pass  # pragma: no cover
# TorchScript overload stub: ``ratio`` given as a Tensor.
@torch.jit._overload  # noqa
def fps(src, batch=None, ratio=None, random_start=True):  # noqa
    # type: (Tensor, Optional[Tensor], Optional[Tensor], bool) -> Tensor
    pass  # pragma: no cover
def fps(src: torch.Tensor, batch=None, ratio=None, random_start=True):  # noqa
    r"""A sampling algorithm from the `"PointNet++: Deep Hierarchical Feature
    Learning on Point Sets in a Metric Space"
    <https://arxiv.org/abs/1706.02413>`_ paper, which iteratively samples the
    most distant point with regard to the rest points.

    Args:
        src (Tensor): Point feature matrix
            :math:`\mathbf{X} \in \mathbb{R}^{N \times F}`.
        batch (LongTensor, optional): Batch vector
            :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each
            node to a specific example. (default: :obj:`None`)
        ratio (float or Tensor, optional): Sampling ratio.
            (default: :obj:`0.5`)
        random_start (bool, optional): If set to :obj:`False`, use the first
            node in :math:`\mathbf{X}` as starting node. (default: obj:`True`)

    :rtype: :class:`LongTensor`

    .. code-block:: python

        import torch
        from torch_cluster import fps

        src = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]])
        batch = torch.tensor([0, 0, 0, 0])
        index = fps(src, batch, ratio=0.5)
    """
    # Normalize ``ratio`` to a tensor matching ``src``'s dtype and device.
    r: Optional[Tensor] = None
    if ratio is None:
        r = torch.tensor(0.5, dtype=src.dtype, device=src.device)
    elif isinstance(ratio, float):
        r = torch.tensor(ratio, dtype=src.dtype, device=src.device)
    else:
        r = ratio
    assert r is not None
    # Build the CSR-style ``ptr`` vector delimiting each example's points.
    if batch is None:
        ptr = torch.tensor([0, src.size(0)], device=src.device)
    else:
        assert src.size(0) == batch.numel()
        num_examples = int(batch.max()) + 1
        counts = src.new_zeros(num_examples, dtype=torch.long)
        counts.scatter_add_(0, batch, torch.ones_like(batch))
        ptr = counts.new_zeros(num_examples + 1)
        torch.cumsum(counts, 0, out=ptr[1:])
    return torch.ops.torch_cluster.fps(src, ptr, r, random_start)
| torch_cluster/fps.py | 2,374 | "A sampling algorithm from the `"PointNet++: Deep Hierarchical Feature
Learning on Point Sets in a Metric Space"
<https://arxiv.org/abs/1706.02413>`_ paper, which iteratively samples the
most distant point with regard to the rest points.
Args:
src (Tensor): Point feature matrix
:math:`\mathbf{X} \in \mathbb{R}^{N \times F}`.
batch (LongTensor, optional): Batch vector
:math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each
node to a specific example. (default: :obj:`None`)
ratio (float or Tensor, optional): Sampling ratio.
(default: :obj:`0.5`)
random_start (bool, optional): If set to :obj:`False`, use the first
node in :math:`\mathbf{X}` as starting node. (default: obj:`True`)
:rtype: :class:`LongTensor`
.. code-block:: python
import torch
from torch_cluster import fps
src = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]])
batch = torch.tensor([0, 0, 0, 0])
index = fps(src, batch, ratio=0.5)
noqa noqa type: (Tensor, Optional[Tensor], Optional[float], bool) -> Tensor pragma: no cover noqa noqa type: (Tensor, Optional[Tensor], Optional[Tensor], bool) -> Tensor pragma: no cover noqa | 1,193 | en | 0.474461 |
import pytest
import click
from click.testing import CliRunner
from click._compat import PY2
# Use the most reasonable io that users would use for the python version.
if PY2:
from cStringIO import StringIO as ReasonableBytesIO
else:
from io import BytesIO as ReasonableBytesIO
def test_runner():
    """Echo binary stdin back to stdout; with echo_stdin the input shows twice."""
    @click.command()
    def test():
        stdin = click.get_binary_stream('stdin')
        stdout = click.get_binary_stream('stdout')
        # Copy stdin to stdout in 4 KiB chunks until EOF.
        for chunk in iter(lambda: stdin.read(4096), b''):
            stdout.write(chunk)
            stdout.flush()

    runner = CliRunner()
    result = runner.invoke(test, input='Hello World!\n')
    assert result.exception is None
    assert result.output == 'Hello World!\n'

    # echo_stdin mirrors everything read from stdin into the captured
    # output, so the text appears twice.
    runner = CliRunner(echo_stdin=True)
    result = runner.invoke(test, input='Hello World!\n')
    assert result.exception is None
    assert result.output == 'Hello World!\nHello World!\n'
def test_runner_with_stream():
    """Same echo round-trip as test_runner, but fed from a bytes stream."""
    @click.command()
    def test():
        in_stream = click.get_binary_stream('stdin')
        out_stream = click.get_binary_stream('stdout')
        while True:
            data = in_stream.read(4096)
            if not data:
                break
            out_stream.write(data)
            out_stream.flush()

    runner = CliRunner()
    result = runner.invoke(test, input=ReasonableBytesIO(b'Hello World!\n'))
    assert result.exception is None
    assert result.output == 'Hello World!\n'

    # With echo_stdin the stream contents are mirrored into the output too.
    runner = CliRunner(echo_stdin=True)
    result = runner.invoke(test, input=ReasonableBytesIO(b'Hello World!\n'))
    assert result.exception is None
    assert result.output == 'Hello World!\nHello World!\n'
def test_prompts():
    """Prompted options read from stdin; hide_input suppresses the echo."""
    @click.command()
    @click.option('--foo', prompt=True)
    def test(foo):
        click.echo('foo=%s' % foo)

    runner = CliRunner()
    visible = runner.invoke(test, input='wau wau\n')
    assert visible.exception is None
    # The typed value is echoed after the prompt, then printed by the command.
    assert visible.output == 'Foo: wau wau\nfoo=wau wau\n'

    @click.command()
    @click.option('--foo', prompt=True, hide_input=True)
    def test(foo):
        click.echo('foo=%s' % foo)

    runner = CliRunner()
    hidden = runner.invoke(test, input='wau wau\n')
    assert hidden.exception is None
    # hide_input keeps the typed value out of the prompt line.
    assert hidden.output == 'Foo: \nfoo=wau wau\n'
def test_getchar():
    """getchar() returns the single character fed to stdin."""
    @click.command()
    def continue_it():
        click.echo(click.getchar())

    result = CliRunner().invoke(continue_it, input='y')
    assert result.exception is None
    assert result.output == 'y\n'
def test_catch_exceptions():
    """Exceptions from a command are captured on the result unless
    catch_exceptions=False; SystemExit maps to the exit code instead."""
    class CustomError(Exception):
        pass

    @click.command()
    def cli():
        # NOTE: `CustomError` is looked up when `cli` runs (late binding),
        # so rebinding the name below changes what this raises.
        raise CustomError(1)

    runner = CliRunner()

    result = runner.invoke(cli)
    assert isinstance(result.exception, CustomError)
    assert type(result.exc_info) is tuple
    assert len(result.exc_info) == 3

    with pytest.raises(CustomError):
        runner.invoke(cli, catch_exceptions=False)

    # Rebind so `cli` now raises SystemExit(1): the runner converts
    # SystemExit into the result's exit code rather than an exception.
    CustomError = SystemExit

    result = runner.invoke(cli)
    assert result.exit_code == 1
| vendor/packages/click/tests/test_testing.py | 2,994 | Use the most reasonable io that users would use for the python version. | 71 | en | 0.939259 |
''' SPEECH-TO-TEXT USING MICROSOFT SPEECH API '''
''' nonstoptimm@gmail.com '''
# Import required packages
import os
import glob
import json
import logging
import codecs
import helper as he
import azure.cognitiveservices.speech as speechsdk
import params as pa
# Load and set configuration parameters
pa.get_config()
def request_endpoint(audio, speech_config, output_directory, lexical):
    """Transcribe a single audio file via the speech service endpoint.

    Args:
        audio: Path of the audio file to transcribe
        speech_config: speechsdk.SpeechConfig carrying key/region/endpoint
        output_directory: Output directory (passed through to
            process_recognition; writing itself is done by the caller)
        lexical: Boolean to enable extended lexical version of the STT result

    Returns:
        text: Recognized text (empty string when nothing was recognized)
        filename: Base name of the processed audio file
    """
    audio_config = speechsdk.audio.AudioConfig(filename = audio)
    speech_recognizer = speechsdk.SpeechRecognizer(speech_config = speech_config, audio_config = audio_config)
    result = speech_recognizer.recognize_once()
    # os.path.basename works on every OS; the previous rindex('\\') split
    # only handled Windows-style paths and raised ValueError otherwise.
    filename = os.path.basename(audio)
    text = process_recognition(result, filename, output_directory, lexical)
    return text, filename
def process_recognition(result, filename, output_directory, lexical):
    """Process recognition received from the speech service

    Args:
        result: Result object returned by STT-service
        filename: Filename for output file
        output_directory: Output directory for the file
        lexical: Boolean to enable extended lexical version of STT-result

    Returns:
        text: Processed recognition as string (empty when nothing was
            recognized or the request was canceled)
    """
    # Default to an empty transcript so any result reason not handled
    # below returns a string instead of raising NameError.
    text = ""
    if result.reason == speechsdk.ResultReason.RecognizedSpeech:
        if lexical:
            # Append the lexical (unformatted) form from the detailed response.
            text = f"{format(result.text)}\t{json.loads(result.json)['NBest'][0]['Lexical']}"
        else:
            text = f"{format(result.text)}"
        # Log the filename on success, consistent with the failure branches.
        logging.info(f"[INFO] - Recognition successful: {filename} -> {result.text}")
    elif result.reason == speechsdk.ResultReason.NoMatch:
        logging.warning(filename + "\t" + f"No speech could be recognized: {result.no_match_details}")
    elif result.reason == speechsdk.ResultReason.Canceled:
        cancellation_details = result.cancellation_details
        logging.error(filename + "\t" + f"Speech Recognition canceled: {cancellation_details.reason}")
        if cancellation_details.reason == speechsdk.CancellationReason.Error:
            logging.error(f"Error details: {cancellation_details.error_details}")
    return text
# General Function
def write_transcription(output_directory, text):
    """Append one transcription line to <output_directory>/transcriptions.txt.

    Args:
        text: Processed recognition as string
        output_directory: Output directory for the file

    Returns:
        None. The line is appended to the transcript file on disk.
    """
    path = f'{output_directory}/transcriptions.txt'
    is_new = not os.path.exists(path)
    # Write the utf-8 BOM exactly once, when the file is first created.
    # The previous implementation appended with the 'utf-8-sig' codec,
    # which emits a fresh BOM on every append and scatters BOMs
    # throughout the file.
    with open(path, 'w' if is_new else 'a',
              encoding='utf-8-sig' if is_new else 'utf-8') as transfile:
        transfile.write(f'{text}\n')
    if is_new:
        logging.warning('[INFO] - Created transcript file with utf-8 bom encoding.')
def main(speech_files, output_directory, lexical = False, enable_proxy = False, *argv):
    """Main function for STT-functionality

    Args:
        speech_files: Directory of audio files to be transcribed
        output_directory: Output directory for the file
        lexical: Boolean to enable extended lexical version of STT-result
        enable_proxy: Boolean to enable proxy function in case you need it
        *argv: Proxy information if enable_proxy is True -> hostname: str, port: str, username: str, password: str

    Returns:
        zip(filenames, results): Zipped lists of filenames and STT-results as string

    Raises:
        RuntimeError: If the speech config cannot be created.
    """
    try:
        speech_config = speechsdk.SpeechConfig(subscription = pa.config_data['stt_key'], region = pa.config_data['stt_region'])
    except RuntimeError:
        # Re-raise after logging: continuing without a config would only
        # fail later with a NameError on `speech_config`.
        logging.error("[ERROR] - Could not retrieve speech config")
        raise
    # If necessary, you can enable a proxy here:
    # set_proxy(hostname: str, port: str, username: str, password: str)
    if enable_proxy:
        speech_config.set_proxy(argv[0], argv[1], argv[2], argv[3])
    # Set speech service properties, requesting the detailed response format to make it compatible with lexical format, if wanted
    speech_config.set_service_property(name='format', value='detailed', channel=speechsdk.ServicePropertyChannel.UriQueryParameter)
    if pa.config_data['stt_endpoint'] != "":
        speech_config.endpoint_id = pa.config_data['stt_endpoint']
    logging.info(f'[INFO] - Starting to transcribe {len(next(os.walk(speech_files))[2])} audio files')
    results = []
    filenames = []
    # NOTE(review): '*av' matches any name ending in "av" (e.g. .wav);
    # presumably '*.wav' was intended — confirm before changing.
    for audio in glob.iglob(f'{speech_files}*av'):
        result, filename = request_endpoint(audio, speech_config, output_directory, lexical)
        results.append(result)
        filenames.append(filename)
    # Pair each filename with its transcription result.
    return zip(filenames, results)
# Allow running this module directly with the default input/output folders.
if __name__ == '__main__':
    main("input/audio/", "output/test/")
Args:
speech_files: Directory of audio files to be transcribed
output_directory: Output directory for the file
lexical: Boolean to enable extended lexical version of STT-result
enable_proxy: Boolean to enable proxy function in case you need it
*argv: Proxy information if enable_proxy is True -> hostname: str, port: str, username: str, password: str
Returns:
zip(filenames, results): Zipped lists of filenames and STT-results as string
Process recognition received from the speech service
Args:
result: Result object returned by STT-service
filename: Filename for output file
output_directory: Output directory for the file
lexical: Boolean to enable extended lexical version of STT-result
Returns:
text: Processed recognition as string
Request the speech service endpoint
Args:
audio: Input data frame
speech_config: Choice between scoring and
output_folder: LUIS app ID
case: LUIS subscription key
lexical: Minimum confidence score for LUIS result, between 0.00 and 1.00
Returns:
df: Scoring data frame with predicted intents and scores
Raises:
ConnectionError: If file is not found
Write transcription to file
Args:
text: Processed recognition as string
output_directory: Output directory for the file
Returns:
Writes output to file
SPEECH-TO-TEXT USING MICROSOFT SPEECH API
Import required packages Load and set configuration parameters General Function If necessary, you can enable a proxy here: set_proxy(hostname: str, port: str, username: str, password: str) Set speech service properties, requesting the detailed response format to make it compatible with lexical format, if wanted Check the result | 1,733 | en | 0.72817 |
# Generated by Django 3.2.8 on 2021-11-29 09:01
from django.db import migrations
class Migration(migrations.Migration):
    """Remove the obsolete ``VehicleLog`` model from the ``budget`` app."""

    # Must be applied after the previous budget migration.
    dependencies = [
        ('budget', '0004_auto_20211125_1330'),
    ]

    # DeleteModel drops the VehicleLog table and removes it from the
    # migration state.
    operations = [
        migrations.DeleteModel(
            name='VehicleLog',
        ),
    ]
| django_budget/budget/migrations/0005_delete_vehiclelog.py | 298 | Generated by Django 3.2.8 on 2021-11-29 09:01 | 45 | en | 0.697935 |
#!/usr/bin/env python3
# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang
# Mingshuang Luo)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
from pathlib import Path
from shutil import copyfile
from typing import Optional, Tuple
import k2
import torch
import torch.multiprocessing as mp
import torch.nn as nn
import torch.optim as optim
from asr_datamodule import LibriSpeechAsrDataModule
from lhotse.utils import fix_random_seed
from model import TdnnLstm
from torch import Tensor
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.nn.utils import clip_grad_norm_
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
from icefall.checkpoint import load_checkpoint
from icefall.checkpoint import save_checkpoint as save_checkpoint_impl
from icefall.dist import cleanup_dist, setup_dist
from icefall.graph_compiler import CtcTrainingGraphCompiler
from icefall.lexicon import Lexicon
from icefall.utils import (
AttributeDict,
MetricsTracker,
encode_supervisions,
get_env_info,
setup_logger,
str2bool,
)
def get_parser():
    """Return an ``argparse`` parser with the training options.

    ``ArgumentDefaultsHelpFormatter`` makes ``--help`` display each
    option's default value.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )

    parser.add_argument(
        "--world-size",
        type=int,
        default=1,
        help="Number of GPUs for DDP training.",
    )

    parser.add_argument(
        "--master-port",
        type=int,
        default=12354,
        help="Master port to use for DDP training.",
    )

    parser.add_argument(
        "--tensorboard",
        type=str2bool,
        default=True,
        help="Should various information be logged in tensorboard.",
    )

    parser.add_argument(
        "--num-epochs",
        type=int,
        default=20,
        help="Number of epochs to train.",
    )

    # Fix the duplicated word ("from from") in the user-visible help text.
    parser.add_argument(
        "--start-epoch",
        type=int,
        default=0,
        help="""Resume training from this epoch.
        If it is positive, it will load checkpoint from
        tdnn_lstm_ctc/exp/epoch-{start_epoch-1}.pt
        """,
    )

    return parser
def get_params() -> AttributeDict:
    """Return the default training configuration as an :class:`AttributeDict`.

    These are the training-related parameters that are not exposed on the
    command line. Parsed command-line options are merged into the returned
    dict by the caller, so everything is ultimately accessible via `params`.

    Saved keys:

        - exp_dir: directory where checkpoints, logs, etc. are written
        - lang_dir: directory with language input files such as "lexicon.txt"
        - lr: initial learning rate
        - feature_dim: model input dimension; must match feature extraction
        - weight_decay: weight decay passed to the optimizer
        - subsampling_factor: subsampling factor of the model
        - best_train_loss / best_valid_loss: lowest losses seen so far,
          updated during training
        - best_train_epoch / best_valid_epoch: epochs achieving those losses
        - batch_idx_train: number of batches trained across all epochs,
          used when writing statistics to tensorboard
        - log_interval: print training loss every `log_interval` batches
        - reset_interval: reset running statistics every `reset_interval`
          batches
        - valid_interval: run validation every `valid_interval` batches
        - beam_size / reduction / use_double_scores: arguments passed to
          k2.ctc_loss
        - env_info: snapshot of the current environment
    """
    defaults = {
        "exp_dir": Path("tdnn_lstm_ctc/exp"),
        "lang_dir": Path("data/lang_phone"),
        "lr": 1e-3,
        "feature_dim": 80,
        "weight_decay": 5e-4,
        "subsampling_factor": 3,
        "best_train_loss": float("inf"),
        "best_valid_loss": float("inf"),
        "best_train_epoch": -1,
        "best_valid_epoch": -1,
        "batch_idx_train": 0,
        "log_interval": 10,
        "reset_interval": 200,
        "valid_interval": 1000,
        "beam_size": 10,
        "reduction": "sum",
        "use_double_scores": True,
        "env_info": get_env_info(),
    }
    return AttributeDict(defaults)
def load_checkpoint_if_available(
    params: AttributeDict,
    model: nn.Module,
    optimizer: Optional[torch.optim.Optimizer] = None,
    scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,
) -> Optional[dict]:
    """Load checkpoint from file.

    If params.start_epoch is positive, it will load the checkpoint from
    `params.start_epoch - 1`. Otherwise, this function does nothing.

    Apart from loading state dict for `model`, `optimizer` and `scheduler`,
    it also updates `best_train_epoch`, `best_train_loss`, `best_valid_epoch`,
    and `best_valid_loss` in `params`.

    Args:
      params:
        The return value of :func:`get_params`.
      model:
        The training model.
      optimizer:
        The optimizer that we are using.
      scheduler:
        The learning rate scheduler we are using.
    Returns:
      The dict returned by :func:`load_checkpoint` when a checkpoint was
      loaded, or None when ``params.start_epoch <= 0``.
    """
    if params.start_epoch <= 0:
        return None

    filename = params.exp_dir / f"epoch-{params.start_epoch-1}.pt"
    saved_params = load_checkpoint(
        filename,
        model=model,
        optimizer=optimizer,
        scheduler=scheduler,
    )

    # Carry the bookkeeping values from the checkpoint into `params`.
    keys = [
        "best_train_epoch",
        "best_valid_epoch",
        "batch_idx_train",
        "best_train_loss",
        "best_valid_loss",
    ]
    for k in keys:
        params[k] = saved_params[k]

    return saved_params
def save_checkpoint(
    params: AttributeDict,
    model: nn.Module,
    optimizer: torch.optim.Optimizer,
    scheduler: torch.optim.lr_scheduler._LRScheduler,
    rank: int = 0,
) -> None:
    """Save model, optimizer, scheduler and training stats to file.

    Only rank 0 writes; other ranks return immediately. When the current
    epoch achieved the best training/validation loss so far, the checkpoint
    is additionally copied to best-train-loss.pt / best-valid-loss.pt.

    Args:
      params:
        It is returned by :func:`get_params`.
      model:
        The training model.
    """
    if rank != 0:
        return

    checkpoint_path = params.exp_dir / f"epoch-{params.cur_epoch}.pt"
    save_checkpoint_impl(
        filename=checkpoint_path,
        model=model,
        params=params,
        optimizer=optimizer,
        scheduler=scheduler,
        rank=rank,
    )

    if params.cur_epoch == params.best_train_epoch:
        copyfile(src=checkpoint_path, dst=params.exp_dir / "best-train-loss.pt")

    if params.cur_epoch == params.best_valid_epoch:
        copyfile(src=checkpoint_path, dst=params.exp_dir / "best-valid-loss.pt")
def compute_loss(
    params: AttributeDict,
    model: nn.Module,
    batch: dict,
    graph_compiler: CtcTrainingGraphCompiler,
    is_training: bool,
) -> Tuple[Tensor, MetricsTracker]:
    """
    Compute CTC loss given the model and its inputs.
    Args:
      params:
        Parameters for training. See :func:`get_params`.
      model:
        The model for training. It is an instance of TdnnLstm in our case.
      batch:
        A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()`
        for the content in it.
      graph_compiler:
        It is used to build a decoding graph from a ctc topo and training
        transcript. The training transcript is contained in the given `batch`,
        while the ctc topo is built when this compiler is instantiated.
      is_training:
        True for training. False for validation. When it is True, this
        function enables autograd during computation; when it is False, it
        disables autograd.
    Returns:
      A tuple of the CTC loss (summed per `params.reduction`) and a
      MetricsTracker holding the supervision frame count and the detached
      loss value.
    """
    device = graph_compiler.device
    feature = batch["inputs"]
    # at entry, feature is (N, T, C)
    feature = feature.permute(0, 2, 1)  # now feature is (N, C, T)
    assert feature.ndim == 3
    feature = feature.to(device)

    with torch.set_grad_enabled(is_training):
        nnet_output = model(feature)
        # nnet_output is (N, T, C)

    # NOTE: We need `encode_supervisions` to sort sequences with
    # different duration in decreasing order, required by
    # `k2.intersect_dense` called in `k2.ctc_loss`
    supervisions = batch["supervisions"]
    supervision_segments, texts = encode_supervisions(
        supervisions, subsampling_factor=params.subsampling_factor
    )
    decoding_graph = graph_compiler.compile(texts)

    dense_fsa_vec = k2.DenseFsaVec(
        nnet_output,
        supervision_segments,
        allow_truncate=params.subsampling_factor - 1,
    )

    loss = k2.ctc_loss(
        decoding_graph=decoding_graph,
        dense_fsa_vec=dense_fsa_vec,
        output_beam=params.beam_size,
        reduction=params.reduction,
        use_double_scores=params.use_double_scores,
    )
    assert loss.requires_grad == is_training

    info = MetricsTracker()
    # Frame count comes from the (subsampled) supervision durations.
    info["frames"] = supervision_segments[:, 2].sum().item()
    info["loss"] = loss.detach().cpu().item()

    return loss, info
def compute_validation_loss(
    params: AttributeDict,
    model: nn.Module,
    graph_compiler: CtcTrainingGraphCompiler,
    valid_dl: torch.utils.data.DataLoader,
    world_size: int = 1,
) -> MetricsTracker:
    """Run one pass over the validation set and accumulate the loss.

    Puts the model into eval mode, updates `params.best_valid_loss` /
    `params.best_valid_epoch` when the new loss is a record, and returns
    the accumulated metrics.
    """
    model.eval()

    total = MetricsTracker()
    for batch in valid_dl:
        loss, batch_info = compute_loss(
            params=params,
            model=model,
            batch=batch,
            graph_compiler=graph_compiler,
            is_training=False,
        )
        assert loss.requires_grad is False
        total = total + batch_info

    if world_size > 1:
        # Aggregate statistics across DDP workers.
        total.reduce(loss.device)

    loss_value = total["loss"] / total["frames"]
    if loss_value < params.best_valid_loss:
        params.best_valid_epoch = params.cur_epoch
        params.best_valid_loss = loss_value

    return total
def train_one_epoch(
    params: AttributeDict,
    model: nn.Module,
    optimizer: torch.optim.Optimizer,
    graph_compiler: CtcTrainingGraphCompiler,
    train_dl: torch.utils.data.DataLoader,
    valid_dl: torch.utils.data.DataLoader,
    tb_writer: Optional[SummaryWriter] = None,
    world_size: int = 1,
) -> None:
    """Train the model for one epoch.
    The training loss from the mean of all frames is saved in
    `params.train_loss`. It runs the validation process every
    `params.valid_interval` batches.
    Args:
      params:
        It is returned by :func:`get_params`.
      model:
        The model for training.
      optimizer:
        The optimizer we are using.
      graph_compiler:
        It is used to convert transcripts to FSAs.
      train_dl:
        Dataloader for the training dataset.
      valid_dl:
        Dataloader for the validation dataset.
      tb_writer:
        Writer to write log messages to tensorboard.
      world_size:
        Number of nodes in DDP training. If it is 1, DDP is disabled.
    """
    model.train()

    tot_loss = MetricsTracker()

    for batch_idx, batch in enumerate(train_dl):
        params.batch_idx_train += 1
        batch_size = len(batch["supervisions"]["text"])

        loss, loss_info = compute_loss(
            params=params,
            model=model,
            batch=batch,
            graph_compiler=graph_compiler,
            is_training=True,
        )
        # summary stats: `tot_loss` is an exponentially decaying running
        # sum — older batches fade out over ~`reset_interval` batches.
        tot_loss = (tot_loss * (1 - 1 / params.reset_interval)) + loss_info

        optimizer.zero_grad()
        loss.backward()
        # Clip the global L2 norm of the gradients to 5.0.
        clip_grad_norm_(model.parameters(), 5.0, 2.0)
        optimizer.step()

        if batch_idx % params.log_interval == 0:
            logging.info(
                f"Epoch {params.cur_epoch}, "
                f"batch {batch_idx}, loss[{loss_info}], "
                f"tot_loss[{tot_loss}], batch size: {batch_size}"
            )

        if batch_idx % params.log_interval == 0:
            if tb_writer is not None:
                loss_info.write_summary(
                    tb_writer, "train/current_", params.batch_idx_train
                )
                tot_loss.write_summary(
                    tb_writer, "train/tot_", params.batch_idx_train
                )

        if batch_idx > 0 and batch_idx % params.valid_interval == 0:
            valid_info = compute_validation_loss(
                params=params,
                model=model,
                graph_compiler=graph_compiler,
                valid_dl=valid_dl,
                world_size=world_size,
            )
            model.train()  # compute_validation_loss switched to eval mode
            logging.info(f"Epoch {params.cur_epoch}, validation {valid_info}")
            if tb_writer is not None:
                valid_info.write_summary(
                    tb_writer,
                    "train/valid_",
                    params.batch_idx_train,
                )

    # Per-frame average loss for the whole epoch.
    loss_value = tot_loss["loss"] / tot_loss["frames"]
    params.train_loss = loss_value
    if params.train_loss < params.best_train_loss:
        params.best_train_epoch = params.cur_epoch
        params.best_train_loss = params.train_loss
def run(rank, world_size, args):
    """
    Args:
      rank:
        It is a value between 0 and `world_size-1`, which is
        passed automatically by `mp.spawn()` in :func:`main`.
        The node with rank 0 is responsible for saving checkpoint.
      world_size:
        Number of GPUs for DDP training.
      args:
        The return value of get_parser().parse_args()
    """
    params = get_params()
    # Command-line options override/extend the defaults.
    params.update(vars(args))

    # Fixed seed for reproducibility across runs and workers.
    fix_random_seed(42)
    if world_size > 1:
        setup_dist(rank, world_size, params.master_port)

    setup_logger(f"{params.exp_dir}/log/log-train")
    logging.info("Training started")
    logging.info(params)

    # Only rank 0 writes tensorboard logs.
    if args.tensorboard and rank == 0:
        tb_writer = SummaryWriter(log_dir=f"{params.exp_dir}/tensorboard")
    else:
        tb_writer = None

    lexicon = Lexicon(params.lang_dir)
    max_phone_id = max(lexicon.tokens)

    device = torch.device("cpu")
    if torch.cuda.is_available():
        device = torch.device("cuda", rank)

    graph_compiler = CtcTrainingGraphCompiler(lexicon=lexicon, device=device)

    model = TdnnLstm(
        num_features=params.feature_dim,
        num_classes=max_phone_id + 1,  # +1 for the blank symbol
        subsampling_factor=params.subsampling_factor,
    )

    # Resumes from epoch `start_epoch - 1` when start_epoch > 0.
    checkpoints = load_checkpoint_if_available(params=params, model=model)

    model.to(device)
    if world_size > 1:
        model = DDP(model, device_ids=[rank])

    optimizer = optim.AdamW(
        model.parameters(),
        lr=params.lr,
        weight_decay=params.weight_decay,
    )
    scheduler = StepLR(optimizer, step_size=8, gamma=0.1)

    # Restore optimizer/scheduler state when resuming from a checkpoint.
    if checkpoints:
        optimizer.load_state_dict(checkpoints["optimizer"])
        scheduler.load_state_dict(checkpoints["scheduler"])

    librispeech = LibriSpeechAsrDataModule(args)
    train_dl = librispeech.train_dataloaders()
    valid_dl = librispeech.valid_dataloaders()

    for epoch in range(params.start_epoch, params.num_epochs):
        # Reshuffle the sampler deterministically for each epoch.
        train_dl.sampler.set_epoch(epoch)

        if epoch > params.start_epoch:
            logging.info(f"epoch {epoch}, lr: {scheduler.get_last_lr()[0]}")

        if tb_writer is not None:
            tb_writer.add_scalar(
                "train/lr",
                scheduler.get_last_lr()[0],
                params.batch_idx_train,
            )
            tb_writer.add_scalar("train/epoch", epoch, params.batch_idx_train)

        params.cur_epoch = epoch

        train_one_epoch(
            params=params,
            model=model,
            optimizer=optimizer,
            graph_compiler=graph_compiler,
            train_dl=train_dl,
            valid_dl=valid_dl,
            tb_writer=tb_writer,
            world_size=world_size,
        )

        scheduler.step()

        save_checkpoint(
            params=params,
            model=model,
            optimizer=optimizer,
            scheduler=scheduler,
            rank=rank,
        )

    logging.info("Done!")

    if world_size > 1:
        torch.distributed.barrier()
        cleanup_dist()
def main():
    """Parse command-line arguments and launch single- or multi-GPU training."""
    parser = get_parser()
    LibriSpeechAsrDataModule.add_arguments(parser)
    args = parser.parse_args()

    world_size = args.world_size
    assert world_size >= 1
    if world_size == 1:
        # Single-process training: run directly in this process.
        run(rank=0, world_size=1, args=args)
    else:
        # One process per GPU; mp.spawn supplies the rank argument.
        mp.spawn(run, args=(world_size, args), nprocs=world_size, join=True)
# Standard script entry point.
if __name__ == "__main__":
    main()
| egs/librispeech/ASR/tdnn_lstm_ctc/train.py | 17,862 | Compute CTC loss given the model and its inputs.
Args:
params:
Parameters for training. See :func:`get_params`.
model:
The model for training. It is an instance of TdnnLstm in our case.
batch:
A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()`
for the content in it.
graph_compiler:
It is used to build a decoding graph from a ctc topo and training
transcript. The training transcript is contained in the given `batch`,
while the ctc topo is built when this compiler is instantiated.
is_training:
True for training. False for validation. When it is True, this
function enables autograd during computation; when it is False, it
disables autograd.
Run the validation process. The validation loss
is saved in `params.valid_loss`.
Return a dict containing training parameters.
All training related parameters that are not passed from the commandline
is saved in the variable `params`.
Commandline options are merged into `params` after they are parsed, so
you can also access them via `params`.
Explanation of options saved in `params`:
- exp_dir: It specifies the directory where all training related
files, e.g., checkpoints, log, etc, are saved
- lang_dir: It contains language related input files such as
"lexicon.txt"
- lr: It specifies the initial learning rate
- feature_dim: The model input dim. It has to match the one used
in computing features.
- weight_decay: The weight_decay for the optimizer.
- subsampling_factor: The subsampling factor for the model.
- best_train_loss: Best training loss so far. It is used to select
the model that has the lowest training loss. It is
updated during the training.
- best_valid_loss: Best validation loss so far. It is used to select
the model that has the lowest validation loss. It is
updated during the training.
- best_train_epoch: It is the epoch that has the best training loss.
- best_valid_epoch: It is the epoch that has the best validation loss.
- batch_idx_train: Used to writing statistics to tensorboard. It
contains number of batches trained so far across
epochs.
- log_interval: Print training loss if batch_idx % log_interval` is 0
- reset_interval: Reset statistics if batch_idx % reset_interval is 0
- valid_interval: Run validation if batch_idx % valid_interval` is 0
- beam_size: It is used in k2.ctc_loss
- reduction: It is used in k2.ctc_loss
- use_double_scores: It is used in k2.ctc_loss
Load checkpoint from file.
If params.start_epoch is positive, it will load the checkpoint from
`params.start_epoch - 1`. Otherwise, this function does nothing.
Apart from loading state dict for `model`, `optimizer` and `scheduler`,
it also updates `best_train_epoch`, `best_train_loss`, `best_valid_epoch`,
and `best_valid_loss` in `params`.
Args:
params:
The return value of :func:`get_params`.
model:
The training model.
optimizer:
The optimizer that we are using.
scheduler:
The learning rate scheduler we are using.
Returns:
Return None.
Args:
rank:
It is a value between 0 and `world_size-1`, which is
passed automatically by `mp.spawn()` in :func:`main`.
The node with rank 0 is responsible for saving checkpoint.
world_size:
Number of GPUs for DDP training.
args:
The return value of get_parser().parse_args()
Save model, optimizer, scheduler and training stats to file.
Args:
params:
It is returned by :func:`get_params`.
model:
The training model.
Train the model for one epoch.
The training loss from the mean of all frames is saved in
`params.train_loss`. It runs the validation process every
`params.valid_interval` batches.
Args:
params:
It is returned by :func:`get_params`.
model:
The model for training.
optimizer:
The optimizer we are using.
graph_compiler:
It is used to convert transcripts to FSAs.
train_dl:
Dataloader for the training dataset.
valid_dl:
Dataloader for the validation dataset.
tb_writer:
Writer to write log messages to tensorboard.
world_size:
Number of nodes in DDP training. If it is 1, DDP is disabled.
!/usr/bin/env python3 Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang Mingshuang Luo) See ../../../../LICENSE for clarification regarding multiple authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. at entry, feature is (N, T, C) now feature is (N, C, T) nnet_output is (N, T, C) NOTE: We need `encode_supervisions` to sort sequences with different duration in decreasing order, required by `k2.intersect_dense` called in `k2.ctc_loss` summary stats. +1 for the blank symbol | 5,388 | en | 0.815464 |
from collections import OrderedDict, defaultdict
from typing import Optional, Dict, Tuple, List
import ariadne
from irrd.rpki.status import RPKIStatus
from irrd.rpsl.fields import RPSLFieldListMixin, RPSLTextField, RPSLReferenceField
from irrd.rpsl.rpsl_objects import (lookup_field_names, OBJECT_CLASS_MAPPING, RPSLAutNum,
RPSLInetRtr, RPSLPerson, RPSLRole)
from irrd.scopefilter.status import ScopeFilterStatus
from irrd.utils.text import snake_to_camel_case
class SchemaGenerator:
    """Builds the full IRRd GraphQL schema string and the matching Ariadne
    bindable type objects, deriving the RPSL object types dynamically from
    the object classes defined in irrd.rpsl."""
    def __init__(self):
        """
        The schema generator generates a GraphQL schema.
        The purpose is to provide a schema to which resolvers are then
        attached, which is then given to Ariadne, and for resolvers to
        have information about expected types.
        For RPSL queries and types, this is dynamically generated based on
        the RPSL objects from irrd.rpsl. Other parts are fixed.
        This means that the schema is always the same for a given IRRd
        codebase - there are no runtime or user configurable parts.
        Along with generating the schema, some metadata is saved, e.g.
        self.graphql_types which allows resolvers to learn the GraphQL
        type for a certain field.
        This generator also creates Ariadne object types on self, which
        are used to attach resolvers to them.
        """
        # Build the sub-schema strings first; they are concatenated below.
        self._set_rpsl_query_fields()
        self._set_rpsl_object_interface_schema()
        self._set_rpsl_contact_schema()
        self._set_rpsl_object_schemas()
        self._set_enums()
        # Enum definitions come first so later types can reference them.
        schema = self.enums
        schema += """
            scalar ASN
            scalar IP
            schema {
              query: Query
            }
            type Query {
              rpslObjects(""" + self.rpsl_query_fields + """): [RPSLObject!]
              databaseStatus(sources: [String!]): [DatabaseStatus]
              asnPrefixes(asns: [ASN!]!, ipVersion: Int, sources: [String!]): [ASNPrefixes!]
              asSetPrefixes(setNames: [String!]!, ipVersion: Int, sources: [String!], excludeSets: [String!], sqlTrace: Boolean): [AsSetPrefixes!]
              recursiveSetMembers(setNames: [String!]!, depth: Int, sources: [String!], excludeSets: [String!], sqlTrace: Boolean): [SetMembers!]
            }
            type DatabaseStatus {
              source: String!
              authoritative: Boolean!
              objectClassFilter: [String!]
              rpkiRovFilter: Boolean!
              scopefilterEnabled: Boolean!
              localJournalKept: Boolean!
              serialOldestJournal: Int
              serialNewestJournal: Int
              serialLastExport: Int
              serialNewestMirror: Int
              lastUpdate: String
              synchronisedSerials: Boolean!
            }
            type RPSLJournalEntry {
              rpslPk: String!
              source: String!
              serialNrtm: Int!
              operation: String!
              origin: String
              objectClass: String!
              objectText: String!
              timestamp: String!
            }
            type ASNPrefixes {
              asn: ASN!
              prefixes: [IP!]
            }
            type AsSetPrefixes {
              rpslPk: String!
              prefixes: [IP!]
            }
            type SetMembers {
              rpslPk: String!
              members: [String!]
            }
        """
        schema += self.rpsl_object_interface_schema
        schema += self.rpsl_contact_schema
        schema += ''.join(self.rpsl_object_schemas.values())
        schema += 'union RPSLContactUnion = RPSLPerson | RPSLRole'
        # gql() parses/validates the SDL string and raises on syntax errors.
        self.type_defs = ariadne.gql(schema)
        # Ariadne bindables: resolvers are attached to these objects elsewhere.
        self.query_type = ariadne.QueryType()
        self.rpsl_object_type = ariadne.InterfaceType("RPSLObject")
        self.rpsl_contact_union_type = ariadne.UnionType("RPSLContactUnion")
        self.asn_scalar_type = ariadne.ScalarType("ASN")
        self.ip_scalar_type = ariadne.ScalarType("IP")
        self.object_types = [self.query_type, self.rpsl_object_type, self.rpsl_contact_union_type,
                             self.asn_scalar_type, self.ip_scalar_type]
        # One ObjectType per generated RPSL object class, plus the fixed types.
        for name in self.rpsl_object_schemas.keys():
            self.object_types.append(ariadne.ObjectType(name))
        self.object_types.append(ariadne.ObjectType("ASNPrefixes"))
        self.object_types.append(ariadne.ObjectType("AsSetPrefixes"))
        self.object_types.append(ariadne.ObjectType("SetMembers"))
        self.object_types.append(ariadne.EnumType("RPKIStatus", RPKIStatus))
        self.object_types.append(ariadne.EnumType("ScopeFilterStatus", ScopeFilterStatus))
    def _set_rpsl_query_fields(self):
        """
        Create a sub-schema for the fields that can be queried for RPSL objects.
        This includes all fields from all objects, along with a few
        special fields.
        """
        # All lookup fields plus a few fixed keys are queryable as string lists.
        string_list_fields = {'rpsl_pk', 'sources', 'object_class'}.union(lookup_field_names())
        params = [snake_to_camel_case(p) + ': [String!]' for p in sorted(string_list_fields)]
        params += [
            'ipExact: IP',
            'ipLessSpecific: IP',
            'ipLessSpecificOneLevel: IP',
            'ipMoreSpecific: IP',
            'ipAny: IP',
            'asn: [ASN!]',
            'rpkiStatus: [RPKIStatus!]',
            'scopeFilterStatus: [ScopeFilterStatus!]',
            'textSearch: String',
            'recordLimit: Int',
            'sqlTrace: Boolean',
        ]
        self.rpsl_query_fields = ', '.join(params)
    def _set_enums(self):
        """
        Create the schema for enums, current RPKI and scope filter status.
        """
        self.enums = ''
        for enum in [RPKIStatus, ScopeFilterStatus]:
            self.enums += f'enum {enum.__name__} {{\n'
            for value in enum:
                self.enums += f'    {value.name}\n'
            self.enums += '}\n\n'
    def _set_rpsl_object_interface_schema(self):
        """
        Create the schema for RPSLObject, which contains only fields that
        are common to every known RPSL object, along with meta
        """
        # Intersect the field names of all object classes to find shared fields.
        common_fields = None
        for rpsl_object_class in OBJECT_CLASS_MAPPING.values():
            if common_fields is None:
                common_fields = set(rpsl_object_class.fields.keys())
            else:
                common_fields = common_fields.intersection(set(rpsl_object_class.fields.keys()))
        common_fields = list(common_fields)
        common_fields = ['rpslPk', 'objectClass', 'objectText', 'updated'] + common_fields
        common_field_dict = self._dict_for_common_fields(common_fields)
        common_field_dict['journal'] = '[RPSLJournalEntry]'
        schema = self._generate_schema_str('RPSLObject', 'interface', common_field_dict)
        self.rpsl_object_interface_schema = schema
    def _set_rpsl_contact_schema(self):
        """
        Create the schema for RPSLContact. This contains shared fields between
        RPSLPerson and RPSLRole, as they are so similar.
        """
        common_fields = set(RPSLPerson.fields.keys()).intersection(set(RPSLRole.fields.keys()))
        common_fields = common_fields.union({'rpslPk', 'objectClass', 'objectText', 'updated'})
        common_field_dict = self._dict_for_common_fields(list(common_fields))
        schema = self._generate_schema_str('RPSLContact', 'interface', common_field_dict)
        self.rpsl_contact_schema = schema
    def _dict_for_common_fields(self, common_fields: List[str]):
        # Maps camelCase GraphQL field name -> GraphQL type for shared fields.
        common_field_dict = OrderedDict()
        for field_name in sorted(common_fields):
            try:
                # These fields are present in all relevant object, so this is a safe check
                rpsl_field = RPSLPerson.fields[field_name]
                graphql_type = self._graphql_type_for_rpsl_field(rpsl_field)
                reference_name, reference_type = self._grapql_type_for_reference_field(
                    field_name, rpsl_field)
                if reference_name and reference_type:
                    common_field_dict[reference_name] = reference_type
            except KeyError:
                # Meta fields like rpslPk are not RPSL fields; default to String.
                graphql_type = 'String'
            common_field_dict[snake_to_camel_case(field_name)] = graphql_type
        return common_field_dict
    def _set_rpsl_object_schemas(self):
        """
        Create the schemas for each specific RPSL object class.
        Each of these implements RPSLObject, and RPSLPerson/RPSLRole
        implement RPSLContact as well.
        """
        self.graphql_types = defaultdict(dict)
        schemas = OrderedDict()
        for object_class, klass in OBJECT_CLASS_MAPPING.items():
            object_name = klass.__name__
            graphql_fields = OrderedDict()
            # Meta fields shared by every object type.
            graphql_fields['rpslPk'] = 'String'
            graphql_fields['objectClass'] = 'String'
            graphql_fields['objectText'] = 'String'
            graphql_fields['updated'] = 'String'
            graphql_fields['journal'] = '[RPSLJournalEntry]'
            for field_name, field in klass.fields.items():
                graphql_type = self._graphql_type_for_rpsl_field(field)
                graphql_fields[snake_to_camel_case(field_name)] = graphql_type
                # NOTE(review): keyed by snake_to_camel_case(object_name) here but by
                # plain object_name below — same string for CamelCase class names,
                # confirm that is intended.
                self.graphql_types[snake_to_camel_case(object_name)][field_name] = graphql_type
                reference_name, reference_type = self._grapql_type_for_reference_field(field_name, field)
                if reference_name and reference_type:
                    graphql_fields[reference_name] = reference_type
                    self.graphql_types[object_name][reference_name] = reference_type
            # Pseudo-fields extracted from the object data (ASN, prefix, ...).
            for field_name in klass.field_extracts:
                if field_name.startswith('asn'):
                    graphql_type = 'ASN'
                elif field_name == 'prefix':
                    graphql_type = 'IP'
                elif field_name == 'prefix_length':
                    graphql_type = 'Int'
                else:
                    graphql_type = 'String'
                graphql_fields[snake_to_camel_case(field_name)] = graphql_type
            if klass.rpki_relevant:
                graphql_fields['rpkiStatus'] = 'RPKIStatus'
                graphql_fields['rpkiMaxLength'] = 'Int'
                self.graphql_types[object_name]['rpki_max_length'] = 'Int'
            implements = 'RPSLContact & RPSLObject' if klass in [RPSLPerson, RPSLRole] else 'RPSLObject'
            schema = self._generate_schema_str(object_name, 'type', graphql_fields, implements)
            schemas[object_name] = schema
        self.rpsl_object_schemas = schemas
    def _graphql_type_for_rpsl_field(self, field: RPSLTextField) -> str:
        """
        Return the GraphQL type for a regular RPSL field.
        This is always a list of strings if the field is a list and/or
        can occur multiple times.
        """
        if RPSLFieldListMixin in field.__class__.__bases__ or field.multiple:
            return '[String!]'
        return 'String'
    # NOTE: 'grapql' (sic) — existing method name kept to avoid breaking callers.
    def _grapql_type_for_reference_field(self, field_name: str, rpsl_field: RPSLTextField) -> Tuple[Optional[str], Optional[str]]:
        """
        Return the GraphQL name and type for a reference field.
        For example, for a field "admin-c" that refers to person/role,
        returns ('adminC', '[RPSLContactUnion!]').
        Some fields are excluded because they are syntactical references,
        not real references.
        """
        if isinstance(rpsl_field, RPSLReferenceField) and getattr(rpsl_field, 'referring', None):
            rpsl_field.resolve_references()
            graphql_name = snake_to_camel_case(field_name) + 'Objs'
            grapql_referring = set(rpsl_field.referring_object_classes)
            # aut-num and inet-rtr references are syntactical only, not resolvable.
            if RPSLAutNum in grapql_referring:
                grapql_referring.remove(RPSLAutNum)
            if RPSLInetRtr in grapql_referring:
                grapql_referring.remove(RPSLInetRtr)
            if grapql_referring == {RPSLPerson, RPSLRole}:
                graphql_type = '[RPSLContactUnion!]'
            else:
                graphql_type = '[' + grapql_referring.pop().__name__ + '!]'
            return graphql_name, graphql_type
        return None, None
    def _generate_schema_str(self, name: str, graphql_type: str, fields: Dict[str, str], implements: Optional[str]=None) -> str:
        """
        Generate a schema string for a given name, object type and dict of fields.
        """
        schema = f'{graphql_type} {name} '
        if implements:
            schema += f'implements {implements} '
        schema += '{\n'
        for field, field_type in fields.items():
            schema += f'  {field}: {field_type}\n'
        schema += '}\n\n'
        return schema
| irrd/server/graphql/schema_generator.py | 12,840 | The schema generator generates a GraphQL schema.
The purpose is to provide a schema to which resolvers are then
attached, which is then given to Ariadne, and for resolvers to
have information about expected types.
For RPSL queries and types, this is dynamically generated based on
the RPSL objects from irrd.rpsl. Other parts are fixed.
This means that the schema is always the same for a given IRRd
codebase - there are no runtime or user configurable parts.
Along with generating the schema, some metadata is saved, e.g.
self.graphql_types which allows resolvers to learn the GraphQL
type for a certain field.
This generator also creates Ariadne object types on self, which
are used to attach resolvers to them.
Generate a schema string for a given name, object type and dict of fields.
Return the GraphQL type for a regular RPSL field.
This is always a list of strings if the field is a list and/or
can occur multiple times.
Return the GraphQL name and type for a reference field.
For example, for a field "admin-c" that refers to person/role,
returns ('adminC', '[RPSLContactUnion!]').
Some fields are excluded because they are syntactical references,
not real references.
Create the schema for enums, current RPKI and scope filter status.
Create the schema for RPSLContact. This contains shared fields between
RPSLPerson and RPSLRole, as they are so similar.
Create the schema for RPSLObject, which contains only fields that
are common to every known RPSL object, along with meta
Create the schemas for each specific RPSL object class.
Each of these implements RPSLObject, and RPSLPerson/RPSLRole
implement RPSLContact as well.
Create a sub-schema for the fields that can be queried for RPSL objects.
This includes all fields from all objects, along with a few
special fields.
These fields are present in all relevant object, so this is a safe check | 1,859 | en | 0.9122 |
"""
Argo Workflows API
Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes. For more information, please see https://argoproj.github.io/argo-workflows/ # noqa: E501
The version of the OpenAPI document: VERSION
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from argo_workflows.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from argo_workflows.exceptions import ApiAttributeError
def lazy_import():
    """Import the action models on first use, avoiding circular imports
    at module load time, and publish them in this module's namespace."""
    from argo_workflows.model.exec_action import ExecAction
    from argo_workflows.model.http_get_action import HTTPGetAction
    from argo_workflows.model.tcp_socket_action import TCPSocketAction
    deferred = {
        'ExecAction': ExecAction,
        'HTTPGetAction': HTTPGetAction,
        'TCPSocketAction': TCPSocketAction,
    }
    globals().update(deferred)
class LifecycleHandler(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    # No enum-constrained or validated properties on this model.
    allowed_values = {
    }
    validations = {
    }
    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501
    _nullable = False
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            '_exec': (ExecAction,),  # noqa: E501
            'http_get': (HTTPGetAction,),  # noqa: E501
            'tcp_socket': (TCPSocketAction,),  # noqa: E501
        }
    @cached_property
    def discriminator():
        # This model has no discriminator field.
        return None
    # Python attribute name -> JSON key in the OpenAPI definition.
    attribute_map = {
        '_exec': 'exec',  # noqa: E501
        'http_get': 'httpGet',  # noqa: E501
        'tcp_socket': 'tcpSocket',  # noqa: E501
    }
    read_only_vars = {
    }
    _composed_schemas = {}
    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):  # noqa: E501
        """LifecycleHandler - a model defined in OpenAPI
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            _exec (ExecAction): [optional]  # noqa: E501
            http_get (HTTPGetAction): [optional]  # noqa: E501
            tcp_socket (TCPSocketAction): [optional]  # noqa: E501
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        # Bypass __init__; attributes are assigned directly below.
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self
    # Internal attributes that must always exist on an instance.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])
    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """LifecycleHandler - a model defined in OpenAPI
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            _exec (ExecAction): [optional]  # noqa: E501
            http_get (HTTPGetAction): [optional]  # noqa: E501
            tcp_socket (TCPSocketAction): [optional]  # noqa: E501
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            # Unlike _from_openapi_data, direct construction rejects read-only vars.
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                     f"class with read only attributes.")
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
LifecycleHandler - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
_exec (ExecAction): [optional] # noqa: E501
http_get (HTTPGetAction): [optional] # noqa: E501
tcp_socket (TCPSocketAction): [optional] # noqa: E501
LifecycleHandler - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
_exec (ExecAction): [optional] # noqa: E501
http_get (HTTPGetAction): [optional] # noqa: E501
tcp_socket (TCPSocketAction): [optional] # noqa: E501
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
Argo Workflows API
Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes. For more information, please see https://argoproj.github.io/argo-workflows/ # noqa: E501
The version of the OpenAPI document: VERSION
Generated by: https://openapi-generator.tech
noqa: F401 noqa: F401 noqa: F401 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 discard variable. noqa: E501 discard variable. | 6,233 | en | 0.796123 |
import grpc
import threading
import proto.connection_pb2_grpc
from libs.core.Log import Log
from libs.core.Switch import Switch
from libs.core.Event import Event
from libs.Configuration import Configuration
class SwitchConnection:
    """gRPC connection to a switch's local agent.

    On construction the controller sends a Hello message and announces the
    newly known switch device through the event system.
    """
    def __init__(self, grpc_address=None):
        self.channel = grpc.insecure_channel(grpc_address)
        self.stub = proto.connection_pb2_grpc.LocalServerStub(self.channel)
        hello = proto.connection_pb2.HelloMessage(
            ip="127.0.0.1",
            port=int(Configuration.get('listen_port')),
        )
        reply = self.stub.Hello(hello)
        self.name = reply.name.encode('utf-8')
        device = Switch(
            name=self.name,
            ip=reply.ip.encode('utf-8'),
            mac=reply.mac.encode('utf-8'),
            bfr_id=reply.bfr_id,
        )
        Event.trigger('new_switch_connection', name=self.name, device=device)
    def addTableEntry(self, tableEntry=None):
        """
        Add a table entry to the switch
        """
        reply = self.stub.AddEntry(tableEntry)
        if reply.code == 0:
            Log.error("Error for entry:", tableEntry, "on switch", self.name)
    def removeTableEntry(self, tableEntry=None):
        """
        Remove a table entry from the switch
        """
        reply = self.stub.RemoveEntry(tableEntry)
        if reply.code == 0:
            Log.error("Error while removing entry:", tableEntry, "on switch", self.name)
| Controller-Implementation/libs/core/SwitchConnection.py | 1,364 | Add a table entry to the switch
Remove a table entry from the switch | 68 | en | 0.466569 |
from torch import jit
from syft.execution.placeholder import PlaceHolder
from syft.execution.translation.abstract import AbstractPlanTranslator
class PlanTranslatorTorchscript(AbstractPlanTranslator):
    """Performs translation from 'list of ops' Plan into torchscript Plan"""
    def __init__(self, plan):
        super().__init__(plan)
    def translate(self):
        """Trace the Plan with torch.jit and store the result on the Plan.

        Returns the original Plan with its `torchscript` attribute set.
        """
        translation_plan = self.plan.copy()
        translation_plan.forward = None
        args = translation_plan.create_dummy_args()
        # jit.trace clones input args and can change their type, so we have to skip types check
        # TODO see if type check can be made less strict,
        # e.g. tensor/custom tensor/nn.Parameter could be considered same type
        translation_plan.validate_input_types = False
        # To avoid storing Plan state tensors in torchscript, they will be sent as parameters
        # we trace wrapper func, which accepts state parameters as last arg
        # and sets them into the Plan before executing the Plan
        def wrap_stateful_plan(*args):
            role = translation_plan.role
            state = args[-1]
            # Only instantiate state when the count matches and state is a sequence.
            if 0 < len(role.state.state_placeholders) == len(state) and isinstance(
                state, (list, tuple)
            ):
                state_placeholders = tuple(
                    role.placeholders[ph.id.value] for ph in role.state.state_placeholders
                )
                PlaceHolder.instantiate_placeholders(role.state.state_placeholders, state)
                PlaceHolder.instantiate_placeholders(state_placeholders, state)
            # Execute the Plan itself with the non-state args.
            return translation_plan(*args[:-1])
        plan_params = translation_plan.parameters()
        # Stateless plans are traced directly; stateful ones via the wrapper.
        if len(plan_params) > 0:
            torchscript_plan = jit.trace(wrap_stateful_plan, (*args, plan_params))
        else:
            torchscript_plan = jit.trace(translation_plan, args)
        self.plan.torchscript = torchscript_plan
        return self.plan
    def remove(self):
        """Drop the torchscript translation from the Plan and return it."""
        self.plan.torchscript = None
        return self.plan
| syft/execution/translation/torchscript.py | 2,056 | Performs translation from 'list of ops' Plan into torchscript Plan
jit.trace clones input args and can change their type, so we have to skip types check TODO see if type check can be made less strict, e.g. tensor/custom tensor/nn.Parameter could be considered same type To avoid storing Plan state tensors in torchscript, they will be sent as parameters we trace wrapper func, which accepts state parameters as last arg and sets them into the Plan before executing the Plan | 476 | en | 0.881727 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tornado.gen
import bcrypt
__all__ = ["create_new_user"]
@tornado.gen.coroutine
def get_next_id(db, collection):
    """Atomically increment and return the id sequence for *collection*."""
    query = {"_id": "{}id".format(collection)}
    update = {"$inc": {"seq": 1}}
    counter = yield db.counters.find_and_modify(query, update, new=True)
    raise tornado.gen.Return(counter["seq"])
@tornado.gen.coroutine
def create_new_user(db, email, password, group):
    """Hash the password with bcrypt and insert a new user document."""
    hashed = bcrypt.hashpw(password.encode(), bcrypt.gensalt(8))
    user_id = yield get_next_id(db, "user")
    yield db.users.insert({
        "_id": user_id,
        "email": email,
        "hash": hashed,
        "group": group,
    })
| trebol/interface.py | 640 | !/usr/bin/env python -*- coding: utf-8 -*- | 42 | en | 0.34282 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import sys
import numpy as np
import pandas as pd
def run(args):
    """Pivot poll data from stdin into a 'Cluster ID' x 'day' rank table.

    Reads CSV from stdin (columns include 'Cluster ID', 'day', 'rank'),
    sums ranks per cell, fills missing cells with args.notfound_value,
    and writes the pivot table as CSV to stdout.
    """
    frame = pd.read_csv(sys.stdin)
    table = pd.pivot_table(
        frame,
        values='rank',
        index='Cluster ID',
        columns=['day'],
        fill_value=args.notfound_value,
        aggfunc=np.sum,
    )
    table.to_csv(sys.stdout)
if __name__ == '__main__':
    # Command-line entry point: parse options, then run the pivot.
    arg_parser = argparse.ArgumentParser(
        description="Pivot table by cluster and day of the poll")
    arg_parser.add_argument('--notfound_value',
                            type=int,
                            help="value to assign to N/A values on pivot table",
                            required=True)
    run(arg_parser.parse_args())
| scripts/pivot_cluster_day.py | 1,122 | !/usr/bin/env python -*- coding: utf-8 -*- Find maximum rank value and increase by one to use as a fill_value on the pivot with cluster by day notfound_value = grouped['rank'].max()+1 create pivot table and fill non existing with high number i.e:200 Write output Parse command-line arguments. | 292 | en | 0.679139 |
import sys
import random
from collections import deque
def printGrid(grid, wallChar, emptyChar):
    """Print the grid as text, framed by blank lines.

    The grid is stored column-major (grid[x][y]), so each output text row
    is built from one y value across all columns. Walls (value 1) are
    drawn with wallChar, everything else with emptyChar.

    Replaces the previous quadratic string `+=` building with str.join;
    output is byte-identical.
    """
    rows = []
    for y in range(len(grid[0])):
        rows.append("".join(
            wallChar if column[y] == 1 else emptyChar
            for column in grid
        ))
    # Leading "\n" and two trailing "\n" match the original framing.
    print("\n" + "\n".join(rows) + "\n\n")
def makeGrid(width, height):
    """Return a width x height grid (indexed grid[x][y]) of open cells (0)
    with a wall (1) along the entire outer border."""
    grid = []
    for x in range(width):
        column = []
        for y in range(height):
            on_border = x in (0, width - 1) or y in (0, height - 1)
            column.append(1 if on_border else 0)
        grid.append(column)
    return grid
def populateGrid(grid, chance):
    """Scatter walls over the grid in place and return the same grid.

    Each cell independently becomes a wall when a roll of randint(0, 100)
    does not exceed `chance`; cells already set to 1 stay walls. Exactly
    one RNG call is made per cell, in column-major order.
    """
    height = len(grid[0])
    for column in grid:
        for idx in range(height):
            if random.randint(0, 100) <= chance:
                column[idx] = 1
    return grid
def automataIteration(grid, minCount, makePillars):
    """Run one cellular-automata smoothing step and return a new grid.

    A non-border cell becomes a wall when its 3x3 neighbourhood (the cell
    itself included) contains at least `minCount` walls, or when it has no
    walls around it at all and makePillars is 1; otherwise it becomes open.
    Border cells are copied unchanged. The input grid is not modified.
    """
    result = [column[:] for column in grid]
    for x in range(1, len(grid) - 1):
        for y in range(1, len(grid[0]) - 1):
            walls = sum(
                1
                for dx in (-1, 0, 1)
                for dy in (-1, 0, 1)
                if grid[x + dx][y + dy] == 1
            )
            if walls >= minCount or (walls == 0 and makePillars == 1):
                result[x][y] = 1
            else:
                result[x][y] = 0
    return result
def floodFindEmpty(grid, tries, goal):
    """Flood-fill from random open cells to isolate the biggest cave.

    Retries up to `tries` times until a flood covers at least `goal`
    percent of the grid. Returns (new_grid, percentage) where new_grid
    keeps only the last flooded cave open (0) and walls everything else.

    NOTE(review): if `tries` <= 0 or `goal` <= 0 the while body never runs
    and `new_grid` is unbound (NameError); the seed-search loop also spins
    forever on a grid with no open cell — both worth confirming upstream.
    """
    times_remade = 0
    percentage = 0
    while times_remade<tries and percentage<goal:
        copy_grid = [row[:] for row in grid]
        open_count = 0
        times_remade+=1
        unvisited = deque([])
        # Start from an all-wall grid; flooded cells are carved out below.
        new_grid = [[1 for x in range(len(grid[0]))] for y in range(len(grid))]
        #find a random empty space, hope it's the biggest cave
        randx = random.randint(0,len(grid)-1)
        randy = random.randint(0,len(grid[0])-1)
        while(grid[randx][randy] == 1):
            randx = random.randint(0,len(grid)-1)
            randy = random.randint(0,len(grid[0])-1)
        unvisited.append([randx, randy])
        # Breadth-first flood fill over the 8-connected neighbourhood.
        while len(unvisited)>0:
            current = unvisited.popleft()
            new_grid[current[0]][current[1]] = 0
            for k in range(-1,2):
                for l in range(-1,2):
                    if current[0]+k >= 0 and current[0]+k<len(grid) and current[1]+l >= 0 and current[1]+l < len(grid[0]): #if we're not out of bounds
                        if copy_grid[current[0]+k][current[1]+l]==0: #if it's an empty space
                            copy_grid[current[0]+k][current[1]+l]=2 #mark visited
                            open_count += 1
                            unvisited.append([current[0]+k, current[1]+l])
        percentage = open_count*100/(len(grid)*len(grid[0]))
        print("counted {0}, {1}%...".format(open_count,percentage))
    return new_grid, percentage
def main():
    """Interactively generate cave maps with cellular automata, forever.

    Reads grid size and iteration counts from stdin, prints every stage
    of the generation, then starts over. The original version restarted
    itself with a tail-recursive ``main()`` call, which grows the call
    stack on every round and eventually raises RecursionError; a plain
    loop repeats indefinitely without that failure.
    """
    while True:
        width = int(input("Enter the width: "))
        height = int(input("Enter the height: "))
        #chance = 100 - int(input("Enter the percentage chance of randomly generating a wall: "))
        #count = int(input("Enter the min count of surrounding walls for the automata rules: "))
        chance = 40  # percent chance a cell starts as a wall
        count = 5    # neighbour threshold for the automata rule
        iterations = int(input("Enter the number of regular iterations: "))
        pillarIterations = int(input("Enter the number of pillar-generating iterations: "))
        floodTries = 5
        goalPercentage = 30 # above 30% seems to be a good target
        grid = makeGrid(width, height)
        print("\nRandomly populated grid:")
        grid = populateGrid(grid, chance)
        printGrid(grid, '# ', '· ')
        for i in range(pillarIterations):
            print("{0} iteration(s) of automata with pillars:".format(i+1))
            grid = automataIteration(grid, count, 1)
            printGrid(grid, '# ', '· ')
        for i in range(iterations):
            print("{0} iteration(s) of regular automata:".format(i+1))
            grid = automataIteration(grid, count, 0)
            printGrid(grid, '# ', '· ')
        print("\nAfter flood algorithm to find the biggest cave:")
        grid, percentage = floodFindEmpty(grid, floodTries, goalPercentage)
        if percentage<goalPercentage:
            print("Failed to produce a big enough cave after {0} tries...".format(floodTries))
        else:
            print("Percentage of open space: {0}%".format(percentage))
            printGrid(grid, '# ', '· ')
        # self reminder to try checking map size https://stackoverflow.com/questions/1331471/in-memory-size-of-a-python-structure
        print("")
# Entry point: run the interactive generator when executed as a script.
if __name__ == "__main__":
    main()
| cellularcaves.py | 4,658 | reminder to test with: for index, value in enumerate(grid) test with list comprehension instead??find a random empty space, hope it's the biggest caveif we're not out of boundsif it's an empty spacemark visitedchance = 100 - int(input("Enter the percentage chance of randomly generating a wall: "))count = int(input("Enter the min count of surrounding walls for the automata rules: ")) above 30% seems to be a good target self reminder to try checking map size https://stackoverflow.com/questions/1331471/in-memory-size-of-a-python-structure | 541 | en | 0.750347 |
## ********Day 55 Start**********
## Advanced Python Decorator Functions
class User:
    """A site user identified by name, initially logged out."""

    def __init__(self, name):
        # Every account starts in the logged-out state.
        self.name = name
        self.is_logged_in = False
def is_authenticated_decorator(function):
    """Decorator that only invokes *function* when the first positional
    argument (a User-like object) has ``is_logged_in`` set.

    Fixes the original wrapper, which forwarded only the first argument
    (dropping any extra args/kwargs) and discarded the wrapped
    function's return value. Unauthenticated calls return None.
    """
    def wrapper(*args, **kwargs):
        # Gate on the user object, conventionally passed first.
        if args[0].is_logged_in:
            return function(*args, **kwargs)
        return None
    return wrapper
@is_authenticated_decorator
def create_blog_post(user):
    """Print a note that *user* published a post; the decorator suppresses
    the call entirely for logged-out users."""
    print(f"This is {user.name}'s new blog post.")
# Demo: log the user in first so the guarded call actually runs.
new_user = User("Edgar")
new_user.is_logged_in = True
create_blog_post(new_user)
#!/usr/bin/env python
import io
import sys
from datetime import datetime
# To make sure all packet types are available
import scapy.all # noqa
import scapy.packet
from scapy.layers.l2 import Ether
import pcapng
from pcapng.blocks import EnhancedPacket, InterfaceDescription, SectionHeader
def col256(text, fg=None, bg=None, bold=False):
    """Wrap *text* in xterm-256 ANSI escape sequences.

    *fg*/*bg* accept either an int palette index or a string of up to
    three base-6 digits addressing the 6x6x6 colour cube (offset 16).
    Non-string *text* is converted via repr().
    """
    def _to_color(num):
        if isinstance(num, int):
            return num  # Assume it is already a color
        if isinstance(num, str) and len(num) <= 3:
            return 16 + int(num, 6)
        raise ValueError("Invalid color: {0!r}".format(num))

    def _get_color(col):
        return "8;5;{0:d}".format(_to_color(col))

    if not isinstance(text, str):
        text = repr(text)

    pieces = []
    if bold:
        pieces.append("\x1b[1m")
    if fg is not None:
        pieces.append("\x1b[3{0}m".format(_get_color(fg)))
    if bg is not None:
        pieces.append("\x1b[4{0}m".format(_get_color(bg)))
    pieces.append(text)
    pieces.append("\x1b[0m")  # reset attributes
    return "".join(pieces)
def dump_information(scanner):
    """Dispatch each pcapng block from *scanner* to its pretty-printer.

    Unrecognised block types fall through to their default str() form.
    """
    for block in scanner:
        if isinstance(block, SectionHeader):
            pprint_sectionheader(block)
        elif isinstance(block, InterfaceDescription):
            pprint_interfacedesc(block)
        elif isinstance(block, EnhancedPacket):
            pprint_enhanced_packet(block)
        else:
            print("    " + str(block))
def pprint_options(options):
    """Yield coloured "--", key, value fragments for a block's options.

    Yields nothing at all when the options container is empty.
    """
    if not len(options):
        return
    yield "--"
    for key, values in options.iter_all_items():
        for value in values:
            yield col256(key + ":", bold=True, fg="453")
            yield col256(str(value), fg="340")
def pprint_sectionheader(block):
    """Print a coloured one-line summary of a SectionHeader block."""
    endianness_desc = {
        "<": "Little endian",
        ">": "Big endian",
        "!": "Network (Big endian)",
        "=": "Native",
    }
    version = ".".join(str(x) for x in block.version)
    pieces = [
        col256(" Section ", bg="400", fg="550"),
        col256("version:", bold=True),
        col256(version, fg="145"),
        "-",
        col256(endianness_desc.get(block.endianness, "Unknown endianness"), bold=True),
        "-",
    ]
    # A negative section length means "not specified" per the pcapng spec.
    if block.length < 0:
        pieces.append(col256("unspecified size", bold=True))
    else:
        pieces.append(col256("length:", bold=True))
        pieces.append(col256(str(block.length), fg="145"))
    pieces.extend(pprint_options(block.options))
    print(" ".join(pieces))
def pprint_interfacedesc(block):
    """Print a coloured one-line summary of an InterfaceDescription block."""
    header = col256(" Interface #{0} ".format(block.interface_id), bg="010", fg="453")
    pieces = [
        header,
        col256("Link type:", bold=True),
        col256(str(block.link_type), fg="140"),
        col256(block.link_type_description, fg="145"),
        col256("Snap length:", bold=True),
        col256(str(block.snaplen), fg="145"),
    ]
    pieces.extend(pprint_options(block.options))
    print(" ".join(pieces))
def pprint_enhanced_packet(block):
    """Print a coloured summary of an EnhancedPacket block.

    Shows the capture interface, timestamp and sizes, then decodes and
    prints the payload with scapy when the link type is Ethernet (1).
    """
    text = [
        col256(" Packet+ ", bg="001", fg="345"),
        # col256('NIC:', bold=True),
        # col256(str(block.interface_id), fg='145'),
        col256(str(block.interface.options["if_name"]), fg="140"),
        col256(
            str(
                datetime.utcfromtimestamp(block.timestamp).strftime("%Y-%m-%d %H:%M:%S")
            ),
            fg="455",
        ),
    ]

    # The interface name option is optional: skip the NIC columns silently
    # when it is absent.
    try:
        text.extend(
            [
                col256("NIC:", bold=True),
                col256(block.interface_id, fg="145"),
                col256(block.interface.options["if_name"], fg="140"),
            ]
        )
    except KeyError:
        pass

    text.extend(
        [
            # col256('Size:', bold=True),
            col256(str(block.packet_len) + " bytes", fg="025")
        ]
    )
    # Flag packets that were cut short by the snap length.
    if block.captured_len != block.packet_len:
        text.extend(
            [
                col256("Truncated to:", bold=True),
                col256(str(block.captured_len) + "bytes", fg="145"),
            ]
        )
    text.extend(pprint_options(block.options))
    print(" ".join(text))
    # Link type 1 is Ethernet -- the only type we can decode with scapy here.
    if block.interface.link_type == 1:
        # print(repr(block.packet_data))
        # print(col256(repr(Ether(block.packet_data)), fg='255'))
        _info = format_packet_information(block.packet_data)
        print("\n".join("    " + line for line in _info))
    else:
        print("    Printing information for non-ethernet packets")
        print("    is not supported yet.")
        # print('\n'.join('    ' + line
        #                 for line in format_binary_data(block.packet_data)))
def format_packet_information(packet_data):
    """Decode raw *packet_data* as an Ethernet frame and yield its
    formatted, coloured description lines."""
    return format_scapy_packet(Ether(packet_data))
def format_scapy_packet(packet):
    """Recursively yield coloured ``Layer field=value ...`` lines for a
    scapy packet, indenting each nested payload layer.

    Raw payloads are escaped through make_printable; anything else
    unknown falls back to its repr().
    """
    fields = []
    for f in packet.fields_desc:
        # if isinstance(f, ConditionalField) and not f._evalcond(self):
        #     continue
        if f.name in packet.fields:
            val = f.i2repr(packet, packet.fields[f.name])
        elif f.name in packet.overloaded_fields:
            # Field value inherited from an enclosing layer.
            val = f.i2repr(packet, packet.overloaded_fields[f.name])
        else:
            continue

        fields.append("{0}={1}".format(col256(f.name, "542"), col256(val, "352")))

    yield "{0} {1}".format(col256(packet.__class__.__name__, "501"), " ".join(fields))

    if packet.payload:
        if isinstance(packet.payload, scapy.packet.Raw):
            raw_data = str(packet.payload)
            for line in make_printable(raw_data).splitlines():
                yield "    " + line
            # for line in format_binary_data(raw_data):
            #     yield '    ' + line
        elif isinstance(packet.payload, scapy.packet.Packet):
            for line in format_scapy_packet(packet.payload):
                yield "    " + line
        else:
            for line in repr(packet.payload).splitlines():
                yield "    " + line
def make_printable(data):  # todo: preserve unicode
    """Escape *data* for safe terminal display.

    Backslashes are doubled, printable ASCII plus newline/CR pass
    through, everything else becomes a ``\\xNN`` escape.
    """
    out = []
    for ch in data:
        if ch == "\\":
            out.append("\\\\")
        elif ch in "\n\r" or (32 <= ord(ch) <= 126):
            out.append(ch)
        else:
            out.append("\\x{0:02x}".format(ord(ch)))
    return "".join(out)
def format_binary_data(data):
    """Yield hexdump-style lines for *data* (a bytes object).

    Each line is ``OFFSET: HEX ASCII`` with 16 bytes per row, hex bytes
    grouped in pairs (extra gap every 8 bytes) and non-printable bytes
    shown as ``.`` in the ASCII column.

    Fixes the original Python-2-only implementation: iterating bytes in
    Python 3 yields ints (so ``ord(byte)`` raised TypeError), str was
    written to BytesIO buffers, and the offset was incremented per byte
    *before* the yield. Offsets now mark the start of each row.
    """
    row_size = 16  # bytes per output line
    for row_offset in range(0, len(data), row_size):
        row = data[row_offset:row_offset + row_size]
        hex_parts = []
        ascii_chars = []
        for i, byte in enumerate(row):
            ascii_chars.append(chr(byte) if 32 <= byte <= 126 else ".")
            hex_parts.append(format(byte, "02x"))
            # Group hex in pairs, with a wider gap after every 8 bytes.
            if i < row_size - 1:
                if i % 2 == 1:
                    hex_parts.append(" ")
                if i % 8 == 7:
                    hex_parts.append(" ")
        yield "{0:08x}: {1:40s} {2:16s}".format(
            row_offset, "".join(hex_parts), "".join(ascii_chars)
        )
def main():
    """Dump the pcapng file named in argv[1], or read from stdin when no
    argument (or ``-``) is given."""
    if (len(sys.argv) > 1) and (sys.argv[1] != "-"):
        with open(sys.argv[1], "rb") as fp:
            scanner = pcapng.FileScanner(fp)
            dump_information(scanner)
    else:
        # pcapng is a binary format: on Python 3 the text-mode sys.stdin
        # would corrupt the stream, so read from the underlying buffer.
        scanner = pcapng.FileScanner(sys.stdin.buffer)
        dump_information(scanner)
# Entry point: dump the capture when executed as a script.
if __name__ == "__main__":
    main()
| examples/dump_pcapng_info_pretty.py | 7,368 | !/usr/bin/env python To make sure all packet types are available noqa Assume it is already a color col256('endianness:', bold=True), col256('NIC:', bold=True), col256(str(block.interface_id), fg='145'), col256('Size:', bold=True), print(repr(block.packet_data)) print(col256(repr(Ether(block.packet_data)), fg='255')) print('\n'.join(' ' + line for line in format_binary_data(block.packet_data))) if isinstance(f, ConditionalField) and not f._evalcond(self): continue for line in format_binary_data(raw_data): yield ' ' + line todo: preserve unicode bytes | 597 | en | 0.375558 |
# -*- coding: utf-8 -*-
# Copyright 2013-2021 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Vincent Garonne <vincent.garonne@cern.ch>, 2013-2018
# - Cedric Serfon <cedric.serfon@cern.ch>, 2013-2020
# - Ralph Vigne <ralph.vigne@cern.ch>, 2013-2014
# - Martin Barisits <martin.barisits@cern.ch>, 2013-2021
# - Mario Lassnig <mario.lassnig@cern.ch>, 2014-2021
# - David Cameron <david.cameron@cern.ch>, 2014
# - Thomas Beermann <thomas.beermann@cern.ch>, 2014-2021
# - Wen Guan <wen.guan@cern.ch>, 2014-2015
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
# - Dimitrios Christidis <dimitrios.christidis@cern.ch>, 2019-2021
# - Robert Illingworth <illingwo@fnal.gov>, 2019
# - James Perry <j.perry@epcc.ed.ac.uk>, 2019
# - Jaroslav Guenther <jaroslav.guenther@cern.ch>, 2019
# - Andrew Lister <andrew.lister@stfc.ac.uk>, 2019
# - Ilija Vukotic <ivukotic@cern.ch>, 2020-2021
# - Brandon White <bjwhite@fnal.gov>, 2019
# - Tomas Javurek <tomas.javurek@cern.ch>, 2020
# - Luc Goossens <luc.goossens@cern.ch>, 2020
# - Eli Chadwick <eli.chadwick@stfc.ac.uk>, 2020
# - Patrick Austin <patrick.austin@stfc.ac.uk>, 2020
# - Eric Vaandering <ewv@fnal.gov>, 2020-2021
# - Benedikt Ziemons <benedikt.ziemons@cern.ch>, 2020-2021
# - Radu Carpa <radu.carpa@cern.ch>, 2021
# - Gabriele Fronzé <sucre.91@hotmail.it>, 2021
from __future__ import print_function
import heapq
import logging
import random
from collections import defaultdict
from copy import deepcopy
from curses.ascii import isprint
from datetime import datetime, timedelta
from hashlib import sha256
from json import dumps
from re import match
from struct import unpack
from traceback import format_exc
import requests
from dogpile.cache import make_region
from dogpile.cache.api import NO_VALUE
from six import string_types
from sqlalchemy import func, and_, or_, exists, not_
from sqlalchemy.exc import DatabaseError, IntegrityError
from sqlalchemy.orm import aliased
from sqlalchemy.orm.exc import FlushError, NoResultFound
from sqlalchemy.sql import label
from sqlalchemy.sql.expression import case, select, text, false, true
import rucio.core.did
import rucio.core.lock
from rucio.common import exception
from rucio.common.types import InternalScope
from rucio.common.utils import chunks, clean_surls, str_to_date, add_url_query
from rucio.core.config import get as config_get
from rucio.core.credential import get_signed_url
from rucio.core.rse import get_rse, get_rse_name, get_rse_attribute, get_rse_vo, list_rses
from rucio.core.rse_counter import decrease, increase
from rucio.core.rse_expression_parser import parse_expression
from rucio.db.sqla import models, filter_thread_work
from rucio.db.sqla.constants import (DIDType, ReplicaState, OBSOLETE, DIDAvailability,
BadFilesStatus, RuleState, BadPFNStatus)
from rucio.db.sqla.session import (read_session, stream_session, transactional_session,
DEFAULT_SCHEMA_NAME, BASE)
from rucio.rse import rsemanager as rsemgr
# Process-local dogpile cache (60 s TTL) for expensive lookups in this module.
REGION = make_region().configure('dogpile.cache.memory', expiration_time=60)
@read_session
def get_bad_replicas_summary(rse_expression=None, from_date=None, to_date=None, filter=None, session=None):
    """
    List the bad file replicas summary. Method used by the rucio-ui.

    :param rse_expression: The RSE expression.
    :param from_date: The start date.
    :param to_date: The end date.
    :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True}
    :param session: The database session in use.

    :returns: A list of dicts, one per (rse, day, reason) incident, holding a
              per-state counter plus rse_id, rse, created_at and reason keys.
    """
    result = []
    incidents = {}
    rse_clause = []
    if rse_expression:
        for rse in parse_expression(expression=rse_expression, filter=filter, session=session):
            rse_clause.append(models.BadReplicas.rse_id == rse['id'])
    elif filter:
        # Ensure we limit results to current VO even if we don't specify an RSE expression
        for rse in list_rses(filters=filter, session=session):
            rse_clause.append(models.BadReplicas.rse_id == rse['id'])

    # Truncating created_at to day granularity is dialect-specific.
    if session.bind.dialect.name == 'oracle':
        to_days = func.trunc(models.BadReplicas.created_at, str('DD'))
    elif session.bind.dialect.name == 'mysql':
        to_days = func.date(models.BadReplicas.created_at)
    elif session.bind.dialect.name == 'postgresql':
        to_days = func.date_trunc('day', models.BadReplicas.created_at)
    else:
        # sqlite fallback
        to_days = func.strftime(models.BadReplicas.created_at, '%Y-%m-%d')
    query = session.query(func.count(), to_days, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.reason)
    # To be added : HINTS
    if rse_clause != []:
        query = query.filter(or_(*rse_clause))
    if from_date:
        query = query.filter(models.BadReplicas.created_at > from_date)
    if to_date:
        query = query.filter(models.BadReplicas.created_at < to_date)
    summary = query.group_by(to_days, models.BadReplicas.rse_id, models.BadReplicas.reason, models.BadReplicas.state).all()
    # Group counts by (rse_id, day, reason); each replica state becomes a key.
    for row in summary:
        if (row[2], row[1], row[4]) not in incidents:
            incidents[(row[2], row[1], row[4])] = {}
        incidents[(row[2], row[1], row[4])][str(row[3].name)] = row[0]

    # Flatten the grouped incidents into the result dictionaries.
    for incident in incidents:
        res = incidents[incident]
        res['rse_id'] = incident[0]
        res['rse'] = get_rse_name(rse_id=incident[0], session=session)
        res['created_at'] = incident[1]
        res['reason'] = incident[2]
        result.append(res)

    return result
@read_session
def __exists_replicas(rse_id, scope=None, name=None, path=None, session=None):
    """
    Internal method to check if a replica exists at a given site.

    :param rse_id: The RSE id.
    :param scope: The scope of the file.
    :param name: The name of the file.
    :param path: The path of the replica.
    :param session: The database session in use.

    :returns: Tuple (exists, scope, name, already_declared, size) where
              already_declared is True when the replica is already
              recorded as BAD in the BadReplicas table.
    """
    already_declared = False
    if path:
        path_clause = [models.RSEFileAssociation.path == path]
        # Paths may be stored with or without a leading slash: match both.
        if path.startswith('/'):
            path_clause.append(models.RSEFileAssociation.path == path[1:])
        else:
            path_clause.append(models.RSEFileAssociation.path == '/%s' % path)
        query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\
            with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle').\
            filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*path_clause))
    else:
        query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\
            filter_by(rse_id=rse_id, scope=scope, name=name)
    # Fetch the row directly: the original count()+first() pattern issued
    # two queries per lookup.
    result = query.first()
    if result:
        path, scope, name, rse_id, size = result
        # Now we check that the replica is not already declared bad
        already_declared = session.query(models.BadReplicas.scope).\
            filter_by(rse_id=rse_id, scope=scope, name=name, state=BadFilesStatus.BAD).\
            first() is not None
        return True, scope, name, already_declared, size
    return False, None, None, already_declared, None
@read_session
def list_bad_replicas_status(state=BadFilesStatus.BAD, rse_id=None, younger_than=None, older_than=None, limit=None, list_pfns=False, vo='def', session=None):
    """
    List the bad file replicas history states. Method used by the rucio-ui.

    :param state: The state of the file (SUSPICIOUS or BAD).
    :param rse_id: The RSE id.
    :param younger_than: datetime object to select bad replicas younger than this date.
    :param older_than: datetime object to select bad replicas older than this date.
    :param limit: The maximum number of replicas returned.
    :param list_pfns: If True, resolve the bad replicas to a deduplicated list of PFNs.
    :param vo: The VO to find replicas from.
    :param session: The database session in use.
    """
    result = []
    query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.created_at, models.BadReplicas.updated_at)
    if state:
        query = query.filter(models.BadReplicas.state == state)
    if rse_id:
        query = query.filter(models.BadReplicas.rse_id == rse_id)
    if younger_than:
        query = query.filter(models.BadReplicas.created_at >= younger_than)
    if older_than:
        query = query.filter(models.BadReplicas.created_at <= older_than)
    if limit:
        query = query.limit(limit)

    for badfile in query.yield_per(1000):
        if badfile.scope.vo == vo:
            if list_pfns:
                result.append({'scope': badfile.scope, 'name': badfile.name, 'type': DIDType.FILE})
            else:
                result.append({'scope': badfile.scope, 'name': badfile.name, 'rse': get_rse_name(rse_id=badfile.rse_id, session=session), 'rse_id': badfile.rse_id, 'state': badfile.state, 'created_at': badfile.created_at, 'updated_at': badfile.updated_at})

    if list_pfns:
        reps = []
        for rep in list_replicas(result, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=True, session=session):
            pfn = None
            if rse_id in rep['rses'] and rep['rses'][rse_id]:
                pfn = rep['rses'][rse_id][0]
                if pfn and pfn not in reps:
                    reps.append(pfn)
            else:
                reps.extend([item for row in rep['rses'].values() for item in row])
        # Bug fix: the original computed `list(set(reps))` but discarded the
        # result, so duplicates gathered in the else-branch above survived.
        # Deduplicate while preserving first-seen order.
        result = list(dict.fromkeys(reps))
    return result
@read_session
def list_bad_replicas_history(limit=10000, thread=None, total_threads=None, session=None):
    """
    List the bad file replicas history. Method only used by necromancer

    :param limit: The maximum number of replicas returned.
    :param thread: The assigned thread for this necromancer.
    :param total_threads: The total number of threads of all necromancers.
    :param session: The database session in use.

    :returns: Dict mapping rse_id to a list of {'scope', 'name'} dicts.
    """
    query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\
        filter(models.BadReplicas.state == BadFilesStatus.BAD)
    # Shard the rows across necromancer threads by hashing the file name.
    query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='name')
    query = query.limit(limit)

    bad_replicas = {}
    for scope, name, rse_id in query.yield_per(1000):
        bad_replicas.setdefault(rse_id, []).append({'scope': scope, 'name': name})
    return bad_replicas
@transactional_session
def update_bad_replicas_history(dids, rse_id, session=None):
    """
    Update the bad file replicas history. Method only used by necromancer

    For each DID, moves its BAD BadReplicas entry to a terminal state
    (RECOVERED, LOST or DELETED) based on the current replica and DID state.

    :param dids: The list of DIDs.
    :param rse_id: The rse_id.
    :param session: The database session in use.
    """
    for did in dids:
        # Check if the replica is still there
        try:
            result = session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name']).one()
            state = result.state
            if state == ReplicaState.AVAILABLE:
                # If yes, and replica state is AVAILABLE, update BadReplicas
                query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name'])
                query.update({'state': BadFilesStatus.RECOVERED, 'updated_at': datetime.utcnow()}, synchronize_session=False)
            elif state != ReplicaState.BAD:
                # If the replica state is not AVAILABLE check if other replicas for the same file are still there.
                # NOTE(review): this lookup filters on the *same* rse_id, so it
                # actually re-checks this RSE rather than other replicas --
                # confirm against upstream intent.
                try:
                    session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name'], state=ReplicaState.AVAILABLE).one()
                except NoResultFound:
                    # No replicas are available for this file. Reset the replica state to BAD
                    update_replicas_states([{'scope': did['scope'], 'name': did['name'], 'rse_id': rse_id, 'state': ReplicaState.BAD}], session=session)
                    session.query(models.Source).filter_by(scope=did['scope'], name=did['name'], rse_id=rse_id).delete(synchronize_session=False)
            else:
                # Here that means that the file has not been processed by the necro. Just pass
                pass
        except NoResultFound:
            # We end-up here if the replica is not registered anymore on the RSE
            try:
                result = session.query(models.DataIdentifier.availability).filter_by(scope=did['scope'], name=did['name'], did_type=DIDType.FILE).one()
                # If yes, the final state depends on DIDAvailability
                state = result.availability
                final_state = None
                if state == DIDAvailability.LOST:
                    final_state = BadFilesStatus.LOST
                elif state == DIDAvailability.DELETED:
                    final_state = BadFilesStatus.DELETED
                elif state == DIDAvailability.AVAILABLE:
                    final_state = BadFilesStatus.DELETED
                else:
                    # For completness, it shouldn't happen.
                    print('Houston we have a problem.')
                    final_state = BadFilesStatus.DELETED
                query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name'])
                query.update({'state': final_state, 'updated_at': datetime.utcnow()}, synchronize_session=False)
            except NoResultFound:
                # If no, the replica is marked as LOST in BadFilesStatus
                query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name'])
                query.update({'state': BadFilesStatus.LOST, 'updated_at': datetime.utcnow()}, synchronize_session=False)
@transactional_session
def __declare_bad_file_replicas(pfns, rse_id, reason, issuer, status=BadFilesStatus.BAD, scheme='srm', session=None):
    """
    Declare a list of bad replicas.

    :param pfns: The list of PFNs.
    :param rse_id: The RSE id.
    :param reason: The reason of the loss.
    :param issuer: The issuer account.
    :param status: Either BAD or SUSPICIOUS.
    :param scheme: The scheme of the PFNs.
    :param session: The database session in use.
    :returns: List of PFNs that could not be declared, each suffixed with
              the rejection reason.
    """
    unknown_replicas = []
    declared_replicas = []
    rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session)
    replicas = []
    proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme)
    if rse_info['deterministic']:
        # Deterministic RSE: derive scope/name from the parsed PFN path.
        parsed_pfn = proto.parse_pfns(pfns=pfns)
        for pfn in parsed_pfn:
            # WARNING : this part is ATLAS specific and must be changed
            path = parsed_pfn[pfn]['path']
            if path.startswith('/user') or path.startswith('/group'):
                scope = '%s.%s' % (path.split('/')[1], path.split('/')[2])
                name = parsed_pfn[pfn]['name']
            elif path.startswith('/'):
                scope = path.split('/')[1]
                name = parsed_pfn[pfn]['name']
            else:
                scope = path.split('/')[0]
                name = parsed_pfn[pfn]['name']
            scope = InternalScope(scope, vo=issuer.vo)
            __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session)
            # Declare only existing replicas; BAD additionally requires that
            # the replica was not already declared.
            if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS):
                replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD})
                new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size)
                new_bad_replica.save(session=session, flush=False)
                # Drop any transfer sources pointing at the now-bad replica.
                session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False)
                declared_replicas.append(pfn)
            else:
                if already_declared:
                    unknown_replicas.append('%s %s' % (pfn, 'Already declared'))
                else:
                    # Distinguish PFNs containing non-printable characters from
                    # genuinely unknown replicas in the rejection message.
                    no_hidden_char = True
                    for char in str(pfn):
                        if not isprint(char):
                            unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars'))
                            no_hidden_char = False
                            break
                    if no_hidden_char:
                        unknown_replicas.append('%s %s' % (pfn, 'Unknown replica'))
        if status == BadFilesStatus.BAD:
            # For BAD file, we modify the replica state, not for suspicious
            try:
                # there shouldn't be any exceptions since all replicas exist
                update_replicas_states(replicas, session=session)
            except exception.UnsupportedOperation:
                raise exception.ReplicaNotFound("One or several replicas don't exist.")
    else:
        # Non-deterministic RSE: resolve each replica by its stored path.
        path_clause = []
        parsed_pfn = proto.parse_pfns(pfns=pfns)
        for pfn in parsed_pfn:
            path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name'])
            __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope=None, name=None, path=path, session=session)
            if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS):
                replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD})
                new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size)
                new_bad_replica.save(session=session, flush=False)
                session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False)
                declared_replicas.append(pfn)
                # Paths may be stored with or without a leading slash: match both.
                path_clause.append(models.RSEFileAssociation.path == path)
                if path.startswith('/'):
                    path_clause.append(models.RSEFileAssociation.path == path[1:])
                else:
                    path_clause.append(models.RSEFileAssociation.path == '/%s' % path)
            else:
                if already_declared:
                    unknown_replicas.append('%s %s' % (pfn, 'Already declared'))
                else:
                    no_hidden_char = True
                    for char in str(pfn):
                        if not isprint(char):
                            unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars'))
                            no_hidden_char = False
                            break
                    if no_hidden_char:
                        unknown_replicas.append('%s %s' % (pfn, 'Unknown replica'))

        if status == BadFilesStatus.BAD and declared_replicas != []:
            # For BAD file, we modify the replica state, not for suspicious
            # NOTE(review): the Oracle hint string below is missing its closing
            # parenthesis; malformed hints are silently ignored by Oracle --
            # confirm and fix upstream.
            query = session.query(models.RSEFileAssociation) \
                .with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle') \
                .filter(models.RSEFileAssociation.rse_id == rse_id) \
                .filter(or_(*path_clause))
            rowcount = query.update({'state': ReplicaState.BAD})
            if rowcount != len(declared_replicas):
                # there shouldn't be any exceptions since all replicas exist
                print(rowcount, len(declared_replicas), declared_replicas)
                raise exception.ReplicaNotFound("One or several replicas don't exist.")
    try:
        session.flush()
    except IntegrityError as error:
        raise exception.RucioException(error.args)
    except DatabaseError as error:
        raise exception.RucioException(error.args)
    except FlushError as error:
        raise exception.RucioException(error.args)
    return unknown_replicas
@transactional_session
def add_bad_dids(dids, rse_id, reason, issuer, state=BadFilesStatus.BAD, session=None):
    """
    Declare a list of bad replicas.

    :param dids: The list of DIDs.
    :param rse_id: The RSE id.
    :param reason: The reason of the loss.
    :param issuer: The issuer account.
    :param state: BadFilesStatus.BAD
    :param session: The database session in use.
    :returns: List of DIDs that could not be declared, each suffixed with
              the rejection reason.
    """
    undeclarable = []
    to_update = []
    for did in dids:
        scope = InternalScope(did['scope'], vo=issuer.vo)
        name = did['name']
        replica_found, _scope, _name, already_declared, size = __exists_replicas(
            rse_id, scope, name, path=None, session=session)
        if replica_found and not already_declared:
            # Queue the replica state change and record the bad replica.
            to_update.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD})
            models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason,
                               state=state, account=issuer, bytes=size).save(session=session, flush=False)
            # Drop any transfer sources pointing at the now-bad replica.
            session.query(models.Source).filter_by(scope=scope, name=name,
                                                   rse_id=rse_id).delete(synchronize_session=False)
        elif already_declared:
            undeclarable.append('%s:%s %s' % (did['scope'], name, 'Already declared'))
        else:
            undeclarable.append('%s:%s %s' % (did['scope'], name, 'Unknown replica'))

    if state == BadFilesStatus.BAD:
        try:
            update_replicas_states(to_update, session=session)
        except exception.UnsupportedOperation:
            raise exception.ReplicaNotFound("One or several replicas don't exist.")

    try:
        session.flush()
    except (IntegrityError, DatabaseError, FlushError) as error:
        raise exception.RucioException(error.args)

    return undeclarable
@transactional_session
def declare_bad_file_replicas(pfns, reason, issuer, status=BadFilesStatus.BAD, session=None):
    """
    Declare a list of bad replicas.

    :param pfns: The list of PFNs.
    :param reason: The reason of the loss.
    :param issuer: The issuer account.
    :param status: The status of the file (SUSPICIOUS or BAD).
    :param session: The database session in use.
    :returns: Dict mapping rse_id (or 'unknown') to the PFNs that could
              not be declared.
    """
    # Resolve each PFN to its hosting RSE, collecting unresolvable ones.
    scheme, files_to_declare, unknown_replicas = get_pfn_to_rse(pfns, vo=issuer.vo, session=session)
    for rse_id, rse_pfns in files_to_declare.items():
        undeclared = __declare_bad_file_replicas(rse_pfns, rse_id, reason, issuer, status=status, scheme=scheme, session=session)
        if undeclared:
            unknown_replicas[rse_id] = undeclared
    return unknown_replicas
@read_session
def get_pfn_to_rse(pfns, vo='def', session=None):
    """
    Get the RSE associated to a list of PFNs.

    :param pfns: The list of pfn.
    :param vo: The VO to find RSEs at.
    :param session: The database session in use.

    :returns: a tuple : scheme, {rse1 : [pfn1, pfn2, ...], rse2: [pfn3, pfn4, ...]}, {'unknown': [pfn5, pfn6, ...]}.
    """
    unknown_replicas = {}
    storage_elements = []
    se_condition = []
    dict_rse = {}
    surls = clean_surls(pfns)
    scheme = surls[0].split(':')[0] if surls else None
    for surl in surls:
        # All PFNs must share one protocol so a single protocol query suffices.
        if surl.split(':')[0] != scheme:
            raise exception.InvalidType('The PFNs specified must have the same protocol')

        split_se = surl.split('/')[2].split(':')
        storage_element = split_se[0]

        if storage_element not in storage_elements:
            storage_elements.append(storage_element)
            se_condition.append(models.RSEProtocols.hostname == storage_element)
    query = session.query(models.RSEProtocols.rse_id, models.RSEProtocols.scheme, models.RSEProtocols.hostname, models.RSEProtocols.port, models.RSEProtocols.prefix).\
        filter(and_(or_(*se_condition), models.RSEProtocols.scheme == scheme)).filter(models.RSE.staging_area == false())
    protocols = {}

    # Build, per RSE, the two URL prefixes a matching PFN may start with
    # (with and without an explicit port).
    for rse_id, protocol, hostname, port, prefix in query.yield_per(10000):
        protocols[rse_id] = ('%s://%s%s' % (protocol, hostname, prefix), '%s://%s:%s%s' % (protocol, hostname, port, prefix))
    hint = None
    for surl in surls:
        # Fast path: consecutive PFNs usually live on the same RSE as the last match.
        if hint and (surl.find(protocols[hint][0]) > -1 or surl.find(protocols[hint][1]) > -1):
            dict_rse[hint].append(surl)
        else:
            mult_rse_match = 0
            for rse_id in protocols:
                if (surl.find(protocols[rse_id][0]) > -1 or surl.find(protocols[rse_id][1]) > -1) and get_rse_vo(rse_id=rse_id, session=session) == vo:
                    mult_rse_match += 1
                    # A PFN matching more than one RSE is ambiguous: abort.
                    if mult_rse_match > 1:
                        print('ERROR, multiple matches : %s at %s' % (surl, rse_id))
                        raise exception.RucioException('ERROR, multiple matches : %s at %s' % (surl, get_rse_name(rse_id=rse_id, session=session)))
                    hint = rse_id
                    if hint not in dict_rse:
                        dict_rse[hint] = []
                    dict_rse[hint].append(surl)
            if mult_rse_match == 0:
                # No protocol prefix matched: report the PFN as unknown.
                if 'unknown' not in unknown_replicas:
                    unknown_replicas['unknown'] = []
                unknown_replicas['unknown'].append(surl)
    return scheme, dict_rse, unknown_replicas
@read_session
def list_bad_replicas(limit=10000, thread=None, total_threads=None, session=None):
    """
    List RSE File replicas with no locks.

    :param limit: The maximum number of replicas returned.
    :param thread: The assigned thread for this necromancer.
    :param total_threads: The total number of threads of all necromancers.
    :param session: The database session in use.

    :returns: a list of dictionary {'scope' scope, 'name': name, 'rse_id': rse_id, 'rse': rse}.
    """
    schema_dot = '%s.' % DEFAULT_SCHEMA_NAME if DEFAULT_SCHEMA_NAME else ''
    if session.bind.dialect.name == 'oracle':
        # The filter(text...)) is needed otherwise, SQLA uses bind variables and the index is not used.
        query = session.query(models.RSEFileAssociation.scope,
                              models.RSEFileAssociation.name,
                              models.RSEFileAssociation.rse_id).\
            with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_STATE_IDX)", 'oracle').\
            filter(text("CASE WHEN (%sreplicas.state != 'A') THEN %sreplicas.rse_id END IS NOT NULL" % (schema_dot,
                                                                                                        schema_dot))). \
            filter(models.RSEFileAssociation.state == ReplicaState.BAD)
    else:
        query = session.query(models.RSEFileAssociation.scope,
                              models.RSEFileAssociation.name,
                              models.RSEFileAssociation.rse_id).\
            filter(models.RSEFileAssociation.state == ReplicaState.BAD)
    # Shard the rows across necromancer threads by hashing the file name.
    query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='%sreplicas.name' % (schema_dot))
    # Exclude files whose DID is already flagged LOST.
    query = query.join(models.DataIdentifier,
                       and_(models.DataIdentifier.scope == models.RSEFileAssociation.scope,
                            models.DataIdentifier.name == models.RSEFileAssociation.name)).\
        filter(models.DataIdentifier.availability != DIDAvailability.LOST)
    query = query.limit(limit)
    rows = []
    for scope, name, rse_id in query.yield_per(1000):
        rows.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'rse': get_rse_name(rse_id=rse_id, session=session)})
    return rows
@stream_session
def get_did_from_pfns(pfns, rse_id=None, vo='def', session=None):
    """
    Get the DIDs associated to a PFN on one given RSE

    :param pfns: The list of PFNs.
    :param rse_id: The RSE id. When not given, the RSE is resolved from the PFNs.
    :param vo: The VO to get DIDs from.
    :param session: The database session in use.
    :returns: A dictionary {pfn: {'scope': scope, 'name': name}}
    :raises RucioException: if some PFNs cannot be attributed to any RSE.
    """
    dict_rse = {}
    if not rse_id:
        scheme, dict_rse, unknown_replicas = get_pfn_to_rse(pfns, vo=vo, session=session)
        if unknown_replicas:
            # Raise a proper Rucio error instead of a bare Exception. RucioException
            # is still an Exception subclass, so existing broad handlers keep working.
            raise exception.RucioException('Cannot resolve some PFNs to an RSE: %s' % unknown_replicas)
    else:
        scheme = 'srm'
        dict_rse[rse_id] = pfns
    for rse_id in dict_rse:
        pfns = dict_rse[rse_id]
        rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session)
        pfndict = {}
        proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme)
        if rse_info['deterministic']:
            # Deterministic RSE: scope/name can be derived from the path alone.
            parsed_pfn = proto.parse_pfns(pfns=pfns)

            # WARNING : this part is ATLAS specific and must be changed
            for pfn in parsed_pfn:
                path = parsed_pfn[pfn]['path']
                if path.startswith('/user') or path.startswith('/group'):
                    scope = '%s.%s' % (path.split('/')[1], path.split('/')[2])
                    name = parsed_pfn[pfn]['name']
                elif path.startswith('/'):
                    scope = path.split('/')[1]
                    name = parsed_pfn[pfn]['name']
                else:
                    scope = path.split('/')[0]
                    name = parsed_pfn[pfn]['name']
                scope = InternalScope(scope, vo)
                yield {pfn: {'scope': scope, 'name': name}}
        else:
            # Non-deterministic RSE: look the paths up in the replica table.
            condition = []
            parsed_pfn = proto.parse_pfns(pfns=pfns)
            for pfn in parsed_pfn:
                path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name'])
                pfndict[path] = pfn
                condition.append(and_(models.RSEFileAssociation.path == path, models.RSEFileAssociation.rse_id == rse_id))
            for scope, name, pfn in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path).filter(or_(*condition)):
                yield {pfndict[pfn]: {'scope': scope, 'name': name}}
def _resolve_dids(dids, unavailable, ignore_availability, all_states, resolve_archives, session):
    """
    Resolve list of DIDs into a list of conditions.

    :param dids: The list of data identifiers (DIDs).
    :param unavailable: (deprecated) Also include unavailable replicas in the list.
    :param ignore_availability: Ignore the RSE blocklisting.
    :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary.
    :param resolve_archives: When set to true, find archives which contain the replicas.
    :param session: The database session in use.
    :returns: (file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica)
    """
    did_clause, dataset_clause, file_clause, constituent_clause = [], [], [], []
    # Accumulate all the dids which were requested explicitly (not via a container/dataset).
    # If any replicas for these dids will be found latter, the associated did will be removed from the list,
    # leaving, at the end, only the requested dids which didn't have any replicas at all.
    files_wo_replica = []
    # Deduplicate the input DIDs by converting each dict to a hashable tuple and back.
    for did in [dict(tupleized) for tupleized in set(tuple(item.items()) for item in dids)]:
        if 'type' in did and did['type'] in (DIDType.FILE, DIDType.FILE.value) or 'did_type' in did and did['did_type'] in (DIDType.FILE, DIDType.FILE.value):  # pylint: disable=no-member
            files_wo_replica.append({'scope': did['scope'], 'name': did['name']})
            file_clause.append(and_(models.RSEFileAssociation.scope == did['scope'],
                                    models.RSEFileAssociation.name == did['name']))
        else:
            # Type unknown or non-file: resolve it against the DID table below.
            did_clause.append(and_(models.DataIdentifier.scope == did['scope'],
                                   models.DataIdentifier.name == did['name']))

    if did_clause:
        for scope, name, did_type, constituent in session.query(models.DataIdentifier.scope,
                                                                models.DataIdentifier.name,
                                                                models.DataIdentifier.did_type,
                                                                models.DataIdentifier.constituent)\
                                                         .with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle')\
                                                         .filter(or_(*did_clause)):
            if resolve_archives and constituent:
                constituent_clause.append(and_(models.ConstituentAssociation.child_scope == scope,
                                               models.ConstituentAssociation.child_name == name))

            if did_type == DIDType.FILE:
                files_wo_replica.append({'scope': scope, 'name': name})
                file_clause.append(and_(models.RSEFileAssociation.scope == scope,
                                        models.RSEFileAssociation.name == name))

            elif did_type == DIDType.DATASET:
                dataset_clause.append(and_(models.DataIdentifierAssociation.scope == scope,
                                           models.DataIdentifierAssociation.name == name))

            else:  # Container
                # Walk the container tree breadth-first, collecting every dataset.
                content_query = session.query(models.DataIdentifierAssociation.child_scope,
                                              models.DataIdentifierAssociation.child_name,
                                              models.DataIdentifierAssociation.child_type)
                content_query = content_query.with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle')
                child_dids = [(scope, name)]
                while child_dids:
                    s, n = child_dids.pop()
                    for tmp_did in content_query.filter_by(scope=s, name=n):
                        if tmp_did.child_type == DIDType.DATASET:
                            dataset_clause.append(and_(models.DataIdentifierAssociation.scope == tmp_did.child_scope,
                                                       models.DataIdentifierAssociation.name == tmp_did.child_name))
                        else:
                            child_dids.append((tmp_did.child_scope, tmp_did.child_name))

    state_clause = None
    if not all_states:
        if not unavailable:
            state_clause = and_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)
        else:
            state_clause = or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE,
                               models.RSEFileAssociation.state == ReplicaState.UNAVAILABLE,
                               models.RSEFileAssociation.state == ReplicaState.COPYING)

    return file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica
def _pick_n_random(nrandom, generator):
"""
Select n random elements from the generator
"""
if not nrandom:
# pass-through the data unchanged
yield from generator
return
# A "reservoir sampling" algorithm:
# Copy the N first files from the generator. After that, following element may be picked to substitute
# one of the previously selected element with a probability which decreases as the number of encountered elements grows.
selected = []
i = 0
iterator = iter(generator)
try:
for _ in range(nrandom):
selected.append(next(iterator))
i += 1
while True:
element = next(iterator)
i += 1
index_to_substitute = random.randint(0, i)
if index_to_substitute < nrandom:
selected[index_to_substitute] = element
except StopIteration:
pass
for r in selected:
yield r
def _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session):
    """
    List file replicas for a list of datasets.

    Yields 13-tuples shaped like the merged stream consumed by _list_replicas:
    (scope, name, archive_scope, archive_name, bytes, md5, adler32, path,
    state, rse_id, rse, rse_type, volatile); archive_scope/archive_name are
    always None for dataset content.

    :param session: The database session in use.
    """
    if not dataset_clause:
        return
    replica_query = session.query(models.DataIdentifierAssociation.child_scope,
                                  models.DataIdentifierAssociation.child_name,
                                  models.DataIdentifierAssociation.bytes,
                                  models.DataIdentifierAssociation.md5,
                                  models.DataIdentifierAssociation.adler32,
                                  models.RSEFileAssociation.path,
                                  models.RSEFileAssociation.state,
                                  models.RSE.id,
                                  models.RSE.rse,
                                  models.RSE.rse_type,
                                  models.RSE.volatile).\
        with_hint(models.RSEFileAssociation,
                  text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)",
                  dialect_name='oracle').\
        outerjoin(models.RSEFileAssociation,
                  and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope,
                       models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)).\
        join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id).\
        filter(models.RSE.deleted == false()).\
        filter(or_(*dataset_clause)).\
        order_by(models.DataIdentifierAssociation.child_scope,
                 models.DataIdentifierAssociation.child_name)

    if not ignore_availability:
        # NOTE(review): availability values 4-7 presumably correspond to the
        # 'read' availability bit being set — confirm against the RSE model.
        replica_query = replica_query.filter(models.RSE.availability.in_((4, 5, 6, 7)))

    if state_clause is not None:
        replica_query = replica_query.filter(and_(state_clause))

    if rse_clause is not None:
        replica_query = replica_query.filter(or_(*rse_clause))

    if updated_after:
        replica_query = replica_query.filter(models.RSEFileAssociation.updated_at >= updated_after)

    for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.yield_per(500):
        # The two Nones fill the archive scope/name slots of the merged-stream tuple.
        yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile
def _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session):
    """
    List file replicas for archive constituents.

    Yields rows of (child_scope, child_name, archive_scope, archive_name, bytes,
    md5, adler32, path, state, rse_id, rse, rse_type, volatile). Every constituent
    for which a replica is found is removed from files_wo_replica (in place).

    :param files_wo_replica: list of {'scope', 'name'} dicts; mutated in place.
    :param session: The database session in use.
    """
    if not constituent_clause:
        return

    constituent_query = session.query(models.ConstituentAssociation.child_scope,
                                      models.ConstituentAssociation.child_name,
                                      models.ConstituentAssociation.scope,
                                      models.ConstituentAssociation.name,
                                      models.ConstituentAssociation.bytes,
                                      models.ConstituentAssociation.md5,
                                      models.ConstituentAssociation.adler32,
                                      models.RSEFileAssociation.path,
                                      models.RSEFileAssociation.state,
                                      models.RSE.id,
                                      models.RSE.rse,
                                      models.RSE.rse_type,
                                      models.RSE.volatile). \
        with_hint(models.RSEFileAssociation,
                  text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)",
                  dialect_name='oracle'). \
        with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \
        outerjoin(models.RSEFileAssociation,
                  and_(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope,
                       models.ConstituentAssociation.name == models.RSEFileAssociation.name)). \
        join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id). \
        filter(models.RSE.deleted == false()). \
        filter(or_(*constituent_clause)). \
        order_by(models.ConstituentAssociation.child_scope,
                 models.ConstituentAssociation.child_name)

    if not ignore_availability:
        constituent_query = constituent_query.filter(models.RSE.availability.in_((4, 5, 6, 7)))

    if state_clause is not None:
        constituent_query = constituent_query.filter(and_(state_clause))

    if rse_clause is not None:
        constituent_query = constituent_query.filter(or_(*rse_clause))

    if updated_after:
        constituent_query = constituent_query.filter(models.RSEFileAssociation.updated_at >= updated_after)

    for replica in constituent_query.yield_per(500):
        scope, name = replica[0], replica[1]
        # A replica was found for this constituent, so it is no longer
        # "without replica". (Replaces the previous `x in list and list.remove(x)`
        # short-circuit side-effect idiom with an explicit statement.)
        entry = {'scope': scope, 'name': name}
        if entry in files_wo_replica:
            files_wo_replica.remove(entry)
        yield replica
def _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session):
    """
    List file replicas for a list of files.

    Yields 13-tuples shaped like the merged stream consumed by _list_replicas;
    the two None entries fill the archive scope/name slots. Every file for which
    a replica is found is removed from files_wo_replica (in place).

    :param files_wo_replica: list of {'scope', 'name'} dicts; mutated in place.
    :param session: The database session in use.
    """
    if not file_clause:
        return

    # Query in chunks to keep the OR clause at a manageable size.
    for replica_condition in chunks(file_clause, 50):
        filters = [
            models.RSEFileAssociation.rse_id == models.RSE.id,
            models.RSE.deleted == false(),
            or_(*replica_condition),
        ]

        if not ignore_availability:
            filters.append(models.RSE.availability.in_((4, 5, 6, 7)))

        if state_clause is not None:
            filters.append(state_clause)

        if rse_clause:
            filters.append(or_(*rse_clause))

        if updated_after:
            filters.append(models.RSEFileAssociation.updated_at >= updated_after)

        replica_query = session.query(
            models.RSEFileAssociation.scope,
            models.RSEFileAssociation.name,
            models.RSEFileAssociation.bytes,
            models.RSEFileAssociation.md5,
            models.RSEFileAssociation.adler32,
            models.RSEFileAssociation.path,
            models.RSEFileAssociation.state,
            models.RSE.id,
            models.RSE.rse,
            models.RSE.rse_type,
            models.RSE.volatile,
        ) \
            .filter(and_(*filters)) \
            .order_by(models.RSEFileAssociation.scope, models.RSEFileAssociation.name) \
            .with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle')

        for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.all():
            # A replica exists, so this file is no longer "without replica".
            # (Replaces the previous `x in list and list.remove(x)` short-circuit
            # side-effect idiom with an explicit statement.)
            entry = {'scope': scope, 'name': name}
            if entry in files_wo_replica:
                files_wo_replica.remove(entry)
            yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile
def _list_files_wo_replicas(files_wo_replica, session):
    """
    Yield (scope, name, bytes, md5, adler32) for the requested file DIDs
    that ended up with no replica at all.

    :param files_wo_replica: list of {'scope', 'name'} dicts.
    :param session: The database session in use.
    """
    if not files_wo_replica:
        return

    ordered = sorted(files_wo_replica, key=lambda f: (f['scope'], f['name']))
    clauses = [and_(models.DataIdentifier.scope == entry['scope'],
                    models.DataIdentifier.name == entry['name'])
               for entry in ordered]

    query = session.query(models.DataIdentifier.scope,
                          models.DataIdentifier.name,
                          models.DataIdentifier.bytes,
                          models.DataIdentifier.md5,
                          models.DataIdentifier.adler32).\
        filter_by(did_type=DIDType.FILE).filter(or_(*clauses)).\
        with_hint(models.DataIdentifier, text="INDEX(DIDS DIDS_PK)", dialect_name='oracle')

    for scope, name, size, md5_sum, adler32_sum in query:
        yield scope, name, size, md5_sum, adler32_sum
def get_vp_endpoint():
    """
    Return the configured Virtual Placement (VP) server endpoint ('' if unset).

    Once VP is integrated in Rucio it won't be needed.
    """
    return config_get('virtual_placement', 'vp_endpoint', default='')
def get_multi_cache_prefix(cache_site, filename, logger=logging.log):
    """
    For a given cache site and filename, return the address of the cache node that
    should be prefixed.

    :param cache_site: Cache site
    :param filename: Filename
    :param logger: Logging callable; defaults to logging.log.
    :returns: Server address to prefix, or '' when no cache applies.
    """
    vp_endpoint = get_vp_endpoint()
    if not vp_endpoint:
        return ''

    x_caches = REGION.get('CacheSites')
    if x_caches is NO_VALUE:
        try:
            # NOTE(review): TLS verification is disabled here (verify=False) —
            # confirm this is acceptable for the VP endpoint.
            response = requests.get('{}/serverRanges'.format(vp_endpoint), verify=False)
            if response.ok:
                x_caches = response.json()
                REGION.set('CacheSites', x_caches)
            else:
                REGION.set('CacheSites', {'could not reload': ''})
                return ''
        except requests.exceptions.RequestException as error:
            # 'error' was previously bound as 're', shadowing the stdlib module.
            REGION.set('CacheSites', {'could not reload': ''})
            logger(logging.WARNING, 'In get_multi_cache_prefix, could not access {}. Exception:{}'.format(vp_endpoint, error))
            return ''

    if cache_site not in x_caches:
        return ''

    xcache_site = x_caches[cache_site]
    # Map the filename onto [0, 1) via the first 8 bytes of its sha256 digest,
    # then pick the server whose range contains that point.
    h = float(
        unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0]) / 2**64
    for irange in xcache_site['ranges']:
        if h < irange[1]:
            return xcache_site['servers'][irange[0]][0]
    return ''
def _list_replicas(dataset_clause, file_clause, state_clause, show_pfns,
                   schemes, files_wo_replica, rse_clause, client_location, domain,
                   sign_urls, signature_lifetime, constituent_clause, resolve_parents,
                   updated_after, filters, ignore_availability,
                   session):
    """
    Merge the dataset, file and archive-constituent replica streams (each
    already sorted by scope, name) and yield one dictionary per DID with its
    'rses', 'pfns', 'states' and (optionally) 'parents' entries filled in.
    Files that had no replica at all are emitted last with empty 'pfns'/'rses'.

    :param show_pfns: When true, construct PFNs for every replica via the RSE protocols.
    :param domain: None (automatic lan/wan selection), 'lan', 'wan' or 'all'.
    :param session: The database session in use.
    """
    # iterator which merges multiple sorted replica sources into a combine sorted result without loading everything into the memory
    replicas = heapq.merge(
        _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session),
        _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session),
        _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session),
        key=lambda t: (t[0], t[1]),  # sort by scope, name
    )

    # we need to retain knowledge of the original domain selection by the user
    # in case we have to loop over replicas with a potential outgoing proxy
    original_domain = deepcopy(domain)

    # find all RSEs local to the client's location in autoselect mode (i.e., when domain is None)
    local_rses = []
    if domain is None:
        if client_location and 'site' in client_location and client_location['site']:
            try:
                local_rses = [rse['id'] for rse in parse_expression('site=%s' % client_location['site'], filter=filters, session=session)]
            except Exception:
                pass  # do not hard fail if site cannot be resolved or is empty

    # 'file' accumulates the output dict for the current (scope, name);
    # the other dicts cache per-RSE info/protocols/paths across iterations.
    file, tmp_protocols, rse_info, pfns_cache = {}, {}, {}, {}

    for scope, name, archive_scope, archive_name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replicas:

        pfns = []

        # reset the domain selection to original user's choice (as this could get overwritten each iteration)
        domain = deepcopy(original_domain)

        if show_pfns and rse_id:
            if rse_id not in rse_info:
                rse_info[rse_id] = rsemgr.get_rse_info(rse_id=rse_id, session=session)

            # assign scheme priorities, and don't forget to exclude disabled protocols
            # 0 in RSE protocol definition = disabled, 1 = highest priority
            rse_info[rse_id]['priority_wan'] = {p['scheme']: p['domains']['wan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['wan']['read'] > 0}
            rse_info[rse_id]['priority_lan'] = {p['scheme']: p['domains']['lan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['lan']['read'] > 0}

            # select the lan door in autoselect mode, otherwise use the wan door
            if domain is None:
                domain = 'wan'
                if local_rses and rse_id in local_rses:
                    domain = 'lan'

            if rse_id not in tmp_protocols:

                rse_schemes = schemes or []
                if not rse_schemes:
                    try:
                        if domain == 'all':
                            rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id],
                                                                      operation='read',
                                                                      domain='wan')['scheme'])
                            rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id],
                                                                      operation='read',
                                                                      domain='lan')['scheme'])
                        else:
                            rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id],
                                                                      operation='read',
                                                                      domain=domain)['scheme'])
                    except exception.RSEProtocolNotSupported:
                        pass  # no need to be verbose
                    except Exception:
                        print(format_exc())

                # constituents inside archives are read through xroot
                if archive_scope and archive_name and 'root' not in rse_schemes:
                    rse_schemes.append('root')

                protocols = []
                for s in rse_schemes:
                    try:
                        if domain == 'all':
                            protocols.append(('lan', rsemgr.create_protocol(rse_settings=rse_info[rse_id],
                                                                            operation='read',
                                                                            scheme=s,
                                                                            domain='lan'),
                                              rse_info[rse_id]['priority_lan'][s]))
                            protocols.append(('wan', rsemgr.create_protocol(rse_settings=rse_info[rse_id],
                                                                            operation='read',
                                                                            scheme=s,
                                                                            domain='wan'),
                                              rse_info[rse_id]['priority_wan'][s]))
                        else:
                            protocols.append((domain, rsemgr.create_protocol(rse_settings=rse_info[rse_id],
                                                                             operation='read',
                                                                             scheme=s,
                                                                             domain=domain),
                                              rse_info[rse_id]['priority_%s' % domain][s]))
                    except exception.RSEProtocolNotSupported:
                        pass  # no need to be verbose
                    except Exception:
                        print(format_exc())

                tmp_protocols[rse_id] = protocols

            # get pfns
            for tmp_protocol in tmp_protocols[rse_id]:
                # If the current "replica" is a constituent inside an archive, we must construct the pfn for the
                # parent (archive) file and append the xrdcl.unzip query string to it.
                if archive_scope and archive_name:
                    t_scope = archive_scope
                    t_name = archive_name
                else:
                    t_scope = scope
                    t_name = name
                protocol = tmp_protocol[1]
                if 'determinism_type' in protocol.attributes:  # PFN is cachable
                    try:
                        path = pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)]
                    except KeyError:  # No cache entry scope:name found for this protocol
                        path = protocol._get_path(t_scope, t_name)
                        pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] = path

                try:
                    pfn = list(protocol.lfns2pfns(lfns={'scope': t_scope.external,
                                                        'name': t_name,
                                                        'path': path}).values())[0]

                    # do we need to sign the URLs?
                    if sign_urls and protocol.attributes['scheme'] == 'https':
                        service = get_rse_attribute('sign_url',
                                                    rse_id=rse_id,
                                                    session=session)
                        if service and isinstance(service, list):
                            pfn = get_signed_url(rse_id=rse_id, service=service[0], operation='read', url=pfn, lifetime=signature_lifetime)

                    # server side root proxy handling if location is set.
                    # supports root and http destinations
                    # cannot be pushed into protocols because we need to lookup rse attributes.
                    # ultra-conservative implementation.
                    if domain == 'wan' and protocol.attributes['scheme'] in ['root', 'http', 'https'] and client_location:

                        if 'site' in client_location and client_location['site']:
                            # is the RSE site-configured?
                            rse_site_attr = get_rse_attribute('site', rse_id, session=session)
                            replica_site = ['']
                            if isinstance(rse_site_attr, list) and rse_site_attr:
                                replica_site = rse_site_attr[0]

                            # does it match with the client? if not, it's an outgoing connection
                            # therefore the internal proxy must be prepended
                            if client_location['site'] != replica_site:
                                cache_site = config_get('clientcachemap', client_location['site'], default='', session=session)
                                if cache_site != '':
                                    # print('client', client_location['site'], 'has cache:', cache_site)
                                    # print('filename', name)
                                    selected_prefix = get_multi_cache_prefix(cache_site, t_name)
                                    if selected_prefix:
                                        pfn = 'root://' + selected_prefix + '//' + pfn.replace('davs://', 'root://')
                                else:
                                    # print('site:', client_location['site'], 'has no cache')
                                    # print('lets check if it has defined an internal root proxy ')
                                    root_proxy_internal = config_get('root-proxy-internal',    # section
                                                                     client_location['site'],  # option
                                                                     default='',  # empty string to circumvent exception
                                                                     session=session)

                                    if root_proxy_internal:
                                        # TODO: XCache does not seem to grab signed URLs. Doublecheck with XCache devs.
                                        # For now -> skip prepending XCache for GCS.
                                        if 'storage.googleapis.com' in pfn or 'atlas-google-cloud.cern.ch' in pfn or 'amazonaws.com' in pfn:
                                            pass  # ATLAS HACK
                                        else:
                                            # don't forget to mangle gfal-style davs URL into generic https URL
                                            pfn = 'root://' + root_proxy_internal + '//' + pfn.replace('davs://', 'https://')

                    # PFNs don't have concepts, therefore quickly encapsulate in a tuple
                    # ('pfn', 'domain', 'priority', 'client_extract')
                    t_domain = tmp_protocol[0]
                    t_priority = tmp_protocol[2]
                    t_client_extract = False
                    if archive_scope and archive_name:
                        t_domain = 'zip'
                        pfn = add_url_query(pfn, {'xrdcl.unzip': name})
                        if protocol.attributes['scheme'] == 'root':
                            # xroot supports downloading files directly from inside an archive. Disable client_extract and prioritize xroot.
                            t_client_extract = False
                            t_priority = -1
                        else:
                            t_client_extract = True

                    pfns.append((pfn, t_domain, t_priority, t_client_extract))
                except Exception:
                    # never end up here
                    print(format_exc())

                if protocol.attributes['scheme'] == 'srm':
                    try:
                        file['space_token'] = protocol.attributes['extended_attributes']['space_token']
                    except KeyError:
                        file['space_token'] = None

        if 'scope' in file and 'name' in file:
            if file['scope'] == scope and file['name'] == name:
                # another replica row for the same (scope, name): accumulate it
                # extract properly the pfn from the tuple
                file['rses'][rse_id] += list(set([tmp_pfn[0] for tmp_pfn in pfns]))
                file['states'][rse_id] = str(state.name if state else state)

                if resolve_parents:
                    file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name'])
                                       for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)]

                for tmp_pfn in pfns:
                    file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id,
                                                'rse': rse,
                                                'type': str(rse_type.name),
                                                'volatile': volatile,
                                                'domain': tmp_pfn[1],
                                                'priority': tmp_pfn[2],
                                                'client_extract': tmp_pfn[3]}
            else:
                # a new (scope, name) has started: flush the accumulated file
                if resolve_parents:
                    file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name'])
                                       for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)]

                # quick exit, but don't forget to set the total order for the priority
                # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically
                # and use 1-indexing to be compatible with metalink
                tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']])
                for i in range(0, len(tmp)):
                    file['pfns'][tmp[i][2]]['priority'] = i + 1
                file['rses'] = {}
                rse_pfns = []
                for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]:
                    rse_pfns.append((t_rse, t_priority, t_pfn))
                rse_pfns = sorted(rse_pfns)
                for t_rse, t_priority, t_pfn in rse_pfns:
                    if t_rse in file['rses']:
                        file['rses'][t_rse].append(t_pfn)
                    else:
                        file['rses'][t_rse] = [t_pfn]
                yield file
                file = {}

        if not ('scope' in file and 'name' in file):
            # start accumulating a fresh file dict for this (scope, name)
            file['scope'], file['name'] = scope, name
            file['bytes'], file['md5'], file['adler32'] = bytes, md5, adler32
            file['pfns'], file['rses'] = {}, defaultdict(list)
            file['states'] = {rse_id: str(state.name if state else state)}

            if resolve_parents:
                file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name'])
                                   for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)]

            if rse_id:
                # extract properly the pfn from the tuple
                file['rses'][rse_id] = list(set([tmp_pfn[0] for tmp_pfn in pfns]))
                for tmp_pfn in pfns:
                    file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id,
                                                'rse': rse,
                                                'type': str(rse_type.name),
                                                'volatile': volatile,
                                                'domain': tmp_pfn[1],
                                                'priority': tmp_pfn[2],
                                                'client_extract': tmp_pfn[3]}

    # set the total order for the priority
    # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically
    # and use 1-indexing to be compatible with metalink
    if 'pfns' in file:
        tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']])
        for i in range(0, len(tmp)):
            file['pfns'][tmp[i][2]]['priority'] = i + 1

    if 'scope' in file and 'name' in file:
        # flush the very last accumulated file dict
        file['rses'] = {}

        # don't forget to resolve parents for the last replica
        if resolve_parents:
            file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name'])
                               for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)]

        # also sort the pfns inside the rse structure
        rse_pfns = []
        for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]:
            rse_pfns.append((t_rse, t_priority, t_pfn))
        rse_pfns = sorted(rse_pfns)

        for t_rse, t_priority, t_pfn in rse_pfns:
            if t_rse in file['rses']:
                file['rses'][t_rse].append(t_pfn)
            else:
                file['rses'][t_rse] = [t_pfn]

        yield file
        file = {}

    # finally emit the requested files for which no replica was found at all
    for scope, name, bytes, md5, adler32 in _list_files_wo_replicas(files_wo_replica, session):
        yield {
            'scope': scope,
            'name': name,
            'bytes': bytes,
            'md5': md5,
            'adler32': adler32,
            'pfns': {},
            'rses': defaultdict(list)
        }
@stream_session
def list_replicas(dids, schemes=None, unavailable=False, request_id=None,
                  ignore_availability=True, all_states=False, pfns=True,
                  rse_expression=None, client_location=None, domain=None,
                  sign_urls=False, signature_lifetime=None, resolve_archives=True,
                  resolve_parents=False, nrandom=None,
                  updated_after=None,
                  session=None):
    """
    List file replicas for a list of data identifiers (DIDs).

    :param dids: The list of data identifiers (DIDs).
    :param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...)
    :param unavailable: (deprecated) Also include unavailable replicas in the list.
    :param request_id: ID associated with the request for debugging.
    :param ignore_availability: Ignore the RSE blocklisting.
    :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary.
    :param pfns: When set to true, include the PFNs for each replica.
    :param rse_expression: The RSE expression to restrict list_replicas on a set of RSEs.
    :param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'}
    :param domain: The network domain for the call, either None, 'wan' or 'lan'. None is automatic mode, 'all' is both ['lan','wan']
    :param sign_urls: If set, will sign the PFNs if necessary.
    :param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN.
    :param resolve_archives: When set to true, find archives which contain the replicas.
    :param resolve_parents: When set to true, find all parent datasets which contain the replicas.
    :param nrandom: When set, return at most this many replicas, selected at random.
    :param updated_after: datetime (UTC time), only return replicas updated after this time
    :param session: The database session in use.
    """
    # VO filter for RSE-expression resolution, derived from the first DID.
    # (Renamed from 'filter' to avoid shadowing the builtin.)
    if dids:
        did_filter = {'vo': dids[0]['scope'].vo}
    else:
        did_filter = {'vo': 'def'}

    file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica = _resolve_dids(
        dids=dids,
        unavailable=unavailable,
        ignore_availability=ignore_availability,
        all_states=all_states,
        resolve_archives=resolve_archives,
        session=session
    )

    rse_clause = []
    if rse_expression:
        for rse in parse_expression(expression=rse_expression, filter=did_filter, session=session):
            rse_clause.append(models.RSEFileAssociation.rse_id == rse['id'])

    yield from _pick_n_random(
        nrandom,
        _list_replicas(dataset_clause, file_clause, state_clause, pfns,
                       schemes, files_wo_replica, rse_clause, client_location, domain,
                       sign_urls, signature_lifetime, constituent_clause, resolve_parents,
                       updated_after, did_filter, ignore_availability,
                       session)
    )
@transactional_session
def __bulk_add_new_file_dids(files, account, dataset_meta=None, session=None):
    """
    Bulk add new dids.

    :param files: the list of new files.
    :param account: The account owner.
    :param dataset_meta: Metadata to set on every new file DID.
    :param session: The database session in use.
    :returns: True is successful.
    """
    for file in files:
        new_did = models.DataIdentifier(scope=file['scope'], name=file['name'],
                                        account=file.get('account') or account,
                                        did_type=DIDType.FILE, bytes=file['bytes'],
                                        md5=file.get('md5'), adler32=file.get('adler32'),
                                        is_new=None)
        new_did.save(session=session, flush=False)

        if 'meta' in file and file['meta']:
            rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=file['meta'], recursive=False, session=session)
        if dataset_meta:
            rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=dataset_meta, recursive=False, session=session)
    try:
        session.flush()
    except IntegrityError as error:
        # Map per-dialect foreign-key violations onto ScopeNotFound.
        # (A duplicated DIDS_SCOPE_FK pattern that was tested twice has been removed.)
        if match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \
                or match('.*IntegrityError.*FOREIGN KEY constraint failed.*', error.args[0]) \
                or match('.*IntegrityError.*1452.*Cannot add or update a child row: a foreign key constraint fails.*', error.args[0]) \
                or match('.*IntegrityError.*insert or update on table.*violates foreign key constraint "DIDS_SCOPE_FK".*', error.args[0]) \
                or match('.*ForeignKeyViolation.*insert or update on table.*violates foreign key constraint.*', error.args[0]) \
                or match('.*IntegrityError.*foreign key constraints? failed.*', error.args[0]):
            raise exception.ScopeNotFound('Scope not found!')
        raise exception.RucioException(error.args)
    except DatabaseError as error:
        if match('.*(DatabaseError).*ORA-14400.*inserted partition key does not map to any partition.*', error.args[0]):
            raise exception.ScopeNotFound('Scope not found!')
        raise exception.RucioException(error.args)
    except FlushError as error:
        if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]):
            raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!')
        raise exception.RucioException(error.args)
    return True
@transactional_session
def __bulk_add_file_dids(files, account, dataset_meta=None, session=None):
    """
    Bulk add new dids.

    Looks up which of the given files already exist as file DIDs, registers the
    missing ones, and returns the union.

    :param files: the list of files.
    :param account: The account owner.
    :param dataset_meta: Metadata to set on every newly created file DID.
    :param session: The database session in use.
    :returns: list of the newly added files plus the already-existing DID rows.
    """
    condition = []
    for f in files:
        condition.append(and_(models.DataIdentifier.scope == f['scope'], models.DataIdentifier.name == f['name'], models.DataIdentifier.did_type == DIDType.FILE))

    q = session.query(models.DataIdentifier.scope,
                      models.DataIdentifier.name,
                      models.DataIdentifier.bytes,
                      models.DataIdentifier.adler32,
                      models.DataIdentifier.md5).with_hint(models.DataIdentifier, "INDEX(dids DIDS_PK)", 'oracle').filter(or_(*condition))
    available_files = [{column: getattr(row, column) for column in row._fields} for row in q]

    # Set-based lookup instead of the previous O(len(files) * len(available_files))
    # nested scan over (scope, name) pairs.
    existing_keys = {(f['scope'], f['name']) for f in available_files}
    new_files = [file for file in files if (file['scope'], file['name']) not in existing_keys]

    __bulk_add_new_file_dids(files=new_files, account=account,
                             dataset_meta=dataset_meta,
                             session=session)
    return new_files + available_files
def tombstone_from_delay(tombstone_delay):
    """
    Compute the tombstone timestamp corresponding to a tombstone delay.

    :param tombstone_delay: The delay before a replica becomes eligible for
                            deletion. May be a timedelta, anything convertible
                            to an integer number of seconds, or None.
    :returns: A datetime in the future (utcnow + delay), datetime(1970, 1, 1)
              for negative delays (epoch tombstone: delete as soon as possible),
              or None when the delay is unset, zero, or unparsable.
    """
    # Tolerate None (and any other falsy value) for tombstone_delay
    if not tombstone_delay:
        return None

    if not isinstance(tombstone_delay, timedelta):
        try:
            tombstone_delay = timedelta(seconds=int(tombstone_delay))
        except (TypeError, ValueError):
            # BUGFIX: int() raises TypeError (not only ValueError) for
            # non-numeric, non-string inputs; treat both as "no delay".
            return None

    if not tombstone_delay:
        return None

    # Negative delay means immediate deletion eligibility: epoch tombstone
    if tombstone_delay < timedelta(0):
        return datetime(1970, 1, 1)

    return datetime.utcnow() + tombstone_delay
@transactional_session
def __bulk_add_replicas(rse_id, files, account, session=None):
    """
    Bulk add new file replicas, skipping those already present on the RSE.

    :param rse_id: the RSE id.
    :param files: the list of files.
    :param account: The account owner.
    :param session: The database session in use.
    :returns: Tuple (number of new replicas, total bytes of new replicas).
    """
    nbfiles, total_bytes = 0, 0

    # Check for the replicas already available
    condition = [and_(models.RSEFileAssociation.scope == f['scope'],
                      models.RSEFileAssociation.name == f['name'],
                      models.RSEFileAssociation.rse_id == rse_id)
                 for f in files]
    query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\
        with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\
        filter(or_(*condition))
    available_replicas = [{column: getattr(row, column) for column in row._fields} for row in query]

    # RSE-level default tombstone, used when the file itself carries none
    default_tombstone_delay = next(iter(get_rse_attribute('tombstone_delay', rse_id=rse_id, session=session)), None)
    default_tombstone = tombstone_from_delay(default_tombstone_delay)

    new_replicas = []
    for file in files:
        already_present = any(file['scope'] == replica['scope']
                              and file['name'] == replica['name']
                              and rse_id == replica['rse_id']
                              for replica in available_replicas)
        if already_present:
            continue
        nbfiles += 1
        total_bytes += file['bytes']
        new_replicas.append({'rse_id': rse_id, 'scope': file['scope'],
                             'name': file['name'], 'bytes': file['bytes'],
                             'path': file.get('path'),
                             'state': ReplicaState(file.get('state', 'A')),
                             'md5': file.get('md5'), 'adler32': file.get('adler32'),
                             'lock_cnt': file.get('lock_cnt', 0),
                             'tombstone': file.get('tombstone') or default_tombstone})
    try:
        if new_replicas:
            session.bulk_insert_mappings(models.RSEFileAssociation, new_replicas)
        session.flush()
        return nbfiles, total_bytes
    except IntegrityError as error:
        # Map backend-specific duplicate-key errors onto a single exception type
        if match('.*IntegrityError.*ORA-00001: unique constraint .*REPLICAS_PK.*violated.*', error.args[0]) \
           or match('.*IntegrityError.*1062.*Duplicate entry.*', error.args[0]) \
           or match('.*IntegrityError.*columns? rse_id.*scope.*name.*not unique.*', error.args[0]) \
           or match('.*IntegrityError.*duplicate key value violates unique constraint.*', error.args[0]):
            raise exception.Duplicate("File replica already exists!")
        raise exception.RucioException(error.args)
    except DatabaseError as error:
        raise exception.RucioException(error.args)
@transactional_session
def add_replicas(rse_id, files, account, ignore_availability=True,
                 dataset_meta=None, session=None):
    """
    Bulk add file replicas.

    :param rse_id: The RSE id.
    :param files: The list of files.
    :param account: The account owner.
    :param ignore_availability: Ignore the RSE blocklisting.
    :param dataset_meta: Metadata of the parent dataset, if any.
    :param session: The database session in use.
    :returns: True is successful.
    """
    def _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=None):
        # Compute the PFNs Rucio expects for these LFNs with the given protocol.
        # BUGFIX: the original ignored its `operation` parameter and always
        # passed 'write'; all current call sites use 'write', so behavior is
        # unchanged for them.
        p = rsemgr.create_protocol(rse_settings=rse_settings, operation=operation, scheme=scheme, domain=domain, protocol_attr=protocol_attr)
        expected_pfns = p.lfns2pfns(lfns)
        return clean_surls(expected_pfns.values())

    replica_rse = get_rse(rse_id=rse_id, session=session)

    if replica_rse.volatile is True:
        raise exception.UnsupportedOperation('Cannot add replicas on volatile RSE %s ' % (replica_rse.rse))

    # Availability bit 2 corresponds to 'write'
    if not (replica_rse.availability & 2) and not ignore_availability:
        raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable for writing' % replica_rse.rse)

    replicas = __bulk_add_file_dids(files=files, account=account,
                                    dataset_meta=dataset_meta,
                                    session=session)

    pfns, scheme = {}, None  # {scheme: [pfns], scheme: [pfns]}
    for file in files:
        if 'pfn' not in file:
            if not replica_rse.deterministic:
                raise exception.UnsupportedOperation('PFN needed for this (non deterministic) RSE %s ' % (replica_rse.rse))
        else:
            scheme = file['pfn'].split(':')[0]
            pfns.setdefault(scheme, []).append(file['pfn'])

    if pfns:
        rse_settings = rsemgr.get_rse_info(rse_id=rse_id, session=session)
        for scheme in pfns.keys():
            if not replica_rse.deterministic:
                # Non-deterministic RSE: derive the storage path of each file
                # from the provided PFN
                p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme)
                pfns[scheme] = p.parse_pfns(pfns=pfns[scheme])
                for file in files:
                    if file['pfn'].startswith(scheme):
                        tmp = pfns[scheme][file['pfn']]
                        file['path'] = ''.join([tmp['path'], tmp['name']])
            else:
                # Check that the pfns match to the expected pfns
                lfns = [{'scope': i['scope'].external, 'name': i['name']} for i in files if i['pfn'].startswith(scheme)]
                pfns[scheme] = clean_surls(pfns[scheme])

                # Check wan first
                found_on_wan = False
                available_wan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='wan')
                expected_pfns_wan = None
                for protocol_attr in available_wan_protocols:
                    pfns_wan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=protocol_attr)
                    # Remember the first non-empty wan expectation for registration
                    if not expected_pfns_wan and pfns_wan_buffer:
                        expected_pfns_wan = pfns_wan_buffer
                    found_on_wan = found_on_wan or (pfns_wan_buffer == pfns[scheme])
                    if found_on_wan:
                        break

                if not found_on_wan:
                    # Check lan
                    found_on_lan = False
                    available_lan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='lan')
                    for protocol_attr in available_lan_protocols:
                        pfns_lan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='lan', protocol_attr=protocol_attr)
                        found_on_lan = found_on_lan or (pfns_lan_buffer == pfns[scheme])
                        if found_on_lan:
                            break

                    # BUGFIX: the original tested `found_on_lan == pfns[scheme]`,
                    # comparing a bool against a list (always False), so PFNs
                    # matching only a lan protocol were always rejected.
                    if found_on_lan:
                        # Registration always with wan
                        pfns[scheme] = expected_pfns_wan
                    else:
                        raise exception.InvalidPath('One of the PFNs provided does not match the Rucio expected PFN : got %s, expected %s (%s)' % (str(pfns), str(expected_pfns_wan), str(lfns)))

    nbfiles, bytes = __bulk_add_replicas(rse_id=rse_id, files=files, account=account, session=session)
    increase(rse_id=rse_id, files=nbfiles, bytes=bytes, session=session)
    return replicas
@transactional_session
def add_replica(rse_id, scope, name, bytes, account, adler32=None, md5=None, dsn=None, pfn=None, meta=None, rules=None, tombstone=None, session=None):
    """
    Add File replica.

    :param rse_id: the rse id.
    :param scope: the scope name.
    :param name: The data identifier name.
    :param bytes: the size of the file.
    :param account: The account owner.
    :param adler32: The adler32 checksum.
    :param md5: The md5 checksum.
    :param dsn: The dataset name (accepted for interface compatibility; unused here).
    :param pfn: Physical file name (for nondeterministic rse).
    :param meta: Meta-data associated with the file. Represented as key/value pairs in a dictionary.
    :param rules: Replication rules associated with the file. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ].
    :param tombstone: If True, create replica with a tombstone.
    :param session: The database session in use.
    :returns: True is successful.
    """
    # BUGFIX: rules previously defaulted to a shared mutable list ([]), which
    # could leak mutations between calls; use a None sentinel instead.
    if meta is None:
        meta = {}
    if rules is None:
        rules = []

    file = {'scope': scope, 'name': name, 'bytes': bytes, 'adler32': adler32, 'md5': md5, 'meta': meta, 'rules': rules, 'tombstone': tombstone}
    if pfn:
        file['pfn'] = pfn
    return add_replicas(rse_id=rse_id, files=[file, ], account=account, session=session)
@transactional_session
def delete_replicas(rse_id, files, ignore_availability=True, session=None):
    """
    Delete file replicas.

    :param rse_id: the rse id.
    :param files: the list of files to delete.
    :param ignore_availability: Ignore the RSE blocklisting.
    :param session: The database session in use.
    :raises ResourceTemporaryUnavailable: if the RSE is blocklisted for deletion.
    :raises ReplicaNotFound: if not all requested replicas were deleted.
    """
    replica_rse = get_rse(rse_id=rse_id, session=session)

    # Availability bit 1 corresponds to 'delete'.
    # BUGFIX: the message was built from adjacent string literals with no
    # separating space, yielding "temporary unavailablefor deleting".
    if not (replica_rse.availability & 1) and not ignore_availability:
        raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable '
                                                     'for deleting' % replica_rse.rse)

    replica_condition, src_condition = [], []
    for file in files:
        replica_condition.append(
            and_(models.RSEFileAssociation.scope == file['scope'],
                 models.RSEFileAssociation.name == file['name']))

        src_condition.append(
            and_(models.Source.scope == file['scope'],
                 models.Source.name == file['name'],
                 models.Source.rse_id == rse_id))

    delta, bytes, rowcount = 0, 0, 0

    # WARNING : This should not be necessary since that would mean the replica is used as a source.
    for chunk in chunks(src_condition, 10):
        rowcount = session.query(models.Source). \
            filter(or_(*chunk)). \
            delete(synchronize_session=False)

    # The source deletions above are not counted against len(files); restart
    # the counter for the replica deletions, which must match exactly.
    rowcount = 0
    for chunk in chunks(replica_condition, 10):
        # Accumulate file count and byte size for the RSE counter update below
        for (scope, name, rid, replica_bytes) in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes). \
                with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle').filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*chunk)):
            bytes += replica_bytes
            delta += 1

        rowcount += session.query(models.RSEFileAssociation). \
            filter(models.RSEFileAssociation.rse_id == rse_id). \
            filter(or_(*chunk)). \
            delete(synchronize_session=False)

    if rowcount != len(files):
        raise exception.ReplicaNotFound("One or several replicas don't exist.")

    __cleanup_after_replica_deletion(rse_id=rse_id, files=files, session=session)

    # Decrease RSE counter
    decrease(rse_id=rse_id, files=delta, bytes=bytes, session=session)
@transactional_session
def __cleanup_after_replica_deletion(rse_id, files, session=None):
    """
    Perform update of collections/archive associations/dids after the removal of their replicas

    :param rse_id: the rse id
    :param files: list of files whose replica got deleted
    :param session: The database session in use.
    """
    parent_condition, did_condition = [], []
    clt_replica_condition, dst_replica_condition = [], []
    incomplete_condition, messages, clt_is_not_archive_condition, archive_contents_condition = [], [], [], []
    for file in files:
        # Schedule update of all collections containing this file and having a collection replica in the RSE
        dst_replica_condition.append(
            and_(models.DataIdentifierAssociation.child_scope == file['scope'],
                 models.DataIdentifierAssociation.child_name == file['name'],
                 exists(select([1]).prefix_with("/*+ INDEX(COLLECTION_REPLICAS COLLECTION_REPLICAS_PK) */", dialect='oracle')).where(
                     and_(models.CollectionReplica.scope == models.DataIdentifierAssociation.scope,
                          models.CollectionReplica.name == models.DataIdentifierAssociation.name,
                          models.CollectionReplica.rse_id == rse_id))))

        # If the file doesn't have any replicas anymore, we should perform cleanups of objects
        # related to this file. However, if the file is "lost", it's removal wasn't intentional,
        # so we want to skip deleting the metadata here. Perform cleanups:

        # 1) schedule removal of this file from all parent datasets
        parent_condition.append(
            and_(models.DataIdentifierAssociation.child_scope == file['scope'],
                 models.DataIdentifierAssociation.child_name == file['name'],
                 ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where(
                     and_(models.DataIdentifier.scope == file['scope'],
                          models.DataIdentifier.name == file['name'],
                          models.DataIdentifier.availability == DIDAvailability.LOST)),
                 ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where(
                     and_(models.RSEFileAssociation.scope == file['scope'],
                          models.RSEFileAssociation.name == file['name'])),
                 ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where(
                     and_(models.ConstituentAssociation.child_scope == file['scope'],
                          models.ConstituentAssociation.child_name == file['name']))))

        # 2) schedule removal of this file from the DID table
        did_condition.append(
            and_(models.DataIdentifier.scope == file['scope'],
                 models.DataIdentifier.name == file['name'],
                 models.DataIdentifier.availability != DIDAvailability.LOST,
                 ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where(
                     and_(models.RSEFileAssociation.scope == file['scope'],
                          models.RSEFileAssociation.name == file['name'])),
                 ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where(
                     and_(models.ConstituentAssociation.child_scope == file['scope'],
                          models.ConstituentAssociation.child_name == file['name']))))

        # 3) if the file is an archive, schedule cleanup on the files from inside the archive
        archive_contents_condition.append(
            and_(models.ConstituentAssociation.scope == file['scope'],
                 models.ConstituentAssociation.name == file['name'],
                 ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where(
                     and_(models.DataIdentifier.scope == file['scope'],
                          models.DataIdentifier.name == file['name'],
                          models.DataIdentifier.availability == DIDAvailability.LOST)),
                 ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where(
                     and_(models.RSEFileAssociation.scope == file['scope'],
                          models.RSEFileAssociation.name == file['name']))))

    # Get all collection_replicas at RSE, insert them into UpdatedCollectionReplica
    if dst_replica_condition:
        for chunk in chunks(dst_replica_condition, 10):
            query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).\
                filter(or_(*chunk)).\
                distinct()

            for parent_scope, parent_name in query:
                models.UpdatedCollectionReplica(scope=parent_scope,
                                                name=parent_name,
                                                did_type=DIDType.DATASET,
                                                rse_id=rse_id).\
                    save(session=session, flush=False)

    # Delete did from the content for the last did
    while parent_condition:
        child_did_condition, tmp_parent_condition = [], []
        for chunk in chunks(parent_condition, 10):
            query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name,
                                  models.DataIdentifierAssociation.did_type,
                                  models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name).\
                filter(or_(*chunk))
            for parent_scope, parent_name, did_type, child_scope, child_name in query:
                # Schedule removal of child file/dataset/container from the parent dataset/container
                child_did_condition.append(
                    and_(models.DataIdentifierAssociation.scope == parent_scope,
                         models.DataIdentifierAssociation.name == parent_name,
                         models.DataIdentifierAssociation.child_scope == child_scope,
                         models.DataIdentifierAssociation.child_name == child_name))

                # Schedule setting is_archive = False on parents which don't have any children with is_archive == True anymore
                clt_is_not_archive_condition.append(
                    and_(models.DataIdentifierAssociation.scope == parent_scope,
                         models.DataIdentifierAssociation.name == parent_name,
                         exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where(
                             and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.scope,
                                  models.DataIdentifier.name == models.DataIdentifierAssociation.name,
                                  models.DataIdentifier.is_archive == true())),
                         ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where(
                             and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.child_scope,
                                  models.DataIdentifier.name == models.DataIdentifierAssociation.child_name,
                                  models.DataIdentifier.is_archive == true()))))

                # If the parent dataset/container becomes empty as a result of the child removal
                # (it was the last children), metadata cleanup has to be done:
                #
                # 1) Schedule to remove the replicas of this empty collection
                clt_replica_condition.append(
                    and_(models.CollectionReplica.scope == parent_scope,
                         models.CollectionReplica.name == parent_name,
                         exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where(
                             and_(models.DataIdentifier.scope == parent_scope,
                                  models.DataIdentifier.name == parent_name,
                                  models.DataIdentifier.is_open == False)),  # NOQA
                         ~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where(
                             and_(models.DataIdentifierAssociation.scope == parent_scope,
                                  models.DataIdentifierAssociation.name == parent_name))))

                # 2) Schedule removal of this empty collection from its own parent collections
                tmp_parent_condition.append(
                    and_(models.DataIdentifierAssociation.child_scope == parent_scope,
                         models.DataIdentifierAssociation.child_name == parent_name,
                         ~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where(
                             and_(models.DataIdentifierAssociation.scope == parent_scope,
                                  models.DataIdentifierAssociation.name == parent_name))))

                # 3) Schedule removal of the entry from the DIDs table
                did_condition.append(
                    and_(models.DataIdentifier.scope == parent_scope,
                         models.DataIdentifier.name == parent_name,
                         models.DataIdentifier.is_open == False,  # NOQA
                         ~exists([1]).where(
                             and_(models.DataIdentifierAssociation.child_scope == parent_scope,
                                  models.DataIdentifierAssociation.child_name == parent_name)),
                         ~exists([1]).where(
                             and_(models.DataIdentifierAssociation.scope == parent_scope,
                                  models.DataIdentifierAssociation.name == parent_name))))

        if child_did_condition:
            # get the list of modified parent scope, name
            for chunk in chunks(child_did_condition, 10):
                modifieds = session.query(models.DataIdentifierAssociation.scope,
                                          models.DataIdentifierAssociation.name,
                                          models.DataIdentifierAssociation.did_type).\
                    distinct().\
                    with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle').\
                    filter(or_(*chunk)).\
                    filter(exists(select([1]).
                                  prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).
                           where(and_(models.DataIdentifierAssociation.scope == models.DataIdentifier.scope,
                                      models.DataIdentifierAssociation.name == models.DataIdentifier.name,
                                      or_(models.DataIdentifier.complete == true(),
                                          models.DataIdentifier.complete.is_(None)))))
                # BUGFIX: the NULL-complete test above was written as
                # `models.DataIdentifier.complete is None`, a Python identity
                # check on the Column object (constant False), not the intended
                # SQL "IS NULL"; .is_(None) emits the IS NULL comparison.
                for parent_scope, parent_name, parent_did_type in modifieds:
                    message = {'scope': parent_scope,
                               'name': parent_name,
                               'did_type': parent_did_type,
                               'event_type': 'INCOMPLETE'}
                    if message not in messages:
                        messages.append(message)
                        incomplete_condition.append(
                            and_(models.DataIdentifier.scope == parent_scope,
                                 models.DataIdentifier.name == parent_name,
                                 models.DataIdentifier.did_type == parent_did_type))

            for chunk in chunks(child_did_condition, 10):
                rucio.core.did.insert_content_history(content_clause=chunk, did_created_at=None, session=session)
                session.query(models.DataIdentifierAssociation).\
                    filter(or_(*chunk)).\
                    delete(synchronize_session=False)

        parent_condition = tmp_parent_condition

    for chunk in chunks(clt_replica_condition, 10):
        session.query(models.CollectionReplica).\
            filter(or_(*chunk)).\
            delete(synchronize_session=False)

    # Update incomplete state
    for chunk in chunks(incomplete_condition, 10):
        session.query(models.DataIdentifier).\
            with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\
            filter(or_(*chunk)).\
            filter(models.DataIdentifier.complete != false()).\
            update({'complete': False}, synchronize_session=False)

    # delete empty dids
    # NOTE(review): rebinding `messages` here discards the INCOMPLETE dicts
    # collected above — they were only used for de-duplication; confirm no
    # message insertion was intended for them.
    messages, deleted_dids, deleted_rules, deleted_did_meta = [], [], [], []
    for chunk in chunks(did_condition, 100):
        query = session.query(models.DataIdentifier.scope,
                              models.DataIdentifier.name,
                              models.DataIdentifier.did_type).\
            with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\
            filter(or_(*chunk))
        for scope, name, did_type in query:
            if did_type == DIDType.DATASET:
                messages.append({'event_type': 'ERASE',
                                 'payload': dumps({'scope': scope.external,
                                                   'name': name,
                                                   'account': 'root'})})
            deleted_rules.append(and_(models.ReplicationRule.scope == scope,
                                      models.ReplicationRule.name == name))
            deleted_dids.append(and_(models.DataIdentifier.scope == scope,
                                     models.DataIdentifier.name == name))
            # DidMeta is only cleaned up on Oracle >= 12 (older Oracle lacks
            # the JSON support backing that table) or on any non-Oracle backend
            if session.bind.dialect.name == 'oracle':
                oracle_version = int(session.connection().connection.version.split('.')[0])
                if oracle_version >= 12:
                    deleted_did_meta.append(and_(models.DidMeta.scope == scope,
                                                 models.DidMeta.name == name))
            else:
                deleted_did_meta.append(and_(models.DidMeta.scope == scope,
                                             models.DidMeta.name == name))

    # Remove Archive Constituents
    removed_constituents = []
    constituents_to_delete_condition = []
    for chunk in chunks(archive_contents_condition, 30):
        query = session.query(models.ConstituentAssociation). \
            with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_CHILD_IDX)", 'oracle'). \
            filter(or_(*chunk))

        for constituent in query:
            removed_constituents.append({'scope': constituent.child_scope, 'name': constituent.child_name})
            constituents_to_delete_condition.append(
                and_(models.ConstituentAssociation.scope == constituent.scope,
                     models.ConstituentAssociation.name == constituent.name,
                     models.ConstituentAssociation.child_scope == constituent.child_scope,
                     models.ConstituentAssociation.child_name == constituent.child_name))

            models.ConstituentAssociationHistory(
                child_scope=constituent.child_scope,
                child_name=constituent.child_name,
                scope=constituent.scope,
                name=constituent.name,
                bytes=constituent.bytes,
                adler32=constituent.adler32,
                md5=constituent.md5,
                guid=constituent.guid,
                length=constituent.length,
                updated_at=constituent.updated_at,
                created_at=constituent.created_at,
            ).save(session=session, flush=False)

            # Flush deletions in batches to bound the size of the OR clause,
            # recursing to clean up after the removed constituents
            if len(constituents_to_delete_condition) > 200:
                session.query(models.ConstituentAssociation).\
                    with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle').\
                    filter(or_(*constituents_to_delete_condition)).\
                    delete(synchronize_session=False)
                constituents_to_delete_condition.clear()

                __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session)
                removed_constituents.clear()

    if constituents_to_delete_condition:
        session.query(models.ConstituentAssociation). \
            with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \
            filter(or_(*constituents_to_delete_condition)). \
            delete(synchronize_session=False)

        __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session)

    # Remove rules in Waiting for approval or Suspended
    for chunk in chunks(deleted_rules, 100):
        session.query(models.ReplicationRule).\
            with_hint(models.ReplicationRule, "INDEX(RULES RULES_SCOPE_NAME_IDX)", 'oracle').\
            filter(or_(*chunk)).\
            filter(models.ReplicationRule.state.in_((RuleState.SUSPENDED,
                                                    RuleState.WAITING_APPROVAL))).\
            delete(synchronize_session=False)

    # Remove DID Metadata
    for chunk in chunks(deleted_did_meta, 100):
        session.query(models.DidMeta).\
            filter(or_(*chunk)).\
            delete(synchronize_session=False)

    for chunk in chunks(messages, 100):
        session.bulk_insert_mappings(models.Message, chunk)

    for chunk in chunks(deleted_dids, 100):
        session.query(models.DataIdentifier).\
            with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\
            filter(or_(*chunk)).\
            delete(synchronize_session=False)

        if session.bind.dialect.name != 'oracle':
            rucio.core.did.insert_deleted_dids(chunk, session=session)

    # Set is_archive = false on collections which don't have archive children anymore
    for chunk in chunks(clt_is_not_archive_condition, 100):
        clt_to_update = list(session
                             .query(models.DataIdentifierAssociation.scope,
                                    models.DataIdentifierAssociation.name)
                             .distinct(models.DataIdentifierAssociation.scope,
                                       models.DataIdentifierAssociation.name)
                             .with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle')
                             .filter(or_(*chunk)))
        if clt_to_update:
            session.query(models.DataIdentifier).\
                with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\
                filter(or_(and_(models.DataIdentifier.scope == scope,
                                models.DataIdentifier.name == name,
                                models.DataIdentifier.is_archive == true())
                           for scope, name in clt_to_update)).\
                update({'is_archive': False}, synchronize_session=False)
@transactional_session
def get_replica(rse_id, scope, name, session=None):
    """
    Get File replica.

    :param rse_id: The RSE Id.
    :param scope: the scope name.
    :param name: The data identifier name.
    :param session: The database session in use.
    :returns: A dictionary with the list of replica attributes.
    :raises ReplicaNotFound: if no replica matches the given scope/name/rse.
    """
    try:
        row = session.query(models.RSEFileAssociation).filter_by(rse_id=rse_id, scope=scope, name=name).one()
        # Flatten the ORM row into a plain column-name -> value mapping
        return {column.name: getattr(row, column.name) for column in row.__table__.columns}
    except NoResultFound:
        raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (scope, name, get_rse_name(rse_id=rse_id, session=session)))
@transactional_session
def list_and_mark_unlocked_replicas(limit, bytes=None, rse_id=None, delay_seconds=600, only_delete_obsolete=False, session=None):
    """
    List RSE File replicas with no locks, and mark the returned ones as BEING_DELETED.

    :param limit: Number of replicas returned.
    :param bytes: The amount of needed bytes.
    :param rse_id: The rse_id.
    :param delay_seconds: The delay to query replicas in BEING_DELETED state.
    :param only_delete_obsolete: If set to True, will only return the replicas with EPOCH tombstone.
    :param session: The database session in use.
    :returns: a list of dictionary replica.
    """
    none_value = None  # Hack to get pep8 happy...
    # Candidate replicas on this RSE: tombstone expired, no locks, in a deletable
    # state (or stuck in BEING_DELETED for longer than delay_seconds), and not
    # currently registered as a transfer source.  Rows are locked with
    # SKIP LOCKED so concurrent callers do not pick the same replicas, and the
    # scan proceeds in tombstone order (oldest first).
    query = session.query(models.RSEFileAssociation.scope,
                          models.RSEFileAssociation.name,
                          models.RSEFileAssociation.path,
                          models.RSEFileAssociation.bytes,
                          models.RSEFileAssociation.tombstone,
                          models.RSEFileAssociation.state).\
        with_hint(models.RSEFileAssociation, "INDEX_RS_ASC(replicas REPLICAS_TOMBSTONE_IDX) NO_INDEX_FFS(replicas REPLICAS_TOMBSTONE_IDX)", 'oracle').\
        filter(models.RSEFileAssociation.tombstone < datetime.utcnow()).\
        filter(models.RSEFileAssociation.lock_cnt == 0).\
        filter(case([(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.rse_id), ]) == rse_id).\
        filter(or_(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE, ReplicaState.BAD)),
                   and_(models.RSEFileAssociation.state == ReplicaState.BEING_DELETED, models.RSEFileAssociation.updated_at < datetime.utcnow() - timedelta(seconds=delay_seconds)))).\
        filter(~exists(select([1]).prefix_with("/*+ INDEX(SOURCES SOURCES_SC_NM_DST_IDX) */", dialect='oracle')
                       .where(and_(models.RSEFileAssociation.scope == models.Source.scope,
                                   models.RSEFileAssociation.name == models.Source.name,
                                   models.RSEFileAssociation.rse_id == models.Source.rse_id)))).\
        with_for_update(skip_locked=True).\
        order_by(models.RSEFileAssociation.tombstone)
    needed_space = bytes
    total_bytes, total_files = 0, 0
    rows = []
    replica_clause = []
    for (scope, name, path, bytes, tombstone, state) in query.yield_per(1000):
        # Check if more than one replica is available
        replica_cnt = session.query(func.count(models.RSEFileAssociation.scope)).\
            with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\
            filter(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id != rse_id)).one()
        if replica_cnt[0] > 1:
            # Other copies exist elsewhere: deletable unless it is the UNAVAILABLE copy.
            # Since results come in tombstone order, hitting the first non-OBSOLETE
            # tombstone in only_delete_obsolete mode means no further OBSOLETE rows
            # can follow, so the scan stops; the byte/file budgets stop it likewise.
            if state != ReplicaState.UNAVAILABLE:
                if tombstone != OBSOLETE:
                    if only_delete_obsolete:
                        break
                    if needed_space is not None and total_bytes > needed_space:
                        break
                    total_bytes += bytes
                total_files += 1
                if total_files > limit:
                    break
                rows.append({'scope': scope, 'name': name, 'path': path,
                             'bytes': bytes, 'tombstone': tombstone,
                             'state': state})
                replica_clause.append(and_(models.RSEFileAssociation.scope == scope,
                                           models.RSEFileAssociation.name == name,
                                           models.RSEFileAssociation.rse_id == rse_id))
        else:
            # If this is the last replica, check if there are some requests
            request_cnt = session.query(func.count()).\
                with_hint(models.Request, "INDEX(requests REQUESTS_SCOPE_NAME_RSE_IDX)", 'oracle').\
                filter(and_(models.Request.scope == scope,
                            models.Request.name == name)).one()
            # Only delete the last copy when no transfer request references it
            if request_cnt[0] == 0:
                if tombstone != OBSOLETE:
                    if only_delete_obsolete:
                        break
                    if needed_space is not None and total_bytes > needed_space:
                        break
                    total_bytes += bytes
                total_files += 1
                if total_files > limit:
                    break
                rows.append({'scope': scope, 'name': name, 'path': path,
                             'bytes': bytes, 'tombstone': tombstone,
                             'state': state})
                replica_clause.append(and_(models.RSEFileAssociation.scope == scope,
                                           models.RSEFileAssociation.name == name,
                                           models.RSEFileAssociation.rse_id == rse_id))
    # Mark all selected replicas as BEING_DELETED with an epoch tombstone so
    # they are not picked up again until delay_seconds elapses
    for chunk in chunks(replica_clause, 100):
        session.query(models.RSEFileAssociation).filter(or_(*chunk)).\
            with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\
            update({'updated_at': datetime.utcnow(), 'state': ReplicaState.BEING_DELETED, 'tombstone': datetime(1970, 1, 1)}, synchronize_session=False)
    return rows
@transactional_session
def update_replicas_states(replicas, nowait=False, session=None):
    """
    Update File replica information and state.

    :param replicas: The list of replicas. Each is a dict with at least
                     'rse_id', 'scope', 'name' and 'state' (ReplicaState or
                     its string value); optionally 'path' and, for
                     UNAVAILABLE, error/broken-rule details.
    :param nowait: Nowait parameter for the for_update queries.
    :param session: The database session in use.
    :raises ReplicaNotFound: if a replica row does not exist (only detected
                             when nowait is True — see note below).
    :raises UnsupportedOperation: if the state transition updated no row.
    :returns: True on success.
    """
    for replica in replicas:
        query = session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name'])
        try:
            # NOTE(review): the existence check (and thus ReplicaNotFound) is
            # only performed when nowait is True; otherwise a missing row
            # surfaces later as UnsupportedOperation — confirm this is intended.
            if nowait:
                query.with_for_update(nowait=True).one()
        except NoResultFound:
            # remember scope, name and rse
            raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (replica['scope'], replica['name'], get_rse_name(replica['rse_id'], session=session)))
        # Accept the state either as a ReplicaState member or its string value
        if isinstance(replica['state'], string_types):
            replica['state'] = ReplicaState(replica['state'])
        values = {'state': replica['state']}
        if replica['state'] == ReplicaState.BEING_DELETED:
            # Only transition to BEING_DELETED when unlocked and not a source
            query = query.filter_by(lock_cnt=0)
            # Exclude replicas use as sources
            stmt = exists([1]).where(and_(models.RSEFileAssociation.scope == models.Source.scope,
                                          models.RSEFileAssociation.name == models.Source.name,
                                          models.RSEFileAssociation.rse_id == models.Source.rse_id))
            query = query.filter(not_(stmt))
            values['tombstone'] = OBSOLETE
        elif replica['state'] == ReplicaState.AVAILABLE:
            # Propagate the successful transfer to the rule/lock machinery
            rucio.core.lock.successful_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], nowait=nowait, session=session)
        elif replica['state'] == ReplicaState.UNAVAILABLE:
            # Propagate the failed transfer, including optional error details
            rucio.core.lock.failed_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'],
                                            error_message=replica.get('error_message', None),
                                            broken_rule_id=replica.get('broken_rule_id', None),
                                            broken_message=replica.get('broken_message', None),
                                            nowait=nowait, session=session)
        elif replica['state'] == ReplicaState.TEMPORARY_UNAVAILABLE:
            # Only AVAILABLE or already TEMPORARY_UNAVAILABLE replicas may enter this state
            query = query.filter(or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.TEMPORARY_UNAVAILABLE))
        if 'path' in replica and replica['path']:
            values['path'] = replica['path']
        # rowcount 0 means the filters above rejected the transition
        if not query.update(values, synchronize_session=False):
            if 'rse' not in replica:
                replica['rse'] = get_rse_name(rse_id=replica['rse_id'], session=session)
            raise exception.UnsupportedOperation('State %(state)s for replica %(scope)s:%(name)s on %(rse)s cannot be updated' % replica)
    return True
@transactional_session
def touch_replica(replica, session=None):
    """
    Update the accessed_at timestamp of the given file replica/did but don't wait if row is locked.

    :param replica: a dictionary with the information of the affected replica
                    ('rse_id', 'scope', 'name', optionally 'accessed_at').
    :param session: The database session in use.
    :returns: True, if successful, False otherwise.
    """
    try:
        accessed_at, none_value = replica.get('accessed_at') or datetime.utcnow(), None
        # Acquire a non-blocking row lock on the replica first; if the row is
        # already locked elsewhere this raises DatabaseError and we give up
        session.query(models.RSEFileAssociation).\
            filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\
            with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\
            with_for_update(nowait=True).one()
        # Bump accessed_at; also push the tombstone forward, but only when one
        # is set and it is not the OBSOLETE (epoch) marker
        session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\
            with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\
            update({'accessed_at': accessed_at,
                    'tombstone': case([(and_(models.RSEFileAssociation.tombstone != none_value,
                                             models.RSEFileAssociation.tombstone != OBSOLETE),
                                        accessed_at)],
                                      else_=models.RSEFileAssociation.tombstone)},
                   synchronize_session=False)
        # Same lock-then-update pattern for the corresponding file did
        session.query(models.DataIdentifier).\
            filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\
            with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\
            with_for_update(nowait=True).one()
        session.query(models.DataIdentifier).\
            filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\
            with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\
            update({'accessed_at': accessed_at}, synchronize_session=False)
    except DatabaseError:
        # Row locked by someone else: do not wait, report failure
        return False
    except NoResultFound:
        # Replica or did no longer exists: nothing to touch, treat as success
        return True
    return True
@transactional_session
def update_replica_state(rse_id, scope, name, state, session=None):
    """
    Update the state of a single file replica.

    Convenience wrapper delegating to update_replicas_states with a
    one-element replica list.

    :param rse_id: the rse id.
    :param scope: the tag name.
    :param name: The data identifier name.
    :param state: The state.
    :param session: The database session in use.
    """
    replica = {'scope': scope,
               'name': name,
               'state': state,
               'rse_id': rse_id}
    return update_replicas_states(replicas=[replica], session=session)
@transactional_session
def get_and_lock_file_replicas(scope, name, nowait=False, restrict_rses=None, session=None):
    """
    Get file replicas for a specific scope:name and lock them (SELECT ... FOR UPDATE).

    :param scope: The scope of the did.
    :param name: The name of the did.
    :param nowait: Nowait parameter for the FOR UPDATE statement
    :param restrict_rses: Possible RSE_ids to filter on.
    :param session: The db session in use.
    :returns: List of SQLAlchemy Replica Objects
    """
    query = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)
    # Only push the RSE restriction into SQL for short lists; for 10 or more
    # RSEs the original code deliberately skips the OR-clause (presumably to
    # keep the query plan simple) and returns all replicas instead.
    if restrict_rses is not None and len(restrict_rses) < 10:
        rse_clause = [models.RSEFileAssociation.rse_id == rse_id for rse_id in restrict_rses]
        if rse_clause:
            query = query.filter(or_(*rse_clause))
    return query.with_for_update(nowait=nowait).all()
@transactional_session
def get_source_replicas(scope, name, source_rses=None, session=None):
    """
    Get source replicas for a specific scope:name.

    :param scope: The scope of the did.
    :param name: The name of the did.
    :param source_rses: Possible RSE_ids to filter on.
    :param session: The db session in use.
    :returns: List of rse_ids with an AVAILABLE replica of the file.
    """
    query = session.query(models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)
    # Only push the RSE restriction into SQL for short lists; for 10 or more
    # RSEs the restriction is deliberately skipped and all sources returned.
    if source_rses and len(source_rses) < 10:
        rse_clause = [models.RSEFileAssociation.rse_id == rse_id for rse_id in source_rses]
        if rse_clause:
            query = query.filter(or_(*rse_clause))
    # Each row is a one-column tuple; unwrap to plain rse_ids.
    return [a[0] for a in query.all()]
@transactional_session
def get_and_lock_file_replicas_for_dataset(scope, name, nowait=False, restrict_rses=None,
                                           total_threads=None, thread_id=None,
                                           session=None):
    """
    Get file replicas for all files of a dataset and lock them (FOR UPDATE OF lock_cnt).

    :param scope: The scope of the dataset.
    :param name: The name of the dataset.
    :param nowait: Nowait parameter for the FOR UPDATE statement
    :param restrict_rses: Possible RSE_ids to filter on.
    :param total_threads: Total threads
    :param thread_id: This thread
    :param session: The db session in use.
    :returns: (files in dataset, replicas in dataset) -- files is a list of
              {'scope', 'name', 'bytes', 'md5', 'adler32'} dicts, replicas maps
              (child_scope, child_name) -> list of RSEFileAssociation rows.
    """
    files, replicas = {}, {}
    # NOTE(review): PostgreSQL gets a two-step approach (content first, then an
    # inner join on replicas) while other dialects use a single OUTER JOIN --
    # presumably because FOR UPDATE cannot lock the nullable side of an outer
    # join on PostgreSQL; confirm before restructuring.
    if session.bind.dialect.name == 'postgresql':
        # Get content
        content_query = session.query(models.DataIdentifierAssociation.child_scope,
                                      models.DataIdentifierAssociation.child_name,
                                      models.DataIdentifierAssociation.bytes,
                                      models.DataIdentifierAssociation.md5,
                                      models.DataIdentifierAssociation.adler32).\
            with_hint(models.DataIdentifierAssociation,
                      "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)",
                      'oracle').\
            filter(models.DataIdentifierAssociation.scope == scope,
                   models.DataIdentifierAssociation.name == name)
        # Partition the content between workers by hashing child_name.
        if total_threads and total_threads > 1:
            content_query = filter_thread_work(session=session, query=content_query, total_threads=total_threads,
                                               thread_id=thread_id, hash_variable='child_name')
        for child_scope, child_name, bytes, md5, adler32 in content_query.yield_per(1000):
            files[(child_scope, child_name)] = {'scope': child_scope,
                                                'name': child_name,
                                                'bytes': bytes,
                                                'md5': md5,
                                                'adler32': adler32}
            # Pre-seed with an empty list so files with no replica still appear.
            replicas[(child_scope, child_name)] = []
        # Get replicas and lock them
        query = session.query(models.DataIdentifierAssociation.child_scope,
                              models.DataIdentifierAssociation.child_name,
                              models.DataIdentifierAssociation.bytes,
                              models.DataIdentifierAssociation.md5,
                              models.DataIdentifierAssociation.adler32,
                              models.RSEFileAssociation)\
            .with_hint(models.DataIdentifierAssociation,
                       "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)",
                       'oracle')\
            .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope,
                         models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name,
                         models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))\
            .filter(models.DataIdentifierAssociation.scope == scope,
                    models.DataIdentifierAssociation.name == name)
        if restrict_rses is not None:
            # RSE restriction is only applied for fewer than 10 RSEs.
            if len(restrict_rses) < 10:
                rse_clause = []
                for rse_id in restrict_rses:
                    rse_clause.append(models.RSEFileAssociation.rse_id == rse_id)
                if rse_clause:
                    # Rebuild the whole query with the RSE clause folded into the join condition.
                    query = session.query(models.DataIdentifierAssociation.child_scope,
                                          models.DataIdentifierAssociation.child_name,
                                          models.DataIdentifierAssociation.bytes,
                                          models.DataIdentifierAssociation.md5,
                                          models.DataIdentifierAssociation.adler32,
                                          models.RSEFileAssociation)\
                        .with_hint(models.DataIdentifierAssociation,
                                   "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)",
                                   'oracle')\
                        .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope,
                                     models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name,
                                     models.RSEFileAssociation.state != ReplicaState.BEING_DELETED,
                                     or_(*rse_clause)))\
                        .filter(models.DataIdentifierAssociation.scope == scope,
                                models.DataIdentifierAssociation.name == name)
    else:
        # Non-PostgreSQL path: one OUTER JOIN so files without replicas are
        # still returned (replica column comes back as None for those).
        query = session.query(models.DataIdentifierAssociation.child_scope,
                              models.DataIdentifierAssociation.child_name,
                              models.DataIdentifierAssociation.bytes,
                              models.DataIdentifierAssociation.md5,
                              models.DataIdentifierAssociation.adler32,
                              models.RSEFileAssociation)\
            .with_hint(models.DataIdentifierAssociation,
                       "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)",
                       'oracle') \
            .with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle')\
            .outerjoin(models.RSEFileAssociation,
                       and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope,
                            models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name,
                            models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)).\
            filter(models.DataIdentifierAssociation.scope == scope,
                   models.DataIdentifierAssociation.name == name)
        if restrict_rses is not None:
            # Same <10 RSEs rule as above.
            if len(restrict_rses) < 10:
                rse_clause = []
                for rse_id in restrict_rses:
                    rse_clause.append(models.RSEFileAssociation.rse_id == rse_id)
                if rse_clause:
                    query = session.query(models.DataIdentifierAssociation.child_scope,
                                          models.DataIdentifierAssociation.child_name,
                                          models.DataIdentifierAssociation.bytes,
                                          models.DataIdentifierAssociation.md5,
                                          models.DataIdentifierAssociation.adler32,
                                          models.RSEFileAssociation)\
                        .with_hint(models.DataIdentifierAssociation,
                                   "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)",
                                   'oracle')\
                        .outerjoin(models.RSEFileAssociation,
                                   and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope,
                                        models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name,
                                        models.RSEFileAssociation.state != ReplicaState.BEING_DELETED,
                                        or_(*rse_clause)))\
                        .filter(models.DataIdentifierAssociation.scope == scope,
                                models.DataIdentifierAssociation.name == name)
    if total_threads and total_threads > 1:
        query = filter_thread_work(session=session, query=query, total_threads=total_threads,
                                   thread_id=thread_id, hash_variable='child_name')
    # Lock only the replica rows (OF lock_cnt) rather than the content rows.
    query = query.with_for_update(nowait=nowait, of=models.RSEFileAssociation.lock_cnt)
    for child_scope, child_name, bytes, md5, adler32, replica in query.yield_per(1000):
        if (child_scope, child_name) not in files:
            files[(child_scope, child_name)] = {'scope': child_scope,
                                                'name': child_name,
                                                'bytes': bytes,
                                                'md5': md5,
                                                'adler32': adler32}
        if (child_scope, child_name) in replicas:
            if replica is not None:
                replicas[(child_scope, child_name)].append(replica)
        else:
            replicas[(child_scope, child_name)] = []
            if replica is not None:
                replicas[(child_scope, child_name)].append(replica)
    return (list(files.values()), replicas)
@transactional_session
def get_source_replicas_for_dataset(scope, name, source_rses=None,
                                    total_threads=None, thread_id=None,
                                    session=None):
    """
    Get the source RSEs for all files of a dataset.

    :param scope: The scope of the dataset.
    :param name: The name of the dataset.
    :param source_rses: Possible source RSE_ids to filter on.
    :param total_threads: Total threads
    :param thread_id: This thread
    :param session: The db session in use.
    :returns: Dictionary mapping (child_scope, child_name) -> list of rse_ids
              holding an AVAILABLE replica (empty list if none).
    """
    # OUTER JOIN so that files without any available replica still show up
    # with rse_id None (turned into an empty list below).
    query = session.query(models.DataIdentifierAssociation.child_scope,
                          models.DataIdentifierAssociation.child_name,
                          models.RSEFileAssociation.rse_id)\
        .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\
        .outerjoin(models.RSEFileAssociation,
                   and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope,
                        models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name,
                        models.RSEFileAssociation.state == ReplicaState.AVAILABLE)).\
        filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name)
    if source_rses:
        # The RSE restriction is only applied in SQL for fewer than 10 RSEs.
        if len(source_rses) < 10:
            rse_clause = []
            for rse_id in source_rses:
                rse_clause.append(models.RSEFileAssociation.rse_id == rse_id)
            if rse_clause:
                # Rebuild the query with the RSE clause folded into the join condition.
                query = session.query(models.DataIdentifierAssociation.child_scope,
                                      models.DataIdentifierAssociation.child_name,
                                      models.RSEFileAssociation.rse_id)\
                    .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\
                    .outerjoin(models.RSEFileAssociation,
                               and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope,
                                    models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name,
                                    models.RSEFileAssociation.state == ReplicaState.AVAILABLE,
                                    or_(*rse_clause)))\
                    .filter(models.DataIdentifierAssociation.scope == scope,
                            models.DataIdentifierAssociation.name == name)
    # Partition the dataset content between workers by hashing child_name.
    if total_threads and total_threads > 1:
        query = filter_thread_work(session=session, query=query, total_threads=total_threads,
                                   thread_id=thread_id, hash_variable='child_name')
    replicas = {}
    for child_scope, child_name, rse_id in query:
        if (child_scope, child_name) in replicas:
            if rse_id:
                replicas[(child_scope, child_name)].append(rse_id)
        else:
            replicas[(child_scope, child_name)] = []
            if rse_id:
                replicas[(child_scope, child_name)].append(rse_id)
    return replicas
@read_session
def get_replica_atime(replica, session=None):
    """
    Get the accessed_at timestamp for a replica. Just for testing.

    :param replica: Dictionary with 'scope', 'name' and 'rse_id' of the replica.
    :param session: Database session to use.
    :returns: A datetime timestamp with the last access time.
    """
    query = session.query(models.RSEFileAssociation.accessed_at)
    query = query.filter_by(scope=replica['scope'],
                            name=replica['name'],
                            rse_id=replica['rse_id'])
    query = query.with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle')
    # .one() returns a one-column row; unwrap the timestamp.
    return query.one()[0]
@transactional_session
def touch_collection_replicas(collection_replicas, session=None):
    """
    Update the accessed_at timestamp of the given collection replicas.

    :param collection_replicas: the list of collection replicas.
    :param session: The database session in use.
    :returns: True, if successful, False otherwise.
    """
    now = datetime.utcnow()
    for replica in collection_replicas:
        # Fall back to "now" when the caller did not supply a timestamp.
        timestamp = replica.get('accessed_at') or now
        try:
            query = session.query(models.CollectionReplica).filter_by(scope=replica['scope'],
                                                                      name=replica['name'],
                                                                      rse_id=replica['rse_id'])
            query.update({'accessed_at': timestamp}, synchronize_session=False)
        except DatabaseError:
            return False
    return True
@stream_session
def list_dataset_replicas(scope, name, deep=False, session=None):
    """
    :param scope: The scope of the dataset.
    :param name: The name of the dataset.
    :param deep: Lookup at the file level.
    :param session: Database session to use.
    :returns: A list of dictionaries containing the dataset replicas
              with associated metrics and timestamps
    """
    if not deep:
        # Shallow mode: trust the pre-aggregated COLLECTION_REPLICAS rows.
        query = session.query(models.CollectionReplica.scope,
                              models.CollectionReplica.name,
                              models.RSE.rse,
                              models.CollectionReplica.rse_id,
                              models.CollectionReplica.bytes,
                              models.CollectionReplica.length,
                              models.CollectionReplica.available_bytes,
                              models.CollectionReplica.available_replicas_cnt.label("available_length"),
                              models.CollectionReplica.state,
                              models.CollectionReplica.created_at,
                              models.CollectionReplica.updated_at,
                              models.CollectionReplica.accessed_at)\
            .filter_by(scope=scope, name=name, did_type=DIDType.DATASET)\
            .filter(models.CollectionReplica.rse_id == models.RSE.id)\
            .filter(models.RSE.deleted == false())
        for row in query:
            yield row._asdict()
    else:
        # Deep mode: recompute metrics from the file level.
        # find maximum values
        content_query = session\
            .query(func.sum(models.DataIdentifierAssociation.bytes).label("bytes"),
                   func.count().label("length"))\
            .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\
            .filter(models.DataIdentifierAssociation.scope == scope)\
            .filter(models.DataIdentifierAssociation.name == name)
        # Totals for the whole dataset, used to decide AVAILABLE vs UNAVAILABLE below.
        bytes, length = 0, 0
        for row in content_query:
            bytes, length = row.bytes, row.length
        # find archives that contain files of the requested dataset
        sub_query_archives = session\
            .query(models.DataIdentifierAssociation.scope.label('dataset_scope'),
                   models.DataIdentifierAssociation.name.label('dataset_name'),
                   models.DataIdentifierAssociation.bytes.label('file_bytes'),
                   models.ConstituentAssociation.child_scope.label('file_scope'),
                   models.ConstituentAssociation.child_name.label('file_name'),
                   models.RSEFileAssociation.scope.label('replica_scope'),
                   models.RSEFileAssociation.name.label('replica_name'),
                   models.RSE.rse,
                   models.RSE.id.label('rse_id'),
                   models.RSEFileAssociation.created_at,
                   models.RSEFileAssociation.accessed_at,
                   models.RSEFileAssociation.updated_at)\
            .filter(models.DataIdentifierAssociation.scope == scope)\
            .filter(models.DataIdentifierAssociation.name == name)\
            .filter(models.ConstituentAssociation.child_scope == models.DataIdentifierAssociation.child_scope)\
            .filter(models.ConstituentAssociation.child_name == models.DataIdentifierAssociation.child_name)\
            .filter(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope)\
            .filter(models.ConstituentAssociation.name == models.RSEFileAssociation.name)\
            .filter(models.RSEFileAssociation.rse_id == models.RSE.id)\
            .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\
            .filter(models.RSE.deleted == false())\
            .subquery()
        # count the metrics
        group_query_archives = session\
            .query(sub_query_archives.c.dataset_scope,
                   sub_query_archives.c.dataset_name,
                   sub_query_archives.c.file_scope,
                   sub_query_archives.c.file_name,
                   sub_query_archives.c.rse_id,
                   sub_query_archives.c.rse,
                   func.sum(sub_query_archives.c.file_bytes).label('file_bytes'),
                   func.min(sub_query_archives.c.created_at).label('created_at'),
                   func.max(sub_query_archives.c.updated_at).label('updated_at'),
                   func.max(sub_query_archives.c.accessed_at).label('accessed_at'))\
            .group_by(sub_query_archives.c.dataset_scope,
                      sub_query_archives.c.dataset_name,
                      sub_query_archives.c.file_scope,
                      sub_query_archives.c.file_name,
                      sub_query_archives.c.rse_id,
                      sub_query_archives.c.rse)\
            .subquery()
        # bring it in the same column state as the non-archive query
        full_query_archives = session\
            .query(group_query_archives.c.dataset_scope.label('scope'),
                   group_query_archives.c.dataset_name.label('name'),
                   group_query_archives.c.rse_id,
                   group_query_archives.c.rse,
                   func.sum(group_query_archives.c.file_bytes).label('available_bytes'),
                   func.count().label('available_length'),
                   func.min(group_query_archives.c.created_at).label('created_at'),
                   func.max(group_query_archives.c.updated_at).label('updated_at'),
                   func.max(group_query_archives.c.accessed_at).label('accessed_at'))\
            .group_by(group_query_archives.c.dataset_scope,
                      group_query_archives.c.dataset_name,
                      group_query_archives.c.rse_id,
                      group_query_archives.c.rse)
        # find the non-archive dataset replicas
        sub_query = session\
            .query(models.DataIdentifierAssociation.scope,
                   models.DataIdentifierAssociation.name,
                   models.RSEFileAssociation.rse_id,
                   func.sum(models.RSEFileAssociation.bytes).label("available_bytes"),
                   func.count().label("available_length"),
                   func.min(models.RSEFileAssociation.created_at).label("created_at"),
                   func.max(models.RSEFileAssociation.updated_at).label("updated_at"),
                   func.max(models.RSEFileAssociation.accessed_at).label("accessed_at"))\
            .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\
            .filter(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope)\
            .filter(models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)\
            .filter(models.DataIdentifierAssociation.scope == scope)\
            .filter(models.DataIdentifierAssociation.name == name)\
            .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\
            .group_by(models.DataIdentifierAssociation.scope,
                      models.DataIdentifierAssociation.name,
                      models.RSEFileAssociation.rse_id)\
            .subquery()
        query = session\
            .query(sub_query.c.scope,
                   sub_query.c.name,
                   sub_query.c.rse_id,
                   models.RSE.rse,
                   sub_query.c.available_bytes,
                   sub_query.c.available_length,
                   sub_query.c.created_at,
                   sub_query.c.updated_at,
                   sub_query.c.accessed_at)\
            .filter(models.RSE.id == sub_query.c.rse_id)\
            .filter(models.RSE.deleted == false())
        # join everything together
        final_query = query.union_all(full_query_archives)
        for row in final_query.all():
            replica = row._asdict()
            # Overwrite per-RSE totals with the dataset-wide totals computed above.
            replica['length'], replica['bytes'] = length, bytes
            # A replica is AVAILABLE only when every file of the dataset is present.
            if replica['length'] == row.available_length:
                replica['state'] = ReplicaState.AVAILABLE
            else:
                replica['state'] = ReplicaState.UNAVAILABLE
            yield replica
@stream_session
def list_dataset_replicas_bulk(names_by_intscope, session=None):
    """
    List dataset replicas for several datasets in one pass.

    :param names_by_intscope: The dictionary of internal scopes pointing at the list of names.
    :param session: Database session to use.
    :returns: A list of dictionaries containing the dataset replicas
              with associated metrics and timestamps
    """
    # One OR-term per scope, each restricting the names inside that scope.
    condition = [and_(models.CollectionReplica.scope == scope,
                      models.CollectionReplica.name.in_(names))
                 for scope, names in names_by_intscope.items()]
    try:
        # chunk size refers to the number of different scopes, see above
        for chunk in chunks(condition, 10):
            query = session.query(models.CollectionReplica.scope,
                                  models.CollectionReplica.name,
                                  models.RSE.rse,
                                  models.CollectionReplica.rse_id,
                                  models.CollectionReplica.bytes,
                                  models.CollectionReplica.length,
                                  models.CollectionReplica.available_bytes,
                                  models.CollectionReplica.available_replicas_cnt.label("available_length"),
                                  models.CollectionReplica.state,
                                  models.CollectionReplica.created_at,
                                  models.CollectionReplica.updated_at,
                                  models.CollectionReplica.accessed_at) \
                .filter(models.CollectionReplica.did_type == DIDType.DATASET) \
                .filter(models.CollectionReplica.rse_id == models.RSE.id) \
                .filter(or_(*chunk)) \
                .filter(models.RSE.deleted == false())
            for row in query:
                yield row._asdict()
    except NoResultFound:
        raise exception.DataIdentifierNotFound('No Data Identifiers found')
@stream_session
def list_dataset_replicas_vp(scope, name, deep=False, session=None, logger=logging.log):
    """
    List dataset replicas for a DID (scope:name) using the
    Virtual Placement service.
    NOTICE: This is an RnD function and might change or go away at any time.

    :param scope: The scope of the dataset.
    :param name: The name of the dataset.
    :param deep: Lookup at the file level.
    :param session: Database session to use.
    :param logger: Callable compatible with logging.log used for error reporting.
    :returns: If VP exists and there is at least one non-TAPE replica, returns a list of dicts of sites
    """
    vp_endpoint = get_vp_endpoint()
    vp_replies = ['other']
    nr_replies = 5  # force limit reply size
    if not vp_endpoint:
        # NOTE(review): this function is a generator (it yields below), so this
        # `return` terminates iteration with zero items -- the vp_replies value
        # never reaches the caller. Confirm whether callers rely on getting
        # ['other'] here; if so this is a latent bug.
        return vp_replies
    try:
        # verify=False: endpoint certificate is deliberately not validated here.
        vp_replies = requests.get('{}/ds/{}/{}:{}'.format(vp_endpoint, nr_replies, scope, name),
                                  verify=False,
                                  timeout=1)
        if vp_replies.status_code == 200:
            vp_replies = vp_replies.json()
        else:
            vp_replies = ['other']
    except requests.exceptions.RequestException as re:
        logger(logging.ERROR, 'In list_dataset_replicas_vp, could not access {}. Error:{}'.format(vp_endpoint, re))
        vp_replies = ['other']
    if vp_replies != ['other']:
        # check that there is at least one regular replica
        # that is not on tape and has a protocol with scheme "root"
        # and can be accessed from WAN
        accessible_replica_exists = False
        for reply in list_dataset_replicas(scope=scope, name=name, deep=deep, session=session):
            rse_info = rsemgr.get_rse_info(rse=reply['rse'], vo=scope.vo, session=session)
            if rse_info['rse_type'] == 'TAPE':
                continue
            for prot in rse_info['protocols']:
                if prot['scheme'] == 'root' and prot['domains']['wan']['read']:
                    accessible_replica_exists = True
                    break
            if accessible_replica_exists is True:
                break
        if accessible_replica_exists is True:
            for vp_reply in vp_replies:
                yield {'vp': True, 'site': vp_reply}
@stream_session
def list_datasets_per_rse(rse_id, filters=None, limit=None, session=None):
    """
    List datasets at a RSE.

    :param rse_id: the rse id.
    :param filters: dictionary of attributes by which the results should be filtered.
    :param limit: limit number.
    :param session: Database session to use.
    :returns: A list of dict dataset replicas
    """
    query = session.query(models.CollectionReplica.scope,
                          models.CollectionReplica.name,
                          models.RSE.id.label('rse_id'),
                          models.RSE.rse,
                          models.CollectionReplica.bytes,
                          models.CollectionReplica.length,
                          models.CollectionReplica.available_bytes,
                          models.CollectionReplica.available_replicas_cnt.label("available_length"),
                          models.CollectionReplica.state,
                          models.CollectionReplica.created_at,
                          models.CollectionReplica.updated_at,
                          models.CollectionReplica.accessed_at)\
        .filter_by(did_type=DIDType.DATASET)\
        .filter(models.CollectionReplica.rse_id == models.RSE.id)\
        .filter(models.RSE.id == rse_id)\
        .filter(models.RSE.deleted == false())
    # Idiom fix: iterate an empty dict when filters is None/empty instead of
    # the fragile `filters and filters.items() or []` and/or chain.
    for (k, v) in (filters or {}).items():
        if k == 'name' or k == 'scope':
            # Scope values are internal objects; compare on their string form.
            v_str = v if k != 'scope' else v.internal
            if '*' in v_str or '%' in v_str:
                # Wildcard match: '*' is translated to SQL '%'.
                if session.bind.dialect.name == 'postgresql':  # PostgreSQL escapes automatically
                    query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%')))
                else:
                    query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'), escape='\\'))
            else:
                query = query.filter(getattr(models.CollectionReplica, k) == v)
        # hints ?
        elif k == 'created_before':
            created_before = str_to_date(v)
            query = query.filter(models.CollectionReplica.created_at <= created_before)
        elif k == 'created_after':
            created_after = str_to_date(v)
            query = query.filter(models.CollectionReplica.created_at >= created_after)
        else:
            # Any other key is matched for strict equality.
            query = query.filter(getattr(models.CollectionReplica, k) == v)
    if limit:
        query = query.limit(limit)
    for row in query:
        yield row._asdict()
@transactional_session
def get_cleaned_updated_collection_replicas(total_workers, worker_number, limit=None, session=None):
    """
    Get update request for collection replicas.

    NOTE(review): total_workers and worker_number are accepted but never used
    in this body -- confirm whether work partitioning was intended here.

    :param total_workers: Number of total workers.
    :param worker_number: id of the executing worker.
    :param limit: Maximum numberws to return.
    :param session: Database session in use.
    :returns: List of update requests for collection replicas.
    """
    # Delete update requests which do not have collection_replicas
    session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.is_(None)
                                                          & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name,  # NOQA: W503
                                                                                 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope))).delete(synchronize_session=False)
    session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.isnot(None)
                                                          & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name,  # NOQA: W503
                                                                                 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope,
                                                                                 models.CollectionReplica.rse_id == models.UpdatedCollectionReplica.rse_id))).delete(synchronize_session=False)
    # Delete duplicates
    if session.bind.dialect.name == 'oracle':
        # Oracle: raw SQL using ROWID to keep only one row per
        # (scope, name, did_type, rse_id) group, treating NULL rse_ids as equal.
        schema = ''
        if BASE.metadata.schema:
            schema = BASE.metadata.schema + '.'
        session.execute('DELETE FROM {schema}updated_col_rep A WHERE A.rowid > ANY (SELECT B.rowid FROM {schema}updated_col_rep B WHERE A.scope = B.scope AND A.name=B.name AND A.did_type=B.did_type AND (A.rse_id=B.rse_id OR (A.rse_id IS NULL and B.rse_id IS NULL)))'.format(schema=schema))
    elif session.bind.dialect.name == 'mysql':
        # MySQL: keep the row with the highest id per group, delete the rest.
        subquery1 = session.query(func.max(models.UpdatedCollectionReplica.id).label('max_id')).\
            group_by(models.UpdatedCollectionReplica.scope,
                     models.UpdatedCollectionReplica.name,
                     models.UpdatedCollectionReplica.rse_id).subquery()
        subquery2 = session.query(subquery1.c.max_id).subquery()
        session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.notin_(subquery2)).delete(synchronize_session=False)
    else:
        # Generic fallback: detect duplicates in Python and delete them in chunks.
        replica_update_requests = session.query(models.UpdatedCollectionReplica)
        update_requests_with_rse_id = []
        update_requests_without_rse_id = []
        duplicate_request_ids = []
        for update_request in replica_update_requests.all():
            if update_request.rse_id is not None:
                small_request = {'name': update_request.name, 'scope': update_request.scope, 'rse_id': update_request.rse_id}
                if small_request not in update_requests_with_rse_id:
                    update_requests_with_rse_id.append(small_request)
                else:
                    duplicate_request_ids.append(update_request.id)
                    continue
            else:
                small_request = {'name': update_request.name, 'scope': update_request.scope}
                if small_request not in update_requests_without_rse_id:
                    update_requests_without_rse_id.append(small_request)
                else:
                    duplicate_request_ids.append(update_request.id)
                    continue
        for chunk in chunks(duplicate_request_ids, 100):
            session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.in_(chunk)).delete(synchronize_session=False)
    # Return whatever update requests survived the cleanup.
    query = session.query(models.UpdatedCollectionReplica)
    if limit:
        query = query.limit(limit)
    return [update_request.to_dict() for update_request in query.all()]
@transactional_session
def update_collection_replica(update_request, session=None):
    """
    Update a collection replica.

    Recomputes length/bytes/available counters for the collection replica(s)
    referenced by the update request, then deletes the processed request row.

    :param update_request: update request from the upated_col_rep table
                           (dict with 'id', 'scope', 'name' and optional 'rse_id').
    :param session: The database session in use.
    """
    if update_request['rse_id'] is not None:
        # Check one specific dataset replica
        ds_length = 0
        old_available_replicas = 0
        ds_bytes = 0
        ds_replica_state = None
        ds_available_bytes = 0
        available_replicas = 0
        try:
            collection_replica = session.query(models.CollectionReplica)\
                                        .filter_by(scope=update_request['scope'],
                                                   name=update_request['name'],
                                                   rse_id=update_request['rse_id'])\
                                        .one()
            ds_length = collection_replica.length
            old_available_replicas = collection_replica.available_replicas_cnt
            ds_bytes = collection_replica.bytes
        except NoResultFound:
            # No existing collection replica row: keep the zero defaults.
            pass
        try:
            # Aggregate the AVAILABLE file replicas of this dataset on this RSE.
            file_replica = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\
                                  .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope,
                                          models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name,
                                          models.DataIdentifierAssociation.name == update_request['name'],
                                          models.RSEFileAssociation.rse_id == update_request['rse_id'],
                                          models.RSEFileAssociation.state == ReplicaState.AVAILABLE,
                                          update_request['scope'] == models.DataIdentifierAssociation.scope)\
                                  .with_entities(label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)),
                                                 label('available_replicas', func.count()))\
                                  .one()
            available_replicas = file_replica.available_replicas
            ds_available_bytes = file_replica.ds_available_bytes
        except NoResultFound:
            pass
        # AVAILABLE only if every file in the dataset has a replica here.
        if available_replicas >= ds_length:
            ds_replica_state = ReplicaState.AVAILABLE
        else:
            ds_replica_state = ReplicaState.UNAVAILABLE
        if old_available_replicas > 0 and available_replicas == 0:
            # Last available file disappeared: drop the collection replica row.
            session.query(models.CollectionReplica).filter_by(scope=update_request['scope'],
                                                              name=update_request['name'],
                                                              rse_id=update_request['rse_id'])\
                   .delete()
        else:
            # Refresh the counters on the existing collection replica row.
            updated_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'],
                                                                                name=update_request['name'],
                                                                                rse_id=update_request['rse_id'])\
                                     .one()
            updated_replica.state = ds_replica_state
            updated_replica.available_replicas_cnt = available_replicas
            updated_replica.length = ds_length
            updated_replica.bytes = ds_bytes
            updated_replica.available_bytes = ds_available_bytes
    else:
        # Check all dataset replicas
        association = session.query(models.DataIdentifierAssociation)\
                             .filter_by(scope=update_request['scope'],
                                        name=update_request['name'])\
                             .with_entities(label('ds_length', func.count()),
                                            label('ds_bytes', func.sum(models.DataIdentifierAssociation.bytes)))\
                             .one()
        ds_length = association.ds_length
        ds_bytes = association.ds_bytes
        ds_replica_state = None
        collection_replicas = session.query(models.CollectionReplica)\
                                     .filter_by(scope=update_request['scope'], name=update_request['name'])\
                                     .all()
        # First refresh length/bytes on every collection replica of the dataset.
        for collection_replica in collection_replicas:
            if ds_length:
                collection_replica.length = ds_length
            else:
                collection_replica.length = 0
            if ds_bytes:
                collection_replica.bytes = ds_bytes
            else:
                collection_replica.bytes = 0
        # Then recompute per-RSE availability from the file replicas.
        file_replicas = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\
                               .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope,
                                       models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name,
                                       models.DataIdentifierAssociation.name == update_request['name'],
                                       models.RSEFileAssociation.state == ReplicaState.AVAILABLE,
                                       update_request['scope'] == models.DataIdentifierAssociation.scope)\
                               .with_entities(models.RSEFileAssociation.rse_id,
                                              label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)),
                                              label('available_replicas', func.count()))\
                               .group_by(models.RSEFileAssociation.rse_id)\
                               .all()
        for file_replica in file_replicas:
            if file_replica.available_replicas >= ds_length:
                ds_replica_state = ReplicaState.AVAILABLE
            else:
                ds_replica_state = ReplicaState.UNAVAILABLE
            collection_replica = session.query(models.CollectionReplica)\
                                        .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=file_replica.rse_id)\
                                        .first()
            if collection_replica:
                collection_replica.state = ds_replica_state
                collection_replica.available_replicas_cnt = file_replica.available_replicas
                collection_replica.available_bytes = file_replica.ds_available_bytes
    # The update request has been processed: remove it from the queue table.
    session.query(models.UpdatedCollectionReplica).filter_by(id=update_request['id']).delete()
@read_session
def get_bad_pfns(limit=10000, thread=None, total_threads=None, session=None):
    """
    Returns a list of bad PFNs, oldest first.

    :param limit: The maximum number of replicas returned.
    :param thread: The assigned thread for this minos instance.
    :param total_threads: The total number of minos threads.
    :param session: The database session in use.
    returns: list of PFNs {'pfn': pfn, 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at}
    """
    result = []
    query = session.query(models.BadPFNs.path, models.BadPFNs.state, models.BadPFNs.reason, models.BadPFNs.account, models.BadPFNs.expires_at)
    query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='path')
    # Bug fix: order_by() returns a NEW query object; the previous code called
    # it without assigning the result, so the ordering was silently discarded.
    query = query.order_by(models.BadPFNs.created_at)
    query = query.limit(limit)
    for path, state, reason, account, expires_at in query.yield_per(1000):
        # Normalize the stored path to a canonical SURL before returning it.
        result.append({'pfn': clean_surls([str(path)])[0], 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at})
    return result
@transactional_session
def bulk_add_bad_replicas(replicas, account, state=BadFilesStatus.TEMPORARY_UNAVAILABLE, reason=None, expires_at=None, session=None):
    """
    Bulk add new bad replicas.

    :param replicas: the list of bad replicas (dicts with 'scope', 'name', 'rse_id').
    :param account: The account who declared the bad replicas.
    :param state: The state of the file (SUSPICIOUS, BAD or TEMPORARY_UNAVAILABLE).
    :param reason: Free-text reason stored with each bad replica.
    :param expires_at: Expiry timestamp for TEMPORARY_UNAVAILABLE declarations.
    :param session: The database session in use.
    :returns: True is successful.
    """
    for replica in replicas:
        insert_new_row = True
        if state == BadFilesStatus.TEMPORARY_UNAVAILABLE:
            # For TEMPORARY_UNAVAILABLE, refresh an existing row instead of
            # inserting a duplicate for the same replica/state.
            query = session.query(models.BadReplicas).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], state=state)
            if query.count():
                query.update({'state': BadFilesStatus.TEMPORARY_UNAVAILABLE, 'updated_at': datetime.utcnow(), 'account': account, 'reason': reason, 'expires_at': expires_at}, synchronize_session=False)
                insert_new_row = False
        if insert_new_row:
            new_bad_replica = models.BadReplicas(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], reason=reason,
                                                 state=state, account=account, bytes=None, expires_at=expires_at)
            # flush=False: defer the DB round-trip to the single flush below.
            new_bad_replica.save(session=session, flush=False)
    try:
        session.flush()
    except IntegrityError as error:
        raise exception.RucioException(error.args)
    except DatabaseError as error:
        raise exception.RucioException(error.args)
    except FlushError as error:
        # Translate the identity-key conflict into the domain-specific error.
        if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]):
            raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!')
        raise exception.RucioException(error.args)
    return True
@transactional_session
def bulk_delete_bad_pfns(pfns, session=None):
    """
    Bulk delete bad PFNs.

    :param pfns: the list of new files.
    :param session: The database session in use.

    :returns: True is successful.
    """
    # Delete in batches of 100 to keep the OR-clause size manageable.
    conditions = [models.BadPFNs.path == pfn for pfn in pfns]
    for batch in chunks(conditions, 100):
        session.query(models.BadPFNs).filter(or_(*batch)).delete(synchronize_session=False)
    return True
@transactional_session
def bulk_delete_bad_replicas(bad_replicas, session=None):
    """
    Bulk delete bad replica.

    :param bad_replicas: The list of bad replicas to delete (Dictionaries).
    :param session: The database session in use.

    :returns: True is successful.
    """
    # One AND-condition per replica, OR-ed together in batches of 100.
    conditions = [and_(models.BadReplicas.scope == replica['scope'],
                       models.BadReplicas.name == replica['name'],
                       models.BadReplicas.rse_id == replica['rse_id'],
                       models.BadReplicas.state == replica['state'])
                  for replica in bad_replicas]
    for batch in chunks(conditions, 100):
        session.query(models.BadReplicas).filter(or_(*batch)).delete(synchronize_session=False)
    return True
@transactional_session
def add_bad_pfns(pfns, account, state, reason=None, expires_at=None, session=None):
    """
    Add bad PFNs.

    :param pfns: the list of new files.
    :param account: The account who declared the bad replicas.
    :param state: One of the possible states : BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE.
    :param reason: A string describing the reason of the loss.
    :param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files.
    :param session: The database session in use.

    :returns: True is successful.
    """
    # Accept the state either as an enum member or as its string name.
    rep_state = BadPFNStatus[state] if isinstance(state, string_types) else state
    for pfn in clean_surls(pfns):
        bad_pfn = models.BadPFNs(path=str(pfn), account=account, state=rep_state,
                                 reason=reason, expires_at=expires_at)
        # merge() avoids duplicate-key failures for PFNs already present.
        session.merge(bad_pfn).save(session=session, flush=False)
    try:
        session.flush()
    except (IntegrityError, DatabaseError) as error:
        raise exception.RucioException(error.args)
    except FlushError as error:
        if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]):
            raise exception.Duplicate('One PFN already exists!')
        raise exception.RucioException(error.args)
    return True
@read_session
def list_expired_temporary_unavailable_replicas(total_workers, worker_number, limit=10000, session=None):
    """
    List the expired temporary unavailable replicas.

    :param total_workers: Number of total workers.
    :param worker_number: id of the executing worker.
    :param limit: The maximum number of replicas returned.
    :param session: The database session in use.
    """
    # BUGFIX: the Oracle index hint targets the bad_replicas table, so the
    # hinted selectable must be models.BadReplicas. The previous code passed
    # models.ReplicationRule, which is not part of this query, so the hint
    # was silently ignored.
    query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\
        filter(models.BadReplicas.state == BadFilesStatus.TEMPORARY_UNAVAILABLE).\
        filter(models.BadReplicas.expires_at < datetime.utcnow()).\
        with_hint(models.BadReplicas, "index(bad_replicas BAD_REPLICAS_EXPIRES_AT_IDX)", 'oracle').\
        order_by(models.BadReplicas.expires_at)
    query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name')
    query = query.limit(limit)
    return query.all()
@read_session
def get_replicas_state(scope=None, name=None, session=None):
    """
    Method used by the necromancer to get all the replicas of a DIDs.

    :param scope: The scope of the file.
    :param name: The name of the file.
    :param session: The database session in use.

    :returns: A dictionary with the list of states as keys and the rse_ids as value
    """
    rows = session.query(models.RSEFileAssociation.rse_id,
                         models.RSEFileAssociation.state).filter_by(scope=scope, name=name)
    states = {}
    # Group the rse_ids by replica state.
    for rse_id, state in rows.all():
        states.setdefault(state, []).append(rse_id)
    return states
@read_session
def get_suspicious_files(rse_expression, filter=None, **kwargs):
    """
    Gets a list of replicas from bad_replicas table which are: declared more than <nattempts> times since <younger_than> date,
    present on the RSE specified by the <rse_expression> and do not have a state in <exclude_states> list.
    Selected replicas can also be required to be <available_elsewhere> on another RSE than the one declared in bad_replicas table and/or
    be declared as <is_suspicious> in the bad_replicas table.

    Keyword Arguments:
    :param younger_than: Datetime object to select the replicas which were declared since younger_than date. Default value = 10 days ago.
    :param nattempts: The minimum number of replica appearances in the bad_replica DB table from younger_than date. Default value = 0.
    :param rse_expression: The RSE expression where the replicas are located.
    :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True}
    :param: exclude_states: List of states which eliminates replicas from search result if any of the states in the list
                            was declared for a replica since younger_than date. Allowed values
                            = ['B', 'R', 'D', 'L', 'T', 'S'] (meaning 'BAD', 'RECOVERED', 'DELETED', 'LOST', 'TEMPORARY_UNAVAILABLE', 'SUSPICIOUS').
    :param: available_elsewhere: If True, only replicas declared in addition as AVAILABLE on another RSE
                                 than the one in the bad_replicas table will be taken into account. Default value = False.
    :param: is_suspicious: If True, only replicas declared as SUSPICIOUS in bad replicas table will be taken into account. Default value = False.
    :param session: The database session in use. Default value = None.

    :returns: a list of replicas:
    [{'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, cnt': cnt, 'created_at': created_at}, ...]
    """
    younger_than = kwargs.get("younger_than", datetime.now() - timedelta(days=10))
    nattempts = kwargs.get("nattempts", 0)
    session = kwargs.get("session", None)
    exclude_states = kwargs.get("exclude_states", ['B', 'R', 'D'])
    available_elsewhere = kwargs.get("available_elsewhere", False)
    is_suspicious = kwargs.get("is_suspicious", False)

    # only for the 2 web api used parameters, checking value types and assigning the default values
    if not isinstance(nattempts, int):
        nattempts = 0
    if not isinstance(younger_than, datetime):
        younger_than = datetime.now() - timedelta(days=10)

    # assembling exclude_states_clause
    exclude_states_clause = []
    for state in exclude_states:
        exclude_states_clause.append(BadFilesStatus(state))

    # making aliases for bad_replicas and replicas tables
    bad_replicas_alias = aliased(models.BadReplicas, name='bad_replicas_alias')
    replicas_alias = aliased(models.RSEFileAssociation, name='replicas_alias')

    # assembling the selection rse_clause
    rse_clause = []
    if rse_expression:
        parsedexp = parse_expression(expression=rse_expression, filter=filter, session=session)
        for rse in parsedexp:
            rse_clause.append(models.RSEFileAssociation.rse_id == rse['id'])

    # query base
    query = session.query(func.count(), bad_replicas_alias.scope, bad_replicas_alias.name, models.RSEFileAssociation.rse_id, func.min(models.RSEFileAssociation.created_at))\
        .filter(models.RSEFileAssociation.rse_id == bad_replicas_alias.rse_id,
                models.RSEFileAssociation.scope == bad_replicas_alias.scope,
                models.RSEFileAssociation.name == bad_replicas_alias.name,
                bad_replicas_alias.created_at >= younger_than)
    if is_suspicious:
        # BUGFIX: Query.filter() is generative -- the result must be
        # re-assigned, otherwise the SUSPICIOUS restriction was silently
        # dropped and is_suspicious=True had no effect.
        query = query.filter(bad_replicas_alias.state == BadFilesStatus.SUSPICIOUS)
    if rse_clause:
        query = query.filter(or_(*rse_clause))
    if available_elsewhere:
        available_replica = exists(select([1]).where(and_(replicas_alias.state == ReplicaState.AVAILABLE,
                                                          replicas_alias.scope == bad_replicas_alias.scope,
                                                          replicas_alias.name == bad_replicas_alias.name,
                                                          replicas_alias.rse_id != bad_replicas_alias.rse_id)))
        query = query.filter(available_replica)

    # it is required that the selected replicas
    # do not occur as BAD/DELETED/LOST/RECOVERED/...
    # in the bad_replicas table during the same time window.
    other_states_present = exists(select([1]).where(and_(models.BadReplicas.scope == bad_replicas_alias.scope,
                                                         models.BadReplicas.name == bad_replicas_alias.name,
                                                         models.BadReplicas.created_at >= younger_than,
                                                         models.BadReplicas.rse_id == bad_replicas_alias.rse_id,
                                                         models.BadReplicas.state.in_(exclude_states_clause))))
    query = query.filter(not_(other_states_present))

    # finally, the results are grouped by RSE, scope, name and required to have
    # at least 'nattempts' occurrences in the result of the query per replica
    query_result = query.group_by(models.RSEFileAssociation.rse_id, bad_replicas_alias.scope, bad_replicas_alias.name).having(func.count() > nattempts).all()

    # translating the rse_id to RSE name and assembling the return list of dictionaries
    result = []
    rses = {}
    for cnt, scope, name, rse_id, created_at in query_result:
        if rse_id not in rses:
            rse = get_rse_name(rse_id=rse_id, session=session)
            rses[rse_id] = rse
        result.append({'scope': scope, 'name': name, 'rse': rses[rse_id], 'rse_id': rse_id, 'cnt': cnt, 'created_at': created_at})
    return result
@transactional_session
def set_tombstone(rse_id, scope, name, tombstone=OBSOLETE, session=None):
    """
    Sets a tombstone on a replica.

    :param rse_id: ID of RSE.
    :param scope: scope of the replica DID.
    :param name: name of the replica DID.
    :param tombstone: the tombstone to set. Default is OBSOLETE
    :param session: database session in use.
    """
    # Only tombstone the replica when no replica lock exists for it.
    unlocked = ~exists().where(
        and_(
            models.ReplicaLock.rse_id == rse_id,
            models.ReplicaLock.name == name,
            models.ReplicaLock.scope == scope,
        )
    )
    rowcount = session.query(models.RSEFileAssociation) \
        .filter(
            and_(
                models.RSEFileAssociation.rse_id == rse_id,
                models.RSEFileAssociation.name == name,
                models.RSEFileAssociation.scope == scope,
                unlocked,
            )
        ) \
        .with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle') \
        .update({models.RSEFileAssociation.tombstone: tombstone}, synchronize_session=False)
    if rowcount == 0:
        # Nothing was updated: distinguish a locked replica from a missing one.
        try:
            session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).one()
        except NoResultFound:
            raise exception.ReplicaNotFound('Replica %s:%s on RSE %s could not be found.' % (scope, name, get_rse_name(rse_id=rse_id, session=session)))
        raise exception.ReplicaIsLocked('Replica %s:%s on RSE %s is locked.' % (scope, name, get_rse_name(rse_id=rse_id, session=session)))
@read_session
def get_RSEcoverage_of_dataset(scope, name, session=None):
    """
    Get total bytes present on RSEs.

    :param scope: Scope of the dataset
    :param name: Name of the dataset
    :param session: The db session.
    :return: Dictionary { rse_id : <total bytes present at rse_id> }
    """
    query = session.query(models.RSEFileAssociation.rse_id,
                          func.sum(models.DataIdentifierAssociation.bytes)) \
        .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope,
                     models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name,
                     models.DataIdentifierAssociation.scope == scope,
                     models.DataIdentifierAssociation.name == name,
                     models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)) \
        .group_by(models.RSEFileAssociation.rse_id)
    # Drop RSEs whose summed byte count is falsy (0 or NULL).
    return {rse_id: total for rse_id, total in query if total}
| lib/rucio/core/replica.py | 171,357 | Bulk add new dids.
:param dids: the list of files.
:param account: The account owner.
:param session: The database session in use.
:returns: True is successful.
Bulk add new dids.
:param dids: the list of new files.
:param account: The account owner.
:param session: The database session in use.
:returns: True is successful.
Bulk add new dids.
:param rse_id: the RSE id.
:param dids: the list of files.
:param account: The account owner.
:param session: The database session in use.
:returns: True is successful.
Perform update of collections/archive associations/dids after the removal of their replicas
:param rse_id: the rse id
:param files: list of files whose replica got deleted
:param session: The database session in use.
Declare a list of bad replicas.
:param pfns: The list of PFNs.
:param rse_id: The RSE id.
:param reason: The reason of the loss.
:param issuer: The issuer account.
:param status: Either BAD or SUSPICIOUS.
:param scheme: The scheme of the PFNs.
:param session: The database session in use.
Internal method to check if a replica exists at a given site.
:param rse_id: The RSE id.
:param scope: The scope of the file.
:param name: The name of the file.
:param path: The path of the replica.
:param session: The database session in use.
List file replicas for archive constituents.
List file replicas for a list of datasets.
:param session: The database session in use.
List file replicas for a list of files.
:param session: The database session in use.
Select n random elements from the generator
Resolve list of DIDs into a list of conditions.
:param dids: The list of data identifiers (DIDs).
:param unavailable: (deprecated) Also include unavailable replicas in the list.
:param ignore_availability: Ignore the RSE blocklisting.
:param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary.
:param resolve_archives: When set to true, find archives which contain the replicas.
:param session: The database session in use.
Declare a list of bad replicas.
:param dids: The list of DIDs.
:param rse_id: The RSE id.
:param reason: The reason of the loss.
:param issuer: The issuer account.
:param state: BadFilesStatus.BAD
:param session: The database session in use.
Add bad PFNs.
:param pfns: the list of new files.
:param account: The account who declared the bad replicas.
:param state: One of the possible states : BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE.
:param reason: A string describing the reason of the loss.
:param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files.
:param session: The database session in use.
:returns: True is successful.
Add File replica.
:param rse_id: the rse id.
:param scope: the scope name.
:param name: The data identifier name.
:param bytes: the size of the file.
:param account: The account owner.
:param md5: The md5 checksum.
:param adler32: The adler32 checksum.
:param pfn: Physical file name (for nondeterministic rse).
:param meta: Meta-data associated with the file. Represented as key/value pairs in a dictionary.
:param rules: Replication rules associated with the file. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ].
:param tombstone: If True, create replica with a tombstone.
:param session: The database session in use.
:returns: True is successful.
Bulk add file replicas.
:param rse_id: The RSE id.
:param files: The list of files.
:param account: The account owner.
:param ignore_availability: Ignore the RSE blocklisting.
:param session: The database session in use.
:returns: True is successful.
Bulk add new bad replicas.
:param replicas: the list of bad replicas.
:param account: The account who declared the bad replicas.
:param state: The state of the file (SUSPICIOUS, BAD or TEMPORARY_UNAVAILABLE).
:param session: The database session in use.
:returns: True is successful.
Bulk delete bad PFNs.
:param pfns: the list of new files.
:param session: The database session in use.
:returns: True is successful.
Bulk delete bad replica.
:param bad_replicas: The list of bad replicas to delete (Dictionaries).
:param session: The database session in use.
:returns: True is successful.
Declare a list of bad replicas.
:param pfns: The list of PFNs.
:param reason: The reason of the loss.
:param issuer: The issuer account.
:param status: The status of the file (SUSPICIOUS or BAD).
:param session: The database session in use.
Delete file replicas.
:param rse_id: the rse id.
:param files: the list of files to delete.
:param ignore_availability: Ignore the RSE blocklisting.
:param session: The database session in use.
Get total bytes present on RSEs
:param scope: Scope of the dataset
:param name: Name of the dataset
:param session: The db session.
:return: Dictionary { rse_id : <total bytes present at rse_id> }
Get file replicas for a specific scope:name.
:param scope: The scope of the did.
:param name: The name of the did.
:param nowait: Nowait parameter for the FOR UPDATE statement
:param restrict_rses: Possible RSE_ids to filter on.
:param session: The db session in use.
:returns: List of SQLAlchemy Replica Objects
Get file replicas for all files of a dataset.
:param scope: The scope of the dataset.
:param name: The name of the dataset.
:param nowait: Nowait parameter for the FOR UPDATE statement
:param restrict_rses: Possible RSE_ids to filter on.
:param total_threads: Total threads
:param thread_id: This thread
:param session: The db session in use.
:returns: (files in dataset, replicas in dataset)
Returns a list of bad PFNs
:param limit: The maximum number of replicas returned.
:param thread: The assigned thread for this minos instance.
:param total_threads: The total number of minos threads.
:param session: The database session in use.
returns: list of PFNs {'pfn': pfn, 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at}
List the bad file replicas summary. Method used by the rucio-ui.
:param rse_expression: The RSE expression.
:param from_date: The start date.
:param to_date: The end date.
:param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True}
:param session: The database session in use.
Get update request for collection replicas.
:param total_workers: Number of total workers.
:param worker_number: id of the executing worker.
:param limit: Maximum number to return.
:param session: Database session in use.
:returns: List of update requests for collection replicas.
Get the DIDs associated to a PFN on one given RSE
:param pfns: The list of PFNs.
:param rse_id: The RSE id.
:param vo: The VO to get DIDs from.
:param session: The database session in use.
:returns: A dictionary {pfn: {'scope': scope, 'name': name}}
for a given cache site and filename, return address of the cache node that
should be prefixed.
:param cache_site: Cache site
:param filename: Filename
Get the RSE associated to a list of PFNs.
:param pfns: The list of pfn.
:param vo: The VO to find RSEs at.
:param session: The database session in use.
:returns: a tuple : scheme, {rse1 : [pfn1, pfn2, ...], rse2: [pfn3, pfn4, ...]}, {'unknown': [pfn5, pfn6, ...]}.
Get File replica.
:param rse_id: The RSE Id.
:param scope: the scope name.
:param name: The data identifier name.
:param session: The database session in use.
:returns: A dictionary with the list of replica attributes.
Get the accessed_at timestamp for a replica. Just for testing.
:param replicas: List of dictionaries {scope, name, rse_id, path}
:param session: Database session to use.
:returns: A datetime timestamp with the last access time.
Method used by the necromancer to get all the replicas of a DIDs
:param scope: The scope of the file.
:param name: The name of the file.
:param session: The database session in use.
:returns: A dictionary with the list of states as keys and the rse_ids as value
Get source replicas for a specific scope:name.
:param scope: The scope of the did.
:param name: The name of the did.
:param source_rses: Possible RSE_ids to filter on.
:param session: The db session in use.
:returns: List of SQLAlchemy Replica Objects
Get file replicas for all files of a dataset.
:param scope: The scope of the dataset.
:param name: The name of the dataset.
:param source_rses: Possible source RSE_ids to filter on.
:param total_threads: Total threads
:param thread_id: This thread
:param session: The db session in use.
:returns: (files in dataset, replicas in dataset)
Gets a list of replicas from bad_replicas table which are: declared more than <nattempts> times since <younger_than> date,
present on the RSE specified by the <rse_expression> and do not have a state in <exclude_states> list.
Selected replicas can also be required to be <available_elsewhere> on another RSE than the one declared in bad_replicas table and/or
be declared as <is_suspicious> in the bad_replicas table.
Keyword Arguments:
:param younger_than: Datetime object to select the replicas which were declared since younger_than date. Default value = 10 days ago.
:param nattempts: The minimum number of replica appearances in the bad_replica DB table from younger_than date. Default value = 0.
:param rse_expression: The RSE expression where the replicas are located.
:param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True}
:param: exclude_states: List of states which eliminates replicas from search result if any of the states in the list
was declared for a replica since younger_than date. Allowed values
= ['B', 'R', 'D', 'L', 'T', 'S'] (meaning 'BAD', 'RECOVERED', 'DELETED', 'LOST', 'TEMPORARY_UNAVAILABLE', 'SUSPICIOUS').
:param: available_elsewhere: If True, only replicas declared in addition as AVAILABLE on another RSE
than the one in the bad_replicas table will be taken into account. Default value = False.
:param: is_suspicious: If True, only replicas declared as SUSPICIOUS in bad replicas table will be taken into account. Default value = False.
:param session: The database session in use. Default value = None.
:returns: a list of replicas:
[{'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, cnt': cnt, 'created_at': created_at}, ...]
VP endpoint is the Virtual Placement server.
Once VP is integrated in Rucio it won't be needed.
List RSE File replicas with no locks.
:param limit: Number of replicas returned.
:param bytes: The amount of needed bytes.
:param rse_id: The rse_id.
:param delay_seconds: The delay to query replicas in BEING_DELETED state
:param only_delete_obsolete If set to True, will only return the replicas with EPOCH tombstone
:param session: The database session in use.
:returns: a list of dictionary replica.
List RSE File replicas with no locks.
:param limit: The maximum number of replicas returned.
:param thread: The assigned thread for this necromancer.
:param total_threads: The total number of threads of all necromancers.
:param session: The database session in use.
:returns: a list of dictionary {'scope' scope, 'name': name, 'rse_id': rse_id, 'rse': rse}.
List the bad file replicas history. Method only used by necromancer
:param limit: The maximum number of replicas returned.
:param thread: The assigned thread for this necromancer.
:param total_threads: The total number of threads of all necromancers.
:param session: The database session in use.
List the bad file replicas history states. Method used by the rucio-ui.
:param state: The state of the file (SUSPICIOUS or BAD).
:param rse_id: The RSE id.
:param younger_than: datetime object to select bad replicas younger than this date.
:param older_than: datetime object to select bad replicas older than this date.
:param limit: The maximum number of replicas returned.
:param vo: The VO to find replicas from.
:param session: The database session in use.
:param scope: The scope of the dataset.
:param name: The name of the dataset.
:param deep: Lookup at the file level.
:param session: Database session to use.
:returns: A list of dictionaries containing the dataset replicas
with associated metrics and timestamps
:param names_by_intscope: The dictionary of internal scopes pointing at the list of names.
:param session: Database session to use.
:returns: A list of dictionaries containing the dataset replicas
with associated metrics and timestamps
List dataset replicas for a DID (scope:name) using the
Virtual Placement service.
NOTICE: This is an RnD function and might change or go away at any time.
:param scope: The scope of the dataset.
:param name: The name of the dataset.
:param deep: Lookup at the file level.
:param session: Database session to use.
:returns: If VP exists and there is at least one non-TAPE replica, returns a list of dicts of sites
List datasets at a RSE.
:param rse: the rse id.
:param filters: dictionary of attributes by which the results should be filtered.
:param limit: limit number.
:param session: Database session to use.
:returns: A list of dict dataset replicas
List the expired temporary unavailable replicas
:param total_workers: Number of total workers.
:param worker_number: id of the executing worker.
:param limit: The maximum number of replicas returned.
:param session: The database session in use.
List file replicas for a list of data identifiers (DIDs).
:param dids: The list of data identifiers (DIDs).
:param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...)
:param unavailable: (deprecated) Also include unavailable replicas in the list.
:param request_id: ID associated with the request for debugging.
:param ignore_availability: Ignore the RSE blocklisting.
:param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary.
:param rse_expression: The RSE expression to restrict list_replicas on a set of RSEs.
:param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'}
:param domain: The network domain for the call, either None, 'wan' or 'lan'. None is automatic mode, 'all' is both ['lan','wan']
:param sign_urls: If set, will sign the PFNs if necessary.
:param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN.
:param resolve_archives: When set to true, find archives which contain the replicas.
:param resolve_parents: When set to true, find all parent datasets which contain the replicas.
:param updated_after: datetime (UTC time), only return replicas updated after this time
:param session: The database session in use.
Sets a tombstone on a replica.
:param rse_id: ID of RSE.
:param scope: scope of the replica DID.
:param name: name of the replica DID.
:param tombstone: the tombstone to set. Default is OBSOLETE
:param session: database session in use.
Update the accessed_at timestamp of the given collection replicas.
:param collection_replicas: the list of collection replicas.
:param session: The database session in use.
:returns: True, if successful, False otherwise.
Update the accessed_at timestamp of the given file replica/did but don't wait if row is locked.
:param replica: a dictionary with the information of the affected replica.
:param session: The database session in use.
:returns: True, if successful, False otherwise.
Update the bad file replicas history. Method only used by necromancer
:param dids: The list of DIDs.
:param rse_id: The rse_id.
:param session: The database session in use.
Update a collection replica.
:param update_request: update request from the updated_col_rep table.
Update File replica information and state.
:param rse_id: the rse id.
:param scope: the tag name.
:param name: The data identifier name.
:param state: The state.
:param session: The database session in use.
Update File replica information and state.
:param replicas: The list of replicas.
:param nowait: Nowait parameter for the for_update queries.
:param session: The database session in use.
-*- coding: utf-8 -*- Copyright 2013-2021 CERN Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Authors: - Vincent Garonne <vincent.garonne@cern.ch>, 2013-2018 - Cedric Serfon <cedric.serfon@cern.ch>, 2013-2020 - Ralph Vigne <ralph.vigne@cern.ch>, 2013-2014 - Martin Barisits <martin.barisits@cern.ch>, 2013-2021 - Mario Lassnig <mario.lassnig@cern.ch>, 2014-2021 - David Cameron <david.cameron@cern.ch>, 2014 - Thomas Beermann <thomas.beermann@cern.ch>, 2014-2021 - Wen Guan <wen.guan@cern.ch>, 2014-2015 - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019 - Dimitrios Christidis <dimitrios.christidis@cern.ch>, 2019-2021 - Robert Illingworth <illingwo@fnal.gov>, 2019 - James Perry <j.perry@epcc.ed.ac.uk>, 2019 - Jaroslav Guenther <jaroslav.guenther@cern.ch>, 2019 - Andrew Lister <andrew.lister@stfc.ac.uk>, 2019 - Ilija Vukotic <ivukotic@cern.ch>, 2020-2021 - Brandon White <bjwhite@fnal.gov>, 2019 - Tomas Javurek <tomas.javurek@cern.ch>, 2020 - Luc Goossens <luc.goossens@cern.ch>, 2020 - Eli Chadwick <eli.chadwick@stfc.ac.uk>, 2020 - Patrick Austin <patrick.austin@stfc.ac.uk>, 2020 - Eric Vaandering <ewv@fnal.gov>, 2020-2021 - Benedikt Ziemons <benedikt.ziemons@cern.ch>, 2020-2021 - Radu Carpa <radu.carpa@cern.ch>, 2021 - Gabriele Fronzé <sucre.91@hotmail.it>, 2021 Ensure we limit results to current VO even if we don't specify an RSE expression To be added : HINTS Now we check that the replica is not already declared bad Check if the replica is still there If yes, and replica state is AVAILABLE, update 
BadReplicas If the replica state is not AVAILABLE check if other replicas for the same file are still there. No replicas are available for this file. Reset the replica state to BAD Here that means that the file has not been processed by the necro. Just pass We end-up here if the replica is not registered anymore on the RSE If yes, the final state depends on DIDAvailability For completness, it shouldn't happen. If no, the replica is marked as LOST in BadFilesStatus WARNING : this part is ATLAS specific and must be changed For BAD file, we modify the replica state, not for suspicious there shouldn't be any exceptions since all replicas exist For BAD file, we modify the replica state, not for suspicious there shouldn't be any exceptions since all replicas exist The filter(text...)) is needed otherwise, SQLA uses bind variables and the index is not used. WARNING : this part is ATLAS specific and must be changed Accumulate all the dids which were requested explicitly (not via a container/dataset). If any replicas for these dids will be found latter, the associated did will be removed from the list, leaving, at the end, only the requested dids which didn't have any replicas at all. pylint: disable=no-member Container pass-through the data unchanged A "reservoir sampling" algorithm: Copy the N first files from the generator. After that, following element may be picked to substitute one of the previously selected element with a probability which decreases as the number of encountered elements grows. 
iterator which merges multiple sorted replica sources into a combine sorted result without loading everything into the memory sort by scope, name we need to retain knowledge of the original domain selection by the user in case we have to loop over replicas with a potential outgoing proxy find all RSEs local to the client's location in autoselect mode (i.e., when domain is None) do not hard fail if site cannot be resolved or is empty reset the domain selection to original user's choice (as this could get overwritten each iteration) assign scheme priorities, and don't forget to exclude disabled protocols 0 in RSE protocol definition = disabled, 1 = highest priority select the lan door in autoselect mode, otherwise use the wan door no need to be verbose no need to be verbose get pfns If the current "replica" is a constituent inside an archive, we must construct the pfn for the parent (archive) file and append the xrdcl.unzip query string to it. PFN is cachable No cache entry scope:name found for this protocol do we need to sign the URLs? server side root proxy handling if location is set. supports root and http destinations cannot be pushed into protocols because we need to lookup rse attributes. ultra-conservative implementation. is the RSE site-configured? does it match with the client? if not, it's an outgoing connection therefore the internal proxy must be prepended print('client', client_location['site'], 'has cache:', cache_site) print('filename', name) print('site:', client_location['site'], 'has no cache') print('lets check if it has defined an internal root proxy ') section option empty string to circumvent exception TODO: XCache does not seem to grab signed URLs. Doublecheck with XCache devs. For now -> skip prepending XCache for GCS. 
ATLAS HACK don't forget to mangle gfal-style davs URL into generic https URL PFNs don't have concepts, therefore quickly encapsulate in a tuple ('pfn', 'domain', 'priority', 'client_extract') xroot supports downloading files directly from inside an archive. Disable client_extract and prioritize xroot. never end up here extract properly the pfn from the tuple quick exit, but don't forget to set the total order for the priority --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically and use 1-indexing to be compatible with metalink extract properly the pfn from the tuple set the total order for the priority --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically and use 1-indexing to be compatible with metalink don't forget to resolve parents for the last replica also sort the pfns inside the rse structure Tolerate None for tombstone_delay Check for the replicas already available {scheme: [pfns], scheme: [pfns]} Check that the pfns match to the expected pfns Check wan first Check lan Registration always with wan WARNING : This should not be necessary since that would mean the replica is used as a source. Decrease RSE counter Schedule update of all collections containing this file and having a collection replica in the RSE If the file doesn't have any replicas anymore, we should perform cleanups of objects related to this file. However, if the file is "lost", it's removal wasn't intentional, so we want to skip deleting the metadata here. 
Perform cleanups: 1) schedule removal of this file from all parent datasets 2) schedule removal of this file from the DID table 3) if the file is an archive, schedule cleanup on the files from inside the archive Get all collection_replicas at RSE, insert them into UpdatedCollectionReplica Delete did from the content for the last did Schedule removal of child file/dataset/container from the parent dataset/container Schedule setting is_archive = False on parents which don't have any children with is_archive == True anymore If the parent dataset/container becomes empty as a result of the child removal (it was the last children), metadata cleanup has to be done: 1) Schedule to remove the replicas of this empty collection NOQA 2) Schedule removal of this empty collection from its own parent collections 3) Schedule removal of the entry from the DIDs table NOQA get the list of modified parent scope, name Update incomplete state delete empty dids Remove Archive Constituents Remove rules in Waiting for approval or Suspended Remove DID Metadata Set is_archive = false on collections which don't have archive children anymore Hack to get pep8 happy... Check if more than one replica is available If this is the last replica, check if there are some requests remember scope, name and rse Exclude replicas use as sources Get content Get replicas and lock them find maximum values find archives that contain files of the requested dataset count the metrics bring it in the same column state as the non-archive query find the non-archive dataset replicas join everything together chunk size refers to the number of different scopes, see above force limit reply size check that there is at least one regular replica that is not on tape and has a protocol with scheme "root" and can be accessed from WAN PostgreSQL escapes automatically hints ? 
Delete update requests which do not have collection_replicas NOQA: W503 NOQA: W503 Delete duplicates Check one specific dataset replica Check all dataset replicas only for the 2 web api used parameters, checking value types and assigning the default values assembling exclude_states_clause making aliases for bad_replicas and replicas tables assembling the selection rse_clause query base it is required that the selected replicas do not occur as BAD/DELETED/LOST/RECOVERED/... in the bad_replicas table during the same time window. finally, the results are grouped by RSE, scope, name and required to have at least 'nattempts' occurrences in the result of the query per replica print(query) translating the rse_id to RSE name and assembling the return list of dictionaries | 25,928 | en | 0.768203 |
import numpy as np
import pytest
from sklearn.datasets import make_classification, make_regression
# To use this experimental feature, we need to explicitly ask for it:
from sklearn.experimental import enable_hist_gradient_boosting # noqa
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.ensemble import HistGradientBoostingClassifier
# Small synthetic datasets shared by the parametrized tests below;
# the fixed random_state keeps every test run reproducible.
X_classification, y_classification = make_classification(random_state=0)
X_regression, y_regression = make_regression(random_state=0)
@pytest.mark.parametrize('GradientBoosting, X, y', [
    (HistGradientBoostingClassifier, X_classification, y_classification),
    (HistGradientBoostingRegressor, X_regression, y_regression)
])
@pytest.mark.parametrize(
    'params, err_msg',
    [
        ({'loss': 'blah'}, 'Loss blah is not supported for'),
        ({'learning_rate': 0}, 'learning_rate=0 must be strictly positive'),
        ({'learning_rate': -1}, 'learning_rate=-1 must be strictly positive'),
        ({'max_iter': 0}, 'max_iter=0 must not be smaller than 1'),
        ({'max_leaf_nodes': 0}, 'max_leaf_nodes=0 should not be smaller than 2'),
        ({'max_leaf_nodes': 1}, 'max_leaf_nodes=1 should not be smaller than 2'),
        ({'max_depth': 0}, 'max_depth=0 should not be smaller than 2'),
        ({'max_depth': 1}, 'max_depth=1 should not be smaller than 2'),
        ({'min_samples_leaf': 0}, 'min_samples_leaf=0 should not be smaller'),
        ({'l2_regularization': -1}, 'l2_regularization=-1 must be positive'),
        ({'max_bins': 1}, 'max_bins=1 should be no smaller than 2 and no larger'),
        ({'max_bins': 257}, 'max_bins=257 should be no smaller than 2 and no'),
        ({'n_iter_no_change': -1}, 'n_iter_no_change=-1 must be positive'),
        ({'validation_fraction': -1}, 'validation_fraction=-1 must be strictly'),
        ({'validation_fraction': 0}, 'validation_fraction=0 must be strictly'),
        ({'tol': -1}, 'tol=-1 must not be smaller than 0'),
    ]
)
def test_init_parameters_validation(GradientBoosting, X, y, params, err_msg):
    # Every invalid hyper-parameter must be rejected with a ValueError
    # whose message matches ``err_msg``; validation happens inside fit().
    estimator = GradientBoosting(**params)
    with pytest.raises(ValueError, match=err_msg):
        estimator.fit(X, y)
def test_invalid_classification_loss():
    # The binary loss is only defined for 2 classes: fitting a 3-class
    # target must raise with a message pointing at the categorical loss.
    clf = HistGradientBoostingClassifier(loss="binary_crossentropy")
    expected = ("loss='binary_crossentropy' is not defined for multiclass "
                "classification with n_classes=3, use "
                "loss='categorical_crossentropy' instead")
    X = np.zeros(shape=(3, 2))
    y = np.arange(3)  # three distinct classes
    with pytest.raises(ValueError, match=expected):
        clf.fit(X, y)
@pytest.mark.parametrize(
    'scoring, validation_fraction, n_iter_no_change, tol', [
        ('neg_mean_squared_error', .1, 5, 1e-7),  # scorer on validation set
        ('neg_mean_squared_error', None, 5, 1e-1),  # scorer on training data
        (None, .1, 5, 1e-7),  # default scorer, validation set
        (None, None, 5, 1e-1),  # default scorer, training data
        ('loss', .1, 5, 1e-7),  # loss on validation set
        ('loss', None, 5, 1e-1),  # loss on training data
        (None, None, None, None),  # early stopping disabled
    ])
def test_early_stopping_regression(scoring, validation_fraction,
                                   n_iter_no_change, tol):
    # Early stopping must trigger (or not) exactly as configured.
    max_iter = 200

    X, y = make_regression(random_state=0)
    estimator = HistGradientBoostingRegressor(
        verbose=1,  # exercises the verbose code path for coverage
        min_samples_leaf=5,  # small leaves overfit quickly
        scoring=scoring,
        tol=tol,
        validation_fraction=validation_fraction,
        max_iter=max_iter,
        n_iter_no_change=n_iter_no_change,
        random_state=0
    )
    estimator.fit(X, y)

    if n_iter_no_change is None:
        # No early stopping: the full iteration budget is consumed.
        assert estimator.n_iter_ == max_iter
    else:
        # Early stopping: training halted before exhausting max_iter.
        assert n_iter_no_change <= estimator.n_iter_ < max_iter
@pytest.mark.parametrize('data', (
    make_classification(random_state=0),
    make_classification(n_classes=3, n_clusters_per_class=1, random_state=0)
))
@pytest.mark.parametrize(
    'scoring, validation_fraction, n_iter_no_change, tol', [
        ('accuracy', .1, 5, 1e-7),  # scorer on validation set
        ('accuracy', None, 5, 1e-1),  # scorer on training data
        (None, .1, 5, 1e-7),  # default scorer, validation set
        (None, None, 5, 1e-1),  # default scorer, training data
        ('loss', .1, 5, 1e-7),  # loss on validation set
        ('loss', None, 5, 1e-1),  # loss on training data
        (None, None, None, None),  # early stopping disabled
    ])
def test_early_stopping_classification(data, scoring, validation_fraction,
                                       n_iter_no_change, tol):
    # Same contract as the regression variant, for both binary and
    # multiclass targets.
    max_iter = 50

    X, y = data
    estimator = HistGradientBoostingClassifier(
        verbose=1,  # exercises the verbose code path for coverage
        min_samples_leaf=5,  # small leaves overfit quickly
        scoring=scoring,
        tol=tol,
        validation_fraction=validation_fraction,
        max_iter=max_iter,
        n_iter_no_change=n_iter_no_change,
        random_state=0
    )
    estimator.fit(X, y)

    if n_iter_no_change is None:
        # No early stopping: the full iteration budget is consumed.
        assert estimator.n_iter_ == max_iter
    else:
        # Early stopping: training halted before exhausting max_iter.
        assert n_iter_no_change <= estimator.n_iter_ < max_iter
@pytest.mark.parametrize(
    'scores, n_iter_no_change, tol, stopping',
    [
        ([], 1, 0.001, False),  # not enough iterations
        ([1, 1, 1], 5, 0.001, False),  # not enough iterations
        ([1, 1, 1, 1, 1], 5, 0.001, False),  # not enough iterations
        ([1, 2, 3, 4, 5, 6], 5, 0.001, False),  # significant improvement
        ([1, 2, 3, 4, 5, 6], 5, 0., False),  # significant improvement
        ([1, 2, 3, 4, 5, 6], 5, 0.999, False),  # significant improvement
        ([1, 2, 3, 4, 5, 6], 5, 5 - 1e-5, False),  # significant improvement
        ([1] * 6, 5, 0., True),  # no significant improvement
        ([1] * 6, 5, 0.001, True),  # no significant improvement
        ([1] * 6, 5, 5, True),  # no significant improvement
    ]
)
def test_should_stop(scores, n_iter_no_change, tol, stopping):
    # ``_should_stop`` inspects the last ``n_iter_no_change`` scores and
    # reports whether none of them improved by more than ``tol``.
    clf = HistGradientBoostingClassifier(
        n_iter_no_change=n_iter_no_change, tol=tol
    )
    assert clf._should_stop(scores) == stopping
| lib/python3.6/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py | 5,922 | To use this experimental feature, we need to explicitly ask for it: noqa use scorer use scorer on train data same with default scorer use loss use loss on training data no early stopping just for coverage easier to overfit fast use scorer use scorer on training data same with default scorerscor use loss use loss on training data no early stopping just for coverage easier to overfit fast not enough iterations not enough iterations not enough iterations significant improvement significant improvement significant improvement significant improvement no significant improvement no significant improvement no significant improvement | 632 | en | 0.864333 |
# coding: utf-8
"""
Control-M Services
Provides access to BMC Control-M Services # noqa: E501
OpenAPI spec version: 9.20.220
Contact: customer_support@bmc.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ErrorList(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    # swagger_types maps attribute name -> attribute type;
    # attribute_map maps attribute name -> JSON key in the definition.
    swagger_types = {
        'errors': 'list[ErrorData]'
    }

    attribute_map = {
        'errors': 'errors'
    }

    def __init__(self, errors=None):  # noqa: E501
        """ErrorList - a model defined in Swagger"""  # noqa: E501
        self._errors = None
        self.discriminator = None
        if errors is not None:
            self.errors = errors

    @property
    def errors(self):
        """Gets the errors of this ErrorList.  # noqa: E501

        :return: The errors of this ErrorList.  # noqa: E501
        :rtype: list[ErrorData]
        """
        return self._errors

    @errors.setter
    def errors(self, errors):
        """Sets the errors of this ErrorList.

        :param errors: The errors of this ErrorList.  # noqa: E501
        :type: list[ErrorData]
        """
        self._errors = errors

    def to_dict(self):
        """Returns the model properties as a dict"""
        def _convert(value):
            # Recursively serialize nested models; anything exposing
            # ``to_dict`` is treated as a model, plain values pass through.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in value.items()}
            return value

        result = {}
        for name in self.swagger_types:
            result[name] = _convert(getattr(self, name))
        # Generated-code quirk: merge inherited dict items when the model
        # subclasses dict (never true for ErrorList, kept for parity).
        if issubclass(ErrorList, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ErrorList):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| controlm_py/models/error_list.py | 3,051 | NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Returns true if both objects are equal
ErrorList - a model defined in Swagger
Returns true if both objects are not equal
For `print` and `pprint`
Gets the errors of this ErrorList. # noqa: E501
:return: The errors of this ErrorList. # noqa: E501
:rtype: list[ErrorData]
Sets the errors of this ErrorList.
:param errors: The errors of this ErrorList. # noqa: E501
:type: list[ErrorData]
Returns the model properties as a dict
Returns the string representation of the model
Control-M Services
Provides access to BMC Control-M Services # noqa: E501
OpenAPI spec version: 9.20.220
Contact: customer_support@bmc.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
coding: utf-8 noqa: F401 noqa: E501 noqa: E501 | 841 | en | 0.635102 |
# Rainbow 2, by Al Sweigart al@inventwithpython.com
# Shows a simple squiggle rainbow animation.

import time, random, sys

try:
    import bext
except ImportError:
    print("""This program requires the bext module, which you can install by
opening a Terminal window (on macOS & Linux) and running:
python3 -m pip install --user bext
or a Command Prompt window (on Windows) and running:
python -m pip install --user bext""")
    sys.exit()

indent = 10  # Number of leading spaces before the rainbow stripe.

while True:
    # Draw one row of the rainbow at the current indentation.
    print(' ' * indent, end='')
    for color in ('red', 'yellow', 'green', 'blue', 'cyan'):
        bext.fg(color)
        print('##', end='')
    bext.fg('purple')
    print('##')  # Last stripe ends the line.

    # Randomly drift the squiggle left or right, clamped to [0, 20].
    if random.randint(0, 1) == 0:
        indent = min(indent + 1, 20)
    else:
        indent = max(indent - 1, 0)

    time.sleep(0.05)  # Add a slight pause between rows.
| src/gamesbyexample/rainbow2.py | 1,121 | Rainbow 2, by Al Sweigart al@inventwithpython.com Shows a simple squiggle rainbow animation. How many spaces to indent. Increase the number of spaces: Decrease the number of spaces: Add a slight pause. | 201 | en | 0.69054 |
'''
Defines the link functions to be used with GLM and GEE families.
'''
import numpy as np
import scipy.stats
# float64 machine epsilon; used throughout to clip probabilities away
# from the closed endpoints 0 and 1 before taking logs.
FLOAT_EPS = np.finfo(float).eps
class Link(object):
    """
    A generic link function for one-parameter exponential family.

    `Link` defines the interface expected of any subclass.  The three
    abstract methods (``__call__``, ``inverse``, ``deriv``) raise
    ``NotImplementedError`` until overridden; the remaining methods are
    generic default implementations built on top of them.
    """

    def __call__(self, p):
        """
        Return the value of the link function.  Abstract method.

        Parameters
        ----------
        p : array_like
            Probabilities

        Returns
        -------
        g(p) : array_like
            The value of the link function g(p) = z

        Raises
        ------
        NotImplementedError
            Always; subclasses must override.
        """
        # Bug fix: previously this *returned* the NotImplementedError class,
        # which silently handed callers a nonsense value instead of failing.
        raise NotImplementedError

    def inverse(self, z):
        """
        Inverse of the link function.  Abstract method.

        Parameters
        ----------
        z : array_like
            `z` is usually the linear predictor of the transformed variable
            in the IRLS algorithm for GLM.

        Returns
        -------
        g^(-1)(z) : ndarray
            The value of the inverse of the link function g^(-1)(z) = p

        Raises
        ------
        NotImplementedError
            Always; subclasses must override.
        """
        raise NotImplementedError

    def deriv(self, p):
        """
        Derivative of the link function g'(p).  Abstract method.

        Parameters
        ----------
        p : array_like

        Returns
        -------
        g'(p) : ndarray
            The value of the derivative of the link function g'(p)

        Raises
        ------
        NotImplementedError
            Always; subclasses must override.
        """
        raise NotImplementedError

    def deriv2(self, p):
        """Second derivative of the link function g''(p)

        implemented through numerical differentiation
        """
        from statsmodels.tools.numdiff import _approx_fprime_cs_scalar
        return _approx_fprime_cs_scalar(p, self.deriv)

    def inverse_deriv(self, z):
        """
        Derivative of the inverse link function g^(-1)(z).

        Parameters
        ----------
        z : array_like
            `z` is usually the linear predictor for a GLM or GEE model.

        Returns
        -------
        g'^(-1)(z) : ndarray
            The value of the derivative of the inverse of the link function

        Notes
        -----
        This reference implementation gives the correct result but is
        inefficient, so it can be overridden in subclasses.
        """
        return 1 / self.deriv(self.inverse(z))

    def inverse_deriv2(self, z):
        """
        Second derivative of the inverse link function g^(-1)(z).

        Parameters
        ----------
        z : array_like
            `z` is usually the linear predictor for a GLM or GEE model.

        Returns
        -------
        g'^(-1)(z) : ndarray
            The value of the second derivative of the inverse of the link
            function

        Notes
        -----
        This reference implementation gives the correct result but is
        inefficient, so it can be overridden in subclasses.
        """
        iz = self.inverse(z)
        return -self.deriv2(iz) / self.deriv(iz)**3
class Logit(Link):
    """
    The logit transform, g(p) = log(p / (1 - p)).

    Notes
    -----
    ``__call__`` and ``deriv`` use the private ``_clean`` helper to clip
    probabilities by machine epsilon so that ``p`` stays inside the open
    interval (0, 1).

    Alias of Logit:
    logit = Logit()
    """

    def _clean(self, p):
        """
        Clip logistic values to the range (eps, 1 - eps).

        Parameters
        ----------
        p : array_like
            Probabilities

        Returns
        -------
        pclip : ndarray
            Clipped probabilities
        """
        return np.clip(p, FLOAT_EPS, 1. - FLOAT_EPS)

    def __call__(self, p):
        """
        Evaluate the logit transform, g(p) = log(p / (1 - p)).

        Parameters
        ----------
        p : array_like
            Probabilities

        Returns
        -------
        z : ndarray
            Logit transform of `p`
        """
        pr = self._clean(p)
        odds = pr / (1. - pr)
        return np.log(odds)

    def inverse(self, z):
        """
        Inverse of the logit transform, g^(-1)(z) = exp(z) / (1 + exp(z)).

        Parameters
        ----------
        z : array_like
            The value of the logit transform at `p`

        Returns
        -------
        p : ndarray
            Probabilities
        """
        # exp(-z) is numerically safer for large positive z.
        return 1. / (1. + np.exp(-np.asarray(z)))

    def deriv(self, p):
        """
        Derivative of the logit transform, g'(p) = 1 / (p * (1 - p)).

        Parameters
        ----------
        p : array_like
            Probabilities

        Returns
        -------
        g'(p) : ndarray
            Value of the derivative of logit transform at `p`
        """
        pr = self._clean(p)
        return 1. / (pr * (1 - pr))

    def inverse_deriv(self, z):
        """
        Derivative of the inverse of the logit transform.

        Parameters
        ----------
        z : array_like
            `z` is usually the linear predictor for a GLM or GEE model.

        Returns
        -------
        g'^(-1)(z) : ndarray
            The value of the derivative of the inverse of the logit function
        """
        ez = np.exp(z)
        return ez / (1 + ez)**2

    def deriv2(self, p):
        """
        Second derivative of the logit function, g''(p) = (2p - 1)/(p(1-p))^2.

        Parameters
        ----------
        p : array_like
            probabilities

        Returns
        -------
        g''(z) : ndarray
            The value of the second derivative of the logit function
        """
        variance = p * (1 - p)
        return (2 * p - 1) / variance**2
class logit(Logit):
    """Lowercase alias of :class:`Logit`, kept for backwards compatibility."""
    pass
class Power(Link):
    """
    The power transform, g(p) = p**power.

    Parameters
    ----------
    power : float
        The exponent of the power transform

    Notes
    -----
    Aliases of Power:
    inverse = Power(power=-1)
    sqrt = Power(power=.5)
    inverse_squared = Power(power=-2.)
    identity = Power(power=1.)
    """

    def __init__(self, power=1.):
        self.power = power

    def __call__(self, p):
        """
        Power transform link function, g(p) = p**power.

        Parameters
        ----------
        p : array_like
            Mean parameters

        Returns
        -------
        z : array_like
            Power transform of p
        """
        # power == 1 is the identity; short-circuit to avoid np.power.
        if self.power == 1:
            return p
        return np.power(p, self.power)

    def inverse(self, z):
        """
        Inverse of the power transform, g^(-1)(z) = z**(1/power).

        Parameters
        ----------
        z : array_like
            Value of the transformed mean parameters at `p`

        Returns
        -------
        p : ndarray
            Mean parameters
        """
        if self.power == 1:
            return z
        return np.power(z, 1. / self.power)

    def deriv(self, p):
        """
        Derivative of the power transform, g'(p) = power * p**(power - 1).

        Parameters
        ----------
        p : array_like
            Mean parameters

        Returns
        -------
        g'(p) : ndarray
            Derivative of power transform of `p`
        """
        if self.power == 1:
            return np.ones_like(p)
        return self.power * np.power(p, self.power - 1)

    def deriv2(self, p):
        """
        Second derivative of the power transform,
        g''(p) = power * (power - 1) * p**(power - 2).

        Parameters
        ----------
        p : array_like
            Mean parameters

        Returns
        -------
        g''(p) : ndarray
            Second derivative of the power transform of `p`
        """
        if self.power == 1:
            return np.zeros_like(p)
        return self.power * (self.power - 1) * np.power(p, self.power - 2)

    def inverse_deriv(self, z):
        """
        Derivative of the inverse of the power transform.

        Parameters
        ----------
        z : array_like
            `z` is usually the linear predictor for a GLM or GEE model.

        Returns
        -------
        g^(-1)'(z) : ndarray
            The value of the derivative of the inverse of the power transform
            function
        """
        if self.power == 1:
            return np.ones_like(z)
        return np.power(z, (1 - self.power) / self.power) / self.power

    def inverse_deriv2(self, z):
        """
        Second derivative of the inverse of the power transform.

        Parameters
        ----------
        z : array_like
            `z` is usually the linear predictor for a GLM or GEE model.

        Returns
        -------
        g^(-1)''(z) : ndarray
            The value of the second derivative of the inverse of the power
            transform function
        """
        if self.power == 1:
            return np.zeros_like(z)
        return ((1 - self.power) *
                np.power(z, (1 - 2 * self.power) / self.power) / self.power**2)
class inverse_power(Power):
    """
    The inverse transform, g(p) = 1/p.

    Equivalent to ``Power(power=-1.)``.
    """

    def __init__(self):
        super().__init__(power=-1.)
class sqrt(Power):
    """
    The square-root transform, g(p) = sqrt(p).

    Equivalent to ``Power(power=.5)``.
    """

    def __init__(self):
        super().__init__(power=.5)
class inverse_squared(Power):
    r"""
    The inverse squared transform

    Notes
    -----
    g(`p`) = 1/(`p`\*\*2)

    Alias of statsmodels.family.links.Power(power=-2.)
    """
    def __init__(self):
        super(inverse_squared, self).__init__(power=-2.)
class identity(Power):
    """
    The identity transform, g(p) = p.

    Equivalent to ``Power(power=1.)``.
    """

    def __init__(self):
        super().__init__(power=1.)
class Log(Link):
    """
    The log transform, g(p) = log(p).

    Notes
    -----
    ``__call__`` and ``deriv`` use the private ``_clean`` helper to trim
    the data by machine epsilon so that ``p`` is strictly positive.
    ``log`` is an alias of ``Log``.
    """

    def _clean(self, x):
        # Clip to [eps, inf) so the logarithm is always defined.
        return np.clip(x, FLOAT_EPS, np.inf)

    def __call__(self, p, **extra):
        """
        Log transform link function, g(p) = log(p).

        Parameters
        ----------
        p : array_like
            Mean parameters

        Returns
        -------
        z : ndarray
            log(p)
        """
        return np.log(self._clean(p))

    def inverse(self, z):
        """
        Inverse of the log transform, g^(-1)(z) = exp(z).

        Parameters
        ----------
        z : ndarray
            The inverse of the link function at `p`

        Returns
        -------
        p : ndarray
            The mean probabilities given the value of the inverse `z`
        """
        return np.exp(z)

    def deriv(self, p):
        """
        Derivative of the log transform, g'(p) = 1/p.

        Parameters
        ----------
        p : array_like
            Mean parameters

        Returns
        -------
        g'(p) : ndarray
            derivative of log transform of p
        """
        return 1. / self._clean(p)

    def deriv2(self, p):
        """
        Second derivative of the log transform, g''(p) = -1/p**2.

        Parameters
        ----------
        p : array_like
            Mean parameters

        Returns
        -------
        g''(p) : ndarray
            Second derivative of log transform of p
        """
        return -1. / self._clean(p)**2

    def inverse_deriv(self, z):
        """
        Derivative of the inverse of the log transform (the exponential).

        Parameters
        ----------
        z : ndarray
            The inverse of the link function at `p`

        Returns
        -------
        g^(-1)'(z) : ndarray
            The value of the derivative of the inverse of the log function
        """
        return np.exp(z)
class log(Log):
    """
    The log transform

    Notes
    -----
    log is an alias of Log, kept for backwards compatibility.
    """
    pass
# TODO: the CDFLink is untested
class CDFLink(Logit):
    """
    Link built from the CDF of a ``scipy.stats`` distribution.

    CDFLink is a subclass of logit in order to use its _clean method
    for the link and its derivative.

    Parameters
    ----------
    dbn : scipy.stats distribution
        Default is dbn=scipy.stats.norm

    Notes
    -----
    The CDF link is untested.
    """

    def __init__(self, dbn=scipy.stats.norm):
        self.dbn = dbn

    def __call__(self, p):
        """
        CDF link function, g(p) = dbn.ppf(p).

        Parameters
        ----------
        p : array_like
            Mean parameters

        Returns
        -------
        z : ndarray
            (ppf) inverse of CDF transform of p
        """
        return self.dbn.ppf(self._clean(p))

    def inverse(self, z):
        """
        The inverse of the CDF link, g^(-1)(z) = dbn.cdf(z).

        Parameters
        ----------
        z : array_like
            The value of the inverse of the link function at `p`

        Returns
        -------
        p : ndarray
            Mean probabilities. The value of the inverse of CDF link of `z`
        """
        return self.dbn.cdf(z)

    def deriv(self, p):
        """
        Derivative of the CDF link, g'(p) = 1 / dbn.pdf(dbn.ppf(p)).

        Parameters
        ----------
        p : array_like
            mean parameters

        Returns
        -------
        g'(p) : ndarray
            The derivative of CDF transform at `p`
        """
        pr = self._clean(p)
        return 1. / self.dbn.pdf(self.dbn.ppf(pr))

    def deriv2(self, p):
        """
        Second derivative of the link function g''(p),
        expressed through the second derivative of the inverse link.
        """
        linpred = self.dbn.ppf(self._clean(p))
        return -self.inverse_deriv2(linpred) / self.dbn.pdf(linpred)**3

    def deriv2_numdiff(self, p):
        """
        Second derivative of the link function g''(p)

        implemented through numerical differentiation
        """
        from statsmodels.tools.numdiff import _approx_fprime_scalar
        # Note: special function for norm.ppf does not support complex
        return _approx_fprime_scalar(np.atleast_1d(p), self.deriv,
                                     centered=True)

    def inverse_deriv(self, z):
        """
        Derivative of the inverse link function: the pdf of `dbn`.

        Parameters
        ----------
        z : ndarray
            The inverse of the link function at `p`

        Returns
        -------
        g^(-1)'(z) : ndarray
            The value of the derivative of the inverse of the logit function.
            This is just the pdf in a CDFLink.
        """
        return self.dbn.pdf(z)

    def inverse_deriv2(self, z):
        """
        Second derivative of the inverse link function g^(-1)(z).

        Notes
        -----
        This method should be overwritten by subclasses.
        The inherited method is implemented through numerical differentiation.
        """
        from statsmodels.tools.numdiff import _approx_fprime_scalar
        # Note: special function for norm.ppf does not support complex
        return _approx_fprime_scalar(np.atleast_1d(z), self.inverse_deriv,
                                     centered=True)
class probit(CDFLink):
    """
    The probit (standard normal CDF) transform

    Notes
    -----
    g(p) = scipy.stats.norm.ppf(p)

    probit is an alias of CDFLink.
    """

    def inverse_deriv2(self, z):
        """
        Second derivative of the inverse link: the derivative of the
        standard normal pdf, phi'(z) = -z * phi(z).
        """
        return -z * self.dbn.pdf(z)

    def deriv2(self, p):
        """
        Closed-form second derivative of the probit link g''(p).
        """
        linpred = self.dbn.ppf(self._clean(p))
        return linpred / self.dbn.pdf(linpred)**2
class cauchy(CDFLink):
    """
    The Cauchy (standard Cauchy CDF) transform

    Notes
    -----
    g(p) = scipy.stats.cauchy.ppf(p)

    cauchy is an alias of CDFLink with dbn=scipy.stats.cauchy
    """

    def __init__(self):
        super().__init__(dbn=scipy.stats.cauchy)

    def deriv2(self, p):
        """
        Closed-form second derivative of the Cauchy link function.

        Parameters
        ----------
        p : array_like
            Probabilities

        Returns
        -------
        g''(p) : ndarray
            Value of the second derivative of Cauchy link function at `p`
        """
        angle = np.pi * (self._clean(p) - 0.5)
        return 2 * np.pi**2 * np.sin(angle) / np.cos(angle)**3

    def inverse_deriv2(self, z):
        # Derivative of the standard Cauchy pdf.
        return -2 * z / (np.pi * (z**2 + 1)**2)
class CLogLog(Logit):
    """
    The complementary log-log transform, g(p) = log(-log(1 - p)).

    CLogLog inherits from Logit in order to have access to its _clean method
    for the link and its derivative.

    Notes
    -----
    CLogLog is untested.
    """

    def __call__(self, p):
        """
        C-Log-Log transform link function, g(p) = log(-log(1 - p)).

        Parameters
        ----------
        p : ndarray
            Mean parameters

        Returns
        -------
        z : ndarray
            The CLogLog transform of `p`
        """
        pr = self._clean(p)
        return np.log(-np.log(1 - pr))

    def inverse(self, z):
        """
        Inverse of the C-Log-Log transform, g^(-1)(z) = 1 - exp(-exp(z)).

        Parameters
        ----------
        z : array_like
            The value of the inverse of the CLogLog link function at `p`

        Returns
        -------
        p : ndarray
            Mean parameters
        """
        return 1 - np.exp(-np.exp(z))

    def deriv(self, p):
        """
        Derivative of the C-Log-Log transform,
        g'(p) = -1 / ((1 - p) * log(1 - p)).

        Parameters
        ----------
        p : array_like
            Mean parameters

        Returns
        -------
        g'(p) : ndarray
            The derivative of the CLogLog transform link function
        """
        pr = self._clean(p)
        return 1. / ((pr - 1) * (np.log(1 - pr)))

    def deriv2(self, p):
        """
        Second derivative of the C-Log-Log link function.

        Parameters
        ----------
        p : array_like
            Mean parameters

        Returns
        -------
        g''(p) : ndarray
            The second derivative of the CLogLog link function
        """
        pr = self._clean(p)
        log1mp = np.log(1 - pr)
        res = -1 / ((1 - pr)**2 * log1mp)
        return res * (1 + 1 / log1mp)

    def inverse_deriv(self, z):
        """
        Derivative of the inverse of the C-Log-Log transform,
        exp(z - exp(z)).

        Parameters
        ----------
        z : array_like
            The value of the inverse of the CLogLog link function at `p`

        Returns
        -------
        g^(-1)'(z) : ndarray
            The derivative of the inverse of the CLogLog link function
        """
        return np.exp(z - np.exp(z))
class cloglog(CLogLog):
    """
    The CLogLog transform link function.

    Notes
    -----
    g(`p`) = log(-log(1-`p`))

    cloglog is a lowercase alias for CLogLog, kept for backwards
    compatibility:
    cloglog = CLogLog()
    """
    pass
class LogLog(Logit):
    """
    The log-log transform, g(p) = -log(-log(p)).

    LogLog inherits from Logit in order to have access to its _clean method
    for the link and its derivative.
    """

    def __call__(self, p):
        """
        Log-Log transform link function, g(p) = -log(-log(p)).

        Parameters
        ----------
        p : ndarray
            Mean parameters

        Returns
        -------
        z : ndarray
            The LogLog transform of `p`
        """
        pr = self._clean(p)
        return -np.log(-np.log(pr))

    def inverse(self, z):
        """
        Inverse of the Log-Log transform, g^(-1)(z) = exp(-exp(-z)).

        Parameters
        ----------
        z : array_like
            The value of the inverse of the LogLog link function at `p`

        Returns
        -------
        p : ndarray
            Mean parameters
        """
        return np.exp(-np.exp(-z))

    def deriv(self, p):
        """
        Derivative of the Log-Log transform, g'(p) = -1 / (p * log(p)).

        Parameters
        ----------
        p : array_like
            Mean parameters

        Returns
        -------
        g'(p) : ndarray
            The derivative of the LogLog transform link function
        """
        pr = self._clean(p)
        return -1. / (pr * (np.log(pr)))

    def deriv2(self, p):
        """
        Second derivative of the Log-Log link function.

        Parameters
        ----------
        p : array_like
            Mean parameters

        Returns
        -------
        g''(p) : ndarray
            The second derivative of the LogLog link function
        """
        pr = self._clean(p)
        return (1 + np.log(pr)) / (pr * (np.log(pr)))**2

    def inverse_deriv(self, z):
        """
        Derivative of the inverse of the Log-Log transform,
        exp(-exp(-z) - z).

        Parameters
        ----------
        z : array_like
            The value of the inverse of the LogLog link function at `p`

        Returns
        -------
        g^(-1)'(z) : ndarray
            The derivative of the inverse of the LogLog link function
        """
        return np.exp(-np.exp(-z) - z)

    def inverse_deriv2(self, z):
        """
        Second derivative of the inverse of the Log-Log transform.

        Parameters
        ----------
        z : array_like
            The value of the inverse of the LogLog link function at `p`

        Returns
        -------
        g^(-1)''(z) : ndarray
            The second derivative of the inverse of the LogLog link function
        """
        return self.inverse_deriv(z) * (np.exp(-z) - 1)
class loglog(LogLog):
    """
    The LogLog transform link function.

    Notes
    -----
    g(`p`) = -log(-log(`p`))

    `loglog` is an alias for `LogLog`::

        loglog = LogLog()
    """
    pass
class NegativeBinomial(Link):
    """
    The negative binomial link function.

    Parameters
    ----------
    alpha : float, optional
        Ancillary parameter of the negative binomial link function,
        assumed to be nonstochastic. Defaults to 1; permissible values
        are usually taken from the interval (.01, 2).
    """

    def __init__(self, alpha=1.):
        self.alpha = alpha

    def _clean(self, x):
        # keep the mean strictly positive so the log below is well defined
        return np.clip(x, FLOAT_EPS, np.inf)

    def __call__(self, p):
        """
        Evaluate the negative binomial link.

        Parameters
        ----------
        p : array_like
            Mean parameters

        Returns
        -------
        z : ndarray
            The negative binomial transform of `p`

        Notes
        -----
        g(p) = log(p / (p + 1/alpha))
        """
        p = self._clean(p)
        ratio = p / (p + 1 / self.alpha)
        return np.log(ratio)

    def inverse(self, z):
        """
        Inverse of the negative binomial transform.

        Parameters
        ----------
        z : array_like
            The value of the negative binomial link at `p`.

        Returns
        -------
        p : ndarray
            Mean parameters

        Notes
        -----
        g^(-1)(z) = exp(z)/(alpha*(1-exp(z)))  (equivalently the form below)
        """
        one_minus = 1 - np.exp(-z)
        return -1 / (self.alpha * one_minus)

    def deriv(self, p):
        """
        Derivative of the negative binomial transform.

        Parameters
        ----------
        p : array_like
            Mean parameters

        Returns
        -------
        g'(p) : ndarray
            The derivative of the negative binomial transform link function

        Notes
        -----
        g'(x) = 1/(x + alpha*x^2)
        """
        denom = p + self.alpha * p**2
        return 1 / denom

    def deriv2(self, p):
        """
        Second derivative of the negative binomial link function.

        Parameters
        ----------
        p : array_like
            Mean parameters

        Returns
        -------
        g''(p) : ndarray
            The second derivative of the negative binomial transform link
            function

        Notes
        -----
        g''(x) = -(1 + 2*alpha*x)/(x + alpha*x^2)^2
        """
        denom = (p + self.alpha * p**2)**2
        return -(1 + 2 * self.alpha * p) / denom

    def inverse_deriv(self, z):
        """
        Derivative of the inverse of the negative binomial transform.

        Parameters
        ----------
        z : array_like
            Usually the linear predictor for a GLM or GEE model

        Returns
        -------
        g^(-1)'(z) : ndarray
            The value of the derivative of the inverse of the negative
            binomial link
        """
        ez = np.exp(z)
        return ez / (self.alpha * (1 - ez)**2)
class nbinom(NegativeBinomial):
    """
    The negative binomial link function.

    Notes
    -----
    g(p) = log(p/(p + 1/alpha))

    `nbinom` is an alias of `NegativeBinomial`::

        nbinom = NegativeBinomial(alpha=1.)
    """
    pass
| statsmodels/genmod/families/links.py | 26,362 | Uses the CDF of a scipy.stats distribution
CDFLink is a subclass of logit in order to use its _clean method
for the link and its derivative.
Parameters
----------
dbn : scipy.stats distribution
Default is dbn=scipy.stats.norm
Notes
-----
The CDF link is untested.
The complementary log-log transform
CLogLog inherits from Logit in order to have access to its _clean method
for the link and its derivative.
Notes
-----
CLogLog is untested.
A generic link function for one-parameter exponential family.
`Link` does nothing, but lays out the methods expected of any subclass.
The log transform
Notes
-----
call and derivative call a private method _clean to trim the data by
machine epsilon so that p is in (0,1). log is an alias of Log.
The log-log transform
LogLog inherits from Logit in order to have access to its _clean method
for the link and its derivative.
The logit transform
Notes
-----
call and derivative use a private method _clean to make trim p by
machine epsilon so that p is in (0,1)
Alias of Logit:
logit = Logit()
The negative binomial link function
Parameters
----------
alpha : float, optional
Alpha is the ancillary parameter of the Negative Binomial link
function. It is assumed to be nonstochastic. The default value is 1.
Permissible values are usually assumed to be in (.01, 2).
The power transform
Parameters
----------
power : float
The exponent of the power transform
Notes
-----
Aliases of Power:
inverse = Power(power=-1)
sqrt = Power(power=.5)
inverse_squared = Power(power=-2.)
identity = Power(power=1.)
The Cauchy (standard Cauchy CDF) transform
Notes
-----
g(p) = scipy.stats.cauchy.ppf(p)
cauchy is an alias of CDFLink with dbn=scipy.stats.cauchy
The CLogLog transform link function.
Notes
-----
g(`p`) = log(-log(1-`p`))
cloglog is an alias for CLogLog
cloglog = CLogLog()
The identity transform
Notes
-----
g(`p`) = `p`
Alias of statsmodels.family.links.Power(power=1.)
The inverse transform
Notes
-----
g(p) = 1/p
Alias of statsmodels.family.links.Power(power=-1.)
The inverse squared transform
Notes
-----
g(`p`) = 1/(`p`\*\*2)
Alias of statsmodels.family.links.Power(power=-2.)
The log transform
Notes
-----
log is an alias of Log.
The LogLog transform link function.
Notes
-----
g(`p`) = -log(-log(`p`))
loglog is an alias for LogLog
loglog = LogLog()
The negative binomial link function.
Notes
-----
g(p) = log(p/(p + 1/alpha))
nbinom is an alias of NegativeBinomial.
nbinom = NegativeBinomial(alpha=1.)
The probit (standard normal CDF) transform
Notes
-----
g(p) = scipy.stats.norm.ppf(p)
probit is an alias of CDFLink.
The square-root transform
Notes
-----
g(`p`) = sqrt(`p`)
Alias of statsmodels.family.links.Power(power=.5)
Return the value of the link function. This is just a placeholder.
Parameters
----------
p : array_like
Probabilities
Returns
-------
g(p) : array_like
The value of the link function g(p) = z
The logit transform
Parameters
----------
p : array_like
Probabilities
Returns
-------
z : ndarray
Logit transform of `p`
Notes
-----
g(p) = log(p / (1 - p))
Power transform link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
z : array_like
Power transform of x
Notes
-----
g(p) = x**self.power
Log transform link function
Parameters
----------
x : array_like
Mean parameters
Returns
-------
z : ndarray
log(x)
Notes
-----
g(p) = log(p)
CDF link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
z : ndarray
(ppf) inverse of CDF transform of p
Notes
-----
g(`p`) = `dbn`.ppf(`p`)
C-Log-Log transform link function
Parameters
----------
p : ndarray
Mean parameters
Returns
-------
z : ndarray
The CLogLog transform of `p`
Notes
-----
g(p) = log(-log(1-p))
Log-Log transform link function
Parameters
----------
p : ndarray
Mean parameters
Returns
-------
z : ndarray
The LogLog transform of `p`
Notes
-----
g(p) = -log(-log(p))
Negative Binomial transform link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
z : ndarray
The negative binomial transform of `p`
Notes
-----
g(p) = log(p/(p + 1/alpha))
Clip logistic values to range (eps, 1-eps)
Parameters
----------
p : array_like
Probabilities
Returns
-------
pclip : ndarray
Clipped probabilities
Derivative of the link function g'(p). Just a placeholder.
Parameters
----------
p : array_like
Returns
-------
g'(p) : ndarray
The value of the derivative of the link function g'(p)
Derivative of the logit transform
Parameters
----------
p : array_like
Probabilities
Returns
-------
g'(p) : ndarray
Value of the derivative of logit transform at `p`
Notes
-----
g'(p) = 1 / (p * (1 - p))
Alias for `Logit`:
logit = Logit()
Derivative of the power transform
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g'(p) : ndarray
Derivative of power transform of `p`
Notes
-----
g'(`p`) = `power` * `p`**(`power` - 1)
Derivative of log transform link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g'(p) : ndarray
derivative of log transform of x
Notes
-----
g'(x) = 1/x
Derivative of CDF link
Parameters
----------
p : array_like
mean parameters
Returns
-------
g'(p) : ndarray
The derivative of CDF transform at `p`
Notes
-----
g'(`p`) = 1./ `dbn`.pdf(`dbn`.ppf(`p`))
Derivative of C-Log-Log transform link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g'(p) : ndarray
The derivative of the CLogLog transform link function
Notes
-----
g'(p) = - 1 / ((p-1)*log(1-p))
Derivative of Log-Log transform link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g'(p) : ndarray
The derivative of the LogLog transform link function
Notes
-----
g'(p) = - 1 /(p * log(p))
Derivative of the negative binomial transform
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g'(p) : ndarray
The derivative of the negative binomial transform link function
Notes
-----
g'(x) = 1/(x+alpha*x^2)
Second derivative of the link function g''(p)
implemented through numerical differentiation
Second derivative of the logit function.
Parameters
----------
p : array_like
probabilities
Returns
-------
g''(z) : ndarray
The value of the second derivative of the logit function
Second derivative of the power transform
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g''(p) : ndarray
Second derivative of the power transform of `p`
Notes
-----
g''(`p`) = `power` * (`power` - 1) * `p`**(`power` - 2)
Second derivative of the log transform link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g''(p) : ndarray
Second derivative of log transform of x
Notes
-----
g''(x) = -1/x^2
Second derivative of the link function g''(p)
implemented through numerical differentiation
Second derivative of the link function g''(p)
Second derivative of the Cauchy link function.
Parameters
----------
p : array_like
Probabilities
Returns
-------
g''(p) : ndarray
Value of the second derivative of Cauchy link function at `p`
Second derivative of the C-Log-Log link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g''(p) : ndarray
The second derivative of the CLogLog link function
Second derivative of the Log-Log link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g''(p) : ndarray
The second derivative of the LogLog link function
Second derivative of the negative binomial link function.
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g''(p) : ndarray
The second derivative of the negative binomial transform link
function
Notes
-----
g''(x) = -(1+2*alpha*x)/(x+alpha*x^2)^2
Second derivative of the link function g''(p)
implemented through numerical differentiation
Inverse of the link function. Just a placeholder.
Parameters
----------
z : array_like
`z` is usually the linear predictor of the transformed variable
in the IRLS algorithm for GLM.
Returns
-------
g^(-1)(z) : ndarray
The value of the inverse of the link function g^(-1)(z) = p
Inverse of the logit transform
Parameters
----------
z : array_like
The value of the logit transform at `p`
Returns
-------
p : ndarray
Probabilities
Notes
-----
g^(-1)(z) = exp(z)/(1+exp(z))
Inverse of the power transform link function
Parameters
----------
`z` : array_like
Value of the transformed mean parameters at `p`
Returns
-------
`p` : ndarray
Mean parameters
Notes
-----
g^(-1)(z`) = `z`**(1/`power`)
Inverse of log transform link function
Parameters
----------
z : ndarray
The inverse of the link function at `p`
Returns
-------
p : ndarray
The mean probabilities given the value of the inverse `z`
Notes
-----
g^{-1}(z) = exp(z)
The inverse of the CDF link
Parameters
----------
z : array_like
The value of the inverse of the link function at `p`
Returns
-------
p : ndarray
Mean probabilities. The value of the inverse of CDF link of `z`
Notes
-----
g^(-1)(`z`) = `dbn`.cdf(`z`)
Inverse of C-Log-Log transform link function
Parameters
----------
z : array_like
The value of the inverse of the CLogLog link function at `p`
Returns
-------
p : ndarray
Mean parameters
Notes
-----
g^(-1)(`z`) = 1-exp(-exp(`z`))
Inverse of Log-Log transform link function
Parameters
----------
z : array_like
The value of the inverse of the LogLog link function at `p`
Returns
-------
p : ndarray
Mean parameters
Notes
-----
g^(-1)(`z`) = exp(-exp(-`z`))
Inverse of the negative binomial transform
Parameters
----------
z : array_like
The value of the inverse of the negative binomial link at `p`.
Returns
-------
p : ndarray
Mean parameters
Notes
-----
g^(-1)(z) = exp(z)/(alpha*(1-exp(z)))
Derivative of the inverse link function g^(-1)(z).
Parameters
----------
z : array_like
`z` is usually the linear predictor for a GLM or GEE model.
Returns
-------
g'^(-1)(z) : ndarray
The value of the derivative of the inverse of the link function
Notes
-----
This reference implementation gives the correct result but is
inefficient, so it can be overridden in subclasses.
Derivative of the inverse of the logit transform
Parameters
----------
z : array_like
`z` is usually the linear predictor for a GLM or GEE model.
Returns
-------
g'^(-1)(z) : ndarray
The value of the derivative of the inverse of the logit function
Derivative of the inverse of the power transform
Parameters
----------
z : array_like
`z` is usually the linear predictor for a GLM or GEE model.
Returns
-------
g^(-1)'(z) : ndarray
The value of the derivative of the inverse of the power transform
function
Derivative of the inverse of the log transform link function
Parameters
----------
z : ndarray
The inverse of the link function at `p`
Returns
-------
g^(-1)'(z) : ndarray
The value of the derivative of the inverse of the log function,
the exponential function
Derivative of the inverse link function
Parameters
----------
z : ndarray
The inverse of the link function at `p`
Returns
-------
g^(-1)'(z) : ndarray
The value of the derivative of the inverse of the logit function.
This is just the pdf in a CDFLink,
Derivative of the inverse of the C-Log-Log transform link function
Parameters
----------
z : array_like
The value of the inverse of the CLogLog link function at `p`
Returns
-------
g^(-1)'(z) : ndarray
The derivative of the inverse of the CLogLog link function
Derivative of the inverse of the Log-Log transform link function
Parameters
----------
z : array_like
The value of the inverse of the LogLog link function at `p`
Returns
-------
g^(-1)'(z) : ndarray
The derivative of the inverse of the LogLog link function
Derivative of the inverse of the negative binomial transform
Parameters
----------
z : array_like
Usually the linear predictor for a GLM or GEE model
Returns
-------
g^(-1)'(z) : ndarray
The value of the derivative of the inverse of the negative
binomial link
Second derivative of the inverse link function g^(-1)(z).
Parameters
----------
z : array_like
`z` is usually the linear predictor for a GLM or GEE model.
Returns
-------
g'^(-1)(z) : ndarray
The value of the second derivative of the inverse of the link
function
Notes
-----
This reference implementation gives the correct result but is
inefficient, so it can be overridden in subclasses.
Second derivative of the inverse of the power transform
Parameters
----------
z : array_like
`z` is usually the linear predictor for a GLM or GEE model.
Returns
-------
g^(-1)'(z) : ndarray
The value of the derivative of the inverse of the power transform
function
Second derivative of the inverse link function g^(-1)(z).
Parameters
----------
z : array_like
`z` is usually the linear predictor for a GLM or GEE model.
Returns
-------
g^(-1)''(z) : ndarray
The value of the second derivative of the inverse of the link
function
Notes
-----
This method should be overwritten by subclasses.
The inherited method is implemented through numerical differentiation.
Second derivative of the inverse link function
This is the derivative of the pdf in a CDFLink
Second derivative of the inverse of the Log-Log transform link function
Parameters
----------
z : array_like
The value of the inverse of the LogLog link function at `p`
Returns
-------
g^(-1)''(z) : ndarray
The second derivative of the inverse of the LogLog link function
Defines the link functions to be used with GLM and GEE families.
TODO: the CDFLink is untested. Note: the special function for norm.ppf does not support complex inputs.
# qubit number=2
# total number=8
import cirq
import qiskit
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the (n+1)-qubit oracle O_f^± for the boolean function ``f``.

    For every n-bit string ``rep`` with ``f(rep) == "1"`` the target qubit is
    flipped by a multi-controlled X; X gates conjugate the controls so the
    control pattern matches ``rep``.
    """
    # NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
    # or multi_control_Z_gate (issue #127)
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) != "1":
            continue
        # positions whose control must fire on |0> need X before and after
        zero_positions = [j for j in range(n) if rep[j] == "0"]
        for j in zero_positions:
            oracle.x(controls[j])
        oracle.mct(controls, target[0], None, mode='noancilla')
        for j in zero_positions:
            oracle.x(controls[j])
    # oracle.barrier()
    # oracle.draw('mpl', filename='circuit/deutsch-oracle.png')
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble the Deutsch-Jozsa-style circuit for the n-bit function ``f``.

    Layout: X on the target, Hadamards for superposition, the oracle O_f,
    Hadamards again, then a tail of extra gates. The ``# number=N`` comments
    appear to index machine-generated circuit mutations -- kept verbatim.
    """
    # circuit begin
    input_qubit = QuantumRegister(n, "qc")
    target = QuantumRegister(1, "qt")
    prog = QuantumCircuit(input_qubit, target)
    # inverse last one (can be omitted if using O_f^\pm)
    prog.x(target)
    # apply H to get superposition
    for i in range(n):
        prog.h(input_qubit[i])
    prog.h(input_qubit[1])  # number=1
    prog.h(target)
    prog.barrier()
    # apply oracle O_f
    oracle = build_oracle(n, f)
    prog.append(
        oracle.to_gate(),
        [input_qubit[i] for i in range(n)] + [target])
    # apply H back (QFT on Z_2^n)
    for i in range(n):
        prog.h(input_qubit[i])
    prog.barrier()
    # measure
    # The two swap pairs below cancel pairwise; net effect is X then Z on qubit 1.
    prog.swap(input_qubit[1],input_qubit[0]) # number=2
    prog.swap(input_qubit[1],input_qubit[0]) # number=3
    prog.x(input_qubit[1]) # number=5
    prog.z(input_qubit[1]) # number=4
    prog.swap(input_qubit[1],input_qubit[0]) # number=6
    prog.swap(input_qubit[1],input_qubit[0]) # number=7
    # circuit end
    return prog
if __name__ == '__main__':
    n = 2
    # Balanced oracle: f returns the last bit of the input bitstring.
    f = lambda rep: rep[-1]
    # f = lambda rep: "1" if rep[0:2] == "01" or rep[0:2] == "10" else "0"
    # f = lambda rep: "0"
    prog = make_circuit(n, f)
    sample_shot =2800  # NOTE(review): unused with the statevector backend -- confirm
    backend = BasicAer.get_backend('statevector_simulator')
    circuit1 = transpile(prog,FakeVigo())
    # The two back-to-back X gates on the same qubit cancel out; they look
    # like tracked circuit mutations, kept verbatim.
    circuit1.x(qubit=3)
    circuit1.x(qubit=3)
    prog = circuit1
    info = execute(prog, backend=backend).result().get_statevector()
    # Fold the statevector into {bitstring: probability}, rounded to 3 decimals.
    qubits = round(log2(len(info)))
    info = {
        np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
        for i in range(2 ** qubits)
    }
    # Dump probabilities, circuit depth and an ASCII drawing of the circuit.
    writefile = open("../data/startQiskit_Class137.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
| data/p2DJ/New/program/qiskit/class/startQiskit_Class137.py | 3,080 | qubit number=2 total number=8 implement the oracle O_f^\pm NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate or multi_control_Z_gate (issue 127) oracle.barrier() oracle.draw('mpl', filename='circuit/deutsch-oracle.png') circuit begin inverse last one (can be omitted if using O_f^\pm) apply H to get superposition number=1 apply oracle O_f apply H back (QFT on Z_2^n) measure number=2 number=3 number=5 number=4 number=6 number=7 circuit end f = lambda rep: "1" if rep[0:2] == "01" or rep[0:2] == "10" else "0" f = lambda rep: "0" | 538 | en | 0.416839 |
# Created by SylvanasSun in 2017.10.17
# !/usr/bin/python
# -*- coding: utf-8 -*-
import collections
import collections.abc

import jieba
from jieba import analyse
# TODO: Change default hash algorithms to the other algorithms of high-performance.
def _default_hashfunc(content, hashbits):
"""
Default hash function is variable-length version of Python's builtin hash.
:param content: data that needs to hash.
:return: return a decimal number.
"""
if content == "":
return 0
x = ord(content[0]) << 7
m = 1000003
mask = 2 ** hashbits - 1
for c in content:
x = ((x * m) ^ ord(c)) & mask
x ^= len(content)
if x == -1:
x = -2
return x
# TODO: Change default toknizer to the c/c++ version or other tokenizer of high-performance.
def _default_tokenizer_func(content, keyword_weight_pair):
    """
    Default tokenizer based on jieba's search-mode segmentation.

    :param content: text to segment.
    :param keyword_weight_pair: maximum number of (keyword, weight) pairs.
    :return: keyword-weight list ranked by TF-IDF weight,
             e.g. [('Example', 0.4511233019962264), ('Hello', 0.25548051420382073), ...].
    """
    tokens = jieba.lcut_for_search(content)
    joined = "".join(tokens)
    # Rank keywords by TF-IDF and keep at most `keyword_weight_pair` of them.
    return jieba.analyse.extract_tags(joined, topK=keyword_weight_pair, withWeight=True)
class Simhash(object):
    """
    Implements Google's simhash algorithm for near-duplicate detection.

    Simhash reduces content to a small fingerprint; two contents are
    considered similar when the Hamming distance between their fingerprints
    is small. More introduction: https://en.wikipedia.org/wiki/SimHash

    The default tokenizer is jieba (https://github.com/fxsjy/jieba).
    """

    def __init__(self, data, keyword_weight_pair=20, hash_bit_number=64, hashfunc=None, tokenizer_func=None):
        """
        :param data: data that needs to be encoded: a string, an iterable of
                     (token, weight) pairs, an int (taken as a precomputed
                     fingerprint), or another Simhash.
        :param keyword_weight_pair: maximum pair number of the keyword-weight list.
        :param hash_bit_number: maximum bit number for hashcode.
        :param hashfunc: hash function; its first parameter must be the data
                         to encode and the second the hash bit number.
        :param tokenizer_func: tokenizer function; its first parameter must be
                               the content to tokenize and the second the
                               keyword_weight_pair.
        """
        self.hashfunc = _default_hashfunc if hashfunc is None else hashfunc
        self.tokenizer_func = (_default_tokenizer_func if tokenizer_func is None
                               else tokenizer_func)
        self.hash_bit_number = hash_bit_number
        self.keyword_weight_pair = keyword_weight_pair
        # Backward-compatible alias: this attribute used to be misspelled.
        self.keyword_weight_pari = keyword_weight_pair
        if isinstance(data, Simhash):
            self.hash = data.hash
        elif isinstance(data, int):
            self.hash = data
        else:
            self.simhash(data)

    def __str__(self):
        return str(self.hash)

    def simhash(self, content):
        """
        Select policies for simhash on the different types of content.
        """
        if content is None:
            self.hash = -1
            return
        if isinstance(content, str):
            features = self.tokenizer_func(content, self.keyword_weight_pair)
            self.hash = self.build_from_features(features)
        elif isinstance(content, collections.abc.Iterable):
            # collections.Iterable was removed in Python 3.10; use collections.abc
            self.hash = self.build_from_features(content)
        elif isinstance(content, int):
            self.hash = content
        else:
            raise Exception("Unsupported parameter type %s" % type(content))

    def build_from_features(self, features):
        """
        :param features: a list of (token, weight) tuples, a token -> weight
                         dict, or plain token strings (a weight of 1 will be
                         assumed).
        :return: a decimal digit for the accumulative result of each after
                 handled features-weight pair.
        """
        v = [0] * self.hash_bit_number
        if isinstance(features, dict):
            features = features.items()
        # Longitudinal accumulation of bits: add the weight where the token's
        # hash has a 1 bit, subtract it where it has a 0 bit.
        for f in features:
            if isinstance(f, str):
                h = self.hashfunc(f, self.hash_bit_number)
                w = 1
            else:
                assert isinstance(f, collections.abc.Iterable)
                h = self.hashfunc(f[0], self.hash_bit_number)
                w = f[1]
            for i in range(self.hash_bit_number):
                bitmask = 1 << i
                v[i] += w if h & bitmask else -w
        # Keep only the bits whose accumulated weight is non-negative.
        fingerprint = 0
        for i in range(self.hash_bit_number):
            if v[i] >= 0:
                fingerprint += 1 << i
        return fingerprint

    def is_equal(self, another, limit=0.8):
        """
        Determine whether two simhashes are similar.

        :param another: another simhash (Simhash instance or int fingerprint).
        :param limit: a limit of the similarity.
        :return: True if similarity is strictly greater than `limit`,
                 else False.
        :raises Exception: if `another` is None or of an unsupported type.
        """
        if another is None:
            raise Exception("Parameter another is null")
        if isinstance(another, int):
            distance = self.hamming_distance(another)
        elif isinstance(another, Simhash):
            assert self.hash_bit_number == another.hash_bit_number
            distance = self.hamming_distance(another.hash)
        else:
            raise Exception("Unsupported parameter type %s" % type(another))
        similarity = float(self.hash_bit_number - distance) / self.hash_bit_number
        return similarity > limit

    def hamming_distance(self, another):
        """
        Compute the Hamming distance: the total number of differing bits of
        two fingerprints (only the low `hash_bit_number` bits are compared).

        :param another: another simhash value.
        :return: the Hamming distance between current and another simhash.
        """
        x = (self.hash ^ another) & ((1 << self.hash_bit_number) - 1)
        result = 0
        while x:
            result += 1
            x &= x - 1  # Kernighan's trick: clear the lowest set bit
        return result
if __name__ == "__main__":
sentence_A = """
明朝军制建立在军户制度上,军户即为中国古代世代从军、充当军差的人户。
东晋南北朝时,士兵及家属的户籍隶于军府称为军户。军户子弟世袭为兵未经准许不得脱离军籍。
北魏军户亦有用俘虏充当的。元朝实行军户制度,军户必须出成年男子到军队服役,父死子替,兄亡弟代,世代相袭。
"""
sentence_B = """
明朝的军制是在元朝基础上改进,而没有采用唐宋时期的募兵制。
元朝的军制是建立在游牧民族制度上发展而来,游牧民族在战争是全民征兵,实际上是军户制度。
建立元朝以后,蒙古族还是全部军户,对于占领区招降的军队,也实行军户制度。
"""
sentence_C = "You know nothing Jon Snow!"
sentence_D = "Jon Snow: I konw nothing."
simhash_A = Simhash(sentence_A)
simhash_B = Simhash(sentence_B)
simhash_C = Simhash(sentence_C)
simhash_D = Simhash(sentence_D)
print(simhash_A)
print(simhash_B)
print(simhash_C)
print(simhash_D)
assert simhash_A.is_equal(simhash_B) is True
assert simhash_B.is_equal(simhash_C) is False
assert simhash_C.is_equal(simhash_D) is True
| algorithms/hash/simhash.py | 7,893 | Class Simhash implements simhash algorithms of the Google for filter duplicate content.
Simhash algorithms idea is will reduce the dimension of content and compares the
difference of the "Hamming Distance" implements filter duplicate content.
About simhash algorithms the more introduction: https://en.wikipedia.org/wiki/SimHash
Simhash default tokenizer is jieba (https://github.com/fxsjy/jieba).
:param data: data that needs to be encode.
:param keyword_weight_pair: maximum pair number of the keyword-weight list.
:param hash_bit_number: maximum bit number for hashcode.
:param hashfunc: hash function,its first parameter must be data that needs to be encode
and the second parameter must be hash bit number.
:param tokenizer_func: tokenizer function,its first parameter must be content that
needs to be tokenizer and the second parameter must be
keyword_weight_pair.
Default hash function is variable-length version of Python's builtin hash.
:param content: data that needs to hash.
:return: return a decimal number.
Default tokenizer function that uses jieba tokenizer.
:param keyword_weight_pair: maximum pair number of the keyword-weight list.
:return: return keyword-weight list. Example: [('Example',0.4511233019962264),('Hello',0.25548051420382073),...].
:param features: a list of (token,weight) tuples or a token -> weight dict,
if is a string so it need compute weight (a weight of 1 will be assumed).
:return: a decimal digit for the accumulative result of each after handled features-weight pair.
Compute hamming distance,hamming distance is a total number of different bits of two binary numbers.
:param another: another simhash value.
:return: a hamming distance that current simhash and another simhash.
Determine two simhash are similar or not similar.
:param another: another simhash.
:param limit: a limit of the similarity.
:return: if similarity greater than limit return true and else return false.
Select policies for simhash on the different types of content.
Created by SylvanasSun in 2017.10.17 !/usr/bin/python -*- coding: utf-8 -*- TODO: Change default hash algorithms to the other algorithms of high-performance. TODO: Change default toknizer to the c/c++ version or other tokenizer of high-performance. Extract keyword-weight list by TF-IDF algorithms and by sorted maximum weight Starting longitudinal accumulation of bits, current bit add current weight when the current bits equal 1 and else current bit minus the current weight. Just record weight of the non-negative | 2,590 | en | 0.706733 |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import re
import subprocess
import warnings
from luigi import six
import luigi.configuration
import luigi.contrib.hadoop
import luigi.contrib.hadoop_jar
import luigi.contrib.hdfs
from luigi import LocalTarget
from luigi.task import flatten
logger = logging.getLogger('luigi-interface')
"""
Scalding support for Luigi.
Example configuration section in luigi.cfg::
[scalding]
# scala home directory, which should include a lib subdir with scala jars.
scala-home: /usr/share/scala
# scalding home directory, which should include a lib subdir with
# scalding-*-assembly-* jars as built from the official Twitter build script.
scalding-home: /usr/share/scalding
# provided dependencies, e.g. jars required for compiling but not executing
# scalding jobs. Currently requred jars:
# org.apache.hadoop/hadoop-core/0.20.2
# org.slf4j/slf4j-log4j12/1.6.6
# log4j/log4j/1.2.15
# commons-httpclient/commons-httpclient/3.1
# commons-cli/commons-cli/1.2
# org.apache.zookeeper/zookeeper/3.3.4
scalding-provided: /usr/share/scalding/provided
# additional jars required.
scalding-libjars: /usr/share/scalding/libjars
"""
class ScaldingJobRunner(luigi.contrib.hadoop.JobRunner):
"""
JobRunner for `pyscald` commands. Used to run a ScaldingJobTask.
"""
    def __init__(self):
        """Resolve scala/scalding locations from config, env vars or defaults."""
        conf = luigi.configuration.get_config()
        # Scala install: [scalding] scala-home, else $SCALA_HOME, else /usr/share/scala
        default = os.environ.get('SCALA_HOME', '/usr/share/scala')
        self.scala_home = conf.get('scalding', 'scala-home', default)
        # Scalding install: [scalding] scalding-home, else $SCALDING_HOME fallback
        default = os.environ.get('SCALDING_HOME', '/usr/share/scalding')
        self.scalding_home = conf.get('scalding', 'scalding-home', default)
        # NOTE(review): the fallbacks below join `default` (env/bundled path),
        # not the configured self.scalding_home -- confirm this is intended.
        self.provided_dir = conf.get(
            'scalding', 'scalding-provided', os.path.join(default, 'provided'))
        self.libjars_dir = conf.get(
            'scalding', 'scalding-libjars', os.path.join(default, 'libjars'))
        # scratch directory for the generated job jar and build artifacts
        self.tmp_dir = LocalTarget(is_tmp=True)
def _get_jars(self, path):
return [os.path.join(path, j) for j in os.listdir(path)
if j.endswith('.jar')]
def get_scala_jars(self, include_compiler=False):
lib_dir = os.path.join(self.scala_home, 'lib')
jars = [os.path.join(lib_dir, 'scala-library.jar')]
# additional jar for scala 2.10 only
reflect = os.path.join(lib_dir, 'scala-reflect.jar')
if os.path.exists(reflect):
jars.append(reflect)
if include_compiler:
jars.append(os.path.join(lib_dir, 'scala-compiler.jar'))
return jars
def get_scalding_jars(self):
lib_dir = os.path.join(self.scalding_home, 'lib')
return self._get_jars(lib_dir)
def get_scalding_core(self):
lib_dir = os.path.join(self.scalding_home, 'lib')
for j in os.listdir(lib_dir):
if j.startswith('scalding-core-'):
p = os.path.join(lib_dir, j)
logger.debug('Found scalding-core: %s', p)
return p
raise luigi.contrib.hadoop.HadoopJobError('Could not find scalding-core.')
def get_provided_jars(self):
return self._get_jars(self.provided_dir)
def get_libjars(self):
return self._get_jars(self.libjars_dir)
def get_tmp_job_jar(self, source):
job_name = os.path.basename(os.path.splitext(source)[0])
return os.path.join(self.tmp_dir.path, job_name + '.jar')
def get_build_dir(self, source):
build_dir = os.path.join(self.tmp_dir.path, 'build')
return build_dir
def get_job_class(self, source):
# find name of the job class
# usually the one that matches file name or last class that extends Job
job_name = os.path.splitext(os.path.basename(source))[0]
package = None
job_class = None
for l in open(source).readlines():
p = re.search(r'package\s+([^\s\(]+)', l)
if p:
package = p.groups()[0]
p = re.search(r'class\s+([^\s\(]+).*extends\s+.*Job', l)
if p:
job_class = p.groups()[0]
if job_class == job_name:
break
if job_class:
if package:
job_class = package + '.' + job_class
logger.debug('Found scalding job class: %s', job_class)
return job_class
else:
raise luigi.contrib.hadoop.HadoopJobError('Coudl not find scalding job class.')
def build_job_jar(self, job):
job_jar = job.jar()
if job_jar:
if not os.path.exists(job_jar):
logger.error("Can't find jar: %s, full path %s", job_jar, os.path.abspath(job_jar))
raise Exception("job jar does not exist")
if not job.job_class():
logger.error("Undefined job_class()")
raise Exception("Undefined job_class()")
return job_jar
job_src = job.source()
if not job_src:
logger.error("Both source() and jar() undefined")
raise Exception("Both source() and jar() undefined")
if not os.path.exists(job_src):
logger.error("Can't find source: %s, full path %s", job_src, os.path.abspath(job_src))
raise Exception("job source does not exist")
job_src = job.source()
job_jar = self.get_tmp_job_jar(job_src)
build_dir = self.get_build_dir(job_src)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
classpath = ':'.join(filter(None,
self.get_scalding_jars() +
self.get_provided_jars() +
self.get_libjars() +
job.extra_jars()))
scala_cp = ':'.join(self.get_scala_jars(include_compiler=True))
# compile scala source
arglist = ['java', '-cp', scala_cp, 'scala.tools.nsc.Main',
'-classpath', classpath,
'-d', build_dir, job_src]
logger.info('Compiling scala source: %s', subprocess.list2cmdline(arglist))
subprocess.check_call(arglist)
# build job jar file
arglist = ['jar', 'cf', job_jar, '-C', build_dir, '.']
logger.info('Building job jar: %s', subprocess.list2cmdline(arglist))
subprocess.check_call(arglist)
return job_jar
def run_job(self, job, tracking_url_callback=None):
if tracking_url_callback is not None:
warnings.warn("tracking_url_callback argument is deprecated, task.set_tracking_url is "
"used instead.", DeprecationWarning)
job_jar = self.build_job_jar(job)
jars = [job_jar] + self.get_libjars() + job.extra_jars()
scalding_core = self.get_scalding_core()
libjars = ','.join(filter(None, jars))
arglist = luigi.contrib.hdfs.load_hadoop_cmd() + ['jar', scalding_core, '-libjars', libjars]
arglist += ['-D%s' % c for c in job.jobconfs()]
job_class = job.job_class() or self.get_job_class(job.source())
arglist += [job_class, '--hdfs']
# scalding does not parse argument with '=' properly
arglist += ['--name', job.task_id.replace('=', ':')]
(tmp_files, job_args) = luigi.contrib.hadoop_jar.fix_paths(job)
arglist += job_args
env = os.environ.copy()
jars.append(scalding_core)
hadoop_cp = ':'.join(filter(None, jars))
env['HADOOP_CLASSPATH'] = hadoop_cp
logger.info("Submitting Hadoop job: HADOOP_CLASSPATH=%s %s",
hadoop_cp, subprocess.list2cmdline(arglist))
luigi.contrib.hadoop.run_and_track_hadoop_job(arglist, job.set_tracking_url, env=env)
for a, b in tmp_files:
a.move(b)
class ScaldingJobTask(luigi.contrib.hadoop.BaseHadoopJobTask):
    """
    A job task for Scalding that defines a scala source and (optional) main method.

    requires() should return a dictionary where the keys are Scalding argument
    names and values are sub tasks or lists of subtasks.

    For example:

    .. code-block:: python

        {'input1': A, 'input2': C} => --input1 <Aoutput> --input2 <Coutput>
        {'input1': [A, B], 'input2': [C]} => --input1 <Aoutput> <Boutput> --input2 <Coutput>
    """

    def relpath(self, current_file, rel_path):
        """Resolve *rel_path* against the directory containing *current_file*."""
        base_dir = os.path.dirname(os.path.abspath(current_file))
        return os.path.abspath(os.path.join(base_dir, rel_path))

    def source(self):
        """Path to the scala source for this Scalding Job.

        Either one of source() or jar() must be specified.
        """
        return None

    def jar(self):
        """Path to the jar file for this Scalding Job.

        Either one of source() or jar() must be specified.
        """
        return None

    def extra_jars(self):
        """Extra jars for building and running this Scalding Job."""
        return []

    def job_class(self):
        """Optional main job class for this Scalding Job."""
        return None

    def job_runner(self):
        """Scalding tasks are always executed through a ScaldingJobRunner."""
        return ScaldingJobRunner()

    def atomic_output(self):
        """
        If True, output arguments are rewritten to temporary locations and
        atomically moved into place after the job finishes.
        """
        return True

    def requires(self):
        """No dependencies by default; subclasses map argument names to tasks."""
        return {}

    def job_args(self):
        """Extra arguments to pass to the Scalding job."""
        return []

    def args(self):
        """Assemble the command-line argument list handed to the job."""
        arglist = []
        for arg_name, dep in six.iteritems(self.requires_hadoop()):
            arglist.append('--' + arg_name)
            arglist.extend(task.output().path for task in flatten(dep))
        arglist.extend(['--output', self.output()])
        arglist.extend(self.job_args())
        return arglist
| luigi/contrib/scalding.py | 10,702 | JobRunner for `pyscald` commands. Used to run a ScaldingJobTask.
A job task for Scalding that define a scala source and (optional) main method.
requires() should return a dictionary where the keys are Scalding argument
names and values are sub tasks or lists of subtasks.
For example:
.. code-block:: python
{'input1': A, 'input2': C} => --input1 <Aoutput> --input2 <Coutput>
{'input1': [A, B], 'input2': [C]} => --input1 <Aoutput> <Boutput> --input2 <Coutput>
Returns an array of args to pass to the job.
If True, then rewrite output arguments to be temp locations and
atomically move them into place after the job finishes.
Extra jars for building and running this Scalding Job.
Path to the jar file for this Scalding Job
Either one of source() or jar() must be specified.
Extra arguments to pass to the Scalding job.
optional main job class for this Scalding Job.
Compute path given current file and relative path.
Path to the scala source for this Scalding Job
Either one of source() or jar() must be specified.
-*- coding: utf-8 -*- Copyright 2012-2015 Spotify AB Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. additional jar for scala 2.10 only find name of the job class usually the one that matches file name or last class that extends Job compile scala source build job jar file scalding does not parse argument with '=' properly | 1,825 | en | 0.778817 |
"""
file system and database initialization.
tables:
- polls:
- id PRIMARY KEY
- owner_id => users.id
- topic
- users:
- id PRIMARY KEY
- first_name
- last_name
- username
- answers:
- id PRIMARY KEY
- poll_id => polls.id
- text
- votes:
- user_id => users.id
- poll_id => polls.id
- answer_id => answers.id
"""
import os
from os.path import expanduser, join
from yoyo import get_backend, read_migrations
from . import log
logger = log.getLogger('app.fs')
# Per-user data directory (XDG-style location under ~/.local/share).
DATA_DIR: str = expanduser("~/.local/share/multi_vote_bot")
if not os.path.exists(DATA_DIR):
    logger.info("Creating data dir at path %s", DATA_DIR)
    # exist_ok guards against a race between the check above and creation.
    os.makedirs(DATA_DIR, exist_ok=True)
# Single SQLite database file holding the tables described in the module docstring.
DB_PATH: str = join(DATA_DIR, "data.db")
def migrate():
    """Apply all pending yoyo migrations to the application database."""
    logger.info("Migrating to the latest schema")
    log.getLogger('yoyo').setLevel(log.DEBUG)
    db = get_backend('sqlite:///' + DB_PATH)
    pending = read_migrations('./migrations')
    with db.lock():
        db.apply_migrations(db.to_apply(pending))
# Side effect at import time: the schema is migrated as soon as this module
# is imported anywhere.
migrate()
| src/app/fs.py | 1,101 | apply yoyo migrations
file system and database initialization.
tables:
- polls:
- id PRIMARY KEY
- owner_id => users.id
- topic
- users:
- id PRIMARY KEY
- first_name
- last_name
- username
- answers:
- id PRIMARY KEY
- poll_id => polls.id
- text
- votes:
- user_id => users.id
- poll_id => polls.id
- answer_id => answers.id
auto migrate when imported | 385 | en | 0.553792 |
# -*- coding: UTF-8 -*-
import os # File and path handling
import numpy
import copy # for deepcopy
import math
from .image import ImageFile, Image, ImageROI, ImageStack
from .geometry import Geometry
from .processing.pipeline import Pipeline
from .processing.step import Step
from .helpers import *
def touchDirectory(folder):
    """Create *folder* (including parents) if it does not exist yet.

    Keeps the original behavior of doing nothing when the path already
    exists (even as a file), but passes exist_ok=True so a directory that
    appears between the check and the creation no longer raises.
    """
    if not os.path.exists(folder):
        os.makedirs(folder, exist_ok=True)
class generalTest(Step):
    """ General class for test scenario evaluations: get image(s), run and store evaluation. """

    def __init__(self, testName="General Test", name=None, nExpectedRuns=1, resultFileDirectory=".", rawOutput=False):
        """Set up the test; the parameters mirror the setter methods below."""
        Step.__init__(self, testName)
        self.testName = testName
        self.subtests = []
        self.prepared = False
        self.currentRun = 0

        self.nExpectedRuns = None  # usually, number of projections to evaluate
        self.resultFileDirectory = None
        self.name = None
        self.rawOutput = None

        self.setName(name)
        self.setExpectedRuns(nExpectedRuns)
        self.setResultFileDirectory(resultFileDirectory)
        self.setRawOutput(rawOutput)
        self.reset()

    def reset(self):
        """Return the test to its initial, unprepared state."""
        self.currentRun = 0
        self.prepared = False

    def addSubtest(self, subt):
        """Register a sub test to run as part of this scenario."""
        self.subtests.append(subt)

    def setName(self, name=None):
        """ Set an individual name for the (sub) test; falls back to testName. """
        # `is not None` instead of `!= None`: identity is the correct idiom
        # and is immune to surprising custom __eq__ implementations.
        if name is not None:
            self.name = name
        else:
            self.name = self.testName

    def setExpectedRuns(self, n=1):
        """ Set the number of expected runs (usually projections to evaluate). """
        self.nExpectedRuns = n

    def setResultFileDirectory(self, resultFileDirectory="."):
        """ Set the location where test results should be saved. """
        self.resultFileDirectory = resultFileDirectory
        # Make sure the directory exists before any result is written.
        touchDirectory(self.resultFileDirectory)

    def setRawOutput(self, rawOutput=False):
        """ Save intermediate projections as RAW instead of TIFF? """
        self.rawOutput = rawOutput

    def plotResults(self):
        """ Plot results of evaluation. """
        # Should be called by step's followUp() function, if needed.
        pass
| ctsimu/test.py | 2,197 | General class for test scenario evaluations: get image(s), run and store evaluation.
Plot results of evaluation.
Set an individual name for the (sub) test.
Save intermediate projections as RAW instead of TIFF?
Set the location where test results should be saved.
-*- coding: UTF-8 -*- File and path handling for deepcopy usually, number of projections to evaluate Should be called by step's followUp() function, if needed. | 429 | en | 0.908531 |
"""
Custom dataset processing/generation functions should be added to this file
"""
import pathlib
from sklearn.datasets import fetch_20newsgroups
from functools import partial
from src import workflow, paths
from src.log import logger
import src.log.debug
from tqdm.auto import tqdm
from .. import paths
from ..log import logger
# Public API of this module: names exported via `from ... import *`.
__all__ = [
    'process_20_newsgroups'
]
def process_20_newsgroups(*, extract_dir='20_newsgroups',
                          metadata=None, unpack_dir=None,
                          opts=None):
    """
    Process 20 newsgroups into (data, target, metadata) format.

    Parameters
    ----------
    unpack_dir: path
        The interim parent directory the dataset files have been unpacked into.
    extract_dir: str
        Name of the directory of the unpacked files relative to the unpack_dir.
    opts: dict, default None
        Options to pass to sklearn.datasets.fetch_20newsgroups.
        When None, {"subset": "all", "remove": ("headers", "footers", "quotes")}
        is used.

    Returns
    -------
    A tuple:
        (data, target, additional_metadata)
    """
    # Mutable objects must never be parameter defaults; build the default here.
    # The old default also passed `remove` as the *string*
    # "('headers', 'footers', 'quotes')", which only worked because sklearn
    # does substring membership checks — a real tuple is what the API expects.
    if opts is None:
        opts = {"subset": "all", "remove": ("headers", "footers", "quotes")}
    if metadata is None:
        metadata = {}

    if unpack_dir is None:
        unpack_dir = paths['interim_data_path']
    else:
        unpack_dir = pathlib.Path(unpack_dir)
    # NOTE(review): data_dir is computed but currently unused — presumably the
    # on-disk copy was meant to be read from here; confirm before removing.
    data_dir = unpack_dir / f"{extract_dir}"

    news = fetch_20newsgroups(**opts)
    metadata['target_names'] = news.target_names
    return news.data, news.target, metadata
| src/data/process_functions.py | 1,500 | Process 20 newsgroups into (data, target, metadata) format.
Parameters
----------
unpack_dir: path
The interim parent directory the dataset files have been unpacked into.
extract_dir: str
Name of the directory of the unpacked files relative to the unpack_dir. Note that
opts: dict default {"subset":"all", "remove"="('headers', 'footers', 'quotes')"}
Options to pass to sklearn.datasets.fetch_20newsgroups.
Returns
-------
A tuple:
(data, target, additional_metadata)
Custom dataset processing/generation functions should be added to this file | 563 | en | 0.437662 |
#!/usr/bin/python
# Copyright (c) 2017, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata consumed by documentation and sanity tooling.
ANSIBLE_METADATA = {
    "metadata_version": "1.1",
    "status": ["preview"],
    "supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_vault_secret_actions
short_description: Perform actions on a Secret resource in Oracle Cloud Infrastructure
description:
- Perform actions on a Secret resource in Oracle Cloud Infrastructure
- For I(action=cancel_secret_deletion), cancels the pending deletion of the specified secret. Canceling
a scheduled deletion restores the secret's lifecycle state to what
it was before you scheduled the secret for deletion.
- For I(action=schedule_secret_deletion), schedules the deletion of the specified secret. This sets the lifecycle state of the secret
to `PENDING_DELETION` and then deletes it after the specified retention period ends.
version_added: "2.9"
author: Oracle (@oracle)
options:
secret_id:
description:
- The OCID of the secret.
type: str
aliases: ["id"]
required: true
time_of_deletion:
description:
- An optional property indicating when to delete the secret version, expressed in L(RFC 3339,https://tools.ietf.org/html/rfc3339) timestamp format.
- Applicable only for I(action=schedule_secret_deletion).
type: str
action:
description:
- The action to perform on the Secret.
type: str
required: true
choices:
- "cancel_secret_deletion"
- "schedule_secret_deletion"
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Perform action cancel_secret_deletion on secret
oci_vault_secret_actions:
secret_id: ocid1.secret.oc1..xxxxxxEXAMPLExxxxxx
action: cancel_secret_deletion
- name: Perform action schedule_secret_deletion on secret
oci_vault_secret_actions:
time_of_deletion: 2018-04-03T21:10:29.600Z
secret_id: ocid1.secret.oc1..xxxxxxEXAMPLExxxxxx
action: schedule_secret_deletion
"""
RETURN = """
secret:
description:
- Details of the Secret resource acted upon by the current operation
returned: on success
type: complex
contains:
compartment_id:
description:
- The OCID of the compartment where you want to create the secret.
returned: on success
type: string
sample: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
current_version_number:
description:
- The version number of the secret version that's currently in use.
returned: on success
type: int
sample: 56
defined_tags:
description:
- "Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
description:
description:
- A brief description of the secret. Avoid entering confidential information.
returned: on success
type: string
sample: description_example
freeform_tags:
description:
- "Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
Example: `{\\"Department\\": \\"Finance\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
id:
description:
- The OCID of the secret.
returned: on success
type: string
sample: ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx
key_id:
description:
- The OCID of the master encryption key that is used to encrypt the secret.
returned: on success
type: string
sample: ocid1.key.oc1..xxxxxxEXAMPLExxxxxx
lifecycle_details:
description:
- Additional information about the current lifecycle state of the secret.
returned: on success
type: string
sample: lifecycle_details_example
lifecycle_state:
description:
- The current lifecycle state of the secret.
returned: on success
type: string
sample: CREATING
metadata:
description:
- Additional metadata that you can use to provide context about how to use the secret or during rotation or
other administrative tasks. For example, for a secret that you use to connect to a database, the additional
metadata might specify the connection endpoint and the connection string. Provide additional metadata as key-value pairs.
returned: on success
type: dict
sample: {}
secret_name:
description:
- The user-friendly name of the secret. Avoid entering confidential information.
returned: on success
type: string
sample: secret_name_example
secret_rules:
description:
- A list of rules that control how the secret is used and managed.
returned: on success
type: complex
contains:
rule_type:
description:
- The type of rule, which either controls when the secret contents expire or whether they can be reused.
returned: on success
type: string
sample: SECRET_EXPIRY_RULE
secret_version_expiry_interval:
description:
- A property indicating how long the secret contents will be considered valid, expressed in
L(ISO 8601,https://en.wikipedia.org/wiki/ISO_8601#Time_intervals) format. The secret needs to be
updated when the secret content expires. No enforcement mechanism exists at this time, but audit logs
record the expiration on the appropriate date, according to the time interval specified in the rule.
The timer resets after you update the secret contents.
The minimum value is 1 day and the maximum value is 90 days for this property. Currently, only intervals expressed in days are
supported.
For example, pass `P3D` to have the secret version expire every 3 days.
returned: on success
type: string
sample: secret_version_expiry_interval_example
time_of_absolute_expiry:
description:
- "An optional property indicating the absolute time when this secret will expire, expressed in L(RFC
3339,https://tools.ietf.org/html/rfc3339) timestamp format.
The minimum number of days from current time is 1 day and the maximum number of days from current time is 365 days.
Example: `2019-04-03T21:10:29.600Z`"
returned: on success
type: string
sample: 2019-04-03T21:10:29.600Z
is_secret_content_retrieval_blocked_on_expiry:
description:
- A property indicating whether to block retrieval of the secret content, on expiry. The default is false.
If the secret has already expired and you would like to retrieve the secret contents,
you need to edit the secret rule to disable this property, to allow reading the secret content.
returned: on success
type: bool
sample: true
is_enforced_on_deleted_secret_versions:
description:
- A property indicating whether the rule is applied even if the secret version with the content you are trying to reuse was deleted.
returned: on success
type: bool
sample: true
time_created:
description:
- "A property indicating when the secret was created, expressed in L(RFC 3339,https://tools.ietf.org/html/rfc3339) timestamp format.
Example: `2019-04-03T21:10:29.600Z`"
returned: on success
type: string
sample: 2019-04-03T21:10:29.600Z
time_of_current_version_expiry:
description:
- "An optional property indicating when the current secret version will expire, expressed in L(RFC 3339,https://tools.ietf.org/html/rfc3339)
timestamp format.
Example: `2019-04-03T21:10:29.600Z`"
returned: on success
type: string
sample: 2019-04-03T21:10:29.600Z
time_of_deletion:
description:
- "An optional property indicating when to delete the secret, expressed in L(RFC 3339,https://tools.ietf.org/html/rfc3339) timestamp format.
Example: `2019-04-03T21:10:29.600Z`"
returned: on success
type: string
sample: 2019-04-03T21:10:29.600Z
vault_id:
description:
- The OCID of the vault where the secret exists.
returned: on success
type: string
sample: ocid1.vault.oc1..xxxxxxEXAMPLExxxxxx
sample: {
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"current_version_number": 56,
"defined_tags": {'Operations': {'CostCenter': 'US'}},
"description": "description_example",
"freeform_tags": {'Department': 'Finance'},
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"key_id": "ocid1.key.oc1..xxxxxxEXAMPLExxxxxx",
"lifecycle_details": "lifecycle_details_example",
"lifecycle_state": "CREATING",
"metadata": {},
"secret_name": "secret_name_example",
"secret_rules": [{
"rule_type": "SECRET_EXPIRY_RULE",
"secret_version_expiry_interval": "secret_version_expiry_interval_example",
"time_of_absolute_expiry": "2019-04-03T21:10:29.600Z",
"is_secret_content_retrieval_blocked_on_expiry": true,
"is_enforced_on_deleted_secret_versions": true
}],
"time_created": "2019-04-03T21:10:29.600Z",
"time_of_current_version_expiry": "2019-04-03T21:10:29.600Z",
"time_of_deletion": "2019-04-03T21:10:29.600Z",
"vault_id": "ocid1.vault.oc1..xxxxxxEXAMPLExxxxxx"
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIActionsHelperBase,
get_custom_class,
)
# Feature-detect the OCI python SDK; main() then fails with a clear message
# instead of the module crashing at import time when the SDK is missing.
try:
    from oci.vault import VaultsClient
    from oci.vault.models import ScheduleSecretDeletionDetails

    HAS_OCI_PY_SDK = True
except ImportError:
    HAS_OCI_PY_SDK = False
class SecretActionsHelperGen(OCIActionsHelperBase):
    """
    Supported actions:
        cancel_secret_deletion
        schedule_secret_deletion
    """

    @staticmethod
    def get_module_resource_id_param():
        """Name of the module parameter carrying the resource OCID."""
        return "secret_id"

    def get_module_resource_id(self):
        """OCID of the secret targeted by this module invocation."""
        return self.module.params.get("secret_id")

    def get_get_fn(self):
        """Client function used to read the secret's current state."""
        return self.client.get_secret

    def get_resource(self):
        """Fetch the secret, retrying with backoff on transient failures."""
        return oci_common_utils.call_with_backoff(
            self.client.get_secret, secret_id=self.module.params.get("secret_id"),
        )

    def cancel_secret_deletion(self):
        """Cancel a pending deletion of the secret."""
        return oci_wait_utils.call_and_wait(
            call_fn=self.client.cancel_secret_deletion,
            call_fn_args=(),
            call_fn_kwargs=dict(secret_id=self.module.params.get("secret_id"),),
            # NONE waiter: no lifecycle state is awaited after the call returns.
            waiter_type=oci_wait_utils.NONE_WAITER_KEY,
            operation="{0}_{1}".format(
                self.module.params.get("action").upper(),
                oci_common_utils.ACTION_OPERATION_KEY,
            ),
            waiter_client=self.get_waiter_client(),
            resource_helper=self,
            wait_for_states=self.get_action_desired_states(
                self.module.params.get("action")
            ),
        )

    def schedule_secret_deletion(self):
        """Schedule deletion of the secret at the requested time."""
        # Map matching module params onto the SDK's details model.
        action_details = oci_common_utils.convert_input_data_to_model_class(
            self.module.params, ScheduleSecretDeletionDetails
        )
        return oci_wait_utils.call_and_wait(
            call_fn=self.client.schedule_secret_deletion,
            call_fn_args=(),
            call_fn_kwargs=dict(
                secret_id=self.module.params.get("secret_id"),
                schedule_secret_deletion_details=action_details,
            ),
            # NONE waiter: no lifecycle state is awaited after the call returns.
            waiter_type=oci_wait_utils.NONE_WAITER_KEY,
            operation="{0}_{1}".format(
                self.module.params.get("action").upper(),
                oci_common_utils.ACTION_OPERATION_KEY,
            ),
            waiter_client=self.get_waiter_client(),
            resource_helper=self,
            wait_for_states=self.get_action_desired_states(
                self.module.params.get("action")
            ),
        )
# Allow a hand-written customization class (if present) to override the
# generated helper via MRO ordering below.
SecretActionsHelperCustom = get_custom_class("SecretActionsHelperCustom")


class ResourceHelper(SecretActionsHelperCustom, SecretActionsHelperGen):
    pass
def main():
    """Module entry point: build the argument spec and dispatch the action."""
    # Common OCI arguments (auth, region, ...) shared by all modules.
    module_args = oci_common_utils.get_common_arg_spec(
        supports_create=False, supports_wait=False
    )
    module_args.update(
        dict(
            secret_id=dict(aliases=["id"], type="str", required=True),
            time_of_deletion=dict(type="str"),
            action=dict(
                type="str",
                required=True,
                choices=["cancel_secret_deletion", "schedule_secret_deletion"],
            ),
        )
    )

    module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)

    # Fail early with a clear message when the OCI SDK is not installed.
    if not HAS_OCI_PY_SDK:
        module.fail_json(msg="oci python sdk required for this module.")

    resource_helper = ResourceHelper(
        module=module,
        resource_type="secret",
        service_client_class=VaultsClient,
        namespace="vault",
    )

    result = resource_helper.perform_action(module.params.get("action"))
    module.exit_json(**result)


if __name__ == "__main__":
    main()
| plugins/modules/oci_vault_secret_actions.py | 15,464 | Supported actions:
cancel_secret_deletion
schedule_secret_deletion
!/usr/bin/python Copyright (c) 2017, 2021 Oracle and/or its affiliates. This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license. GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) Apache License v2.0 See LICENSE.TXT for details. GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN | 458 | en | 0.727582 |
import os
import sys
import json
from .version import __version__
from satsearch import Search
from satstac import Items
from satsearch.parser import SatUtilsParser
import satsearch.config as config
def main(items=None, printmd=None, printcal=False, found=False,
         save=None, download=None, requestor_pays=False, **kwargs):
    """
    Perform a search (or load previously saved items) and apply the requested
    output actions: metadata summary, calendar, saving, downloading.
    """
    if items is not None:
        # A filename was given: restore a previously saved result set.
        items = Items.load(items)
    else:
        # No saved items: run a fresh search with the remaining parameters.
        search = Search.search(**kwargs)
        if found:
            # Only report how many items matched, without fetching them.
            num = search.found()
            print('%s items found' % num)
            return num
        items = search.items()
    print('%s items found' % len(items))

    if printmd is not None:
        # Print a metadata summary for the chosen fields.
        print(items.summary(printmd))
    if printcal:
        print(items.calendar())
    if save is not None:
        # Persist the full result set to a file.
        items.save(filename=save)
    if download is not None:
        if 'ALL' in download:
            # Expand to the union of every asset key present in the items.
            download = {key for item in items for key in item.assets}
        for key in download:
            items.download(key=key, path=config.DATADIR,
                           filename=config.FILENAME,
                           requestor_pays=requestor_pays)
    return items
def cli():
    """Command-line entry point: parse arguments and run the search."""
    parser = SatUtilsParser.newbie(description='sat-search (v%s)' % __version__)
    kwargs = parser.parse_args(sys.argv[1:])

    # 'intersects' may be a GeoJSON filename; inline its contents if so.
    if 'intersects' in kwargs:
        geom = kwargs['intersects']
        if os.path.exists(geom):
            with open(geom) as fp:
                kwargs['intersects'] = json.load(fp)

    command = kwargs.pop('command', None)
    if command is not None:
        main(**kwargs)
# Allow running as a plain script as well as via the console entry point.
if __name__ == "__main__":
    cli()
| satsearch/main.py | 1,911 | Main function for performing a search
if there are no items then perform a search otherwise, load a search from a file print metadata print calendar save all metadata in JSON file download files given `download` keys get complete set of assets if a filename, read the GeoJSON file | 283 | en | 0.670781 |
from argparse import ArgumentParser
import datetime
import dateutil
import sys, re
from os import path
def parseArgs():
    """
    Parse the command-line arguments.

    Returns
    -------
    tuple
        (action, arguments): action is one of "discover", "manage",
        "settings"; arguments is a dict whose "file" key holds the path of
        the input dataset (or None).

    Exits the process with an error message on invalid input.
    """
    parser = ArgumentParser(add_help=False)
    parser.add_argument("-a", "--action", help="Please select an option out of <discover, manage, settings>", type=str, required=True)
    parser.add_argument("-f", "--file", help="Please specify absolute path to initial dataset", type=str)
    args = parser.parse_args()
    # The hard-coded debug dataset path (marked "TODO: remove later") was
    # removed: it silently overrode whatever the user passed via --file.
    if args.action is None or args.action not in ("discover", "manage", "settings"):
        # message fixed: the valid choice is "manage", not "manager"
        sys.exit('Please specify an action out of <"discover", "manage", "settings">')
    if args.action == "discover" and (args.file is None or not path.exists(args.file)):
        sys.exit("The input file could not be found in the filesystem.")
    arguments = {"file": args.file}
    return args.action, arguments
class DataCleaner:
    """Configurable cleaning pipeline for the raw mail dataset.

    Each boolean flag enables one normalization step in apply(); dateFormat
    is the strftime pattern every "Datetime" value is rewritten to.
    """
    def __init__(self, removeURLs, removeMultWhitespace, lowercasing, dateFormat):
        # Flags controlling the optional steps; dateFormat is passed to
        # datetime.strftime() in apply().
        self.removeURLs = removeURLs
        self.removeMultWhitespace = removeMultWhitespace
        self.lowercasing = lowercasing
        self.dateFormat = dateFormat
    def apply(self, inputDf):
        """Clean *inputDf* in place (columns are overwritten, nothing returned).

        Expects a pandas-style DataFrame with at least the columns
        "Content", "Datetime", "From" and "To" — presumably one row per
        mail; confirm against the loader.
        """
        def removeUrl(content):
            # Strip every http(s) URL from the text.
            return re.sub(r'https?://\S+', '', content)
        def removeMultWhitespace(content):
            # Collapse runs of spaces into a single space.
            return re.sub(r' +', ' ', content)
        # Remove URLs
        if self.removeURLs:
            inputDf["Content"] = inputDf.apply(lambda row: removeUrl(row["Content"]), axis=1)
        # Remove Multi-Whitespaces
        if self.removeMultWhitespace:
            inputDf["Content"] = inputDf.apply(lambda row: removeMultWhitespace(row["Content"]), axis=1)
        # Optional lowercasing of the whole mail body.
        if self.lowercasing:
            inputDf["Content"] = inputDf.apply(lambda row: row["Content"].lower(), axis=1)
        # Not-Empty-Constraints: reject datasets with missing mandatory fields.
        if inputDf["Content"].isnull().values.any() or \
                inputDf["Datetime"].isnull().values.any() or \
                inputDf["From"].isnull().values.any() or \
                inputDf["To"].isnull().values.any():
            raise AttributeError("Content, Datetime, From and To field cannot be empty. Please check your input dataset.")
        # Unify Date format - reformat to %Y-%m-%d %H:%M:%S
        def reformatDate(datestring, dateformat):
            # dayfirst=True reads ambiguous dates like 03/04/2021 as
            # day/month — NOTE(review): confirm this matches the source data.
            try:
                newDate = dateutil.parser.parse(datestring, dayfirst=True)
                return newDate.strftime(dateformat)
            except ValueError as e:
                raise ValueError("Make sure that all datetime columns are well-formatted "
                                 "and that they contain dates that are within the possible bounds.") from e
        inputDf["Datetime"] = inputDf.apply(lambda row: reformatDate(row["Datetime"], self.dateFormat), axis=1)
        # clean signatures, clauses
        def stripEndClauses(content, clauses):
            # Cut the text at the earliest (lowest-index) matching clause.
            clauseIndex = 0
            index = 0
            # Find lowest greetings or end clause index and strip off everything that comes after it
            for item in clauses:
                # needle and haystack both in lowercase to ignore case
                index = content.lower().find(item.lower())
                if index > -1 and (index < clauseIndex or clauseIndex == 0):
                    clauseIndex = index
            if clauseIndex > 0:
                return content[:clauseIndex]
            else:
                return content
        def stripStartClauses(content, clauses):
            # Mirror image of stripEndClauses: keeps the text from the
            # highest-index match onward.
            clauseIndex = 0
            index = 0
            # Find lowest greetings or end clause index and strip off everything that comes after it
            for item in clauses:
                # needle and haystack both in lowercase to ignore case
                index = content.lower().find(item.lower())
                if index > -1 and (index > clauseIndex or clauseIndex == 0):
                    clauseIndex = index
            if clauseIndex > 0:
                return content[clauseIndex:]
            else:
                return content
        # NOTE(review): startClausesList is empty, so stripStartClauses is
        # currently a no-op — confirm whether start clauses were meant to be
        # configured here.
        startClausesList = []
        # NOTE(review): "Thanks in advance" appears twice in this list.
        endGreetingsList = ["Yours sincerely", "Sincerely", "Sincerely yours", "Take care", "Regards",
                            "Warm regards", "Best regards", "Kind regards", "Warmest regards", "Yours truly", "Yours,",
                            "Warmly,", "Warm wishes", "Best,", "Best Wishes", "Thanks in advance", "Thank you in advance",
                            "Thanks in advance"]
        # Typical openings of confidentiality disclaimers appended by mail servers.
        confList = ["The information contained in this communication",
                    "The content of this email is confidential", "The content of this e-mail", "This email and attachments (if any) is intended",
                    "This email is intended solely", "This e-mail is intended solely"]
        endClausesList = endGreetingsList+confList
        inputDf["Content"] = inputDf.apply(lambda row: stripEndClauses(row["Content"], endClausesList), axis=1)
        inputDf["Content"] = inputDf.apply(lambda row: stripStartClauses(row["Content"], startClausesList), axis=1)
        # Reduce multiple new-lines to one
        inputDf["Content"] = inputDf.apply(lambda row: re.sub(r'\n+', '\n', row["Content"]), axis=1)
        # Replace new-lines with whitespaces
        inputDf["Content"] = inputDf.apply(lambda row: re.sub(r'\n', ' ', row["Content"]), axis=1)
        def convertDateString(datestring):
            # NOTE(review): defined but never called in this method, and the
            # except branch repeats the try branch verbatim — the fallback was
            # probably meant to use a different format string; confirm intent.
            try:
                return datetime.datetime.strptime(datestring, "%Y-%m-%d %H:%M:%S")
            except ValueError:
                return datetime.datetime.strptime(datestring, "%Y-%m-%d %H:%M:%S")
#!/usr/bin/env python3.8
import importlib
import typing
from enum import Enum
import discord
from discord.ext import commands
from discord.types.interactions import ApplicationCommandOption
import common.paginator as paginator
import common.star_classes as star_classes
import common.utils as utils
class OwnerCMDs(commands.Cog, name="Owner", command_attrs=dict(hidden=True)):
    """Owner-only maintenance commands: extension management and
    slash-command administration."""

    def __init__(self, bot):
        self.bot: utils.SeraphimBase = bot

    async def cog_check(self, ctx):
        # Every command in this cog is restricted to the bot owner.
        return await self.bot.is_owner(ctx.author)

    @commands.command(hidden=True, aliases=["reloadallextensions"])
    async def reload_all_extensions(self, ctx):
        """Reload every loaded extension except the DB handler."""
        # cogs.db_handler is skipped — presumably it holds live state that
        # must survive a reload (TODO: confirm).
        extensions = [i for i in self.bot.extensions.keys() if i != "cogs.db_handler"]
        for extension in extensions:
            self.bot.reload_extension(extension)

        await ctx.reply("All extensions reloaded!")

    @commands.command(hidden=True)
    async def list_loaded_extensions(self, ctx):
        """List the names of all currently loaded extensions."""
        exten_list = [f"`{k}`" for k in self.bot.extensions.keys()]
        exten_str = ", ".join(exten_list)
        await ctx.reply(f"Extensions: {exten_str}")

    class OptionTypeEnum(Enum):
        # Mirrors Discord's ApplicationCommandOptionType values.
        SUB_COMMAND = 1
        SUB_COMMAND_GROUP = 2
        STRING = 3
        INTEGER = 4
        BOOLEAN = 5
        USER = 6
        CHANNEL = 7
        ROLE = 8
        MENTIONABLE = 9
        NUMBER = 10

    @commands.command(hidden=True, aliases=["list_slash_commands", "listslashcmds"])
    async def list_slash_cmds(
        self, ctx: utils.SeraContextBase, guild: typing.Optional[discord.Guild]
    ):
        """Paginate the slash commands registered globally or for *guild*."""
        if not guild:
            app_cmds = await ctx.bot.http.get_global_commands(ctx.bot.application_id)
        else:
            app_cmds = await ctx.bot.http.get_guild_commands(
                ctx.bot.application_id, guild.id
            )

        if not app_cmds:
            raise commands.BadArgument(
                "This guild/bot does not have any specific slash commands."
            )

        slash_entries = []
        for entry in app_cmds:
            # Type 1 (or a missing "type" field) marks a CHAT_INPUT command.
            if entry.get("type", 0) == 1:
                entry_str_list = []

                if entry["description"]:
                    entry_str_list.append(entry["description"])
                else:
                    entry_str_list.append("No description provided.")

                if entry.get("options"):
                    entry_str_list.append("__Arguments:__")

                    for option in entry["options"]:  # type: ignore
                        option: ApplicationCommandOption
                        option_type = self.OptionTypeEnum(option["type"]).name
                        # BUGFIX: the API omits "required" when it is false,
                        # so option["required"] could raise KeyError.
                        required_txt = ", required" if option.get("required") else ""
                        entry_str_list.append(
                            f"{option['name']} (type {option_type}{required_txt}) - {option['description']}"
                        )

                slash_entries.append(
                    (f"{entry['name']} - ID {entry['id']}", "\n".join(entry_str_list))
                )

        if not slash_entries:
            raise commands.BadArgument(
                "This guild/bot does not have any specific slash commands."
            )

        pages = paginator.FieldPages(ctx, entries=slash_entries, per_page=6)
        await pages.paginate()

    @commands.command(hidden=True, aliases=["removeslashcmd"])
    async def remove_slash_cmd(
        self, ctx, cmd: discord.Object, guild: typing.Optional[discord.Guild],
    ):
        """Delete one slash command by id, globally or for *guild*."""
        if guild:
            await self.bot.http.delete_guild_command(
                self.bot.application_id, guild.id, cmd.id
            )
        else:
            await self.bot.http.delete_global_command(self.bot.application_id, cmd.id)

        await ctx.reply("Removed command.")

    @commands.command(hidden=True, aliases=["removeallslashcmds"])
    async def remove_all_slash_cmds(self, ctx, guild: typing.Optional[discord.Guild]):
        """Delete every CHAT_INPUT slash command, globally or for *guild*."""
        if not guild:
            app_cmds = await self.bot.http.get_global_commands(self.bot.application_id)
        else:
            app_cmds = await self.bot.http.get_guild_commands(
                self.bot.application_id, guild.id
            )

        slash_cmd_ids = [e["id"] for e in app_cmds if e.get("type", 0) == 1]

        for cmd_id in slash_cmd_ids:
            if not guild:
                await self.bot.http.delete_global_command(
                    self.bot.application_id, cmd_id
                )
            else:
                await self.bot.http.delete_guild_command(
                    self.bot.application_id, guild.id, cmd_id
                )

        await ctx.reply("Removed all commands.")
def setup(bot):
    """Extension entry point: hot-reload shared modules, then add the cog."""
    # Reload shared helper modules so their latest code is used when this
    # extension itself is reloaded (order kept: utils first).
    for module in (utils, star_classes, paginator):
        importlib.reload(module)
    bot.add_cog(OwnerCMDs(bot))
| cogs/core/cmds/owner_cmds.py | 4,805 | !/usr/bin/env python3.8 type: ignore | 36 | en | 0.188428 |
# Copyright (c) 2015-2016, 2018, 2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2016 Ceridwen <ceridwenv@gmail.com>
# Copyright (c) 2020 hippo91 <guillaume.peillex@gmail.com>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
"""Hooks for nose library."""
import re
import textwrap
import astroid
import astroid.builder
# Shared AST builder bound to the global astroid manager.
_BUILDER = astroid.builder.AstroidBuilder(astroid.MANAGER)
def _pep8(name, caps=re.compile("([A-Z])")):
return caps.sub(lambda m: "_" + m.groups()[0].lower(), name)
def _nose_tools_functions():
    """Get an iterator of names and bound methods."""
    module = _BUILDER.string_build(
        textwrap.dedent(
            """
    import unittest
    class Test(unittest.TestCase):
        pass
    a = Test()
    """
        )
    )
    try:
        case = next(module["a"].infer())
    except astroid.InferenceError:
        return
    for method in case.methods():
        name = method.name
        # Only the camelCase assert* helpers are re-exported by nose.tools.
        if not name.startswith("assert") or "_" in name:
            continue
        bound = astroid.BoundMethod(method, case)
        yield _pep8(name), bound
        if name == "assertEqual":
            # nose also exports assert_equals.
            yield "assert_equals", bound
def _nose_tools_transform(node):
    """Inject the pep8-style nose.tools assertion helpers into *node*'s locals."""
    for helper_name, bound_method in _nose_tools_functions():
        node.locals[helper_name] = [bound_method]
def _nose_tools_trivial_transform():
    """Custom transform for the nose.tools module."""
    stub = _BUILDER.string_build("""__all__ = []""")
    exported = ["ok_", "eq_"]

    for snake_name, bound in _nose_tools_functions():
        exported.append(snake_name)
        stub[snake_name] = bound

    # nose.tools populates __all__ manually with .append, so rebuild the
    # List node that __all__ is assigned to so it matches what we exported.
    assign = stub["__all__"].parent
    exported_node = astroid.List(exported)
    exported_node.parent = assign
    assign.value = exported_node
    return stub
# Provide a synthetic AST for nose.tools.trivial and patch the real
# nose.tools module with the pep8-named assertion helpers.
astroid.register_module_extender(
    astroid.MANAGER, "nose.tools.trivial", _nose_tools_trivial_transform
)
astroid.MANAGER.register_transform(
    astroid.Module, _nose_tools_transform, lambda n: n.name == "nose.tools"
)
| venv/Lib/site-packages/astroid/brain/brain_nose.py | 2,282 | Get an iterator of names and bound methods.
Custom transform for the nose.tools module.
Hooks for nose library.
Copyright (c) 2015-2016, 2018, 2020 Claudiu Popa <pcmanticore@gmail.com> Copyright (c) 2016 Ceridwen <ceridwenv@gmail.com> Copyright (c) 2020 hippo91 <guillaume.peillex@gmail.com> Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER nose also exports assert_equals. Update the __all__ variable, since nose.tools does this manually with .append. | 562 | en | 0.70018 |
# Lint as: python3
# Copyright 2020 The DMLab2D Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Function for flattening dictionary settings."""
import numbers
from typing import Mapping, Sequence
def _flatten_args(pairs_in, args_out, prefix, visited_stack):
"""Helper function for flatten_args. See `flatten_args` below for details."""
for key, v in pairs_in:
if not isinstance(key, str):
raise ValueError('Keys must be strings. %r' % key)
flat_key = prefix + '.' + key if prefix else key
if v is None:
args_out[flat_key] = 'none'
elif isinstance(v, str):
args_out[flat_key] = v
elif isinstance(v, bool):
args_out[flat_key] = 'true' if v else 'false'
elif isinstance(v, numbers.Number):
args_out[flat_key] = str(v)
elif isinstance(v, Mapping):
if not any(v is entry for entry in visited_stack):
_flatten_args(v.items(), args_out, flat_key, visited_stack + [v])
elif isinstance(v, Sequence):
if not any(v is entry for entry in visited_stack):
_flatten_args(((str(i + 1), vv) for i, vv in enumerate(v)), args_out,
flat_key, visited_stack + [v])
else:
raise ValueError('Value for \'{}\' cannot be type: \'{}\''.format(
flat_key, str(type(v))))
def flatten_args(args_in):
    """Converts a dictionary of dictionarys and lists into a flat table.

    Args:
      args_in: dictionary containing a hierachy of dictionaries and lists. Leaf
        values can be strings, bools, numbers..

    Returns:
      A flat dictionary with keys separated by '.' and string values.
    """
    flat = {}
    # Seed the visit stack with the root mapping so a self-reference is caught.
    _flatten_args(args_in.items(), flat, None, [args_in])
    return flat
| dmlab2d/settings_helper.py | 2,207 | Helper function for flatten_args. See `flatten_args` below for details.
Converts a dictionary of dictionarys and lists into a flat table.
Args:
args_in: dictionary containing a hierachy of dictionaries and lists. Leaf
values can be strings, bools, numbers..
Returns:
A flat dictionary with keys separated by '.' and string values.
Function for flattening dictionary settings.
Lint as: python3 Copyright 2020 The DMLab2D Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS-IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 963 | en | 0.784729 |
import os
import numpy as np
import scipy.sparse as sp
import pickle
import torch
from torch.utils.data import DataLoader
from dgl.data.utils import download, _get_dgl_url, get_download_dir, extract_archive
import random
import time
import dgl
def ReadTxtNet(file_path="", undirected=True):
    """ Read the txt network file.
    Notations: The network is unweighted.
    Parameters
    ----------
    file_path str : path of network file
    undirected bool : whether the edges are undirected
    Return
    ------
    net dict : a dict recording the connections in the graph
    node2id dict : a dict mapping the nodes to their embedding indices
    id2node dict : a dict mapping nodes embedding indices to the nodes
    """
    # The two known dataset names are downloaded from the DGL server and
    # extracted before reading; anything else is treated as a local path.
    if file_path == 'youtube' or file_path == 'blog':
        name = file_path
        # NOTE(review): `dir` shadows the builtin of the same name.
        dir = get_download_dir()
        zip_file_path='{}/{}.zip'.format(dir, name)
        download(_get_dgl_url(os.path.join('dataset/DeepWalk/', '{}.zip'.format(file_path))), path=zip_file_path)
        extract_archive(zip_file_path,
                        '{}/{}'.format(dir, name))
        file_path = "{}/{}/{}-net.txt".format(dir, name, name)
    # Raw node ids are relabelled to consecutive embedding indices (cid).
    node2id = {}
    id2node = {}
    cid = 0
    src = []
    dst = []
    weight = []
    net = {}
    with open(file_path, "r") as f:
        for line in f.readlines():
            # Each line: "src dst" or "src dst weight" (space separated ints).
            tup = list(map(int, line.strip().split(" ")))
            assert len(tup) in [2, 3], "The format of network file is unrecognizable."
            if len(tup) == 3:
                n1, n2, w = tup
            elif len(tup) == 2:
                n1, n2 = tup
                w = 1  # unweighted edge defaults to weight 1
            if n1 not in node2id:
                node2id[n1] = cid
                id2node[cid] = n1
                cid += 1
            if n2 not in node2id:
                node2id[n2] = cid
                id2node[cid] = n2
                cid += 1
            n1 = node2id[n1]
            n2 = node2id[n2]
            # Record the edge once per direction; duplicates are skipped.
            if n1 not in net:
                net[n1] = {n2: w}
                src.append(n1)
                dst.append(n2)
                weight.append(w)
            elif n2 not in net[n1]:
                net[n1][n2] = w
                src.append(n1)
                dst.append(n2)
                weight.append(w)
            if undirected:
                # Mirror the edge so the adjacency is symmetric.
                if n2 not in net:
                    net[n2] = {n1: w}
                    src.append(n2)
                    dst.append(n1)
                    weight.append(w)
                elif n1 not in net[n2]:
                    net[n2][n1] = w
                    src.append(n2)
                    dst.append(n1)
                    weight.append(w)
    print("node num: %d" % len(net))
    print("edge num: %d" % len(src))
    # Relabelling must have produced contiguous ids 0..N-1.
    assert max(net.keys()) == len(net) - 1, "error reading net, quit"
    # Sparse adjacency in COO form; weights stored as float32.
    sm = sp.coo_matrix(
        (np.array(weight), (src, dst)),
        dtype=np.float32)
    return net, node2id, id2node, sm
def net2graph(net_sm):
    """ Transform the network to DGL graph
    Return
    ------
    G DGLGraph : graph by DGL
    """
    tic = time.time()
    graph = dgl.DGLGraph(net_sm)
    elapsed = time.time() - tic
    print("Building DGLGraph in %.2fs" % elapsed)
    return graph
def make_undirected(G):
    """Add the reverse of every existing edge so each connection goes both ways."""
    srcs, dsts = G.edges()
    G.add_edges(dsts, srcs)
    return G
def find_connected_nodes(G):
    """Return the ids of nodes that have at least one outgoing edge."""
    return torch.nonzero(G.out_degrees()).squeeze(-1)
class LineDataset:
    def __init__(self,
                 net_file,
                 batch_size,
                 num_samples,
                 negative=5,
                 gpus=None,
                 fast_neg=True,
                 ogbl_name="",
                 load_from_ogbl=False,
                 ogbn_name="",
                 load_from_ogbn=False,
                 ):
        """ This class has the following functions:
        1. Transform the txt network file into DGL graph;
        2. Generate random walk sequences for the trainer;
        3. Provide the negative table if the user hopes to sample negative
        nodes according to nodes' degrees;
        Parameter
        ---------
        net_file str : path of the dgl network file
        walk_length int : number of nodes in a sequence
        window_size int : context window size
        num_walks int : number of walks for each node
        batch_size int : number of node sequences in each batch
        negative int : negative samples for each positve node pair
        fast_neg bool : whether do negative sampling inside a batch
        """
        # `gpus` defaulted to the mutable list [0]; use a None sentinel
        # instead (same behaviour, no shared-default pitfall).
        if gpus is None:
            gpus = [0]
        self.batch_size = batch_size
        self.negative = negative
        self.num_samples = num_samples
        self.num_procs = len(gpus)
        self.fast_neg = fast_neg

        if load_from_ogbl:
            assert len(gpus) == 1, "ogb.linkproppred is not compatible with multi-gpu training."
            from load_dataset import load_from_ogbl_with_name
            self.G = load_from_ogbl_with_name(ogbl_name)
        elif load_from_ogbn:
            assert len(gpus) == 1, "ogb.linkproppred is not compatible with multi-gpu training."
            from load_dataset import load_from_ogbn_with_name
            self.G = load_from_ogbn_with_name(ogbn_name)
        else:
            self.G = dgl.load_graphs(net_file)[0][0]
        self.G = make_undirected(self.G)
        print("Finish reading graph")

        self.num_nodes = self.G.number_of_nodes()

        start = time.time()
        # Draw edge ids with replacement, then split them evenly per process.
        seeds = np.random.choice(np.arange(self.G.number_of_edges()),
                                 self.num_samples,
                                 replace=True)  # edge index
        self.seeds = torch.split(torch.LongTensor(seeds),
                                 int(np.ceil(self.num_samples / self.num_procs)),
                                 0)
        end = time.time()
        t = end - start
        print("generate %d samples in %.2fs" % (len(seeds), t))

        # negative table for true negative sampling
        self.valid_nodes = find_connected_nodes(self.G)
        if not fast_neg:
            node_degree = self.G.out_degrees(self.valid_nodes).numpy()
            # Unigram^0.75 distribution, discretised into a lookup table.
            node_degree = np.power(node_degree, 0.75)
            node_degree /= np.sum(node_degree)
            # BUGFIX: np.int / np.long were removed from NumPy (>=1.24);
            # int64 matches torch.LongTensor downstream.
            node_degree = np.array(node_degree * 1e8, dtype=np.int64)
            self.neg_table = []

            for idx, node in enumerate(self.valid_nodes):
                self.neg_table += [node] * node_degree[idx]
            self.neg_table_size = len(self.neg_table)
            self.neg_table = np.array(self.neg_table, dtype=np.int64)
            del node_degree

    def create_sampler(self, i):
        """ create random walk sampler """
        return EdgeSampler(self.G, self.seeds[i])

    def save_mapping(self, map_file):
        # NOTE(review): self.node2id is never assigned in this class, so
        # calling this raises AttributeError — verify against callers.
        with open(map_file, "wb") as f:
            pickle.dump(self.node2id, f)
class EdgeSampler(object):
    """Samples (src, dst) edge pairs from a graph by precomputed edge index."""

    def __init__(self, G, seeds):
        self.G = G
        self.seeds = seeds
        # Materialise the edge list as an (E, 2) tensor of [src, dst] rows.
        srcs, dsts = G.edges()
        self.edges = torch.stack((srcs, dsts), dim=1)

    def sample(self, seeds):
        """ seeds torch.LongTensor : a batch of indices of edges """
        index = torch.LongTensor(seeds)
        return self.edges[index]
Notations: The network is unweighted.
Parameters
----------
file_path str : path of network file
undirected bool : whether the edges are undirected
Return
------
net dict : a dict recording the connections in the graph
node2id dict : a dict mapping the nodes to their embedding indices
id2node dict : a dict mapping nodes embedding indices to the nodes
This class has the following functions:
1. Transform the txt network file into DGL graph;
2. Generate random walk sequences for the trainer;
3. Provide the negative table if the user hopes to sample negative
nodes according to nodes' degrees;
Parameter
---------
net_file str : path of the dgl network file
walk_length int : number of nodes in a sequence
window_size int : context window size
num_walks int : number of walks for each node
batch_size int : number of node sequences in each batch
negative int : negative samples for each positve node pair
fast_neg bool : whether do negative sampling inside a batch
create random walk sampler
Transform the network to DGL graph
Return
------
G DGLGraph : graph by DGL
seeds torch.LongTensor : a batch of indices of edges
G.readonly(False) edge index negative table for true negative sampling | 1,229 | en | 0.673955 |
"""Tests for the DirecTV component."""
from http import HTTPStatus
from homeassistant.components.directv.const import CONF_RECEIVER_ID, DOMAIN
from homeassistant.components.ssdp import ATTR_SSDP_LOCATION
from homeassistant.const import CONF_HOST, CONTENT_TYPE_JSON
from homeassistant.core import HomeAssistant
from tests.common import MockConfigEntry, load_fixture
from tests.test_util.aiohttp import AiohttpClientMocker
HOST = "127.0.0.1"  # address of the mocked DirecTV receiver
RECEIVER_ID = "028877455858"
SSDP_LOCATION = "http://127.0.0.1/"
UPNP_SERIAL = "RID-028877455858"  # serial as reported over SSDP/UPnP
# Canned payloads: YAML-style config, SSDP discovery info and user-flow input.
MOCK_CONFIG = {DOMAIN: [{CONF_HOST: HOST}]}
MOCK_SSDP_DISCOVERY_INFO = {ATTR_SSDP_LOCATION: SSDP_LOCATION}
MOCK_USER_INPUT = {CONF_HOST: HOST}
def mock_connection(aioclient_mock: AiohttpClientMocker) -> None:
    """Mock the DirecTV connection for Home Assistant."""
    # (path, fixture, params, status); registration order is preserved from
    # the original so param-specific routes are added before catch-alls.
    routes = (
        ("/info/getVersion", "directv/info-get-version.json", None, None),
        ("/info/getLocations", "directv/info-get-locations.json", None, None),
        ("/info/mode", "directv/info-mode-standby.json",
         {"clientAddr": "B01234567890"}, None),
        ("/info/mode", "directv/info-mode-error.json",
         {"clientAddr": "9XXXXXXXXXX9"}, HTTPStatus.INTERNAL_SERVER_ERROR),
        ("/info/mode", "directv/info-mode.json", None, None),
        ("/remote/processKey", "directv/remote-process-key.json", None, None),
        ("/tv/tune", "directv/tv-tune.json", None, None),
        ("/tv/getTuned", "directv/tv-get-tuned.json",
         {"clientAddr": "2CA17D1CD30X"}, None),
        ("/tv/getTuned", "directv/tv-get-tuned-music.json",
         {"clientAddr": "A01234567890"}, None),
        ("/tv/getTuned", "directv/tv-get-tuned-restricted.json",
         {"clientAddr": "C01234567890"}, HTTPStatus.FORBIDDEN),
        ("/tv/getTuned", "directv/tv-get-tuned-movie.json", None, None),
    )

    for path, fixture, params, status in routes:
        kwargs = {
            "text": load_fixture(fixture),
            "headers": {"Content-Type": CONTENT_TYPE_JSON},
        }
        if params is not None:
            kwargs["params"] = params
        if status is not None:
            kwargs["status"] = status
        aioclient_mock.get(f"http://{HOST}:8080{path}", **kwargs)
async def setup_integration(
    hass: HomeAssistant,
    aioclient_mock: AiohttpClientMocker,
    skip_entry_setup: bool = False,
    setup_error: bool = False,
) -> MockConfigEntry:
    """Set up the DirecTV integration in Home Assistant."""
    if setup_error:
        # Fail the very first request so config-entry setup errors out.
        aioclient_mock.get(
            f"http://{HOST}:8080/info/getVersion",
            status=HTTPStatus.INTERNAL_SERVER_ERROR,
        )
    else:
        mock_connection(aioclient_mock)

    config_entry = MockConfigEntry(
        domain=DOMAIN,
        unique_id=RECEIVER_ID,
        data={CONF_HOST: HOST, CONF_RECEIVER_ID: RECEIVER_ID},
    )
    config_entry.add_to_hass(hass)

    if not skip_entry_setup:
        await hass.config_entries.async_setup(config_entry.entry_id)
        await hass.async_block_till_done()

    return config_entry
| tests/components/directv/__init__.py | 3,982 | Mock the DirecTV connection for Home Assistant.
Tests for the DirecTV component. | 80 | en | 0.707659 |
from random import shuffle
from models.RainbowModelLeaveRecsOut import RainbowModelLeaveRecsOut
from tensorflow.keras.layers import Conv1D, MaxPooling1D, Flatten, Dense, Dropout # type: ignore
from tensorflow.keras.models import Sequential # type: ignore
import numpy as np
from utils.Recording import Recording
from utils.array_operations import split_list_by_percentage
from utils.typing import assert_type
class ConvModel(RainbowModelLeaveRecsOut):
    def __init__(self, **kwargs):
        """
        Convolutional model
        :param kwargs:
            window_size: int
            stride_size: int
            test_percentage: float
            n_features: int
            n_outputs: int
        """
        # Copy the hyper parameters onto the instance.
        self.window_size = kwargs["window_size"]
        self.stride_size = kwargs["stride_size"]
        self.test_percentage = kwargs["test_percentage"]
        self.verbose = 0
        self.epochs = 10
        self.batch_size = 32
        # Build the Keras model up front.
        self.model = self.__create_model(kwargs["n_features"], kwargs["n_outputs"])

    def __create_model(self, n_features, n_outputs):
        """Assemble and compile a 1D-CNN classifier over sensor windows."""
        print(
            f"Building model for {self.window_size} timesteps (window_size) and {n_features} features"
        )
        layer_stack = [
            Conv1D(
                filters=64,
                kernel_size=3,
                activation="relu",
                input_shape=(self.window_size, n_features),
            ),
            Conv1D(filters=64, kernel_size=3, activation="relu"),
            Dropout(0.5),
            MaxPooling1D(pool_size=2),
            Flatten(),
            Dense(100, activation="relu"),
            Dense(n_outputs, activation="softmax"),
        ]
        model = Sequential()
        for layer in layer_stack:
            model.add(layer)
        model.compile(
            loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]
        )
        return model
| archive/model_archive/ConvModel.py | 2,032 | Convolutional model
:param kwargs:
window_size: int
stride_size: int
test_percentage: float
n_features: int
n_outputs: int
type: ignore type: ignore hyper params to instance vars create model window_size, n_features, n_outputs = X.shape[1], X.shape[2], y.shape[1] | 285 | en | 0.513144 |
import json
from django.contrib.messages.storage.base import BaseStorage
from django.contrib.messages.storage.cookie import (
MessageDecoder, MessageEncoder,
)
from django.utils import six
class SessionStorage(BaseStorage):
    """
    Stores messages in the session (that is, django.contrib.sessions).
    """
    session_key = '_messages'

    def __init__(self, request, *args, **kwargs):
        assert hasattr(request, 'session'), "The session-based temporary "\
            "message storage requires session middleware to be installed, "\
            "and come before the message middleware in the "\
            "MIDDLEWARE_CLASSES list."
        super(SessionStorage, self).__init__(request, *args, **kwargs)

    def _get(self, *args, **kwargs):
        """
        Retrieves a list of messages from the request's session. This storage
        always stores everything it is given, so return True for the
        all_retrieved flag.
        """
        raw = self.request.session.get(self.session_key)
        return self.deserialize_messages(raw), True

    def _store(self, messages, response, *args, **kwargs):
        """
        Stores a list of messages to the request's session.
        """
        if not messages:
            # Nothing to keep — drop any stale payload.
            self.request.session.pop(self.session_key, None)
        else:
            self.request.session[self.session_key] = self.serialize_messages(messages)
        return []

    def serialize_messages(self, messages):
        # Compact separators keep the session payload small.
        return MessageEncoder(separators=(',', ':')).encode(messages)

    def deserialize_messages(self, data):
        # Pass through anything that is not a serialized string payload.
        if not data or not isinstance(data, six.string_types):
            return data
        return json.loads(data, cls=MessageDecoder)
| django/contrib/messages/storage/session.py | 1,714 | Stores messages in the session (that is, django.contrib.sessions).
Retrieves a list of messages from the request's session. This storage
always stores everything it is given, so return True for the
all_retrieved flag.
Stores a list of messages to the request's session. | 270 | en | 0.712072 |
from django.contrib import admin
# Register your models here.
from account.models import UserProfile
from blog.models import BlogArticles
class BlogArticlesAdmin(admin.ModelAdmin):
    # Columns shown in the article changelist.
    list_display = ("title", "author", "publish")
    # Sidebar filters.
    list_filter = ("publish", "author")
    # Fields covered by the admin search box.
    search_fields = ("title", "body")
    # Raw-id widget for the author FK instead of a (potentially huge) select.
    raw_id_fields = ("author",)
    # Date drill-down navigation by publish date.
    date_hierarchy = "publish"
    ordering = ("-publish", "author")
admin.site.register(BlogArticles, BlogArticlesAdmin)
class UserProfileAdmin(admin.ModelAdmin):
    # Columns shown in the profile changelist.
    list_display = ("user", "birth", "phone")
    list_filter = ("phone",)
admin.site.register(UserProfile, UserProfileAdmin)
| blog/admin.py | 640 | Register your models here. | 26 | en | 0.957485 |
from typing import Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from kornia.constants import pi
# Public API of this conversions module.
__all__ = [
    # functional api
    "rad2deg",
    "deg2rad",
    "pol2cart",
    "cart2pol",
    "convert_points_from_homogeneous",
    "convert_points_to_homogeneous",
    "convert_affinematrix_to_homography",
    "convert_affinematrix_to_homography3d",
    "angle_axis_to_rotation_matrix",
    "angle_axis_to_quaternion",
    "rotation_matrix_to_angle_axis",
    "rotation_matrix_to_quaternion",
    "quaternion_to_angle_axis",
    "quaternion_to_rotation_matrix",
    "quaternion_log_to_exp",
    "quaternion_exp_to_log",
    "denormalize_pixel_coordinates",
    "normalize_pixel_coordinates",
    "normalize_quaternion",
    "denormalize_pixel_coordinates3d",
    "normalize_pixel_coordinates3d",
]
def rad2deg(tensor: torch.Tensor) -> torch.Tensor:
    r"""Convert angles from radians to degrees.

    Args:
        tensor (torch.Tensor): Tensor of arbitrary shape.

    Returns:
        torch.Tensor: Tensor with same shape as input.

    Example:
        >>> input = torch.tensor(3.1415926535) * torch.rand(1, 3, 3)
        >>> output = rad2deg(input)
    """
    if not isinstance(tensor, torch.Tensor):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(tensor)))

    # Move the pi constant onto the input's device/dtype before dividing.
    pi_t = pi.to(tensor.device).type(tensor.dtype)
    return 180. * tensor / pi_t
def deg2rad(tensor: torch.Tensor) -> torch.Tensor:
    r"""Convert angles from degrees to radians.

    Args:
        tensor (torch.Tensor): Tensor of arbitrary shape.

    Returns:
        torch.Tensor: tensor with same shape as input.

    Examples::

        >>> input = 360. * torch.rand(1, 3, 3)
        >>> output = deg2rad(input)
    """
    if not isinstance(tensor, torch.Tensor):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(tensor)))

    # Move the pi constant onto the input's device/dtype before scaling.
    pi_t = pi.to(tensor.device).type(tensor.dtype)
    return tensor * pi_t / 180.
def pol2cart(rho: torch.Tensor, phi: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    r"""Convert polar coordinates to cartesian coordinates.

    Args:
        rho (torch.Tensor): Tensor of arbitrary shape.
        phi (torch.Tensor): Tensor of same arbitrary shape.

    Returns:
        torch.Tensor, torch.Tensor: Tensor with same shape as input.

    Example:
        >>> rho = torch.rand(1, 3, 3)
        >>> phi = torch.rand(1, 3, 3)
        >>> x, y = pol2cart(rho, phi)
    """
    if not (isinstance(rho, torch.Tensor) and isinstance(phi, torch.Tensor)):
        raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format(
            type(rho), type(phi)))

    return rho * torch.cos(phi), rho * torch.sin(phi)
def cart2pol(x: torch.Tensor, y: torch.Tensor, eps: float = 1e-8) -> Tuple[torch.Tensor, torch.Tensor]:
    """Convert cartesian coordinates to polar coordinates.

    Args:
        x (torch.Tensor): Tensor of arbitrary shape.
        y (torch.Tensor): Tensor of same arbitrary shape.
        eps (float): To avoid division by zero. Default is 1e-8

    Returns:
        torch.Tensor, torch.Tensor: Tensor with same shape as input.

    Example:
        >>> x = torch.rand(1, 3, 3)
        >>> y = torch.rand(1, 3, 3)
        >>> rho, phi = cart2pol(x, y)
    """
    if not (isinstance(x, torch.Tensor) and isinstance(y, torch.Tensor)):
        raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format(
            type(x), type(y)))

    # eps inside the sqrt keeps the gradient finite at the origin.
    radius = torch.sqrt(x ** 2 + y ** 2 + eps)
    angle = torch.atan2(y, x)
    return radius, angle
def convert_points_from_homogeneous(
        points: torch.Tensor, eps: float = 1e-8) -> torch.Tensor:
    r"""Function that converts points from homogeneous to Euclidean space.

    Examples::

        >>> input = torch.rand(2, 4, 3)  # BxNx3
        >>> output = convert_points_from_homogeneous(input)  # BxNx2
    """
    if not isinstance(points, torch.Tensor):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(points)))
    if len(points.shape) < 2:
        raise ValueError("Input must be at least a 2D tensor. Got {}".format(
            points.shape))

    # Last coordinate is the homogeneous scale; guard points at infinity.
    w: torch.Tensor = points[..., -1:]
    # Near-zero scale is mapped to 1.0, following the OpenCV convention:
    # https://github.com/opencv/opencv/pull/14411/files
    # masked_scatter_ (rather than a plain where) avoids ever dividing by
    # the near-zero entries.
    finite: torch.Tensor = torch.abs(w) > eps
    scale: torch.Tensor = torch.ones_like(w).masked_scatter_(
        finite, torch.tensor(1.0).to(points.device) / w[finite])

    return scale * points[..., :-1]
def convert_points_to_homogeneous(points: torch.Tensor) -> torch.Tensor:
    r"""Function that converts points from Euclidean to homogeneous space.

    Examples::

        >>> input = torch.rand(2, 4, 3)  # BxNx3
        >>> output = convert_points_to_homogeneous(input)  # BxNx4
    """
    if not isinstance(points, torch.Tensor):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(points)))
    if len(points.shape) < 2:
        raise ValueError("Input must be at least a 2D tensor. Got {}".format(
            points.shape))

    # Append a trailing 1 to every point.
    return F.pad(points, [0, 1], "constant", 1.0)
def _convert_affinematrix_to_homography_impl(A: torch.Tensor) -> torch.Tensor:
H: torch.Tensor = torch.nn.functional.pad(A, [0, 0, 0, 1], "constant", value=0.)
H[..., -1, -1] += 1.0
return H
def convert_affinematrix_to_homography(A: torch.Tensor) -> torch.Tensor:
    r"""Function that converts batch of affine matrices from [Bx2x3] to [Bx3x3].

    Examples::

        >>> input = torch.rand(2, 2, 3)  # Bx2x3
        >>> output = convert_affinematrix_to_homography(input)  # Bx3x3
    """
    if not isinstance(A, torch.Tensor):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(A)))
    valid_shape = len(A.shape) == 3 and A.shape[-2:] == (2, 3)
    if not valid_shape:
        raise ValueError("Input matrix must be a Bx2x3 tensor. Got {}"
                         .format(A.shape))
    return _convert_affinematrix_to_homography_impl(A)
def convert_affinematrix_to_homography3d(A: torch.Tensor) -> torch.Tensor:
    r"""Function that converts batch of affine matrices from [Bx3x4] to [Bx4x4].

    Examples::

        >>> input = torch.rand(2, 3, 4)  # Bx3x4
        >>> output = convert_affinematrix_to_homography3d(input)  # Bx4x4
    """
    if not isinstance(A, torch.Tensor):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(A)))
    valid_shape = len(A.shape) == 3 and A.shape[-2:] == (3, 4)
    if not valid_shape:
        raise ValueError("Input matrix must be a Bx3x4 tensor. Got {}"
                         .format(A.shape))
    return _convert_affinematrix_to_homography_impl(A)
def angle_axis_to_rotation_matrix(angle_axis: torch.Tensor) -> torch.Tensor:
    r"""Convert 3d vector of axis-angle rotation to 3x3 rotation matrix

    Args:
        angle_axis (torch.Tensor): tensor of 3d vector of axis-angle rotations.

    Returns:
        torch.Tensor: tensor of 3x3 rotation matrices.

    Shape:
        - Input: :math:`(N, 3)`
        - Output: :math:`(N, 3, 3)`

    Example:
        >>> input = torch.rand(1, 3)  # Nx3
        >>> output = angle_axis_to_rotation_matrix(input)  # Nx3x3
    """
    if not isinstance(angle_axis, torch.Tensor):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(angle_axis)))
    if not angle_axis.shape[-1] == 3:
        raise ValueError(
            "Input size must be a (*, 3) tensor. Got {}".format(
                angle_axis.shape))

    def _compute_rotation_matrix(angle_axis, theta2, eps=1e-6):
        # Rodrigues' formula, written out element-wise.
        # We want to be careful to only evaluate the square root if the
        # norm of the angle_axis vector is greater than zero. Otherwise
        # we get a division by zero.
        k_one = 1.0
        theta = torch.sqrt(theta2)
        wxyz = angle_axis / (theta + eps)
        wx, wy, wz = torch.chunk(wxyz, 3, dim=1)
        cos_theta = torch.cos(theta)
        sin_theta = torch.sin(theta)
        r00 = cos_theta + wx * wx * (k_one - cos_theta)
        r10 = wz * sin_theta + wx * wy * (k_one - cos_theta)
        r20 = -wy * sin_theta + wx * wz * (k_one - cos_theta)
        r01 = wx * wy * (k_one - cos_theta) - wz * sin_theta
        r11 = cos_theta + wy * wy * (k_one - cos_theta)
        r21 = wx * sin_theta + wy * wz * (k_one - cos_theta)
        r02 = wy * sin_theta + wx * wz * (k_one - cos_theta)
        r12 = -wx * sin_theta + wy * wz * (k_one - cos_theta)
        r22 = cos_theta + wz * wz * (k_one - cos_theta)
        rotation_matrix = torch.cat(
            [r00, r01, r02, r10, r11, r12, r20, r21, r22], dim=1)
        return rotation_matrix.view(-1, 3, 3)

    def _compute_rotation_matrix_taylor(angle_axis):
        # First-order Taylor expansion of Rodrigues' formula, used when the
        # rotation angle is (near) zero to avoid dividing by theta.
        rx, ry, rz = torch.chunk(angle_axis, 3, dim=1)
        k_one = torch.ones_like(rx)
        rotation_matrix = torch.cat(
            [k_one, -rz, ry, rz, k_one, -rx, -ry, rx, k_one], dim=1)
        return rotation_matrix.view(-1, 3, 3)

    # stolen from ceres/rotation.h
    _angle_axis = torch.unsqueeze(angle_axis, dim=1)
    theta2 = torch.matmul(_angle_axis, _angle_axis.transpose(1, 2))
    theta2 = torch.squeeze(theta2, dim=1)

    # compute rotation matrices
    rotation_matrix_normal = _compute_rotation_matrix(angle_axis, theta2)
    rotation_matrix_taylor = _compute_rotation_matrix_taylor(angle_axis)

    # create mask to handle both cases: full formula for theta2 > eps,
    # Taylor approximation otherwise.
    eps = 1e-6
    mask = (theta2 > eps).view(-1, 1, 1).to(theta2.device)
    mask_pos = (mask).type_as(theta2)
    mask_neg = (mask == False).type_as(theta2)  # noqa

    # create output pose matrix
    batch_size = angle_axis.shape[0]
    rotation_matrix = torch.eye(3).to(angle_axis.device).type_as(angle_axis)
    rotation_matrix = rotation_matrix.view(1, 3, 3).repeat(batch_size, 1, 1)
    # fill output matrix with masked values
    rotation_matrix[..., :3, :3] = \
        mask_pos * rotation_matrix_normal + mask_neg * rotation_matrix_taylor
    return rotation_matrix  # Nx3x3
def rotation_matrix_to_angle_axis(
        rotation_matrix: torch.Tensor) -> torch.Tensor:
    r"""Convert 3x3 rotation matrix to Rodrigues vector.

    Args:
        rotation_matrix (torch.Tensor): rotation matrix.

    Returns:
        torch.Tensor: Rodrigues vector transformation.

    Shape:
        - Input: :math:`(N, 3, 3)`
        - Output: :math:`(N, 3)`

    Example:
        >>> input = torch.rand(2, 3, 3)  # Nx3x3
        >>> output = rotation_matrix_to_angle_axis(input)  # Nx3
    """
    if not isinstance(rotation_matrix, torch.Tensor):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(rotation_matrix)))
    if not rotation_matrix.shape[-2:] == (3, 3):
        raise ValueError(
            "Input size must be a (*, 3, 3) tensor. Got {}".format(
                rotation_matrix.shape))
    quaternion: torch.Tensor = rotation_matrix_to_quaternion(rotation_matrix)
    # BUGFIX: rotation_matrix_to_quaternion emits (x, y, z, w), while
    # quaternion_to_angle_axis reads component 0 as the scalar (w) term.
    # Reorder to (w, x, y, z) so the two conventions line up; without this
    # the identity matrix mapped to [0, 0, pi] instead of [0, 0, 0].
    quaternion_wxyz: torch.Tensor = torch.cat(
        [quaternion[..., 3:4], quaternion[..., 0:3]], dim=-1)
    return quaternion_to_angle_axis(quaternion_wxyz)
def rotation_matrix_to_quaternion(
        rotation_matrix: torch.Tensor,
        eps: float = 1e-8) -> torch.Tensor:
    r"""Convert 3x3 rotation matrix to 4d quaternion vector.

    The quaternion vector has components in (x, y, z, w) format.

    The numerically largest of the four quaternion components is recovered
    first (from the trace or the dominant diagonal entry) and the remaining
    components are derived from it, which avoids catastrophic cancellation.

    Args:
        rotation_matrix (torch.Tensor): the rotation matrix to convert.
        eps (float): small value to avoid zero division. Default: 1e-8.

    Return:
        torch.Tensor: the rotation in quaternion.

    Shape:
        - Input: :math:`(*, 3, 3)`
        - Output: :math:`(*, 4)`

    Example:
        >>> input = torch.rand(4, 3, 3)  # Nx3x3
        >>> output = rotation_matrix_to_quaternion(input)  # Nx4
    """
    if not isinstance(rotation_matrix, torch.Tensor):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(rotation_matrix)))
    if not rotation_matrix.shape[-2:] == (3, 3):
        raise ValueError(
            "Input size must be a (*, 3, 3) tensor. Got {}".format(
                rotation_matrix.shape))

    def safe_zero_division(numerator: torch.Tensor,
                           denominator: torch.Tensor) -> torch.Tensor:
        # Clamp with the smallest positive normal number for the dtype so
        # the division can never produce inf/nan.
        eps: float = torch.finfo(numerator.dtype).tiny  # type: ignore
        return numerator / torch.clamp(denominator, min=eps)

    # flatten the trailing 3x3 into 9 scalars m00..m22 for readability
    rotation_matrix_vec: torch.Tensor = rotation_matrix.view(
        *rotation_matrix.shape[:-2], 9)
    m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.chunk(
        rotation_matrix_vec, chunks=9, dim=-1)
    trace: torch.Tensor = m00 + m11 + m22

    def trace_positive_cond():
        # dominant component is qw (trace > 0)
        sq = torch.sqrt(trace + 1.0) * 2.  # sq = 4 * qw.
        qw = 0.25 * sq
        qx = safe_zero_division(m21 - m12, sq)
        qy = safe_zero_division(m02 - m20, sq)
        qz = safe_zero_division(m10 - m01, sq)
        return torch.cat([qx, qy, qz, qw], dim=-1)

    def cond_1():
        # dominant component is qx (m00 largest diagonal entry)
        sq = torch.sqrt(1.0 + m00 - m11 - m22 + eps) * 2.  # sq = 4 * qx.
        qw = safe_zero_division(m21 - m12, sq)
        qx = 0.25 * sq
        qy = safe_zero_division(m01 + m10, sq)
        qz = safe_zero_division(m02 + m20, sq)
        return torch.cat([qx, qy, qz, qw], dim=-1)

    def cond_2():
        # dominant component is qy (m11 largest diagonal entry)
        sq = torch.sqrt(1.0 + m11 - m00 - m22 + eps) * 2.  # sq = 4 * qy.
        qw = safe_zero_division(m02 - m20, sq)
        qx = safe_zero_division(m01 + m10, sq)
        qy = 0.25 * sq
        qz = safe_zero_division(m12 + m21, sq)
        return torch.cat([qx, qy, qz, qw], dim=-1)

    def cond_3():
        # dominant component is qz (m22 largest diagonal entry)
        sq = torch.sqrt(1.0 + m22 - m00 - m11 + eps) * 2.  # sq = 4 * qz.
        qw = safe_zero_division(m10 - m01, sq)
        qx = safe_zero_division(m02 + m20, sq)
        qy = safe_zero_division(m12 + m21, sq)
        qz = 0.25 * sq
        return torch.cat([qx, qy, qz, qw], dim=-1)

    # torch.where evaluates every branch eagerly; the eps terms above keep
    # the unselected branches finite so no nan leaks into the result
    where_2 = torch.where(m11 > m22, cond_2(), cond_3())
    where_1 = torch.where(
        (m00 > m11) & (m00 > m22), cond_1(), where_2)
    quaternion: torch.Tensor = torch.where(
        trace > 0., trace_positive_cond(), where_1)
    return quaternion
def normalize_quaternion(quaternion: torch.Tensor,
                         eps: float = 1e-12) -> torch.Tensor:
    r"""Scales a quaternion to unit Euclidean norm.

    The quaternion is laid out as (x, y, z, w).

    Args:
        quaternion (torch.Tensor): quaternion(s) to normalize, of shape
          :math:`(*, 4)`.
        eps (Optional[bool]): small value to avoid division by zero.
          Default: 1e-12.

    Return:
        torch.Tensor: the normalized quaternion of shape :math:`(*, 4)`.

    Example:
        >>> quaternion = torch.tensor([1., 0., 1., 0.])
        >>> normalize_quaternion(quaternion)
        tensor([0.7071, 0.0000, 0.7071, 0.0000])
    """
    if not isinstance(quaternion, torch.Tensor):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(quaternion)))
    if quaternion.shape[-1] != 4:
        raise ValueError(
            "Input must be a tensor of shape (*, 4). Got {}".format(
                quaternion.shape))
    # F.normalize divides by max(norm, eps), matching the documented guard.
    return F.normalize(quaternion, p=2, dim=-1, eps=eps)
# based on:
# https://github.com/matthew-brett/transforms3d/blob/8965c48401d9e8e66b6a8c37c65f2fc200a076fa/transforms3d/quaternions.py#L101
# https://github.com/tensorflow/graphics/blob/master/tensorflow_graphics/geometry/transformation/rotation_matrix_3d.py#L247
def quaternion_to_rotation_matrix(quaternion: torch.Tensor) -> torch.Tensor:
    r"""Converts a quaternion to a rotation matrix.

    The quaternion should be in (x, y, z, w) format. The input is
    normalized before conversion, so non-unit quaternions are accepted.

    Args:
        quaternion (torch.Tensor): a tensor containing a quaternion to be
          converted. The tensor can be of shape :math:`(*, 4)`.

    Return:
        torch.Tensor: the rotation matrix of shape :math:`(*, 3, 3)`.

    Example:
        >>> quaternion = torch.tensor([0., 0., 1., 0.])
        >>> quaternion_to_rotation_matrix(quaternion)
        tensor([[-1.,  0.,  0.],
                [ 0., -1.,  0.],
                [ 0.,  0.,  1.]])
    """
    if not isinstance(quaternion, torch.Tensor):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(quaternion)))
    if not quaternion.shape[-1] == 4:
        raise ValueError(
            "Input must be a tensor of shape (*, 4). Got {}".format(
                quaternion.shape))
    # normalize the input quaternion
    quaternion_norm: torch.Tensor = normalize_quaternion(quaternion)
    # unpack the normalized quaternion components
    x, y, z, w = torch.chunk(quaternion_norm, chunks=4, dim=-1)
    # compute the actual conversion
    tx: torch.Tensor = 2.0 * x
    ty: torch.Tensor = 2.0 * y
    tz: torch.Tensor = 2.0 * z
    twx: torch.Tensor = tx * w
    twy: torch.Tensor = ty * w
    twz: torch.Tensor = tz * w
    txx: torch.Tensor = tx * x
    txy: torch.Tensor = ty * x
    txz: torch.Tensor = tz * x
    tyy: torch.Tensor = ty * y
    tyz: torch.Tensor = tz * y
    tzz: torch.Tensor = tz * z
    # BUGFIX: build `one` with ones_like so it lives on the input's
    # device/dtype; the previous bare torch.tensor(1.) broke torch.stack
    # for CUDA or float64 inputs.
    one: torch.Tensor = torch.ones_like(x)
    # BUGFIX: reshape using the input's batch dims instead of view(-1, 3, 3)
    # so an input of shape (*, 4) really yields (*, 3, 3) as documented
    # (extra batch dims were previously flattened). This also covers the
    # single-quaternion (4,) -> (3, 3) case without a separate squeeze.
    matrix: torch.Tensor = torch.stack([
        one - (tyy + tzz), txy - twz, txz + twy,
        txy + twz, one - (txx + tzz), tyz - twx,
        txz - twy, tyz + twx, one - (txx + tyy)
    ], dim=-1).view(*quaternion.shape[:-1], 3, 3)
    return matrix
def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor:
    """Convert quaternion vector to angle axis of rotation.

    NOTE(review): the indexing below reads component 0 as the scalar
    (cosine / w) term and components 1..3 as the vector part, i.e. this
    function expects (w, x, y, z) input — not the (x, y, z, w) layout that
    sibling conversion docs describe. Verify call sites feed this ordering.

    Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h

    Args:
        quaternion (torch.Tensor): tensor with quaternions.

    Return:
        torch.Tensor: tensor with angle axis of rotation.

    Shape:
        - Input: :math:`(*, 4)` where `*` means, any number of dimensions
        - Output: :math:`(*, 3)`

    Example:
        >>> quaternion = torch.rand(2, 4)  # Nx4
        >>> angle_axis = quaternion_to_angle_axis(quaternion)  # Nx3
    """
    if not torch.is_tensor(quaternion):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(quaternion)))
    if not quaternion.shape[-1] == 4:
        raise ValueError(
            "Input must be a tensor of shape Nx4 or 4. Got {}".format(
                quaternion.shape))
    # unpack input and compute conversion
    q1: torch.Tensor = quaternion[..., 1]
    q2: torch.Tensor = quaternion[..., 2]
    q3: torch.Tensor = quaternion[..., 3]
    sin_squared_theta: torch.Tensor = q1 * q1 + q2 * q2 + q3 * q3
    sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta)
    cos_theta: torch.Tensor = quaternion[..., 0]
    # q and -q encode the same rotation; branch on the sign of the scalar
    # term so the recovered angle magnitude stays <= pi
    two_theta: torch.Tensor = 2.0 * torch.where(
        cos_theta < 0.0, torch.atan2(-sin_theta, -cos_theta),
        torch.atan2(sin_theta, cos_theta))
    # k rescales the vector part to the rotation angle; for a near-zero
    # rotation fall back to the first-order factor 2 to avoid dividing by 0
    k_pos: torch.Tensor = two_theta / sin_theta
    k_neg: torch.Tensor = 2.0 * torch.ones_like(sin_theta)
    k: torch.Tensor = torch.where(sin_squared_theta > 0.0, k_pos, k_neg)
    angle_axis: torch.Tensor = torch.zeros_like(quaternion)[..., :3]
    angle_axis[..., 0] += q1 * k
    angle_axis[..., 1] += q2 * k
    angle_axis[..., 2] += q3 * k
    return angle_axis
def quaternion_log_to_exp(quaternion: torch.Tensor,
                          eps: float = 1e-8) -> torch.Tensor:
    r"""Applies the exponential map to a log quaternion.

    The result is laid out as (x, y, z, w): vector part first, scalar last.

    Args:
        quaternion (torch.Tensor): log quaternion (vector part only) of
          shape :math:`(*, 3)`.
        eps (float): lower clamp for the vector norm, guarding division by
          zero. Default: 1e-8.

    Return:
        torch.Tensor: the quaternion exponential map of shape :math:`(*, 4)`.

    Example:
        >>> quaternion = torch.tensor([0., 0., 0.])
        >>> quaternion_log_to_exp(quaternion)
        tensor([0., 0., 0., 1.])
    """
    if not isinstance(quaternion, torch.Tensor):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(quaternion)))
    if quaternion.shape[-1] != 3:
        raise ValueError(
            "Input must be a tensor of shape (*, 3). Got {}".format(
                quaternion.shape))
    # the rotation angle is encoded as the vector's norm (clamped away from 0)
    theta: torch.Tensor = torch.norm(
        quaternion, p=2, dim=-1, keepdim=True).clamp(min=eps)
    # exp(q) = (sin|q| * q / |q|, cos|q|)
    vector_part: torch.Tensor = quaternion * torch.sin(theta) / theta
    scalar_part: torch.Tensor = torch.cos(theta)
    return torch.cat([vector_part, scalar_part], dim=-1)
def quaternion_exp_to_log(quaternion: torch.Tensor,
                          eps: float = 1e-8) -> torch.Tensor:
    r"""Applies the log map to a quaternion.

    The input is laid out as (x, y, z, w); the result is the 3d vector part
    of the logarithm.

    Args:
        quaternion (torch.Tensor): quaternion to convert, of shape
          :math:`(*, 4)`.
        eps (float): lower clamp for the vector norm, guarding division by
          zero. Default: 1e-8.

    Return:
        torch.Tensor: the quaternion log map of shape :math:`(*, 3)`.

    Example:
        >>> quaternion = torch.tensor([0., 0., 0., 1.])
        >>> quaternion_exp_to_log(quaternion)
        tensor([0., 0., 0.])
    """
    if not isinstance(quaternion, torch.Tensor):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(quaternion)))
    if quaternion.shape[-1] != 4:
        raise ValueError(
            "Input must be a tensor of shape (*, 4). Got {}".format(
                quaternion.shape))
    # split into vector part (x, y, z) and scalar part (w)
    vector_part: torch.Tensor = quaternion[..., 0:3]
    scalar_part: torch.Tensor = quaternion[..., 3:4]
    # log(q) = v / |v| * acos(w); clamp |v| away from 0 and w into the
    # valid acos domain
    norm_v: torch.Tensor = torch.norm(
        vector_part, p=2, dim=-1, keepdim=True).clamp(min=eps)
    return vector_part * torch.acos(
        torch.clamp(scalar_part, min=-1.0, max=1.0)) / norm_v
# based on:
# https://github.com/facebookresearch/QuaterNet/blob/master/common/quaternion.py#L138
def angle_axis_to_quaternion(angle_axis: torch.Tensor) -> torch.Tensor:
    r"""Convert an angle axis to a quaternion.

    NOTE(review): the returned layout is (w, x, y, z) — the scalar term is
    concatenated in front (see the final ``torch.cat([w, ...])``) — even
    though sibling conversion docs describe an (x, y, z, w) format. This
    matches the layout that ``quaternion_to_angle_axis`` reads.

    Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h

    Args:
        angle_axis (torch.Tensor): tensor with angle axis.

    Return:
        torch.Tensor: tensor with quaternion.

    Shape:
        - Input: :math:`(*, 3)` where `*` means, any number of dimensions
        - Output: :math:`(*, 4)`

    Example:
        >>> angle_axis = torch.rand(2, 3)  # Nx3
        >>> quaternion = angle_axis_to_quaternion(angle_axis)  # Nx4
    """
    if not torch.is_tensor(angle_axis):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(angle_axis)))
    if not angle_axis.shape[-1] == 3:
        raise ValueError(
            "Input must be a tensor of shape Nx3 or 3. Got {}".format(
                angle_axis.shape))
    # unpack input and compute conversion
    a0: torch.Tensor = angle_axis[..., 0:1]
    a1: torch.Tensor = angle_axis[..., 1:2]
    a2: torch.Tensor = angle_axis[..., 2:3]
    theta_squared: torch.Tensor = a0 * a0 + a1 * a1 + a2 * a2
    theta: torch.Tensor = torch.sqrt(theta_squared)
    half_theta: torch.Tensor = theta * 0.5
    # for theta -> 0, sin(theta/2)/theta -> 1/2; switch to that limit to
    # avoid 0/0 (torch.where evaluates both branches, but the masked nan
    # from k_pos is discarded)
    mask: torch.Tensor = theta_squared > 0.0
    ones: torch.Tensor = torch.ones_like(half_theta)
    k_neg: torch.Tensor = 0.5 * ones
    k_pos: torch.Tensor = torch.sin(half_theta) / theta
    k: torch.Tensor = torch.where(mask, k_pos, k_neg)
    w: torch.Tensor = torch.where(mask, torch.cos(half_theta), ones)
    # vector part: axis scaled by sin(theta/2)/theta (three components)
    quaternion: torch.Tensor = torch.zeros_like(angle_axis)
    quaternion[..., 0:1] += a0 * k
    quaternion[..., 1:2] += a1 * k
    quaternion[..., 2:3] += a2 * k
    return torch.cat([w, quaternion], dim=-1)
# based on:
# https://github.com/ClementPinard/SfmLearner-Pytorch/blob/master/inverse_warp.py#L65-L71
def normalize_pixel_coordinates(
        pixel_coordinates: torch.Tensor,
        height: int,
        width: int,
        eps: float = 1e-8) -> torch.Tensor:
    r"""Normalize pixel coordinates into the [-1, 1] range.

    Coordinate 0 maps to -1 (extreme left/top) and ``w - 1`` / ``h - 1``
    maps to 1 (extreme right/bottom). Component 0 is scaled by the width
    and component 1 by the height.

    Args:
        pixel_coordinates (torch.Tensor): the grid with pixel coordinates.
          Shape can be :math:`(*, 2)`.
        height (int): the maximum height in the y-axis.
        width (int): the maximum width in the x-axis.
        eps (float): safe division by zero. (default 1e-8).

    Return:
        torch.Tensor: the normalized pixel coordinates.
    """
    if pixel_coordinates.shape[-1] != 2:
        raise ValueError("Input pixel_coordinates must be of shape (*, 2). "
                         "Got {}".format(pixel_coordinates.shape))
    # (width, height) extent, created on the input's device/dtype
    extent: torch.Tensor = torch.stack([
        torch.tensor(width, device=pixel_coordinates.device,
                     dtype=pixel_coordinates.dtype),
        torch.tensor(height, device=pixel_coordinates.device,
                     dtype=pixel_coordinates.dtype),
    ])
    # per-component scale 2 / (extent - 1), clamped to avoid division by 0
    scale: torch.Tensor = torch.tensor(
        2., device=pixel_coordinates.device,
        dtype=pixel_coordinates.dtype) / (extent - 1).clamp(eps)
    return scale * pixel_coordinates - 1
def denormalize_pixel_coordinates(
        pixel_coordinates: torch.Tensor,
        height: int,
        width: int,
        eps: float = 1e-8) -> torch.Tensor:
    r"""Denormalize pixel coordinates.

    The input is assumed to be -1 if on extreme left, 1 if on
    extreme right (x = w-1).

    Args:
        pixel_coordinates (torch.Tensor): the normalized grid coordinates.
          Shape can be :math:`(*, 2)`.
        height (int): the maximum height in the y-axis.
        width (int): the maximum width in the x-axis.
        eps (float): safe division by zero. (default 1e-8).

    Return:
        torch.Tensor: the denormalized pixel coordinates.
    """
    if pixel_coordinates.shape[-1] != 2:
        raise ValueError("Input pixel_coordinates must be of shape (*, 2). "
                         "Got {}".format(pixel_coordinates.shape))
    # Build the (width, height) extent directly on the input's device/dtype,
    # mirroring normalize_pixel_coordinates (the previous version created
    # CPU integer tensors and converted them afterwards).
    hw: torch.Tensor = torch.stack([
        torch.tensor(width, device=pixel_coordinates.device,
                     dtype=pixel_coordinates.dtype),
        torch.tensor(height, device=pixel_coordinates.device,
                     dtype=pixel_coordinates.dtype),
    ])
    # per-component scale 2 / (extent - 1), clamped to avoid division by 0
    factor: torch.Tensor = torch.tensor(
        2., device=pixel_coordinates.device,
        dtype=pixel_coordinates.dtype) / (hw - 1).clamp(eps)
    # invert normalize_pixel_coordinates: p_pix = (p_norm + 1) / factor
    return torch.tensor(
        1., device=pixel_coordinates.device,
        dtype=pixel_coordinates.dtype) / factor * (pixel_coordinates + 1)
def normalize_pixel_coordinates3d(
        pixel_coordinates: torch.Tensor,
        depth: int,
        height: int,
        width: int,
        eps: float = 1e-8) -> torch.Tensor:
    r"""Normalize 3d pixel coordinates into the [-1, 1] range.

    -1 corresponds to coordinate 0 and 1 to the last index along each axis.

    NOTE(review): the per-component extent is stacked as
    (depth, width, height), mirroring denormalize_pixel_coordinates3d —
    confirm this matches the component order of the incoming grid.

    Args:
        pixel_coordinates (torch.Tensor): the grid with pixel coordinates.
          Shape can be :math:`(*, 3)`.
        depth (int): the maximum depth in the z-axis.
        height (int): the maximum height in the y-axis.
        width (int): the maximum width in the x-axis.
        eps (float): safe division by zero. (default 1e-8).

    Return:
        torch.Tensor: the normalized pixel coordinates.
    """
    if pixel_coordinates.shape[-1] != 3:
        raise ValueError("Input pixel_coordinates must be of shape (*, 3). "
                         "Got {}".format(pixel_coordinates.shape))
    # per-axis extent, moved to the input's device/dtype
    extent: torch.Tensor = torch.stack([
        torch.tensor(depth), torch.tensor(width), torch.tensor(height)
    ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype)
    # per-component scale 2 / (extent - 1), clamped to avoid division by 0
    scale: torch.Tensor = torch.tensor(2.) / (extent - 1).clamp(eps)
    return scale * pixel_coordinates - 1
def denormalize_pixel_coordinates3d(
        pixel_coordinates: torch.Tensor,
        depth: int,
        height: int,
        width: int,
        eps: float = 1e-8) -> torch.Tensor:
    r"""Map normalized 3d coordinates from [-1, 1] back to pixel units.

    -1 corresponds to coordinate 0 and 1 to the last index along each axis.

    NOTE(review): the per-component extent is stacked as
    (depth, width, height), mirroring normalize_pixel_coordinates3d —
    confirm this matches the component order of the incoming grid.

    Args:
        pixel_coordinates (torch.Tensor): the normalized grid coordinates.
          Shape can be :math:`(*, 3)`.
        depth (int): the maximum depth in the z-axis.
        height (int): the maximum height in the y-axis.
        width (int): the maximum width in the x-axis.
        eps (float): safe division by zero. (default 1e-8).

    Return:
        torch.Tensor: the denormalized pixel coordinates.
    """
    if pixel_coordinates.shape[-1] != 3:
        raise ValueError("Input pixel_coordinates must be of shape (*, 3). "
                         "Got {}".format(pixel_coordinates.shape))
    # per-axis extent, moved to the input's device/dtype
    extent: torch.Tensor = torch.stack([
        torch.tensor(depth), torch.tensor(width), torch.tensor(height)
    ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype)
    # per-component scale 2 / (extent - 1), clamped to avoid division by 0
    scale: torch.Tensor = torch.tensor(2.) / (extent - 1).clamp(eps)
    return torch.tensor(1.) / scale * (pixel_coordinates + 1)
| kornia/geometry/conversions.py | 29,153 | Convert an angle axis to a quaternion.
The quaternion vector has components in (x, y, z, w) format.
Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h
Args:
angle_axis (torch.Tensor): tensor with angle axis.
Return:
torch.Tensor: tensor with quaternion.
Shape:
- Input: :math:`(*, 3)` where `*` means, any number of dimensions
- Output: :math:`(*, 4)`
Example:
>>> angle_axis = torch.rand(2, 3) # Nx3
>>> quaternion = angle_axis_to_quaternion(angle_axis) # Nx4
Convert 3d vector of axis-angle rotation to 3x3 rotation matrix
Args:
angle_axis (torch.Tensor): tensor of 3d vector of axis-angle rotations.
Returns:
torch.Tensor: tensor of 3x3 rotation matrices.
Shape:
- Input: :math:`(N, 3)`
- Output: :math:`(N, 3, 3)`
Example:
>>> input = torch.rand(1, 3) # Nx3
>>> output = angle_axis_to_rotation_matrix(input) # Nx3x3
Function that converts cartesian coordinates to polar coordinates.
Args:
rho (torch.Tensor): Tensor of arbitrary shape.
phi (torch.Tensor): Tensor of same arbitrary shape.
eps (float): To avoid division by zero. Default is 1e-8
Returns:
torch.Tensor, torch.Tensor: Tensor with same shape as input.
Example:
>>> x = torch.rand(1, 3, 3)
>>> y = torch.rand(1, 3, 3)
>>> rho, phi = cart2pol(x, y)
Function that converts batch of affine matrices from [Bx2x3] to [Bx3x3].
Examples::
>>> input = torch.rand(2, 2, 3) # Bx2x3
>>> output = convert_affinematrix_to_homography(input) # Bx3x3
Function that converts batch of affine matrices from [Bx3x4] to [Bx4x4].
Examples::
>>> input = torch.rand(2, 3, 4) # Bx3x4
>>> output = convert_affinematrix_to_homography3d(input) # Bx4x4
Function that converts points from homogeneous to Euclidean space.
Examples::
>>> input = torch.rand(2, 4, 3) # BxNx3
>>> output = convert_points_from_homogeneous(input) # BxNx2
Function that converts points from Euclidean to homogeneous space.
Examples::
>>> input = torch.rand(2, 4, 3) # BxNx3
>>> output = convert_points_to_homogeneous(input) # BxNx4
Function that converts angles from degrees to radians.
Args:
tensor (torch.Tensor): Tensor of arbitrary shape.
Returns:
torch.Tensor: tensor with same shape as input.
Examples::
>>> input = 360. * torch.rand(1, 3, 3)
>>> output = deg2rad(input)
Denormalize pixel coordinates.
The input is assumed to be -1 if on extreme left, 1 if on
extreme right (x = w-1).
Args:
pixel_coordinates (torch.Tensor): the normalized grid coordinates.
Shape can be :math:`(*, 2)`.
width (int): the maximum width in the x-axis.
height (int): the maximum height in the y-axis.
eps (float): safe division by zero. (default 1e-8).
Return:
torch.Tensor: the denormalized pixel coordinates.
Denormalize pixel coordinates.
The input is assumed to be -1 if on extreme left, 1 if on
extreme right (x = w-1).
Args:
pixel_coordinates (torch.Tensor): the normalized grid coordinates.
Shape can be :math:`(*, 3)`.
        depth (int): the maximum depth in the z-axis.
height (int): the maximum height in the y-axis.
width (int): the maximum width in the x-axis.
eps (float): safe division by zero. (default 1e-8).
Return:
torch.Tensor: the denormalized pixel coordinates.
Normalize pixel coordinates between -1 and 1.
Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1).
Args:
pixel_coordinates (torch.Tensor): the grid with pixel coordinates.
Shape can be :math:`(*, 2)`.
width (int): the maximum width in the x-axis.
height (int): the maximum height in the y-axis.
eps (float): safe division by zero. (default 1e-8).
Return:
torch.Tensor: the normalized pixel coordinates.
Normalize pixel coordinates between -1 and 1.
Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1).
Args:
pixel_coordinates (torch.Tensor): the grid with pixel coordinates.
Shape can be :math:`(*, 3)`.
depth (int): the maximum depth in the z-axis.
height (int): the maximum height in the y-axis.
width (int): the maximum width in the x-axis.
eps (float): safe division by zero. (default 1e-8).
Return:
torch.Tensor: the normalized pixel coordinates.
Normalizes a quaternion.
The quaternion should be in (x, y, z, w) format.
Args:
quaternion (torch.Tensor): a tensor containing a quaternion to be
normalized. The tensor can be of shape :math:`(*, 4)`.
eps (Optional[bool]): small value to avoid division by zero.
Default: 1e-12.
Return:
torch.Tensor: the normalized quaternion of shape :math:`(*, 4)`.
Example:
>>> quaternion = torch.tensor([1., 0., 1., 0.])
>>> normalize_quaternion(quaternion)
tensor([0.7071, 0.0000, 0.7071, 0.0000])
Function that converts polar coordinates to cartesian coordinates.
Args:
rho (torch.Tensor): Tensor of arbitrary shape.
phi (torch.Tensor): Tensor of same arbitrary shape.
Returns:
torch.Tensor, torch.Tensor: Tensor with same shape as input.
Example:
>>> rho = torch.rand(1, 3, 3)
>>> phi = torch.rand(1, 3, 3)
>>> x, y = pol2cart(rho, phi)
Applies the log map to a quaternion.
The quaternion should be in (x, y, z, w) format.
Args:
quaternion (torch.Tensor): a tensor containing a quaternion to be
converted. The tensor can be of shape :math:`(*, 4)`.
Return:
torch.Tensor: the quaternion log map of shape :math:`(*, 3)`.
Example:
>>> quaternion = torch.tensor([0., 0., 0., 1.])
>>> quaternion_exp_to_log(quaternion)
tensor([0., 0., 0.])
Applies exponential map to log quaternion.
The quaternion should be in (x, y, z, w) format.
Args:
quaternion (torch.Tensor): a tensor containing a quaternion to be
converted. The tensor can be of shape :math:`(*, 3)`.
Return:
torch.Tensor: the quaternion exponential map of shape :math:`(*, 4)`.
Example:
>>> quaternion = torch.tensor([0., 0., 0.])
>>> quaternion_log_to_exp(quaternion)
tensor([0., 0., 0., 1.])
Convert quaternion vector to angle axis of rotation.
The quaternion should be in (x, y, z, w) format.
Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h
Args:
quaternion (torch.Tensor): tensor with quaternions.
Return:
torch.Tensor: tensor with angle axis of rotation.
Shape:
- Input: :math:`(*, 4)` where `*` means, any number of dimensions
- Output: :math:`(*, 3)`
Example:
>>> quaternion = torch.rand(2, 4) # Nx4
>>> angle_axis = quaternion_to_angle_axis(quaternion) # Nx3
Converts a quaternion to a rotation matrix.
The quaternion should be in (x, y, z, w) format.
Args:
quaternion (torch.Tensor): a tensor containing a quaternion to be
converted. The tensor can be of shape :math:`(*, 4)`.
Return:
torch.Tensor: the rotation matrix of shape :math:`(*, 3, 3)`.
Example:
>>> quaternion = torch.tensor([0., 0., 1., 0.])
>>> quaternion_to_rotation_matrix(quaternion)
tensor([[-1., 0., 0.],
[ 0., -1., 0.],
[ 0., 0., 1.]])
Function that converts angles from radians to degrees.
Args:
tensor (torch.Tensor): Tensor of arbitrary shape.
Returns:
torch.Tensor: Tensor with same shape as input.
Example:
>>> input = torch.tensor(3.1415926535) * torch.rand(1, 3, 3)
>>> output = rad2deg(input)
Convert 3x3 rotation matrix to Rodrigues vector.
Args:
rotation_matrix (torch.Tensor): rotation matrix.
Returns:
torch.Tensor: Rodrigues vector transformation.
Shape:
- Input: :math:`(N, 3, 3)`
- Output: :math:`(N, 3)`
Example:
>>> input = torch.rand(2, 3, 3) # Nx3x3
>>> output = rotation_matrix_to_angle_axis(input) # Nx3
Convert 3x3 rotation matrix to 4d quaternion vector.
The quaternion vector has components in (x, y, z, w) format.
Args:
rotation_matrix (torch.Tensor): the rotation matrix to convert.
eps (float): small value to avoid zero division. Default: 1e-8.
Return:
torch.Tensor: the rotation in quaternion.
Shape:
- Input: :math:`(*, 3, 3)`
- Output: :math:`(*, 4)`
Example:
>>> input = torch.rand(4, 3, 3) # Nx3x3
>>> output = rotation_matrix_to_quaternion(input) # Nx4
functional api we check for points at infinity set the results of division by zeror/near-zero to 1.0 follow the convention of opencv: https://github.com/opencv/opencv/pull/14411/files We want to be careful to only evaluate the square root if the norm of the angle_axis vector is greater than zero. Otherwise we get a division by zero. stolen from ceres/rotation.h compute rotation matrices create mask to handle both cases noqa create output pose matrix fill output matrix with masked values Nx3x3 type: ignore sq = 4 * qw. sq = 4 * qx. sq = 4 * qy. sq = 4 * qz. based on: https://github.com/matthew-brett/transforms3d/blob/8965c48401d9e8e66b6a8c37c65f2fc200a076fa/transforms3d/quaternions.pyL101 https://github.com/tensorflow/graphics/blob/master/tensorflow_graphics/geometry/transformation/rotation_matrix_3d.pyL247 normalize the input quaternion unpack the normalized quaternion components compute the actual conversion unpack input and compute conversion compute quaternion norm compute scalar and vector compose quaternion and return unpack quaternion vector and scalar compute quaternion norm apply log map based on: https://github.com/facebookresearch/QuaterNet/blob/master/common/quaternion.pyL138 unpack input and compute conversion based on: https://github.com/ClementPinard/SfmLearner-Pytorch/blob/master/inverse_warp.pyL65-L71 compute normalization factor compute normalization factor compute normalization factor compute normalization factor | 9,642 | en | 0.569194 |
#!/usr/bin/env python3

# In this example, we demonstrate how Korali samples the posterior distribution
# in a bayesian problem where the likelihood is calculated by providing
# reference data points and their objective values.

# Make the computational model importable, then load it.
import sys
sys.path.append('./_model')
from model import *

import korali

# Create a fresh experiment.
experiment = korali.Experiment()

# Bayesian inference problem with a Normal likelihood over the reference data.
experiment["Problem"]["Type"] = "Bayesian/Reference"
experiment["Problem"]["Likelihood Model"] = "Normal"
experiment["Problem"]["Reference Data"] = getReferenceData()
experiment["Problem"]["Computational Model"] = \
    lambda sample: model(sample, getReferencePoints())

# Nested Sampling solver settings.
experiment["Solver"]["Type"] = "Sampler/Nested"
experiment["Solver"]["Resampling Method"] = "Ellipse"
experiment["Solver"]["Number Live Points"] = 1500

# A single uniform prior on [0, 5], shared by all variables.
experiment["Distributions"][0]["Name"] = "Uniform 0"
experiment["Distributions"][0]["Type"] = "Univariate/Uniform"
experiment["Distributions"][0]["Minimum"] = 0.0
experiment["Distributions"][0]["Maximum"] = +5.0

# Variables of the posterior and their prior distributions.
experiment["Variables"][0]["Name"] = "a"
experiment["Variables"][0]["Prior Distribution"] = "Uniform 0"
experiment["Variables"][1]["Name"] = "b"
experiment["Variables"][1]["Prior Distribution"] = "Uniform 0"
experiment["Variables"][2]["Name"] = "[Sigma]"
experiment["Variables"][2]["Prior Distribution"] = "Uniform 0"

# Output and verbosity settings.
experiment["File Output"]["Frequency"] = 1000
experiment["Console Output"]["Frequency"] = 500
experiment["Console Output"]["Verbosity"] = 'Detailed'

# Stopping rules.
experiment["Solver"]["Termination Criteria"]["Max Generations"] = 100000
experiment["Solver"]["Termination Criteria"]["Min Log Evidence Delta"] = 1e-1

# Where results are written.
experiment["File Output"]["Path"] = '_korali_result_nested'

# Start Korali's engine and run the experiment.
engine = korali.Engine()
engine.run(experiment)
| examples/bayesian.inference/reference/run-nested.py | 1,836 | !/usr/bin/env python3 In this example, we demonstrate how Korali samples the posterior distribution in a bayesian problem where the likelihood is calculated by providing reference data points and their objective values. Importing the computational model Creating new experiment Setting up the reference likelihood for the Bayesian Problem Configuring Nested Sampling parameters Configuring the problem's random distributions Configuring the problem's variables and their prior distributions Configuring output settings Starting Korali's Engine and running experiment | 566 | en | 0.782448 |
import os
import cv2
from PIL import Image
import torch
import mmcv
import numpy as np
from torch.utils.data import Dataset
import torchvision.transforms as T
from torchvision.datasets import ImageFolder
class ImageNetDataset(Dataset):
    """ImageNet classification dataset wrapping ``torchvision.datasets.ImageFolder``.

    Images are converted to single-channel grayscale and normalized with
    mean 0.456 / std 1.0.

    Args:
        data_root (str): root directory containing ``train/`` and ``val/``
            subdirectories in ImageFolder layout.
        test_mode (bool): when True, load the ``val`` split with the
            deterministic evaluation transform; otherwise load ``train``
            with random augmentation.
    """

    def __init__(self,
                 data_root,
                 test_mode=False, **kwargs):
        # 1000 ImageNet classes, identified by integer index.
        self.classes = list(range(1000))
        # Single-channel normalization (inputs are grayscale).
        normalize = T.Normalize(mean=[0.456], std=[1.0])
        if not test_mode:
            traindir = os.path.join(data_root, 'train')
            self.dataset = ImageFolder(traindir, T.Compose([
                T.Grayscale(num_output_channels=1),
                T.RandomResizedCrop(224, scale=(0.8, 1.0)),
                T.RandomHorizontalFlip(),
                T.ToTensor(),
                normalize,
            ]))
        else:
            valdir = os.path.join(data_root, 'val')
            # BUGFIX: the val pipeline previously omitted the grayscale
            # conversion, yielding 3-channel tensors at evaluation time
            # while training produced 1-channel tensors.
            self.dataset = ImageFolder(valdir, T.Compose([
                T.Grayscale(num_output_channels=1),
                T.Resize(256),
                T.CenterCrop(224),
                T.ToTensor(),
                normalize,
            ]))
        if not test_mode:
            self._set_group_flag()

    def _set_group_flag(self):
        """Assign every image to aspect-ratio group 0.

        NOTE(review): no aspect ratio is computed here — all samples get
        flag 0 (the original docstring claimed ratio-based grouping).
        """
        self.flag = np.zeros(len(self), dtype=np.uint8)

    def __getitem__(self, idx):
        """Return a dict with the transformed image and a long label tensor."""
        # Unpack once: indexing self.dataset twice would apply the random
        # training transform twice for a single sample.
        img, target = self.dataset[idx]
        return dict(img=img, label=torch.tensor([target], dtype=torch.long))

    def __len__(self):
        """Number of samples in the selected split."""
        return len(self.dataset)
| mmdet/datasets/classify/imagenet.py | 1,903 | Set flag according to image aspect ratio.
Images with aspect ratio greater than 1 will be set as group 1,
otherwise group 0.
normalize = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) | 205 | en | 0.79646 |
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
from scipy import stats
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_case
from tensorflow_probability.python.internal import test_util as tfp_test_util
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
tfd = tfp.distributions
@test_util.run_all_in_graph_and_eager_modes
class ZipfTest(test_case.TestCase):
  def assertBetween(self, x, minimum, maximum):
    """Asserts that `minimum <= x <= maximum` (inclusive bounds)."""
    self.assertGreaterEqual(x, minimum)
    self.assertLessEqual(x, maximum)
def assertAllBetween(self, a, minval, maxval, atol=1e-6):
a = self._GetNdArray(a)
minval = self._GetNdArray(minval)
maxval = self._GetNdArray(maxval)
self.assertEqual(a.shape, minval.shape)
self.assertEqual(a.shape, maxval.shape)
for idx, _ in np.ndenumerate(a):
self.assertBetween(a[idx], minval[idx] - atol, maxval[idx] + atol)
def testZipfShape(self):
power = tf.constant([3.0] * 5)
zipf = tfd.Zipf(power=power)
self.assertEqual(self.evaluate(zipf.batch_shape_tensor()), (5,))
self.assertEqual(zipf.batch_shape, tf.TensorShape([5]))
self.assertAllEqual(self.evaluate(zipf.event_shape_tensor()), [])
self.assertEqual(zipf.event_shape, tf.TensorShape([]))
def testInvalidPower(self):
invalid_powers = [-.02, 0.5, -2., .99, 1.]
for power in invalid_powers:
with self.assertRaisesOpError("Condition x > y"):
zipf = tfd.Zipf(power=power, validate_args=True)
self.evaluate(zipf.power)
def testNanPower(self):
zipf = tfd.Zipf(power=np.nan, validate_args=False)
self.assertAllNan(self.evaluate(zipf.power))
def testValidPower_ImplicitlyConvertsToFloat32(self):
powers = [2, 10, 1.1]
for power in powers:
zipf = tfd.Zipf(power=power, validate_args=True)
self.assertEqual(zipf.power.dtype, tf.float32)
def testEventDtype(self):
for power_dtype in [tf.float32, tf.float64]:
for event_dtype in [tf.int32, tf.int64, tf.float32, tf.float64]:
power_dtype = tf.float32
event_dtype = tf.int32
power = tf.constant(5., dtype=power_dtype)
zipf = tfd.Zipf(power=power, dtype=event_dtype)
self.assertEqual(zipf.dtype, event_dtype)
self.assertEqual(zipf.dtype, zipf.sample(10).dtype)
self.assertEqual(zipf.dtype, zipf.sample(1).dtype)
self.assertEqual(zipf.dtype, zipf.mode().dtype)
def testInvalidEventDtype(self):
with self.assertRaisesWithPredicateMatch(
TypeError, "power.dtype .* not a supported .* type"):
power = tf.constant(5., dtype=tf.float16)
zipf = tfd.Zipf(power=power, dtype=tf.int32, validate_args=True)
self.evaluate(zipf.sample())
def testZipfLogPmf_InvalidArgs(self):
power = tf.constant([4.0])
# Non-integer samples are rejected if validate_args is True and
# interpolate_nondiscrete is False.
non_integer_samples = [0.99, 4.5, 5.001, 1e-6, -3, -2, -1, -0., 0]
for x in non_integer_samples:
zipf = tfd.Zipf(
power=power, interpolate_nondiscrete=False, validate_args=True)
with self.assertRaisesOpError("Condition (x == y|x > 0)"):
self.evaluate(zipf.log_prob(x))
with self.assertRaisesOpError("Condition (x == y|x > 0)"):
self.evaluate(zipf.prob(x))
def testZipfLogPmf_IntegerArgs(self):
batch_size = 9
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = np.array([-3., -0., 0., 2., 3., 4., 5., 6., 7.], dtype=np.float32)
zipf = tfd.Zipf(power=power)
log_pmf = zipf.log_prob(x)
self.assertEqual((batch_size,), log_pmf.shape)
self.assertAllClose(self.evaluate(log_pmf), stats.zipf.logpmf(x, power_v))
pmf = zipf.prob(x)
self.assertEqual((batch_size,), pmf.shape)
self.assertAllClose(self.evaluate(pmf), stats.zipf.pmf(x, power_v))
def testZipfLogPmf_NonIntegerArgs(self):
batch_size = 12
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = [-3., -0.5, 0., 2., 2.2, 3., 3.1, 4., 5., 5.5, 6., 7.2]
zipf = tfd.Zipf(power=power)
log_pmf = zipf.log_prob(x)
self.assertEqual((batch_size,), log_pmf.shape)
# Check that log_pmf(x) of tfd.Zipf is between the values of
# stats.zipf.logpmf for ceil(x) and floor(x).
log_pmf_values = self.evaluate(log_pmf)
floor_x = np.floor(x)
ceil_x = np.ceil(x)
self.assertAllBetween(log_pmf_values, stats.zipf.logpmf(ceil_x, power_v),
stats.zipf.logpmf(floor_x, power_v))
# Check that pmf(x) of tfd.Zipf is between the values of stats.zipf.pmf for
# ceil(x) and floor(x).
pmf = zipf.prob(x)
self.assertEqual((batch_size,), pmf.shape)
pmf_values = self.evaluate(pmf)
self.assertAllBetween(pmf_values, stats.zipf.pmf(ceil_x, power_v),
stats.zipf.pmf(floor_x, power_v))
def testZipfLogPmf_NonIntegerArgsNoInterpolation(self):
batch_size = 12
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = [-3., -0.5, 0., 2., 2.2, 3., 3.1, 4., 5., 5.5, 6., 7.2]
zipf = tfd.Zipf(power=power, interpolate_nondiscrete=False)
log_pmf = zipf.log_prob(x)
self.assertEqual((batch_size,), log_pmf.shape)
log_pmf_values = self.evaluate(log_pmf)
self.assertAllClose(log_pmf_values, stats.zipf.logpmf(x, power_v))
pmf = zipf.prob(x)
self.assertEqual((batch_size,), pmf.shape)
pmf_values = self.evaluate(pmf)
self.assertAllClose(pmf_values, stats.zipf.pmf(x, power_v))
def testZipfLogPmfMultidimensional_IntegerArgs(self):
batch_size = 6
power = tf.constant([[2.0, 4.0, 5.0]] * batch_size)
power_v = [2.0, 4.0, 5.0]
x = np.array([[2.1, 3.5, 4.9, 5., 6.6, 7.]], dtype=np.int32).T
zipf = tfd.Zipf(power=power)
log_pmf = zipf.log_prob(x)
self.assertEqual((6, 3), log_pmf.shape)
self.assertAllClose(self.evaluate(log_pmf), stats.zipf.logpmf(x, power_v))
pmf = zipf.prob(x)
self.assertEqual((6, 3), pmf.shape)
self.assertAllClose(self.evaluate(pmf), stats.zipf.pmf(x, power_v))
def testZipfLogPmfMultidimensional_NonIntegerArgs(self):
batch_size = 6
power = tf.constant([[2.0, 4.0, 5.0]] * batch_size)
power_v = [2.0, 4.0, 5.0]
x = np.array([[2., 3.2, 4.3, 5.5, 6.9, 7.]], dtype=np.float32).T
floor_x = np.floor(x)
ceil_x = np.ceil(x)
zipf = tfd.Zipf(power=power)
log_pmf = zipf.log_prob(x)
self.assertEqual((6, 3), log_pmf.shape)
self.assertAllBetween(
self.evaluate(log_pmf), stats.zipf.logpmf(ceil_x, power_v),
stats.zipf.logpmf(floor_x, power_v))
pmf = zipf.prob(x)
self.assertEqual((6, 3), pmf.shape)
self.assertAllBetween(
self.evaluate(pmf), stats.zipf.pmf(ceil_x, power_v),
stats.zipf.pmf(floor_x, power_v))
def testZipfCdf_IntegerArgs(self):
batch_size = 12
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8]
zipf = tfd.Zipf(power=power)
log_cdf = zipf.log_cdf(x)
self.assertEqual((batch_size,), log_cdf.shape)
self.assertAllClose(self.evaluate(log_cdf), stats.zipf.logcdf(x, power_v))
cdf = zipf.cdf(x)
self.assertEqual((batch_size,), cdf.shape)
self.assertAllClose(self.evaluate(cdf), stats.zipf.cdf(x, power_v))
def testZipfCdf_NonIntegerArgsNoInterpolation(self):
batch_size = 12
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = [-3.5, -0.5, 0., 1, 1.1, 2.2, 3.1, 4., 5., 5.5, 6.4, 7.8]
zipf = tfd.Zipf(power=power, interpolate_nondiscrete=False)
log_cdf = zipf.log_cdf(x)
self.assertEqual((batch_size,), log_cdf.shape)
self.assertAllClose(self.evaluate(log_cdf), stats.zipf.logcdf(x, power_v))
cdf = zipf.cdf(x)
self.assertEqual((batch_size,), cdf.shape)
self.assertAllClose(self.evaluate(cdf), stats.zipf.cdf(x, power_v))
def testZipfCdf_NonIntegerArgsInterpolated(self):
batch_size = 12
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = [-3.5, -0.5, 0., 1, 1.1, 2.2, 3.1, 4., 5., 5.5, 6.4, 7.8]
floor_x = np.floor(x)
ceil_x = np.ceil(x)
zipf = tfd.Zipf(power=power)
log_cdf = zipf.log_cdf(x)
self.assertEqual((batch_size,), log_cdf.shape)
self.assertAllBetween(
self.evaluate(log_cdf), stats.zipf.logcdf(floor_x, power_v),
stats.zipf.logcdf(ceil_x, power_v))
cdf = zipf.cdf(x)
self.assertEqual((batch_size,), cdf.shape)
self.assertAllBetween(
self.evaluate(cdf), stats.zipf.cdf(floor_x, power_v),
stats.zipf.cdf(ceil_x, power_v))
def testZipfCdf_NonIntegerArgs(self):
batch_size = 12
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = [-3.5, -0.5, 0., 1, 1.1, 2.2, 3.1, 4., 5., 5.5, 6.4, 7.8]
floor_x = np.floor(x)
ceil_x = np.ceil(x)
zipf = tfd.Zipf(power=power)
log_cdf = zipf.log_cdf(x)
self.assertEqual((batch_size,), log_cdf.shape)
self.assertAllBetween(
self.evaluate(log_cdf), stats.zipf.logcdf(floor_x, power_v),
stats.zipf.logcdf(ceil_x, power_v))
cdf = zipf.cdf(x)
self.assertEqual((batch_size,), cdf.shape)
self.assertAllBetween(
self.evaluate(cdf), stats.zipf.cdf(floor_x, power_v),
stats.zipf.cdf(ceil_x, power_v))
def testZipfCdfMultidimensional_IntegerArgs(self):
batch_size = 6
power = tf.constant([[2.0, 4.0, 5.0]] * batch_size)
power_v = [2.0, 4.0, 5.0]
x = np.array([[2., 3., 4., 5., 6., 7.]], dtype=np.float32).T
zipf = tfd.Zipf(power=power)
log_cdf = zipf.log_cdf(x)
self.assertEqual((6, 3), log_cdf.shape)
self.assertAllClose(self.evaluate(log_cdf), stats.zipf.logcdf(x, power_v))
cdf = zipf.cdf(x)
self.assertEqual((6, 3), cdf.shape)
self.assertAllClose(self.evaluate(cdf), stats.zipf.cdf(x, power_v))
def testZipfCdfMultidimensional_NonIntegerArgs(self):
batch_size = 6
power = tf.constant([[2.0, 4.0, 5.0]] * batch_size)
power_v = [2.0, 4.0, 5.0]
x = np.array([[2.3, 3.5, 4.1, 5.5, 6.8, 7.9]], dtype=np.float32).T
floor_x = np.floor(x)
ceil_x = np.ceil(x)
zipf = tfd.Zipf(power=power)
log_cdf = zipf.log_cdf(x)
self.assertEqual((6, 3), log_cdf.shape)
self.assertAllBetween(
self.evaluate(log_cdf), stats.zipf.logcdf(floor_x, power_v),
stats.zipf.logcdf(ceil_x, power_v))
cdf = zipf.cdf(x)
self.assertEqual((6, 3), cdf.shape)
self.assertAllBetween(
self.evaluate(cdf), stats.zipf.cdf(floor_x, power_v),
stats.zipf.cdf(ceil_x, power_v))
def testZipfMean(self):
power_v = [2.0, 3.0, 2.5]
zipf = tfd.Zipf(power=power_v)
self.assertEqual((3,), zipf.mean().shape)
self.assertAllClose(self.evaluate(zipf.mean()), stats.zipf.mean(power_v))
def testZipfVariance(self):
power_v = [4.0, 3.0, 5.5] # var is undefined for power <= 3
zipf = tfd.Zipf(power=power_v)
self.assertEqual((3,), zipf.variance().shape)
stat_vars = np.vectorize(stats.zipf.var)(power_v)
self.assertAllClose(self.evaluate(zipf.variance()), stat_vars)
def testZipfStd(self):
power_v = [4.0, 3.5, 4.5]
zipf = tfd.Zipf(power=power_v)
self.assertEqual((3,), zipf.stddev().shape)
stat_stddevs = np.vectorize(stats.zipf.std)(power_v)
self.assertAllClose(self.evaluate(zipf.stddev()), stat_stddevs)
def testZipfMode(self):
power_v = [10.0, 3.0, 2.5, 3.2, 1.1, 0.05]
zipf = tfd.Zipf(power=power_v)
self.assertEqual((6,), zipf.mode().shape)
self.assertAllClose(self.evaluate(zipf.mode()), np.ones_like(power_v))
def testZipfSample(self):
power_v = 5.
n = int(500e4)
for power_dtype in [tf.float32, tf.float64]:
power = tf.constant(power_v, dtype=power_dtype)
for dtype in [tf.int32, tf.int64, tf.float32, tf.float64]:
zipf = tfd.Zipf(power=power, dtype=dtype)
samples = zipf.sample(n, seed=tfp_test_util.test_seed())
sample_values = self.evaluate(samples)
self.assertEqual((n,), samples.shape)
self.assertEqual((n,), sample_values.shape)
self.assertAllClose(
sample_values.mean(), stats.zipf.mean(power_v), rtol=.01)
self.assertAllClose(
sample_values.std(), stats.zipf.std(power_v), rtol=.03)
def testZipfSample_ValidateArgs(self):
power_v = 3.
n = int(100e3)
for power_dtype in [tf.float32, tf.float64]:
power = tf.constant(power_v, dtype=power_dtype)
for dtype in [tf.int32, tf.int64, tf.float32, tf.float64]:
zipf = tfd.Zipf(power=power, dtype=dtype, validate_args=True)
samples = zipf.sample(n, seed=tfp_test_util.test_seed())
self.evaluate(samples)
def testZipfSampleMultidimensionalMean(self):
power_v = np.array([np.arange(5, 15, dtype=np.float32)]) # 1 x 10
zipf = tfd.Zipf(power=power_v)
n = int(100e3)
samples = zipf.sample(n, seed=tfp_test_util.test_seed())
sample_values = self.evaluate(samples)
self.assertEqual((n, 1, 10,), samples.shape)
self.assertEqual((n, 1, 10,), sample_values.shape)
# stats.zipf wants float64 params.
stats_mean = np.vectorize(stats.zipf.mean)(power_v.astype(np.float64))
self.assertAllClose(sample_values.mean(axis=0), stats_mean, rtol=.01)
def testZipfSampleMultidimensionalStd(self):
power_v = np.array([np.arange(5, 10, dtype=np.float32)]) # 1 x 5
zipf = tfd.Zipf(power=power_v)
n = int(100e4)
samples = zipf.sample(n, seed=tfp_test_util.test_seed())
sample_values = self.evaluate(samples)
self.assertEqual((n, 1, 5), samples.shape)
self.assertEqual((n, 1, 5), sample_values.shape)
# stats.zipf wants float64 params.
stats_std = np.vectorize(stats.zipf.std)(power_v.astype(np.float64))
self.assertAllClose(sample_values.std(axis=0), stats_std, rtol=.04)
# Test that sampling with the same seed twice gives the same results.
def testZipfSampleMultipleTimes(self):
n = 1000
seed = tfp_test_util.test_seed()
power = 1.5
zipf1 = tfd.Zipf(power=power, name="zipf1")
tf.compat.v1.set_random_seed(seed)
samples1 = self.evaluate(zipf1.sample(n, seed=seed))
zipf2 = tfd.Zipf(power=power, name="zipf2")
tf.compat.v1.set_random_seed(seed)
samples2 = self.evaluate(zipf2.sample(n, seed=seed))
self.assertAllEqual(samples1, samples2)
def testZipfSample_AvoidsInfiniteLoop(self):
zipf = tfd.Zipf(power=1.)
n = 1000
self.evaluate(zipf.sample(n, seed=tfp_test_util.test_seed()))
# Run the full test suite when executed as a script.
if __name__ == "__main__":
  tf.test.main()
| tensorflow_probability/python/distributions/zipf_test.py | 15,329 | Copyright 2018 The TensorFlow Probability Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================ Dependency imports pylint: disable=g-direct-tensorflow-import,g-import-not-at-top Non-integer samples are rejected if validate_args is True and interpolate_nondiscrete is False. Check that log_pmf(x) of tfd.Zipf is between the values of stats.zipf.logpmf for ceil(x) and floor(x). Check that pmf(x) of tfd.Zipf is between the values of stats.zipf.pmf for ceil(x) and floor(x). var is undefined for power <= 3 1 x 10 stats.zipf wants float64 params. 1 x 5 stats.zipf wants float64 params. Test that sampling with the same seed twice gives the same results. | 1,205 | en | 0.797023 |
"""Implements interface for OSv unikernels."""
from backend.vm import VMConfig
from os import path
from .imgedit import set_cmdline
class OSv:
    """Configuration interface for OSv unikernel images."""

    cmdline_template = "--ip=eth0,{ipv4_addr},255.255.255.0 --nameserver=10.0.125.0 {extra_cmdline}"

    @staticmethod
    def configure(image, config, nic_name):
        """Write the boot command line into the image and return a VMConfig.

        Falls back to the image's default command line when the config
        provides none.
        """
        extra = config.cmdline if config.cmdline else image.default_cmdline
        boot_args = OSv.cmdline_template.format(
            ipv4_addr=config.ipv4_addr,
            extra_cmdline=extra,
        )
        disk_path = path.join(image.root, 'system.qemu')
        set_cmdline(disk_path, boot_args)
        return VMConfig(
            name=config.name,
            nic_name=nic_name,
            num_cpus=4,
            vdisk_path=disk_path,
            vdisk_format='qcow2',
            memory_size=1024000
        )
| backend/unikernel/osv/__init__.py | 827 | Implements interface for OSv unikernels. | 40 | en | 0.667147 |
# coding=utf-8
# Copyright 2019 The Tensor2Robot Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Tests for tensor2robot.train_eval."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
from absl import flags
import gin
import mock
import numpy as np
from six.moves import zip
from tensor2robot.hooks import hook_builder
from tensor2robot.models import abstract_model
from tensor2robot.preprocessors import noop_preprocessor
from tensor2robot.utils import mocks
from tensor2robot.utils import train_eval
import tensorflow.compat.v1 as tf
from tensorflow.contrib import predictor as contrib_predictor
FLAGS = flags.FLAGS

# Kept deliberately small so the end-to-end training tests run quickly.
_MAX_TRAIN_STEPS = 400
_EVAL_STEPS = 40
_BATCH_SIZE = 4
_EVAL_THROTTLE_SECS = 0.0  # Evaluate as soon as a new checkpoint appears.
class FakeHook(tf.train.SessionRunHook):
  """Session run hook that records its invocations on an internal mock."""

  def __init__(self):
    self._mock = mock.MagicMock()

  def begin(self):
    """Forward the `begin` callback to the mock so tests can assert on it."""
    self._mock.begin()

  @property
  def mock(self):
    """The MagicMock recording hook invocations."""
    return self._mock
class FakeHookBuilder(hook_builder.HookBuilder):
  """Hook builder that always hands out one shared FakeHook instance."""

  def __init__(self):
    self._hook = FakeHook()

  def create_hooks(self, *args, **kwargs):
    """Return the single fake hook; all arguments are ignored."""
    del args, kwargs
    return [self._hook]

  @property
  def hook_mock(self):
    """Mock of the underlying hook, for assertions in tests."""
    return self._hook.mock
class TrainEvalTest(tf.test.TestCase):
  """End-to-end tests for train_eval.train_eval_model using mock models."""

  def _compute_total_loss(self, labels, logits):
    """Summation of the categorical hinge loss for labels and logits."""
    error = 0.
    for label, logit in zip(labels, logits):
      # Reference tensorflow implementation can be found in keras.losses.
      positive = (label * logit)
      negative = ((1 - label) * logit)
      error += np.maximum(0., negative - positive + 1.)
    return error

  def test_train_eval_model(self):
    """Tests that a simple model trains and exported models are valid."""
    gin.bind_parameter('tf.estimator.RunConfig.save_checkpoints_steps', 100)
    model_dir = self.create_tempdir().full_path
    mock_t2r_model = mocks.MockT2RModel(
        preprocessor_cls=noop_preprocessor.NoOpPreprocessor)
    mock_input_generator_train = mocks.MockInputGenerator(
        batch_size=_BATCH_SIZE)
    mock_input_generator_eval = mocks.MockInputGenerator(batch_size=1)
    fake_hook_builder = FakeHookBuilder()
    train_eval.train_eval_model(
        t2r_model=mock_t2r_model,
        input_generator_train=mock_input_generator_train,
        input_generator_eval=mock_input_generator_eval,
        max_train_steps=_MAX_TRAIN_STEPS,
        model_dir=model_dir,
        train_hook_builders=[fake_hook_builder],
        eval_hook_builders=[fake_hook_builder],
        eval_steps=_EVAL_STEPS,
        eval_throttle_secs=_EVAL_THROTTLE_SECS,
        create_exporters_fn=train_eval.create_default_exporters)
    self.assertTrue(fake_hook_builder.hook_mock.begin.called)
    # We ensure that both numpy and tf_example inference models are exported.
    best_exporter_numpy_path = os.path.join(model_dir, 'export',
                                            'best_exporter_numpy', '*')
    numpy_model_paths = sorted(tf.io.gfile.glob(best_exporter_numpy_path))
    # There should be at least 1 exported model.
    self.assertGreater(len(numpy_model_paths), 0)
    # This mock network converges nicely which is why we have several best
    # models, by default we keep the best 5 and the latest one is always the
    # best.
    self.assertLessEqual(len(numpy_model_paths), 5)
    best_exporter_tf_example_path = os.path.join(
        model_dir, 'export', 'best_exporter_tf_example', '*')
    tf_example_model_paths = sorted(
        tf.io.gfile.glob(best_exporter_tf_example_path))
    # There should be at least 1 exported model.
    self.assertGreater(len(tf_example_model_paths), 0)
    # This mock network converges nicely which is why we have several best
    # models, by default we keep the best 5 and the latest one is always the
    # best.
    self.assertLessEqual(len(tf_example_model_paths), 5)
    # We test both saved models within one test since the bulk of the time
    # is spent training the model in the first place.
    # Verify that the serving estimator does exactly the same as the normal
    # estimator with all the parameters.
    estimator_predict = tf.estimator.Estimator(
        model_fn=mock_t2r_model.model_fn,
        config=tf.estimator.RunConfig(model_dir=model_dir))
    prediction_ref = estimator_predict.predict(
        input_fn=mock_input_generator_eval.create_dataset_input_fn(
            mode=tf.estimator.ModeKeys.EVAL))
    # Now we can load our exported estimator graph with the numpy feed_dict
    # interface, there are no dependencies on the model_fn or preprocessor
    # anymore.
    # We load the latest model since it had the best eval performance.
    numpy_predictor_fn = contrib_predictor.from_saved_model(
        numpy_model_paths[-1])
    features, labels = mock_input_generator_eval.create_numpy_data()
    ref_error = self._compute_total_loss(
        labels, [val['logit'].flatten() for val in prediction_ref])
    numpy_predictions = []
    for feature, label in zip(features, labels):
      predicted = numpy_predictor_fn({'x': feature.reshape(
          1, -1)})['logit'].flatten()
      numpy_predictions.append(predicted)
      # This ensures that we actually achieve near-perfect classification.
      if label > 0:
        self.assertGreater(predicted[0], 0)
      else:
        self.assertLess(predicted[0], 0)
    numpy_error = self._compute_total_loss(labels, numpy_predictions)
    # Now we can load our exported estimator graph with the tf_example feed_dict
    # interface, there are no dependencies on the model_fn or preprocessor
    # anymore.
    # We load the latest model since it had the best eval performance.
    tf_example_predictor_fn = contrib_predictor.from_saved_model(
        tf_example_model_paths[-1])
    tf_example_predictions = []
    for feature, label in zip(features, labels):
      # We have to create our serialized tf.Example proto.
      example = tf.train.Example()
      example.features.feature['measured_position'].float_list.value.extend(
          feature)
      feed_dict = {
          'input_example_tensor':
              np.array(example.SerializeToString()).reshape(1,)
      }
      predicted = tf_example_predictor_fn(feed_dict)['logit'].flatten()
      tf_example_predictions.append(predicted)
      # This ensures that we actually achieve perfect classification.
      if label > 0:
        self.assertGreater(predicted[0], 0)
      else:
        self.assertLess(predicted[0], 0)
    tf_example_error = self._compute_total_loss(labels, tf_example_predictions)
    np.testing.assert_almost_equal(tf_example_error, numpy_error)
    # The exported saved models both have to have the same performance and since
    # we train on eval on the same fixed dataset the latest and greatest
    # model error should also be the best.
    np.testing.assert_almost_equal(ref_error, tf_example_error, decimal=3)

  def test_init_from_checkpoint_global_step(self):
    """Tests that a simple model trains and exported models are valid."""
    gin.bind_parameter('tf.estimator.RunConfig.save_checkpoints_steps', 100)
    gin.bind_parameter('tf.estimator.RunConfig.keep_checkpoint_max', 3)
    model_dir = self.create_tempdir().full_path
    mock_t2r_model = mocks.MockT2RModel(
        preprocessor_cls=noop_preprocessor.NoOpPreprocessor)
    mock_input_generator_train = mocks.MockInputGenerator(
        batch_size=_BATCH_SIZE)
    train_eval.train_eval_model(
        t2r_model=mock_t2r_model,
        input_generator_train=mock_input_generator_train,
        max_train_steps=_MAX_TRAIN_STEPS,
        model_dir=model_dir,
        eval_steps=_EVAL_STEPS,
        eval_throttle_secs=_EVAL_THROTTLE_SECS,
        create_exporters_fn=train_eval.create_default_exporters)
    # The model trains for _MAX_TRAIN_STEPS (400) steps and saves a checkpoint
    # each 100 steps and keeps 3 -> len == 3.
    self.assertLen(tf.io.gfile.glob(os.path.join(model_dir, 'model*.meta')), 3)
    # The continuous training has its own directory.
    continue_model_dir = self.create_tempdir().full_path
    init_from_checkpoint_fn = functools.partial(
        abstract_model.default_init_from_checkpoint_fn, checkpoint=model_dir)
    continue_mock_t2r_model = mocks.MockT2RModel(
        preprocessor_cls=noop_preprocessor.NoOpPreprocessor,
        init_from_checkpoint_fn=init_from_checkpoint_fn)
    continue_mock_input_generator_train = mocks.MockInputGenerator(
        batch_size=_BATCH_SIZE)
    train_eval.train_eval_model(
        t2r_model=continue_mock_t2r_model,
        input_generator_train=continue_mock_input_generator_train,
        model_dir=continue_model_dir,
        max_train_steps=_MAX_TRAIN_STEPS + 100,
        eval_steps=_EVAL_STEPS,
        eval_throttle_secs=_EVAL_THROTTLE_SECS,
        create_exporters_fn=train_eval.create_default_exporters)
    # If the model was successfully restored including the global step, only 1
    # additional checkpoint to the init one should be created -> len == 2.
    self.assertLen(
        tf.io.gfile.glob(os.path.join(continue_model_dir, 'model*.meta')), 2)

  def test_init_from_checkpoint_use_avg_model_params_and_weights(self):
    """Tests that a simple model trains and exported models are valid."""
    gin.bind_parameter('tf.estimator.RunConfig.save_checkpoints_steps', 100)
    gin.bind_parameter('tf.estimator.RunConfig.keep_checkpoint_max', 3)
    model_dir = self.create_tempdir().full_path
    mock_t2r_model = mocks.MockT2RModel(
        preprocessor_cls=noop_preprocessor.NoOpPreprocessor,
        use_avg_model_params=True)
    mock_input_generator_train = mocks.MockInputGenerator(
        batch_size=_BATCH_SIZE)
    mock_input_generator = mocks.MockInputGenerator(batch_size=1)
    mock_input_generator.set_specification_from_model(
        mock_t2r_model, tf.estimator.ModeKeys.TRAIN)
    train_eval.train_eval_model(
        t2r_model=mock_t2r_model,
        input_generator_train=mock_input_generator_train,
        max_train_steps=_MAX_TRAIN_STEPS,
        model_dir=model_dir)
    init_checkpoint = tf.train.NewCheckpointReader(
        tf.train.latest_checkpoint(model_dir))
    # Verify that the serving estimator does exactly the same as the normal
    # estimator with all the parameters.
    initial_estimator_predict = tf.estimator.Estimator(
        model_fn=mock_t2r_model.model_fn,
        config=tf.estimator.RunConfig(model_dir=model_dir))
    # pylint: disable=g-complex-comprehension
    initial_predictions = [
        prediction['logit'] for prediction in list(
            initial_estimator_predict.predict(
                input_fn=mock_input_generator.create_dataset_input_fn(
                    mode=tf.estimator.ModeKeys.EVAL)))
    ]
    # The continuous training has its own directory.
    continue_model_dir = self.create_tempdir().full_path
    init_from_checkpoint_fn = functools.partial(
        abstract_model.default_init_from_checkpoint_fn, checkpoint=model_dir)
    continue_mock_t2r_model = mocks.MockT2RModel(
        preprocessor_cls=noop_preprocessor.NoOpPreprocessor,
        init_from_checkpoint_fn=init_from_checkpoint_fn)
    continue_mock_input_generator_train = mocks.MockInputGenerator(
        batch_size=_BATCH_SIZE)
    # Re-initialize the model and train for one step, basically the same
    # performance as the original model.
    train_eval.train_eval_model(
        t2r_model=continue_mock_t2r_model,
        input_generator_train=continue_mock_input_generator_train,
        model_dir=continue_model_dir,
        max_train_steps=_MAX_TRAIN_STEPS)
    continue_checkpoint = tf.train.NewCheckpointReader(
        tf.train.latest_checkpoint(continue_model_dir))
    for tensor_name, _ in tf.train.list_variables(model_dir):
      if 'ExponentialMovingAverage' in tensor_name:
        # These values are replaced by the swapping saver when using the
        # use_avg_model_params.
        continue
      if 'Adam' in tensor_name:
        # The adam optimizer values are not required.
        continue
      if 'global_step' in tensor_name:
        # The global step will be incremented by 1.
        continue
      self.assertAllClose(
          init_checkpoint.get_tensor(tensor_name),
          continue_checkpoint.get_tensor(tensor_name),
          atol=1e-3)
    # Verify that the serving estimator does exactly the same as the normal
    # estimator with all the parameters.
    continue_estimator_predict = tf.estimator.Estimator(
        model_fn=mock_t2r_model.model_fn,
        config=tf.estimator.RunConfig(model_dir=continue_model_dir))
    continue_predictions = [
        prediction['logit'] for prediction in list(
            continue_estimator_predict.predict(
                input_fn=mock_input_generator.create_dataset_input_fn(
                    mode=tf.estimator.ModeKeys.EVAL)))
    ]
    self.assertTrue(
        np.allclose(initial_predictions, continue_predictions, atol=1e-1))
    # A randomly initialized model estimator with all the parameters.
    random_estimator_predict = tf.estimator.Estimator(
        model_fn=mock_t2r_model.model_fn)
    random_predictions = [
        prediction['logit'] for prediction in list(
            random_estimator_predict.predict(
                input_fn=mock_input_generator.create_dataset_input_fn(
                    mode=tf.estimator.ModeKeys.EVAL)))
    ]
    self.assertFalse(
        np.allclose(initial_predictions, random_predictions, atol=1e-2))
# Run the full test suite when executed as a script.
if __name__ == '__main__':
  tf.test.main()
| utils/train_eval_test.py | 14,062 | Summation of the categorical hinge loss for labels and logits.
Tests that a simple model trains and exported models are valid.
Tests that a simple model trains and exported models are valid.
Tests that a simple model trains and exported models are valid.
Tests for tensor2robot.train_eval.
coding=utf-8 Copyright 2019 The Tensor2Robot Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Lint as: python2, python3 Reference tensorflow implementation can be found in keras.losses. We ensure that both numpy and tf_example inference models are exported. There should be at least 1 exported model. This mock network converges nicely which is why we have several best models, by default we keep the best 5 and the latest one is always the best. There should be at least 1 exported model. This mock network converges nicely which is why we have several best models, by default we keep the best 5 and the latest one is always the best. We test both saved models within one test since the bulk of the time is spent training the model in the firstplace. Verify that the serving estimator does exactly the same as the normal estimator with all the parameters. Now we can load our exported estimator graph with the numpy feed_dict interface, there are no dependencies on the model_fn or preprocessor anymore. We load the latest model since it had the best eval performance. This ensures that we actually achieve near-perfect classification. Now we can load our exported estimator graph with the tf_example feed_dict interface, there are no dependencies on the model_fn or preprocessor anymore. We load the latest model since it had the best eval performance. We have to create our serialized tf.Example proto. This ensures that we actually achieve perfect classification. 
The exported saved models both have to have the same performance and since we train on eval on the same fixed dataset the latest and greatest model error should also be the best. The model trains for 200 steps and saves a checkpoint each 100 steps and keeps 3 -> len == 3. The continuous training has its own directory. If the model was successful restored including the global step, only 1 additional checkpoint to the init one should be created -> len == 2. Verify that the serving estimator does exactly the same as the normal estimator with all the parameters. pylint: disable=g-complex-comprehension The continuous training has its own directory. Re-initialize the model and train for one step, basically the same performance as the original model. These values are replaced by the swapping saver when using the use_avg_model_params. The adam optimizer values are not required. The global step will be incremented by 1. Verify that the serving estimator does exactly the same as the normal estimator with all the parameters. A randomly initialized model estimator with all the parameters. | 3,335 | en | 0.913462 |
"""Support for Agent camera streaming."""
from datetime import timedelta
import logging
from agent import AgentError
from homeassistant.components.camera import SUPPORT_ON_OFF
from homeassistant.components.mjpeg.camera import (
CONF_MJPEG_URL,
CONF_STILL_IMAGE_URL,
MjpegCamera,
filter_urllib3_logging,
)
from homeassistant.const import ATTR_ATTRIBUTION, CONF_NAME
from homeassistant.helpers import entity_platform
from .const import (
ATTRIBUTION,
CAMERA_SCAN_INTERVAL_SECS,
CONNECTION,
DOMAIN as AGENT_DOMAIN,
)
# Cameras are polled on this fixed interval (see should_poll below).
SCAN_INTERVAL = timedelta(seconds=CAMERA_SCAN_INTERVAL_SECS)

_LOGGER = logging.getLogger(__name__)

# Entity service names exposed by this platform.
_DEV_EN_ALT = "enable_alerts"
_DEV_DS_ALT = "disable_alerts"
_DEV_EN_REC = "start_recording"
_DEV_DS_REC = "stop_recording"
_DEV_SNAP = "snapshot"

# Maps each service name to the AgentCamera coroutine method that handles it.
CAMERA_SERVICES = {
    _DEV_EN_ALT: "async_enable_alerts",
    _DEV_DS_ALT: "async_disable_alerts",
    _DEV_EN_REC: "async_start_recording",
    _DEV_DS_REC: "async_stop_recording",
    _DEV_SNAP: "async_snapshot",
}
async def async_setup_entry(
    hass, config_entry, async_add_entities, discovery_info=None
):
    """Set up the Agent cameras."""
    filter_urllib3_logging()
    server = hass.data[AGENT_DOMAIN][config_entry.entry_id][CONNECTION]
    if not server.devices:
        _LOGGER.warning("Could not fetch cameras from Agent server")
        return
    # typeID 2 identifies camera devices on the Agent server.
    cameras = [
        AgentCamera(device) for device in server.devices if device.typeID == 2
    ]
    async_add_entities(cameras)
    platform = entity_platform.current_platform.get()
    for service_name, handler_name in CAMERA_SERVICES.items():
        platform.async_register_entity_service(service_name, {}, handler_name)
class AgentCamera(MjpegCamera):
    """Representation of an Agent Device Stream.

    Wraps one Agent DVR camera device as an MJPEG camera entity; all state
    is delegated to the underlying ``agent`` library device object.
    """
    def __init__(self, device):
        """Initialize as a subclass of MjpegCamera.

        Builds the MJPEG/still-image URLs from the Agent server base URL,
        the device's stream endpoints, and its configured stream size.
        """
        self._servername = device.client.name
        # NOTE(review): reads a private attribute of the agent client;
        # confirm the library exposes a public accessor for the server URL.
        self.server_url = device.client._server_url
        device_info = {
            CONF_NAME: device.name,
            CONF_MJPEG_URL: f"{self.server_url}{device.mjpeg_image_url}&size={device.mjpegStreamWidth}x{device.mjpegStreamHeight}",
            CONF_STILL_IMAGE_URL: f"{self.server_url}{device.still_image_url}&size={device.mjpegStreamWidth}x{device.mjpegStreamHeight}",
        }
        self.device = device
        # True while the camera is considered lost (see async_update).
        self._removed = False
        self._name = f"{self._servername} {device.name}"
        # NOTE(review): `device._client` here vs `device.client` above --
        # presumably the same object; confirm and prefer the public one.
        self._unique_id = f"{device._client.unique}_{device.typeID}_{device.id}"
        super().__init__(device_info)
    @property
    def device_info(self):
        """Return the device info for adding the entity to the agent object."""
        return {
            "identifiers": {(AGENT_DOMAIN, self._unique_id)},
            "name": self._name,
            "manufacturer": "Agent",
            "model": "Camera",
            "sw_version": self.device.client.version,
        }
    async def async_update(self):
        """Update our state from the Agent API.

        Logs once when a previously lost camera comes back, and once when a
        camera drops out while the server itself is still reachable.
        """
        try:
            await self.device.update()
            if self._removed:
                _LOGGER.debug("%s reacquired", self._name)
                self._removed = False
        except AgentError:
            if self.device.client.is_available:  # server still available - camera error
                if not self._removed:
                    _LOGGER.error("%s lost", self._name)
                    self._removed = True
    @property
    def extra_state_attributes(self):
        """Return the Agent DVR camera state attributes."""
        return {
            ATTR_ATTRIBUTION: ATTRIBUTION,
            "editable": False,
            "enabled": self.is_on,
            "connected": self.connected,
            "detected": self.is_detected,
            "alerted": self.is_alerted,
            "has_ptz": self.device.has_ptz,
            "alerts_enabled": self.device.alerts_active,
        }
    @property
    def should_poll(self) -> bool:
        """Update the state periodically."""
        return True
    @property
    def is_recording(self) -> bool:
        """Return whether the monitor is recording."""
        return self.device.recording
    @property
    def is_alerted(self) -> bool:
        """Return whether the monitor has alerted."""
        return self.device.alerted
    @property
    def is_detected(self) -> bool:
        """Return whether the monitor has detected motion."""
        return self.device.detected
    @property
    def available(self) -> bool:
        """Return True if entity is available."""
        return self.device.client.is_available
    @property
    def connected(self) -> bool:
        """Return True if entity is connected."""
        return self.device.connected
    @property
    def supported_features(self) -> int:
        """Return supported features."""
        return SUPPORT_ON_OFF
    @property
    def is_on(self) -> bool:
        """Return true if on."""
        return self.device.online
    @property
    def icon(self):
        """Return the icon to use in the frontend, if any."""
        if self.is_on:
            return "mdi:camcorder"
        return "mdi:camcorder-off"
    @property
    def motion_detection_enabled(self):
        """Return the camera motion detection status."""
        return self.device.detector_active
    @property
    def unique_id(self) -> str:
        """Return a unique identifier for this agent object."""
        return self._unique_id
    async def async_enable_alerts(self):
        """Enable alerts."""
        await self.device.alerts_on()
    async def async_disable_alerts(self):
        """Disable alerts."""
        await self.device.alerts_off()
    async def async_enable_motion_detection(self):
        """Enable motion detection."""
        await self.device.detector_on()
    async def async_disable_motion_detection(self):
        """Disable motion detection."""
        await self.device.detector_off()
    async def async_start_recording(self):
        """Start recording."""
        await self.device.record()
    async def async_stop_recording(self):
        """Stop recording."""
        await self.device.record_stop()
    async def async_turn_on(self):
        """Enable the camera."""
        await self.device.enable()
    async def async_snapshot(self):
        """Take a snapshot."""
        await self.device.snapshot()
    async def async_turn_off(self):
        """Disable the camera."""
        await self.device.disable()
| homeassistant/components/agent_dvr/camera.py | 6,497 | Representation of an Agent Device Stream.
Initialize as a subclass of MjpegCamera.
Return True if entity is available.
Return True if entity is connected.
Return the device info for adding the entity to the agent object.
Return the Agent DVR camera state attributes.
Return the icon to use in the frontend, if any.
Return whether the monitor has alerted.
Return whether the monitor has alerted.
Return true if on.
Return whether the monitor is recording.
Return the camera motion detection status.
Update the state periodically.
Return supported features.
Return a unique identifier for this agent object.
Support for Agent camera streaming.
server still available - camera error | 681 | en | 0.694886 |
# -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2015 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from pybuilder.errors import BuildFailedException
from pybuilder.plugins.python.test_plugin_helper import ReportsProcessor
from test_utils import Mock, patch
class ReportsProcessorTests(unittest.TestCase):
    """Exercise ReportsProcessor aggregation, reporting, and failure handling."""

    def setUp(self):
        self.reports_processor = ReportsProcessor(Mock(), Mock())
        elapsed = Mock()
        elapsed.get_millis.return_value = 42
        self.reports_processor.process_reports([], elapsed)

    def test_should_raise_exception_when_not_all_tests_pass(self):
        self.reports_processor.tests_failed = 1
        self.assertRaises(
            BuildFailedException,
            self.reports_processor.write_report_and_ensure_all_tests_passed)

    def test_should_not_raise_exception_when_all_tests_pass(self):
        self.reports_processor.tests_failed = 0
        self.reports_processor.write_report_and_ensure_all_tests_passed()

    @patch("pybuilder.plugins.python.test_plugin_helper.render_report", return_value='rendered-report')
    def test_should_write_report(self, render_report):
        self.reports_processor.write_report_and_ensure_all_tests_passed()
        self.reports_processor.project.write_report.assert_called_with(
            "integrationtest.json", 'rendered-report')

    def test_should_parse_reports(self):
        # Two failures followed by two successes; names/files/times follow
        # the 1-based position of each report.
        outcomes = [False, False, True, True]
        reports = [
            {'test': 'name%d' % index,
             'test_file': 'file%d' % index,
             'success': passed,
             'time': index}
            for index, passed in enumerate(outcomes, start=1)
        ]
        self.reports_processor.process_reports(reports, Mock())
        self.assertEqual(self.reports_processor.tests_failed, 2)
        self.assertEqual(self.reports_processor.tests_executed, 4)

    def test_should_create_test_report_with_attributes(self):
        stopwatch = Mock()
        stopwatch.get_millis.return_value = 42
        self.reports_processor.process_reports([], stopwatch)
        self.reports_processor.tests_failed = 4
        self.reports_processor.tests_executed = 42
        self.reports_processor.reports = ['a', 'b', 'c']
        expected_report = {
            'num_of_tests': 42,
            'success': False,
            'tests': ['a', 'b', 'c'],
            'tests_failed': 4,
            'time': 42
        }
        self.assertEqual(self.reports_processor.test_report, expected_report)
| src/unittest/python/plugins/python/test_plugin_helper_tests.py | 3,270 | -*- coding: utf-8 -*- This file is part of PyBuilder Copyright 2011-2015 PyBuilder Team Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 631 | en | 0.87201 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['SqlPoolsV3Args', 'SqlPoolsV3']
@pulumi.input_type
class SqlPoolsV3Args:
    """The set of arguments for constructing a SqlPoolsV3 resource.

    NOTE: generated by the Pulumi SDK Generator -- regenerate rather than
    hand-editing. The getter/setter pairs below are the exact shape required
    by the ``@pulumi.input_type`` decorator.
    """
    def __init__(__self__, *,
                 resource_group_name: pulumi.Input[str],
                 workspace_name: pulumi.Input[str],
                 location: Optional[pulumi.Input[str]] = None,
                 sku: Optional[pulumi.Input['SkuArgs']] = None,
                 sql_pool_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a SqlPoolsV3 resource.
        :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
        :param pulumi.Input[str] workspace_name: The name of the workspace.
        :param pulumi.Input[str] location: The geo-location where the resource lives
        :param pulumi.Input['SkuArgs'] sku: The sql pool SKU. The list of SKUs may vary by region and support offer.
        :param pulumi.Input[str] sql_pool_name: The name of the sql pool.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        """
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        pulumi.set(__self__, "workspace_name", workspace_name)
        # Optional inputs are only stored when actually supplied.
        if location is not None:
            pulumi.set(__self__, "location", location)
        if sku is not None:
            pulumi.set(__self__, "sku", sku)
        if sql_pool_name is not None:
            pulumi.set(__self__, "sql_pool_name", sql_pool_name)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group. The name is case insensitive.
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter(name="workspaceName")
    def workspace_name(self) -> pulumi.Input[str]:
        """
        The name of the workspace.
        """
        return pulumi.get(self, "workspace_name")
    @workspace_name.setter
    def workspace_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "workspace_name", value)
    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        The geo-location where the resource lives
        """
        return pulumi.get(self, "location")
    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)
    @property
    @pulumi.getter
    def sku(self) -> Optional[pulumi.Input['SkuArgs']]:
        """
        The sql pool SKU. The list of SKUs may vary by region and support offer.
        """
        return pulumi.get(self, "sku")
    @sku.setter
    def sku(self, value: Optional[pulumi.Input['SkuArgs']]):
        pulumi.set(self, "sku", value)
    @property
    @pulumi.getter(name="sqlPoolName")
    def sql_pool_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the sql pool.
        """
        return pulumi.get(self, "sql_pool_name")
    @sql_pool_name.setter
    def sql_pool_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "sql_pool_name", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
class SqlPoolsV3(pulumi.CustomResource):
    """A sql pool resource.

    NOTE: generated by the Pulumi SDK Generator -- regenerate rather than
    hand-editing.
    """
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
                 sql_pool_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 workspace_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        A sql pool resource.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] location: The geo-location where the resource lives
        :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
        :param pulumi.Input[pulumi.InputType['SkuArgs']] sku: The sql pool SKU. The list of SKUs may vary by region and support offer.
        :param pulumi.Input[str] sql_pool_name: The name of the sql pool.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        :param pulumi.Input[str] workspace_name: The name of the workspace.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: SqlPoolsV3Args,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        A sql pool resource.
        :param str resource_name: The name of the resource.
        :param SqlPoolsV3Args args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: either an args object or
        # keyword properties; both paths funnel into _internal_init.
        resource_args, opts = _utilities.get_resource_args_opts(SqlPoolsV3Args, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
                 sql_pool_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 workspace_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = SqlPoolsV3Args.__new__(SqlPoolsV3Args)
            __props__.__dict__["location"] = location
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["sku"] = sku
            __props__.__dict__["sql_pool_name"] = sql_pool_name
            __props__.__dict__["tags"] = tags
            if workspace_name is None and not opts.urn:
                raise TypeError("Missing required property 'workspace_name'")
            __props__.__dict__["workspace_name"] = workspace_name
            # Output-only properties start as None and are filled by the engine.
            __props__.__dict__["current_service_objective_name"] = None
            __props__.__dict__["kind"] = None
            __props__.__dict__["name"] = None
            __props__.__dict__["requested_service_objective_name"] = None
            __props__.__dict__["sql_pool_guid"] = None
            __props__.__dict__["status"] = None
            __props__.__dict__["system_data"] = None
            __props__.__dict__["type"] = None
        # Aliases keep state continuity across provider/API-version renames.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:synapse/v20200401preview:SqlPoolsV3"), pulumi.Alias(type_="azure-native:synapse:SqlPoolsV3"), pulumi.Alias(type_="azure-nextgen:synapse:SqlPoolsV3"), pulumi.Alias(type_="azure-native:synapse/v20190601preview:SqlPoolsV3"), pulumi.Alias(type_="azure-nextgen:synapse/v20190601preview:SqlPoolsV3"), pulumi.Alias(type_="azure-native:synapse/v20201201:SqlPoolsV3"), pulumi.Alias(type_="azure-nextgen:synapse/v20201201:SqlPoolsV3"), pulumi.Alias(type_="azure-native:synapse/v20210301:SqlPoolsV3"), pulumi.Alias(type_="azure-nextgen:synapse/v20210301:SqlPoolsV3"), pulumi.Alias(type_="azure-native:synapse/v20210401preview:SqlPoolsV3"), pulumi.Alias(type_="azure-nextgen:synapse/v20210401preview:SqlPoolsV3")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(SqlPoolsV3, __self__).__init__(
            'azure-native:synapse/v20200401preview:SqlPoolsV3',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'SqlPoolsV3':
        """
        Get an existing SqlPoolsV3 resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = SqlPoolsV3Args.__new__(SqlPoolsV3Args)
        # All properties resolve from provider state on a lookup.
        __props__.__dict__["current_service_objective_name"] = None
        __props__.__dict__["kind"] = None
        __props__.__dict__["location"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["requested_service_objective_name"] = None
        __props__.__dict__["sku"] = None
        __props__.__dict__["sql_pool_guid"] = None
        __props__.__dict__["status"] = None
        __props__.__dict__["system_data"] = None
        __props__.__dict__["tags"] = None
        __props__.__dict__["type"] = None
        return SqlPoolsV3(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="currentServiceObjectiveName")
    def current_service_objective_name(self) -> pulumi.Output[str]:
        """
        The current service level objective name of the sql pool.
        """
        return pulumi.get(self, "current_service_objective_name")
    @property
    @pulumi.getter
    def kind(self) -> pulumi.Output[str]:
        """
        Kind of SqlPool.
        """
        return pulumi.get(self, "kind")
    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """
        The geo-location where the resource lives
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="requestedServiceObjectiveName")
    def requested_service_objective_name(self) -> pulumi.Output[str]:
        """
        The requested service level objective name of the sql pool.
        """
        return pulumi.get(self, "requested_service_objective_name")
    @property
    @pulumi.getter
    def sku(self) -> pulumi.Output[Optional['outputs.SkuResponse']]:
        """
        The sql pool SKU. The list of SKUs may vary by region and support offer.
        """
        return pulumi.get(self, "sku")
    @property
    @pulumi.getter(name="sqlPoolGuid")
    def sql_pool_guid(self) -> pulumi.Output[str]:
        """
        The Guid of the sql pool.
        """
        return pulumi.get(self, "sql_pool_guid")
    @property
    @pulumi.getter
    def status(self) -> pulumi.Output[str]:
        """
        The status of the sql pool.
        """
        return pulumi.get(self, "status")
    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
        """
        SystemData of SqlPool.
        """
        return pulumi.get(self, "system_data")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
| sdk/python/pulumi_azure_native/synapse/v20200401preview/sql_pools_v3.py | 13,465 | The set of arguments for constructing a SqlPoolsV3 resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] workspace_name: The name of the workspace.
:param pulumi.Input[str] location: The geo-location where the resource lives
:param pulumi.Input['SkuArgs'] sku: The sql pool SKU. The list of SKUs may vary by region and support offer.
:param pulumi.Input[str] sql_pool_name: The name of the sql pool.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
A sql pool resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] location: The geo-location where the resource lives
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[pulumi.InputType['SkuArgs']] sku: The sql pool SKU. The list of SKUs may vary by region and support offer.
:param pulumi.Input[str] sql_pool_name: The name of the sql pool.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[str] workspace_name: The name of the workspace.
A sql pool resource.
:param str resource_name: The name of the resource.
:param SqlPoolsV3Args args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
The current service level objective name of the sql pool.
Get an existing SqlPoolsV3 resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
Kind of SqlPool.
The geo-location where the resource lives
The geo-location where the resource lives
The name of the resource
The requested service level objective name of the sql pool.
The name of the resource group. The name is case insensitive.
The sql pool SKU. The list of SKUs may vary by region and support offer.
The sql pool SKU. The list of SKUs may vary by region and support offer.
The Guid of the sql pool.
The name of the sql pool.
The status of the sql pool.
SystemData of SqlPool.
Resource tags.
Resource tags.
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
The name of the workspace.
coding=utf-8 *** WARNING: this file was generated by the Pulumi SDK Generator. *** *** Do not edit by hand unless you're certain you know what you are doing! *** | 2,663 | en | 0.631135 |
import cv2
from PIL import ImageGrab
import numpy as np
def main():
    """Continuously grab a screen region, outline detected contours, and show it.

    Press 'q' in the preview window to quit.
    """
    while True:
        # bbox specifies the capture region as (left, top, right, bottom).
        screenshot = ImageGrab.grab(bbox=(0, 40, 1075, 640))
        frame = np.array(screenshot)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Inverse threshold: dark features become white blobs for contour search.
        _, binary = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY_INV)
        contours, _ = cv2.findContours(
            binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        # drawContours draws onto `frame` in place and also returns it.
        annotated = cv2.drawContours(frame, contours, -1, (0, 255, 0), 2)
        cv2.imshow("test", annotated)
        # waitKey both pumps the GUI event loop and polls the keyboard.
        if cv2.waitKey(25) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
# Run the capture loop only when executed as a script.
if __name__ == "__main__":
    main()
| main.py | 857 | bbox specifies specific region (bbox= x,y,width,height) cv2.waitKey(0) | 70 | en | 0.329442 |
from timetableparser import TimeTableParser
from timetablewriter import TimeTableWriter
# Parse both week variants and render them into a single Excel timetable.
timetable_parser = TimeTableParser(False)
timetable_writer = TimeTableWriter(True)

week_a_csv = "test/output_week_a.csv"
week_b_csv = "test/output_week_b.csv"

# Optional preprocessing steps (PDF decryption / table extraction), disabled:
#   timetable_parser.decrypt_pdf("test/a.pdf", "out_a.pdf")
#   timetable_parser.decrypt_pdf("test/b.pdf", "out_b.pdf")
#   timetable_parser.extract_table_from_pdf("out_a.pdf", week_a_csv)
#   timetable_parser.extract_table_from_pdf("out_b.pdf", week_b_csv)

week_a = timetable_parser.parse_csv(week_a_csv)
week_b = timetable_parser.parse_csv(week_b_csv)
timetable_writer.write_excel("Scott", week_a, week_b, "test/output.xlsx")
print("output file is `test/output.xlsx`")
| Pdf2TimeTable/test.py | 589 | parser.decrypt_pdf("test/a.pdf", "out_a.pdf") parser.decrypt_pdf("test/b.pdf", "out_b.pdf") parser.extract_table_from_pdf("out_a.pdf", csv_file_a) parser.extract_table_from_pdf("out_b.pdf", csv_file_b) | 201 | de | 0.109817 |
# -*-coding:Utf-8 -*
# Copyright (c) 2014 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Objectif rejoindre."""
from math import fabs, radians, sqrt
from vector import Vector
from primaires.vehicule.vecteur import Vecteur
from secondaires.navigation.constantes import *
from secondaires.navigation.equipage.objectif import Objectif
class Rejoindre(Objectif):
    """Objective: reach a fixed point.

    This objective asks a crew to steer its ship to a point given as
    coordinates. The target point must be static (a dedicated objective
    exists for moving targets, such as other ships, which embed their
    own tracking computation).

    This objective is responsible for finding a path between the current
    position and the target point. That includes picking detour headings
    when the direct course cannot be taken, with computations that can
    get fairly involved to choose the speed and heading of the
    intermediate legs.

    """

    def __init__(self, equipage, x=None, y=None, vitesse=1):
        Objectif.__init__(self, equipage, x, y, vitesse)
        self.x = x
        self.y = y
        self.vitesse = vitesse
        self.ancienne_vitesse = None
        self.vitesse_optimale = vitesse
        # Alternate heading (degrees) chosen when the direct course is blocked.
        self.autre_direction = None
        self.autoriser_vitesse_sup = True
        # (x, y) of the collision point while backing up; empty tuple otherwise.
        self.doit_reculer = ()
    def afficher(self):
        """Return the human-readable display of the objective (override hook)."""
        if self.doit_reculer:
            return "Doit reculer"
        navire = self.navire
        distance = self.get_distance()
        # Convert the math angle to a compass-style heading.
        direction = (distance.direction + 90) % 360
        msg_dist = get_nom_distance(distance)
        return "Cap sur {}° ({}), à {}".format(round(direction),
                distance.nom_direction, msg_dist)
    def get_distance(self):
        """Return the distance (Vecteur) between the ship and the destination.

        Builds a vector (Vecteur class defined in the vehicule module)
        representing the offset from the ship's position to the target.
        """
        navire = self.navire
        position = navire.opt_position
        o_x = position.x
        o_y = position.y
        d_x = self.x
        d_y = self.y
        distance = Vecteur(d_x - o_x, d_y - o_y, 0)
        return distance
    def trouver_distance_min(self, cible):
        """Find the minimum distance to the target ship.

        The distance is the minimum over all pairs of rooms between the
        origin ship and the target ship, compared at the water altitude.
        Returns ``(distance, closest target room)``.
        """
        navire = self.navire
        etendue = navire.etendue
        altitude = etendue.altitude
        salle_cible = None
        distance = None
        for salle in navire.salles.values():
            if salle.coords.z != altitude:
                continue
            x, y = salle.coords.x, salle.coords.y
            for t_salle in cible.salles.values():
                if t_salle.coords.z != altitude:
                    continue
                t_x, t_y = t_salle.coords.x, t_salle.coords.y
                t_distance = sqrt((t_x - x) ** 2 + (t_y - y) ** 2)
                if distance is None or t_distance < distance:
                    distance = t_distance
                    salle_cible = t_salle
        return distance, salle_cible
    def transmettre_controles(self):
        """Hand the computed controls (speed and heading) to the crew."""
        equipage = self.equipage
        navire = self.navire
        distance = self.get_distance()
        if self.autre_direction:
            direction = round(self.autre_direction)
        else:
            direction = round(distance.direction)
        # Create the controls if absent, otherwise update them in place.
        if equipage.controles.get("direction"):
            equipage.controles["direction"].direction = direction
        else:
            equipage.controler("direction", direction)
        vitesse = self.vitesse
        if equipage.controles.get("vitesse"):
            ancienne_vitesse = equipage.controles["vitesse"].vitesse
            equipage.controles["vitesse"].vitesse = vitesse
            # Only recompute when the requested speed actually changed.
            if vitesse != ancienne_vitesse:
                equipage.controles["vitesse"].calculer_vitesse()
        else:
            equipage.controler("vitesse", self.vitesse,
                    self.autoriser_vitesse_sup)
    def trouver_cap(self):
        """Find the heading, taking obstacles into account."""
        equipage = self.equipage
        navire = self.navire
        # If we are backing up, check whether that is still needed.
        if self.doit_reculer:
            x, y = self.doit_reculer
            p_x = navire.position.x
            p_y = navire.position.y
            max_distance = navire.get_max_distance_au_centre()
            # Far enough from the collision point: stop backing up.
            if sqrt((x - p_x) ** 2 + (y - p_y) ** 2) > max_distance + 1:
                self.doit_reculer = ()
            else:
                return
        # Examine the points reported by the lookout.
        # Without a lookout there is no way to avoid them.
        tries = equipage.vigie_tries
        # Nothing reported: keep the direct course.
        if not tries:
            self.autre_direction = None
            self.transmettre_controles()
            return
        # Only obstacle-like entries matter here.
        obstacles = tries.get("obstacle", {}).copy()
        obstacles.update(tries.get("salle", {}))
        obstacles.update(tries.get("sallenavire", {}))
        # Keep only obstacles at a dangerous relative angle,
        # between -90° and 90°.
        dangereux = obstacles.copy()
        for angle in obstacles.keys():
            if angle < -90 or angle > 90:
                del dangereux[angle]
        # No dangerous obstacle: resume the direct course.
        if not dangereux:
            self.ancienne_vitesse = None
            self.autre_direction = None
            self.transmettre_controles()
            return
        # Now look for the closest obstacle.
        min_angle = None
        min_distance = None
        for angle, (vecteur, point) in dangereux.items():
            if min_distance is None or vecteur.mag < min_distance:
                min_distance = vecteur.mag
                min_angle = angle
        # Slow down according to how close the obstacle is.
        if -45 <= min_angle <= 45:
            if min_distance <= 2:
                self.vitesse = 0.05
            elif min_distance < 10:
                self.vitesse = 0.2
            elif min_distance < 25:
                self.vitesse = 0.6
        # Then look for the best heading
        # (the best heading is the longest clear one).
        distance = 30
        angles = [i * 5 for i in range(0, 35)]
        for i in range(1, 35):
            angles.append(i * -5)
        # If we are not heading exactly towards the point (x, y),
        # consider changing course.
        o_distance = self.get_distance()
        if o_distance.norme < 30:
            distance = o_distance.norme
        relative = o_distance.direction - navire.direction.direction
        # Try angles closest to the wanted relative heading first.
        angles = sorted(angles, key=lambda a: fabs(a - relative))
        position = navire.opt_position
        while distance > 0:
            for angle in angles:
                vecteur = navire.opt_direction
                vecteur.mag = distance
                vecteur.around_z(radians(angle))
                if not navire.controller_collision(vecteur, collision=False,
                        marge=0.8):
                    if angle != 0:
                        self.info("Cap libre sur {}°".format(angle))
                        self.autre_direction = round((
                                navire.direction.direction + angle) % 360)
                    # A sharp turn is taken at zero speed.
                    if fabs(angle) > 30:
                        self.vitesse = 0
                    self.transmettre_controles()
                    return
            distance -= 5
        # No course change, but possibly a speed change.
        self.transmettre_controles()
    def creer(self):
        """The objective has just been created.

        Create the controls needed to reach the target point (x, y),
        detecting obstacles and choosing a replacement heading when
        necessary.
        """
        equipage = self.equipage
        commandant = self.commandant
        if commandant is None:
            return
        self.trouver_cap()
    def verifier(self, prioritaire):
        """Check that the objective is still valid.

        This method verifies that there is no obstacle on the assigned
        course (only when this objective has priority).
        """
        equipage = self.equipage
        navire = self.navire
        commandant = self.commandant
        if commandant is None:
            return
        if prioritaire:
            self.trouver_cap()
    def reagir_collision(self, salle, contre):
        """React to a collision: try to back the ship away."""
        if not self.doit_reculer:
            commandant = self.commandant
            if commandant is None:
                return
            personnage = commandant.personnage
            navire = self.navire
            equipage = self.equipage
            p_x = navire.position.x
            p_y = navire.position.y
            self.warning("Essaye de faire reculer le navire")
            self.doit_reculer = (p_x, p_y)
            # Drop the heading control, if any.
            equipage.retirer_controle("direction")
            if navire.gouvernail:
                equipage.demander("relacher_gouvernail",
                        personnage=personnage)
            # Ask to furl the sails when needed.
            if any(v.hissee for v in navire.voiles):
                equipage.demander("plier_voiles", None, personnage=personnage)
            # Ask to row backwards.
            rames = navire.rames
            if rames:
                # Center the oars first when needed.
                if any(r.orientation != 0 for r in rames):
                    equipage.demander("ramer", "centre",
                            personnage=personnage)
                equipage.demander("ramer", "arrière", personnage=personnage)
| src/secondaires/navigation/equipage/objectifs/rejoindre.py | 11,757 | Objectif rejoindre.
Cet objectif demande à un équipage de rejoindre un point précisé
en coordonnées. Le point indiqué doit être statique (il existe un
objectif particulier pour les points mobiles, comme les navires, qui
intègrent leur propre calcul).
Cet objectif est responsable de trouver un chemin entre le point
actuel et le point visé. Cela inclut le choix de chemins
détournés si le chemin direct ne peut être pris avec des calculs qui
peuvent être assez complexes pour déterminer la vitesse et direction
des chemins intermédiaires.
Méthode à redéfinir retournant l'affichage de l'objectif.
L'objectif est créé.
On crée les contrôles associés pour atteindre l'objectif
visé, à savoir, rejoindre le point (x, y), en essayant
de trouver les obstacles correspondants et un cap de remplacement
si nécessaire.
Retourne la distance (Vecteur) entre le navire et la destination.
Cette méthode crée un vecteur (class Vecteur définie dans
le module primaire vehicule) qui représente la distance entre
la position du navire et la destination.
Réagit à une collision.
Donne les contrôles indiqués (vitesse et direction).
Trouve le cap, tenant compte des obstacles.
Trouve la distance minimum.
Cette distance est fonction de la distance minimum entre
une salle du navire d'origine et une salle du navire cible.
Vérifie que l'objectif est toujours valide.
Dans cette méthode, on vérifie :
Qu'il n'y a aucun obstacle sur la trajectoire assignée
Objectif rejoindre.
-*-coding:Utf-8 -* Copyright (c) 2014 LE GOFF Vincent All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
Crée ou modifie les contrôles Si on doit reculer, vérifie que c'est toujours vrai On examine les points listés par la vigie Si il n'y a pas de vigie, pas le moyen de les éviter Si le dictionnaire est vide, ne fait rien On n'examine que les obstacles On s'intéresse seulement aux obstacles qui ont un angle dangereux, entre -90° et 90° Si il n'y a aucun obstacle, ne continue pas Maintenant on cherche la distance la plus courte En fonction de la distance, modifie la vitesse Cherche ensuite le meilleur cap On cherche le meilleur cap possible (c'est-à-dire le plus long) Si on est pas exactement dans la bonne direction pour rejoindre le point (x, y), on envisage de changer de cap On ne change pas de cap mais peut-être change-t-on de vitesse Supprime le contrôle de cap, si il existe Demande de plier les voiles si besoin Demande de ramer en marche arrière On doit centrer les rames si besoin | 3,866 | fr | 0.849197 |
"""
724. Minimum Partition
https://www.lintcode.com/problem/minimum-partition/description
01背包
算法班2020 C27 01背包变形
第1种dp定义
dp[i][j]: considering previous i items to fill <=j, what the maximum value
dp[i][j] = max(dp[i - 1][j], dp[i - 1][j - nums[i - 1]] + nums[i - 1])
dp[0][0] = 0
dp[i][0] = 0
answer
max(dp[n])
2d array
time limit exceeded
"""
class Solution:
    """
    @param nums: the given array
    @return: the minimum difference between their sums
    """
    def findMin(self, nums):
        # Split the array into two groups whose sums are as close as
        # possible. This is a 0/1 knapsack: find the largest subset sum
        # not exceeding half of the total.
        if not nums:
            return 0
        total = sum(nums)
        half = total // 2
        # best[c] == largest subset sum <= c using the items seen so far.
        best = [0] * (half + 1)
        for value in nums:
            # Walk capacities downward so each item is used at most once.
            for cap in range(half, value - 1, -1):
                candidate = best[cap - value] + value
                if candidate > best[cap]:
                    best[cap] = candidate
        # One side sums to max(best); the other gets the remainder.
        return total - 2 * max(best)
if __name__ == "__main__":
    # Ad-hoc smoke test: guard it so importing this module does not run a
    # full knapsack computation and print to stdout as a side effect.
    s = Solution()
    nums = [987,523,979,847,734,706,452,903,702,332,713,181,991,843,879,505,718,694,18,303,795,521,696,388,866,908,350,528,445,780,864,295,257,337,704,648,495,949,39,33,606,553,618,191,854,405,715,413,472,185,216,489,212,199,162,462,929,191,429,726,902,9,579,403,370,435,871,160,197,884,619,716,182,7,906,974,679,531,852,158,861,174,445,701,871,557,942,798,921,389,450,485,901,179,515,401,117,451,731,828,685,20,50,673,891,232,30,385,511,338,375,118,81,392,296,546,903,59,580,620,268,422,597,876,333,766,158,295,443,204,434,357,632,592,543,341,434,58,525,683,338,165,332,51,152,191,378,63,10,475,951,469,622,811,296,415,282,547,994,358,134,195,888,75,195,805,908,673,867,346,935,318,603,507,45,209,54,641,515,867,881,880,290,781,452,808,775,998,731,908,451,592,608,87,1000,812,30,673,393,380,241,135,421,144,954,64,747,502,633]
    print(s.findMin(nums))
@return: the minimum difference between their sums
724. Minimum Partition
https://www.lintcode.com/problem/minimum-partition/description
01背包
算法班2020 C27 01背包变形
第1种dp定义
dp[i][j]: considering previous i items to fill <=j, what the maximum value
dp[i][j] = max(dp[i - 1][j], dp[i - 1][j - nums[i - 1]] + nums[i - 1])
dp[0][0] = 0
dp[i][0] = 0
answer
max(dp[n])
2d array
time limit exceeded
write your code here | 447 | en | 0.524982 |
from Crypto.Cipher import PKCS1_OAEP
from Crypto.PublicKey import RSA
from django import forms
from taggit.forms import TagField
from dcim.models import Device
from extras.forms import (
AddRemoveTagsForm, CustomFieldBulkEditForm, CustomFieldFilterForm, CustomFieldModelForm, CustomFieldModelCSVForm,
)
from utilities.forms import (
APISelect, APISelectMultiple, BootstrapMixin, DynamicModelChoiceField, DynamicModelMultipleChoiceField,
FlexibleModelChoiceField, SlugField, StaticSelect2Multiple, TagFilterField,
)
from .constants import *
from .models import Secret, SecretRole, UserKey
def validate_rsa_key(key, is_secret=True):
    """
    Validate the format and type of an RSA key.

    :param key: Key material in PEM (base64) format
    :param is_secret: If True, require a private key; if False, require a public key
    :raises forms.ValidationError: If the key is malformed, of the wrong type, or
        unusable for PKCS#1 OAEP
    """
    if key.startswith('ssh-rsa '):
        # OpenSSH single-line serialization is not PEM; reject it up front.
        raise forms.ValidationError("OpenSSH line format is not supported. Please ensure that your public key is in PEM (base64) format.")
    try:
        key = RSA.importKey(key)
    except ValueError:
        raise forms.ValidationError("Invalid RSA key. Please ensure that your key is in PEM (base64) format.")
    except Exception as e:
        raise forms.ValidationError("Invalid key detected: {}".format(e))
    if is_secret and not key.has_private():
        raise forms.ValidationError("This looks like a public key. Please provide your private RSA key.")
    elif not is_secret and key.has_private():
        raise forms.ValidationError("This looks like a private key. Please provide your public RSA key.")
    try:
        # Ensure the key is actually usable for PKCS#1 OAEP encryption.
        PKCS1_OAEP.new(key)
    except Exception:
        raise forms.ValidationError("Error validating RSA key. Please ensure that your key supports PKCS#1 OAEP.")
#
# Secret roles
#
class SecretRoleForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for a SecretRole, including its user/group access lists."""
    slug = SlugField()
    class Meta:
        model = SecretRole
        fields = [
            'name', 'slug', 'description', 'users', 'groups',
        ]
        widgets = {
            'users': StaticSelect2Multiple(),
            'groups': StaticSelect2Multiple(),
        }
class SecretRoleCSVForm(forms.ModelForm):
    """Form for bulk-importing SecretRoles from CSV rows."""
    slug = SlugField()
    class Meta:
        model = SecretRole
        # Column order follows the model's declared CSV headers.
        fields = SecretRole.csv_headers
        help_texts = {
            'name': 'Name of secret role',
        }
#
# Secrets
#
class SecretForm(BootstrapMixin, CustomFieldModelForm):
    """Create/edit form for a Secret; the plaintext is entered twice for verification."""
    plaintext = forms.CharField(
        max_length=SECRET_PLAINTEXT_MAX_LENGTH,
        required=False,
        label='Plaintext',
        widget=forms.PasswordInput(
            attrs={
                'class': 'requires-session-key',
            }
        )
    )
    plaintext2 = forms.CharField(
        max_length=SECRET_PLAINTEXT_MAX_LENGTH,
        required=False,
        label='Plaintext (verify)',
        widget=forms.PasswordInput()
    )
    role = DynamicModelChoiceField(
        queryset=SecretRole.objects.all(),
        widget=APISelect(
            api_url="/api/secrets/secret-roles/"
        )
    )
    tags = TagField(
        required=False
    )
    class Meta:
        model = Secret
        fields = [
            'role', 'name', 'plaintext', 'plaintext2', 'tags',
        ]
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # A plaintext value is required when creating a new Secret
        if not self.instance.pk:
            self.fields['plaintext'].required = True
    def clean(self):
        # Run the parent ModelForm cleaning (uniqueness checks, custom fields).
        super().clean()
        # Verify that the provided plaintext values match. Use .get() because a
        # field that failed its own validation is absent from cleaned_data and
        # direct indexing would raise KeyError instead of a form error.
        if self.cleaned_data.get('plaintext') != self.cleaned_data.get('plaintext2'):
            raise forms.ValidationError({
                'plaintext2': "The two given plaintext values do not match. Please check your input."
            })
class SecretCSVForm(CustomFieldModelCSVForm):
    """Form for bulk-importing Secrets from CSV rows."""
    device = FlexibleModelChoiceField(
        queryset=Device.objects.all(),
        to_field_name='name',
        help_text='Device name or ID',
        error_messages={
            'invalid_choice': 'Device not found.',
        }
    )
    role = forms.ModelChoiceField(
        queryset=SecretRole.objects.all(),
        to_field_name='name',
        help_text='Name of assigned role',
        error_messages={
            'invalid_choice': 'Invalid secret role.',
        }
    )
    plaintext = forms.CharField(
        help_text='Plaintext secret data'
    )
    class Meta:
        model = Secret
        fields = Secret.csv_headers
        help_texts = {
            'name': 'Name or username',
        }
    def save(self, *args, **kwargs):
        # Attach the submitted plaintext to the saved instance; presumably the
        # caller encrypts it once a session key is available — confirm against
        # the import view.
        s = super().save(*args, **kwargs)
        s.plaintext = str(self.cleaned_data['plaintext'])
        return s
class SecretBulkEditForm(BootstrapMixin, AddRemoveTagsForm, CustomFieldBulkEditForm):
    """Bulk-edit form applied to a set of Secrets selected by primary key."""
    pk = forms.ModelMultipleChoiceField(
        queryset=Secret.objects.all(),
        widget=forms.MultipleHiddenInput()
    )
    role = DynamicModelChoiceField(
        queryset=SecretRole.objects.all(),
        required=False,
        widget=APISelect(
            api_url="/api/secrets/secret-roles/"
        )
    )
    name = forms.CharField(
        max_length=100,
        required=False
    )
    class Meta:
        # Fields that may be blanked out during a bulk edit.
        nullable_fields = [
            'name',
        ]
class SecretFilterForm(BootstrapMixin, CustomFieldFilterForm):
    """Filter form for the Secret list view."""
    model = Secret
    q = forms.CharField(
        required=False,
        label='Search'
    )
    role = DynamicModelMultipleChoiceField(
        queryset=SecretRole.objects.all(),
        to_field_name='slug',
        # Filter fields must be optional: requiring a role would make the
        # unfiltered list view invalid (q and tag are optional as well).
        required=False,
        widget=APISelectMultiple(
            api_url="/api/secrets/secret-roles/",
            value_field="slug",
        )
    )
    tag = TagFilterField(model)
#
# UserKeys
#
class UserKeyForm(BootstrapMixin, forms.ModelForm):
    """Form for a user to submit (or replace) their public RSA key."""
    class Meta:
        model = UserKey
        fields = ['public_key']
        help_texts = {
            'public_key': "Enter your public RSA key. Keep the private one with you; you'll need it for decryption. "
                          "Please note that passphrase-protected keys are not supported.",
        }
        labels = {
            'public_key': ''
        }
    def clean_public_key(self):
        key = self.cleaned_data['public_key']
        # Validate the RSA key format (must be a PEM public key).
        validate_rsa_key(key, is_secret=False)
        return key
class ActivateUserKeyForm(forms.Form):
    """Form to select UserKeys and collect the private RSA key used to activate them."""
    _selected_action = forms.ModelMultipleChoiceField(
        queryset=UserKey.objects.all(),
        label='User Keys'
    )
    # NOTE(review): the exact activation mechanics live in the caller (an
    # admin action, presumably) — confirm there before relying on this form.
    secret_key = forms.CharField(
        widget=forms.Textarea(
            attrs={
                'class': 'vLargeTextField',
            }
        ),
        label='Your private key'
    )
| netbox/secrets/forms.py | 6,592 | Validate the format and type of an RSA key.
Secret roles Secrets A plaintext value is required when creating a new Secret Verify that the provided plaintext values match UserKeys Validate the RSA key format. | 209 | en | 0.528187 |
import tensorflow as tf
import numpy as np
def _tf_fspecial_gauss(size, sigma, ch=1):
    """Build a normalized Gaussian window, mimicking MATLAB's fspecial().

    Returns a [size, size, ch, 1] tensor usable as a conv2d filter.
    """
    # NOTE: keep -size//2 as written — it is (-size)//2, which differs from
    # -(size//2) for odd sizes and defines the original grid placement.
    grid_x, grid_y = np.mgrid[-size//2 + 1:size//2 + 1, -size//2 + 1:size//2 + 1]
    # Append the [in_channels, out_channels] filter axes in one step.
    grid_x = grid_x[:, :, np.newaxis, np.newaxis]
    grid_y = grid_y[:, :, np.newaxis, np.newaxis]
    xs = tf.constant(grid_x, dtype=tf.float32)
    ys = tf.constant(grid_y, dtype=tf.float32)
    kernel = tf.exp(-((xs**2 + ys**2)/(2.0*sigma**2)))
    kernel = tf.tile(kernel, [1, 1, ch, 1])
    # Normalize so the window sums to 1.
    return kernel / tf.reduce_sum(kernel)
def tf_ssim(img1, img2, cs_map=False, mean_metric=True, size=11, sigma=0.5):
    """Structural similarity (SSIM) between two RGB image batches.

    img1, img2 -- 4-D NHWC RGB tensors; values are assumed in [0, 1]
    since the dynamic range L is hard-coded to 1 (TODO confirm callers).
    cs_map     -- if True, also return the contrast-structure component.
    mean_metric-- if True, reduce the SSIM map to its scalar mean.
    size, sigma-- Gaussian window size and standard deviation.
    """
    img1 = tf.image.rgb_to_grayscale(img1)
    img2 = tf.image.rgb_to_grayscale(img2)
    window = _tf_fspecial_gauss(size, sigma,
                                ch=img1.get_shape().as_list()[-1]) # window shape [size, size]
    # Standard SSIM stabilization constants.
    K1 = 0.01
    K2 = 0.03
    L = 1  # depth of image (255 in case the image has a different scale)
    C1 = (K1*L)**2
    C2 = (K2*L)**2
    # Local means via Gaussian-weighted convolution.
    mu1 = tf.nn.conv2d(img1, window, strides=[1, 1, 1, 1], padding='VALID')
    mu2 = tf.nn.conv2d(img2, window, strides=[1, 1, 1, 1], padding='VALID')
    mu1_sq = mu1*mu1
    mu2_sq = mu2*mu2
    mu1_mu2 = mu1*mu2
    # Local (co)variances: E[x*y] - E[x]E[y].
    sigma1_sq = tf.nn.conv2d(img1*img1, window, strides=[1, 1, 1, 1],
                             padding='VALID') - mu1_sq
    sigma2_sq = tf.nn.conv2d(img2*img2, window, strides=[1, 1, 1, 1],
                             padding='VALID') - mu2_sq
    sigma12 = tf.nn.conv2d(img1*img2, window, strides=[1, 1, 1, 1],
                           padding='VALID') - mu1_mu2
    if cs_map:
        # Return both the SSIM map and the contrast-structure map.
        value = (
            ((2*mu1_mu2 + C1) * (2*sigma12 + C2)) / (
                (mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)
            ), (2.0*sigma12 + C2)/(sigma1_sq + sigma2_sq + C2)
        )
    else:
        value = ((2*mu1_mu2 + C1)*(2*sigma12 + C2)) / (
            (mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
    if mean_metric:
        value = tf.reduce_mean(value)
    return value
def tf_ms_ssim(img1, img2, mean_metric=True, level=5):
    """Multi-scale SSIM (MS-SSIM) between two RGB image batches.

    At each of `level` scales the SSIM/contrast-structure maps are
    computed, then both images are 2x average-pooled for the next
    scale; per-scale results are combined with the standard exponents.

    :param img1, img2: 4-D NHWC image tensors
    :param mean_metric: if True, reduce the result to a scalar mean
    :param level: number of scales (the fixed weight vector assumes 5)
    """
    # Canonical five-scale MS-SSIM weights.
    weight = tf.constant([0.0448, 0.2856, 0.3001, 0.2363, 0.1333], dtype=tf.float32)
    mssim = []
    mcs = []
    for l in range(level):
        ssim_map, cs_map = tf_ssim(img1, img2, cs_map=True, mean_metric=False)
        mssim.append(tf.reduce_mean(ssim_map))
        mcs.append(tf.reduce_mean(cs_map))
        # Downsample both images by 2 for the next scale.
        filtered_im1 = tf.nn.avg_pool(img1, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
        filtered_im2 = tf.nn.avg_pool(img2, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
        img1 = filtered_im1
        img2 = filtered_im2
    # List to tensor of dim D+1. tf.pack() was removed in TensorFlow 1.0;
    # tf.stack() is the direct, behavior-identical replacement.
    mssim = tf.stack(mssim, axis=0)
    mcs = tf.stack(mcs, axis=0)
    # CS terms from the coarse scales, SSIM from the finest remaining scale.
    value = (tf.reduce_prod(
        mcs[0:level-1]**weight[0:level-1]) * (mssim[level-1]**weight[level-1]))
    if mean_metric:
        value = tf.reduce_mean(value)
    return value
| ssim.py | 2,989 | Function to mimic the 'fspecial' gaussian MATLAB function
window shape [size, size] depth of image (255 in case the image has a differnt scale) list to tensor of dim D+1 | 176 | en | 0.782164 |
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
from rosbridge_library.protocol import Protocol
from rosbridge_library.capabilities.call_service import CallService
from rosbridge_library.capabilities.advertise import Advertise
from rosbridge_library.capabilities.publish import Publish
from rosbridge_library.capabilities.subscribe import Subscribe
# imports for defragmentation
from rosbridge_library.capabilities.defragmentation import Defragment
# imports for external service_server
from rosbridge_library.capabilities.advertise_service import AdvertiseService
from rosbridge_library.capabilities.service_response import ServiceResponse
from rosbridge_library.capabilities.unadvertise_service import UnadvertiseService
class RosbridgeProtocol(Protocol):
    """ Adds the handlers for the rosbridge opcodes """
    # Capability classes instantiated for every new client connection.
    rosbridge_capabilities = [CallService, Advertise, Publish, Subscribe, Defragment, AdvertiseService, ServiceResponse, UnadvertiseService]
    # NOTE(review): this print loop runs once at import time (class-body
    # execution) and leaves `cap` behind as a class attribute; consider a
    # logger instead.
    print("registered capabilities (classes):")
    for cap in rosbridge_capabilities:
        print("  -", str(cap))
    # Optional configuration supplied by the hosting server (may stay None).
    parameters = None
    def __init__(self, client_id, parameters = None):
        self.parameters = parameters
        Protocol.__init__(self, client_id)
        # Attach one instance of each capability to this protocol.
        for capability_class in self.rosbridge_capabilities:
            self.add_capability(capability_class)
| base-image/rosbridge/rosbridge_library/src/rosbridge_library/rosbridge_protocol.py | 2,940 | Adds the handlers for the rosbridge opcodes
Software License Agreement (BSD License) Copyright (c) 2012, Willow Garage, Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Willow Garage, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. imports for defragmentation imports for external service_server | 1,652 | en | 0.874466 |
from __future__ import print_function
import sys
import os
import getopt
import re
import string
import errno
import six
from jsbeautifier.__version__ import __version__
#
# The MIT License (MIT)
# Copyright (c) 2007-2013 Einar Lielmanis and contributors.
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Originally written by Einar Lielmanis et al.,
# Conversion to python by Einar Lielmanis, einar@jsbeautifier.org,
# Parsing improvement for brace-less and semicolon-less statements
# by Liam Newman <bitwiseman@gmail.com>
# Python is not my native language, feel free to push things around.
#
# Use either from command line (script displays its usage when run
# without any parameters),
#
#
# or, alternatively, use it as a module:
#
# import jsbeautifier
# res = jsbeautifier.beautify('your javascript string')
# res = jsbeautifier.beautify_file('some_file.js')
#
# you may specify some options:
#
# opts = jsbeautifier.default_options()
# opts.indent_size = 2
# res = jsbeautifier.beautify('some javascript', opts)
#
#
# Here are the available options: (read source)
class BeautifierOptions:
    """Settings bag consumed by the beautifier; attributes mirror the CLI options."""
    def __init__(self):
        # Indentation: size/character, or hard tabs when indent_with_tabs is set.
        self.indent_size = 4
        self.indent_char = ' '
        self.indent_with_tabs = False
        # Blank-line preservation from the input.
        self.preserve_newlines = True
        self.max_preserve_newlines = 10
        # Spacing inside parentheses.
        self.space_in_paren = False
        self.space_in_empty_paren = False
        # Feature toggles.
        self.e4x = False
        self.jslint_happy = False
        self.brace_style = 'collapse'
        self.keep_array_indentation = False
        self.keep_function_indentation = False
        self.eval_code = False
        self.unescape_strings = False
        self.wrap_line_length = 0
        self.break_chained_methods = False
    def __repr__(self):
        # NOTE(review): not every option appears in this dump (e.g. e4x,
        # break_chained_methods, space_in_empty_paren).
        return \
"""indent_size = %d
indent_char = [%s]
preserve_newlines = %s
max_preserve_newlines = %d
space_in_paren = %s
jslint_happy = %s
indent_with_tabs = %s
brace_style = %s
keep_array_indentation = %s
eval_code = %s
wrap_line_length = %s
unescape_strings = %s
""" % ( self.indent_size,
            self.indent_char,
            self.preserve_newlines,
            self.max_preserve_newlines,
            self.space_in_paren,
            self.jslint_happy,
            self.indent_with_tabs,
            self.brace_style,
            self.keep_array_indentation,
            self.eval_code,
            self.wrap_line_length,
            self.unescape_strings,
            )
class BeautifierFlags:
    """Mutable beautifier state for one nesting frame of the given mode."""
    def __init__(self, mode):
        self.mode = mode
        self.parent = None
        # Last token text / word seen while in this frame.
        self.last_text = ''
        self.last_word = ''
        # Statement-classification flags.
        self.declaration_statement = False
        self.declaration_assignment = False
        self.in_html_comment = False
        self.multiline_frame = False
        # Control-structure tracking (if/else, do/while, switch cases).
        self.if_block = False
        self.else_block = False
        self.do_block = False
        self.do_while = False
        self.in_case = False
        self.in_case_statement = False
        self.case_body = False
        # Indentation bookkeeping.
        self.indentation_level = 0
        self.line_indent_level = 0
        self.start_line_index = 0
        self.ternary_depth = 0
        self.had_comment = False
    def apply_base(self, flags_base, added_newline):
        """Inherit state from `flags_base` when entering a nested frame."""
        # Keep the deeper of the base frame's block and line indent levels
        # unless a newline was just emitted.
        next_indent_level = flags_base.indentation_level;
        if not added_newline and \
            flags_base.line_indent_level > next_indent_level:
            next_indent_level = flags_base.line_indent_level;
        self.parent = flags_base;
        self.last_text = flags_base.last_text
        self.last_word = flags_base.last_word
        self.indentation_level = next_indent_level
# Using object instead of string to allow for later expansion of info about each line
class OutputLine:
    """One physical output line; token strings are accumulated in `text`."""
    def __init__(self):
        # Joined later to produce the final line string.
        self.text = []
class Acorn:
    """Unicode-aware character classification for Javascript identifiers.

    Ported from the acorn Javascript parser: decides whether a code point
    may start or continue an identifier, falling back to the big regexps
    below for code points above 127.
    """
    def __init__(self):
        # This section of code was translated to python from acorn (javascript).
        #
        # Acorn was written by Marijn Haverbeke and released under an MIT
        # license. The Unicode regexps (for identifiers and whitespace) were
        # taken from [Esprima](http://esprima.org) by Ariya Hidayat.
        #
        # Git repositories for Acorn are available at
        #
        # http://marijnhaverbeke.nl/git/acorn
        # https://github.com/marijnh/acorn.git
        # ## Character categories
        # Big ugly regular expressions that match characters in the
        # whitespace, identifier, and identifier-start categories. These
        # are only applied when a character is found to actually have a
        # code point above 128.
        self.nonASCIIwhitespace = re.compile(six.u("[\u1680\u180e\u2000-\u200a\u202f\u205f\u3000\ufeff]"))
        self.nonASCIIidentifierStartChars = six.u("\xaa\xb5\xba\xc0-\xd6\xd8-\xf6\xf8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0370-\u0374\u0376\u0377\u037a-\u037d\u0386\u0388-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u048a-\u0527\u0531-\u0556\u0559\u0561-\u0587\u05d0-\u05ea\u05f0-\u05f2\u0620-\u064a\u066e\u066f\u0671-\u06d3\u06d5\u06e5\u06e6\u06ee\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5\u07b1\u07ca-\u07ea\u07f4\u07f5\u07fa\u0800-\u0815\u081a\u0824\u0828\u0840-\u0858\u08a0\u08a2-\u08ac\u0904-\u0939\u093d\u0950\u0958-\u0961\u0971-\u0977\u0979-\u097f\u0985-\u098c\u098f\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc\u09dd\u09df-\u09e1\u09f0\u09f1\u0a05-\u0a0a\u0a0f\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32\u0a33\u0a35\u0a36\u0a38\u0a39\u0a59-\u0a5c\u0a5e\u0a72-\u0a74\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2\u0ab3\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0\u0ae1\u0b05-\u0b0c\u0b0f\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32\u0b33\u0b35-\u0b39\u0b3d\u0b5c\u0b5d\u0b5f-\u0b61\u0b71\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99\u0b9a\u0b9c\u0b9e\u0b9f\u0ba3\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c33\u0c35-\u0c39\u0c3d\u0c58\u0c59\u0c60\u0c61\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cde\u0ce0\u0ce1\u0cf1\u0cf2\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d\u0d4e\u0d60\u0d61\u0d7a-\u0d7f\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0e01-\u0e30\u0e32\u0e33\u0e40-\u0e46\u0e81\u0e82\u0e84\u0e87\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa\u0eab\u0ead-\u0eb0\u0eb2\u0eb3\u0ebd\u0ec0-\u0ec4\u0ec6\u0edc-\u0edf\u0f00\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8c\u1000-\u102a\u103f\u1050-\u1055\u105a-\u105d\u1061\u1065\u1066\u106e-\u1070\u1075-\u1081\u108e\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\
u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1380-\u138f\u13a0-\u13f4\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16ee-\u16f0\u1700-\u170c\u170e-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176c\u176e-\u1770\u1780-\u17b3\u17d7\u17dc\u1820-\u1877\u1880-\u18a8\u18aa\u18b0-\u18f5\u1900-\u191c\u1950-\u196d\u1970-\u1974\u1980-\u19ab\u19c1-\u19c7\u1a00-\u1a16\u1a20-\u1a54\u1aa7\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae\u1baf\u1bba-\u1be5\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c7d\u1ce9-\u1cec\u1cee-\u1cf1\u1cf5\u1cf6\u1d00-\u1dbf\u1e00-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1fd6-\u1fdb\u1fe0-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u2071\u207f\u2090-\u209c\u2102\u2107\u210a-\u2113\u2115\u2119-\u211d\u2124\u2126\u2128\u212a-\u212d\u212f-\u2139\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2ce4\u2ceb-\u2cee\u2cf2\u2cf3\u2d00-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u2e2f\u3005-\u3007\u3021-\u3029\u3031-\u3035\u3038-\u303c\u3041-\u3096\u309d-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312d\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fcc\ua000-\ua48c\ua4d0-\ua4fd\ua500-\ua60c\ua610-\ua61f\ua62a\ua62b\ua640-\ua66e\ua67f-\ua697\ua6a0-\ua6ef\ua717-\ua71f\ua722-\ua788\ua78b-\ua78e\ua790-\ua793\ua7a0-\ua7aa\ua7f8-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822\ua840-\ua873\ua882-\ua8b3\ua8f2-\ua8f7\ua8fb\ua90a-\ua925\ua930-\ua946\ua960-\ua97c\ua984-\ua9b2\ua9cf\uaa00-\uaa28\uaa40-\uaa42\uaa44-\uaa4b\uaa60-\uaa76\uaa7a\uaa80-\uaaaf\uaab1\uaab5\uaab6\uaab9-\uaabd\uaac0\uaac2\uaadb-\uaadd\uaae0-\uaaea\uaaf2-\uaaf4\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uabc0-\uabe2\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d\ufb1f-\ufb2
8\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40\ufb41\ufb43\ufb44\ufb46-\ufbb1\ufbd3-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdfb\ufe70-\ufe74\ufe76-\ufefc\uff21-\uff3a\uff41-\uff5a\uff66-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc")
        self.nonASCIIidentifierChars = six.u("\u0300-\u036f\u0483-\u0487\u0591-\u05bd\u05bf\u05c1\u05c2\u05c4\u05c5\u05c7\u0610-\u061a\u0620-\u0649\u0672-\u06d3\u06e7-\u06e8\u06fb-\u06fc\u0730-\u074a\u0800-\u0814\u081b-\u0823\u0825-\u0827\u0829-\u082d\u0840-\u0857\u08e4-\u08fe\u0900-\u0903\u093a-\u093c\u093e-\u094f\u0951-\u0957\u0962-\u0963\u0966-\u096f\u0981-\u0983\u09bc\u09be-\u09c4\u09c7\u09c8\u09d7\u09df-\u09e0\u0a01-\u0a03\u0a3c\u0a3e-\u0a42\u0a47\u0a48\u0a4b-\u0a4d\u0a51\u0a66-\u0a71\u0a75\u0a81-\u0a83\u0abc\u0abe-\u0ac5\u0ac7-\u0ac9\u0acb-\u0acd\u0ae2-\u0ae3\u0ae6-\u0aef\u0b01-\u0b03\u0b3c\u0b3e-\u0b44\u0b47\u0b48\u0b4b-\u0b4d\u0b56\u0b57\u0b5f-\u0b60\u0b66-\u0b6f\u0b82\u0bbe-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcd\u0bd7\u0be6-\u0bef\u0c01-\u0c03\u0c46-\u0c48\u0c4a-\u0c4d\u0c55\u0c56\u0c62-\u0c63\u0c66-\u0c6f\u0c82\u0c83\u0cbc\u0cbe-\u0cc4\u0cc6-\u0cc8\u0cca-\u0ccd\u0cd5\u0cd6\u0ce2-\u0ce3\u0ce6-\u0cef\u0d02\u0d03\u0d46-\u0d48\u0d57\u0d62-\u0d63\u0d66-\u0d6f\u0d82\u0d83\u0dca\u0dcf-\u0dd4\u0dd6\u0dd8-\u0ddf\u0df2\u0df3\u0e34-\u0e3a\u0e40-\u0e45\u0e50-\u0e59\u0eb4-\u0eb9\u0ec8-\u0ecd\u0ed0-\u0ed9\u0f18\u0f19\u0f20-\u0f29\u0f35\u0f37\u0f39\u0f41-\u0f47\u0f71-\u0f84\u0f86-\u0f87\u0f8d-\u0f97\u0f99-\u0fbc\u0fc6\u1000-\u1029\u1040-\u1049\u1067-\u106d\u1071-\u1074\u1082-\u108d\u108f-\u109d\u135d-\u135f\u170e-\u1710\u1720-\u1730\u1740-\u1750\u1772\u1773\u1780-\u17b2\u17dd\u17e0-\u17e9\u180b-\u180d\u1810-\u1819\u1920-\u192b\u1930-\u193b\u1951-\u196d\u19b0-\u19c0\u19c8-\u19c9\u19d0-\u19d9\u1a00-\u1a15\u1a20-\u1a53\u1a60-\u1a7c\u1a7f-\u1a89\u1a90-\u1a99\u1b46-\u1b4b\u1b50-\u1b59\u1b6b-\u1b73\u1bb0-\u1bb9\u1be6-\u1bf3\u1c00-\u1c22\u1c40-\u1c49\u1c5b-\u1c7d\u1cd0-\u1cd2\u1d00-\u1dbe\u1e01-\u1f15\u200c\u200d\u203f\u2040\u2054\u20d0-\u20dc\u20e1\u20e5-\u20f0\u2d81-\u2d96\u2de0-\u2dff\u3021-\u3028\u3099\u309a\ua640-\ua66d\ua674-\ua67d\ua69f\ua6f0-\ua6f1\ua7f8-\ua800\ua806\ua80b\ua823-\ua827\ua880-\ua881\ua8b4-\ua8c4\ua8d0-\ua8d9\ua8f3-\ua8f7\ua900-\ua909\ua926-\ua92d\ua930-\ua945\ua980-\u
a983\ua9b3-\ua9c0\uaa00-\uaa27\uaa40-\uaa41\uaa4c-\uaa4d\uaa50-\uaa59\uaa7b\uaae0-\uaae9\uaaf2-\uaaf3\uabc0-\uabe1\uabec\uabed\uabf0-\uabf9\ufb20-\ufb28\ufe00-\ufe0f\ufe20-\ufe26\ufe33\ufe34\ufe4d-\ufe4f\uff10-\uff19\uff3f")
        self.nonASCIIidentifierStart = re.compile("[" + self.nonASCIIidentifierStartChars + "]")
        self.nonASCIIidentifier = re.compile("[" + self.nonASCIIidentifierStartChars + self.nonASCIIidentifierChars + "]")
        # Whether a single character denotes a newline.
        self.newline = re.compile(six.u("[\n\r\u2028\u2029]"))
        # Matches a whole line break (where CRLF is considered a single
        # line break). Used to count lines.
        self.lineBreak = re.compile(six.u("\r\n|[\n\r\u2028\u2029]"))
    # Test whether a given character code starts an identifier.
    def isIdentifierStart(self, code):
        """Return True if code point `code` may begin an identifier."""
        if code < 65:
            return code == 36  # '$'
        if code < 91:
            return True  # 'A'-'Z'
        if code < 97:
            return code == 95  # '_'
        if code < 123:
            return True;  # 'a'-'z'
        # Above ASCII: consult the Unicode start-character regexp.
        return code >= 0xaa and self.nonASCIIidentifierStart.match(six.unichr(code)) != None;
    # Test whether a given character is part of an identifier.
    def isIdentifierChar(self, code):
        """Return True if code point `code` may continue an identifier."""
        if code < 48:
            return code == 36;  # '$'
        if code < 58:
            return True;  # '0'-'9'
        if code < 65:
            return False;
        if code < 91:
            return True;  # 'A'-'Z'
        if code < 97:
            return code == 95;  # '_'
        if code < 123:
            return True;  # 'a'-'z'
        # Above ASCII: consult the Unicode continuation-character regexp.
        return code >= 0xaa and self.nonASCIIidentifier.match(six.unichr(code)) != None;
def default_options():
    """Return a fresh BeautifierOptions instance holding the defaults."""
    opts = BeautifierOptions()
    return opts
def beautify(string, opts=None):
    """Beautify the JavaScript source in `string` and return the result.

    `opts` defaults to a fresh default-options object.  The previous
    signature used `opts=default_options()`, which is evaluated once at
    definition time: every default call shared (and could leak mutations
    through) a single BeautifierOptions instance.
    """
    if opts is None:
        opts = default_options()
    b = Beautifier()
    return b.beautify(string, opts)
def beautify_file(file_name, opts=None):
    """Beautify the contents of `file_name` ('-' reads stdin) and return the result.

    Fixes two defects in the original:
    - the opened file was never closed (resource leak); a `with` block
      now closes it deterministically, and stdin is never closed;
    - the `opts=default_options()` default was a single shared instance
      evaluated at definition time.
    """
    if opts is None:
        opts = default_options()
    if file_name == '-':  # stdin
        return beautify(sys.stdin.read(), opts)
    with open(file_name) as stream:
        return beautify(stream.read(), opts)
def usage(stream=sys.stdout):
    """Write the command-line help text to `stream`.

    Returns a process exit code: 1 when writing to stderr (i.e. the help
    was shown because of a usage error), 0 otherwise.
    """
    print("jsbeautifier.py@" + __version__ + """
Javascript beautifier (http://jsbeautifier.org/)
Usage: jsbeautifier.py [options] <infile>
<infile> can be "-", which means stdin.
<outfile> defaults to stdout
Input options:
-i, --stdin read input from stdin
Output options:
-s, --indent-size=NUMBER indentation size. (default 4).
-c, --indent-char=CHAR character to indent with. (default space).
-t, --indent-with-tabs Indent with tabs, overrides -s and -c
-d, --disable-preserve-newlines do not preserve existing line breaks.
-P, --space-in-paren add padding spaces within paren, ie. f( a, b )
-E, --space-in-empty-paren Add a single space inside empty paren, ie. f( )
-j, --jslint-happy more jslint-compatible output
-b, --brace-style=collapse brace style (collapse, expand, end-expand)
-k, --keep-array-indentation keep array indentation.
-o, --outfile=FILE specify a file to output to (default stdout)
-f, --keep-function-indentation Do not re-indent function bodies defined in var lines.
-x, --unescape-strings Decode printable chars encoded in \\xNN notation.
-X, --e4x Pass E4X xml literals through untouched
-w, --wrap-line-length Attempt to wrap line when it exceeds this length.
NOTE: Line continues until next wrap point is found.
Rarely needed options:
--eval-code evaluate code if a JS interpreter is
installed. May be useful with some obfuscated
script but poses a potential security issue.
-l, --indent-level=NUMBER initial indentation level. (default 0).
-h, --help, --usage prints this help statement.
-v, --version Show the version
""", file=stream)
    # Exit code convention: non-zero only when help went to stderr.
    if stream == sys.stderr:
        return 1
    else:
        return 0
class MODE:
    """Parser frame modes tracked on the beautifier's mode stack.

    Plain integer constants (0..6) assigned via tuple unpacking.
    """
    (BlockStatement, Statement, ObjectLiteral, ArrayLiteral,
     ForInitializer, Conditional, Expression) = range(7)
class Beautifier:
def __init__(self, opts = default_options() ):
self.opts = opts
self.blank_state()
self.acorn = Acorn();
    def blank_state(self):
        """Reset all per-run parser/output state (called before each beautify run)."""
        # internal flags
        self.flags = None
        self.previous_flags = None
        self.flag_store = []
        self.input_wanted_newline = False
        # NOTE(review): this mutates the shared opts object in place when
        # indent_with_tabs is set — callers reusing the same opts see the change.
        if self.opts.indent_with_tabs:
            self.opts.indent_char = "\t"
            self.opts.indent_size = 1
        self.indent_string = self.opts.indent_char * self.opts.indent_size
        self.preindent_string = ''
        self.last_type = 'TK_START_BLOCK' # last token type
        self.last_last_text = '' # pre-last token text
        self.input = None
        # Output is accumulated as a list of OutputLine objects, each holding
        # a list of text fragments; joined at the end of beautify().
        self.output_lines = [ OutputLine() ]
        self.output_space_before_token = False
        self.whitespace_before_token = []
        self.whitespace = ["\n", "\r", "\t", " "]
        self.wordchar = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_$'
        self.digits = '0123456789'
        # Punctuation is built as a space-separated string, then split into a list.
        self.punct = '+ - * / % & ++ -- = += -= *= /= %= == === != !== > < >= <= >> << >>> >>>= >>= <<= && &= | || ! ~ , : ? ^ ^= |= :: =>'
        self.punct += ' <?= <? ?> <%= <% %>'
        self.punct = self.punct.split(' ')
        # Words which always should start on a new line
        self.line_starters = 'continue,try,throw,return,var,let,const,if,switch,case,default,for,while,break,function,yield'.split(',')
        self.reserved_words = self.line_starters + ['do', 'in', 'else', 'get', 'set', 'new', 'catch', 'finally', 'typeof'];
        self.set_mode(MODE.BlockStatement)
        self.parser_pos = 0
    def beautify(self, s, opts = None ):
        """Beautify source string `s` and return the formatted result.

        Tokenizes `s` with get_next_token() and dispatches each token to a
        handle_* method via the `handlers` table.  Raises Exception for an
        unknown brace_style option.
        """
        if opts != None:
            self.opts = opts
        if self.opts.brace_style not in ['expand', 'collapse', 'end-expand']:
            raise(Exception('opts.brace_style must be "expand", "collapse" or "end-expand".'))
        self.blank_state()
        # Capture any leading indentation of the whole input; it is re-applied
        # to every output line via preindent_string.
        while s and s[0] in [' ', '\t']:
            self.preindent_string += s[0]
            s = s[1:]
        self.input = self.unpack(s, self.opts.eval_code)
        self.parser_pos = 0
        # Token-type -> handler dispatch table.
        handlers = {
            'TK_START_EXPR': self.handle_start_expr,
            'TK_END_EXPR': self.handle_end_expr,
            'TK_START_BLOCK': self.handle_start_block,
            'TK_END_BLOCK': self.handle_end_block,
            'TK_WORD': self.handle_word,
            'TK_RESERVED': self.handle_word,
            'TK_SEMICOLON': self.handle_semicolon,
            'TK_STRING': self.handle_string,
            'TK_EQUALS': self.handle_equals,
            'TK_OPERATOR': self.handle_operator,
            'TK_COMMA': self.handle_comma,
            'TK_BLOCK_COMMENT': self.handle_block_comment,
            'TK_INLINE_COMMENT': self.handle_inline_comment,
            'TK_COMMENT': self.handle_comment,
            'TK_DOT': self.handle_dot,
            'TK_UNKNOWN': self.handle_unknown,
        }
        while True:
            self.token_text, self.token_type = self.get_next_token()
            #print (token_text, self.token_type, self.flags.mode)
            if self.token_type == 'TK_EOF':
                # Close any statement frames still open at end of input.
                while self.flags.mode == MODE.Statement:
                    self.restore_mode();
                break
            keep_whitespace = self.opts.keep_array_indentation and self.is_array(self.flags.mode)
            self.input_wanted_newline = self.n_newlines > 0
            if keep_whitespace:
                for i in range(self.n_newlines):
                    self.append_newline(i > 0)
            else: # not keep_whitespace
                if self.opts.max_preserve_newlines != 0 and self.n_newlines > self.opts.max_preserve_newlines:
                    self.n_newlines = self.opts.max_preserve_newlines
                if self.opts.preserve_newlines and self.n_newlines > 1:
                    for i in range(self.n_newlines):
                        self.append_newline(i != 0)
            handlers[self.token_type](self.token_text)
            # The cleanest handling of inline comments is to treat them as though they aren't there.
            # Just continue formatting and the behavior should be logical.
            if self.token_type != 'TK_INLINE_COMMENT' and self.token_type != 'TK_COMMENT' and self.token_type != 'TK_BLOCK_COMMENT' and self.token_type != 'TK_UNKNOWN':
                self.last_last_text = self.flags.last_text
                self.last_type = self.token_type
                self.flags.last_text = self.token_text
            self.flags.had_comment = self.token_type in ['TK_COMMENT', 'TK_INLINE_COMMENT', 'TK_BLOCK_COMMENT']
        # Join the accumulated output lines and strip trailing whitespace/newlines.
        sweet_code = ''.join(self.output_lines[0].text)
        if len(self.output_lines) > 1:
            for line_index in range(1, len(self.output_lines)):
                sweet_code += '\n' + ''.join(self.output_lines[line_index].text);
        sweet_code = re.sub('[\n ]+$', '', sweet_code)
        return sweet_code
def unpack(self, source, evalcode=False):
import jsbeautifier.unpackers as unpackers
try:
return unpackers.run(source, evalcode)
except unpackers.UnpackingError as error:
print('error:', error)
return ''
def trim_output(self, eat_newlines = False):
self.trim_output_line(self.output_lines[-1])
while eat_newlines and len(self.output_lines) > 1 and \
len(self.output_lines[-1].text) == 0:
self.output_lines.pop()
self.trim_output_line(self.output_lines[-1])
def trim_output_line(self, line):
while len(line.text) \
and (
line.text[-1] == ' '\
or line.text[-1] == self.indent_string \
or line.text[-1] == self.preindent_string):
line.text.pop()
def is_special_word(self, s):
return s in ['case', 'return', 'do', 'if', 'throw', 'else']
    def is_array(self, mode):
        """True when `mode` is the array-literal frame."""
        return mode == MODE.ArrayLiteral
def is_expression(self, mode):
return mode in [MODE.Expression, MODE.ForInitializer, MODE.Conditional]
def just_added_newline(self):
line = self.output_lines[-1]
return len(line.text) == 0
def just_added_blankline(self):
if self.just_added_newline():
if len(self.output_lines) == 1:
return True
line = self.output_lines[-2]
return len(line.text) == 0
return False
    def allow_wrap_or_preserved_newline(self, token_text, force_linewrap = False):
        """Insert a newline before `token_text` if wrapping or newline preservation requires it.

        When wrap_line_length is active, upgrades `force_linewrap` if adding
        the token (plus a pending space) would reach the limit.
        """
        if self.opts.wrap_line_length > 0 and not force_linewrap:
            line = self.output_lines[-1]
            # never wrap the first token of a line.
            if len(line.text) > 0:
                proposed_line_length = len(''.join(line.text)) + len(token_text)
                if self.output_space_before_token:
                    proposed_line_length += 1
                if proposed_line_length >= self.opts.wrap_line_length:
                    force_linewrap = True
        # Preserve the input's newline, or force one — but never double up.
        if ((self.opts.preserve_newlines and self.input_wanted_newline) or force_linewrap) and not self.just_added_newline():
            self.append_newline(preserve_statement_flags = True)
    def append_newline(self, force_newline = False, preserve_statement_flags = False):
        """Start a new output line (unless one was just started and not forced).

        Unless `preserve_statement_flags`, first unwinds completed statement
        frames when the last token doesn't continue a statement.
        """
        self.output_space_before_token = False
        if not preserve_statement_flags:
            if self.flags.last_text != ';' and self.flags.last_text != ',' and self.flags.last_text != '=' and self.last_type != 'TK_OPERATOR':
                while self.flags.mode == MODE.Statement and not self.flags.if_block and not self.flags.do_block:
                    self.restore_mode();
        if len(self.output_lines) == 1 and self.just_added_newline():
            # no newline on start of file
            return
        if force_newline or not self.just_added_newline():
            # Mark the frame as spanning multiple lines so its indentation
            # is not later removed as redundant.
            self.flags.multiline_frame = True
            self.output_lines.append(OutputLine())
    def append_token_line_indentation(self):
        """Emit leading indentation for the current line, if it is still empty.

        In keep-array-indentation mode the input's own whitespace is copied
        through; otherwise preindent plus indentation_level indent units.
        """
        if self.just_added_newline():
            line = self.output_lines[-1]
            if self.opts.keep_array_indentation and self.is_array(self.flags.mode) and self.input_wanted_newline:
                # prevent removing of this whitespace as redundant
                line.text.append('');
                for item in self.whitespace_before_token:
                    line.text.append(item)
            else:
                if self.preindent_string != '':
                    line.text.append(self.preindent_string)
                level = self.flags.indentation_level;
                self.append_indent_string(level)
def append_indent_string(self, level):
# Never indent your first output indent at the start of the file
if len(self.output_lines) > 1:
line = self.output_lines[-1]
self.flags.line_indent_level = level
for i in range(level):
line.text.append(self.indent_string)
def append_token_space_before(self):
# make sure only single space gets drawn
line = self.output_lines[-1]
if self.output_space_before_token and len(line.text) and line.text[-1] not in [' ', self.indent_string]:
line.text.append(' ')
    def append_token(self, s):
        """Emit token text `s`: indentation first, then at most one space, then `s`."""
        self.append_token_line_indentation()
        self.append_token_space_before()
        # The pending-space flag is consumed by this token.
        self.output_space_before_token = False
        self.output_lines[-1].text.append(s)
    def indent(self):
        """Increase the current frame's indentation by one level."""
        self.flags.indentation_level += 1
def deindent(self):
allow_deindent = self.flags.indentation_level > 0 and ((self.flags.parent == None) or self.flags.indentation_level > self.flags.parent.indentation_level)
if allow_deindent:
self.flags.indentation_level -= 1
    def remove_redundant_indentation(self, frame):
        """Remove one level of indentation from every line of a single-line frame.

        Only applies to frames that did NOT span multiple lines; multiline
        frames keep the indent they earned.
        """
        # This implementation is effective but has some issues:
        # - less than great performance due to array splicing
        # - can cause line wrap to happen too soon due to indent removal
        # after wrap points are calculated
        # These issues are minor compared to ugly indentation.
        if frame.multiline_frame:
            return
        # remove one indent from each line inside this section
        index = frame.start_line_index
        splice_index = 0
        while index < len(self.output_lines):
            line = self.output_lines[index]
            index += 1
            # skip empty lines
            if len(line.text) == 0:
                continue
            # skip the preindent string if present
            if self.preindent_string != '' and \
                    line.text[0] == self.preindent_string:
                splice_index = 1
            else:
                splice_index = 0
            # remove one indent, if present
            # NOTE(review): if a line held only the preindent fragment,
            # line.text[1] would raise IndexError — presumably unreachable
            # for well-formed output; confirm.
            if line.text[splice_index] == self.indent_string:
                del line.text[splice_index]
    def set_mode(self, mode):
        """Push the current frame and enter a new BeautifierFlags frame for `mode`."""
        if self.flags:
            self.flag_store.append(self.flags)
            self.previous_flags = self.flags
        else:
            # First frame of a run: synthesize a previous frame so
            # apply_base always has something to inherit from.
            self.previous_flags = BeautifierFlags(mode)
        self.flags = BeautifierFlags(mode)
        self.flags.apply_base(self.previous_flags, self.just_added_newline());
        self.flags.start_line_index = len(self.output_lines)
def restore_mode(self):
if len(self.flag_store) > 0:
self.previous_flags = self.flags
self.flags = self.flag_store.pop()
if self.previous_flags.mode == MODE.Statement:
self.remove_redundant_indentation(self.previous_flags)
def start_of_object_property(self):
return self.flags.parent.mode == MODE.ObjectLiteral and self.flags.mode == MODE.Statement and self.flags.last_text == ':' and \
self.flags.ternary_depth == 0
    def start_of_statement(self):
        """Open a Statement frame if the current token begins a new statement.

        Returns True (and indents) when a statement frame was opened,
        False otherwise.  The long condition enumerates the token contexts
        that start a statement (var/let/const name, do, return, bare else,
        end of a for/if condition, etc.).
        """
        if (
            (self.last_type == 'TK_RESERVED' and self.flags.last_text in ['var', 'let', 'const'] and self.token_type == 'TK_WORD') \
                or (self.last_type == 'TK_RESERVED' and self.flags.last_text== 'do') \
                or (self.last_type == 'TK_RESERVED' and self.flags.last_text== 'return' and not self.input_wanted_newline) \
                or (self.last_type == 'TK_RESERVED' and self.flags.last_text == 'else' and not (self.token_type == 'TK_RESERVED' and self.token_text == 'if' )) \
                or (self.last_type == 'TK_END_EXPR' and (self.previous_flags.mode == MODE.ForInitializer or self.previous_flags.mode == MODE.Conditional)) \
                or (self.last_type == 'TK_WORD' and self.flags.mode == MODE.BlockStatement \
                    and not self.flags.in_case
                    and not (self.token_text == '--' or self.token_text == '++')
                    and self.token_type != 'TK_WORD' and self.token_type != 'TK_RESERVED') \
                or (self.flags.mode == MODE.ObjectLiteral and self.flags.last_text == ':' and self.flags.ternary_depth == 0) \
                ):
            self.set_mode(MODE.Statement);
            self.indent();
            if self.last_type == 'TK_RESERVED' and self.flags.last_text in ['var', 'let', 'const'] and self.token_type == 'TK_WORD':
                self.flags.declaration_statement = True
            # Issue #276:
            # If starting a new statement with [if, for, while, do], push to a new line.
            # if (a) if (b) if(c) d(); else e(); else f();
            if not self.start_of_object_property():
                self.allow_wrap_or_preserved_newline(self.token_text, self.token_type == 'TK_RESERVED' and self.token_text in ['do', 'for', 'if', 'while']);
            return True
        else:
            return False
def is_next(self, find):
local_pos = self.parser_pos
if local_pos >= len(self.input):
return False
c = self.input[local_pos]
while (c in self.whitespace) and c != find:
local_pos+= 1
if local_pos >= len(self.input):
return False
c = self.input[local_pos]
return c == find
    def get_next_token(self):
        """Scan and return the next token as a (token_text, token_type) pair.

        token_type is one of the 'TK_*' names dispatched in beautify().
        Side effects: advances self.parser_pos, counts newlines consumed
        into self.n_newlines, and records leading whitespace fragments in
        self.whitespace_before_token.
        """
        self.n_newlines = 0
        if self.parser_pos >= len(self.input):
            return '', 'TK_EOF'
        self.input_wanted_newline = False
        self.whitespace_before_token = []
        c = self.input[self.parser_pos]
        self.parser_pos += 1
        # Consume leading whitespace, tracking newlines and indentation.
        while c in self.whitespace:
            if c == '\n':
                self.n_newlines += 1
                self.whitespace_before_token = []
            elif c == self.indent_string:
                self.whitespace_before_token.append(self.indent_string)
            elif c != '\r':
                self.whitespace_before_token.append(' ')
            if self.parser_pos >= len(self.input):
                return '', 'TK_EOF'
            c = self.input[self.parser_pos]
            self.parser_pos += 1
        # NOTE: because beautifier doesn't fully parse, it doesn't use acorn.isIdentifierStart.
        # It just treats all identifiers and numbers and such the same.
        if self.acorn.isIdentifierChar(ord(self.input[self.parser_pos-1])):
            if self.parser_pos < len(self.input):
                while self.acorn.isIdentifierChar(ord(self.input[self.parser_pos])):
                    c = c + self.input[self.parser_pos]
                    self.parser_pos += 1
                    if self.parser_pos == len(self.input):
                        break
            # small and surprisingly unugly hack for IE-10 representation
            if self.parser_pos != len(self.input) and self.input[self.parser_pos] in '+-' \
                    and re.match('^[0-9]+[Ee]$', c):
                sign = self.input[self.parser_pos]
                self.parser_pos += 1
                t = self.get_next_token()
                c += sign + t[0]
                return c, 'TK_WORD'
            # Reserved words are not treated as such after '.' or get/set.
            if not (self.last_type == 'TK_DOT' \
                        or (self.last_type == 'TK_RESERVED' and self.flags.last_text in ['set', 'get'])) \
                    and c in self.reserved_words:
                if c == 'in': # in is an operator, need to hack
                    return c, 'TK_OPERATOR'
                return c, 'TK_RESERVED'
            return c, 'TK_WORD'
        if c in '([':
            return c, 'TK_START_EXPR'
        if c in ')]':
            return c, 'TK_END_EXPR'
        if c == '{':
            return c, 'TK_START_BLOCK'
        if c == '}':
            return c, 'TK_END_BLOCK'
        if c == ';':
            return c, 'TK_SEMICOLON'
        if c == '/':
            comment = ''
            inline_comment = True
            if self.input[self.parser_pos] == '*': # peek /* .. */ comment
                self.parser_pos += 1
                if self.parser_pos < len(self.input):
                    while not (self.input[self.parser_pos] == '*' and \
                               self.parser_pos + 1 < len(self.input) and \
                               self.input[self.parser_pos + 1] == '/')\
                          and self.parser_pos < len(self.input):
                        c = self.input[self.parser_pos]
                        comment += c
                        if c in '\r\n':
                            inline_comment = False
                        self.parser_pos += 1
                        if self.parser_pos >= len(self.input):
                            break
                self.parser_pos += 2
                # A /* */ comment with no newlines around it is "inline".
                if inline_comment and self.n_newlines == 0:
                    return '/*' + comment + '*/', 'TK_INLINE_COMMENT'
                else:
                    return '/*' + comment + '*/', 'TK_BLOCK_COMMENT'
            if self.input[self.parser_pos] == '/': # peek // comment
                comment = c
                while self.input[self.parser_pos] not in '\r\n':
                    comment += self.input[self.parser_pos]
                    self.parser_pos += 1
                    if self.parser_pos >= len(self.input):
                        break
                return comment, 'TK_COMMENT'
        # String, regex literal, or (with e4x) xml literal.  A leading '/' or
        # '<' only counts when the previous token makes a value legal here.
        if c == '`' or c == "'" or c == '"' or \
            ( \
                (c == '/') or \
                (self.opts.e4x and c == "<" and re.match('^<(!\[CDATA\[[\s\S]*?\]\]|[-a-zA-Z:0-9_.]+|\{[^{}]*\})\s*([-a-zA-Z:0-9_.]+=(\{[^{}]*\}|"[^"]*"|\'[^\']*\')\s*)*\/?\s*>', self.input[self.parser_pos - 1:])) \
            ) and ( \
                (self.last_type == 'TK_RESERVED' and self.is_special_word(self.flags.last_text)) or \
                (self.last_type == 'TK_END_EXPR' and self.previous_flags.mode in [MODE.Conditional, MODE.ForInitializer]) or \
                (self.last_type in ['TK_COMMENT', 'TK_START_EXPR', 'TK_START_BLOCK', 'TK_END_BLOCK', 'TK_OPERATOR', \
                                    'TK_EQUALS', 'TK_EOF', 'TK_SEMICOLON', 'TK_COMMA'])):
            sep = c
            esc = False
            # esc1/esc2 drive \xNN / \uNNNN unescaping when enabled.
            esc1 = 0
            esc2 = 0
            resulting_string = c
            in_char_class = False
            if self.parser_pos < len(self.input):
                if sep == '/':
                    # handle regexp
                    in_char_class = False
                    while esc or in_char_class or self.input[self.parser_pos] != sep:
                        resulting_string += self.input[self.parser_pos]
                        if not esc:
                            esc = self.input[self.parser_pos] == '\\'
                            if self.input[self.parser_pos] == '[':
                                in_char_class = True
                            elif self.input[self.parser_pos] == ']':
                                in_char_class = False
                        else:
                            esc = False
                        self.parser_pos += 1
                        if self.parser_pos >= len(self.input):
                            # incomplete regex when end-of-file reached
                            # bail out with what has received so far
                            return resulting_string, 'TK_STRING'
                elif self.opts.e4x and sep == '<':
                    # handle e4x xml literals
                    xmlRegExp = re.compile('<(\/?)(!\[CDATA\[[\s\S]*?\]\]|[-a-zA-Z:0-9_.]+|\{[^{}]*\})\s*([-a-zA-Z:0-9_.]+=(\{[^{}]*\}|"[^"]*"|\'[^\']*\')\s*)*(\/?)\s*>')
                    xmlStr = self.input[self.parser_pos - 1:]
                    match = xmlRegExp.match(xmlStr)
                    if match:
                        # Walk matching tags, tracking nesting depth of the root tag.
                        rootTag = match.group(2)
                        depth = 0
                        while (match):
                            isEndTag = match.group(1)
                            tagName = match.group(2)
                            isSingletonTag = (match.groups()[-1] != "") or (match.group(2)[0:8] == "![CDATA[")
                            if tagName == rootTag and not isSingletonTag:
                                if isEndTag:
                                    depth -= 1
                                else:
                                    depth += 1
                            if depth <= 0:
                                break
                            match = xmlRegExp.search(xmlStr, match.end())
                        if match:
                            xmlLength = match.end() # + len(match.group())
                        else:
                            xmlLength = len(xmlStr)
                        self.parser_pos += xmlLength - 1
                        return xmlStr[:xmlLength], 'TK_STRING'
                else:
                    # handle string
                    while esc or self.input[self.parser_pos] != sep:
                        resulting_string += self.input[self.parser_pos]
                        if esc1 and esc1 >= esc2:
                            # Collected a full \xNN / \uNNNN escape; try to decode it.
                            try:
                                esc1 = int(resulting_string[-esc2:], 16)
                            except Exception:
                                esc1 = False
                            if esc1 and esc1 >= 0x20 and esc1 <= 0x7e:
                                # Only unescape printable ASCII; re-escape the
                                # quote char and backslash.
                                esc1 = chr(esc1)
                                resulting_string = resulting_string[:-2 - esc2]
                                if esc1 == sep or esc1 == '\\':
                                    resulting_string += '\\'
                                resulting_string += esc1
                            esc1 = 0
                        if esc1:
                            esc1 += 1
                        elif not esc:
                            esc = self.input[self.parser_pos] == '\\'
                        else:
                            esc = False
                            if self.opts.unescape_strings:
                                if self.input[self.parser_pos] == 'x':
                                    esc1 += 1
                                    esc2 = 2
                                elif self.input[self.parser_pos] == 'u':
                                    esc1 += 1
                                    esc2 = 4
                        self.parser_pos += 1
                        if self.parser_pos >= len(self.input):
                            # incomplete string when end-of-file reached
                            # bail out with what has received so far
                            return resulting_string, 'TK_STRING'
            self.parser_pos += 1
            resulting_string += sep
            if sep == '/':
                # regexps may have modifiers /regexp/MOD, so fetch those too
                while self.parser_pos < len(self.input) and self.input[self.parser_pos] in self.wordchar:
                    resulting_string += self.input[self.parser_pos]
                    self.parser_pos += 1
            return resulting_string, 'TK_STRING'
        if c == '#':
            # she-bang
            if len(self.output_lines) == 1 and len(self.output_lines[0].text) == 0 and \
                    len(self.input) > self.parser_pos and self.input[self.parser_pos] == '!':
                resulting_string = c
                while self.parser_pos < len(self.input) and c != '\n':
                    c = self.input[self.parser_pos]
                    resulting_string += c
                    self.parser_pos += 1
                return resulting_string.strip() + '\n', 'TK_UNKNOWN'
            # Spidermonkey-specific sharp variables for circular references
            # https://developer.mozilla.org/En/Sharp_variables_in_JavaScript
            # http://mxr.mozilla.org/mozilla-central/source/js/src/jsscan.cpp around line 1935
            sharp = '#'
            if self.parser_pos < len(self.input) and self.input[self.parser_pos] in self.digits:
                while True:
                    c = self.input[self.parser_pos]
                    sharp += c
                    self.parser_pos += 1
                    if self.parser_pos >= len(self.input) or c == '#' or c == '=':
                        break
            if c == '#' or self.parser_pos >= len(self.input):
                pass
            elif self.input[self.parser_pos] == '[' and self.input[self.parser_pos + 1] == ']':
                sharp += '[]'
                self.parser_pos += 2
            elif self.input[self.parser_pos] == '{' and self.input[self.parser_pos + 1] == '}':
                sharp += '{}'
                self.parser_pos += 2
            return sharp, 'TK_WORD'
        # HTML comment open/close markers sometimes wrap inline scripts.
        if c == '<' and self.input[self.parser_pos - 1 : self.parser_pos + 3] == '<!--':
            self.parser_pos += 3
            c = '<!--'
            while self.parser_pos < len(self.input) and self.input[self.parser_pos] != '\n':
                c += self.input[self.parser_pos]
                self.parser_pos += 1
            self.flags.in_html_comment = True
            return c, 'TK_COMMENT'
        if c == '-' and self.flags.in_html_comment and self.input[self.parser_pos - 1 : self.parser_pos + 2] == '-->':
            self.flags.in_html_comment = False
            self.parser_pos += 2
            return '-->', 'TK_COMMENT'
        if c == '.':
            return c, 'TK_DOT'
        if c in self.punct:
            # Greedily extend multi-character operators (e.g. '=' -> '===').
            while self.parser_pos < len(self.input) and c + self.input[self.parser_pos] in self.punct:
                c += self.input[self.parser_pos]
                self.parser_pos += 1
                if self.parser_pos >= len(self.input):
                    break
            if c == ',':
                return c, 'TK_COMMA'
            if c == '=':
                return c, 'TK_EQUALS'
            return c, 'TK_OPERATOR'
        return c, 'TK_UNKNOWN'
    def handle_start_expr(self, token_text):
        """Handle '(' or '[': choose the frame mode and spacing, then open the frame."""
        if self.start_of_statement():
            # The conditional starts the statement if appropriate.
            pass
        next_mode = MODE.Expression
        if token_text == '[':
            if self.last_type == 'TK_WORD' or self.flags.last_text == ')':
                # Subscript access (foo[ / )[ ) — plain expression, not an array literal.
                if self.last_type == 'TK_RESERVED' and self.flags.last_text in self.line_starters:
                    self.output_space_before_token = True
                self.set_mode(next_mode)
                self.append_token(token_text)
                self.indent()
                if self.opts.space_in_paren:
                    self.output_space_before_token = True
                return
            next_mode = MODE.ArrayLiteral
            if self.is_array(self.flags.mode):
                if self.flags.last_text == '[' or (
                    self.flags.last_text == ',' and (self.last_last_text == ']' or self.last_last_text == '}')):
                    # ], [ goes to a new line
                    # }, [ goes to a new line
                    if not self.opts.keep_array_indentation:
                        self.append_newline()
        else:
            # '(' after for/if/while gets a special frame mode.
            if self.last_type == 'TK_RESERVED' and self.flags.last_text == 'for':
                next_mode = MODE.ForInitializer
            elif self.last_type == 'TK_RESERVED' and self.flags.last_text in ['if', 'while']:
                next_mode = MODE.Conditional
            else:
                next_mode = MODE.Expression
        if self.flags.last_text == ';' or self.last_type == 'TK_START_BLOCK':
            self.append_newline()
        elif self.last_type in ['TK_END_EXPR', 'TK_START_EXPR', 'TK_END_BLOCK'] or self.flags.last_text == '.':
            # do nothing on (( and )( and ][ and ]( and .(
            # TODO: Consider whether forcing this is required. Review failing tests when removed.
            self.allow_wrap_or_preserved_newline(token_text, self.input_wanted_newline);
        elif not (self.last_type == 'TK_RESERVED' and token_text == '(') and self.last_type not in ['TK_WORD', 'TK_OPERATOR']:
            self.output_space_before_token = True
        elif (self.last_type == 'TK_RESERVED' and (self.flags.last_word == 'function' or self.flags.last_word == 'typeof')) or \
            (self.flags.last_text == '*' and self.last_last_text =='function'):
            # function() vs function (), typeof() vs typeof ()
            if self.opts.jslint_happy:
                self.output_space_before_token = True
        elif self.last_type == 'TK_RESERVED' and (self.flags.last_text in self.line_starters or self.flags.last_text == 'catch'):
            # TODO: option space_before_conditional
            self.output_space_before_token = True
        # Support of this kind of newline preservation:
        # a = (b &&
        # (c || d));
        if self.last_type in ['TK_EQUALS', 'TK_OPERATOR']:
            if not self.start_of_object_property():
                self.allow_wrap_or_preserved_newline(token_text)
        self.set_mode(next_mode)
        self.append_token(token_text)
        if self.opts.space_in_paren:
            self.output_space_before_token = True
        # In all cases, if we newline while inside an expression it should be indented.
        self.indent()
    def handle_end_expr(self, token_text):
        """Handle ')' or ']': close statement frames, fix spacing, pop the frame."""
        # statements inside expressions are not valid syntax, but...
        # statements must all be closed when their container closes
        while self.flags.mode == MODE.Statement:
            self.restore_mode()
        if self.flags.multiline_frame:
            self.allow_wrap_or_preserved_newline(self.token_text, self.token_text == ']' and self.is_array(self.flags.mode) and not self.opts.keep_array_indentation)
        if self.opts.space_in_paren:
            if self.last_type == 'TK_START_EXPR' and not self.opts.space_in_empty_paren:
                # empty parens are always "()" and "[]", not "( )" or "[ ]"
                self.output_space_before_token = False
                self.trim_output()
            else:
                self.output_space_before_token = True
        if self.token_text == ']' and self.opts.keep_array_indentation:
            # Emit before popping so the closing bracket keeps the array's indentation.
            self.append_token(token_text)
            self.restore_mode()
        else:
            self.restore_mode()
            self.append_token(token_text)
        self.remove_redundant_indentation(self.previous_flags);
        # do {} while () // no statement required after
        if self.flags.do_while and self.previous_flags.mode == MODE.Conditional:
            self.previous_flags.mode = MODE.Expression
            self.flags.do_block = False
            self.flags.do_while = False
    def handle_start_block(self, token_text):
        """Handle '{': open a block frame, placing the brace per brace_style."""
        self.set_mode(MODE.BlockStatement)
        empty_braces = self.is_next('}')
        empty_anonymous_function = empty_braces and self.flags.last_word == 'function' and \
            self.last_type == 'TK_END_EXPR'
        if self.opts.brace_style == 'expand':
            if self.last_type != 'TK_OPERATOR' and \
                (empty_anonymous_function or
                 self.last_type == 'TK_EQUALS' or
                 (self.last_type == 'TK_RESERVED' and self.is_special_word(self.flags.last_text) and self.flags.last_text != 'else')):
                self.output_space_before_token = True
            else:
                self.append_newline(preserve_statement_flags = True)
        else: # collapse
            if self.last_type not in ['TK_OPERATOR', 'TK_START_EXPR']:
                if self.last_type == 'TK_START_BLOCK':
                    self.append_newline()
                else:
                    self.output_space_before_token = True
            else:
                # if TK_OPERATOR or TK_START_EXPR
                if self.is_array(self.previous_flags.mode) and self.flags.last_text == ',':
                    if self.last_last_text == '}':
                        # }, { in array context
                        self.output_space_before_token = True
                    else:
                        self.append_newline()
        self.append_token(token_text)
        self.indent()
    def handle_end_block(self, token_text):
        """Handle '}': close open statement frames and emit the brace per brace_style."""
        # statements must all be closed when their container closes
        while self.flags.mode == MODE.Statement:
            self.restore_mode()
        empty_braces = self.last_type == 'TK_START_BLOCK';
        if self.opts.brace_style == 'expand':
            if not empty_braces:
                self.append_newline()
        else:
            # skip {}
            if not empty_braces:
                if self.is_array(self.flags.mode) and self.opts.keep_array_indentation:
                    # Temporarily disable so the closing brace newline is emitted
                    # with normal indentation, then restore the option.
                    self.opts.keep_array_indentation = False
                    self.append_newline()
                    self.opts.keep_array_indentation = True
                else:
                    self.append_newline()
        self.restore_mode()
        self.append_token(token_text)
    def handle_word(self, token_text):
        """Handle TK_WORD / TK_RESERVED tokens.

        Covers newline preservation, do/while pairing, if/else unwinding,
        case/default deindenting, 'function' placement, and the
        NONE/SPACE/NEWLINE prefix decision before emitting the word.
        """
        if self.start_of_statement():
            # The conditional starts the statement if appropriate.
            pass
        elif self.input_wanted_newline and \
                not self.is_expression(self.flags.mode) and \
                (self.last_type != 'TK_OPERATOR' or (self.flags.last_text == '--' or self.flags.last_text == '++')) and \
                self.last_type != 'TK_EQUALS' and \
                (self.opts.preserve_newlines or not (self.last_type == 'TK_RESERVED' and self.flags.last_text in ['var', 'let', 'const', 'set', 'get'])):
            self.append_newline()
        if self.flags.do_block and not self.flags.do_while:
            if self.token_type == 'TK_RESERVED' and token_text == 'while':
                # do {} ## while ()
                self.output_space_before_token = True
                self.append_token(token_text)
                self.output_space_before_token = True
                self.flags.do_while = True
                return
            else:
                # do {} should always have while as the next word.
                # if we don't see the expected while, recover
                self.append_newline()
                self.flags.do_block = False
        # if may be followed by else, or not
        # Bare/inline ifs are tricky
        # Need to unwind the modes correctly: if (a) if (b) c(); else d(); else e();
        if self.flags.if_block:
            if (not self.flags.else_block) and (self.token_type == 'TK_RESERVED' and token_text == 'else'):
                self.flags.else_block = True
            else:
                while self.flags.mode == MODE.Statement:
                    self.restore_mode()
                self.flags.if_block = False;
        if self.token_type == 'TK_RESERVED' and (token_text == 'case' or (token_text == 'default' and self.flags.in_case_statement)):
            self.append_newline()
            if self.flags.case_body or self.opts.jslint_happy:
                # Back out of the previous case's body indentation.
                self.flags.case_body = False
                self.deindent()
            self.append_token(token_text)
            self.flags.in_case = True
            self.flags.in_case_statement = True
            return
        if self.token_type == 'TK_RESERVED' and token_text == 'function':
            if self.flags.last_text in ['}', ';'] or (self.just_added_newline() and not self.flags.last_text in ['{', ':', '=', ',']):
                # make sure there is a nice clean space of at least one blank line
                # before a new function definition, except in arrays
                if not self.just_added_blankline() and not self.flags.had_comment:
                    self.append_newline()
                    self.append_newline(True)
            if self.last_type == 'TK_RESERVED' or self.last_type == 'TK_WORD':
                if self.last_type == 'TK_RESERVED' and self.flags.last_text in ['get', 'set', 'new', 'return']:
                    self.output_space_before_token = True
                else:
                    self.append_newline()
            elif self.last_type == 'TK_OPERATOR' or self.flags.last_text == '=':
                # foo = function
                self.output_space_before_token = True
            elif self.is_expression(self.flags.mode):
                # (function
                pass
            else:
                self.append_newline()
        if self.last_type in ['TK_COMMA', 'TK_START_EXPR', 'TK_EQUALS', 'TK_OPERATOR']:
            if not self.start_of_object_property():
                self.allow_wrap_or_preserved_newline(token_text)
        if self.token_type == 'TK_RESERVED' and token_text == 'function':
            self.append_token(token_text)
            self.flags.last_word = token_text
            return
        # Decide how the word attaches to the previous token.
        prefix = 'NONE'
        if self.last_type == 'TK_END_BLOCK':
            if not (self.token_type == 'TK_RESERVED' and token_text in ['else', 'catch', 'finally']):
                prefix = 'NEWLINE'
            else:
                if self.opts.brace_style in ['expand', 'end-expand']:
                    prefix = 'NEWLINE'
                else:
                    prefix = 'SPACE'
                    self.output_space_before_token = True
        elif self.last_type == 'TK_SEMICOLON' and self.flags.mode == MODE.BlockStatement:
            # TODO: Should this be for STATEMENT as well?
            prefix = 'NEWLINE'
        elif self.last_type == 'TK_SEMICOLON' and self.is_expression(self.flags.mode):
            prefix = 'SPACE'
        elif self.last_type == 'TK_STRING':
            prefix = 'NEWLINE'
        elif self.last_type == 'TK_RESERVED' or self.last_type == 'TK_WORD' or \
            (self.flags.last_text == '*' and self.last_last_text == 'function'):
            prefix = 'SPACE'
        elif self.last_type == 'TK_START_BLOCK':
            prefix = 'NEWLINE'
        elif self.last_type == 'TK_END_EXPR':
            self.output_space_before_token = True
            prefix = 'NEWLINE'
        if self.token_type == 'TK_RESERVED' and token_text in self.line_starters and self.flags.last_text != ')':
            if self.flags.last_text == 'else':
                prefix = 'SPACE'
            else:
                prefix = 'NEWLINE'
        if self.token_type == 'TK_RESERVED' and token_text in ['else', 'catch', 'finally']:
            if self.last_type != 'TK_END_BLOCK' \
               or self.opts.brace_style == 'expand' \
               or self.opts.brace_style == 'end-expand':
                self.append_newline()
            else:
                self.trim_output(True)
                line = self.output_lines[-1]
                # If we trimmed and there's something other than a close block before us
                # put a newline back in. Handles '} // comment' scenario.
                if line.text[-1] != '}':
                    self.append_newline()
                self.output_space_before_token = True
        elif prefix == 'NEWLINE':
            if self.last_type == 'TK_RESERVED' and self.is_special_word(self.flags.last_text):
                # no newline between return nnn
                self.output_space_before_token = True
            elif self.last_type != 'TK_END_EXPR':
                if (self.last_type != 'TK_START_EXPR' or not (self.token_type == 'TK_RESERVED' and token_text in ['var', 'let', 'const'])) and self.flags.last_text != ':':
                    # no need to force newline on VAR -
                    # for (var x = 0...
                    if self.token_type == 'TK_RESERVED' and token_text == 'if' and self.flags.last_word == 'else' and self.flags.last_text != '{':
                        self.output_space_before_token = True
                    else:
                        self.append_newline()
            elif self.token_type == 'TK_RESERVED' and token_text in self.line_starters and self.flags.last_text != ')':
                self.append_newline()
        elif self.is_array(self.flags.mode) and self.flags.last_text == ',' and self.last_last_text == '}':
            self.append_newline() # }, in lists get a newline
        elif prefix == 'SPACE':
            self.output_space_before_token = True
        self.append_token(token_text)
        self.flags.last_word = token_text
        if self.token_type == 'TK_RESERVED' and token_text == 'do':
            self.flags.do_block = True
        if self.token_type == 'TK_RESERVED' and token_text == 'if':
            self.flags.if_block = True
def handle_semicolon(self, token_text):
    """Emit a semicolon, unwinding any dangling single-statement modes."""
    if self.start_of_statement():
        # A semicolon may open (and immediately close) a statement;
        # in that case no separating space should precede it.
        self.output_space_before_token = False
    # Pop every implicit Statement frame, except bare if/do blocks,
    # which are still waiting for more input (an else / while).
    while True:
        flags = self.flags
        if flags.mode != MODE.Statement or flags.if_block or flags.do_block:
            break
        self.restore_mode()
    self.append_token(token_text)
    # ObjectLiteral mode tends to linger; force it back to a block.
    if self.flags.mode == MODE.ObjectLiteral:
        self.flags.mode = MODE.BlockStatement
def handle_string(self, token_text):
    """Emit a string literal with appropriate leading space / newline."""
    needs_space = False
    if self.start_of_statement():
        # The conditional starts the statement if appropriate.
        # Unlike other tokens, strings want at least a space before.
        needs_space = True
    elif self.last_type in ('TK_RESERVED', 'TK_WORD'):
        needs_space = True
    elif self.last_type in ('TK_COMMA', 'TK_START_EXPR', 'TK_EQUALS', 'TK_OPERATOR'):
        if not self.start_of_object_property():
            self.allow_wrap_or_preserved_newline(token_text)
    else:
        self.append_newline()
    if needs_space:
        self.output_space_before_token = True
    self.append_token(token_text)
def handle_equals(self, token_text):
    """Emit '=' surrounded by spaces, flagging declaration assignments."""
    # May open a statement frame; no extra handling is needed here.
    self.start_of_statement()
    if self.flags.declaration_statement:
        # An '=' inside a var-line switches to assignment-style
        # line-breaking rules for subsequent commas.
        self.flags.declaration_assignment = True
    self.output_space_before_token = True
    self.append_token(token_text)
    self.output_space_before_token = True
def handle_comma(self, token_text):
    """Emit ',' with declaration- and object-literal-aware breaking."""
    if self.flags.declaration_statement:
        if self.is_expression(self.flags.parent.mode):
            # Inside for ( var a = 1, b = 2 ) — never break on the comma.
            self.flags.declaration_assignment = False
        self.append_token(token_text)
        if not self.flags.declaration_assignment:
            self.output_space_before_token = True
        else:
            # Each declaration assignment goes on its own line.
            self.flags.declaration_assignment = False
            self.append_newline(preserve_statement_flags=True)
        return

    self.append_token(token_text)
    in_object_literal = self.flags.mode == MODE.ObjectLiteral or (
        self.flags.mode == MODE.Statement
        and self.flags.parent.mode == MODE.ObjectLiteral)
    if in_object_literal:
        if self.flags.mode == MODE.Statement:
            self.restore_mode()
        self.append_newline()
    else:
        # EXPR or DO_BLOCK: just a space after the comma.
        self.output_space_before_token = True
def handle_operator(self, token_text):
    """Emit an operator token, deciding the spacing/newlines around it.

    Handles special cases first (return-like words, AS import .*, case
    labels, '::'), then computes space_before/space_after for the
    general case, including unary +/-/++/--/!/~ and ternary ?:.
    """
    # Check if this is a BlockStatement that should be treated as a ObjectLiteral
    # NOTE(review): this reads self.token_text rather than the token_text
    # parameter — presumably they hold the same value here; confirm.
    if self.token_text == ':' and self.flags.mode == MODE.BlockStatement and \
            self.last_last_text == '{' and \
            (self.last_type == 'TK_WORD' or self.last_type == 'TK_RESERVED'):
        self.flags.mode = MODE.ObjectLiteral

    if self.start_of_statement():
        # The conditional starts the statement if appropriate.
        pass

    space_before = True
    space_after = True

    if self.last_type == 'TK_RESERVED' and self.is_special_word(self.flags.last_text):
        # return had a special handling in TK_WORD
        self.output_space_before_token = True
        self.append_token(token_text)
        return

    # hack for actionscript's import .*;
    if token_text == '*' and self.last_type == 'TK_DOT' and not self.last_last_text.isdigit():
        self.append_token(token_text)
        return

    if token_text == ':' and self.flags.in_case:
        # 'case x:' — emit the colon, indent the case body, newline.
        self.flags.case_body = True
        self.indent()
        self.append_token(token_text)
        self.append_newline()
        self.flags.in_case = False
        return

    if token_text == '::':
        # no spaces around the exotic namespacing syntax operator
        self.append_token(token_text)
        return

    # http://www.ecma-international.org/ecma-262/5.1/#sec-7.9.1
    # if there is a newline between -- or ++ and anything else we should preserve it.
    if self.input_wanted_newline and (token_text == '--' or token_text == '++'):
        self.append_newline()

    # Allow line wrapping between operators in an expression
    if self.last_type == 'TK_OPERATOR':
        self.allow_wrap_or_preserved_newline(token_text)

    # Unary operators (and +/- used as unary) normally take no spaces.
    if token_text in ['--', '++', '!', '~'] \
            or (token_text in ['+', '-'] \
                and (self.last_type in ['TK_START_BLOCK', 'TK_START_EXPR', 'TK_EQUALS', 'TK_OPERATOR'] \
                     or self.flags.last_text in self.line_starters or self.flags.last_text == ',')):
        space_before = False
        space_after = False

        if self.flags.last_text == ';' and self.is_expression(self.flags.mode):
            # for (;; ++i)
            #        ^^
            space_before = True

        if self.last_type == 'TK_RESERVED':
            space_before = True

        if self.flags.mode == MODE.BlockStatement and self.flags.last_text in ['{', ';']:
            # { foo: --i }
            # foo(): --bar
            self.append_newline()
    elif token_text == ':':
        if self.flags.ternary_depth == 0:
            # Not a ternary ':' — treat as an object-literal key separator.
            if self.flags.mode == MODE.BlockStatement:
                self.flags.mode = MODE.ObjectLiteral
            space_before = False
        else:
            self.flags.ternary_depth -= 1
    elif token_text == '?':
        self.flags.ternary_depth += 1
    elif self.token_text == '*' and self.last_type == 'TK_RESERVED' and self.flags.last_text == 'function':
        # generator star: 'function*' — no surrounding spaces.
        space_before = False
        space_after = False

    if space_before:
        self.output_space_before_token = True
    self.append_token(token_text)
    if space_after:
        self.output_space_before_token = True
def handle_block_comment(self, token_text):
    """Emit a /* ... */ comment, re-indenting javadoc-style bodies.

    Javadoc bodies (every continuation line blank or starting with '*')
    are stripped and re-indented with ' *'; other comment bodies are
    copied through verbatim to preserve the author's layout.
    """
    lines = token_text.replace('\x0d', '').split('\x0a')
    javadoc = False

    # block comment starts with a new line
    self.append_newline(preserve_statement_flags = True)
    if len(lines) > 1:
        # javadoc iff no continuation line is non-blank and non-'*'-led
        if not any(l for l in lines[1:] if ( l.strip() == '' or (l.lstrip())[0] != '*')):
            javadoc = True

    # first line always indented
    self.append_token(lines[0])
    for line in lines[1:]:
        self.append_newline(preserve_statement_flags = True)
        if javadoc:
            # javadoc: reformat and re-indent
            self.append_token(' ' + line.strip())
        else:
            # normal comments output raw (bypasses token spacing logic)
            self.output_lines[-1].text.append(line)

    self.append_newline(preserve_statement_flags = True)
def handle_inline_comment(self, token_text):
    """Emit an inline /* ... */ comment padded with a space on each side."""
    self.output_space_before_token = True
    self.append_token(token_text)
    self.output_space_before_token = True
def handle_comment(self, token_text):
    """Emit a // comment, on its own line or trailing the current code."""
    if self.input_wanted_newline:
        # The comment began on a fresh input line: keep it that way.
        self.append_newline(preserve_statement_flags=True)
    else:
        # Trailing comment: tidy the current line end first.
        self.trim_output(True)
    self.output_space_before_token = True
    self.append_token(token_text)
    self.append_newline(preserve_statement_flags=True)
def handle_dot(self, token_text):
    """Emit '.', optionally wrapping before it for chained method calls."""
    # May open a statement frame; no extra handling is needed here.
    self.start_of_statement()

    if self.last_type == 'TK_RESERVED' and self.is_special_word(self.flags.last_text):
        # After return/throw-like words, keep a space instead of wrapping.
        self.output_space_before_token = True
    else:
        # Preserve newlines before dots in general, and force one after a
        # close paren when break_chained_methods is set — for bar().baz().
        force_break = self.flags.last_text == ')' and self.opts.break_chained_methods
        self.allow_wrap_or_preserved_newline(token_text, force_break)
    self.append_token(token_text)
def handle_unknown(self, token_text):
    """Pass an unrecognized token through to the output verbatim.

    If the raw token ends with a newline, reflect that in the output.
    str.endswith replaces the original token_text[len(token_text) - 1]
    index, which raised IndexError for an empty token.
    """
    self.append_token(token_text)
    if token_text.endswith('\n'):
        self.append_newline()
def mkdir_p(path):
    """Create *path* and any missing parents (like ``mkdir -p``).

    A falsy path is a no-op: main() calls mkdir_p(os.path.dirname(outfile)),
    which is '' when outfile has no directory part, and os.makedirs('')
    raises FileNotFoundError. An already-existing directory is fine; any
    other OSError (permissions, path exists as a file, ...) propagates.
    """
    if not path:
        # e.g. os.path.dirname('out.js') == '' — nothing to create.
        return
    try:
        os.makedirs(path)
    except OSError as exc:  # Python >2.5
        if not (exc.errno == errno.EEXIST and os.path.isdir(path)):
            raise
def main():
    """Command-line entry point: parse options, beautify, write output.

    Returns a shell-style exit status (0 on success, 1 on error, or
    whatever usage() returns for bad invocations).
    """
    argv = sys.argv[1:]
    try:
        opts, args = getopt.getopt(argv, "s:c:o:dEPjbkil:xhtfvXw:",
            ['indent-size=', 'indent-char=', 'outfile=', 'disable-preserve-newlines',
             'space-in-paren', 'space-in-empty-paren', 'jslint-happy', 'brace-style=',
             'keep-array-indentation', 'indent-level=', 'unescape-strings', 'help', 'usage',
             'stdin', 'eval-code', 'indent-with-tabs', 'keep-function-indentation', 'version',
             # 'wrap-line-length' takes a value, so it needs the trailing '='
             # (it was previously declared without one, so the long form
             # could never carry its argument).
             'e4x', 'wrap-line-length='])
    except getopt.GetoptError as ex:
        print(ex, file=sys.stderr)
        return usage(sys.stderr)

    js_options = default_options()

    file = None
    outfile = 'stdout'
    if len(args) == 1:
        file = args[0]

    # NOTE(review): '-l:'/'--indent-level=' is accepted by getopt above but
    # has no handler in this chain; the value is silently discarded.
    for opt, arg in opts:
        if opt in ('--keep-array-indentation', '-k'):
            js_options.keep_array_indentation = True
        elif opt in ('--keep-function-indentation', '-f'):
            js_options.keep_function_indentation = True
        elif opt in ('--outfile', '-o'):
            outfile = arg
        elif opt in ('--indent-size', '-s'):
            js_options.indent_size = int(arg)
        elif opt in ('--indent-char', '-c'):
            js_options.indent_char = arg
        elif opt in ('--indent-with-tabs', '-t'):
            js_options.indent_with_tabs = True
        elif opt in ('--disable-preserve-newlines', '-d'):
            js_options.preserve_newlines = False
        elif opt in ('--space-in-paren', '-P'):
            js_options.space_in_paren = True
        elif opt in ('--space-in-empty-paren', '-E'):
            js_options.space_in_empty_paren = True
        elif opt in ('--jslint-happy', '-j'):
            js_options.jslint_happy = True
        elif opt in ('--eval-code',):
            # The trailing comma matters: `opt in ('--eval-code')` without it
            # is a substring test against a bare string, not tuple membership.
            js_options.eval_code = True
        elif opt in ('--brace-style', '-b'):
            js_options.brace_style = arg
        elif opt in ('--unescape-strings', '-x'):
            js_options.unescape_strings = True
        elif opt in ('--e4x', '-X'):
            js_options.e4x = True
        elif opt in ('--wrap-line-length', '-w'):
            # was '--wrap-line-length ' (trailing space), which never matched
            js_options.wrap_line_length = int(arg)
        elif opt in ('--stdin', '-i'):
            file = '-'
        elif opt in ('--version', '-v'):
            return print(__version__)
        elif opt in ('--help', '--usage', '-h'):
            return usage()

    if not file:
        print("Must define at least one file.", file=sys.stderr)
        return usage(sys.stderr)

    try:
        if outfile == 'stdout':
            print(beautify_file(file, js_options))
        else:
            mkdir_p(os.path.dirname(outfile))
            with open(outfile, 'w') as f:
                f.write(beautify_file(file, js_options) + '\n')
    except Exception as ex:
        print(ex, file=sys.stderr)
        return 1

    # Success
    return 0
| python/jsbeautifier/__init__.py | 68,865 | The MIT License (MIT) Copyright (c) 2007-2013 Einar Lielmanis and contributors. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. Originally written by Einar Lielmanis et al., Conversion to python by Einar Lielmanis, einar@jsbeautifier.org, Parsing improvement for brace-less and semicolon-less statements by Liam Newman <bitwiseman@gmail.com> Python is not my native language, feel free to push things around. 
Use either from command line (script displays its usage when run without any parameters), or, alternatively, use it as a module: import jsbeautifier res = jsbeautifier.beautify('your javascript string') res = jsbeautifier.beautify_file('some_file.js') you may specify some options: opts = jsbeautifier.default_options() opts.indent_size = 2 res = jsbeautifier.beautify('some javascript', opts) Here are the available options: (read source) Using object instead of string to allow for later expansion of info about each line This section of code was translated to python from acorn (javascript). Acorn was written by Marijn Haverbeke and released under an MIT license. The Unicode regexps (for identifiers and whitespace) were taken from [Esprima](http://esprima.org) by Ariya Hidayat. Git repositories for Acorn are available at http://marijnhaverbeke.nl/git/acorn https://github.com/marijnh/acorn.git Character categories Big ugly regular expressions that match characters in the whitespace, identifier, and identifier-start categories. These are only applied when a character is found to actually have a code point above 128. Whether a single character denotes a newline. Matches a whole line break (where CRLF is considered a single line break). Used to count lines. Test whether a given character code starts an identifier. Test whether a given character is part of an identifier. stdin internal flags last token type pre-last token text Words which always should start on a new lineprint (token_text, self.token_type, self.flags.mode) not keep_whitespace The cleanest handling of inline comments is to treat them as though they aren't there. Just continue formatting and the behavior should be logical. never wrap the first token of a line. 
no newline on start of file prevent removing of this whitespace as redundant Never indent your first output indent at the start of the file make sure only single space gets drawn This implementation is effective but has some issues: - less than great performance due to array splicing - can cause line wrap to happen too soon due to indent removal after wrap points are calculated These issues are minor compared to ugly indentation. remove one indent from each line inside this section skip empty lines skip the preindent string if present remove one indent, if present Issue 276: If starting a new statement with [if, for, while, do], push to a new line. if (a) if (b) if(c) d(); else e(); else f(); NOTE: because beautifier doesn't fully parse, it doesn't use acorn.isIdentifierStart. It just treats all identifiers and numbers and such the same. small and surprisingly unugly hack for IE-10 representation in is an operator, need to hack peek /* .. */ comment peek // comment handle regexp incomplete regex when end-of-file reached bail out with what has received so far handle e4x xml literals + len(match.group()) handle string incomplete string when end-of-file reached bail out with what has received so far regexps may have modifiers /regexp/MOD, so fetch those too she-bang Spidermonkey-specific sharp variables for circular references https://developer.mozilla.org/En/Sharp_variables_in_JavaScript http://mxr.mozilla.org/mozilla-central/source/js/src/jsscan.cpp around line 1935 The conditional starts the statement if appropriate. ], [ goes to a new line }, [ goes to a new line do nothing on (( and )( and ][ and ]( and .( TODO: Consider whether forcing this is required. Review failing tests when removed. function() vs function (), typeof() vs typeof () TODO: option space_before_conditional Support of this kind of newline preservation: a = (b && (c || d)); In all cases, if we newline while inside an expression it should be indented. 
statements inside expressions are not valid syntax, but... statements must all be closed when their container closes empty parens are always "()" and "[]", not "( )" or "[ ]" do {} while () // no statement required after collapse if TK_OPERATOR or TK_START_EXPR statements must all be closed when their container closes skip {} The conditional starts the statement if appropriate. do {} while () do {} should always have while as the next word. if we don't see the expected while, recover if may be followed by else, or not Bare/inline ifs are tricky Need to unwind the modes correctly: if (a) if (b) c(); else d(); else e(); make sure there is a nice clean space of at least one blank line before a new function definition, except in arrays foo = function (function TODO: Should this be for STATEMENT as well? If we trimmed and there's something other than a close block before us put a newline back in. Handles '} // comment' scenario. no newline between return nnn no need to force newline on VAR - for (var x = 0... }, in lists get a newline The conditional starts the statement if appropriate. Semicolon can be the start (and end) of a statement OBJECT mode is weird and doesn't get reset too well. The conditional starts the statement if appropriate. One difference - strings want at least a space before The conditional starts the statement if appropriate. just got an '=' in a var-line, different line breaking rules will apply do not break on comma, for ( var a = 1, b = 2 EXPR or DO_BLOCK Check if this is a BlockStatement that should be treated as a ObjectLiteral The conditional starts the statement if appropriate. return had a special handling in TK_WORD hack for actionscript's import .*; no spaces around the exotic namespacing syntax operator http://www.ecma-international.org/ecma-262/5.1/sec-7.9.1 if there is a newline between -- or ++ and anything else we should preserve it. 
Allow line wrapping between operators in an expression for (;; ++i) ^^ { foo: --i } foo(): --bar block comment starts with a new line first line always indented javadoc: reformat and re-indent normal comments output raw The conditional starts the statement if appropriate. allow preserved newlines before dots in general force newlines on dots after close paren when break_chained - for bar().baz() Python >2.5 Success | 7,453 | en | 0.805807 |
'''Module to manage and advanced game state'''
from collections import defaultdict
import numpy as np
from . import constants
from . import characters
from . import utility
class ForwardModel(object):
"""Class for helping with the [forward] modeling of the game state."""
def run(self,
        num_times,
        board,
        agents,
        bombs,
        items,
        flames,
        is_partially_observable,
        agent_view_size,
        action_space,
        training_agent=None,
        is_communicative=False,
        game_type=None,
        game_env=None,
        max_steps=None):
    """Run the forward model.

    Args:
        num_times: The number of times to run it for. This is a maximum
            and it will stop early if we reach a done.
        board: The board state to run it from.
        agents: The agents to use to run it.
        bombs: The starting bombs.
        items: The starting items.
        flames: The starting flames.
        is_partially_observable: Whether the board is partially
            observable or not. Only applies to TeamRadio.
        agent_view_size: If it's partially observable, then the size of
            the square that the agent can view.
        action_space: The actions that each agent can take.
        training_agent: The training agent to pass to done.
        is_communicative: Whether the action depends on communication
            observations as well.
        game_type: The game type, forwarded to the observation / reward /
            done helpers (previously referenced but never defined here).
        game_env: The environment identifier forwarded into observations.
        max_steps: Step budget for the reward/done helpers; defaults to
            num_times.

    Returns:
        steps: The list of step results, which are each a dict of "obs",
            "next_obs", "reward", "actions".
        board: Updated board.
        agents: Updated agents, same models though.
        bombs: Updated bombs.
        items: Updated items.
        flames: Updated flames.
        done: Whether we completed the game in these steps.
        info: The result of the game if it's completed.
    """
    if max_steps is None:
        max_steps = num_times

    steps = []
    done = False
    info = None
    # BUG FIX: originally `for _ in num_times:`, which raises TypeError
    # because num_times is an integer count. The loop index doubles as
    # the step_count the helpers need.
    for step_count in range(num_times):
        obs = self.get_observations(
            board, agents, bombs, is_partially_observable, agent_view_size,
            game_type, game_env)
        actions = self.act(
            agents, obs, action_space, is_communicative=is_communicative)
        board, agents, bombs, items, flames = self.step(
            actions, board, agents, bombs, items, flames)
        next_obs = self.get_observations(
            board, agents, bombs, is_partially_observable, agent_view_size,
            game_type, game_env)
        reward = self.get_rewards(agents, game_type, step_count, max_steps)
        # Argument order matches get_done's signature:
        # (agents, step_count, max_steps, game_type, training_agent).
        done = self.get_done(agents, step_count, max_steps, game_type,
                             training_agent)
        info = self.get_info(done, reward, game_type, agents)

        steps.append({
            "obs": obs,
            "next_obs": next_obs,
            "reward": reward,
            "actions": actions,
        })

        if done:
            # Callback to let the agents know that the game has ended.
            for agent in agents:
                agent.episode_end(reward[agent.agent_id])
            break
    return steps, board, agents, bombs, items, flames, done, info
@staticmethod
def act(agents, obs, action_space, is_communicative=False):
    """Collect one action per agent.

    Args:
        agents: A list of agent objects.
        obs: A list of matching observations per agent.
        action_space: The action space for the environment using this model.
        is_communicative: Whether the action depends on communication
            observations as well.

    Returns a list of actions — plain move values, or three-element
    [move, word, word] lists when is_communicative is set.
    """
    def plain_action(agent):
        '''Handles agent's move without communication'''
        if not agent.is_alive:
            return constants.Action.Stop.value
        return agent.act(obs[agent.agent_id], action_space=action_space)

    def action_with_message(agent):
        '''Handles agent's move with communication'''
        if not agent.is_alive:
            return [constants.Action.Stop.value, 0, 0]
        action = agent.act(obs[agent.agent_id], action_space=action_space)
        if type(action) == int:
            # Pad a bare move with two empty communication words.
            action = [action] + [0, 0]
        assert (type(action) == list)
        return action

    chooser = action_with_message if is_communicative else plain_action
    return [chooser(agent) for agent in agents]
@staticmethod
def step(actions,
         curr_board,
         curr_agents,
         curr_bombs,
         curr_items,
         curr_flames,
         max_blast_strength=10):
    """Advance the game state by a single tick.

    Phases, in order: flame decay (revealing hidden items), desired-move
    collection for agents and moving bombs, position-swap and collision
    reverts, bomb kicking, bomb explosions with chained detonations, and
    finally writing bombs/flames/agents back onto the board.

    Args:
        actions: Per-agent actions, indexed by agent_id.
        curr_board: The current board (mutated in place).
        curr_agents: All agent characters, alive and dead.
        curr_bombs: The live bomb objects.
        curr_items: Dict mapping hidden-item position -> item value.
        curr_flames: The live flame objects.
        max_blast_strength: Cap passed to agent.pick_up for blast powerups.

    Returns:
        (curr_board, curr_agents, curr_bombs, curr_items, curr_flames)
        after the tick.
    """
    board_size = len(curr_board)

    # Tick the flames. Replace any dead ones with passages. If there is an
    # item there, then reveal that item.
    flames = []
    for flame in curr_flames:
        position = flame.position
        if flame.is_dead():
            item_value = curr_items.get(position)
            if item_value:
                del curr_items[position]
            else:
                item_value = constants.Item.Passage.value
            curr_board[position] = item_value
        else:
            flame.tick()
            flames.append(flame)
    curr_flames = flames

    # Redraw all current flames
    # Multiple flames may share a position and the map should contain
    # a flame until all flames are dead to avoid issues with bomb
    # movements and explosions.
    for flame in curr_flames:
        curr_board[flame.position] = constants.Item.Flames.value

    # Step the living agents and moving bombs.
    # If two agents try to go to the same spot, they should bounce back to
    # their previous spots. This is complicated with one example being when
    # there are three agents all in a row. If the one in the middle tries
    # to go to the left and bounces with the one on the left, and then the
    # one on the right tried to go to the middle one's position, she should
    # also bounce. A way of doing this is to gather all the new positions
    # before taking any actions. Then, if there are disputes, correct those
    # disputes iteratively.
    # Additionally, if two agents try to switch spots by moving into each
    # Figure out desired next position for alive agents
    alive_agents = [agent for agent in curr_agents if agent.is_alive]
    desired_agent_positions = [agent.position for agent in alive_agents]

    for num_agent, agent in enumerate(alive_agents):
        position = agent.position
        # We change the curr_board here as a safeguard. We will later
        # update the agent's new position.
        curr_board[position] = constants.Item.Passage.value
        action = actions[agent.agent_id]

        if action == constants.Action.Stop.value:
            pass
        elif action == constants.Action.Bomb.value:
            position = agent.position
            if not utility.position_is_bomb(curr_bombs, position):
                bomb = agent.maybe_lay_bomb()
                if bomb:
                    curr_bombs.append(bomb)
        elif utility.is_valid_direction(curr_board, position, action):
            desired_agent_positions[num_agent] = agent.get_next_position(
                action)

    # Gather desired next positions for moving bombs. Handle kicks later.
    desired_bomb_positions = [bomb.position for bomb in curr_bombs]

    for num_bomb, bomb in enumerate(curr_bombs):
        curr_board[bomb.position] = constants.Item.Passage.value
        if bomb.is_moving():
            desired_position = utility.get_next_position(
                bomb.position, bomb.moving_direction)
            if utility.position_on_board(curr_board, desired_position) \
                    and not utility.position_is_powerup(curr_board, desired_position) \
                    and not utility.position_is_wall(curr_board, desired_position):
                desired_bomb_positions[num_bomb] = desired_position

    # Position switches:
    # Agent <-> Agent => revert both to previous position.
    # Bomb <-> Bomb => revert both to previous position.
    # Agent <-> Bomb => revert Bomb to previous position.
    crossings = {}

    def crossing(current, desired):
        '''Checks to see if an agent is crossing paths'''
        # A move is keyed by the grid edge it traverses, so two movers
        # swapping places hash to the same border.
        current_x, current_y = current
        desired_x, desired_y = desired
        if current_x != desired_x:
            assert current_y == desired_y
            return ('X', min(current_x, desired_x), current_y)
        assert current_x == desired_x
        return ('Y', current_x, min(current_y, desired_y))

    for num_agent, agent in enumerate(alive_agents):
        if desired_agent_positions[num_agent] != agent.position:
            desired_position = desired_agent_positions[num_agent]
            border = crossing(agent.position, desired_position)
            if border in crossings:
                # Crossed another agent - revert both to prior positions.
                desired_agent_positions[num_agent] = agent.position
                num_agent2, _ = crossings[border]
                desired_agent_positions[num_agent2] = alive_agents[
                    num_agent2].position
            else:
                crossings[border] = (num_agent, True)

    for num_bomb, bomb in enumerate(curr_bombs):
        if desired_bomb_positions[num_bomb] != bomb.position:
            desired_position = desired_bomb_positions[num_bomb]
            border = crossing(bomb.position, desired_position)
            if border in crossings:
                # Crossed - revert to prior position.
                desired_bomb_positions[num_bomb] = bomb.position
                num, is_agent = crossings[border]
                if not is_agent:
                    # Crossed bomb - revert that to prior position as well.
                    desired_bomb_positions[num] = curr_bombs[num].position
            else:
                crossings[border] = (num_bomb, False)

    # Deal with multiple agents or multiple bomb collisions on desired next
    # position by resetting desired position to current position for
    # everyone involved in the collision.
    agent_occupancy = defaultdict(int)
    bomb_occupancy = defaultdict(int)
    for desired_position in desired_agent_positions:
        agent_occupancy[desired_position] += 1
    for desired_position in desired_bomb_positions:
        bomb_occupancy[desired_position] += 1

    # Resolve >=2 agents or >=2 bombs trying to occupy the same space.
    change = True
    while change:
        change = False
        for num_agent, agent in enumerate(alive_agents):
            desired_position = desired_agent_positions[num_agent]
            curr_position = agent.position
            # Either another agent is going to this position or more than
            # one bomb is going to this position. In both scenarios, revert
            # to the original position.
            if desired_position != curr_position and \
                    (agent_occupancy[desired_position] > 1 or bomb_occupancy[desired_position] > 1):
                desired_agent_positions[num_agent] = curr_position
                agent_occupancy[curr_position] += 1
                change = True

        for num_bomb, bomb in enumerate(curr_bombs):
            desired_position = desired_bomb_positions[num_bomb]
            curr_position = bomb.position
            if desired_position != curr_position and \
                    (bomb_occupancy[desired_position] > 1 or agent_occupancy[desired_position] > 1):
                desired_bomb_positions[num_bomb] = curr_position
                bomb_occupancy[curr_position] += 1
                change = True

    # Handle kicks.
    agent_indexed_by_kicked_bomb = {}
    kicked_bomb_indexed_by_agent = {}
    delayed_bomb_updates = []
    delayed_agent_updates = []

    # Loop through all bombs to see if they need a good kicking or cause
    # collisions with an agent.
    for num_bomb, bomb in enumerate(curr_bombs):
        desired_position = desired_bomb_positions[num_bomb]

        if agent_occupancy[desired_position] == 0:
            # There was never an agent around to kick or collide.
            continue

        agent_list = [
            (num_agent, agent) for (num_agent, agent) in enumerate(alive_agents) \
            if desired_position == desired_agent_positions[num_agent]]
        if not agent_list:
            # Agents moved from collision.
            continue

        # The agent_list should contain a single element at this point.
        assert (len(agent_list) == 1)
        num_agent, agent = agent_list[0]

        if desired_position == agent.position:
            # Agent did not move
            if desired_position != bomb.position:
                # Bomb moved, but agent did not. The bomb should revert
                # and stop.
                delayed_bomb_updates.append((num_bomb, bomb.position))
            continue

        # NOTE: At this point, we have that the agent in question tried to
        # move into this position.
        if not agent.can_kick:
            # If we move the agent at this point, then we risk having two
            # agents on a square in future iterations of the loop. So we
            # push this change to the next stage instead.
            delayed_bomb_updates.append((num_bomb, bomb.position))
            delayed_agent_updates.append((num_agent, agent.position))
            continue

        # Agent moved and can kick - see if the target for the kick never had anyhing on it
        direction = constants.Action(actions[agent.agent_id])
        target_position = utility.get_next_position(desired_position,
                                                    direction)
        if utility.position_on_board(curr_board, target_position) and \
                agent_occupancy[target_position] == 0 and \
                bomb_occupancy[target_position] == 0 and \
                not utility.position_is_powerup(curr_board, target_position) and \
                not utility.position_is_wall(curr_board, target_position):
            # Ok to update bomb desired location as we won't iterate over it again here
            # but we can not update bomb_occupancy on target position and need to check it again
            # However we need to set the bomb count on the current position to zero so
            # that the agent can stay on this position.
            bomb_occupancy[desired_position] = 0
            delayed_bomb_updates.append((num_bomb, target_position))
            agent_indexed_by_kicked_bomb[num_bomb] = num_agent
            kicked_bomb_indexed_by_agent[num_agent] = num_bomb
            bomb.moving_direction = direction
            # Bombs may still collide and we then need to reverse bomb and agent ..
        else:
            delayed_bomb_updates.append((num_bomb, bomb.position))
            delayed_agent_updates.append((num_agent, agent.position))

    for (num_bomb, bomb_position) in delayed_bomb_updates:
        desired_bomb_positions[num_bomb] = bomb_position
        bomb_occupancy[bomb_position] += 1
        change = True

    for (num_agent, agent_position) in delayed_agent_updates:
        desired_agent_positions[num_agent] = agent_position
        agent_occupancy[agent_position] += 1
        change = True

    # Second fixed-point pass: kicks may have created fresh conflicts.
    while change:
        change = False
        for num_agent, agent in enumerate(alive_agents):
            desired_position = desired_agent_positions[num_agent]
            curr_position = agent.position
            # Agents and bombs can only share a square if they are both in their
            # original position (Agent dropped bomb and has not moved)
            if desired_position != curr_position and \
                    (agent_occupancy[desired_position] > 1 or bomb_occupancy[desired_position] != 0):
                # Late collisions resulting from failed kicks force this agent to stay at the
                # original position. Check if this agent successfully kicked a bomb above and undo
                # the kick.
                if num_agent in kicked_bomb_indexed_by_agent:
                    num_bomb = kicked_bomb_indexed_by_agent[num_agent]
                    bomb = curr_bombs[num_bomb]
                    desired_bomb_positions[num_bomb] = bomb.position
                    bomb_occupancy[bomb.position] += 1
                    del agent_indexed_by_kicked_bomb[num_bomb]
                    del kicked_bomb_indexed_by_agent[num_agent]
                desired_agent_positions[num_agent] = curr_position
                agent_occupancy[curr_position] += 1
                change = True

        for num_bomb, bomb in enumerate(curr_bombs):
            desired_position = desired_bomb_positions[num_bomb]
            curr_position = bomb.position

            # This bomb may be a boomerang, i.e. it was kicked back to the
            # original location it moved from. If it is blocked now, it
            # can't be kicked and the agent needs to move back to stay
            # consistent with other movements.
            if desired_position == curr_position and num_bomb not in agent_indexed_by_kicked_bomb:
                continue

            bomb_occupancy_ = bomb_occupancy[desired_position]
            agent_occupancy_ = agent_occupancy[desired_position]
            # Agents and bombs can only share a square if they are both in their
            # original position (Agent dropped bomb and has not moved)
            if bomb_occupancy_ > 1 or agent_occupancy_ != 0:
                desired_bomb_positions[num_bomb] = curr_position
                bomb_occupancy[curr_position] += 1
                num_agent = agent_indexed_by_kicked_bomb.get(num_bomb)
                if num_agent is not None:
                    # Undo the kick for the agent that kicked this bomb.
                    agent = alive_agents[num_agent]
                    desired_agent_positions[num_agent] = agent.position
                    agent_occupancy[agent.position] += 1
                    del kicked_bomb_indexed_by_agent[num_agent]
                    del agent_indexed_by_kicked_bomb[num_bomb]
                change = True

    for num_bomb, bomb in enumerate(curr_bombs):
        if desired_bomb_positions[num_bomb] == bomb.position and \
                not num_bomb in agent_indexed_by_kicked_bomb:
            # Bomb was not kicked this turn and its desired position is its
            # current location. Stop it just in case it was moving before.
            bomb.stop()
        else:
            # Move bomb to the new position.
            # NOTE: We already set the moving direction up above.
            bomb.position = desired_bomb_positions[num_bomb]

    for num_agent, agent in enumerate(alive_agents):
        if desired_agent_positions[num_agent] != agent.position:
            agent.move(actions[agent.agent_id])
            if utility.position_is_powerup(curr_board, agent.position):
                agent.pick_up(
                    constants.Item(curr_board[agent.position]),
                    max_blast_strength=max_blast_strength)

    # Explode bombs.
    exploded_map = np.zeros_like(curr_board)
    has_new_explosions = False

    for bomb in curr_bombs:
        bomb.tick()
        if bomb.exploded():
            has_new_explosions = True
        elif curr_board[bomb.position] == constants.Item.Flames.value:
            bomb.fire()
            has_new_explosions = True

    # Chain the explosions.
    while has_new_explosions:
        next_bombs = []
        has_new_explosions = False
        for bomb in curr_bombs:
            if not bomb.exploded():
                next_bombs.append(bomb)
                continue

            bomb.bomber.incr_ammo()
            for _, indices in bomb.explode().items():
                for r, c in indices:
                    # Blast rays stop at the board edge and at rigid
                    # walls, and pass through at most one wood tile.
                    if not all(
                            [r >= 0, c >= 0, r < board_size, c < board_size]):
                        break
                    if curr_board[r][c] == constants.Item.Rigid.value:
                        break
                    exploded_map[r][c] = 1
                    if curr_board[r][c] == constants.Item.Wood.value:
                        break

        curr_bombs = next_bombs
        for bomb in curr_bombs:
            if bomb.in_range(exploded_map):
                bomb.fire()
                has_new_explosions = True

    # Update the board's bombs.
    for bomb in curr_bombs:
        curr_board[bomb.position] = constants.Item.Bomb.value

    # Update the board's flames.
    flame_positions = np.where(exploded_map == 1)
    for row, col in zip(flame_positions[0], flame_positions[1]):
        curr_flames.append(characters.Flame((row, col)))
    for flame in curr_flames:
        curr_board[flame.position] = constants.Item.Flames.value

    # Kill agents on flames. Otherwise, update position on curr_board.
    for agent in alive_agents:
        if curr_board[agent.position] == constants.Item.Flames.value:
            agent.die()
        else:
            curr_board[agent.position] = utility.agent_value(agent.agent_id)

    return curr_board, curr_agents, curr_bombs, curr_items, curr_flames
def get_observations(self, curr_board, agents, bombs,
                     is_partially_observable, agent_view_size,
                     game_type, game_env):
    """Build one observation dict per agent for the current state.

    Each observation carries the (possibly fogged) board, per-cell bomb
    blast-strength/life maps, the shared alive list and a fixed set of
    agent attributes. Whether to remember fogged squares is up to the
    agent.
    """
    board_size = len(curr_board)

    def visible(center, tile_row, tile_col):
        '''True when (tile_row, tile_col) is inside the view square around center.'''
        center_row, center_col = center
        return (abs(center_row - tile_row) <= agent_view_size
                and abs(center_col - tile_col) <= agent_view_size)

    def bomb_maps(center):
        '''Board-sized arrays of blast strength and life for bombs visible from center.'''
        strength_map = np.zeros((board_size, board_size))
        life_map = np.zeros((board_size, board_size))
        for bomb in bombs:
            bomb_row, bomb_col = bomb.position
            if not is_partially_observable \
                    or visible(center, bomb_row, bomb_col):
                strength_map[(bomb_row, bomb_col)] = bomb.blast_strength
                life_map[(bomb_row, bomb_col)] = bomb.life
        return strength_map, life_map

    copied_attrs = ['position', 'blast_strength', 'can_kick', 'teammate',
                    'ammo', 'enemies']
    # Board values of every living agent; the same list object is shared
    # by all observations.
    alive_values = [utility.agent_value(agent.agent_id)
                    for agent in agents
                    if agent.is_alive]

    observations = []
    for agent in agents:
        obs = {'alive': alive_values}
        board = curr_board
        if is_partially_observable:
            # Work on a copy and blank out everything outside the view square.
            board = board.copy()
            for row in range(board_size):
                for col in range(board_size):
                    if not visible(agent.position, row, col):
                        board[row, col] = constants.Item.Fog.value
        obs['board'] = board
        strength_map, life_map = bomb_maps(agent.position)
        obs['bomb_blast_strength'] = strength_map
        obs['bomb_life'] = life_map
        obs['game_type'] = game_type.value
        obs['game_env'] = game_env
        for attr in copied_attrs:
            assert hasattr(agent, attr)
            obs[attr] = getattr(agent, attr)
        observations.append(obs)
    return observations
@staticmethod
def get_done(agents, step_count, max_steps, game_type, training_agent):
# print('get_done called...', training_agent)
alive = [agent for agent in agents if agent.is_alive]
alive_ids = sorted([agent.agent_id for agent in alive])
if step_count >= max_steps:
print('gameover : max timestep over')
return True
elif game_type == constants.GameType.FFA:
if training_agent is not None and training_agent not in alive_ids:
print('gameover : ffa training_agent has died')
return True
if len(alive) <= 1:
print('checkout : ffa only %s player survived' % len(alive))
return len(alive) <= 1
elif len(alive_ids) <= 1:
print('gameover : only one player survived')
return True
elif alive_ids == [0, 2]:
print('gameover : [0,2] team won')
return True
elif any([ alive_ids == [1, 3] ]):
print('gameover : [1,3] team won')
return True
return False
@staticmethod
def get_info(done, rewards, game_type, agents):
    """Summarize the game outcome as an info dict.

    Returns Result.Incomplete while running, Result.Tie on a draw, or
    Result.Win plus the winner indices (agents whose reward is 1).
    """
    if game_type == constants.GameType.FFA:
        if not done:
            return {
                'result': constants.Result.Incomplete,
            }
        survivors = [agent for agent in agents if agent.is_alive]
        if len(survivors) != 1:
            # Either several alive at max steps, or zero alive because
            # the last agents died simultaneously.
            return {
                'result': constants.Result.Tie,
            }
        return {
            'result': constants.Result.Win,
            'winners': [num for num, reward in enumerate(rewards)
                        if reward == 1]
        }
    if not done:
        return {
            'result': constants.Result.Incomplete,
        }
    # Team game that has finished.
    if rewards == [-1] * 4:
        return {
            'result': constants.Result.Tie,
        }
    return {
        'result': constants.Result.Win,
        'winners': [num for num, reward in enumerate(rewards)
                    if reward == 1],
    }
@staticmethod
def get_rewards(agents, game_type, step_count, max_steps):
    """Compute the per-agent reward list for the current state.

    FFA: +1 for the sole survivor and -1 for everyone else; -1 for all
    on timeout; while running, 0 for alive and -1 for dead agents.
    Team: +/-1 per team once a team has won, -1 for all on a timeout or
    total wipeout, otherwise 0 for all.
    """
    # BUG FIX: a debug print here referenced `self.training_agent`,
    # which does not exist inside a @staticmethod and raised NameError
    # on every call; it has been removed.

    def any_lst_equal(lst, values):
        '''True when lst equals any of the candidate lists.'''
        return any([lst == v for v in values])

    alive_agents = [num for num, agent in enumerate(agents)
                    if agent.is_alive]
    if game_type == constants.GameType.FFA:
        if len(alive_agents) == 1:
            # An agent won. Give them +1, others -1.
            return [2 * int(agent.is_alive) - 1 for agent in agents]
        elif step_count >= max_steps:
            # Game is over from time. Everyone gets -1.
            return [-1] * 4
        else:
            # Game running: 0 for alive, -1 for dead.
            return [int(agent.is_alive) - 1 for agent in agents]
    else:
        # We are playing a team game.
        if any_lst_equal(alive_agents, [[0, 2], [0], [2]]):
            # Team [0, 2] wins.
            return [1, -1, 1, -1]
        elif any_lst_equal(alive_agents, [[1, 3], [1], [3]]):
            # Team [1, 3] wins.
            return [-1, 1, -1, 1]
        elif step_count >= max_steps or len(alive_agents) == 0:
            # Timeout, or everyone is dead: all agents tie.
            return [-1] * 4
        else:
            # No team has yet won or lost.
            return [0] * 4
| pommerman/forward_model.py | 29,143 | Class for helping with the [forward] modeling of the game state.
Returns actions for each agent in this list.
Args:
agents: A list of agent objects.
obs: A list of matching observations per agent.
action_space: The action space for the environment using this model.
is_communicative: Whether the action depends on communication
observations as well.
Returns a list of actions.
Handles agent's move without communication
Handles agent's move with communication
Checks if lists are equal
Checks to see if an agent is crossing paths
Gets the observations as an np.array of the visible squares.
The agent gets to choose whether it wants to keep the fogged part in
memory.
Checks to see if a tile is in an agents viewing area
Makes an array of an agents bombs and the bombs attributes
Run the forward model.
Args:
num_times: The number of times to run it for. This is a maximum and
it will stop early if we reach a done.
board: The board state to run it from.
agents: The agents to use to run it.
bombs: The starting bombs.
items: The starting items.
flames: The starting flames.
is_partially_observable: Whether the board is partially observable or
not. Only applies to TeamRadio.
agent_view_size: If it's partially observable, then the size of the
square that the agent can view.
action_space: The actions that each agent can take.
training_agent: The training agent to pass to done.
is_communicative: Whether the action depends on communication
observations as well.
Returns:
steps: The list of step results, which are each a dict of "obs",
"next_obs", "reward", "action".
board: Updated board.
agents: Updated agents, same models though.
bombs: Updated bombs.
items: Updated items.
flames: Updated flames.
done: Whether we completed the game in these steps.
info: The result of the game if it's completed.
Module to manage and advanced game state
Callback to let the agents know that the game has ended. Tick the flames. Replace any dead ones with passages. If there is an item there, then reveal that item. Redraw all current flames Multiple flames may share a position and the map should contain a flame until all flames are dead to avoid issues with bomb movements and explosions. Step the living agents and moving bombs. If two agents try to go to the same spot, they should bounce back to their previous spots. This is complicated with one example being when there are three agents all in a row. If the one in the middle tries to go to the left and bounces with the one on the left, and then the one on the right tried to go to the middle one's position, she should also bounce. A way of doing this is to gather all the new positions before taking any actions. Then, if there are disputes, correct those disputes iteratively. Additionally, if two agents try to switch spots by moving into each Figure out desired next position for alive agents We change the curr_board here as a safeguard. We will later update the agent's new position. Gather desired next positions for moving bombs. Handle kicks later. Position switches: Agent <-> Agent => revert both to previous position. Bomb <-> Bomb => revert both to previous position. Agent <-> Bomb => revert Bomb to previous position. Crossed another agent - revert both to prior positions. Crossed - revert to prior position. Crossed bomb - revert that to prior position as well. Deal with multiple agents or multiple bomb collisions on desired next position by resetting desired position to current position for everyone involved in the collision. Resolve >=2 agents or >=2 bombs trying to occupy the same space. Either another agent is going to this position or more than one bomb is going to this position. In both scenarios, revert to the original position. Handle kicks. Loop through all bombs to see if they need a good kicking or cause collisions with an agent. 
There was never an agent around to kick or collide. Agents moved from collision. The agent_list should contain a single element at this point. Agent did not move Bomb moved, but agent did not. The bomb should revert and stop. NOTE: At this point, we have that the agent in question tried to move into this position. If we move the agent at this point, then we risk having two agents on a square in future iterations of the loop. So we push this change to the next stage instead. Agent moved and can kick - see if the target for the kick never had anyhing on it Ok to update bomb desired location as we won't iterate over it again here but we can not update bomb_occupancy on target position and need to check it again However we need to set the bomb count on the current position to zero so that the agent can stay on this position. Bombs may still collide and we then need to reverse bomb and agent .. Agents and bombs can only share a square if they are both in their original position (Agent dropped bomb and has not moved) Late collisions resulting from failed kicks force this agent to stay at the original position. Check if this agent successfully kicked a bomb above and undo the kick. This bomb may be a boomerang, i.e. it was kicked back to the original location it moved from. If it is blocked now, it can't be kicked and the agent needs to move back to stay consistent with other movements. Agents and bombs can only share a square if they are both in their original position (Agent dropped bomb and has not moved) Bomb was not kicked this turn and its desired position is its current location. Stop it just in case it was moving before. Move bomb to the new position. NOTE: We already set the moving direction up above. Explode bombs. Chain the explosions. Update the board's bombs. Update the board's flames. Kill agents on flames. Otherwise, update position on curr_board. 
print('get_done called...', training_agent) Either we have more than 1 alive (reached max steps) or we have 0 alive (last agents died at the same time). We are playing a team game. An agent won. Give them +1, others -1. Game is over from time. Everyone gets -1. Game running: 0 for alive, -1 for dead. We are playing a team game. Team [0, 2] wins. Team [1, 3] wins. Game is over by max_steps. All agents tie. Everyone's dead. All agents tie. No team has yet won or lost. | 6,254 | en | 0.940042 |
import copy
import datetime
import glob
import json
import os
import sys
import threading
from os import path
from urllib.parse import urlparse, urljoin, ParseResult
import xmltodict
import yaml
from bs4 import BeautifulSoup
from flask import Flask, render_template, Response, send_from_directory, request
from flask.views import View
from flask.helpers import url_for, send_file, make_response
from flask_frozen import Freezer, walk_directory
from hashlib import md5
from yaml import FullLoader
from src.Feature import Feature
from src.dist import get_dist_pages
from src.github import assert_valid_git_hub_url
from src.navigation import process_video_nav, process_nav, get_current_url
from src.api import get_api_page
from src.encoder import DateAwareEncoder
from src.externals import process_nav_includes
from src.grammar import get_grammar
from src.markdown.makrdown import jinja_aware_markdown
from src.pages.MyFlatPages import MyFlatPages
from src.pdf import generate_pdf
from src.processors.processors import process_code_blocks
from src.processors.processors import set_replace_simple_code
from src.search import build_search_indices
from src.sitemap import generate_sitemap, generate_temporary_sitemap
from src.ktl_components import KTLComponentExtension
# Flask application; static files are served from the _assets folder.
app = Flask(__name__, static_folder='_assets')
app.config.from_pyfile('mysettings.py')
# Trim/lstrip keep Jinja templates from emitting stray blank lines.
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True
pages = MyFlatPages(app)
freezer = Freezer(app)
# Build flags; overridden by the command-line handling in __main__ below.
ignore_stdlib = False
build_mode = False
build_contenteditable = False
build_check_links = True
build_errors = []  # accumulated link/anchor validation messages
# URL adapter used to resolve internal links during validation.
url_adapter = app.create_url_adapter(None)
root_folder = path.join(os.path.dirname(__file__))
data_folder = path.join(os.path.dirname(__file__), "data")
# Navigation cache (populated in build mode only) and its guard lock.
_nav_cache = None
_nav_lock = threading.RLock()
_cached_asset_version = {}  # filename -> md5 digest, for the autoversion filter
def get_asset_version(filename):
    """Return the md5 hex digest of an asset file, memoized per filename.

    Returns None when the filename is empty or the file does not exist.
    """
    cached = _cached_asset_version.get(filename)
    if cached is not None:
        return cached
    base = root_folder if root_folder else "."
    filepath = base + filename
    if not filename or not path.exists(filepath):
        return None
    with open(filepath, 'rb') as asset_file:
        digest = md5(asset_file.read()).hexdigest()
    _cached_asset_version[filename] = digest
    return digest
def get_site_data():
    """Load every top-level *.yml file from the data folder into one dict.

    Keys are file names without the .yml extension; files starting with
    an underscore are skipped. A parse or read failure is reported on
    stderr and aborts the process.
    """
    data = {}
    for data_file in os.listdir(data_folder):
        if data_file.startswith('_') or not data_file.endswith(".yml"):
            continue
        data_file_path = path.join(data_folder, data_file)
        with open(data_file_path, encoding="UTF-8") as stream:
            try:
                key = data_file[:-4] if data_file.endswith(".yml") else data_file
                data[key] = yaml.load(stream, Loader=FullLoader)
            except yaml.YAMLError as exc:
                sys.stderr.write('Cant parse data file ' + data_file + ': ')
                sys.stderr.write(str(exc))
                sys.exit(-1)
            except IOError as exc:
                sys.stderr.write('Cant read data file ' + data_file + ': ')
                sys.stderr.write(str(exc))
                sys.exit(-1)
    return data
site_data = get_site_data()
def get_nav():
    """Return a deep copy of the navigation tree, adjusted for the current request.

    In build mode the parsed tree is cached module-wide (access guarded by
    _nav_lock); in dev mode it is re-read on every call so edits to
    data/_nav.yml show up without restarting the server.
    """
    global _nav_cache
    global _nav_lock
    with _nav_lock:
        if _nav_cache is not None:
            nav = _nav_cache
        else:
            nav = get_nav_impl()
        # Hand out a copy so per-request processing never mutates the cache.
        nav = copy.deepcopy(nav)
        if build_mode:
            _nav_cache = copy.deepcopy(nav)
    # NOTE. This call depends on `request.path`, cannot cache
    process_nav(request.path, nav)
    return nav
def get_nav_impl():
    """Parse data/_nav.yml and expand any external nav includes."""
    nav_file = path.join(data_folder, "_nav.yml")
    with open(nav_file) as stream:
        parsed = yaml.load(stream, Loader=FullLoader)
    return process_nav_includes(build_mode, parsed)
def get_kotlin_features():
    """Load the Kotlin feature highlights: YAML metadata plus rendered content.

    Markdown content files are rendered to HTML and passed through the
    code-block processor; other files are used verbatim (with newlines
    normalized).
    """
    features_dir = path.join(os.path.dirname(__file__), "kotlin-features")
    features = []
    meta_path = path.join(features_dir, "kotlin-features.yml")
    # FIX: the original `yaml.load(open(...))` leaked the file handle and
    # used bare yaml.load without a Loader (deprecated/unsafe default).
    with open(meta_path, encoding='utf-8') as meta_stream:
        features_meta = yaml.load(meta_stream, Loader=FullLoader)
    for feature_meta in features_meta:
        file_path = path.join(features_dir, feature_meta['content_file'])
        with open(file_path, encoding='utf-8') as f:
            content = f.read()
        content = content.replace("\r\n", "\n")
        if file_path.endswith(".md"):
            html_content = BeautifulSoup(jinja_aware_markdown(content, pages), 'html.parser')
            content = process_code_blocks(html_content)
        features.append(Feature(content, feature_meta))
    return features
@app.context_processor
def add_year_to_context():
    """Expose the current year to every template (e.g. for the footer)."""
    current_year = datetime.datetime.now().year
    return {'year': current_year}
app.jinja_env.add_extension(KTLComponentExtension)
@app.context_processor
def add_data_to_context():
    """Inject navigation, site data and config-derived values into every template."""
    nav = get_nav()
    return {
        'nav': nav,
        'data': site_data,
        'site': {
            'pdf_url': app.config['PDF_URL'],
            'forum_url': app.config['FORUM_URL'],
            'site_github_url': app.config['SITE_GITHUB_URL'],
            'data': site_data,
            'text_using_gradle': app.config['TEXT_USING_GRADLE'],
            'code_baseurl': app.config['CODE_URL'],
            'contenteditable': build_contenteditable
        },
        # Current top-nav entry, derived from the subnav content tree.
        'headerCurrentUrl': get_current_url(nav['subnav']['content'])
    }
@app.template_filter('get_domain')
def get_domain(url):
    """Template filter: return the host (netloc) component of a URL."""
    return urlparse(url).netloc
app.jinja_env.globals['get_domain'] = get_domain
@app.template_filter('split_chunk')
def split_chunk(list, size):
    """Template filter: split a sequence into consecutive chunks of `size`.

    NOTE(review): the first parameter shadows the builtin `list`; the
    name is kept for compatibility with any template using it by keyword.
    """
    # range(0, len(list), size) is the direct form of the original
    # range(len(list))[::size] and yields identical chunk starts.
    return [list[i:i + size] for i in range(0, len(list), size)]
app.jinja_env.globals['split_chunk'] = split_chunk
@app.template_filter('autoversion')
def autoversion_filter(filename):
    """Template filter: append v=<md5-digest> to an asset URL for cache busting.

    Returns the filename unchanged when no digest is available.
    """
    asset_version = get_asset_version(filename)
    if asset_version is None:
        return filename
    parts = urlparse(filename)._asdict()
    # FIX: only join with '&' when a query string already exists; the
    # original unconditionally prepended '&', producing URLs such as
    # "style.css?&v=<digest>".
    existing_query = parts.get('query')
    version_param = 'v=' + asset_version
    parts.update(query=existing_query + '&' + version_param if existing_query
                 else version_param)
    return ParseResult(**parts).geturl()
@app.route('/data/events.json')
def get_events():
    """Serve community events as JSON, converted from data/events.xml."""
    with open(path.join(data_folder, "events.xml"), encoding="UTF-8") as events_file:
        events = xmltodict.parse(events_file.read())['events']['event']
    return Response(json.dumps(events, cls=DateAwareEncoder), mimetype='application/json')
@app.route('/data/cities.json')
def get_cities():
    """Serve the cities data set (loaded from data/cities.yml) as JSON."""
    return Response(json.dumps(site_data['cities'], cls=DateAwareEncoder), mimetype='application/json')
@app.route('/data/kotlinconf.json')
def get_kotlinconf():
    """Serve the KotlinConf data set as JSON."""
    return Response(json.dumps(site_data['kotlinconf'], cls=DateAwareEncoder), mimetype='application/json')
@app.route('/data/universities.json')
def get_universities():
    """Serve the universities data set as JSON."""
    return Response(json.dumps(site_data['universities'], cls=DateAwareEncoder), mimetype='application/json')
@app.route('/data/user-groups.json')
def get_user_groups():
    """Serve the user-groups data set as JSON."""
    return Response(json.dumps(site_data['user-groups'], cls=DateAwareEncoder), mimetype='application/json')
@app.route('/docs/reference/grammar.html')
def grammar():
    """Render the Kotlin grammar reference; 404 when the grammar file is absent."""
    grammar = get_grammar(build_mode)
    if grammar is None:
        return "Grammar file not found", 404
    return render_template('pages/grammar.html', kotlinGrammar=grammar)
@app.route('/docs/videos.html')
def videos_page():
    """Render the videos page from the videos data set."""
    return render_template('pages/videos.html', videos=process_video_nav(site_data['videos']))
@app.route('/docs/kotlin-reference.pdf')
def kotlin_reference_pdf():
    """Serve the pre-generated language reference PDF."""
    return send_file(path.join(root_folder, "assets", "kotlin-reference.pdf"))
@app.route('/docs/kotlin-docs.pdf')
def kotlin_docs_pdf():
    """Serve the docs PDF (currently the same file as the reference PDF)."""
    # NOTE(review): serves kotlin-reference.pdf under the kotlin-docs.pdf
    # URL -- confirm this aliasing is deliberate.
    return send_file(path.join(root_folder, "assets", "kotlin-reference.pdf"))
@app.route('/community/')
def community_page():
    """Render the community landing page."""
    return render_template('pages/community.html')
@app.route('/user-groups/user-group-list.html')
def user_group_list():
    """Render the user-group list with the total group count across all sections."""
    return render_template(
        'pages/user-groups/user-group-list.html',
        user_groups_data=site_data['user-groups'],
        number_of_groups=sum(map(lambda section: len(section['groups']), site_data['user-groups'])))
@app.route('/education/')
def education_page():
    """Render the education landing page."""
    return render_template('pages/education/index.html')
@app.route('/')
def index_page():
    """Render the site front page with the Kotlin feature highlights."""
    features = get_kotlin_features()
    return render_template('pages/index.html',
                           is_index_page=True,
                           features=features
                           )
def process_page(page_path):
    """Render a flat page: handle redirects, date formatting, the GitHub
    edit link and layout selection.

    Raises 404 (via get_or_404) when the page does not exist; validates
    outgoing links when build_check_links is set.
    """
    # get_nav() has side effect to copy and patch files from the `external` folder
    # under site folder. We need it for dev mode to make sure file is up-to-date
    # TODO: extract get_nav and implement the explicit way to avoid side-effects
    get_nav()
    page = pages.get_or_404(page_path)
    # A page may declare `redirect_path` in its front matter; render a
    # client-side redirect instead of the page itself.
    if 'redirect_path' in page.meta and page.meta['redirect_path'] is not None:
        page_path = page.meta['redirect_path']
        if page_path.startswith('https://') or page_path.startswith('http://'):
            return render_template('redirect.html', url=page_path)
        else:
            return render_template('redirect.html', url=url_for('page', page_path = page_path))
    # Human-readable date, stripping a leading zero from the day.
    if 'date' in page.meta and page['date'] is not None:
        page.meta['formatted_date'] = page.meta['date'].strftime('%d %B %Y')
        if page.meta['formatted_date'].startswith('0'):
            page.meta['formatted_date'] = page.meta['formatted_date'][1:]
    if 'github_edit_url' in page.meta:
        edit_on_github_url = page.meta['github_edit_url']
    else:
        edit_on_github_url = app.config['EDIT_ON_GITHUB_URL'] + app.config['FLATPAGES_ROOT'] + "/" + page_path + \
                             app.config['FLATPAGES_EXTENSION']
    assert_valid_git_hub_url(edit_on_github_url, page_path)
    template = page.meta["layout"] if 'layout' in page.meta else 'default.html'
    if not template.endswith(".html"):
        template += ".html"
    if build_check_links:
        validate_links_weak(page, page_path)
    return render_template(
        template,
        page=page,
        baseurl="",
        edit_on_github_url=edit_on_github_url,
    )
def validate_links_weak(page, page_path):
    """Validate intra-site links and anchors found on a rendered page.

    Appends human-readable messages to the shared build_errors list;
    outside build mode the accumulated errors are raised immediately as
    a single exception (and the list is cleared).
    """
    for link in page.parsed_html.select('a'):
        if 'href' not in link.attrs:
            continue
        href = urlparse(urljoin('/' + page_path, link['href']))
        if href.scheme != '':
            # External link (http/https/mailto/...) -- not validated here.
            continue
        endpoint, params = url_adapter.match(href.path, 'GET', query_args={})
        if endpoint != 'page' and endpoint != 'get_index_page':
            # Non-flatpage endpoint: issue a test request to confirm it resolves.
            response = app.test_client().get(href.path)
            if response.status_code == 404:
                build_errors.append("Broken link: " + str(href.path) + " on page " + page_path)
            continue
        referenced_page = pages.get(params['page_path'])
        if referenced_page is None:
            build_errors.append("Broken link: " + str(href.path) + " on page " + page_path)
            continue
        if href.fragment == '':
            continue
        # Collect every anchor target (heading ids and <a name=...>) on the
        # referenced page so the fragment can be checked against them.
        ids = []
        for x in referenced_page.parsed_html.select('h1,h2,h3,h4'):
            try:
                ids.append(x['id'])
            except KeyError:
                pass
        for x in referenced_page.parsed_html.select('a'):
            try:
                ids.append(x['name'])
            except KeyError:
                pass
        if href.fragment not in ids:
            build_errors.append("Bad anchor: " + str(href.fragment) + " on page " + page_path)
    if not build_mode and len(build_errors) > 0:
        # Dev mode: fail fast, clearing the shared error list first.
        errors_copy = []
        for item in build_errors:
            errors_copy.append(item)
        build_errors.clear()
        raise Exception("Validation errors " + str(len(errors_copy)) + ":\n\n" +
                        "\n".join(str(item) for item in errors_copy))
@freezer.register_generator
def page():
    """Freezer generator: yield URL params for every flat page."""
    for page in pages:
        yield {'page_path': page.path}
@app.route('/<path:page_path>.html')
def page(page_path):
    """Serve a flat page by path.

    Deliberately reuses the generator's name above: Frozen-Flask pairs
    generators with routes by endpoint name.
    """
    return process_page(page_path)
@app.route('/404.html')
def page_404():
    """Render the static 404 page (frozen into the build output)."""
    return render_template('pages/404.html')
@freezer.register_generator
def api_page():
    """Freezer generator: yield every file under api/ as a page path."""
    api_folder = path.join(root_folder, 'api')
    for root, dirs, files in os.walk(api_folder):
        for file in files:
            # Normalize to forward slashes so paths work as URLs.
            yield {'page_path': path.join(path.relpath(root, api_folder), file).replace(os.sep, '/')}
class RedirectTemplateView(View):
    """Flask view rendering a client-side redirect to a fixed URL."""
    def __init__(self, url):
        # Target URL baked in when the view is registered.
        self.redirect_url = url
    def dispatch_request(self):
        return render_template('redirect.html', url=self.redirect_url)
def generate_redirect_pages():
    """Register a redirect view for every entry in the redirects/*.yml files.

    Each entry maps one or more `from` URLs to a single `to` URL. Read or
    parse failures are reported on stderr and abort the process.
    """
    redirects_folder = path.join(root_folder, 'redirects')
    for root, dirs, files in os.walk(redirects_folder):
        for file in files:
            if not file.endswith(".yml"):
                continue
            redirects_file_path = path.join(redirects_folder, file)
            with open(redirects_file_path, encoding="UTF-8") as stream:
                try:
                    redirects = yaml.load(stream, Loader=FullLoader)
                    for entry in redirects:
                        url_to = entry["to"]
                        url_from = entry["from"]
                        # `from` may be a single URL or a list of URLs.
                        url_list = url_from if isinstance(url_from, list) else [url_from]
                        for url in url_list:
                            # The source URL doubles as the unique view name.
                            app.add_url_rule(url, view_func=RedirectTemplateView.as_view(url, url=url_to))
                except yaml.YAMLError as exc:
                    sys.stderr.write('Cant parse data file ' + file + ': ')
                    sys.stderr.write(str(exc))
                    sys.exit(-1)
                except IOError as exc:
                    sys.stderr.write('Cant read data file ' + file + ': ')
                    sys.stderr.write(str(exc))
                    sys.exit(-1)
@app.errorhandler(404)
def page_not_found(e):
    """Render the custom 404 page for any unmatched URL."""
    return render_template('pages/404.html'), 404
app.register_error_handler(404, page_not_found)
@app.route('/api/<path:page_path>')
def api_page(page_path):
    """Serve API-reference pages and raw files from the api/ folder.

    .html paths render through the API template; package-list and other
    extensions are served verbatim; extension-less paths fall back to
    <path>/index.
    """
    path_other, ext = path.splitext(page_path)
    if ext == '.html':
        return process_api_page(page_path[:-5])
    elif path.basename(page_path) == "package-list" or ext:
        # NOTE(review): any non-.html extension takes this branch, not just
        # package-list -- presumably intentional for raw assets; confirm.
        return respond_with_package_list(page_path)
    elif not page_path.endswith('/'):
        page_path += '/'
    return process_api_page(page_path + 'index')
def process_api_page(page_path):
    """Render a single API page through the api.html template."""
    return render_template(
        'api.html',
        page=get_api_page(build_mode, page_path)
    )
def respond_with_package_list(page_path):
    """Send a raw file from the api/ folder as text/plain, or 404."""
    file_path = path.join(root_folder, 'api', page_path)
    if not path.exists(file_path):
        return make_response(path.basename(page_path) + " not found", 404)
    return send_file(file_path, mimetype="text/plain")
@app.route('/assets/<path:path>')
def asset(path):
    """Serve a static asset. (The parameter shadows os.path inside this function.)"""
    return send_from_directory('assets', path)
@app.route('/assets/images/tutorials/<path:filename>')
def tutorial_img(filename):
    """Serve a tutorial image from assets/images/tutorials."""
    return send_from_directory(path.join('assets', 'images', 'tutorials'), filename)
@freezer.register_generator
def asset():
    """Freezer generator: yield every file under assets/ (reuses the route's endpoint name)."""
    for filename in walk_directory(path.join(root_folder, "assets")):
        yield {'path': filename}
@app.route('/<path:page_path>')
def get_index_page(page_path):
    """
    Handle requests which urls don't end with '.html' (for example, '/doc/')
    We don't need any generator here, because such urls are equivalent to the same urls
    with 'index.html' at the end.
    :param page_path: str
    :return: str
    """
    normalized = page_path if page_path.endswith('/') else page_path + '/'
    return process_page(normalized + 'index')
generate_redirect_pages()
@app.after_request
def add_header(response):
    """Disable client-side caching on every outgoing response.

    FIX: the parameter was originally named `request`, shadowing the
    imported flask `request` even though it receives the response object.
    The original also set Cache-Control to "no-cache, no-store,
    must-revalidate" and immediately overwrote it below; that dead
    assignment is dropped -- the final headers are unchanged.
    """
    response.headers["Pragma"] = "no-cache"
    response.headers["Expires"] = "0"
    response.headers['Cache-Control'] = 'public, max-age=0'
    return response
if __name__ == '__main__':
    print("\n\n\nRunning new KotlinWebSite generator/dev-mode:\n")
    # Consume the flags we understand; leave everything else (notably the
    # sub-command) in argv_copy.
    argv_copy = []
    for arg in sys.argv:
        print("arg: " + arg)
        if arg == "--ignore-stdlib":
            ignore_stdlib = True
        elif arg == "--no-check-links":
            build_check_links = False
        elif arg == "--editable":
            build_contenteditable = True
        else:
            argv_copy.append(arg)
    print("\n\n")
    print("ignore_stdlib: " + str(ignore_stdlib))
    print("build_check_links: " + str(build_check_links))
    print("build_contenteditable: " + str(build_contenteditable))
    print("\n\n")
    set_replace_simple_code(build_contenteditable)
    # Dump the fully resolved navigation for inspection/debugging.
    with (open(path.join(root_folder, "_nav-mapped.yml"), 'w')) as output:
        yaml.dump(get_nav_impl(), output)
    if len(argv_copy) > 1:
        # Sub-commands: build | sitemap | index | reference-pdf
        if argv_copy[1] == "build":
            build_mode = True
            urls = freezer.freeze()
            if len(build_errors) > 0:
                for error in build_errors:
                    sys.stderr.write(error + '\n')
                sys.exit(-1)
        elif argv_copy[1] == "sitemap":
            generate_sitemap(get_dist_pages())
            # temporary sitemap
            generate_temporary_sitemap()
        elif argv_copy[1] == "index":
            build_search_indices(get_dist_pages())
        elif argv_copy[1] == "reference-pdf":
            generate_pdf("kotlin-docs.pdf", site_data)
        else:
            print("Unknown argument: " + argv_copy[1])
            sys.exit(1)
    else:
        # No sub-command: run the dev server, reloading on nav/include edits.
        app.run(host="0.0.0.0", debug=True, threaded=True, **{"extra_files": {
            '/src/data/_nav.yml',
            *glob.glob("/src/pages-includes/**/*", recursive=True),
        }})
| kotlin-website.py | 17,589 | Handle requests which urls don't end with '.html' (for example, '/doc/')
We don't need any generator here, because such urls are equivalent to the same urls
with 'index.html' at the end.
:param page_path: str
:return: str
NOTE. This call depends on `request.path`, cannot cache get_nav() has side effect to copy and patch files from the `external` folder under site folder. We need it for dev mode to make sure file is up-to-date TODO: extract get_nav and implement the explicit way to avoid side-effects temporary sitemap | 526 | en | 0.88326 |
# -*- coding: utf-8 -*-
class TestInvalidPathTweenFactory:
    """Functional tests for the invalid-path tween."""

    def test_it_400s_if_the_requested_path_isnt_utf8(self, app):
        # %c5 is an incomplete UTF-8 byte sequence; the app should answer
        # 400 rather than raising a decoding error.
        app.get("/%c5", status=400)
| tests/functional/test_tweens.py | 162 | -*- coding: utf-8 -*- | 21 | en | 0.767281 |
"""
Simple million word count program.
main idea is Python pairs words
with the number of times
that number appears in the triple quoted string.
Credit to William J. Turkel and Adam Crymble for the word
frequency code used below. I just merged the two ideas.
"""
wordstring = '''SCENE I. Yorkshire. Gaultree Forest.
Enter the ARCHBISHOP OF YORK, MOWBRAY, LORD HASTINGS, and others
ARCHBISHOP OF YORK
What is this forest call'd?
HASTINGS
'Tis Gaultree Forest, an't shall please your grace.
ARCHBISHOP OF YORK
Here stand, my lords; and send discoverers forth
To know the numbers of our enemies.
HASTINGS
We have sent forth already.
ARCHBISHOP OF YORK
'Tis well done.
My friends and brethren in these great affairs,
I must acquaint you that I have received
New-dated letters from Northumberland;
Their cold intent, tenor and substance, thus:
Here doth he wish his person, with such powers
As might hold sortance with his quality,
The which he could not levy; whereupon
He is retired, to ripe his growing fortunes,
To Scotland: and concludes in hearty prayers
That your attempts may overlive the hazard
And fearful melting of their opposite.
MOWBRAY
Thus do the hopes we have in him touch ground
And dash themselves to pieces.
Enter a Messenger
HASTINGS
Now, what news?
Messenger
West of this forest, scarcely off a mile,
In goodly form comes on the enemy;
And, by the ground they hide, I judge their number
Upon or near the rate of thirty thousand.
MOWBRAY
The just proportion that we gave them out
Let us sway on and face them in the field.
ARCHBISHOP OF YORK
What well-appointed leader fronts us here?
Enter WESTMORELAND
MOWBRAY
I think it is my Lord of Westmoreland.
WESTMORELAND
Health and fair greeting from our general,
The prince, Lord John and Duke of Lancaster.
ARCHBISHOP OF YORK
Say on, my Lord of Westmoreland, in peace:
What doth concern your coming?
WESTMORELAND
Then, my lord,
Unto your grace do I in chief address
The substance of my speech. If that rebellion
Came like itself, in base and abject routs,
Led on by bloody youth, guarded with rags,
And countenanced by boys and beggary,
I say, if damn'd commotion so appear'd,
In his true, native and most proper shape,
You, reverend father, and these noble lords
Had not been here, to dress the ugly form
Of base and bloody insurrection
With your fair honours. You, lord archbishop,
Whose see is by a civil peace maintained,
Whose beard the silver hand of peace hath touch'd,
Whose learning and good letters peace hath tutor'd,
Whose white investments figure innocence,
The dove and very blessed spirit of peace,
Wherefore do you so ill translate ourself
Out of the speech of peace that bears such grace,
Into the harsh and boisterous tongue of war;
Turning your books to graves, your ink to blood,
Your pens to lances and your tongue divine
To a trumpet and a point of war?
ARCHBISHOP OF YORK
Wherefore do I this? so the question stands.
Briefly to this end: we are all diseased,
And with our surfeiting and wanton hours
Have brought ourselves into a burning fever,
And we must bleed for it; of which disease
Our late king, Richard, being infected, died.
But, my most noble Lord of Westmoreland,
I take not on me here as a physician,
Nor do I as an enemy to peace
Troop in the throngs of military men;
But rather show awhile like fearful war,
To diet rank minds sick of happiness
And purge the obstructions which begin to stop
Our very veins of life. Hear me more plainly.
I have in equal balance justly weigh'd
What wrongs our arms may do, what wrongs we suffer,
And find our griefs heavier than our offences.
We see which way the stream of time doth run,
And are enforced from our most quiet there
By the rough torrent of occasion;
And have the summary of all our griefs,
When time shall serve, to show in articles;
Which long ere this we offer'd to the king,
And might by no suit gain our audience:
When we are wrong'd and would unfold our griefs,
We are denied access unto his person
Even by those men that most have done us wrong.
The dangers of the days but newly gone,
Whose memory is written on the earth
With yet appearing blood, and the examples
Of every minute's instance, present now,
Hath put us in these ill-beseeming arms,
Not to break peace or any branch of it,
But to establish here a peace indeed,
Concurring both in name and quality.
WESTMORELAND
When ever yet was your appeal denied?
Wherein have you been galled by the king?
What peer hath been suborn'd to grate on you,
That you should seal this lawless bloody book
Of forged rebellion with a seal divine
And consecrate commotion's bitter edge?
ARCHBISHOP OF YORK
My brother general, the commonwealth,
To brother born an household cruelty,
I make my quarrel in particular.
WESTMORELAND
There is no need of any such redress;
Or if there were, it not belongs to you.
MOWBRAY
Why not to him in part, and to us all
That feel the bruises of the days before,
And suffer the condition of these times
To lay a heavy and unequal hand
Upon our honours?
WESTMORELAND
O, my good Lord Mowbray,
Construe the times to their necessities,
And you shall say indeed, it is the time,
And not the king, that doth you injuries.
Yet for your part, it not appears to me
Either from the king or in the present time
That you should have an inch of any ground
To build a grief on: were you not restored
To all the Duke of Norfolk's signories,
Your noble and right well remember'd father's?
MOWBRAY
What thing, in honour, had my father lost,
That need to be revived and breathed in me?
The king that loved him, as the state stood then,
Was force perforce compell'd to banish him:
And then that Harry Bolingbroke and he,
Being mounted and both roused in their seats,
Their neighing coursers daring of the spur,
Their armed staves in charge, their beavers down,
Their eyes of fire sparking through sights of steel
And the loud trumpet blowing them together,
Then, then, when there was nothing could have stay'd
My father from the breast of Bolingbroke,
O when the king did throw his warder down,
His own life hung upon the staff he threw;
Then threw he down himself and all their lives
That by indictment and by dint of sword
Have since miscarried under Bolingbroke.
WESTMORELAND
You speak, Lord Mowbray, now you know not what.
The Earl of Hereford was reputed then
In England the most valiant gentlemen:
Who knows on whom fortune would then have smiled?
But if your father had been victor there,
He ne'er had borne it out of Coventry:
For all the country in a general voice
Cried hate upon him; and all their prayers and love
Were set on Hereford, whom they doted on
And bless'd and graced indeed, more than the king.
But this is mere digression from my purpose.
Here come I from our princely general
To know your griefs; to tell you from his grace
That he will give you audience; and wherein
It shall appear that your demands are just,
You shall enjoy them, every thing set off
That might so much as think you enemies.
MOWBRAY
But he hath forced us to compel this offer;
And it proceeds from policy, not love.
WESTMORELAND
Mowbray, you overween to take it so;
This offer comes from mercy, not from fear:
For, lo! within a ken our army lies,
Upon mine honour, all too confident
To give admittance to a thought of fear.
Our battle is more full of names than yours,
Our men more perfect in the use of arms,
Our armour all as strong, our cause the best;
Then reason will our heart should be as good
Say you not then our offer is compell'd.
MOWBRAY
Well, by my will we shall admit no parley.
WESTMORELAND
That argues but the shame of your offence:
A rotten case abides no handling.
HASTINGS
Hath the Prince John a full commission,
In very ample virtue of his father,
To hear and absolutely to determine
Of what conditions we shall stand upon?
WESTMORELAND
That is intended in the general's name:
I muse you make so slight a question.
ARCHBISHOP OF YORK
Then take, my Lord of Westmoreland, this schedule,
For this contains our general grievances:
Each several article herein redress'd,
All members of our cause, both here and hence,
That are insinew'd to this action,
Acquitted by a true substantial form
And present execution of our wills
To us and to our purposes confined,
We come within our awful banks again
And knit our powers to the arm of peace.
WESTMORELAND
This will I show the general. Please you, lords,
In sight of both our battles we may meet;
And either end in peace, which God so frame!
Or to the place of difference call the swords
Which must decide it.
ARCHBISHOP OF YORK
My lord, we will do so.
Exit WESTMORELAND
MOWBRAY
There is a thing within my bosom tells me
That no conditions of our peace can stand.
HASTINGS
Fear you not that: if we can make our peace
Upon such large terms and so absolute
As our conditions shall consist upon,
Our peace shall stand as firm as rocky mountains.
MOWBRAY
Yea, but our valuation shall be such
That every slight and false-derived cause,
Yea, every idle, nice and wanton reason
Shall to the king taste of this action;
That, were our royal faiths martyrs in love,
We shall be winnow'd with so rough a wind
That even our corn shall seem as light as chaff
And good from bad find no partition.
ARCHBISHOP OF YORK
No, no, my lord. Note this; the king is weary
Of dainty and such picking grievances:
For he hath found to end one doubt by death
Revives two greater in the heirs of life,
And therefore will he wipe his tables clean
And keep no tell-tale to his memory
That may repeat and history his loss
To new remembrance; for full well he knows
He cannot so precisely weed this land
As his misdoubts present occasion:
His foes are so enrooted with his friends
That, plucking to unfix an enemy,
He doth unfasten so and shake a friend:
So that this land, like an offensive wife
That hath enraged him on to offer strokes,
As he is striking, holds his infant up
And hangs resolved correction in the arm
That was uprear'd to execution.
HASTINGS
Besides, the king hath wasted all his rods
On late offenders, that he now doth lack
The very instruments of chastisement:
So that his power, like to a fangless lion,
May offer, but not hold.
ARCHBISHOP OF YORK
'Tis very true:
And therefore be assured, my good lord marshal,
If we do now make our atonement well,
Our peace will, like a broken limb united,
Grow stronger for the breaking.
MOWBRAY
Be it so.
Here is return'd my Lord of Westmoreland.
Re-enter WESTMORELAND
WESTMORELAND
The prince is here at hand: pleaseth your lordship
To meet his grace just distance 'tween our armies.
MOWBRAY
Your grace of York, in God's name then, set forward.
ARCHBISHOP OF YORK
Before, and greet his grace: my lord, we come.
Exeunt'''
from collections import Counter

# Tokenise on whitespace, then count every distinct token once.  The
# original used `wordlist.count(w)` inside a comprehension, which rescans
# the whole list per word (O(n^2)); Counter does one O(n) pass and the
# per-position frequency list below is value-for-value identical.
wordlist = wordstring.split()
_counts = Counter(wordlist)
wordfreq = [_counts[w] for w in wordlist]
print("String\n {} \n".format(wordstring))
print("List\n {} \n".format(str(wordlist)))
print("Frequencies\n {} \n".format(str(wordfreq)))
# zip pairs each word with its frequency; dict() keeps the last pair per
# word, which is fine because duplicates carry equal counts.
print("Pairs\n {}".format(str(dict(zip(wordlist, wordfreq)))))
print("Edit I made to show how to pull from IntellijIdea")
print("Adding my two cents here")
| CountMillionCharacter.py | 10,991 | Simple million word count program.
main idea is Python pairs words
with the number of times
that number appears in the triple quoted string.
Credit to William J. Turkel and Adam Crymble for the word
frequency code used below. I just merged the two ideas. | 254 | en | 0.891765 |
""" YQL out mkt cap and currency to fill out yahoo table """
""" TODO: retreive lists of 100 symbols from database and update"""
""" Results are intented to use while matching yahoo tickers, which one has mkt cap? which ones has sector? """
import mysql.connector
import stockretriever
import sys
import time
from random import randint
# NOTE: this is Python 2 code (print statements, `except E, e:` syntax).
# Flow: read symbols missing market cap / currency, fetch a quote for each
# from Yahoo, and upsert the two fields back into yahoo.stocks.
cnx = mysql.connector.connect(user='root', password='root', database='yahoo')
cursor = cnx.cursor()
# Upper bound (seconds) for the random pause between Yahoo requests.
sleeptime = 10

# Upsert: insert the symbol, or refresh market_cap/currency if it exists.
add_market_cap = ("INSERT INTO stocks "
"(symbol, market_cap, currency) "
"VALUES (%s, %s, %s) "
"ON DUPLICATE KEY UPDATE market_cap=VALUES(market_cap), currency=VALUES(currency)")
# Only symbols that have never been enriched yet.
get_new_symbols = """SELECT symbol
FROM yahoo.stocks
WHERE market_cap is NULL
and currency is NULL"""
try:
    cursor.execute(get_new_symbols)
except mysql.connector.errors.IntegrityError, e:
    print(e)
for result in cursor.fetchall():
    for symbol in result:
        data = []
        market_cap = ""
        currency = ""
        # Fetch the current quote; on failure fall through with empty data
        # so the next try-block skips the symbol.
        try:
            data = stockretriever.get_current_info([symbol])
        except TypeError as e:
            #print "Typerror {0}: {1}".format(e.errno, e.strerror)
            print "Type error, could not fetch current info on ", symbol
        except Exception as e:
            print(e)
        try:
            currency = data['Currency']
            market_cap = data['MarketCapitalization']
        except Exception as e:
            # Quote lacks the fields we need (or fetch failed): skip symbol.
            print "No currency or mkt cap error", e
            continue
        data_company = (symbol, market_cap, currency)
        try:
            cursor.execute(add_market_cap, data_company)
        except mysql.connector.errors.IntegrityError, e:
            print(e)
            continue
        try:
            print "Success updating", symbol, currency, market_cap
        except UnicodeEncodeError as e:
            # Some tickers contain non-ASCII characters the console rejects.
            print e
        # Commit per symbol so progress survives a crash mid-run.
        cnx.commit()
        # Random delay to avoid hammering the quote service.
        time.sleep(randint(0,sleeptime))
cursor.close()
cnx.close()
| script/StockScraper-master/update_market_cap_yahoo.py | 2,002 | print "Typerror {0}: {1}".format(e.errno, e.strerror) | 53 | en | 0.081891 |
# Copyright (c) 2014 eBay Software Foundation
# Copyright 2015 HP Software, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard.dashboards.project import dashboard
class Clusters(horizon.Panel):
    """Horizon panel listing Trove database clusters in the Project dashboard."""

    name = _("Clusters")
    slug = 'database_clusters'
    # Panel is shown only when the user has access to both the database
    # (Trove) and object-store services.
    permissions = ('openstack.services.database',
                   'openstack.services.object-store',)


# Make the panel discoverable under the Project dashboard.
dashboard.Project.register(Clusters)
| trove_dashboard/content/database_clusters/panel.py | 1,044 | Copyright (c) 2014 eBay Software Foundation Copyright 2015 HP Software, LLC All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 646 | en | 0.861553 |
# Generated by Django 2.2.1 on 2019-07-06 21:53
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``Docset`` model (auto-generated by ``makemigrations``).

    Do not hand-edit applied migrations; create a follow-up migration instead.
    """

    dependencies = [
        ('publish', '0031_bundle_description'),
    ]

    operations = [
        migrations.CreateModel(
            name='Docset',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('docset_id', models.CharField(max_length=255)),
                ('name', models.CharField(default='', max_length=255)),
            ],
        ),
    ]
| sfdoc/publish/migrations/0032_docset.py | 591 | Generated by Django 2.2.1 on 2019-07-06 21:53 | 45 | en | 0.529291 |
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2018 Dan Tès <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.api.core import *
class ChannelAdminLogEventActionTogglePreHistoryHidden(Object):
    """Admin-log action: chat pre-history visibility was toggled.

    Attributes:
        ID: ``0x5f5c95f1``

    Args:
        new_value: ``bool``
    """

    ID = 0x5f5c95f1

    def __init__(self, new_value: bool):
        self.new_value = new_value  # Bool

    @staticmethod
    def read(b: BytesIO, *args) -> "ChannelAdminLogEventActionTogglePreHistoryHidden":
        # This constructor carries no flags field; the payload is one Bool.
        return ChannelAdminLogEventActionTogglePreHistoryHidden(Bool.read(b))

    def write(self) -> bytes:
        # Serialize as: constructor ID, then the Bool payload (no flags).
        out = BytesIO()
        out.write(Int(self.ID, False))
        out.write(Bool(self.new_value))
        return out.getvalue()
| ENV/lib/python3.5/site-packages/pyrogram/api/types/channel_admin_log_event_action_toggle_pre_history_hidden.py | 1,574 | Attributes:
ID: ``0x5f5c95f1``
Args:
new_value: ``bool``
Pyrogram - Telegram MTProto API Client Library for Python Copyright (C) 2017-2018 Dan Tès <https://github.com/delivrance> This file is part of Pyrogram. Pyrogram is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Pyrogram is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with Pyrogram. If not, see <http://www.gnu.org/licenses/>. Bool No flags No flags | 862 | en | 0.851332 |
# https://github.com/iliaschalkidis/lmtc-eurlex57k/blob/master/metrics.py
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
import numpy as np
def mean_precision_k(y_true, y_score, k=10):
    """Mean precision at rank k over samples with at least one positive label.

    Parameters
    ----------
    y_true : array-like
        Ground truth (true relevance labels), one row per sample.
    y_score : array-like
        Predicted scores, one row per sample.
    k : int
        Rank.

    Returns
    -------
    mean precision @k : float
    """
    # Samples without any positive label are skipped, exactly like the
    # per-sample guard in the sibling mean_* helpers.
    per_sample = [
        ranking_precision_score(y_t, y_s, k=k)
        for y_t, y_s in zip(y_true, y_score)
        if np.sum(y_t == 1)
    ]
    return np.mean(per_sample)
def mean_recall_k(y_true, y_score, k=10):
    """Mean recall at rank k over samples with at least one positive label.

    Parameters
    ----------
    y_true : array-like
        Ground truth (true relevance labels), one row per sample.
    y_score : array-like
        Predicted scores, one row per sample.
    k : int
        Rank.

    Returns
    -------
    mean recall @k : float
    """
    per_sample = [
        ranking_recall_score(y_t, y_s, k=k)
        for y_t, y_s in zip(y_true, y_score)
        if np.sum(y_t == 1)  # skip samples with no positives
    ]
    return np.mean(per_sample)
def mean_ndcg_score(y_true, y_score, k=10, gains="exponential"):
    """Mean normalized discounted cumulative gain (NDCG) at rank k.

    Parameters
    ----------
    y_true : array-like
        Ground truth (true relevance labels), one row per sample.
    y_score : array-like
        Predicted scores, one row per sample.
    k : int
        Rank.
    gains : str
        Whether gains should be "exponential" (default) or "linear".

    Returns
    -------
    Mean NDCG @k : float
    """
    per_sample = [
        ndcg_score(y_t, y_s, k=k, gains=gains)
        for y_t, y_s in zip(y_true, y_score)
        if np.sum(y_t == 1)  # skip samples with no positives
    ]
    return np.mean(per_sample)
def mean_rprecision_k(y_true, y_score, k=10):
    """Mean R-precision at rank k over samples with at least one positive.

    Parameters
    ----------
    y_true : array-like
        Ground truth (true relevance labels), one row per sample.
    y_score : array-like
        Predicted scores, one row per sample.
    k : int
        Rank.

    Returns
    -------
    mean R-precision @k : float
    """
    per_sample = [
        ranking_rprecision_score(y_t, y_s, k=k)
        for y_t, y_s in zip(y_true, y_score)
        if np.sum(y_t == 1)  # skip samples with no positives
    ]
    return np.mean(per_sample)
def ranking_recall_score(y_true, y_score, k=10):
    # https://ils.unc.edu/courses/2013_spring/inls509_001/lectures/10-EvaluationMetrics.pdf
    """Recall at rank k.

    Parameters
    ----------
    y_true : array-like, shape = [n_samples]
        Ground truth (true relevance labels, binary).
    y_score : array-like, shape = [n_samples]
        Predicted scores.
    k : int
        Rank.

    Returns
    -------
    recall @k : float
        Fraction of all positives that appear in the top-k by score.

    Raises
    ------
    ValueError
        If ``y_true`` has a single relevance level or more than two.
    """
    unique_y = np.unique(y_true)
    if len(unique_y) == 1:
        # Bug fix: the original *returned* the ValueError instance instead of
        # raising it, handing callers an exception object as the "score".
        raise ValueError("The score cannot be approximated.")
    elif len(unique_y) > 2:
        raise ValueError("Only supported for two relevance levels.")
    pos_label = unique_y[1]
    n_pos = np.sum(y_true == pos_label)
    # Indices of samples sorted by descending score; keep the top k labels.
    order = np.argsort(y_score)[::-1]
    y_true = np.take(y_true, order[:k])
    n_relevant = np.sum(y_true == pos_label)
    return float(n_relevant) / n_pos
def ranking_precision_score(y_true, y_score, k=10):
    """Precision at rank k.

    Parameters
    ----------
    y_true : array-like, shape = [n_samples]
        Ground truth (true relevance labels, binary).
    y_score : array-like, shape = [n_samples]
        Predicted scores.
    k : int
        Rank.

    Returns
    -------
    precision @k : float
        Fraction of the top-k (by score) that are positive.

    Raises
    ------
    ValueError
        If ``y_true`` has a single relevance level or more than two.
    """
    unique_y = np.unique(y_true)
    if len(unique_y) == 1:
        # Bug fix: the original *returned* the ValueError instance instead of
        # raising it, handing callers an exception object as the "score".
        raise ValueError("The score cannot be approximated.")
    elif len(unique_y) > 2:
        raise ValueError("Only supported for two relevance levels.")
    pos_label = unique_y[1]
    # Indices of samples sorted by descending score; keep the top k labels.
    order = np.argsort(y_score)[::-1]
    y_true = np.take(y_true, order[:k])
    n_relevant = np.sum(y_true == pos_label)
    return float(n_relevant) / k
def ranking_rprecision_score(y_true, y_score, k=10):
    """R-precision at rank k: precision normalized by ``min(k, n_pos)``.

    Parameters
    ----------
    y_true : array-like, shape = [n_samples]
        Ground truth (true relevance labels, binary).
    y_score : array-like, shape = [n_samples]
        Predicted scores.
    k : int
        Rank.

    Returns
    -------
    R-precision @k : float
        Best achievable value is always 1.0 thanks to the normalization.

    Raises
    ------
    ValueError
        If ``y_true`` has a single relevance level or more than two.
    """
    unique_y = np.unique(y_true)
    if len(unique_y) == 1:
        # Bug fix: the original *returned* the ValueError instance instead of
        # raising it, handing callers an exception object as the "score".
        raise ValueError("The score cannot be approximated.")
    elif len(unique_y) > 2:
        raise ValueError("Only supported for two relevance levels.")
    pos_label = unique_y[1]
    n_pos = np.sum(y_true == pos_label)
    # Indices of samples sorted by descending score; keep the top k labels.
    order = np.argsort(y_score)[::-1]
    y_true = np.take(y_true, order[:k])
    n_relevant = np.sum(y_true == pos_label)
    # Divide by min(n_pos, k) such that the best achievable score is always 1.0.
    return float(n_relevant) / min(k, n_pos)
def average_precision_score(y_true, y_score, k=10):
    """Average precision at rank k.

    Parameters
    ----------
    y_true : array-like, shape = [n_samples]
        Ground truth (true relevance labels, binary).
    y_score : array-like, shape = [n_samples]
        Predicted scores.
    k : int
        Rank.

    Returns
    -------
    average precision @k : float

    Raises
    ------
    ValueError
        If ``y_true`` has a single relevance level or more than two.
    """
    unique_y = np.unique(y_true)
    if len(unique_y) == 1:
        # Bug fix: the original *returned* the ValueError instance instead of
        # raising it, handing callers an exception object as the "score".
        raise ValueError("The score cannot be approximated.")
    elif len(unique_y) > 2:
        raise ValueError("Only supported for two relevance levels.")
    pos_label = unique_y[1]
    n_pos = np.sum(y_true == pos_label)
    # Labels of the top min(n_pos, k) samples by descending score.
    order = np.argsort(y_score)[::-1][:min(n_pos, k)]
    y_true = np.asarray(y_true)[order]
    score = 0
    for i in range(len(y_true)):
        if y_true[i] == pos_label:
            # Compute precision up to document i
            # i.e, percentage of relevant documents up to document i.
            prec = 0
            for j in range(0, i + 1):
                if y_true[j] == pos_label:
                    prec += 1.0
            prec /= (i + 1.0)
            score += prec
    if n_pos == 0:
        # Defensive only: pos_label = unique_y[1] always occurs at least once,
        # so this branch is unreachable in practice.
        return 0
    return score / n_pos
def dcg_score(y_true, y_score, k=10, gains="exponential"):
    """Discounted cumulative gain (DCG) at rank k.

    Parameters
    ----------
    y_true : array-like, shape = [n_samples]
        Ground truth (true relevance labels).
    y_score : array-like, shape = [n_samples]
        Predicted scores.
    k : int
        Rank.
    gains : str
        Whether gains should be "exponential" (default) or "linear".

    Returns
    -------
    DCG @k : float
    """
    # Relevance labels of the k highest-scored samples, best first.
    top_labels = np.take(y_true, np.argsort(y_score)[::-1][:k])
    if gains == "exponential":
        gain_values = 2 ** top_labels - 1
    elif gains == "linear":
        gain_values = top_labels
    else:
        raise ValueError("Invalid gains option.")
    # Log-position discount; ranks start at 1, hence arange(...) + 2.
    discounts = np.log2(np.arange(len(top_labels)) + 2)
    return np.sum(gain_values / discounts)
def ndcg_score(y_true, y_score, k=10, gains="exponential"):
    """Normalized discounted cumulative gain (NDCG) at rank k.

    Parameters
    ----------
    y_true : array-like, shape = [n_samples]
        Ground truth (true relevance labels).
    y_score : array-like, shape = [n_samples]
        Predicted scores.
    k : int
        Rank.
    gains : str
        Whether gains should be "exponential" (default) or "linear".

    Returns
    -------
    NDCG @k : float
    """
    # The ideal ordering scores the labels against themselves.
    ideal = dcg_score(y_true, y_true, k, gains)
    return dcg_score(y_true, y_score, k, gains) / ideal
# Alternative API.
def dcg_from_ranking(y_true, ranking):
    """Discounted cumulative gain (DCG) for an explicit ranking.

    Parameters
    ----------
    y_true : array-like, shape = [n_samples]
        Ground truth (true relevance labels).
    ranking : array-like, shape = [k]
        Document indices, i.e., ranking[0] is the index of the top-ranked
        document, ranking[1] the second-ranked document, and so on.

    Returns
    -------
    DCG @k : float
    """
    # Relevance labels in ranked order (exponential gains).
    rel = np.asarray(y_true)[np.asarray(ranking)]
    gains = 2 ** rel - 1
    # Log-position discount; ranks start at 1, hence arange(...) + 2.
    discounts = np.log2(np.arange(len(ranking)) + 2)
    return np.sum(gains / discounts)
def ndcg_from_ranking(y_true, ranking):
    """Normalized discounted cumulative gain (NDCG) for an explicit ranking.

    Parameters
    ----------
    y_true : array-like, shape = [n_samples]
        Ground truth (true relevance labels).
    ranking : array-like, shape = [k]
        Document indices, i.e., ranking[0] is the index of the top-ranked
        document, ranking[1] the second-ranked document, and so on.

    Returns
    -------
    NDCG @k : float
    """
    depth = len(ranking)
    # Ideal ranking: indices of labels sorted by descending relevance.
    ideal_order = np.argsort(y_true)[::-1][:depth]
    ideal = dcg_from_ranking(y_true, ideal_order)
    return dcg_from_ranking(y_true, ranking) / ideal
def colwise_accuracy(y_true, y_pred):
    """Mean of per-column (per-label) accuracies for multi-label arrays.

    Parameters
    ----------
    y_true, y_pred : ndarray, shape = [n_samples, n_labels]
        Binary ground-truth and predicted label matrices.
    """
    pred_cols = y_pred.T
    true_cols = y_true.T
    # accuracy_score is symmetric in its arguments, so the (pred, true)
    # argument order inherited from the original is harmless.
    scores = [accuracy_score(pred_cols[i], true_cols[i])
              for i in range(pred_cols.shape[0])]
    return sum(scores) / len(scores)
def calculate_metrics(pred, target, threshold=0.5):
    """Binarize ``pred`` at ``threshold`` and compute multi-label metrics.

    Returns a dict with overall / column-wise accuracy plus precision,
    recall and F1 under micro, macro and samples averaging.
    """
    pred = np.array(pred > threshold, dtype=float)
    metrics = {
        'Accuracy': accuracy_score(y_true=target, y_pred=pred),
        'Column-wise Accuracy': colwise_accuracy(y_true=target, y_pred=pred),
    }
    # Same key order as the original literal: micro, macro, samples; each
    # contributing precision, recall and f1 in that order.
    for avg in ('micro', 'macro', 'samples'):
        metrics[avg + '/precision'] = precision_score(y_true=target, y_pred=pred, average=avg)
        metrics[avg + '/recall'] = recall_score(y_true=target, y_pred=pred, average=avg)
        metrics[avg + '/f1'] = f1_score(y_true=target, y_pred=pred, average=avg)
    return metrics
Parameters
----------
y_true : array-like, shape = [n_samples]
Ground truth (true relevance labels).
y_score : array-like, shape = [n_samples]
Predicted scores.
k : int
Rank.
Returns
-------
average precision @k : float
Discounted cumulative gain (DCG) at rank k
Parameters
----------
y_true : array-like, shape = [n_samples]
Ground truth (true relevance labels).
ranking : array-like, shape = [k]
Document indices, i.e.,
ranking[0] is the index of top-ranked document,
ranking[1] is the index of second-ranked document,
...
k : int
Rank.
Returns
-------
DCG @k : float
Discounted cumulative gain (DCG) at rank k
Parameters
----------
y_true : array-like, shape = [n_samples]
Ground truth (true relevance labels).
y_score : array-like, shape = [n_samples]
Predicted scores.
k : int
Rank.
gains : str
Whether gains should be "exponential" (default) or "linear".
Returns
-------
DCG @k : float
Normalized discounted cumulative gain (NDCG) at rank k
Parameters
----------
y_true : array-like, shape = [n_samples]
Ground truth (true relevance labels).
y_score : array-like, shape = [n_samples]
Predicted scores.
k : int
Rank.
gains : str
Whether gains should be "exponential" (default) or "linear".
Returns
-------
Mean NDCG @k : float
Mean precision at rank k
Parameters
----------
y_true : array-like, shape = [n_samples]
Ground truth (true relevance labels).
y_score : array-like, shape = [n_samples]
Predicted scores.
k : int
Rank.
Returns
-------
mean precision @k : float
Mean recall at rank k
Parameters
----------
y_true : array-like, shape = [n_samples]
Ground truth (true relevance labels).
y_score : array-like, shape = [n_samples]
Predicted scores.
k : int
Rank.
Returns
-------
mean recall @k : float
Mean precision at rank k
Parameters
----------
y_true : array-like, shape = [n_samples]
Ground truth (true relevance labels).
y_score : array-like, shape = [n_samples]
Predicted scores.
k : int
Rank.
Returns
-------
mean precision @k : float
Normalized discounted cumulative gain (NDCG) at rank k
Parameters
----------
y_true : array-like, shape = [n_samples]
Ground truth (true relevance labels).
ranking : array-like, shape = [k]
Document indices, i.e.,
ranking[0] is the index of top-ranked document,
ranking[1] is the index of second-ranked document,
...
k : int
Rank.
Returns
-------
NDCG @k : float
Normalized discounted cumulative gain (NDCG) at rank k
Parameters
----------
y_true : array-like, shape = [n_samples]
Ground truth (true relevance labels).
y_score : array-like, shape = [n_samples]
Predicted scores.
k : int
Rank.
gains : str
Whether gains should be "exponential" (default) or "linear".
Returns
-------
NDCG @k : float
Precision at rank k
Parameters
----------
y_true : array-like, shape = [n_samples]
Ground truth (true relevance labels).
y_score : array-like, shape = [n_samples]
Predicted scores.
k : int
Rank.
Returns
-------
precision @k : float
Recall at rank k
Parameters
----------
y_true : array-like, shape = [n_samples]
Ground truth (true relevance labels).
y_score : array-like, shape = [n_samples]
Predicted scores.
k : int
Rank.
Returns
-------
precision @k : float
Precision at rank k
Parameters
----------
y_true : array-like, shape = [n_samples]
Ground truth (true relevance labels).
y_score : array-like, shape = [n_samples]
Predicted scores.
k : int
Rank.
Returns
-------
precision @k : float
https://github.com/iliaschalkidis/lmtc-eurlex57k/blob/master/metrics.py https://ils.unc.edu/courses/2013_spring/inls509_001/lectures/10-EvaluationMetrics.pdf Divide by min(n_pos, k) such that the best achievable score is always 1.0. Compute precision up to document i i.e, percentage of relevant documents up to document i. highest rank is 1 so +2 instead of +1 Alternative API. | 3,955 | en | 0.500208 |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import logging
import time
import unittest
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph.dygraph_to_static import ProgramTranslator
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph.jit import declarative
from paddle.fluid.dygraph.nn import Embedding
from paddle.fluid.optimizer import SGDOptimizer
# Logging cadence and RNG seed shared by the tests in this file.
PRINT_STEP = 20
SEED = 2020

# Shared translator used to convert dygraph code to static graph mode.
program_translator = ProgramTranslator()
class SimpleLSTMRNN(fluid.Layer):
    """Manually-unrolled multi-layer LSTM implemented with fluid ops.

    One fused weight matrix per layer (shape [2H, 4H]) computes the four
    gates in a single matmul over concat([step_input, pre_hidden]).
    """
    def __init__(self,
                 hidden_size,
                 num_steps,
                 num_layers=2,
                 init_scale=0.1,
                 dropout=None):
        super(SimpleLSTMRNN, self).__init__()
        self._hidden_size = hidden_size
        self._num_layers = num_layers
        self._init_scale = init_scale
        self._dropout = dropout
        self._num_steps = num_steps
        # NOTE(review): cell_array/hidden_array/weight_2_arr/mask_array are
        # never used (forward() builds its own locals) — confirm before removal.
        self.cell_array = []
        self.hidden_array = []
        self.weight_1_arr = []
        self.weight_2_arr = []
        self.bias_arr = []
        self.mask_array = []
        for i in range(self._num_layers):
            # Fused gate weights: [input; hidden] (2H) -> 4 gates (4H).
            weight_1 = self.create_parameter(
                attr=fluid.ParamAttr(
                    initializer=fluid.initializer.UniformInitializer(
                        low=-self._init_scale, high=self._init_scale)),
                shape=[self._hidden_size * 2, self._hidden_size * 4],
                dtype="float32",
                default_initializer=fluid.initializer.UniformInitializer(
                    low=-self._init_scale, high=self._init_scale))
            self.weight_1_arr.append(self.add_parameter('w_%d' % i, weight_1))
            bias_1 = self.create_parameter(
                attr=fluid.ParamAttr(
                    initializer=fluid.initializer.UniformInitializer(
                        low=-self._init_scale, high=self._init_scale)),
                shape=[self._hidden_size * 4],
                dtype="float32",
                default_initializer=fluid.initializer.Constant(0.0))
            self.bias_arr.append(self.add_parameter('b_%d' % i, bias_1))
    def forward(self, input_embedding, init_hidden=None, init_cell=None):
        """Unroll the LSTM over num_steps.

        Returns (outputs [B, T, H], last_hidden [L, B, H], last_cell [L, B, H]).
        """
        cell_array = []
        hidden_array = []
        for i in range(self._num_layers):
            hidden_array.append(init_hidden[i])
            cell_array.append(init_cell[i])
        res = []
        for index in range(self._num_steps):
            step_input = input_embedding[:, index, :]
            for k in range(self._num_layers):
                pre_hidden = hidden_array[k]
                pre_cell = cell_array[k]
                weight_1 = self.weight_1_arr[k]
                bias = self.bias_arr[k]
                nn = fluid.layers.concat([step_input, pre_hidden], 1)
                gate_input = fluid.layers.matmul(x=nn, y=weight_1)
                gate_input = fluid.layers.elementwise_add(gate_input, bias)
                # NOTE(review): 'i' here shadows the layer-count loop variable
                # above; harmless because that loop has finished, but fragile.
                i, j, f, o = fluid.layers.split(
                    gate_input, num_or_sections=4, dim=-1)
                # Standard LSTM cell update: c' = c*sig(f) + sig(i)*tanh(j)
                c = pre_cell * fluid.layers.sigmoid(f) + fluid.layers.sigmoid(
                    i) * fluid.layers.tanh(j)
                m = fluid.layers.tanh(c) * fluid.layers.sigmoid(o)
                hidden_array[k] = m
                cell_array[k] = c
                step_input = m
                # Per-layer output dropout (applied between stacked layers).
                if self._dropout is not None and self._dropout > 0.0:
                    step_input = fluid.layers.dropout(
                        step_input,
                        dropout_prob=self._dropout,
                        dropout_implementation='upscale_in_train')
            res.append(step_input)
        real_res = fluid.layers.concat(res, 1)
        real_res = fluid.layers.reshape(
            real_res, [-1, self._num_steps, self._hidden_size])
        last_hidden = fluid.layers.concat(hidden_array, 1)
        last_hidden = fluid.layers.reshape(
            last_hidden, shape=[-1, self._num_layers, self._hidden_size])
        last_hidden = fluid.layers.transpose(x=last_hidden, perm=[1, 0, 2])
        last_cell = fluid.layers.concat(cell_array, 1)
        last_cell = fluid.layers.reshape(
            last_cell, shape=[-1, self._num_layers, self._hidden_size])
        last_cell = fluid.layers.transpose(x=last_cell, perm=[1, 0, 2])
        return real_res, last_hidden, last_cell
class PtbModel(fluid.Layer):
    """PTB language model: embedding -> stacked LSTM -> softmax projection."""
    def __init__(self,
                 hidden_size,
                 vocab_size,
                 num_layers=2,
                 num_steps=20,
                 init_scale=0.1,
                 dropout=None):
        super(PtbModel, self).__init__()
        self.hidden_size = hidden_size
        self.vocab_size = vocab_size
        self.init_scale = init_scale
        self.num_layers = num_layers
        self.num_steps = num_steps
        self.dropout = dropout
        self.simple_lstm_rnn = SimpleLSTMRNN(
            hidden_size,
            num_steps,
            num_layers=num_layers,
            init_scale=init_scale,
            dropout=dropout)
        self.embedding = Embedding(
            size=[vocab_size, hidden_size],
            dtype='float32',
            is_sparse=False,
            param_attr=fluid.ParamAttr(
                name='embedding_para',
                initializer=fluid.initializer.UniformInitializer(
                    low=-init_scale, high=init_scale)))
        # Output projection back to vocabulary logits.
        self.softmax_weight = self.create_parameter(
            attr=fluid.ParamAttr(),
            shape=[self.hidden_size, self.vocab_size],
            dtype="float32",
            default_initializer=fluid.initializer.UniformInitializer(
                low=-self.init_scale, high=self.init_scale))
        self.softmax_bias = self.create_parameter(
            attr=fluid.ParamAttr(),
            shape=[self.vocab_size],
            dtype="float32",
            default_initializer=fluid.initializer.UniformInitializer(
                low=-self.init_scale, high=self.init_scale))
    def build_once(self, input, label, init_hidden, init_cell):
        # No lazy build needed; all parameters are created in __init__.
        pass
    @declarative
    def forward(self, input, label, init_hidden, init_cell):
        """Compute the summed mean cross-entropy loss over the unrolled steps."""
        init_h = fluid.layers.reshape(
            init_hidden, shape=[self.num_layers, -1, self.hidden_size])
        init_c = fluid.layers.reshape(
            init_cell, shape=[self.num_layers, -1, self.hidden_size])
        x_emb = self.embedding(input)
        x_emb = fluid.layers.reshape(
            x_emb, shape=[-1, self.num_steps, self.hidden_size])
        if self.dropout is not None and self.dropout > 0.0:
            x_emb = fluid.layers.dropout(
                x_emb,
                dropout_prob=self.dropout,
                dropout_implementation='upscale_in_train')
        rnn_out, last_hidden, last_cell = self.simple_lstm_rnn(x_emb, init_h,
                                                               init_c)
        projection = fluid.layers.matmul(rnn_out, self.softmax_weight)
        projection = fluid.layers.elementwise_add(projection, self.softmax_bias)
        loss = fluid.layers.softmax_with_cross_entropy(
            logits=projection, label=label, soft_label=False)
        # Mean over batch, sum over time steps.
        loss = fluid.layers.reshape(loss, shape=[-1, self.num_steps])
        loss = fluid.layers.reduce_mean(loss, dim=[0])
        loss = fluid.layers.reduce_sum(loss)
        return loss, last_hidden, last_cell
    def debug_emb(self):
        # NOTE(review): forward() only binds a local x_emb, never self.x_emb —
        # calling this would raise AttributeError. Confirm intended usage.
        np.save("emb_grad", self.x_emb.gradient())
def train(place):
    """Train the PTB model on fixed synthetic data and return final state.

    Returns (last step's loss ndarray, last_hidden ndarray, last_cell ndarray)
    so that dygraph and static-graph runs can be compared exactly.
    """
    num_layers = 1
    batch_size = 4
    hidden_size = 10
    num_steps = 3
    init_scale = 0.1
    max_epoch = 1
    dropout = 0.0
    vocab_size = 1000
    batch_num = 200
    with fluid.dygraph.guard(place):
        # Fixed seeds so both execution modes see identical initialization.
        fluid.default_startup_program().random_seed = SEED
        fluid.default_main_program().random_seed = SEED
        ptb_model = PtbModel(
            hidden_size=hidden_size,
            vocab_size=vocab_size,
            num_layers=num_layers,
            num_steps=num_steps,
            init_scale=init_scale,
            dropout=dropout)
        sgd = SGDOptimizer(
            learning_rate=1e-3, parameter_list=ptb_model.parameters())
        for epoch_id in range(max_epoch):
            total_loss = 0.0
            iters = 0.0
            total_sample = 0
            init_hidden_data = np.zeros(
                (num_layers, batch_size, hidden_size), dtype='float32')
            init_cell_data = np.zeros(
                (num_layers, batch_size, hidden_size), dtype='float32')
            init_hidden = to_variable(init_hidden_data)
            init_cell = to_variable(init_cell_data)
            for step_id in range(batch_num):
                # Deterministic toy batch: tokens 0..11, labels shifted by one.
                x_data = np.arange(12).reshape(4, 3).astype('int64')
                y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
                y_data = y_data.reshape((-1, 1))
                x_data = x_data.reshape((-1, num_steps, 1))
                y_data = y_data.reshape((-1, num_steps, 1))
                x = to_variable(x_data)
                y = to_variable(y_data)
                dy_loss, last_hidden, last_cell = ptb_model(x, y, init_hidden,
                                                            init_cell)
                out_loss = dy_loss.numpy()
                dy_loss.backward()
                sgd.minimize(dy_loss)
                ptb_model.clear_gradients()
                total_loss += out_loss
                iters += num_steps
                total_sample += 1
                if step_id % PRINT_STEP == 0:
                    if step_id == 0:
                        # First report has no previous timestamp to compute speed.
                        logging.info("epoch %d | step %d, loss %0.3f" % (
                            epoch_id, step_id, total_loss / total_sample))
                        avg_batch_time = time.time()
                    else:
                        speed = PRINT_STEP / (time.time() - avg_batch_time)
                        logging.info(
                            "epoch %d | step %d, loss %0.3f, speed %.3f steps/s"
                            % (epoch_id, step_id, total_loss / total_sample,
                               speed))
                        avg_batch_time = time.time()
        return out_loss, last_hidden.numpy(), last_cell.numpy()
def train_dygraph(place):
    """Train with imperative (dygraph) execution: translation disabled."""
    program_translator.enable(False)
    result = train(place)
    return result
def train_static(place):
    """Train with dygraph-to-static translation enabled (static graph)."""
    program_translator.enable(True)
    result = train(place)
    return result
class TestPtb(unittest.TestCase):
    """Checks dygraph and translated static-graph training agree exactly."""
    def setUp(self):
        # Prefer GPU when the installed Paddle build supports CUDA.
        self.place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() \
            else fluid.CPUPlace()
    def test_check_result(self):
        loss_1, hidden_1, cell_1 = train_static(self.place)
        loss_2, hidden_2, cell_2 = train_dygraph(self.place)
        self.assertTrue(
            np.allclose(loss_1, loss_2),
            msg="static loss: {} \ndygraph loss: {}".format(loss_1, loss_2))
        self.assertTrue(
            np.allclose(hidden_1, hidden_2),
            msg="static hidden: {} \ndygraph acc1: {}".format(hidden_1,
                                                              hidden_2))
        self.assertTrue(
            np.allclose(cell_1, cell_2),
            msg="static cell: {} \ndygraph cell: {}".format(cell_1, cell_2))
if __name__ == '__main__':
unittest.main()
| python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm.py | 11,862 | Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 583 | en | 0.863545 |
# Generated by Django 3.2.9 on 2022-01-03 10:15
import cloudinary.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds Location, links it from Neighbourhood
    and Profile, extends Neighbourhood, and drops Profile.name.

    Generated code — do not hand-edit the operations.
    """
    dependencies = [
        ('neighbourhood', '0003_auto_20211222_2324'),
    ]
    operations = [
        # New Location lookup table.
        migrations.CreateModel(
            name='Location',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20, null=True)),
                ('created_on', models.DateTimeField(auto_now_add=True, null=True)),
                ('updated_on', models.DateTimeField(auto_now=True, null=True)),
            ],
        ),
        migrations.RemoveField(
            model_name='profile',
            name='name',
        ),
        migrations.AddField(
            model_name='neighbourhood',
            name='description',
            field=models.TextField(max_length=200, null=True),
        ),
        migrations.AddField(
            model_name='neighbourhood',
            name='hood_image',
            field=cloudinary.models.CloudinaryField(max_length=255, null=True, verbose_name='hood_image'),
        ),
        # Both Neighbourhood and Profile reference the new Location table.
        migrations.AddField(
            model_name='neighbourhood',
            name='location',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='neighbourhood.location'),
        ),
        migrations.AddField(
            model_name='profile',
            name='location',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='neighbourhood.location'),
        ),
    ]
| neighbourhood/migrations/0004_auto_20220103_1315.py | 1,708 | Generated by Django 3.2.9 on 2022-01-03 10:15 | 45 | en | 0.730283 |
import deepSI
from deepSI.systems.system import System_ss, System_data
import numpy as np
class NarendraLiBenchmark(System_ss):
    """Nonlinear Narendra-Li benchmark system.

    Reference: https://arxiv.org/pdf/2003.14162.pdf
    """

    def __init__(self):
        """Two-state system; noise and x0 settings come from System_ss defaults."""
        super(NarendraLiBenchmark, self).__init__(nx=2)

    def f(self, x, u):
        """State transition x[k+1] = f(x[k], u[k])."""
        x1, x2 = x
        next_x1 = (x1/(1+x1**2)+1)*np.sin(x2)
        next_x2 = x2*np.cos(x2) + x1*np.exp(-(x1**2+x2**2)/8) + u**3/(1+u**2+0.5*np.cos(x1+x2))
        return [next_x1, next_x2]

    def h(self, x):
        """Output map with additive Gaussian measurement noise (sigma=0.1)."""
        x1, x2 = x
        return x1/(1+0.5*np.sin(x2)) + x2/(1+0.5*np.sin(x1)) + self.random.normal(scale=0.1)

    def _random_experiment(self):
        # 2000-sample experiment driven by uniform input in [-2.5, 2.5].
        exp = System_data(u=self.random.uniform(low=-2.5, high=2.5, size=(2000,)))
        return self.apply_experiment(exp)

    def get_train_data(self):
        """Simulate a random training experiment."""
        return self._random_experiment()

    def get_test_data(self):
        """Simulate a random test experiment."""
        return self._random_experiment()
if __name__ == '__main__':
    # Demo: fit a linear IO model to simulated Narendra-Li data and plot.
    from deepSI import fit_systems
    # NOTE(review): 'sys' shadows the stdlib sys module imported above.
    sys = NarendraLiBenchmark()
    sys_data = sys.get_train_data()
    SYS = fit_systems.System_IO_fit_linear
    # sys_fit, score, kwargs = fit_systems.fit_system_tuner(SYS, sys_data, dict(na=range(0,7),nb=range(1,7)))
    # Grid-search over the model orders na/nb.
    score, sys_fit, kwargs, _ = fit_systems.grid_search(SYS, sys_data, dict(na=range(0,7),nb=range(1,7)))
    sys_data_predict = sys_fit.apply_experiment(sys_data)
    sys_data.plot()
    sys_data_predict.plot(show=True)
| deepSI/systems/narendra_li_benchmark.py | 1,494 | docstring for NarendraLiBenchmark
Noise, system setting and x0 settings
https://arxiv.org/pdf/2003.14162.pdf sys_fit, score, kwargs = fit_systems.fit_system_tuner(SYS, sys_data, dict(na=range(0,7),nb=range(1,7))) | 213 | en | 0.397673 |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Purpose
Shows how to implement an AWS Lambda function that publishes messages to an
AWS IoT Greengrass connector.
"""
# snippet-start:[greengrass.python.connector-modbus-rtu-usage.complete]
import json
import greengrasssdk
iot_client = greengrasssdk.client('iot-data')
send_topic = 'modbus/adapter/request'
def create_read_coils_request():
    """Build a Modbus ReadCoils request payload for the RTU connector.

    Reads a single coil at address 0x01 on device 1; the "id" field lets the
    response be correlated with this request.
    """
    request_body = {
        "operation": "ReadCoilsRequest",
        "device": 1,
        "address": 0x01,
        "count": 1,
    }
    return {"request": request_body, "id": "TestRequest"}
def publish_basic_message():
    """Publish the ReadCoils request to the Modbus connector's request topic."""
    iot_client.publish(
        topic=send_topic, payload=json.dumps(create_read_coils_request()))
publish_basic_message()
# In this example, the required AWS Lambda handler is never called.
def function_handler(event, context):
    """Required Lambda entry point; intentionally unused in this example."""
    return
# snippet-end:[greengrass.python.connector-modbus-rtu-usage.complete]
| python/example_code/greengrass/snippets/connector_modbus_rtu_usage.py | 989 | Purpose
Shows how to implement an AWS Lambda function that publishes messages to an
AWS IoT Greengrass connector.
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: Apache-2.0 snippet-start:[greengrass.python.connector-modbus-rtu-usage.complete] In this example, the required AWS Lambda handler is never called. snippet-end:[greengrass.python.connector-modbus-rtu-usage.complete] | 423 | en | 0.646866 |
#!/usr/bin/env python
# file trying to apply and test the pid controller on carla.
import glob
import os
import sys
import time
import matplotlib.pyplot as plt
from PID_controller import PID
import numpy as np
import speed_profile_reader as spr
try:
sys.path.append(glob.glob('../**/*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
import carla
import random
import time
class TestData:
    """Accumulates time-series samples from a PID run and plots the result."""

    def __init__(self, total_duration, time_increment):
        self._iter_num = 0
        n_samples = int(total_duration / time_increment) + 1
        shape = (n_samples, 1)
        self.time = np.empty(shape)
        self.setpoint = np.empty(shape)
        self.actual_velocity = np.empty(shape)
        self.error = np.empty(shape)

    def append_data(self, t, sp, vel, error):
        """Record one sample at the current write index."""
        idx = self._iter_num
        self.time[idx] = t
        self.setpoint[idx] = sp
        self.actual_velocity[idx] = vel
        self.error[idx] = error
        self._iter_num = idx + 1

    def plot(self):
        """Show a setpoint-vs-velocity figure and a controller-error figure."""
        plt.figure()
        plt.plot(self.time, self.setpoint)
        plt.plot(self.time, self.actual_velocity)
        plt.xlabel('Time (s)')
        plt.ylabel('Velocity (m/s)')
        plt.title("PID Result")
        plt.figure()
        plt.plot(self.time, self.error, 'r--', label='error', alpha=0.75, linewidth=0.5)
        plt.plot(self.time, np.zeros(len(self.time)), 'k--', linewidth=0.5)
        plt.title("Controller Error")
        plt.show()
class DataInit:
    """Static test configuration: PID gains and run timing."""
    # Gains tuned for this static-setpoint test. Earlier candidates
    # (for 10 m/s): Kp=0.055734 Ki=0.0130169 Kd=0.000006, and
    # Kp=1 Ki=0.0112 Kd=0.000006.
    K = {
        "Kp": 0.055734,
        "Ki": 0.0114169,
        "Kd": 0.00006,
    }
    total_duration = 20       # seconds of test run
    sampling_period = 0.025   # controller update interval (s)
def main():
    """Connect to CARLA, spawn a test vehicle and run a static-setpoint PID test.

    Fixes over the original:
    - ``input()`` returns ``str`` on Python 3, so the old
      ``isinstance(..., (int, float))`` loop never terminated; the setpoint is
      now parsed with ``float()`` and re-prompted on bad input.
    - ``data.error.shape[0]/2`` is a float on Python 3 and is not a valid
      slice index; floor division is used instead.
    """
    actor_list = []
    verboseIsEnabled = None
    try:
        # Section for starting the client and connecting to the server
        client = carla.Client('localhost', 2000)
        client.set_timeout(2.0)
        for arg in sys.argv:
            if (arg == '--verbose'):
                verboseIsEnabled = True
        if verboseIsEnabled:
            print('client version: %s' % client.get_client_version())
            print('server version: %s' % client.get_server_version())
            # NOTE(review): this prints the server version as the "status" —
            # confirm whether a real status call was intended.
            print('client to server connection status: {}'.format(client.get_server_version()))
            print('Retrieving the world data from server...')
        world = client.get_world()
        if verboseIsEnabled:
            print('{} \n'.format(world))

        # Section for retrieving the blueprints and spawning the ego vehicle
        blueprint_library = world.get_blueprint_library()
        if verboseIsEnabled:
            print('\nRetrieving CARLA blueprint library...')
            print('\nobject: %s\n\nblueprint methods: %s\n\nblueprint list:' % (type(blueprint_library), dir(blueprint_library)) )
            for blueprint in blueprint_library:
                print(blueprint)
        audi_blueprint = blueprint_library.find('vehicle.audi.tt')
        print('\n%s\n' % audi_blueprint)
        color = '191,191,191'
        audi_blueprint.set_attribute('color', color)
        transform = carla.Transform(
            carla.Location(
                x=10.5, y=-1.8,
                z=38.5), carla.Rotation(yaw=0.0)
        )
        vehicleEgo = world.spawn_actor(audi_blueprint, transform)
        actor_list.append(vehicleEgo)
        print('created %s' % vehicleEgo.type_id)
        color = random.choice(audi_blueprint.get_attribute('color').recommended_values)
        audi_blueprint.set_attribute('color', color)

        # Section for initializing the PID test
        user_input_sp = None
        while user_input_sp is None:
            try:
                # FIX: parse to float; Python 3 input() always returns str.
                user_input_sp = float(input('Enter the desired Setpoint:\n'))
            except ValueError:
                print('Please enter a number.')
        data = TestData(DataInit.total_duration, DataInit.sampling_period)
        start = time.time()
        print('\nStarting test:\n\n' + 'Time(s) current_vel(m/s) setpoint_vel(m/s) throttle(%) pid_demand')
        time.sleep(2.5)
        print('.................................................................\n')
        time.sleep(1)
        p = PID(
            DataInit.K['Kp'],
            DataInit.K['Ki'],
            DataInit.K['Kd']
        )
        p.setPoint(user_input_sp)
        p.Integrator_min = -5
        p.Integrator_max = 40
        pid = 0
        for _ in range(int(DataInit.total_duration / DataInit.sampling_period) + 1):
            measurement_value = vehicleEgo.get_velocity().x
            # Apply throttle; saturate demand at full throttle outside (0, 1).
            if 1 > pid > 0:
                vehicleEgo.apply_control(carla.VehicleControl(pid))
            else:
                vehicleEgo.apply_control(carla.VehicleControl(1))
            # Negative demand means braking.
            if 0 > pid:
                vehicleEgo.apply_control(carla.VehicleControl(brake=abs(pid)))
            pid = p.update(measurement_value)
            data.append_data(round(time.time() - start, 2), p.getSetPoint(), round(vehicleEgo.get_velocity().x, 5), p.getError())
            time.sleep(DataInit.sampling_period)
            print('%0.3f\t%0.2f\t\t\t%0.2f\t\t%0.2f\t%0.2f' % (time.time() - start,
                                                               vehicleEgo.get_velocity().x,
                                                               p.set_point,
                                                               vehicleEgo.get_control().throttle,
                                                               pid))
        data.plot()
        # FIX: integer floor division — a float slice index raises TypeError.
        half = data.error.shape[0] // 2
        print('\nError Mean (Steady State):\n' +
              str(round(np.absolute(np.mean(data.error[half:])), 5)*100) +
              '%\n')
    finally:
        print('destroying actors')
        for actor in actor_list:
            actor.destroy()
        print('done.')
if __name__ == '__main__':
main()
| PythonAPI/carissma_project/PID_apply_static_sp.py | 6,114 | !/usr/bin/env python file trying to apply and test the pid controller on carla. For 10 m/s "Kp": 0.055734, "Ki": 0.0130169, "Kd": .000006 "Kp": 1, "Ki": 0.0112, "Kd": 0.000006 raise SystemExit | 192 | en | 0.565658 |
{% if cookiecutter.use_celery == 'y' %}
from __future__ import absolute_import
import os
from celery import Celery
from django.apps import AppConfig
from django.conf import settings
if not settings.configured:
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.local') # pragma: no cover
app = Celery('{{cookiecutter.project_slug}}')
class CeleryConfig(AppConfig):
    """Django app config that wires Celery up once the app registry is ready."""
    name = '{{cookiecutter.project_slug}}.taskapp'
    verbose_name = 'Celery Config'
    def ready(self):
        # Using a string here means the worker will not have to
        # pickle the object when using Windows.
        app.config_from_object('django.conf:settings')
        app.autodiscover_tasks(lambda: settings.INSTALLED_APPS, force=True)
{% if cookiecutter.use_sentry_for_error_reporting == 'y' -%}
        # Optional Sentry/Raven integration (template-conditional).
        if hasattr(settings, 'RAVEN_CONFIG'):
            # Celery signal registration
            from raven import Client as RavenClient
            from raven.contrib.celery import register_signal as raven_register_signal
            from raven.contrib.celery import register_logger_signal as raven_register_logger_signal
            raven_client = RavenClient(dsn=settings.RAVEN_CONFIG['DSN'])
            raven_register_logger_signal(raven_client)
            raven_register_signal(raven_client)
{%- endif %}
{% if cookiecutter.use_opbeat == 'y' -%}
        # Optional Opbeat integration (template-conditional).
        if hasattr(settings, 'OPBEAT'):
            from opbeat.contrib.django.models import client as opbeat_client
            from opbeat.contrib.django.models import logger as opbeat_logger
            from opbeat.contrib.django.models import register_handlers as opbeat_register_handlers
            from opbeat.contrib.celery import register_signal as opbeat_register_signal
            try:
                opbeat_register_signal(opbeat_client)
            except Exception as e:
                opbeat_logger.exception('Failed installing celery hook: %s' % e)
            if 'opbeat.contrib.django' in settings.INSTALLED_APPS:
                opbeat_register_handlers()
{%- endif %}
@app.task(bind=True)
def debug_task(self):
    """Smoke-test task: print this worker's own request context."""
    print('Request: {0!r}'.format(self.request)) # pragma: no cover
{% else %}
# Use this as a starting point for your project with celery.
# If you are not using celery, you can remove this app
{% endif -%}
| {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/taskapp/celery.py | 2,410 | set the default Django settings module for the 'celery' program. pragma: no cover Using a string here means the worker will not have to pickle the object when using Windows. Celery signal registration pragma: no cover Use this as a starting point for your project with celery. If you are not using celery, you can remove this app | 329 | en | 0.818092 |
import logging
import os
from typing import Generator
import pytest
@pytest.fixture(scope="module", autouse=True)
def change_to_resources_dir(test_resources, request):
    """Run every test in this module from the test-resources directory."""
    os.chdir(test_resources)
    yield
    # Restore the directory pytest was invoked from.
    os.chdir(request.config.invocation_dir)
@pytest.fixture()
def test_filename(
    change_to_resources_dir, storage, request
) -> Generator[str, None, None]:
    """Pushes a file to remote storage, yields its filename and then deletes it from remote storage"""
    # request.param comes from @pytest.mark.parametrize(..., indirect=...).
    filename = request.param
    storage.push_file(filename)
    yield filename
    storage.delete(filename)
NAME_COLLISIONS_DIR_NAME = "storage_name_collisions"
@pytest.fixture()
def setup_name_collision(change_to_resources_dir, storage):
    """
    Pushes files and dirs with colliding names to remote storage, yields files pushed
    and deletes everything at cleanup
    """
    pushed_objects = storage.push(NAME_COLLISIONS_DIR_NAME)
    yield pushed_objects
    storage.delete(NAME_COLLISIONS_DIR_NAME)
@pytest.fixture()
def test_dirname(
    change_to_resources_dir, storage, request
) -> Generator[str, None, None]:
    """Pushes a directory to remote storage, yields its name and then deletes it from remote storage"""
    # request.param comes from @pytest.mark.parametrize(..., indirect=...).
    dirname = request.param
    storage.push_directory(dirname)
    yield dirname
    storage.delete(dirname)
def test_delete_no_matches(storage, caplog):
    """Deleting a non-existent remote path removes nothing and logs a warning."""
    with caplog.at_level(logging.WARNING):
        removed = storage.delete("there is no such file")
    assert len(removed) == 0
    assert "Not deleting anything" in caplog.text
def test_delete_file(storage):
    """A pushed file can be deleted and disappears from the remote listing."""
    storage.push_file("sample.txt", overwrite_existing=True)
    assert len(storage.list_objects("sample.txt")) == 1
    removed = storage.delete("sample.txt")
    assert len(removed) == 1
    assert len(storage.list_objects("sample.txt")) == 0
def test_delete_with_base_path(storage):
    """Deletion honours the configured remote base path in object names."""
    base_path = "base_path"
    storage.set_remote_base_path(base_path)
    storage.push_file("sample.txt", overwrite_existing=True)
    assert len(storage.list_objects("sample.txt")) == 1
    removed = storage.delete("sample.txt")
    assert len(removed) == 1
    assert removed[0].name == f"{base_path}/sample.txt"
def test_delete_dir(storage):
    """Deleting a pushed directory removes all objects it contains."""
    storage.push_directory("sample_dir", overwrite_existing=True)
    assert len(storage.list_objects("sample_dir")) == 2
    removed = storage.delete("sample_dir")
    assert len(removed) == 2
    assert len(storage.list_objects("sample_dir")) == 0
@pytest.mark.parametrize(
    "test_filename",
    ["sample.txt"],
    indirect=["test_filename"],
)
def test_push_file_empty_base_path(storage, test_filename):
    """Without a base path the remote object name equals the local filename."""
    pushed = storage.push(test_filename)
    assert len(pushed) == 1
    # lstrip because s3 object names start with "/" while google storage names do not
    assert pushed[0].name.lstrip("/") == test_filename
@pytest.mark.parametrize(
    "test_filename",
    ["sample.txt"],
    indirect=["test_filename"],
)
def test_push_file_nonempty_base_path(storage, test_filename):
    """With a base path set, pushed objects are prefixed with it."""
    base_path = "base_path"
    storage.set_remote_base_path(base_path)
    pushed = storage.push(test_filename)
    assert len(pushed) == 1
    assert pushed[0].name.lstrip("/") == f"{base_path}/{test_filename}"
@pytest.mark.parametrize(
    "test_dirname",
    ["sample_dir"],
    indirect=["test_dirname"],
)
def test_push_directory(storage, test_dirname):
    """Pushing a directory uploads every file it contains."""
    pushed = storage.push(test_dirname)
    assert len(pushed) == 2
    assert len(storage.list_objects(test_dirname)) == 2
@pytest.mark.parametrize(
    "file_or_dir_name", ["non_existing_file.txt", "non_existing_dir"]
)
def test_push_non_existing(storage, file_or_dir_name):
    """Pushing a missing local path raises FileNotFoundError."""
    with pytest.raises(
        FileNotFoundError, match="does not refer to a file or directory"
    ):
        storage.push(file_or_dir_name)
@pytest.mark.parametrize(
    "test_filename",
    ["sample.txt"],
    indirect=["test_filename"],
)
def test_pull_file(storage, test_filename, tmpdir):
    """A remote file is pulled once; pulling again (up to date) is a no-op."""
    target_dir = tmpdir.mkdir("remote_storage")
    storage.pull(test_filename, local_base_dir=target_dir)
    assert os.path.isfile(os.path.join(target_dir, test_filename))
    pulled = storage.pull(test_filename)
    assert len(pulled) == 0
@pytest.mark.parametrize(
    "test_filename",
    ["sample.txt"],
    indirect=["test_filename"],
)
def test_pull_file_to_existing_dir_path(storage, test_filename, tmpdir):
    """Pulling a file onto an existing directory of the same name must fail."""
    target_dir = tmpdir.mkdir("remote_storage")
    # Create a directory that collides with the file's target path.
    target_dir.mkdir(test_filename)
    with pytest.raises(
        FileExistsError,
        match="Cannot pull file to a path which is an existing directory:",
    ):
        storage.pull(test_filename, local_base_dir=target_dir)
@pytest.mark.parametrize(
    "test_dirname",
    ["sample_dir"],
    indirect=["test_dirname"],
)
def test_pull_dir(storage, test_dirname, tmpdir):
    """A remote directory is pulled with all contents; a second pull is a no-op."""
    target_dir = tmpdir.mkdir("remote_storage")
    storage.pull(test_dirname, local_base_dir=target_dir)
    pulled_dir = os.path.join(target_dir, test_dirname)
    assert os.path.isdir(pulled_dir)
    assert len(os.listdir(pulled_dir)) == 2
    pulled = storage.pull(test_dirname)
    assert len(pulled) == 0
@pytest.mark.parametrize(
    "file_or_dir_name", ["non_existing_file.txt", "non_existing_dir"]
)
def test_pull_non_existing(storage, file_or_dir_name, caplog):
    """Pulling a missing remote path pulls nothing and logs a warning."""
    with caplog.at_level(logging.WARNING):
        pulled = storage.pull(file_or_dir_name)
    assert len(pulled) == 0
    assert "No such remote file or directory" in caplog.text
def test_name_collisions_pulling_properly(setup_name_collision, storage, tmpdir):
    """Pulling "file.txt" must not also fetch "file.txt.collision" (dirs alike)."""
    storage.set_remote_base_path(NAME_COLLISIONS_DIR_NAME)
    base = tmpdir.mkdir("remote_storage")
    file_collision = "file.txt.collision"
    dir_collision = "dir_name_collision"

    def local(*parts):
        return os.path.join(base, *parts)

    storage.pull("file.txt", local_base_dir=base)
    storage.pull("dir_name", local_base_dir=base)
    assert not os.path.isfile(local(file_collision))
    assert os.path.isfile(local("file.txt"))
    assert not os.path.isdir(local(dir_collision))
    assert os.path.isdir(local("dir_name"))
    # The colliding names can still be pulled explicitly.
    storage.pull(file_collision, local_base_dir=base)
    assert os.path.isfile(local(file_collision))
    storage.pull(dir_collision, local_base_dir=base)
    assert os.path.isfile(local(dir_collision, "file.txt"))
def test_name_collisions_deleting_properly(setup_name_collision, storage):
    """Deleting "file.txt" must not touch "file.txt.collision" or "dir_name/file.txt".

    Fix: the original used ``str.lstrip(prefix)``, which strips a *character
    set* from the left, not a prefix — any object name starting with a letter
    from the base path (e.g. "test.txt") would have been mangled and could
    produce a false pass/fail. The base-path prefix is now removed explicitly.
    """
    storage.set_remote_base_path(NAME_COLLISIONS_DIR_NAME)
    storage.delete("file.txt")
    prefix = f"{NAME_COLLISIONS_DIR_NAME}/"
    remaining_object_names = []
    for obj in storage.list_objects(""):
        name = obj.name.lstrip("/")  # s3 names lead with "/", google names do not
        if name.startswith(prefix):
            name = name[len(prefix):]
        remaining_object_names.append(name)
    assert "file.txt" not in remaining_object_names
    assert "file.txt.collision" in remaining_object_names
    assert "dir_name/file.txt" in remaining_object_names
# TODO or not TODO: many cases are missing - checking names, testing overwriting.
| tests/accsr/test_remote_storage.py | 7,258 | Pushes files and dirs with colliding names to remote storage, yields files pushed
and deletes everything at cleanup
Pushes a directory to remote storage, yields its name and then deletes it from remote storage
Pushes a file to remote storage, yields its filename and then deletes it from remote storage
we need lstrip because s3 paths (and names) start with "/" while google storage paths start without it... TODO or not TODO: many cases are missing - checking names, testing overwriting. | 490 | en | 0.905044 |
"""Performs face alignment and stores face thumbnails in the output directory."""
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from scipy import misc
import sys
import os
import argparse
import tensorflow as tf
import numpy as np
import facenet
from detect_face import create_mtcnn, detect_face
import random
from time import sleep
def main(args):
    """Detect faces with MTCNN, crop/align them, and save PNG thumbnails.

    Writes one output image per detected face plus a bounding_boxes_*.txt
    log listing the saved file and its pixel box.
    """
    # Random stagger so several parallel alignment processes don't collide.
    sleep(random.random())
    output_dir = os.path.expanduser(args.output_dir)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # Store some git revision info in a text file in the log directory
    src_path,_ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, output_dir, ' '.join(sys.argv))
    dataset = facenet.get_dataset(args.input_dir, False)
    print('Creating networks and loading parameters')
    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        with sess.as_default():
            # The three cascaded MTCNN stages (proposal, refine, output).
            pnet, rnet, onet = create_mtcnn(sess, None)
    minsize = 20 # minimum size of face
    threshold = [ 0.6, 0.7, 0.7 ]  # three steps's threshold
    factor = 0.709 # scale factor
    # Add a random key to the filename to allow alignment using multiple processes
    random_key = np.random.randint(0, high=99999)
    bounding_boxes_filename = os.path.join(output_dir, 'bounding_boxes_%05d.txt' % random_key)
    with open(bounding_boxes_filename, "w") as text_file:
        nrof_images_total = 0
        nrof_successfully_aligned = 0
        if args.random_order:
            random.shuffle(dataset)
        for cls in dataset:
            output_class_dir = os.path.join(output_dir, cls.name)
            if not os.path.exists(output_class_dir):
                os.makedirs(output_class_dir)
                if args.random_order:
                    random.shuffle(cls.image_paths)
            for image_path in cls.image_paths:
                nrof_images_total += 1
                filename = os.path.splitext(os.path.split(image_path)[1])[0]
                output_filename = os.path.join(output_class_dir, filename+'.png')
                print(image_path)
                # Skip images already aligned by a previous/parallel run.
                if not os.path.exists(output_filename):
                    try:
                        # NOTE(review): scipy.misc image I/O is deprecated in
                        # modern scipy — fine for the pinned env, flagging only.
                        img = misc.imread(image_path)
                    except (IOError, ValueError, IndexError) as e:
                        errorMessage = '{}: {}'.format(image_path, e)
                        print(errorMessage)
                    else:
                        if img.ndim<2:
                            print('Unable to align "%s"' % image_path)
                            text_file.write('%s\n' % (output_filename))
                            continue
                        if img.ndim == 2:
                            # Grayscale -> RGB; then drop any alpha channel.
                            img = facenet.to_rgb(img)
                        img = img[:,:,0:3]
                        bounding_boxes, _ = detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
                        nrof_faces = bounding_boxes.shape[0]
                        if nrof_faces>0:
                            det = bounding_boxes[:,0:4]
                            det_arr = []
                            img_size = np.asarray(img.shape)[0:2]
                            if nrof_faces>1:
                                if args.detect_multiple_faces:
                                    for i in range(nrof_faces):
                                        det_arr.append(np.squeeze(det[i]))
                                else:
                                    # Keep the largest, most centered face.
                                    bounding_box_size = (det[:,2]-det[:,0])*(det[:,3]-det[:,1])
                                    img_center = img_size / 2
                                    offsets = np.vstack([ (det[:,0]+det[:,2])/2-img_center[1], (det[:,1]+det[:,3])/2-img_center[0] ])
                                    offset_dist_squared = np.sum(np.power(offsets,2.0),0)
                                    index = np.argmax(bounding_box_size-offset_dist_squared*2.0) # some extra weight on the centering
                                    det_arr.append(det[index,:])
                            else:
                                det_arr.append(np.squeeze(det))
                            for i, det in enumerate(det_arr):
                                det = np.squeeze(det)
                                # Expand the box by the margin, clamped to the image.
                                bb = np.zeros(4, dtype=np.int32)
                                bb[0] = np.maximum(det[0]-args.margin/2, 0)
                                bb[1] = np.maximum(det[1]-args.margin/2, 0)
                                bb[2] = np.minimum(det[2]+args.margin/2, img_size[1])
                                bb[3] = np.minimum(det[3]+args.margin/2, img_size[0])
                                cropped = img[bb[1]:bb[3],bb[0]:bb[2],:]
                                scaled = misc.imresize(cropped, (args.image_size, args.image_size), interp='bilinear')
                                nrof_successfully_aligned += 1
                                filename_base, file_extension = os.path.splitext(output_filename)
                                if args.detect_multiple_faces:
                                    output_filename_n = "{}_{}{}".format(filename_base, i, file_extension)
                                else:
                                    output_filename_n = "{}{}".format(filename_base, file_extension)
                                misc.imsave(output_filename_n, scaled)
                                text_file.write('%s %d %d %d %d\n' % (output_filename_n, bb[0], bb[1], bb[2], bb[3]))
                        else:
                            print('Unable to align "%s"' % image_path)
                            text_file.write('%s\n' % (output_filename))
    print('Total number of images: %d' % nrof_images_total)
    print('Number of successfully aligned images: %d' % nrof_successfully_aligned)
def parse_arguments(argv):
    """Parse command-line options for the MTCNN face-alignment script.

    Args:
        argv: List of argument strings, typically ``sys.argv[1:]``.

    Returns:
        argparse.Namespace with the parsed options.
    """
    def _str2bool(value):
        # argparse's ``type=bool`` is broken: any non-empty string is truthy,
        # so ``--detect_multiple_faces False`` used to yield True.  Parse the
        # text explicitly instead.
        if isinstance(value, bool):
            return value
        if value.lower() in ('true', '1', 'yes'):
            return True
        if value.lower() in ('false', '0', 'no'):
            return False
        raise argparse.ArgumentTypeError('Boolean value expected, got %r' % value)

    parser = argparse.ArgumentParser()
    parser.add_argument('--input_dir', type=str, help='Directory with unaligned images.')
    parser.add_argument('--output_dir', type=str, help='Directory with aligned face thumbnails.')
    parser.add_argument('--image_size', type=int,
        help='Image size (height, width) in pixels.', default=182)
    parser.add_argument('--margin', type=int,
        help='Margin for the crop around the bounding box (height, width) in pixels.', default=44)
    parser.add_argument('--random_order',
        help='Shuffles the order of images to enable alignment using multiple processes.', action='store_true')
    parser.add_argument('--gpu_memory_fraction', type=float,
        help='Upper bound on the amount of GPU memory that will be used by the process.', default=1.0)
    parser.add_argument('--detect_multiple_faces', type=_str2bool,
        help='Detect and align multiple faces per image.', default=False)
    return parser.parse_args(argv)
# Script entry point: align all faces found under --input_dir.
if __name__ == '__main__':
    main(parse_arguments(sys.argv[1:]))
| facenet/align/align_dataset_mtcnn.py | 8,302 | Performs face alignment and stores face thumbnails in the output directory.
MIT License Copyright (c) 2016 David Sandberg Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. Store some git revision info in a text file in the log directory minimum size of face three steps's threshold scale factor Add a random key to the filename to allow alignment using multiple processes some extra weight on the centering | 1,383 | en | 0.852149 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Initial South schema migration for the ``umessages`` app.

    Creates the ``MessageContact``, ``MessageRecipient`` and ``Message``
    tables plus the unique (from_user, to_user) constraint.  This file is
    auto-generated by South; the statements below are intentionally left
    byte-for-byte as generated.
    """
    def forwards(self, orm):
        """Apply the migration: create the three tables and the constraint."""
        # Adding model 'MessageContact'
        db.create_table('umessages_messagecontact', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('from_user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='from_users', to=orm['auth.User'])),
            ('to_user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='to_users', to=orm['auth.User'])),
            ('latest_message', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['umessages.Message'])),
        ))
        db.send_create_signal('umessages', ['MessageContact'])
        # Adding unique constraint on 'MessageContact', fields ['from_user', 'to_user']
        db.create_unique('umessages_messagecontact', ['from_user_id', 'to_user_id'])
        # Adding model 'MessageRecipient'
        db.create_table('umessages_messagerecipient', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
            ('message', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['umessages.Message'])),
            ('read_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
            ('deleted_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
        ))
        db.send_create_signal('umessages', ['MessageRecipient'])
        # Adding model 'Message'
        db.create_table('umessages_message', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('body', self.gf('django.db.models.fields.TextField')()),
            ('sender', self.gf('django.db.models.fields.related.ForeignKey')(related_name='sent_messages', to=orm['auth.User'])),
            ('sent_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('sender_deleted_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
        ))
        db.send_create_signal('umessages', ['Message'])
    def backwards(self, orm):
        """Revert the migration: drop the constraint first, then the tables."""
        # Removing unique constraint on 'MessageContact', fields ['from_user', 'to_user']
        db.delete_unique('umessages_messagecontact', ['from_user_id', 'to_user_id'])
        # Deleting model 'MessageContact'
        db.delete_table('umessages_messagecontact')
        # Deleting model 'MessageRecipient'
        db.delete_table('umessages_messagerecipient')
        # Deleting model 'Message'
        db.delete_table('umessages_message')
    # Frozen ORM: South's snapshot of the model definitions at the time this
    # migration was generated.  Used to reconstruct model state; do not edit.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'umessages.message': {
            'Meta': {'ordering': "['-sent_at']", 'object_name': 'Message'},
            'body': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'recipients': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'received_messages'", 'symmetrical': 'False', 'through': "orm['umessages.MessageRecipient']", 'to': "orm['auth.User']"}),
            'sender': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sent_messages'", 'to': "orm['auth.User']"}),
            'sender_deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'sent_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
        },
        'umessages.messagecontact': {
            'Meta': {'ordering': "['latest_message']", 'unique_together': "(('from_user', 'to_user'),)", 'object_name': 'MessageContact'},
            'from_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'from_users'", 'to': "orm['auth.User']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'latest_message': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['umessages.Message']"}),
            'to_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'to_users'", 'to': "orm['auth.User']"})
        },
        'umessages.messagerecipient': {
            'Meta': {'object_name': 'MessageRecipient'},
            'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['umessages.Message']"}),
            'read_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        }
    }
    complete_apps = ['umessages']
| userena/contrib/umessages/migrations/0001_initial.py | 8,080 | encoding: utf-8 Adding model 'MessageContact' Adding unique constraint on 'MessageContact', fields ['from_user', 'to_user'] Adding model 'MessageRecipient' Adding model 'Message' Removing unique constraint on 'MessageContact', fields ['from_user', 'to_user'] Deleting model 'MessageContact' Deleting model 'MessageRecipient' Deleting model 'Message' | 349 | en | 0.408663 |
#!/bin/env python
import csv
from datetime import datetime
import os
import xml.etree.ElementTree as ET
import xml
# https://stackabuse.com/reading-and-writing-xml-files-in-python/
# xmlformatter:
# https://www.freeformatter.com/xml-formatter.html#ad-output
# Path of the RAJAPerf CSV timing report this script converts to XML.
infile = "./RAJAPerf-timing.csv"
def read_infile(infile):
    """Read a RAJAPerf CSV timing report and return its rows.

    The previous version opened the file and constructed a csv.reader but
    never returned or stored it, so calling it had no effect.  Returning
    the parsed rows is backward compatible: existing callers ignored the
    old ``None`` result.

    Args:
        infile: Path to a comma-separated RAJAPerf timing file.

    Returns:
        List of rows, each row a list of column strings.
    """
    with open(infile) as csvfile:
        rps_reader = csv.reader(csvfile, delimiter=',')
        return list(rps_reader)
def get_date():
    """Return the current local time as an ISO-8601-style timestamp.

    Uses ``%Y`` instead of the original ``%-Y``: the ``-`` (no-padding)
    flag is a glibc extension that raises ValueError on Windows, and the
    four-digit year never needs its padding stripped anyway.

    Returns:
        String formatted like ``2020-12-16T14:34:40``.
    """
    date = datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
    return date
# ---- Module-level report skeleton ---------------------------------------
# These statements run at import time and build the global XML elements and
# lookup tables that associate_timings_with_xml / create_RPS_xml_report /
# run all read.  Order matters; do not reorder.
date = get_date()
perf_report = ET.Element("performance-report")
name ="RAJAPerf" + date + ".xml"
time_units="seconds"
perf_report.set("date", date)
perf_report.set("name", name)
perf_report.set("time-units", time_units)
# Root <timing> element; every suite subtree is attached under this node.
perf_root = ET.SubElement(perf_report, 'timing')
perf_root.set("end-time",date)
perf_root.set("name", "kokkos_perf_suite")
#print(ET.tostring(perf_report))
# b'<performance-report time-units="seconds" date="2020-12-16T14:34:40"
# name="RAJAPerf-timing.csv"><timing end-time="2020-12-16T14:34:40"
# name="kokkos_perf_suite" /></performance-report>'
# metadata TBD
# create hierarchy
# All CSV rows, including the two header rows (rows 0-1); kernel data
# starts at index 2.
test_suite_list = []
with open(infile) as csvfile:
    rps_reader = csv.reader(csvfile, delimiter=',')
    for row in rps_reader:
        test_suite_list.append(row)
# Suite name = kernel-name prefix before the first underscore,
# e.g. "Basic_ADD" -> "Basic".
suite_names_set = set([x[0][:x[0].find("_")] for x in test_suite_list[2:]])
#suite_names_set
#Out[135]: {'Basic', 'KokkosMechanics'}
# Map each suite name to the list of its kernel rows.
heirarch_dict = dict()
for name in suite_names_set:
    heirarch_dict[name] = []
# heirarch_dict
# Out[137]: {'KokkosMechanics': [], 'Basic': []}
for item in test_suite_list[2:]:
    key = item[0][:item[0].find("_")]
    heirarch_dict[key].append(item)
    #print(item)
#NEXT STEPS: For the main test categories, Basic and KokkosMechanics, sum
# the test times over all of the kernels for each of their variants
# Map CSV column index -> variant name (from header row 1); used by
# create_RPS_xml_report to label timings.
col_meanings_dict = dict()
for index, item in enumerate(test_suite_list[1]):
    #print(index, item)
    col_meanings_dict[index] = item
#col_meanings_dict
# Out[152]:
# {0: 'Kernel ',
# 1: ' Base_Seq ',
# 2: ' Lambda_Seq ',
# 3: ' RAJA_Seq ',
# 4: ' Base_CUDA ',
# 5: ' RAJA_CUDA ',
# 6: ' Kokkos_Lambda_Seq ',
# 7: ' Kokkos_Functor_Seq ',
# 8: ' Kokkos_Lambda_CUDA ',
# 9: ' Kokkos_Functor_CUDA'}
def associate_timings_with_xml(xml_element, timing_dict, suite_or_test_name):
    """Attach timing data to an XML element as attributes.

    Each key of ``timing_dict`` (a variant name) becomes a lower-cased
    attribute whose value is the timing rendered as a string; the
    element's ``name`` attribute is set to the stripped suite/test name.
    """
    for variant, seconds in timing_dict.items():
        xml_element.set(variant.lower(), str(seconds))
    xml_element.set("name", suite_or_test_name.strip())
def create_RPS_xml_report(suite_name, suite_data_list):
    """Emit one <timing> subtree under ``perf_root`` for a test suite.

    ``suite_name`` is a suite label such as "Basic" or "KokkosMechanics";
    ``suite_data_list`` holds that suite's CSV rows (kernel name followed
    by per-variant timings).  The suite element carries per-variant totals
    summed over all kernels; each kernel row becomes a child element with
    its own per-variant timings.  Cells reading "Not run" are skipped.
    """
    # Pass 1: total each variant's time across every kernel in the suite.
    totals = dict()
    for row in suite_data_list:
        for col, cell in enumerate(row[1:]):
            if "Not run" in cell:
                continue
            variant = col_meanings_dict[col + 1]
            totals[variant] = totals.get(variant, 0.0) + float(cell)
    suite_element = ET.SubElement(perf_root, "timing")
    associate_timings_with_xml(suite_element, totals, suite_name)
    # Pass 2: one child <timing> per kernel row with its own variants.
    for row in suite_data_list:
        per_kernel = {}
        for col, cell in enumerate(row[1:]):
            if "Not run" in cell:
                continue
            per_kernel[col_meanings_dict[col + 1]] = float(cell)
        kernel_element = ET.SubElement(suite_element, "timing")
        associate_timings_with_xml(kernel_element, per_kernel, row[0])
def run():
    """Build the XML report for every suite and print it to stdout."""
    read_infile(infile)
    # Emit one <timing> subtree per suite (e.g. Basic, KokkosMechanics).
    for suite, rows in heirarch_dict.items():
        create_RPS_xml_report(suite, rows)
    # ET.dump writes the assembled report to stdout.
    ET.dump(perf_report)
# Script entry point: convert the CSV report to XML and print it.
if __name__ == "__main__":
    run()
| scripts/csv_xml.py | 4,448 | STUB -- xml_element will be an element of perf_report;
timing_dict = a map of variant names to test run times
STUB - suite_name is a string = Basic, KokkosMechanics, etc.;
suite_data_list will be the values for a key, Basic or KokkosMechanics
STUB
STUB
STUB
!/bin/env python https://stackabuse.com/reading-and-writing-xml-files-in-python/ xmlformatter: https://www.freeformatter.com/xml-formatter.htmlad-outputprint(ET.tostring(perf_report)) b'<performance-report time-units="seconds" date="2020-12-16T14:34:40" name="RAJAPerf-timing.csv"><timing end-time="2020-12-16T14:34:40" name="kokkos_perf_suite" /></performance-report>' metadata TBD create hierarchysuite_names_setOut[135]: {'Basic', 'KokkosMechanics'} heirarch_dict Out[137]: {'KokkosMechanics': [], 'Basic': []}print(item)NEXT STEPS: For the main test categories, Basic and KokkosMechanics, sum the test times over all of the kernels for each of their variantsprint(index, item)col_meanings_dict Out[152]: {0: 'Kernel ', 1: ' Base_Seq ', 2: ' Lambda_Seq ', 3: ' RAJA_Seq ', 4: ' Base_CUDA ', 5: ' RAJA_CUDA ', 6: ' Kokkos_Lambda_Seq ', 7: ' Kokkos_Functor_Seq ', 8: ' Kokkos_Lambda_CUDA ', 9: ' Kokkos_Functor_CUDA'}print(suite_data_list) sums values of all the basic kernelsprint(aggregate_results_dict)create_RPS_xml_report("Basic", heirarch_dict["Basic"]) Aided in debuggingprint(heirarch_dict["KokkosMechanics"]) Prints xml to screen as stringprint(ET.tostring(perf_report)) | 1,473 | en | 0.463663 |
#!/usr/bin/env python2
# -*- mode: python -*-
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2016 The Electrum developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import threading
from PyQt5.QtWidgets import QVBoxLayout, QLabel
from qtum_electrum.gui.qt.password_dialog import PasswordLayout, PW_PASSPHRASE
from qtum_electrum.gui.qt.util import *
from qtum_electrum.i18n import _
from qtum_electrum.util import PrintError
# The trickiest thing about this handler was getting windows properly
# parented on MacOSX.
class QtHandlerBase(QObject, PrintError):
    '''An interface between the GUI (here, QT) and the device handling
    logic for handling I/O.

    Device logic runs on a worker thread.  Each user-interaction request
    is emitted as a Qt signal (its slot runs on the GUI thread); the
    worker then blocks on ``self.done`` until the slot records the answer
    on ``self`` and sets the event.'''
    # Signals marshal requests from the worker thread onto the GUI thread.
    passphrase_signal = pyqtSignal(object, object)
    message_signal = pyqtSignal(object, object)
    error_signal = pyqtSignal(object, object)
    word_signal = pyqtSignal(object)
    clear_signal = pyqtSignal()
    query_signal = pyqtSignal(object, object)
    yes_no_signal = pyqtSignal(object)
    status_signal = pyqtSignal(object)
    def __init__(self, win, device):
        """Connect every signal to its GUI-thread slot.

        win: parent window object; device: device name shown in dialog titles.
        """
        super(QtHandlerBase, self).__init__()
        self.clear_signal.connect(self.clear_dialog)
        self.error_signal.connect(self.error_dialog)
        self.message_signal.connect(self.message_dialog)
        self.passphrase_signal.connect(self.passphrase_dialog)
        self.word_signal.connect(self.word_dialog)
        self.query_signal.connect(self.win_query_choice)
        self.yes_no_signal.connect(self.win_yes_no_question)
        self.status_signal.connect(self._update_status)
        self.win = win
        self.device = device
        self.dialog = None  # currently-open modal dialog, if any
        self.done = threading.Event()  # set by GUI slots when an answer is ready
    def top_level_window(self):
        """Delegate to the window so dialogs get the correct parent."""
        return self.win.top_level_window()
    def update_status(self, paired):
        """Thread-safe status update: forward to the GUI thread via signal."""
        self.status_signal.emit(paired)
    def _update_status(self, paired):
        """GUI-thread slot: swap the status-bar icon for paired/unpaired."""
        if hasattr(self, 'button'):
            button = self.button
            icon_name = button.icon_paired if paired else button.icon_unpaired
            button.setIcon(read_QIcon(icon_name))
    def query_choice(self, msg, labels):
        """Ask the user to pick one of ``labels``; blocks the calling thread."""
        self.done.clear()
        self.query_signal.emit(msg, labels)
        self.done.wait()
        return self.choice
    def yes_no_question(self, msg):
        """Ask a yes/no question; blocks the calling thread, returns the answer."""
        self.done.clear()
        self.yes_no_signal.emit(msg)
        self.done.wait()
        return self.ok
    def show_message(self, msg, on_cancel=None):
        """Show a message dialog without blocking the calling thread."""
        self.message_signal.emit(msg, on_cancel)
    def show_error(self, msg, blocking=False):
        """Show an error; optionally block until the user dismisses it."""
        self.done.clear()
        self.error_signal.emit(msg, blocking)
        if blocking:
            self.done.wait()
    def finished(self):
        """Close any open dialog once the device operation completes."""
        self.clear_signal.emit()
    def get_word(self, msg):
        """Prompt for a single word; blocks the calling thread."""
        self.done.clear()
        self.word_signal.emit(msg)
        self.done.wait()
        return self.word
    def get_passphrase(self, msg, confirm):
        """Prompt for a passphrase; blocks, returns str or None on cancel."""
        self.done.clear()
        self.passphrase_signal.emit(msg, confirm)
        self.done.wait()
        return self.passphrase
    def passphrase_dialog(self, msg, confirm):
        """GUI-thread slot: collect a passphrase, then release the worker."""
        # If confirm is true, require the user to enter the passphrase twice
        parent = self.top_level_window()
        d = WindowModalDialog(parent, _("Enter Passphrase"))
        if confirm:
            OK_button = OkButton(d)
            playout = PasswordLayout(msg=msg, kind=PW_PASSPHRASE, OK_button=OK_button)
            vbox = QVBoxLayout()
            vbox.addLayout(playout.layout())
            vbox.addLayout(Buttons(CancelButton(d), OK_button))
            d.setLayout(vbox)
            passphrase = playout.new_password() if d.exec_() else None
        else:
            pw = QLineEdit()
            pw.setEchoMode(2)  # 2 == QLineEdit.Password: mask typed characters
            pw.setMinimumWidth(200)
            vbox = QVBoxLayout()
            vbox.addWidget(WWLabel(msg))
            vbox.addWidget(pw)
            vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
            d.setLayout(vbox)
            passphrase = pw.text() if d.exec_() else None
        self.passphrase = passphrase
        self.done.set()
    def word_dialog(self, msg):
        """GUI-thread slot: collect a single word, then release the worker."""
        dialog = WindowModalDialog(self.top_level_window(), "")
        hbox = QHBoxLayout(dialog)
        hbox.addWidget(QLabel(msg))
        text = QLineEdit()
        text.setMaximumWidth(100)
        text.returnPressed.connect(dialog.accept)
        hbox.addWidget(text)
        hbox.addStretch(1)
        dialog.exec_() # Firmware cannot handle cancellation
        self.word = text.text()
        self.done.set()
    def message_dialog(self, msg, on_cancel):
        """GUI-thread slot: show a cancellable 'check your device' dialog."""
        # Called more than once during signing, to confirm output and fee
        self.clear_dialog()
        title = _('Please check your %s device') % self.device
        self.dialog = dialog = WindowModalDialog(self.top_level_window(), title)
        l = QLabel(msg)
        vbox = QVBoxLayout(dialog)
        vbox.addWidget(l)
        if on_cancel:
            dialog.rejected.connect(on_cancel)
        vbox.addLayout(Buttons(CancelButton(dialog)))
        dialog.show()
    def error_dialog(self, msg, blocking):
        """GUI-thread slot: show the error; release the worker if blocking."""
        self.win.show_error(msg, parent=self.top_level_window())
        if blocking:
            self.done.set()
    def clear_dialog(self):
        """GUI-thread slot: accept and drop the current dialog, if any."""
        if self.dialog:
            self.dialog.accept()
            self.dialog = None
    def win_query_choice(self, msg, labels):
        """GUI-thread slot: record the user's choice and release the worker."""
        self.choice = self.win.query_choice(msg, labels)
        self.done.set()
    def win_yes_no_question(self, msg):
        """GUI-thread slot: record the yes/no answer and release the worker."""
        self.ok = self.win.question(msg)
        self.done.set()
from qtum_electrum.plugin import hook
from qtum_electrum.util import UserCancelled
from qtum_electrum.gui.qt.main_window import StatusBarButton
class QtPluginBase(object):
    """Qt-side glue shared by hardware-wallet plugins.

    Subclasses are expected to supply ``keystore_class``, ``device``,
    ``icon_paired`` / ``icon_unpaired``, ``libraries_available``,
    ``create_handler``, ``get_client`` and ``device_manager`` (referenced
    below but not defined here).
    """
    @hook
    def load_wallet(self, wallet, window):
        """Hook: attach a status-bar button, handler and worker thread to
        each keystore of this plugin's type when a wallet is loaded."""
        for keystore in wallet.get_keystores():
            if not isinstance(keystore, self.keystore_class):
                continue
            if not self.libraries_available:
                message = keystore.plugin.get_library_not_available_message()
                window.show_error(message)
                return
            tooltip = self.device + '\n' + (keystore.label or 'unnamed')
            cb = partial(self.show_settings_dialog, window, keystore)
            button = StatusBarButton(QIcon(self.icon_unpaired), tooltip, cb)
            button.icon_paired = self.icon_paired
            button.icon_unpaired = self.icon_unpaired
            window.statusBar().addPermanentWidget(button)
            handler = self.create_handler(window)
            handler.button = button
            keystore.handler = handler
            # Worker thread so device I/O never blocks the GUI thread.
            keystore.thread = TaskThread(window, window.on_error)
            self.add_show_address_on_hw_device_button_for_receive_addr(wallet, keystore, window)
            # Trigger a pairing
            keystore.thread.add(partial(self.get_client, keystore))
    def choose_device(self, window, keystore):
        '''This dialog box should be usable even if the user has
        forgotten their PIN or it is in bootloader mode.

        Returns the device id for the keystore's xpub, prompting the user
        to select a device if none is paired; returns None on cancel.'''
        device_id = self.device_manager().xpub_id(keystore.xpub)
        if not device_id:
            try:
                info = self.device_manager().select_device(self, keystore.handler, keystore)
            except UserCancelled:
                return
            device_id = info.device.id_
        return device_id
    def show_settings_dialog(self, window, keystore):
        """Status-bar button callback.

        NOTE(review): device_id is computed but unused here; subclasses
        presumably override/extend this to open their own dialog -- confirm.
        """
        device_id = self.choose_device(window, keystore)
    def add_show_address_on_hw_device_button_for_receive_addr(self, wallet, keystore, main_window):
        """Add an 'eye' button to the receive-address field that shows the
        current address on the hardware device for verification."""
        plugin = keystore.plugin
        receive_address_e = main_window.receive_address_e
        def show_address():
            # Queue the request on the keystore's worker thread.
            addr = receive_address_e.text()
            keystore.thread.add(partial(plugin.show_address, wallet, addr, keystore))
        receive_address_e.addButton("eye1.png", show_address, _("Show on {}").format(plugin.device))
| qtum_electrum/plugins/hw_wallet/qt.py | 8,946 | An interface between the GUI (here, QT) and the device handling
logic for handling I/O.
This dialog box should be usable even if the user has
forgotten their PIN or it is in bootloader mode.
!/usr/bin/env python2 -*- mode: python -*- Electrum - lightweight Bitcoin client Copyright (C) 2016 The Electrum developers Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. The trickiest thing about this handler was getting windows properly parented on MacOSX. If confirm is true, require the user to enter the passphrase twice Firmware cannot handle cancellation Called more than once during signing, to confirm output and fee Trigger a pairing | 1,610 | en | 0.886344 |
"""Python Crypto Bot consuming Coinbase Pro or Binance APIs"""
import functools
import os
import sched
import sys
import time
import pandas as pd
from datetime import datetime
from models.PyCryptoBot import PyCryptoBot, truncate as _truncate
from models.AppState import AppState
from models.Trading import TechnicalAnalysis
from models.TradingAccount import TradingAccount
from models.helper.MarginHelper import calculate_margin
from views.TradingGraphs import TradingGraphs
from models.Strategy import Strategy
from models.helper.LogHelper import Logger
# minimal traceback
sys.tracebacklimit = 1
# Module-level singletons shared by the scheduled trading job.
app = PyCryptoBot()  # bot/exchange configuration
account = TradingAccount(app)
technical_analysis = None  # set on the first executeJob run (global there)
state = AppState(app, account)  # mutable bot state (iterations, last action)
state.initLastAction()
# Scheduler used to (re-)queue the trading job at each interval.
s = sched.scheduler(time.time, time.sleep)
def executeJob(sc=None, app: PyCryptoBot=None, state: AppState=None, trading_data=pd.DataFrame()):
    """Trading bot job which runs at a scheduled interval.

    Core loop of the bot: fetch (or receive, when simulating) candle data,
    compute technical indicators, ask the Strategy for a BUY/SELL/WAIT
    action, execute or simulate the trade, then re-schedule itself on the
    module-level scheduler ``s``.

    :param sc: scheduler handle passed back into re-scheduled invocations
    :param app: PyCryptoBot configuration / exchange facade
    :param state: mutable AppState carrying position, counters, last action
    :param trading_data: pre-loaded candles (only used when simulating)
    """
    global technical_analysis

    # connectivity check (only when running live)
    if app.isLive() and app.getTime() is None:
        Logger.warning('Your connection to the exchange has gone down, will retry in 1 minute!')

        # poll every 5 minute
        list(map(s.cancel, s.queue))
        s.enter(300, 1, executeJob, (sc, app, state))
        return

    # increment state.iterations
    state.iterations = state.iterations + 1

    if not app.isSimulation():
        # retrieve the app.getMarket() data
        trading_data = app.getHistoricalData(app.getMarket(), app.getGranularity())
    else:
        if len(trading_data) == 0:
            return None

    # analyse the market data (simulated data with > 8 columns already
    # carries pre-computed indicators, so skip re-computing them)
    if app.isSimulation() and len(trading_data.columns) > 8:
        df = trading_data
    else:
        trading_dataCopy = trading_data.copy()
        technical_analysis = TechnicalAnalysis(trading_dataCopy)
        technical_analysis.addAll()
        df = technical_analysis.getDataFrame()

    # simulation walks through the frame one row per iteration
    if app.isSimulation():
        df_last = app.getInterval(df, state.iterations)
    else:
        df_last = app.getInterval(df)

    if len(df_last.index.format()) > 0:
        current_df_index = str(df_last.index.format()[0])
    else:
        current_df_index = state.last_df_index

    # daily candles come back as a bare date; normalise to a timestamp
    formatted_current_df_index = f'{current_df_index} 00:00:00' if len(current_df_index) == 10 else current_df_index

    # smart switch: drop from 1h to 15min candles in a confirmed bull market
    if app.getSmartSwitch() == 1 and app.getGranularity() == 3600 and app.is1hEMA1226Bull() is True and app.is6hEMA1226Bull() is True:
        Logger.info('*** smart switch from granularity 3600 (1 hour) to 900 (15 min) ***')

        app.notifyTelegram(app.getMarket() + " smart switch from granularity 3600 (1 hour) to 900 (15 min)")

        app.setGranularity(900)
        list(map(s.cancel, s.queue))
        s.enter(5, 1, executeJob, (sc, app, state))

    # smart switch: return from 15min to 1h candles when the bull fades
    if app.getSmartSwitch() == 1 and app.getGranularity() == 900 and app.is1hEMA1226Bull() is False and app.is6hEMA1226Bull() is False:
        Logger.info("*** smart switch from granularity 900 (15 min) to 3600 (1 hour) ***")

        app.notifyTelegram(app.getMarket() + " smart switch from granularity 900 (15 min) to 3600 (1 hour)")

        app.setGranularity(3600)
        list(map(s.cancel, s.queue))
        s.enter(5, 1, executeJob, (sc, app, state))

    if app.getExchange() == 'binance' and app.getGranularity() == 86400:
        if len(df) < 250:
            # data frame should have 250 rows, if not retry
            Logger.error('error: data frame length is < 250 (' + str(len(df)) + ')')
            list(map(s.cancel, s.queue))
            s.enter(300, 1, executeJob, (sc, app, state))
    else:
        if len(df) < 300:
            if not app.isSimulation():
                # data frame should have 300 rows, if not retry
                Logger.error('error: data frame length is < 300 (' + str(len(df)) + ')')
                list(map(s.cancel, s.queue))
                s.enter(300, 1, executeJob, (sc, app, state))

    if len(df_last) > 0:
        now = datetime.today().strftime('%Y-%m-%d %H:%M:%S')

        if not app.isSimulation():
            # live mode: use the real-time ticker, falling back to the
            # candle close when the ticker is stale or zero
            ticker = app.getTicker(app.getMarket())
            now = ticker[0]
            price = ticker[1]
            if price < df_last['low'].values[0] or price == 0:
                price = float(df_last['close'].values[0])
        else:
            price = float(df_last['close'].values[0])

        if price < 0.0001:
            raise Exception(app.getMarket() + ' is unsuitable for trading, quote price is less than 0.0001!')

        # technical indicators
        ema12gtema26 = bool(df_last['ema12gtema26'].values[0])
        ema12gtema26co = bool(df_last['ema12gtema26co'].values[0])
        goldencross = bool(df_last['goldencross'].values[0])
        macdgtsignal = bool(df_last['macdgtsignal'].values[0])
        macdgtsignalco = bool(df_last['macdgtsignalco'].values[0])
        ema12ltema26 = bool(df_last['ema12ltema26'].values[0])
        ema12ltema26co = bool(df_last['ema12ltema26co'].values[0])
        macdltsignal = bool(df_last['macdltsignal'].values[0])
        macdltsignalco = bool(df_last['macdltsignalco'].values[0])
        obv = float(df_last['obv'].values[0])
        obv_pc = float(df_last['obv_pc'].values[0])
        elder_ray_buy = bool(df_last['eri_buy'].values[0])
        elder_ray_sell = bool(df_last['eri_sell'].values[0])

        # if simulation iterations < 200 set goldencross to true
        # (SMA200 needs 200 rows of history before it is meaningful)
        if app.isSimulation() and state.iterations < 200:
            goldencross = True

        # candlestick detection
        hammer = bool(df_last['hammer'].values[0])
        inverted_hammer = bool(df_last['inverted_hammer'].values[0])
        hanging_man = bool(df_last['hanging_man'].values[0])
        shooting_star = bool(df_last['shooting_star'].values[0])
        three_white_soldiers = bool(df_last['three_white_soldiers'].values[0])
        three_black_crows = bool(df_last['three_black_crows'].values[0])
        morning_star = bool(df_last['morning_star'].values[0])
        evening_star = bool(df_last['evening_star'].values[0])
        three_line_strike = bool(df_last['three_line_strike'].values[0])
        abandoned_baby = bool(df_last['abandoned_baby'].values[0])
        morning_doji_star = bool(df_last['morning_doji_star'].values[0])
        evening_doji_star = bool(df_last['evening_doji_star'].values[0])
        two_black_gapping = bool(df_last['two_black_gapping'].values[0])

        # ask the configured strategy for the next action
        strategy = Strategy(app, state, df, state.iterations)
        state.action = strategy.getAction()

        immediate_action = False
        margin, profit, sell_fee = 0, 0, 0

        # only evaluate margin / sell triggers while holding a position
        if state.last_buy_size > 0 and state.last_buy_price > 0 and price > 0 and state.last_action == 'BUY':
            # update last buy high
            if price > state.last_buy_high:
                state.last_buy_high = price

            if state.last_buy_high > 0:
                change_pcnt_high = ((price / state.last_buy_high) - 1) * 100
            else:
                change_pcnt_high = 0

            # buy and sell calculations
            state.last_buy_fee = round(state.last_buy_size * app.getTakerFee(), 8)
            state.last_buy_filled = round(((state.last_buy_size - state.last_buy_fee) / state.last_buy_price), 8)

            # if not a simulation, sync with exchange orders
            if not app.isSimulation():
                exchange_last_buy = app.getLastBuy()
                if exchange_last_buy is not None:
                    if state.last_buy_size != exchange_last_buy['size']:
                        state.last_buy_size = exchange_last_buy['size']
                    if state.last_buy_filled != exchange_last_buy['filled']:
                        state.last_buy_filled = exchange_last_buy['filled']
                    if state.last_buy_price != exchange_last_buy['price']:
                        state.last_buy_price = exchange_last_buy['price']
                    if app.getExchange() == 'coinbasepro':
                        if state.last_buy_fee != exchange_last_buy['fee']:
                            state.last_buy_fee = exchange_last_buy['fee']

            margin, profit, sell_fee = calculate_margin(
                buy_size=state.last_buy_size,
                buy_filled=state.last_buy_filled,
                buy_price=state.last_buy_price,
                buy_fee=state.last_buy_fee,
                sell_percent=app.getSellPercent(),
                sell_price=price,
                sell_taker_fee=app.getTakerFee())

            # handle immediate sell actions
            if strategy.isSellTrigger(price, technical_analysis.getTradeExit(price), margin, change_pcnt_high, obv_pc, macdltsignal):
                state.action = 'SELL'
                state.last_action = 'BUY'
                immediate_action = True

            # handle overriding wait actions (do not sell if sell at loss disabled!)
            if strategy.isWaitTrigger(margin):
                state.action = 'WAIT'
                state.last_action = 'BUY'
                immediate_action = False

        bullbeartext = ''
        if app.disableBullOnly() is True or (df_last['sma50'].values[0] == df_last['sma200'].values[0]):
            bullbeartext = ''
        elif goldencross is True:
            bullbeartext = ' (BULL)'
        elif goldencross is False:
            bullbeartext = ' (BEAR)'

        # polling is every 5 minutes (even for hourly intervals), but only process once per interval
        if (immediate_action is True or state.last_df_index != current_df_index):
            precision = 4

            if (price < 0.01):
                precision = 8

            # Since precision does not change after this point, it is safe to prepare a tailored `truncate()` that would
            # work with this precision. It should save a couple of `precision` uses, one for each `truncate()` call.
            truncate = functools.partial(_truncate, n=precision)

            price_text = 'Close: ' + truncate(price)
            ema_text = app.compare(df_last['ema12'].values[0], df_last['ema26'].values[0], 'EMA12/26', precision)

            macd_text = ''
            if app.disableBuyMACD() is False:
                macd_text = app.compare(df_last['macd'].values[0], df_last['signal'].values[0], 'MACD', precision)

            obv_text = ''
            if app.disableBuyOBV() is False:
                obv_text = 'OBV: ' + truncate(df_last['obv'].values[0]) + ' (' + str(
                    truncate(df_last['obv_pc'].values[0])) + '%)'

            state.eri_text = ''
            if app.disableBuyElderRay() is False:
                if elder_ray_buy is True:
                    state.eri_text = 'ERI: buy | '
                elif elder_ray_sell is True:
                    state.eri_text = 'ERI: sell | '
                else:
                    state.eri_text = 'ERI: | '

            # log detected candlestick patterns; "strong"/"reliable" patterns
            # additionally trigger a Telegram notification
            if hammer is True:
                log_text = '* Candlestick Detected: Hammer ("Weak - Reversal - Bullish Signal - Up")'
                Logger.info(log_text)

            if shooting_star is True:
                log_text = '* Candlestick Detected: Shooting Star ("Weak - Reversal - Bearish Pattern - Down")'
                Logger.info(log_text)

            if hanging_man is True:
                log_text = '* Candlestick Detected: Hanging Man ("Weak - Continuation - Bearish Pattern - Down")'
                Logger.info(log_text)

            if inverted_hammer is True:
                log_text = '* Candlestick Detected: Inverted Hammer ("Weak - Continuation - Bullish Pattern - Up")'
                Logger.info(log_text)

            if three_white_soldiers is True:
                log_text = '*** Candlestick Detected: Three White Soldiers ("Strong - Reversal - Bullish Pattern - Up")'
                Logger.info(log_text)

                app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') ' + log_text)

            if three_black_crows is True:
                log_text = '* Candlestick Detected: Three Black Crows ("Strong - Reversal - Bearish Pattern - Down")'
                Logger.info(log_text)

                app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') ' + log_text)

            if morning_star is True:
                log_text = '*** Candlestick Detected: Morning Star ("Strong - Reversal - Bullish Pattern - Up")'
                Logger.info(log_text)

                app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') ' + log_text)

            if evening_star is True:
                log_text = '*** Candlestick Detected: Evening Star ("Strong - Reversal - Bearish Pattern - Down")'
                Logger.info(log_text)

                app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') ' + log_text)

            if three_line_strike is True:
                log_text = '** Candlestick Detected: Three Line Strike ("Reliable - Reversal - Bullish Pattern - Up")'
                Logger.info(log_text)

                app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') ' + log_text)

            if abandoned_baby is True:
                log_text = '** Candlestick Detected: Abandoned Baby ("Reliable - Reversal - Bullish Pattern - Up")'
                Logger.info(log_text)

                app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') ' + log_text)

            if morning_doji_star is True:
                log_text = '** Candlestick Detected: Morning Doji Star ("Reliable - Reversal - Bullish Pattern - Up")'
                Logger.info(log_text)

                app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') ' + log_text)

            if evening_doji_star is True:
                log_text = '** Candlestick Detected: Evening Doji Star ("Reliable - Reversal - Bearish Pattern - Down")'
                Logger.info(log_text)

                app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') ' + log_text)

            if two_black_gapping is True:
                log_text = '*** Candlestick Detected: Two Black Gapping ("Reliable - Reversal - Bearish Pattern - Down")'
                Logger.info(log_text)

                app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') ' + log_text)

            # decorate the status line: *^/^ above, *v/v below (star = crossover)
            ema_co_prefix = ''
            ema_co_suffix = ''
            if ema12gtema26co is True:
                ema_co_prefix = '*^ '
                ema_co_suffix = ' ^*'
            elif ema12ltema26co is True:
                ema_co_prefix = '*v '
                ema_co_suffix = ' v*'
            elif ema12gtema26 is True:
                ema_co_prefix = '^ '
                ema_co_suffix = ' ^'
            elif ema12ltema26 is True:
                ema_co_prefix = 'v '
                ema_co_suffix = ' v'

            macd_co_prefix = ''
            macd_co_suffix = ''
            if app.disableBuyMACD() is False:
                if macdgtsignalco is True:
                    macd_co_prefix = '*^ '
                    macd_co_suffix = ' ^*'
                elif macdltsignalco is True:
                    macd_co_prefix = '*v '
                    macd_co_suffix = ' v*'
                elif macdgtsignal is True:
                    macd_co_prefix = '^ '
                    macd_co_suffix = ' ^'
                elif macdltsignal is True:
                    macd_co_prefix = 'v '
                    macd_co_suffix = ' v'

            obv_prefix = ''
            obv_suffix = ''
            if app.disableBuyOBV() is False:
                if float(obv_pc) > 0:
                    obv_prefix = '^ '
                    obv_suffix = ' ^ | '
                elif float(obv_pc) < 0:
                    obv_prefix = 'v '
                    obv_suffix = ' v | '

            if not app.isVerbose():
                # compact single-line status output
                if state.last_action != '':
                    output_text = formatted_current_df_index + ' | ' + app.getMarket() + bullbeartext + ' | ' + \
                        app.printGranularity() + ' | ' + price_text + ' | ' + ema_co_prefix + \
                        ema_text + ema_co_suffix + ' | ' + macd_co_prefix + macd_text + macd_co_suffix + \
                        obv_prefix + obv_text + obv_suffix + state.eri_text + ' | ' + state.action + \
                        ' | Last Action: ' + state.last_action
                else:
                    output_text = formatted_current_df_index + ' | ' + app.getMarket() + bullbeartext + ' | ' + \
                        app.printGranularity() + ' | ' + price_text + ' | ' + ema_co_prefix + \
                        ema_text + ema_co_suffix + ' | ' + macd_co_prefix + macd_text + macd_co_suffix + \
                        obv_prefix + obv_text + obv_suffix + state.eri_text + ' | ' + state.action + ' '

                if state.last_action == 'BUY':
                    if state.last_buy_size > 0:
                        margin_text = truncate(margin) + '%'
                    else:
                        margin_text = '0%'

                    output_text += ' | ' + margin_text + ' (delta: ' + str(round(price - state.last_buy_price, precision)) + ')'

                Logger.info(output_text)

                # Seasonal Autoregressive Integrated Moving Average (ARIMA) model (ML prediction for 3 intervals from now)
                if not app.isSimulation():
                    try:
                        prediction = technical_analysis.seasonalARIMAModelPrediction(int(app.getGranularity() / 60) * 3)  # 3 intervals from now
                        Logger.info(f'Seasonal ARIMA model predicts the closing price will be {str(round(prediction[1], 2))} at {prediction[0]} (delta: {round(prediction[1] - price, 2)})')
                    # NOTE(review): bare except silently swallows any ARIMA
                    # failure; the prediction is best-effort only
                    except:
                        pass

                if state.last_action == 'BUY':
                    # display support, resistance and fibonacci levels
                    Logger.info(technical_analysis.printSupportResistanceFibonacciLevels(price))

            else:
                # verbose output: per-indicator debug lines plus a boxed summary
                Logger.debug('-- Iteration: ' + str(state.iterations) + ' --' + bullbeartext)

                if state.last_action == 'BUY':
                    if state.last_buy_size > 0:
                        margin_text = truncate(margin) + '%'
                    else:
                        margin_text = '0%'

                    Logger.debug('-- Margin: ' + margin_text + ' --')

                Logger.debug('price: ' + truncate(price))
                Logger.debug('ema12: ' + truncate(float(df_last['ema12'].values[0])))
                Logger.debug('ema26: ' + truncate(float(df_last['ema26'].values[0])))
                Logger.debug('ema12gtema26co: ' + str(ema12gtema26co))
                Logger.debug('ema12gtema26: ' + str(ema12gtema26))
                Logger.debug('ema12ltema26co: ' + str(ema12ltema26co))
                Logger.debug('ema12ltema26: ' + str(ema12ltema26))
                Logger.debug('sma50: ' + truncate(float(df_last['sma50'].values[0])))
                Logger.debug('sma200: ' + truncate(float(df_last['sma200'].values[0])))
                Logger.debug('macd: ' + truncate(float(df_last['macd'].values[0])))
                Logger.debug('signal: ' + truncate(float(df_last['signal'].values[0])))
                Logger.debug('macdgtsignal: ' + str(macdgtsignal))
                Logger.debug('macdltsignal: ' + str(macdltsignal))
                Logger.debug('obv: ' + str(obv))
                Logger.debug('obv_pc: ' + str(obv_pc))
                Logger.debug('action: ' + state.action)

                # informational output on the most recent entry
                Logger.info('')
                Logger.info('================================================================================')
                txt = ' Iteration : ' + str(state.iterations) + bullbeartext
                Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
                txt = ' Timestamp : ' + str(df_last.index.format()[0])
                Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
                Logger.info('--------------------------------------------------------------------------------')
                txt = ' Close : ' + truncate(price)
                Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
                txt = ' EMA12 : ' + truncate(float(df_last['ema12'].values[0]))
                Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
                txt = ' EMA26 : ' + truncate(float(df_last['ema26'].values[0]))
                Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
                txt = ' Crossing Above : ' + str(ema12gtema26co)
                Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
                txt = ' Currently Above : ' + str(ema12gtema26)
                Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
                txt = ' Crossing Below : ' + str(ema12ltema26co)
                Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
                txt = ' Currently Below : ' + str(ema12ltema26)
                Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')

                if (ema12gtema26 is True and ema12gtema26co is True):
                    txt = ' Condition : EMA12 is currently crossing above EMA26'
                elif (ema12gtema26 is True and ema12gtema26co is False):
                    txt = ' Condition : EMA12 is currently above EMA26 and has crossed over'
                elif (ema12ltema26 is True and ema12ltema26co is True):
                    txt = ' Condition : EMA12 is currently crossing below EMA26'
                elif (ema12ltema26 is True and ema12ltema26co is False):
                    txt = ' Condition : EMA12 is currently below EMA26 and has crossed over'
                else:
                    txt = ' Condition : -'
                Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')

                txt = ' SMA20 : ' + truncate(float(df_last['sma20'].values[0]))
                Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
                txt = ' SMA200 : ' + truncate(float(df_last['sma200'].values[0]))
                Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
                Logger.info('--------------------------------------------------------------------------------')
                txt = ' MACD : ' + truncate(float(df_last['macd'].values[0]))
                Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
                txt = ' Signal : ' + truncate(float(df_last['signal'].values[0]))
                Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
                txt = ' Currently Above : ' + str(macdgtsignal)
                Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
                txt = ' Currently Below : ' + str(macdltsignal)
                Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')

                if (macdgtsignal is True and macdgtsignalco is True):
                    txt = ' Condition : MACD is currently crossing above Signal'
                elif (macdgtsignal is True and macdgtsignalco is False):
                    txt = ' Condition : MACD is currently above Signal and has crossed over'
                elif (macdltsignal is True and macdltsignalco is True):
                    txt = ' Condition : MACD is currently crossing below Signal'
                elif (macdltsignal is True and macdltsignalco is False):
                    txt = ' Condition : MACD is currently below Signal and has crossed over'
                else:
                    txt = ' Condition : -'
                Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')

                Logger.info('--------------------------------------------------------------------------------')
                txt = ' Action : ' + state.action
                Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
                Logger.info('================================================================================')
                if state.last_action == 'BUY':
                    txt = ' Margin : ' + margin_text
                    Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
                    Logger.info('================================================================================')

            # if a buy signal
            if state.action == 'BUY':
                state.last_buy_price = price
                state.last_buy_high = state.last_buy_price

                # if live
                if app.isLive():
                    app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') BUY at ' + price_text)

                    if not app.isVerbose():
                        Logger.info(formatted_current_df_index + ' | ' + app.getMarket() + ' | ' + app.printGranularity() + ' | ' + price_text + ' | BUY')
                    else:
                        Logger.info('--------------------------------------------------------------------------------')
                        Logger.info('| *** Executing LIVE Buy Order *** |')
                        Logger.info('--------------------------------------------------------------------------------')

                    # display balances
                    Logger.info(app.getBaseCurrency() + ' balance before order: ' + str(account.getBalance(app.getBaseCurrency())))
                    Logger.info(app.getQuoteCurrency() + ' balance before order: ' + str(account.getBalance(app.getQuoteCurrency())))

                    # execute a live market buy (entire quote balance, capped by buymaxsize)
                    state.last_buy_size = float(account.getBalance(app.getQuoteCurrency()))
                    if app.getBuyMaxSize() and state.last_buy_size > app.getBuyMaxSize():
                        state.last_buy_size = app.getBuyMaxSize()
                    resp = app.marketBuy(app.getMarket(), state.last_buy_size, app.getBuyPercent())
                    Logger.debug(resp)

                    # display balances
                    Logger.info(app.getBaseCurrency() + ' balance after order: ' + str(account.getBalance(app.getBaseCurrency())))
                    Logger.info(app.getQuoteCurrency() + ' balance after order: ' + str(account.getBalance(app.getQuoteCurrency())))
                # if not live
                else:
                    app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') TEST BUY at ' + price_text)
                    # TODO: Improve simulator calculations by including calculations for buy and sell limit configurations.
                    if state.last_buy_size == 0 and state.last_buy_filled == 0:
                        # seed the simulated position with a notional 1000 quote units
                        state.last_buy_size = 1000
                        state.first_buy_size = 1000

                    state.buy_count = state.buy_count + 1
                    state.buy_sum = state.buy_sum + state.last_buy_size

                    if not app.isVerbose():
                        Logger.info(formatted_current_df_index + ' | ' + app.getMarket() + ' | ' + app.printGranularity() + ' | ' + price_text + ' | BUY')

                        bands = technical_analysis.getFibonacciRetracementLevels(float(price))
                        Logger.info(' Fibonacci Retracement Levels:' + str(bands))
                        technical_analysis.printSupportResistanceLevel(float(price))

                        # derive the fib trading band from the 1-2 levels nearest the price
                        if len(bands) >= 1 and len(bands) <= 2:
                            if len(bands) == 1:
                                first_key = list(bands.keys())[0]
                                if first_key == 'ratio1':
                                    state.fib_low = 0
                                    state.fib_high = bands[first_key]
                                # NOTE(review): this 'else' pairs only with the
                                # 'ratio1_618' check, so for first_key == 'ratio1'
                                # the else branch also runs and overwrites fib_low
                                # -- looks intentional in upstream but verify
                                if first_key == 'ratio1_618':
                                    state.fib_low = bands[first_key]
                                    state.fib_high = bands[first_key] * 2
                                else:
                                    state.fib_low = bands[first_key]
                            elif len(bands) == 2:
                                first_key = list(bands.keys())[0]
                                second_key = list(bands.keys())[1]
                                state.fib_low = bands[first_key]
                                state.fib_high = bands[second_key]
                    else:
                        Logger.info('--------------------------------------------------------------------------------')
                        Logger.info('| *** Executing TEST Buy Order *** |')
                        Logger.info('--------------------------------------------------------------------------------')

                if app.shouldSaveGraphs():
                    tradinggraphs = TradingGraphs(technical_analysis)
                    ts = datetime.now().timestamp()
                    filename = app.getMarket() + '_' + app.printGranularity() + '_buy_' + str(ts) + '.png'
                    tradinggraphs.renderEMAandMACD(len(trading_data), 'graphs/' + filename, True)

            # if a sell signal
            elif state.action == 'SELL':
                # if live
                if app.isLive():
                    # NOTE(review): margin_text is only bound earlier in this
                    # function when last_action == 'BUY'; confirm a live SELL
                    # cannot be reached without an open position
                    app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') SELL at ' +
                                       price_text + ' (margin: ' + margin_text + ', (delta: ' +
                                       str(round(price - state.last_buy_price, precision)) + ')')

                    if not app.isVerbose():
                        Logger.info(formatted_current_df_index + ' | ' + app.getMarket() + ' | ' + app.printGranularity() + ' | ' + price_text + ' | SELL')

                        bands = technical_analysis.getFibonacciRetracementLevels(float(price))
                        Logger.info(' Fibonacci Retracement Levels:' + str(bands))

                        if len(bands) >= 1 and len(bands) <= 2:
                            if len(bands) == 1:
                                first_key = list(bands.keys())[0]
                                if first_key == 'ratio1':
                                    state.fib_low = 0
                                    state.fib_high = bands[first_key]
                                # NOTE(review): same dangling-else quirk as the
                                # BUY branch above -- verify
                                if first_key == 'ratio1_618':
                                    state.fib_low = bands[first_key]
                                    state.fib_high = bands[first_key] * 2
                                else:
                                    state.fib_low = bands[first_key]
                            elif len(bands) == 2:
                                first_key = list(bands.keys())[0]
                                second_key = list(bands.keys())[1]
                                state.fib_low = bands[first_key]
                                state.fib_high = bands[second_key]
                    else:
                        Logger.info('--------------------------------------------------------------------------------')
                        Logger.info('| *** Executing LIVE Sell Order *** |')
                        Logger.info('--------------------------------------------------------------------------------')

                    # display balances
                    Logger.info(app.getBaseCurrency() + ' balance before order: ' + str(account.getBalance(app.getBaseCurrency())))
                    Logger.info(app.getQuoteCurrency() + ' balance before order: ' + str(account.getBalance(app.getQuoteCurrency())))

                    # execute a live market sell
                    resp = app.marketSell(app.getMarket(), float(account.getBalance(app.getBaseCurrency())),
                                          app.getSellPercent())
                    Logger.debug(resp)

                    # display balances
                    Logger.info(app.getBaseCurrency() + ' balance after order: ' + str(account.getBalance(app.getBaseCurrency())))
                    Logger.info(app.getQuoteCurrency() + ' balance after order: ' + str(account.getBalance(app.getQuoteCurrency())))
                # if not live
                else:
                    margin, profit, sell_fee = calculate_margin(
                        buy_size=state.last_buy_size,
                        buy_filled=state.last_buy_filled,
                        buy_price=state.last_buy_price,
                        buy_fee=state.last_buy_fee,
                        sell_percent=app.getSellPercent(),
                        sell_price=price,
                        sell_taker_fee=app.getTakerFee())

                    if state.last_buy_size > 0:
                        margin_text = truncate(margin) + '%'
                    else:
                        margin_text = '0%'

                    app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') TEST SELL at ' +
                                       price_text + ' (margin: ' + margin_text + ', (delta: ' +
                                       str(round(price - state.last_buy_price, precision)) + ')')

                    # Preserve next buy values for simulator
                    state.sell_count = state.sell_count + 1
                    buy_size = ((app.getSellPercent() / 100) * ((price / state.last_buy_price) * (state.last_buy_size - state.last_buy_fee)))
                    state.last_buy_size = buy_size - sell_fee
                    state.sell_sum = state.sell_sum + state.last_buy_size

                    if not app.isVerbose():
                        if price > 0:
                            margin_text = truncate(margin) + '%'
                        else:
                            margin_text = '0%'

                        Logger.info(formatted_current_df_index + ' | ' + app.getMarket() + ' | ' +
                                    app.printGranularity() + ' | SELL | ' + str(price) + ' | BUY | ' +
                                    str(state.last_buy_price) + ' | DIFF | ' + str(price - state.last_buy_price) +
                                    ' | DIFF | ' + str(profit) + ' | MARGIN NO FEES | ' +
                                    margin_text + ' | MARGIN FEES | ' + str(round(sell_fee, precision)))
                    else:
                        Logger.info('--------------------------------------------------------------------------------')
                        Logger.info('| *** Executing TEST Sell Order *** |')
                        Logger.info('--------------------------------------------------------------------------------')

                if app.shouldSaveGraphs():
                    tradinggraphs = TradingGraphs(technical_analysis)
                    ts = datetime.now().timestamp()
                    filename = app.getMarket() + '_' + app.printGranularity() + '_sell_' + str(ts) + '.png'
                    tradinggraphs.renderEMAandMACD(len(trading_data), 'graphs/' + filename, True)

            # last significant action
            if state.action in ['BUY', 'SELL']:
                state.last_action = state.action

            state.last_df_index = str(df_last.index.format()[0])

            # end-of-simulation summary (once the walk reaches the last row)
            if not app.isLive() and state.iterations == len(df):
                Logger.info("\nSimulation Summary: ")

                if state.buy_count > state.sell_count and app.allowSellAtLoss():
                    # Calculate last sell size
                    state.last_buy_size = ((app.getSellPercent() / 100) * ((price / state.last_buy_price) * (state.last_buy_size - state.last_buy_fee)))
                    # Reduce sell fee from last sell size
                    state.last_buy_size = state.last_buy_size - state.last_buy_price * app.getTakerFee()
                    state.sell_sum = state.sell_sum + state.last_buy_size
                    state.sell_count = state.sell_count + 1

                elif state.buy_count > state.sell_count and not app.allowSellAtLoss():
                    Logger.info("\n")
                    Logger.info(' Note : "sell at loss" is disabled and you have an open trade, if the margin')
                    Logger.info(' result below is negative it will assume you sold at the end of the')
                    Logger.info(' simulation which may not be ideal. Try setting --sellatloss 1')

                Logger.info("\n")
                Logger.info(' Buy Count : ' + str(state.buy_count))
                Logger.info(' Sell Count : ' + str(state.sell_count))
                Logger.info(' First Buy : ' + str(state.first_buy_size))
                Logger.info(' Last Sell : ' + str(state.last_buy_size))

                app.notifyTelegram(f"Simulation Summary\n Buy Count: {state.buy_count}\n Sell Count: {state.sell_count}\n First Buy: {state.first_buy_size}\n Last Sell: {state.last_buy_size}\n")

                if state.sell_count > 0:
                    Logger.info("\n")
                    Logger.info(' Margin : ' + _truncate((((state.last_buy_size - state.first_buy_size) / state.first_buy_size) * 100), 4) + '%')
                    Logger.info("\n")
                    Logger.info(' ** non-live simulation, assuming highest fees')
                    app.notifyTelegram(f" Margin: {_truncate((((state.last_buy_size - state.first_buy_size) / state.first_buy_size) * 100), 4)}%\n ** non-live simulation, assuming highest fees\n")

        else:
            # same candle as last run: log current status only
            if state.last_buy_size > 0 and state.last_buy_price > 0 and price > 0 and state.last_action == 'BUY':
                # show profit and margin if already bought
                Logger.info(now + ' | ' + app.getMarket() + bullbeartext + ' | ' + app.printGranularity() + ' | Current Price: ' + str(price) + ' | Margin: ' + str(margin) + ' | Profit: ' + str(profit))
            else:
                Logger.info(now + ' | ' + app.getMarket() + bullbeartext + ' | ' + app.printGranularity() + ' | Current Price: ' + str(price))

            # decrement ignored iteration
            state.iterations = state.iterations - 1

    # if live
    if not app.disableTracker() and app.isLive():
        # update order tracker csv
        if app.getExchange() == 'binance':
            account.saveTrackerCSV(app.getMarket())
        elif app.getExchange() == 'coinbasepro':
            account.saveTrackerCSV()

    # re-schedule the next run on the module-level scheduler
    if app.isSimulation():
        if state.iterations < 300:
            if app.simuluationSpeed() in ['fast', 'fast-sample']:
                # fast processing
                list(map(s.cancel, s.queue))
                s.enter(0, 1, executeJob, (sc, app, state, df))
            else:
                # slow processing
                list(map(s.cancel, s.queue))
                s.enter(1, 1, executeJob, (sc, app, state, df))
    else:
        # poll every 1 minute
        list(map(s.cancel, s.queue))
        s.enter(60, 1, executeJob, (sc, app, state))
def main():
    """Entry point: announce start-up, run the first trading job, keep the
    scheduler alive, auto-restart on failure when configured, and exit
    gracefully on Ctrl-C.

    Uses the module-level globals ``app``, ``account``, ``state`` and the
    scheduler ``s``; raises any unhandled exception after notifying Telegram.
    """
    try:
        message = 'Starting '
        if app.getExchange() == 'coinbasepro':
            message += 'Coinbase Pro bot'
        elif app.getExchange() == 'binance':
            message += 'Binance bot'

        message += ' for ' + app.getMarket() + ' using granularity ' + app.printGranularity()
        app.notifyTelegram(message)

        # initialise and start application
        trading_data = app.startApp(account, state.last_action)

        def runApp():
            # run the first job immediately after starting
            if app.isSimulation():
                executeJob(s, app, state, trading_data)
            else:
                executeJob(s, app, state)

            # block until the scheduler queue drains (normally never, since
            # executeJob re-schedules itself)
            s.run()

        try:
            runApp()
        except KeyboardInterrupt:
            raise
        except(BaseException, Exception) as e:
            if app.autoRestart():
                # Wait 30 second and try to relaunch application
                time.sleep(30)
                Logger.critical('Restarting application after exception: ' + repr(e))

                app.notifyTelegram('Auto restarting bot for ' + app.getMarket() + ' after exception: ' + repr(e))

                # Cancel the events queue.
                # Fix: map() is lazy in Python 3 -- without list() the cancel
                # calls never executed and stale events survived the restart.
                # This also matches the list(map(...)) idiom used elsewhere.
                list(map(s.cancel, s.queue))

                # Restart the app
                runApp()
            else:
                raise

    # catches a keyboard break of app, exits gracefully
    except KeyboardInterrupt:
        Logger.warning(str(datetime.now()) + ' bot is closed via keyboard interrupt...')
        try:
            sys.exit(0)
        except SystemExit:
            os._exit(0)
    except(BaseException, Exception) as e:
        # catch all not managed exceptions and send a Telegram message if configured
        app.notifyTelegram('Bot for ' + app.getMarket() + ' got an exception: ' + repr(e))
        Logger.critical(repr(e))
        raise


main()
| pycryptobot.py | 41,319 | Trading bot job which runs at a scheduled interval
Python Crypto Bot consuming Coinbase Pro or Binance APIs
minimal traceback connectivity check (only when running live) poll every 5 minute increment state.iterations retrieve the app.getMarket() data analyse the market data data frame should have 250 rows, if not retry data frame should have 300 rows, if not retry technical indicators if simulation interations < 200 set goldencross to true candlestick detection update last buy high buy and sell calculations if not a simulation, sync with exchange orders handle immedate sell actions handle overriding wait actions (do not sell if sell at loss disabled!) polling is every 5 minutes (even for hourly intervals), but only process once per interval Since precision does not change after this point, it is safe to prepare a tailored `truncate()` that would work with this precision. It should save a couple of `precision` uses, one for each `truncate()` call. Seasonal Autoregressive Integrated Moving Average (ARIMA) model (ML prediction for 3 intervals from now) 3 intervals from now display support, resistance and fibonacci levels informational output on the most recent entry if a buy signal if live display balances execute a live market buy display balances if not live TODO: Improve simulator calculations by including calculations for buy and sell limit configurations. if a sell signal if live display balances execute a live market sell display balances if not live Preserve next buy values for simulator last significant action Calculate last sell size Reduce sell fee from last sell size show profit and margin if already bought decrement ignored iteration if live update order tracker csv fast processing slow processing poll every 1 minute initialise and start application run the first job immediately after starting Wait 30 second and try to relaunch application Cancel the events queue Restart the app catches a keyboard break of app, exits gracefully catch all not managed exceptions and send a Telegram message if configured | 2,050 | en | 0.798867 |
#!/usr/bin/env python3
# encoding: utf-8
# Copyright 2017 Tomoki Hayashi (Nagoya University)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Automatic speech recognition model training script."""
import logging
import os
import random
import subprocess
import sys
from distutils.version import LooseVersion
import configargparse
import numpy as np
import torch
from espnet.utils.cli_utils import strtobool
from espnet.utils.training.batchfy import BATCH_COUNT_CHOICES
# True when the installed PyTorch is 1.2 or newer; used below to warn about
# known multi-GPU issues in those versions.
is_torch_1_2_plus = LooseVersion(torch.__version__) >= LooseVersion("1.2")
# NOTE: you need this func to generate our sphinx doc
def get_parser(parser=None, required=True):
    """Get default arguments.

    Args:
        parser: Existing parser to extend; when ``None``, a fresh
            ``configargparse.ArgumentParser`` (with YAML config support)
            is created.
        required: Whether ``--outdir`` and ``--dict`` must be supplied.

    Returns:
        The parser populated with the common ASR training options.
    """
    if parser is None:
        parser = configargparse.ArgumentParser(
            description="Train an automatic speech recognition (ASR) model on one CPU, "
            "one or multiple GPUs",
            config_file_parser_class=configargparse.YAMLConfigFileParser,
            formatter_class=configargparse.ArgumentDefaultsHelpFormatter,
        )
    # general configuration
    parser.add("--config", is_config_file=True, help="config file path")
    parser.add(
        "--config2",
        is_config_file=True,
        help="second config file path that overwrites the settings in `--config`.",
    )
    parser.add(
        "--config3",
        is_config_file=True,
        help="third config file path that overwrites the settings in "
        "`--config` and `--config2`.",
    )
    parser.add_argument(
        "--ngpu",
        default=None,
        type=int,
        help="Number of GPUs. If not given, use all visible devices",
    )
    parser.add_argument(
        "--train-dtype",
        default="float32",
        choices=["float16", "float32", "float64", "O0", "O1", "O2", "O3"],
        help="Data type for training (only pytorch backend). "
        "O0,O1,.. flags require apex. "
        "See https://nvidia.github.io/apex/amp.html#opt-levels",
    )
    parser.add_argument(
        "--backend",
        default="chainer",
        type=str,
        choices=["chainer", "pytorch"],
        help="Backend library",
    )
    parser.add_argument(
        "--outdir", type=str, required=required, help="Output directory"
    )
    parser.add_argument("--debugmode", default=1, type=int, help="Debugmode")
    parser.add_argument("--dict", required=required, help="Dictionary")
    parser.add_argument("--seed", default=1, type=int, help="Random seed")
    parser.add_argument("--debugdir", type=str, help="Output directory for debugging")
    parser.add_argument(
        "--resume",
        "-r",
        default="",
        nargs="?",
        help="Resume the training from snapshot",
    )
    parser.add_argument(
        "--minibatches",
        "-N",
        type=int,
        # was the string "-1"; argparse converts string defaults via `type`,
        # but an int literal is clearer and avoids relying on that behavior
        default=-1,
        help="Process only N minibatches (for debug)",
    )
    parser.add_argument("--verbose", "-V", default=0, type=int, help="Verbose option")
    parser.add_argument(
        "--tensorboard-dir",
        default=None,
        type=str,
        nargs="?",
        help="Tensorboard log dir path",
    )
    parser.add_argument(
        "--report-interval-iters",
        default=100,
        type=int,
        help="Report interval iterations",
    )
    parser.add_argument(
        "--save-interval-iters",
        default=0,
        type=int,
        help="Save snapshot interval iterations",
    )
    # task related
    parser.add_argument(
        "--train-json",
        type=str,
        default=None,
        help="Filename of train label data (json)",
    )
    parser.add_argument(
        "--valid-json",
        type=str,
        default=None,
        help="Filename of validation label data (json)",
    )
    # network architecture
    parser.add_argument(
        "--model-module",
        type=str,
        default=None,
        help="model defined module (default: espnet.nets.xxx_backend.e2e_asr:E2E)",
    )
    # encoder
    parser.add_argument(
        "--num-encs", default=1, type=int, help="Number of encoders in the model."
    )
    # loss related
    parser.add_argument(
        "--ctc_type",
        default="warpctc",
        type=str,
        choices=["builtin", "warpctc"],
        help="Type of CTC implementation to calculate loss.",
    )
    parser.add_argument(
        "--mtlalpha",
        default=0.5,
        type=float,
        help="Multitask learning coefficient, "
        "alpha: alpha*ctc_loss + (1-alpha)*att_loss ",
    )
    parser.add_argument(
        "--lsm-weight", default=0.0, type=float, help="Label smoothing weight"
    )
    # recognition options to compute CER/WER
    parser.add_argument(
        "--report-cer",
        default=False,
        action="store_true",
        help="Compute CER on development set",
    )
    parser.add_argument(
        "--report-wer",
        default=False,
        action="store_true",
        help="Compute WER on development set",
    )
    parser.add_argument("--nbest", type=int, default=1, help="Output N-best hypotheses")
    parser.add_argument("--beam-size", type=int, default=4, help="Beam size")
    parser.add_argument("--penalty", default=0.0, type=float, help="Insertion penalty")
    parser.add_argument(
        "--maxlenratio",
        default=0.0,
        type=float,
        help="""Input length ratio to obtain max output length.
        If maxlenratio=0.0 (default), it uses an end-detect function
        to automatically find maximum hypothesis lengths""",
    )
    parser.add_argument(
        "--minlenratio",
        default=0.0,
        type=float,
        help="Input length ratio to obtain min output length",
    )
    parser.add_argument(
        "--ctc-weight", default=0.3, type=float, help="CTC weight in joint decoding"
    )
    parser.add_argument(
        "--rnnlm", type=str, default=None, help="RNNLM model file to read"
    )
    parser.add_argument(
        "--rnnlm-conf", type=str, default=None, help="RNNLM model config file to read"
    )
    parser.add_argument("--lm-weight", default=0.1, type=float, help="RNNLM weight.")
    parser.add_argument("--sym-space", default="<space>", type=str, help="Space symbol")
    parser.add_argument("--sym-blank", default="<blank>", type=str, help="Blank symbol")
    # minibatch related
    parser.add_argument(
        "--sortagrad",
        default=0,
        type=int,
        nargs="?",
        help="How many epochs to use sortagrad for. 0 = deactivated, -1 = all epochs",
    )
    parser.add_argument(
        "--batch-count",
        default="auto",
        choices=BATCH_COUNT_CHOICES,
        help="How to count batch_size. "
        "The default (auto) will find how to count by args.",
    )
    parser.add_argument(
        "--batch-size",
        "--batch-seqs",
        "-b",
        default=0,
        type=int,
        help="Maximum seqs in a minibatch (0 to disable)",
    )
    parser.add_argument(
        "--batch-bins",
        default=0,
        type=int,
        help="Maximum bins in a minibatch (0 to disable)",
    )
    parser.add_argument(
        "--batch-frames-in",
        default=0,
        type=int,
        help="Maximum input frames in a minibatch (0 to disable)",
    )
    parser.add_argument(
        "--batch-frames-out",
        default=0,
        type=int,
        help="Maximum output frames in a minibatch (0 to disable)",
    )
    parser.add_argument(
        "--batch-frames-inout",
        default=0,
        type=int,
        help="Maximum input+output frames in a minibatch (0 to disable)",
    )
    parser.add_argument(
        "--maxlen-in",
        "--batch-seq-maxlen-in",
        default=800,
        type=int,
        metavar="ML",
        help="When --batch-count=seq, "
        "batch size is reduced if the input sequence length > ML.",
    )
    parser.add_argument(
        "--maxlen-out",
        "--batch-seq-maxlen-out",
        default=150,
        type=int,
        metavar="ML",
        help="When --batch-count=seq, "
        "batch size is reduced if the output sequence length > ML",
    )
    parser.add_argument(
        "--n-iter-processes",
        default=0,
        type=int,
        help="Number of processes of iterator",
    )
    parser.add_argument(
        "--preprocess-conf",
        type=str,
        default=None,
        nargs="?",
        help="The configuration file for the pre-processing",
    )
    # optimization related
    parser.add_argument(
        "--opt",
        default="adadelta",
        type=str,
        choices=["adadelta", "adam", "noam"],
        help="Optimizer",
    )
    parser.add_argument(
        "--accum-grad", default=1, type=int, help="Number of gradient accumulation"
    )
    parser.add_argument(
        "--eps", default=1e-8, type=float, help="Epsilon constant for optimizer"
    )
    parser.add_argument(
        "--eps-decay", default=0.01, type=float, help="Decaying ratio of epsilon"
    )
    parser.add_argument(
        "--weight-decay", default=0.0, type=float, help="Weight decay ratio"
    )
    parser.add_argument(
        "--criterion",
        default="acc",
        type=str,
        choices=["loss", "acc"],
        help="Criterion to perform epsilon decay",
    )
    parser.add_argument(
        "--threshold", default=1e-4, type=float, help="Threshold to stop iteration"
    )
    parser.add_argument(
        "--epochs", "-e", default=30, type=int, help="Maximum number of epochs"
    )
    parser.add_argument(
        "--early-stop-criterion",
        default="validation/main/acc",
        type=str,
        nargs="?",
        help="Value to monitor to trigger an early stopping of the training",
    )
    parser.add_argument(
        "--patience",
        default=3,
        type=int,
        nargs="?",
        help="Number of epochs to wait without improvement "
        "before stopping the training",
    )
    parser.add_argument(
        "--grad-clip", default=5, type=float, help="Gradient norm threshold to clip"
    )
    parser.add_argument(
        "--num-save-attention",
        default=3,
        type=int,
        help="Number of samples of attention to be saved",
    )
    parser.add_argument(
        "--num-save-ctc",
        default=3,
        type=int,
        help="Number of samples of CTC probability to be saved",
    )
    parser.add_argument(
        "--grad-noise",
        type=strtobool,
        default=False,
        help="The flag to switch to use noise injection to gradients during training",
    )
    # asr_mix related
    parser.add_argument(
        "--num-spkrs",
        default=1,
        type=int,
        choices=[1, 2],
        help="Number of speakers in the speech.",
    )
    # decoder related
    parser.add_argument(
        "--context-residual",
        default=False,
        type=strtobool,
        nargs="?",
        help="The flag to switch to use context vector residual in the decoder network",
    )
    # finetuning related
    parser.add_argument(
        "--enc-init",
        default=None,
        type=str,
        help="Pre-trained ASR model to initialize encoder.",
    )
    parser.add_argument(
        "--enc-init-mods",
        default="enc.enc.",
        type=lambda s: [str(mod) for mod in s.split(",") if s != ""],
        help="List of encoder modules to initialize, separated by a comma.",
    )
    parser.add_argument(
        "--dec-init",
        default=None,
        type=str,
        help="Pre-trained ASR, MT or LM model to initialize decoder.",
    )
    parser.add_argument(
        "--dec-init-mods",
        default="att., dec.",
        type=lambda s: [str(mod) for mod in s.split(",") if s != ""],
        help="List of decoder modules to initialize, separated by a comma.",
    )
    parser.add_argument(
        "--freeze-mods",
        default=None,
        type=lambda s: [str(mod) for mod in s.split(",") if s != ""],
        help="List of modules to freeze, separated by a comma.",
    )
    # front end related
    parser.add_argument(
        "--use-frontend",
        type=strtobool,
        default=False,
        help="The flag to switch to use frontend system.",
    )
    # WPE related
    parser.add_argument(
        "--use-wpe",
        type=strtobool,
        default=False,
        help="Apply Weighted Prediction Error",
    )
    parser.add_argument(
        "--wtype",
        default="blstmp",
        type=str,
        choices=[
            "lstm",
            "blstm",
            "lstmp",
            "blstmp",
            "vgglstmp",
            "vggblstmp",
            "vgglstm",
            "vggblstm",
            "gru",
            "bgru",
            "grup",
            "bgrup",
            "vgggrup",
            "vggbgrup",
            "vgggru",
            "vggbgru",
        ],
        help="Type of encoder network architecture "
        "of the mask estimator for WPE. "
        "",
    )
    parser.add_argument("--wlayers", type=int, default=2, help="")
    parser.add_argument("--wunits", type=int, default=300, help="")
    parser.add_argument("--wprojs", type=int, default=300, help="")
    parser.add_argument("--wdropout-rate", type=float, default=0.0, help="")
    parser.add_argument("--wpe-taps", type=int, default=5, help="")
    parser.add_argument("--wpe-delay", type=int, default=3, help="")
    parser.add_argument(
        "--use-dnn-mask-for-wpe",
        type=strtobool,
        default=False,
        help="Use DNN to estimate the power spectrogram. "
        "This option is experimental.",
    )
    # Beamformer related
    parser.add_argument("--use-beamformer", type=strtobool, default=True, help="")
    parser.add_argument(
        "--btype",
        default="blstmp",
        type=str,
        choices=[
            "lstm",
            "blstm",
            "lstmp",
            "blstmp",
            "vgglstmp",
            "vggblstmp",
            "vgglstm",
            "vggblstm",
            "gru",
            "bgru",
            "grup",
            "bgrup",
            "vgggrup",
            "vggbgrup",
            "vgggru",
            "vggbgru",
        ],
        help="Type of encoder network architecture "
        "of the mask estimator for Beamformer.",
    )
    parser.add_argument("--blayers", type=int, default=2, help="")
    parser.add_argument("--bunits", type=int, default=300, help="")
    parser.add_argument("--bprojs", type=int, default=300, help="")
    parser.add_argument("--badim", type=int, default=320, help="")
    parser.add_argument(
        "--bnmask",
        type=int,
        default=2,
        help="Number of beamforming masks, " "default is 2 for [speech, noise].",
    )
    parser.add_argument(
        "--ref-channel",
        type=int,
        default=-1,
        help="The reference channel used for beamformer. "
        "By default, the channel is estimated by DNN.",
    )
    parser.add_argument("--bdropout-rate", type=float, default=0.0, help="")
    # Feature transform: Normalization
    parser.add_argument(
        "--stats-file",
        type=str,
        default=None,
        help="The stats file for the feature normalization",
    )
    parser.add_argument(
        "--apply-uttmvn",
        type=strtobool,
        default=True,
        help="Apply utterance level mean " "variance normalization.",
    )
    parser.add_argument("--uttmvn-norm-means", type=strtobool, default=True, help="")
    parser.add_argument("--uttmvn-norm-vars", type=strtobool, default=False, help="")
    # Feature transform: Fbank
    parser.add_argument(
        "--fbank-fs",
        type=int,
        default=16000,
        help="The sample frequency used for " "the mel-fbank creation.",
    )
    parser.add_argument(
        "--n-mels", type=int, default=80, help="The number of mel-frequency bins."
    )
    parser.add_argument("--fbank-fmin", type=float, default=0.0, help="")
    parser.add_argument("--fbank-fmax", type=float, default=None, help="")
    return parser
def main(cmd_args):
    """Run the main training function.

    Args:
        cmd_args: Command-line arguments (``sys.argv[1:]``-style list).
    """
    parser = get_parser()
    args, _ = parser.parse_known_args(cmd_args)
    if args.backend == "chainer" and args.train_dtype != "float32":
        raise NotImplementedError(
            f"chainer backend does not support --train-dtype {args.train_dtype}."
            "Use --dtype float32."
        )
    if args.ngpu == 0 and args.train_dtype in ("O0", "O1", "O2", "O3", "float16"):
        raise ValueError(
            f"--train-dtype {args.train_dtype} does not support the CPU backend."
        )

    from espnet.utils.dynamic_import import dynamic_import

    if args.model_module is None:
        model_module = "espnet.nets." + args.backend + "_backend.e2e_asr:E2E"
    else:
        model_module = args.model_module
    # Let the selected model class register its own CLI options, then re-parse.
    model_class = dynamic_import(model_module)
    model_class.add_arguments(parser)
    args = parser.parse_args(cmd_args)
    args.model_module = model_module
    if "chainer_backend" in args.model_module:
        args.backend = "chainer"
    if "pytorch_backend" in args.model_module:
        args.backend = "pytorch"
    # logging info
    if args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")
    # If --ngpu is not given,
    #   1. if CUDA_VISIBLE_DEVICES is set, all visible devices
    #   2. if nvidia-smi exists, use all devices
    #   3. else ngpu=0
    if args.ngpu is None:
        cvd = os.environ.get("CUDA_VISIBLE_DEVICES")
        if cvd is not None:
            ngpu = len(cvd.split(","))
        else:
            logging.warning("CUDA_VISIBLE_DEVICES is not set.")
            try:
                # check=True so a failing nvidia-smi raises CalledProcessError;
                # without it, subprocess.run never raises and the except branch
                # below was unreachable.
                p = subprocess.run(
                    ["nvidia-smi", "-L"],
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                    check=True,
                )
            except (subprocess.CalledProcessError, FileNotFoundError):
                ngpu = 0
            else:
                # `nvidia-smi -L` lists one GPU per line on *stdout*; the
                # original counted stderr lines, which is empty on success and
                # therefore always reported 0 GPUs.
                ngpu = len(p.stdout.decode().split("\n")) - 1
    else:
        if is_torch_1_2_plus and args.ngpu != 1:
            logging.debug(
                "There are some bugs with multi-GPU processing in PyTorch 1.2+"
                + " (see https://github.com/pytorch/pytorch/issues/21108)"
            )
        ngpu = args.ngpu
    logging.info(f"ngpu: {ngpu}")

    # display PYTHONPATH
    logging.info("python path = " + os.environ.get("PYTHONPATH", "(None)"))

    # set random seed
    logging.info("random seed = %d" % args.seed)
    random.seed(args.seed)
    np.random.seed(args.seed)

    # load dictionary for debug log
    if args.dict is not None:
        with open(args.dict, "rb") as f:
            dictionary = f.readlines()
        char_list = [entry.decode("utf-8").split(" ")[0] for entry in dictionary]
        char_list.insert(0, "<blank>")
        char_list.append("<eos>")
        # for non-autoregressive training using Transformer
        if hasattr(args, "decoder_mode") and args.decoder_mode == "maskctc":
            char_list.append("<mask>")
        args.char_list = char_list
    else:
        args.char_list = None

    # train
    logging.info("backend = " + args.backend)
    if args.num_spkrs == 1:
        if args.backend == "chainer":
            from espnet.asr.chainer_backend.asr import train

            train(args)
        elif args.backend == "pytorch":
            from espnet.asr.pytorch_backend.asr import train

            train(args)
        else:
            raise ValueError("Only chainer and pytorch are supported.")
    else:
        # FIXME(kamo): Support --model-module
        if args.backend == "pytorch":
            from espnet.asr.pytorch_backend.asr_mix import train

            train(args)
        else:
            raise ValueError("Only pytorch is supported.")
# Script entry point: forward the CLI arguments (without the program name).
if __name__ == "__main__":
    main(sys.argv[1:])
| espnet/bin/asr_train.py | 20,058 | Get default arguments.
Run the main training function.
Automatic speech recognition model training script.
!/usr/bin/env python3 encoding: utf-8 Copyright 2017 Tomoki Hayashi (Nagoya University) Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) NOTE: you need this func to generate our sphinx doc general configuration task related network architecture encoder loss related recognition options to compute CER/WER minibatch related optimization related asr_mix related decoder related finetuning related front end related WPE related Beamformer related Feature transform: Normalization Feature transform: Fbank logging info If --ngpu is not given, 1. if CUDA_VISIBLE_DEVICES is set, all visible devices 2. if nvidia-smi exists, use all devices 3. else ngpu=0 display PYTHONPATH set random seed load dictionary for debug log for non-autoregressive training using Transformer train FIXME(kamo): Support --model-module | 929 | en | 0.55731 |
#(c) 2016 by Authors
#This file is a part of ABruijn program.
#Released under the BSD license (see LICENSE file)
"""
Runs repeat/contigger binary
"""
from __future__ import absolute_import
import subprocess
import logging
import os
from flye.utils.utils import which
# Both pipeline stages are subcommands ("repeat" / "contigger") of the same
# external "flye-modules" executable.
REPEAT_BIN = "flye-modules"
CONTIGGER_BIN = "flye-modules"
# Root logger shared by the helpers in this module.
logger = logging.getLogger()
class RepeatException(Exception):
    """Raised when the repeat/contigger binaries are missing or fail."""
def check_binaries():
    """Verify that the repeat/contigger binaries exist and can be executed.

    Raises:
        RepeatException: if a binary is missing from PATH or fails to run.
    """
    if not which(REPEAT_BIN) or not which(CONTIGGER_BIN):
        raise RepeatException("Repeat/contigger binaries were not found. "
                              "Did you run 'make'?")
    try:
        # subprocess.DEVNULL replaces the original open(os.devnull, "w"),
        # which was never closed and leaked a file handle on every call.
        subprocess.check_call([REPEAT_BIN, "repeat", "-h"],
                              stderr=subprocess.DEVNULL)
    except subprocess.CalledProcessError as e:
        raise RepeatException(str(e))
    except OSError as e:
        raise RepeatException(str(e))
def analyse_repeats(args, run_params, input_assembly, out_folder,
                    log_file, config_file):
    """Run the external repeat analyser on the disjointig assembly."""
    logger.debug("-----Begin repeat analyser log------")

    cmdline = [
        REPEAT_BIN, "repeat",
        "--disjointigs", input_assembly,
        "--reads", ",".join(args.reads),
        "--out-dir", out_folder,
        "--config", config_file,
        "--log", log_file,
        "--threads", str(args.threads),
    ]

    # Boolean CLI switches mirrored from the parsed arguments (order matters
    # only cosmetically; kept identical to the original).
    for flag, enabled in (("--debug", args.debug),
                          ("--meta", args.meta),
                          ("--keep-haplotypes", args.keep_haplotypes)):
        if enabled:
            cmdline.append(flag)

    #if args.kmer_size:
    #    cmdline.extend(["--kmer", str(args.kmer_size)])
    cmdline += ["--min-ovlp", str(run_params["min_overlap"])]
    if args.hifi_error:
        cmdline += ["--extra-params",
                    "repeat_graph_ovlp_divergence={}".format(args.hifi_error)]

    try:
        logger.debug("Running: " + " ".join(cmdline))
        subprocess.check_call(cmdline)
    except subprocess.CalledProcessError as e:
        if e.returncode == -9:
            logger.error("Looks like the system ran out of memory")
        raise RepeatException(str(e))
    except OSError as e:
        raise RepeatException(str(e))
def generate_contigs(args, run_params, graph_edges, out_folder,
                     log_file, config_file, repeat_graph, reads_alignment):
    """Run the external contigger stage on the repeat graph."""
    logger.debug("-----Begin contigger analyser log------")

    cmdline = [
        CONTIGGER_BIN, "contigger",
        "--graph-edges", graph_edges,
        "--reads", ",".join(args.reads),
        "--out-dir", out_folder,
        "--config", config_file,
        "--repeat-graph", repeat_graph,
        "--graph-aln", reads_alignment,
        "--log", log_file,
        "--threads", str(args.threads),
    ]
    if args.debug:
        cmdline.append("--debug")
    # --keep-haplotypes disables scaffolding in the contigger stage.
    if args.keep_haplotypes:
        cmdline.append("--no-scaffold")
    #if args.kmer_size:
    #    cmdline.extend(["--kmer", str(args.kmer_size)])
    cmdline += ["--min-ovlp", str(run_params["min_overlap"])]

    try:
        logger.debug("Running: " + " ".join(cmdline))
        subprocess.check_call(cmdline)
    except subprocess.CalledProcessError as e:
        if e.returncode == -9:
            logger.error("Looks like the system ran out of memory")
        raise RepeatException(str(e))
    except OSError as e:
        raise RepeatException(str(e))
| flye/assembly/repeat_graph.py | 3,285 | Runs repeat/contigger binary
(c) 2016 by AuthorsThis file is a part of ABruijn program.Released under the BSD license (see LICENSE file)if args.kmer_size: cmdline.extend(["--kmer", str(args.kmer_size)])if args.kmer_size: cmdline.extend(["--kmer", str(args.kmer_size)]) | 275 | en | 0.712033 |
import json
import typing
import collections
from matplotlib import cm
from matplotlib.colors import Normalize, to_hex, CSS4_COLORS, BASE_COLORS
import matplotlib.pyplot as plt
from clldutils.color import qualitative_colors, sequential_colors, rgb_as_hex
from cldfviz.multiparameter import CONTINUOUS, CATEGORICAL, Parameter
__all__ = ['COLORMAPS', 'hextriplet', 'Colormap']
COLORMAPS = {
CATEGORICAL: ['boynton', 'tol', 'base', 'seq'],
CONTINUOUS: [cm for cm in plt.colormaps() if not cm.endswith('_r')],
}
def hextriplet(s):
    """
    Wrap clldutils.color.rgb_as_hex to provide unified error handling.
    """
    # Single-letter matplotlib base colors ('r', 'g', ...) come as RGB tuples.
    if s in BASE_COLORS:
        channels = [float(channel) for channel in BASE_COLORS[s]]
        return rgb_as_hex(channels)
    # Named CSS4 colors are already hex triplets.
    if s in CSS4_COLORS:
        return CSS4_COLORS[s]
    try:
        return rgb_as_hex(s)
    except (AssertionError, ValueError) as e:
        message = 'Invalid color spec: "{}" ({})'.format(s, str(e))
        raise ValueError(message)
class Colormap:
    """Callable mapping values of a parameter to hex color triplets.

    Three modes, chosen from ``parameter.domain`` and ``name``:
    a continuous (tuple) domain uses a matplotlib colormap; an explicit
    JSON object in ``name`` maps values/labels to colors directly; otherwise
    a qualitative/sequential palette is generated for the categorical domain.
    """
    def __init__(self, parameter: Parameter, name: typing.Optional[str] = None, novalue=None):
        domain = parameter.domain
        self.explicit_cm = None
        # A leading '{' signals an explicit JSON value->color mapping.
        if name and name.startswith('{'):
            self.explicit_cm = collections.OrderedDict()
            raw = json.loads(name, object_pairs_hook=collections.OrderedDict)
            if novalue:
                # Color for missing values keyed by the literal string 'None'.
                raw.setdefault('None', novalue)
            label_to_code = {v: k for k, v in parameter.domain.items()}
            for v, c in raw.items():
                # Mapping keys may be either domain codes or their labels.
                if (v not in parameter.value_to_code) and v not in label_to_code:
                    raise ValueError('Colormap value "{}" not in domain {}'.format(
                        v, list(parameter.value_to_code.keys())))
                v = parameter.value_to_code.get(v, label_to_code.get(v))
                self.explicit_cm[v] = hextriplet(c)
            vals = list(parameter.value_to_code)
            if len(vals) > len(self.explicit_cm):
                # The explicit map must cover every value of the parameter.
                raise ValueError('Colormap {} does not cover all values {}!'.format(
                    dict(raw), vals))
            name = None
            # reorder the domain of the parameter (and prune it to valid values):
            parameter.domain = collections.OrderedDict(
                (c, l) for c, l in sorted(
                    [i for i in parameter.domain.items() if i[0] in self.explicit_cm],
                    key=lambda i: list(self.explicit_cm.keys()).index(i[0]))
            )
        self.novalue = hextriplet(novalue) if novalue else None
        # 'yyy' is a deliberately invalid attribute name so that getattr falls
        # back to cm.jet whenever no (valid) matplotlib name was given.
        self._cm = getattr(cm, name or 'yyy', cm.jet)
        if isinstance(domain, tuple):
            assert not self.explicit_cm
            # Initialize matplotlib colormap and normalizer:
            norm = Normalize(domain[0], domain[1])
            self.cm = lambda v: to_hex(self._cm(norm(float(v))))
        else:
            if self.explicit_cm:
                self.cm = lambda v: self.explicit_cm[v]
            else:
                if name == 'seq':
                    colors = sequential_colors(len(domain))
                else:
                    colors = qualitative_colors(len(domain), set=name)
                self.cm = lambda v: dict(zip(domain, colors))[v]

    def scalar_mappable(self):
        # Used e.g. to draw a colorbar for the continuous case.
        return cm.ScalarMappable(norm=None, cmap=self._cm)

    def __call__(self, value):
        # None maps to the configured "no value" color (possibly None itself).
        if value is None:
            return self.novalue
        return self.cm(value)
| src/cldfviz/colormap.py | 3,386 | Wrap clldutils.color.rgb_as_hex to provide unified error handling.
reorder the domain of the parameter (and prune it to valid values): Initialize matplotlib colormap and normalizer: | 183 | en | 0.296911 |
import torch
class KFold:
    """K-fold cross-validation helper yielding train/test ``DataLoader`` pairs.

    The dataset is partitioned into ``n_fold`` equally sized folds; any
    trailing samples that do not fill a complete fold are dropped.
    """

    def __init__(self, dataset, n_fold=10, batch_size=32, num_workers=0, pin_memory=False):
        self.fold = 0                     # index of the next fold to serve
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.pin_memory = pin_memory
        self.dataset = dataset
        self.n_fold = n_fold
        # Integer division: leftover samples beyond folded_size are ignored.
        self.fold_size = len(dataset) // self.n_fold
        if self.fold_size == 0:
            # The original crashed later with a cryptic view() error.
            raise ValueError(
                "n_fold={} exceeds the dataset size {}".format(n_fold, len(dataset)))
        self.folded_size = self.n_fold * self.fold_size
        self.fold_idx = self.fold_split()

    def fold_split(self, random_seed=None):
        """
        Splitting the folds.

        Args:
            random_seed: Random seed for reproducibility. Note: this seeds
                torch's *global* RNG via ``torch.manual_seed``.

        Returns:
            tensor containing indices for folds, where dim=0 is the fold number
        """
        if random_seed is not None:
            torch.manual_seed(random_seed)
        fold_idx = torch.randperm(len(self.dataset))
        fold_idx = fold_idx[:self.folded_size].view(-1, self.fold_size)
        return fold_idx

    def fold_loaders(self, fold=-1):
        """
        Loading a specific fold as train and test data loader. If no fold number is provided it returns the next fold. It returns a randomly sampled subset of
        the original data set.

        Args:
            fold: fold number to return (-1 means "next fold")

        Returns:
            (train data loader, test data loader)
        """
        if fold == -1:
            fold = self.fold
        # Held-out fold vs. the concatenation of all remaining folds.
        test_fold_idx = self.fold_idx[fold]
        train_fold_idx = self.fold_idx[[i for i in range(self.n_fold) if i != fold]].view(-1)

        train_loader = torch.utils.data.DataLoader(
            self.dataset,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            pin_memory=self.pin_memory,
            sampler=torch.utils.data.SubsetRandomSampler(train_fold_idx))
        test_loader = torch.utils.data.DataLoader(
            self.dataset,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            pin_memory=self.pin_memory,
            sampler=torch.utils.data.SubsetRandomSampler(test_fold_idx))

        # Advance the cursor so the next call serves the following fold.
        self.fold = (self.fold + 1) % self.n_fold
        return train_loader, test_loader
| pymatch/utils/KFold.py | 2,558 | Loading a specific fold as train and test data loader. If no fold number is provided it returns the next fold. It returns a randomly sampled subset of
the original data set.
Args:
fold: fold number to return
Returns:
(train data loader, test data loader)
Splitting the folds.
Args:
random_seed: Random seed for reproducibility
Returns:
tensor containing indices for folds, where dim=0 is the fold number
args.batch_size, args.loader_num_workers, args.batch_size, args.loader_num_workers, | 509 | en | 0.745565 |
# -*- coding: utf-8 -*- #
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.appengine.api import users
from google.appengine.ext import webapp
from util import *
import webapp2
class WhoAmIHandler(webapp2.RequestHandler):
    """Respond with the short username of the currently signed-in user."""

    def get(self):
        email = users.get_current_user().email()
        self.response.out.write(Util.getUsernameFromEmail(email))
# WSGI application routing /whoami to the handler above.
app = webapp2.WSGIApplication(
    [
        ('/whoami', WhoAmIHandler),
    ], debug=True)
| whoami.py | 970 | -*- coding: utf-8 -*- Copyright 2011 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http:www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 568 | en | 0.867166 |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Semantic version components; bump these when cutting a release.
MAJOR = 1
MINOR = 7
PATCH = 0
PRE_RELEASE = 'rc'
# Use the following formatting: (major, minor, patch, pre-release)
VERSION = (MAJOR, MINOR, PATCH, PRE_RELEASE)
# Numeric version only, e.g. "1.7.0".
__shortversion__ = '.'.join(map(str, VERSION[:3]))
# Full version including the pre-release suffix, e.g. "1.7.0rc".
__version__ = '.'.join(map(str, VERSION[:3])) + ''.join(VERSION[3:])
# Packaging metadata consumed by setup.py.
__package_name__ = 'nemo_toolkit'
__contact_names__ = 'NVIDIA'
__contact_emails__ = 'nemo-toolkit@nvidia.com'
__homepage__ = 'https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/'
__repository_url__ = 'https://github.com/nvidia/nemo'
__download_url__ = 'https://github.com/NVIDIA/NeMo/releases'
__description__ = 'NeMo - a toolkit for Conversational AI'
__license__ = 'Apache2'
__keywords__ = 'deep learning, machine learning, gpu, NLP, NeMo, nvidia, pytorch, torch, tts, speech, language'
| nemo/package_info.py | 1,402 | Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Use the following formatting: (major, minor, patch, pre-release) | 648 | en | 0.886921 |
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from selenium.common.exceptions import TimeoutException
class TestPageLoadTimeout(object):
    """Tests for ``driver.set_page_load_timeout``."""

    @pytest.mark.xfail_phantomjs(
        reason='PhantomJS does not implement page load timeouts')
    def testShouldTimeoutOnPageLoadTakingTooLong(self, driver, pages):
        """A 10 ms timeout must make an ordinary page load raise."""
        driver.set_page_load_timeout(0.01)
        with pytest.raises(TimeoutException):
            pages.load("simpleTest.html")

    @pytest.mark.xfail_marionette(
        reason='https://bugzilla.mozilla.org/show_bug.cgi?id=1309231')
    @pytest.mark.xfail_phantomjs(
        reason='PhantomJS does not implement page load timeouts')
    def testClickShouldTimeout(self, driver, pages):
        """Navigation triggered by clicking a link must honor the timeout too."""
        pages.load("simpleTest.html")
        driver.set_page_load_timeout(0.01)
        with pytest.raises(TimeoutException):
            driver.find_element_by_id("multilinelink").click()
| py/test/selenium/webdriver/common/page_load_timeout_tests.py | 1,649 | Licensed to the Software Freedom Conservancy (SFC) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The SFC licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 754 | en | 0.875826 |
from bs4 import BeautifulSoup
from django.forms import (
BaseForm,
BaseFormSet,
BoundField,
CheckboxInput,
CheckboxSelectMultiple,
DateInput,
EmailInput,
FileInput,
MultiWidget,
NumberInput,
PasswordInput,
RadioSelect,
Select,
SelectDateWidget,
TextInput,
URLInput,
)
from django.utils.html import conditional_escape, escape, strip_tags
from django.utils.safestring import mark_safe
from .bootstrap import get_bootstrap_setting
from .exceptions import BootstrapError
from .forms import (
FORM_GROUP_CLASS,
is_widget_with_placeholder,
render_field,
render_form,
render_form_group,
render_label,
)
from .text import text_value
from .utils import add_css_class, render_template_file
try:
    # If Django is set up without a database, importing this widget gives RuntimeError
    from django.contrib.auth.forms import ReadOnlyPasswordHashWidget
except RuntimeError:
    # Fall back to None so code below can feature-test for the widget.
    ReadOnlyPasswordHashWidget = None
class BaseRenderer(object):
    """A content renderer."""

    def __init__(self, *args, **kwargs):
        # Simple keyword options with their defaults; assigned in bulk.
        simple_options = {
            "layout": "",
            "form_group_class": FORM_GROUP_CLASS,
            "field_class": "",
            "label_class": "",
            "show_help": True,
            "show_label": True,
            "exclude": "",
            "set_placeholder": True,
        }
        for option, default in simple_options.items():
            setattr(self, option, kwargs.get(option, default))
        self.size = self.parse_size(kwargs.get("size", ""))
        self.horizontal_label_class = kwargs.get(
            "horizontal_label_class", get_bootstrap_setting("horizontal_label_class")
        )
        self.horizontal_field_class = kwargs.get(
            "horizontal_field_class", get_bootstrap_setting("horizontal_field_class")
        )

    def parse_size(self, size):
        """Normalize a size spec to 'small', 'medium' or 'large'."""
        normalized = text_value(size).lower().strip()
        aliases = {
            "sm": "small", "small": "small",
            "lg": "large", "large": "large",
            "md": "medium", "medium": "medium", "": "medium",
        }
        try:
            return aliases[normalized]
        except KeyError:
            raise BootstrapError(
                'Invalid value "%s" for parameter "size" (expected "sm", "md", "lg" or "").' % normalized
            )

    def get_size_class(self, prefix="form-control"):
        """Return the Bootstrap size suffix class; empty for medium."""
        suffixes = {"small": "-sm", "large": "-lg"}
        if self.size in suffixes:
            return prefix + suffixes[self.size]
        return ""

    def _render(self):
        # Subclasses override this to produce their markup.
        return ""

    def render(self):
        return mark_safe(self._render())
class FormsetRenderer(BaseRenderer):
    """Render a Django formset: errors first, then the management form, then each member form."""

    def __init__(self, formset, *args, **kwargs):
        if not isinstance(formset, BaseFormSet):
            raise BootstrapError('Parameter "formset" should contain a valid Django Formset.')
        self.formset = formset
        super().__init__(*args, **kwargs)

    def render_management_form(self):
        """Return the formset's hidden management form as text."""
        return text_value(self.formset.management_form)

    def render_form(self, form, **kwargs):
        """Render a single member form."""
        return render_form(form, **kwargs)

    def render_forms(self):
        """Render every member form, forwarding this renderer's options to each."""
        shared_options = dict(
            layout=self.layout,
            form_group_class=self.form_group_class,
            field_class=self.field_class,
            label_class=self.label_class,
            show_label=self.show_label,
            show_help=self.show_help,
            exclude=self.exclude,
            set_placeholder=self.set_placeholder,
            size=self.size,
            horizontal_label_class=self.horizontal_label_class,
            horizontal_field_class=self.horizontal_field_class,
        )
        return "\n".join(self.render_form(form, **shared_options) for form in self.formset.forms)

    def get_formset_errors(self):
        """Return the formset-level (non-form) errors."""
        return self.formset.non_form_errors()

    def render_errors(self):
        """Render formset-level errors, or an empty string when there are none."""
        errors = self.get_formset_errors()
        if not errors:
            return ""
        return render_template_file(
            "bootstrap4/form_errors.html",
            context={"errors": errors, "form": self.formset, "layout": self.layout},
        )

    def _render(self):
        """Concatenate errors, the management form and all member forms."""
        return "".join([self.render_errors(), self.render_management_form(), self.render_forms()])
class FormRenderer(BaseRenderer):
    """Render a complete Django form: an optional error alert followed by all fields."""

    def __init__(self, form, *args, **kwargs):
        if not isinstance(form, BaseForm):
            raise BootstrapError('Parameter "form" should contain a valid Django Form.')
        self.form = form
        super().__init__(*args, **kwargs)
        self.error_css_class = kwargs.get("error_css_class", None)
        self.required_css_class = kwargs.get("required_css_class", None)
        self.bound_css_class = kwargs.get("bound_css_class", None)
        self.alert_error_type = kwargs.get("alert_error_type", "non_fields")
        self.form_check_class = kwargs.get("form_check_class", "form-check")

    def render_fields(self):
        """Render every bound field, forwarding this renderer's options."""
        field_options = dict(
            layout=self.layout,
            form_group_class=self.form_group_class,
            field_class=self.field_class,
            label_class=self.label_class,
            form_check_class=self.form_check_class,
            show_label=self.show_label,
            show_help=self.show_help,
            exclude=self.exclude,
            set_placeholder=self.set_placeholder,
            size=self.size,
            horizontal_label_class=self.horizontal_label_class,
            horizontal_field_class=self.horizontal_field_class,
            error_css_class=self.error_css_class,
            required_css_class=self.required_css_class,
            bound_css_class=self.bound_css_class,
        )
        return "\n".join(render_field(field, **field_options) for field in self.form)

    def get_fields_errors(self):
        """Collect the errors of all visible (non-hidden) fields into one list."""
        return [error for field in self.form if not field.is_hidden and field.errors for error in field.errors]

    def render_errors(self, type="all"):
        """Render form errors of the requested category: "all", "fields" or "non_fields".

        An unrecognized category renders nothing.
        """
        if type == "all":
            form_errors = self.get_fields_errors() + self.form.non_field_errors()
        elif type == "fields":
            form_errors = self.get_fields_errors()
        elif type == "non_fields":
            form_errors = self.form.non_field_errors()
        else:
            form_errors = None
        if not form_errors:
            return ""
        return render_template_file(
            "bootstrap4/form_errors.html",
            context={"errors": form_errors, "form": self.form, "layout": self.layout, "type": type},
        )

    def _render(self):
        """Error alert (per ``alert_error_type``) first, then the rendered fields."""
        return self.render_errors(self.alert_error_type) + self.render_fields()
class FieldRenderer(BaseRenderer):
    """Default field renderer.

    Renders one Django ``BoundField`` into a Bootstrap 4 form group. The
    pipeline (see ``_render``) mutates the widget's attrs, renders the widget,
    then wraps the HTML in successive layers: checkbox wrapper, input group,
    error/help blocks, field column, label, and finally the form-group div.
    """

    # These widgets will not be wrapped in a form-control class
    WIDGETS_NO_FORM_CONTROL = (CheckboxInput, RadioSelect, CheckboxSelectMultiple, FileInput)

    def __init__(self, field, *args, **kwargs):
        if not isinstance(field, BoundField):
            raise BootstrapError('Parameter "field" should contain a valid Django BoundField.')
        self.field = field
        super().__init__(*args, **kwargs)
        self.widget = field.field.widget
        self.is_multi_widget = isinstance(field.field.widget, MultiWidget)
        # Pristine copy of the widget attrs, restored after rendering
        # (this renderer mutates widget.attrs in place; see restore_widget_attrs).
        self.initial_attrs = self.widget.attrs.copy()
        self.field_help = text_value(mark_safe(field.help_text)) if self.show_help and field.help_text else ""
        # Errors are escaped once here; append_errors renders them via template.
        self.field_errors = [conditional_escape(text_value(error)) for error in field.errors]
        self.form_check_class = kwargs.get("form_check_class", "form-check")
        if "placeholder" in kwargs:
            # Find the placeholder in kwargs, even if it's empty
            self.placeholder = kwargs["placeholder"]
        elif get_bootstrap_setting("set_placeholder"):
            # If not found, see if we set the label
            self.placeholder = field.label
        else:
            # Or just set it to empty
            self.placeholder = ""
        if self.placeholder:
            self.placeholder = text_value(self.placeholder)
        # Input-group addons may come from template kwargs or from widget attrs
        # (popped so they are not emitted as HTML attributes).
        self.addon_before = kwargs.get("addon_before", self.widget.attrs.pop("addon_before", ""))
        self.addon_after = kwargs.get("addon_after", self.widget.attrs.pop("addon_after", ""))
        self.addon_before_class = kwargs.get(
            "addon_before_class", self.widget.attrs.pop("addon_before_class", "input-group-text")
        )
        self.addon_after_class = kwargs.get(
            "addon_after_class", self.widget.attrs.pop("addon_after_class", "input-group-text")
        )
        # These are set in Django or in the global BOOTSTRAP4 settings, and
        # they can be overwritten in the template
        error_css_class = kwargs.get("error_css_class", None)
        required_css_class = kwargs.get("required_css_class", None)
        bound_css_class = kwargs.get("bound_css_class", None)
        if error_css_class is not None:
            self.error_css_class = error_css_class
        else:
            self.error_css_class = getattr(field.form, "error_css_class", get_bootstrap_setting("error_css_class"))
        if required_css_class is not None:
            self.required_css_class = required_css_class
        else:
            self.required_css_class = getattr(
                field.form, "required_css_class", get_bootstrap_setting("required_css_class")
            )
        if bound_css_class is not None:
            self.success_css_class = bound_css_class
        else:
            self.success_css_class = getattr(field.form, "bound_css_class", get_bootstrap_setting("success_css_class"))
        # If the form is marked as form.empty_permitted, do not set required class
        if self.field.form.empty_permitted:
            self.required_css_class = ""

    def restore_widget_attrs(self):
        """Undo all widget.attrs mutations made during rendering."""
        self.widget.attrs = self.initial_attrs.copy()

    def add_class_attrs(self, widget=None):
        """Add the appropriate Bootstrap CSS classes to the widget's "class" attr."""
        if widget is None:
            widget = self.widget
        classes = widget.attrs.get("class", "")
        if ReadOnlyPasswordHashWidget is not None and isinstance(widget, ReadOnlyPasswordHashWidget):
            # Render this as a static control
            classes = add_css_class(classes, "form-control-static", prepend=True)
        elif not isinstance(widget, self.WIDGETS_NO_FORM_CONTROL):
            classes = add_css_class(classes, "form-control", prepend=True)
            # For these widget types, add the size class here
            classes = add_css_class(classes, self.get_size_class())
        elif isinstance(widget, CheckboxInput):
            classes = add_css_class(classes, "form-check-input", prepend=True)
        elif isinstance(widget, FileInput):
            classes = add_css_class(classes, "form-control-file", prepend=True)
        if self.field.errors:
            if self.error_css_class:
                classes = add_css_class(classes, self.error_css_class)
        else:
            # No errors on a bound form counts as success/valid.
            if self.field.form.is_bound:
                classes = add_css_class(classes, self.success_css_class)
        widget.attrs["class"] = classes

    def add_placeholder_attrs(self, widget=None):
        """Set the widget's placeholder attr if the widget type supports one."""
        if widget is None:
            widget = self.widget
        # An explicit placeholder already on the widget wins over ours.
        placeholder = widget.attrs.get("placeholder", self.placeholder)
        if placeholder and self.set_placeholder and is_widget_with_placeholder(widget):
            # TODO: Should this be stripped and/or escaped?
            widget.attrs["placeholder"] = placeholder

    def add_help_attrs(self, widget=None):
        """Put the (tag-stripped) help text into the widget's title attr."""
        if widget is None:
            widget = self.widget
        if not isinstance(widget, CheckboxInput):
            widget.attrs["title"] = widget.attrs.get("title", escape(strip_tags(self.field_help)))

    def add_widget_attrs(self):
        """Apply class/placeholder/title attrs to the widget (or each sub-widget of a MultiWidget)."""
        if self.is_multi_widget:
            widgets = self.widget.widgets
        else:
            widgets = [self.widget]
        for widget in widgets:
            self.add_class_attrs(widget)
            self.add_placeholder_attrs(widget)
            self.add_help_attrs(widget)

    def list_to_class(self, html, klass):
        """Convert Django's <ul>/<li> choice markup into Bootstrap div/form-check markup.

        ``klass`` becomes the class of the enclosing div; labels and inputs
        inside it get the form-check-label / form-check-input classes.
        """
        classes = add_css_class(klass, self.get_size_class())
        mapping = [
            ("<ul", '<div class="{classes}"'.format(classes=classes)),
            ("</ul>", "</div>"),
            ("<li", '<div class="{form_check_class}"'.format(form_check_class=self.form_check_class)),
            ("</li>", "</div>"),
        ]
        for k, v in mapping:
            html = html.replace(k, v)
        # Apply bootstrap4 classes to labels and inputs.
        # A simple 'replace' isn't enough as we don't want to have several 'class' attr definition, which would happen
        # if we tried to 'html.replace("input", "input class=...")'
        soup = BeautifulSoup(html, features="html.parser")
        enclosing_div = soup.find("div", {"class": classes})
        if enclosing_div:
            for label in enclosing_div.find_all("label"):
                label.attrs["class"] = label.attrs.get("class", []) + ["form-check-label"]
                try:
                    label.input.attrs["class"] = label.input.attrs.get("class", []) + ["form-check-input"]
                except AttributeError:
                    # Label without a nested <input>; nothing to tag.
                    pass
        return str(soup)

    def add_checkbox_label(self, html):
        """Append the field label after a checkbox input (Bootstrap checkbox layout)."""
        return html + render_label(
            content=self.field.label,
            label_for=self.field.id_for_label,
            label_title=escape(strip_tags(self.field_help)),
            label_class="form-check-label",
        )

    def fix_date_select_input(self, html):
        """Lay out SelectDateWidget's three <select>s as equal columns in one row."""
        div1 = '<div class="col-4">'
        div2 = "</div>"
        html = html.replace("<select", div1 + "<select")
        html = html.replace("</select>", "</select>" + div2)
        return '<div class="row bootstrap4-multi-input">{html}</div>'.format(html=html)

    def fix_file_input_label(self, html):
        """Push a file input onto its own line below the label (vertical layouts only)."""
        if self.layout != "horizontal":
            html = "<br>" + html
        return html

    def post_widget_render(self, html):
        """Apply widget-type-specific HTML rewrites after the widget is rendered."""
        if isinstance(self.widget, RadioSelect):
            html = self.list_to_class(html, "radio radio-success")
        elif isinstance(self.widget, CheckboxSelectMultiple):
            html = self.list_to_class(html, "checkbox")
        elif isinstance(self.widget, SelectDateWidget):
            html = self.fix_date_select_input(html)
        elif isinstance(self.widget, CheckboxInput):
            html = self.add_checkbox_label(html)
        elif isinstance(self.widget, FileInput):
            html = self.fix_file_input_label(html)
        return html

    def wrap_widget(self, html):
        """Wrap checkbox widgets in a form-check div."""
        if isinstance(self.widget, CheckboxInput):
            # Wrap checkboxes
            # Note checkboxes do not get size classes, see #318
            html = '<div class="form-check">{html}</div>'.format(html=html)
        return html

    def make_input_group_addon(self, inner_class, outer_class, content):
        """Build one input-group addon (prepend/append) around ``content``; "" if no content."""
        if not content:
            return ""
        if inner_class:
            content = '<span class="{inner_class}">{content}</span>'.format(inner_class=inner_class, content=content)
        return '<div class="{outer_class}">{content}</div>'.format(outer_class=outer_class, content=content)

    @property
    def is_input_group(self):
        """True when addons are present AND the widget type supports input groups."""
        allowed_widget_types = (TextInput, PasswordInput, DateInput, NumberInput, Select, EmailInput, URLInput)
        return (self.addon_before or self.addon_after) and isinstance(self.widget, allowed_widget_types)

    def make_input_group(self, html):
        """Wrap the widget with its addons in an input-group div (when applicable)."""
        if self.is_input_group:
            before = self.make_input_group_addon(self.addon_before_class, "input-group-prepend", self.addon_before)
            after = self.make_input_group_addon(self.addon_after_class, "input-group-append", self.addon_after)
            # Errors must go inside the input-group for Bootstrap to style them.
            html = self.append_errors("{before}{html}{after}".format(before=before, html=html, after=after))
            html = '<div class="input-group">{html}</div>'.format(html=html)
        return html

    def append_help(self, html):
        """Append the rendered help-text block, if there is help text."""
        field_help = self.field_help or None
        if field_help:
            help_html = render_template_file(
                "bootstrap4/field_help_text.html",
                context={
                    "field": self.field,
                    "field_help": field_help,
                    "layout": self.layout,
                    "show_help": self.show_help,
                },
            )
            html += help_html
        return html

    def append_errors(self, html):
        """Append the rendered error block, if the field has errors."""
        field_errors = self.field_errors
        if field_errors:
            errors_html = render_template_file(
                "bootstrap4/field_errors.html",
                context={
                    "field": self.field,
                    "field_errors": field_errors,
                    "layout": self.layout,
                    "show_help": self.show_help,
                },
            )
            html += errors_html
        return html

    def append_to_field(self, html):
        """Append errors and help to a regular field (checkboxes/input groups handled elsewhere)."""
        if isinstance(self.widget, CheckboxInput):
            # we have already appended errors and help to checkboxes
            # in append_to_checkbox_field
            return html
        if not self.is_input_group:
            # we already appended errors for input groups in make_input_group
            html = self.append_errors(html)
        return self.append_help(html)

    def append_to_checkbox_field(self, html):
        """Append errors and help to a checkbox field (no-op for other widgets)."""
        if not isinstance(self.widget, CheckboxInput):
            # we will append errors and help to normal fields later in append_to_field
            return html
        html = self.append_errors(html)
        return self.append_help(html)

    def get_field_class(self):
        """Return the CSS class for the field wrapper (horizontal layout column by default)."""
        field_class = self.field_class
        if not field_class and self.layout == "horizontal":
            field_class = self.horizontal_field_class
        return field_class

    def wrap_field(self, html):
        """Wrap the field HTML in a div carrying the field class, if any."""
        field_class = self.get_field_class()
        if field_class:
            html = '<div class="{field_class}">{html}</div>'.format(field_class=field_class, html=html)
        return html

    def get_label_class(self):
        """Return the CSS class for the label, adding sr-only when the label is hidden."""
        label_class = self.label_class
        if not label_class and self.layout == "horizontal":
            label_class = self.horizontal_label_class
        label_class = add_css_class(label_class, "col-form-label")
        label_class = text_value(label_class)
        if not self.show_label or self.show_label == "sr-only":
            label_class = add_css_class(label_class, "sr-only")
        return label_class

    def get_label(self):
        """Return the label text, None to skip it, or &nbsp; to keep horizontal alignment."""
        if self.show_label == "skip":
            return None
        elif isinstance(self.widget, CheckboxInput):
            # Checkbox labels are rendered next to the input in add_checkbox_label.
            label = None
        else:
            label = self.field.label
        if self.layout == "horizontal" and not label:
            return mark_safe("&#160;")
        return label

    def add_label(self, html):
        """Prepend the rendered <label> to the field HTML."""
        label = self.get_label()
        if label:
            html = render_label(label, label_for=self.field.id_for_label, label_class=self.get_label_class()) + html
        return html

    def get_form_group_class(self):
        """Return the form-group CSS class, reflecting error/success/required state and layout."""
        form_group_class = self.form_group_class
        if self.field.errors:
            if self.error_css_class:
                form_group_class = add_css_class(form_group_class, self.error_css_class)
        else:
            if self.field.form.is_bound:
                form_group_class = add_css_class(form_group_class, self.success_css_class)
        if self.field.field.required and self.required_css_class:
            form_group_class = add_css_class(form_group_class, self.required_css_class)
        if self.layout == "horizontal":
            form_group_class = add_css_class(form_group_class, "row")
        return form_group_class

    def wrap_label_and_field(self, html):
        """Wrap label + field in the outermost form-group element."""
        return render_form_group(html, self.get_form_group_class())

    def _render(self):
        """Run the full rendering pipeline for this field."""
        # See if we're not excluded
        if self.field.name in self.exclude.replace(" ", "").split(","):
            return ""
        # Hidden input requires no special treatment
        if self.field.is_hidden:
            return text_value(self.field)
        # Render the widget
        self.add_widget_attrs()
        html = self.field.as_widget(attrs=self.widget.attrs)
        self.restore_widget_attrs()
        # Start post render
        html = self.post_widget_render(html)
        html = self.append_to_checkbox_field(html)
        html = self.wrap_widget(html)
        html = self.make_input_group(html)
        html = self.append_to_field(html)
        html = self.wrap_field(html)
        html = self.add_label(html)
        html = self.wrap_label_and_field(html)
        return html
class InlineFieldRenderer(FieldRenderer):
    """Field renderer for inline layouts.

    Labels are visually hidden (sr-only) and field errors are folded into the
    widget's ``title`` attribute instead of rendered as a block.
    """

    def add_error_attrs(self):
        """Fold the field's errors into the widget's ``title`` attribute."""
        parts = [self.widget.attrs.get("title", "")]
        parts.extend(strip_tags(error) for error in self.field_errors)
        self.widget.attrs["title"] = " ".join(parts).strip()

    def add_widget_attrs(self):
        super().add_widget_attrs()
        self.add_error_attrs()

    def append_to_field(self, html):
        """Inline fields render no help/error blocks below the widget."""
        return html

    def get_field_class(self):
        return self.field_class

    def get_label_class(self):
        return add_css_class(self.label_class, "sr-only")
| env/lib/python3.8/site-packages/bootstrap4/renderers.py | 21,882 | A content renderer.
Default field renderer.
Default form renderer.
Default formset renderer.
Inline field renderer.
If Django is set up without a database, importing this widget gives RuntimeError These widgets will not be wrapped in a form-control class Find the placeholder in kwargs, even if it's empty If not found, see if we set the label Or just set it to empty These are set in Django or in the global BOOTSTRAP4 settings, and they can be overwritten in the template If the form is marked as form.empty_permitted, do not set required class Render this is a static control For these widget types, add the size class here TODO: Should this be stripped and/or escaped? Apply bootstrap4 classes to labels and inputs. A simple 'replace' isn't enough as we don't want to have several 'class' attr definition, which would happen if we tried to 'html.replace("input", "input class=...")' Wrap checkboxes Note checkboxes do not get size classes, see 318 we have already appended errors and help to checkboxes in append_to_checkbox_field we already appended errors for input groups in make_input_group we will append errors and help to normal fields later in append_to_field See if we're not excluded Hidden input requires no special treatment Render the widget Start post render | 1,278 | en | 0.788749 |
import tensorflow as tf
import numpy as np
import os
import matplotlib.pyplot as plt
from tqdm import tqdm
class RBM(object):
    """Restricted Boltzmann Machine trained with Contrastive Divergence (TensorFlow 1.x graph API).

    Supports binary ('bin') or Gaussian ('gauss') visible units. The graph is
    built by ``_build_model``; weights are updated with momentum SGD plus L2
    weight decay, and models are saved/restored with ``tf.train.Saver``.

    :param num_visible: number of visible units
    :param num_hidden: number of hidden units
    :param visible_unit_type: 'bin' or 'gauss'
    :param main_dir: directory for saved models
        (NOTE(review): the default is a machine-specific absolute path — pass your own)
    :param model_name: file name for the saved model
    :param gibbs_sampling_steps: CD-k steps, default 1
    :param learning_rate: default 0.01
    :param momentum: momentum for the weight-gradient moving average, default 0.9
    :param l2: L2 weight-decay lambda, default 0.001
    :param batch_size: default 10
    :param num_epochs: default 10
    :param stddev: Gaussian visible-unit noise stddev; ignored unless visible_unit_type == 'gauss'
    :param verbose: verbosity level (1 prints validation loss per epoch)
    :param plot_training_loss: whether to plot training loss after fitting
    """

    def __init__(self, num_visible, num_hidden, visible_unit_type='bin',
                 main_dir='/Users/chamalgomes/Documents/Python/GitLab/DeepLearning/KAI PROJECT/rbm/models',
                 model_name='rbm_model', gibbs_sampling_steps=1, learning_rate=0.01, momentum=0.9, l2=0.001,
                 batch_size=10, num_epochs=10, stddev=0.1, verbose=0, plot_training_loss=True):
        # Defining main parameters
        self.num_visible = num_visible
        self.num_hidden = num_hidden
        self.main_dir = main_dir
        self.model_name = model_name
        self.gibbs_sampling_steps = gibbs_sampling_steps
        self.learning_rate = learning_rate
        self.momentum = momentum
        self.l2 = l2
        self.batch_size = batch_size
        self.num_epochs = num_epochs
        self.stddev = stddev
        self.verbose = verbose
        self.plot_training_loss = plot_training_loss
        self.visible_unit_type = visible_unit_type

        self._create_model_directory()
        self.model_path = os.path.join(self.main_dir, self.model_name)

        # Graph tensors/ops; populated by _build_model().
        self.W = None
        self.bh_ = None
        self.bv_ = None
        self.dw = None
        self.dbh_ = None
        self.dbv_ = None
        self.w_upd8 = None
        self.bh_upd8 = None
        self.bv_upd8 = None
        self.encode = None
        # Fixed: was "recontruct" in __init__ but "reconstruct" in _build_model,
        # which left this None forever and broke getRecontruction().
        self.reconstruct = None
        self.loss_function = None
        self.batch_cost = None
        self.batch_free_energy = None

        self.training_losses = []

        self.input_data = None  # set in _build_model
        self.hrand = None  # set in _build_model
        self.validation_size = None  # set in fit
        self.tf_session = None  # set in fit
        self.tf_saver = None  # set in _initialize_tf_utilities_and_ops

    def sample_prob(self, probs, rand):
        """Sample binary states from a tensor of probabilities.

        :param probs: tensor of probabilities (sigmoid activations)
        :param rand: tensor of uniform random values, same shape as probs
        :return: binary sample (1 where probs > rand, else 0)
        """
        return tf.nn.relu(tf.sign(probs - rand))

    def gen_batches(self, data, batch_size):
        """Yield the input data in consecutive batches of size batch_size.

        :param data: input data (array-like)
        :param batch_size: desired size of each batch (last batch may be smaller)
        :return: generator of numpy-array batches
        """
        data = np.array(data)
        for i in range(0, data.shape[0], batch_size):
            yield data[i:i + batch_size]

    def fit(self, train_set, validation_set=None, restore_previous_model=False):
        """Fit the model to the training data.

        :param train_set: training set
        :param validation_set: optional validation set
        :param restore_previous_model: if True, restore a previously trained
            model with the same name from disk and continue training
        :return: self
        """
        if validation_set is not None:
            self.validation_size = validation_set.shape[0]

        tf.reset_default_graph()
        self._build_model()

        with tf.Session() as self.tf_session:
            self._initialize_tf_utilities_and_ops(restore_previous_model)
            self._train_model(train_set, validation_set)
            self.tf_saver.save(self.tf_session, self.model_path)

        if self.plot_training_loss:
            # Plot editing should be done here as you wish
            plt.plot(self.training_losses)
            plt.title("Training batch losses v.s. iteractions")
            plt.xlabel("Num of training iteractions")
            plt.ylabel("Reconstruction error")
            plt.show()

    def _initialize_tf_utilities_and_ops(self, restore_previous_model):
        """Initialize TensorFlow variables and the saver.

        Restores a previously trained model if restore_previous_model is True.
        """
        init_op = tf.global_variables_initializer()
        self.tf_saver = tf.train.Saver()
        self.tf_session.run(init_op)
        if restore_previous_model:
            self.tf_saver.restore(self.tf_session, self.model_path)

    def _train_model(self, train_set, validation_set):
        """Train for num_epochs, optionally reporting validation error per epoch.

        :param train_set: training set
        :param validation_set: validation set (may be None)
        """
        for i in range(self.num_epochs):
            self._run_train_step(train_set)
            if validation_set is not None:
                self._run_validation_error(i, validation_set)

    def _run_train_step(self, train_set):
        """Run one training epoch: shuffle, split into batches, run the update ops.

        Records the training loss per batch when plot_training_loss is True.

        :param train_set: training set
        """
        np.random.shuffle(train_set)
        batches = list(self.gen_batches(train_set, self.batch_size))
        updates = [self.w_upd8, self.bh_upd8, self.bv_upd8]

        for batch in batches:
            if self.plot_training_loss:
                _, loss = self.tf_session.run([updates, self.loss_function], feed_dict=self._create_feed_dict(batch))
                self.training_losses.append(loss)
            else:
                self.tf_session.run(updates, feed_dict=self._create_feed_dict(batch))

    def _run_validation_error(self, epoch, validation_set):
        """Compute the validation loss and print it when verbose == 1.

        :param epoch: current epoch number
        :param validation_set: validation data
        """
        loss = self.tf_session.run(self.loss_function,
                                   feed_dict=self._create_feed_dict(validation_set))
        if self.verbose == 1:
            tqdm.write("Validation cost at step %s: %s" % (epoch, loss))

    def _create_feed_dict(self, data):
        """Create the feed dictionary for a training/validation batch.

        :param data: training/validation set batch
        :return: dictionary(self.input_data: data, self.hrand: random_uniform)
        """
        return {
            self.input_data: data,
            self.hrand: np.random.rand(data.shape[0], self.num_hidden),
        }

    def _build_model(self):
        """Build the Restricted Boltzmann Machine graph (CD-k updates and losses)."""
        self.input_data, self.hrand = self._create_placeholders()
        self.W, self.bh_, self.bv_, self.dw, self.dbh_, self.dbv_ = self._create_variables()

        hprobs0, hstates0, vprobs, hprobs1, hstates1 = self.gibbs_sampling_step(self.input_data)
        positive = self.compute_positive_association(self.input_data, hprobs0, hstates0)

        nn_input = vprobs
        for step in range(self.gibbs_sampling_steps - 1):
            hprobs, hstates, vprobs, hprobs1, hstates1 = self.gibbs_sampling_step(nn_input)
            nn_input = vprobs

        self.reconstruct = vprobs
        negative = tf.matmul(tf.transpose(vprobs), hprobs1)
        self.encode = hprobs1

        # Contrastive-divergence update with momentum and L2 weight decay.
        dw = positive - negative
        self.dw = self.momentum * self.dw + (1 - self.momentum) * dw
        self.w_upd8 = self.W.assign_add(self.learning_rate * self.dw - self.learning_rate * self.l2 * self.W)

        # NOTE(review): bias updates scale the new gradient by learning_rate here
        # (not (1 - momentum) as for the weights) — kept as in the original formulation.
        dbh_ = tf.reduce_mean(hprobs0 - hprobs1, 0)
        self.dbh_ = self.momentum * self.dbh_ + self.learning_rate * dbh_
        self.bh_upd8 = self.bh_.assign_add(self.dbh_)

        dbv_ = tf.reduce_mean(self.input_data - vprobs, 0)
        self.dbv_ = self.momentum * self.dbv_ + self.learning_rate * dbv_
        self.bv_upd8 = self.bv_.assign_add(self.dbv_)

        # RMS reconstruction error, overall and per-sample.
        self.loss_function = tf.sqrt(tf.reduce_mean(tf.square(self.input_data - vprobs)))
        self.batch_cost = tf.sqrt(tf.reduce_mean(tf.square(self.input_data - vprobs), 1))
        self._create_free_energy_for_batch()

    def _create_free_energy_for_batch(self):
        """Create the free-energy op matching the visible-unit type.

        :return: self
        """
        if self.visible_unit_type == 'bin':
            self._create_free_energy_for_bin()
        elif self.visible_unit_type == 'gauss':
            self._create_free_energy_for_gauss()
        else:
            self.batch_free_energy = None

    def _create_free_energy_for_bin(self):
        """Create the free-energy op for a binary visible layer.

        :return: self
        """
        # F(v) = -(v·bv + sum(log(1 + exp(v·W + bh))))
        self.batch_free_energy = - (tf.matmul(self.input_data, tf.reshape(self.bv_, [-1, 1])) + tf.reshape(tf.reduce_sum(tf.log(tf.exp(tf.matmul(self.input_data, self.W) + self.bh_) + 1), 1), [-1, 1]))

    def _create_free_energy_for_gauss(self):
        """Create the free-energy op for a Gaussian visible layer.

        :return: self
        """
        # Same as binary but with the quadratic visible term -0.5*v².
        self.batch_free_energy = - (tf.matmul(self.input_data, tf.reshape(self.bv_, [-1, 1])) - tf.reshape(tf.reduce_sum(0.5 * self.input_data * self.input_data, 1), [-1, 1]) + tf.reshape(tf.reduce_sum(tf.log(tf.exp(tf.matmul(self.input_data, self.W) + self.bh_) + 1), 1), [-1, 1]))

    def _create_placeholders(self):
        """Create the TensorFlow placeholders for the model.

        :return: tuple(input(shape(None, num_visible)),
                       hrand(shape(None, num_hidden)))
        """
        x = tf.placeholder('float', [None, self.num_visible], name='x-input')
        hrand = tf.placeholder('float', [None, self.num_hidden], name='hrand')
        return x, hrand

    def _create_variables(self):
        """Create the TensorFlow variables for the model.

        :return: tuple(weights, hidden bias, visible bias, and their deltas)
        """
        W = tf.Variable(tf.random_normal((self.num_visible, self.num_hidden), mean=0.0, stddev=0.01), name='weights')
        dw = tf.Variable(tf.zeros([self.num_visible, self.num_hidden]), name='derivative-weights')
        bh_ = tf.Variable(tf.zeros([self.num_hidden]), name='hidden-bias')
        dbh_ = tf.Variable(tf.zeros([self.num_hidden]), name='derivative-hidden-bias')
        bv_ = tf.Variable(tf.zeros([self.num_visible]), name='visible-bias')
        dbv_ = tf.Variable(tf.zeros([self.num_visible]), name='derivative-visible-bias')
        return W, bh_, bv_, dw, dbh_, dbv_

    def gibbs_sampling_step(self, visible):
        """Perform one step of Gibbs sampling.

        :param visible: activations of the visible units
        :return: tuple(hidden probs, hidden states, visible probs,
                       new hidden probs, new hidden states)
        """
        hprobs, hstates = self.sample_hidden_from_visible(visible)
        vprobs = self.sample_visible_from_hidden(hprobs)
        hprobs1, hstates1 = self.sample_hidden_from_visible(vprobs)
        return hprobs, hstates, vprobs, hprobs1, hstates1

    def sample_hidden_from_visible(self, visible):
        """Sample the hidden units from the visible units (positive CD phase).

        :param visible: activations of the visible units
        :return: tuple(hidden probabilities, hidden binary states)
        """
        hprobs = tf.nn.sigmoid(tf.matmul(visible, self.W) + self.bh_)
        hstates = self.sample_prob(hprobs, self.hrand)
        return hprobs, hstates

    def sample_visible_from_hidden(self, hidden):
        """Sample the visible units from the hidden units (negative CD phase).

        :param hidden: activations of the hidden units
        :return: visible probabilities (or Gaussian samples for 'gauss' units)
        """
        visible_activation = tf.matmul(hidden, tf.transpose(self.W)) + self.bv_
        if self.visible_unit_type == 'bin':
            vprobs = tf.nn.sigmoid(visible_activation)
        elif self.visible_unit_type == 'gauss':
            vprobs = tf.truncated_normal((1, self.num_visible), mean=visible_activation, stddev=self.stddev)
        else:
            vprobs = None
        return vprobs

    def compute_positive_association(self, visible, hidden_probs, hidden_states):
        """Compute positive associations between visible and hidden units.

        Binary units use the sampled hidden states; Gaussian units use the
        hidden probabilities.

        :param visible: visible units
        :param hidden_probs: hidden units probabilities
        :param hidden_states: hidden units states
        :return: positive association = dot(visible.T, hidden)
        """
        if self.visible_unit_type == 'bin':
            positive = tf.matmul(tf.transpose(visible), hidden_states)
        elif self.visible_unit_type == 'gauss':
            positive = tf.matmul(tf.transpose(visible), hidden_probs)
        else:
            positive = None
        return positive

    def _create_model_directory(self):
        """Create the directory for storing the model.

        :return: self
        """
        if not os.path.isdir(self.main_dir):
            print("Created dir: ", self.main_dir)
            # makedirs (not mkdir) so missing parent directories are created too
            os.makedirs(self.main_dir)

    def getRecontructError(self, data):
        """Return the reconstruction error (loss) for each sample in data.

        :param data: input data of shape num_samples x visible_size
        :return: reconstruction cost for each sample in the batch
        """
        with tf.Session() as self.tf_session:
            self.tf_saver.restore(self.tf_session, self.model_path)
            batch_loss = self.tf_session.run(self.batch_cost,
                                             feed_dict=self._create_feed_dict(data))
            return batch_loss

    def getFreeEnergy(self, data):
        """Return the free energy for each sample in data.

        :param data: input data of shape num_samples x visible_size
        :return: free energy for each sample: p(x)
        """
        with tf.Session() as self.tf_session:
            self.tf_saver.restore(self.tf_session, self.model_path)
            batch_FE = self.tf_session.run(self.batch_free_energy,
                                           feed_dict=self._create_feed_dict(data))
            return batch_FE

    def getRecontruction(self, data):
        """Return the model's reconstruction of data.

        :param data: input data of shape num_samples x visible_size
        :return: reconstructed visible activations for each sample
        """
        with tf.Session() as self.tf_session:
            self.tf_saver.restore(self.tf_session, self.model_path)
            # Fixed: was self.recontruct (typo), which was never assigned and stayed None.
            batch_reconstruct = self.tf_session.run(self.reconstruct,
                                                    feed_dict=self._create_feed_dict(data))
            return batch_reconstruct

    def load_model(self, shape, gibbs_sampling_steps, model_path):
        """Load a trained model from disk.

        The shape of the model (num_visible, num_hidden) and the number of
        gibbs sampling steps must be known in order to restore the model.

        :param shape: tuple(num_visible, num_hidden)
        :param gibbs_sampling_steps: CD-k steps used when the model was trained
        :param model_path: path to the saved model
        :return: self
        """
        self.num_visible, self.num_hidden = shape[0], shape[1]
        self.gibbs_sampling_steps = gibbs_sampling_steps

        tf.reset_default_graph()
        self._build_model()

        init_op = tf.global_variables_initializer()
        self.tf_saver = tf.train.Saver()

        with tf.Session() as self.tf_session:
            self.tf_session.run(init_op)
            self.tf_saver.restore(self.tf_session, model_path)

    def get_model_parameters(self):
        """Return the model parameters in the form of numpy arrays.

        :return: dict with keys 'W', 'bh_', 'bv_'
        """
        with tf.Session() as self.tf_session:
            self.tf_saver.restore(self.tf_session, self.model_path)
            return {
                'W': self.W.eval(),
                'bh_': self.bh_.eval(),
                'bv_': self.bv_.eval()
            }
#The MIT License (MIT)
#Copyright (c) 2016 Gabriele Angeletti
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
#© 2019 GitHub, Inc.
| Unsupervised-Learning/rbm.py | 19,123 | "
INPUT PARAMETER 1) num_visible: number of visible units in the RBM
INPUT PARAMETER 2) num_hidden: number of hidden units in the RBM
INPUT PARAMETER 3) main_dir: main directory to put the models, data and summary directories
INPUT PARAMETER 4) model_name: name of the model you wanna save the data
INPUT PARAMETER 5) gibbs_sampling_steps: Default 1 (Hence Optional)
INPUT PARAMETER 6) learning_rate: Default 0.01 (Hence Optional)
INPUT PARAMETER 7) momentum: Default 0.9(Hence Optional) for Gradient Descent
INPUT PARAMETER 8) l2: l2 regularization lambda value for weight decay Default 0.001(Hence Optional)
INPUT PARAMETER 9) batch_size: Default 10 (Hence Optional)
INPUT PARAMETER 10) num_epochs: Default 10 (Hence Optional)
INPUT PARAMETER 11) stddev: optional, default 0.1. Ignored if visible_unit_type is not 'gauss'
INPUT PARAMETER 12) verbose: level of verbosity. Optional, default 0 (for Regularization)
INPUT PARAMETER 13) plot_training_loss: whether or not to plot training loss, default True
INPUT PARAMETER 14) visible_units_type: Binary or Gaussian (Default Binary)
Building the Restricted Boltzmann Machine in Tensorflow
Create the dictionary of data to feed to TensorFlow's session during training.
:param data: training/validation set batch
:return: dictionary(self.input_data: data, self.hrand: random_uniform)
Create free energy ops to batch input data
:return: self
Create free energy for model with Binary visible layer
:return: self
Create free energy for model with Gauss visible layer
:return: self
Create the directory for storing the model
:return: self
Create the TensorFlow placeholders for the model.
:return: tuple(input(shape(None, num_visible)),
hrand(shape(None, num_hidden)))
Create the TensorFlow variables for the model.
:return: tuple(weights(shape(num_visible, num_hidden),
hidden bias(shape(num_hidden)),
visible bias(shape(num_visible)))
"
Initialize TensorFlow operations: summaries, init operations, saver, summary_writer.
Restore a previously trained model if the flag restore_previous_model is true.
"
Run a training step. A training step is made by randomly shuffling the training set,
divide into batches and run the variable update nodes for each batch. If self.plot_training_loss
is true, will record training loss after each batch.
INPUT PARAMETER train_set: training set
OUTPUT self
Run the error computation on the validation set and print it out for each epoch.
INPUT PARAMETER: current epoch
INPUT PARAMETER validation_set: validation data
OUTPUT: self
" Train the Model
INPUT PARAMETER train set: Training set
INPUT PARAMETER validation_set: Validation set
OUTPUT self
Compute positive associations between visible and hidden units.
:param visible: visible units
:param hidden_probs: hidden units probabilities
:param hidden_states: hidden units states
:return: positive association = dot(visible.T, hidden)
"
fit the model to the training data
INPUT PARAMETER train_set: training set
INPUT PARAMETER validation set.default None (Hence Optional)
INPUT PARAMETER restore_previous_model:
if true, a previous trained model
with the same name of this model is restored from disk to continue training.
OUTPUT: self
Divide input data into batches
data INPUT parameter: input data( like a data frame)
batch_size INPUT parameter: desired size of each batch
:RETURN data divided in batches
return Free Energy from data.
:param data: input data of shape num_samples x visible_size
:return: Free Energy for each sample: p(x)
return Reconstruction Error (loss) from data in batch.
:param data: input data of shape num_samples x visible_size
:return: Reconstruction cost for each sample in the batch
Return the model parameters in the form of numpy arrays.
:return: model parameters
Performs one step of gibbs sampling.
:param visible: activations of the visible units
:return: tuple(hidden probs, hidden states, visible probs,
new hidden probs, new hidden states)
Load a trained model from disk. The shape of the model
(num_visible, num_hidden) and the number of gibbs sampling steps
must be known in order to restore the model.
:param shape: tuple(num_visible, num_hidden)
:param gibbs_sampling_steps:
:param model_path:
:return: self
Sample the hidden units from the visible units.
This is the Positive phase of the Contrastive Divergence algorithm.
:param visible: activations of the visible units
:return: tuple(hidden probabilities, hidden binary states)
takes a tensor of probabilities from a sigmoidal activation and samples from all
the distributions.
probs INPUT parameter: tensor of probabilities
rand INPUT parameter :tensor (of same shape as probabilities) of random values
:RETURN binary sample of probabilities
Sample the visible units from the hidden units.
This is the Negative phase of the Contrastive Divergence algorithm.
:param hidden: activations of the hidden units
:return: visible probabilities
Defining main paramters12345 6 7 8 9 1011121314_build_model _build_model fitfit _initialize_tf_utilities_and_ops you will come across it later on plot editing should be done here as you wish check the function belowcheck the function belowexact formula in my paper Refer to the Binary Free Energy Equation Refer to the Gaussian Free Energy EquationThe MIT License (MIT)Copyright (c) 2016 Gabriele AngelettiPermission is hereby granted, free of charge, to any person obtaining a copyof this software and associated documentation files (the "Software"), to dealin the Software without restriction, including without limitation the rightsto use, copy, modify, merge, publish, distribute, sublicense, and/or sellcopies of the Software, and to permit persons to whom the Software isfurnished to do so, subject to the following conditions:The above copyright notice and this permission notice shall be included in allcopies or substantial portions of the Software.THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS ORIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THEAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHERLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THESOFTWARE.© 2019 GitHub, Inc. | 6,397 | en | 0.584818 |
from data_processing_calibration import DataProcessingCalibration
if __name__ == "__main__":
    # Instantiate the calibration pipeline.
    processor = DataProcessingCalibration()
    print("Initialize is successful.")

    # Read the raw static-test samples from the .csv file.
    raw_samples = processor.openFile('C://static_test.csv')
    print("Data was got.")

    # Filter/process the samples and convert them to Euler angles.
    orientation = processor.processFile(raw_samples)
    print("Data was converted.")

    # Allan deviation per axis (roll / pitch / yaw) at the sensor rate.
    allan_results = processor.deviationAllan(orientation, rate=31)
    tau_roll, ad_roll, tau_pitch, ad_pitch, tau_yaw, ad_yaw = allan_results
    print("Using method of Allan Variation was successful.")

    # Visualise the orientation data and the Allan deviation curves.
    processor.plotDataFromFile(orientation, tau_roll, ad_roll,
                               tau_pitch, ad_pitch, tau_yaw, ad_yaw)
    print("Plots creating was successful.")
| static_test/main.py | 887 | Start processing Open .csv file with data Filter and processing, and convert data in Euler angles Use method of Allan Variation for data Create plots | 149 | en | 0.743079 |
"""
Support for Smappee energy monitor.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/smappee/
"""
import logging
from datetime import datetime, timedelta
import re
import voluptuous as vol
from requests.exceptions import RequestException
from homeassistant.const import (
CONF_USERNAME, CONF_PASSWORD, CONF_HOST
)
from homeassistant.util import Throttle
from homeassistant.helpers.discovery import load_platform
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['smappy==0.2.16']
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'Smappee'
DEFAULT_HOST_PASSWORD = 'admin'
CONF_CLIENT_ID = 'client_id'
CONF_CLIENT_SECRET = 'client_secret'
CONF_HOST_PASSWORD = 'host_password'
DOMAIN = 'smappee'
DATA_SMAPPEE = 'SMAPPEE'
_SENSOR_REGEX = re.compile(
r'(?P<key>([A-Za-z]+))\=' +
r'(?P<value>([0-9\.]+))')
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Inclusive(CONF_CLIENT_ID, 'Server credentials'): cv.string,
vol.Inclusive(CONF_CLIENT_SECRET, 'Server credentials'): cv.string,
vol.Inclusive(CONF_USERNAME, 'Server credentials'): cv.string,
vol.Inclusive(CONF_PASSWORD, 'Server credentials'): cv.string,
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_HOST_PASSWORD, default=DEFAULT_HOST_PASSWORD):
cv.string
}),
}, extra=vol.ALLOW_EXTRA)
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=30)
def setup(hass, config):
    """Set up the Smapee component."""
    conf = config.get(DOMAIN)
    smappee = Smappee(conf.get(CONF_CLIENT_ID),
                      conf.get(CONF_CLIENT_SECRET),
                      conf.get(CONF_USERNAME),
                      conf.get(CONF_PASSWORD),
                      conf.get(CONF_HOST),
                      conf.get(CONF_HOST_PASSWORD))

    # Abort unless at least one of the two back ends authenticated.
    if not smappee.is_local_active and not smappee.is_remote_active:
        _LOGGER.error("Neither Smappee server or local component enabled.")
        return False

    hass.data[DATA_SMAPPEE] = smappee
    for platform in ('switch', 'sensor'):
        load_platform(hass, platform, DOMAIN)
    return True
class Smappee:
    """Stores data retrieved from Smappee sensor.

    Wraps both the Smappee cloud API (smappy.Smappee) and the local device
    API (smappy.LocalSmappee). Either back end may be unavailable, so every
    accessor first checks the corresponding ``is_*_active`` flag and
    returns None when its back end is not configured.
    """

    def __init__(self, client_id, client_secret, username,
                 password, host, host_password):
        """Initialize the data.

        Authenticates against the cloud API when ``client_id`` is given and
        against the local device when ``host`` is given; failures are logged
        and leave the corresponding back end disabled instead of raising.
        """
        import smappy

        self._remote_active = False
        self._local_active = False
        if client_id is not None:
            try:
                self._smappy = smappy.Smappee(client_id, client_secret)
                self._smappy.authenticate(username, password)
                self._remote_active = True
            except RequestException as error:
                self._smappy = None
                _LOGGER.exception(
                    "Smappee server authentication failed (%s)",
                    error)
        else:
            _LOGGER.warning("Smappee server component init skipped.")

        if host is not None:
            try:
                self._localsmappy = smappy.LocalSmappee(host)
                self._localsmappy.logon(host_password)
                self._local_active = True
            except RequestException as error:
                self._localsmappy = None
                _LOGGER.exception(
                    "Local Smappee device authentication failed (%s)",
                    error)
        else:
            _LOGGER.warning("Smappee local component init skipped.")

        self.locations = {}
        self.info = {}
        self.consumption = {}
        self.sensor_consumption = {}
        self.instantaneous = {}
        # Fix: always define local_devices. It was previously created only
        # inside update() when the local device is active, so cloud-only
        # setups could hit an AttributeError when platforms read it.
        # (update() overwrites it with load_command_control_config() output;
        # an empty list is assumed to be a safe "no switches" default —
        # TODO confirm against the smappy API.)
        self.local_devices = []
        if self._remote_active or self._local_active:
            self.update()

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Update data from Smappee API.

        Refreshes location info, per-sensor and total consumption from the
        cloud, and switches plus instantaneous values from the local device.
        Throttled to at most one real refresh per MIN_TIME_BETWEEN_UPDATES.
        """
        if self.is_remote_active:
            service_locations = self._smappy.get_service_locations() \
                .get('serviceLocations')
            for location in service_locations:
                location_id = location.get('serviceLocationId')
                if location_id is not None:
                    self.sensor_consumption[location_id] = {}
                    self.locations[location_id] = location.get('name')
                    self.info[location_id] = self._smappy \
                        .get_service_location_info(location_id)
                    _LOGGER.debug("Remote info %s %s",
                                  self.locations, self.info[location_id])

                    for sensors in self.info[location_id].get('sensors'):
                        sensor_id = sensors.get('id')
                        # Daily aggregation (3) over the last 24h (1440 min).
                        self.sensor_consumption[location_id]\
                            .update({sensor_id: self.get_sensor_consumption(
                                location_id, sensor_id,
                                aggregation=3, delta=1440)})
                    _LOGGER.debug("Remote sensors %s %s",
                                  self.locations,
                                  self.sensor_consumption[location_id])

                    self.consumption[location_id] = self.get_consumption(
                        location_id, aggregation=3, delta=1440)
                    _LOGGER.debug("Remote consumption %s %s",
                                  self.locations,
                                  self.consumption[location_id])

        if self.is_local_active:
            self.local_devices = self.get_switches()
            _LOGGER.debug("Local switches %s", self.local_devices)
            self.instantaneous = self.load_instantaneous()
            _LOGGER.debug("Local values %s", self.instantaneous)

    @property
    def is_remote_active(self):
        """Return true if the Smappee server is configured and working."""
        return self._remote_active

    @property
    def is_local_active(self):
        """Return true if the Smappee local device is configured and working."""
        return self._local_active

    def get_switches(self):
        """Get switches from local Smappee."""
        if not self.is_local_active:
            return
        try:
            return self._localsmappy.load_command_control_config()
        except RequestException as error:
            _LOGGER.error(
                "Error getting switches from local Smappee. (%s)",
                error)

    def get_consumption(self, location_id, aggregation, delta):
        """Update data from Smappee.

        :param location_id: service location to query
        :param aggregation: 1 = 5 min values (only available for the last
            14 days), 2 = hourly, 3 = daily, 4 = monthly, 5 = quarterly
        :param delta: look-back window in minutes ending now (UTC)
        :return: consumption data, or None on failure / remote inactive
        """
        # Start & End accept epoch (in milliseconds),
        # datetime and pandas timestamps
        if not self.is_remote_active:
            return
        end = datetime.utcnow()
        start = end - timedelta(minutes=delta)
        try:
            return self._smappy.get_consumption(location_id,
                                                start,
                                                end,
                                                aggregation)
        except RequestException as error:
            _LOGGER.error(
                "Error getting comsumption from Smappee cloud. (%s)",
                error)

    def get_sensor_consumption(self, location_id, sensor_id,
                               aggregation, delta):
        """Update data from Smappee.

        :param location_id: service location to query
        :param sensor_id: sensor within the service location
        :param aggregation: 1 = 5 min values (only available for the last
            14 days), 2 = hourly, 3 = daily, 4 = monthly, 5 = quarterly
        :param delta: look-back window in minutes ending now (UTC)
        :return: sensor consumption data, or None on failure / remote inactive
        """
        # Start & End accept epoch (in milliseconds),
        # datetime and pandas timestamps
        if not self.is_remote_active:
            return
        end = datetime.utcnow()
        start = end - timedelta(minutes=delta)
        try:
            return self._smappy.get_sensor_consumption(location_id,
                                                       sensor_id,
                                                       start,
                                                       end, aggregation)
        except RequestException as error:
            _LOGGER.error(
                "Error getting comsumption from Smappee cloud. (%s)",
                error)

    def actuator_on(self, location_id, actuator_id,
                    is_remote_switch, duration=None):
        """Turn on actuator.

        :return: True on success, False when the request failed
        """
        # Duration = 300,900,1800,3600
        # or any other value for an undetermined period of time.
        #
        # The comport plugs have a tendency to ignore the on/off signal.
        # And because you can't read the status of a plug, it's more
        # reliable to execute the command twice.
        try:
            if is_remote_switch:
                self._smappy.actuator_on(location_id, actuator_id, duration)
                self._smappy.actuator_on(location_id, actuator_id, duration)
            else:
                self._localsmappy.on_command_control(actuator_id)
                self._localsmappy.on_command_control(actuator_id)
        except RequestException as error:
            _LOGGER.error(
                "Error turning actuator on. (%s)",
                error)
            return False
        return True

    def actuator_off(self, location_id, actuator_id,
                     is_remote_switch, duration=None):
        """Turn off actuator.

        :return: True on success, False when the request failed
        """
        # Duration = 300,900,1800,3600
        # or any other value for an undetermined period of time.
        #
        # The comport plugs have a tendency to ignore the on/off signal.
        # And because you can't read the status of a plug, it's more
        # reliable to execute the command twice.
        try:
            if is_remote_switch:
                self._smappy.actuator_off(location_id, actuator_id, duration)
                self._smappy.actuator_off(location_id, actuator_id, duration)
            else:
                self._localsmappy.off_command_control(actuator_id)
                self._localsmappy.off_command_control(actuator_id)
        except RequestException as error:
            # Fix: this previously logged "Error turning actuator on."
            # (copy-paste from actuator_on), which misled debugging.
            _LOGGER.error(
                "Error turning actuator off. (%s)",
                error)
            return False
        return True

    def active_power(self):
        """Get sum of all instantaneous active power values from local hub."""
        if not self.is_local_active:
            return
        try:
            return self._localsmappy.active_power()
        except RequestException as error:
            _LOGGER.error(
                "Error getting data from Local Smappee unit. (%s)",
                error)

    def active_cosfi(self):
        """Get the average of all instantaneous cosfi values."""
        if not self.is_local_active:
            return
        try:
            return self._localsmappy.active_cosfi()
        except RequestException as error:
            _LOGGER.error(
                "Error getting data from Local Smappee unit. (%s)",
                error)

    def instantaneous_values(self):
        """ReportInstantaneousValues.

        Parses the local device's '<BR>'-separated, comma-delimited report
        into a dict of key -> numeric string via _SENSOR_REGEX.
        """
        if not self.is_local_active:
            return
        report_instantaneous_values = \
            self._localsmappy.report_instantaneous_values()

        report_result = \
            report_instantaneous_values['report'].split('<BR>')

        properties = {}
        for lines in report_result:
            lines_result = lines.split(',')
            for prop in lines_result:
                match = _SENSOR_REGEX.search(prop)
                if match:
                    properties[match.group('key')] = \
                        match.group('value')
        _LOGGER.debug(properties)
        return properties

    def active_current(self):
        """Get current active Amps."""
        if not self.is_local_active:
            return
        properties = self.instantaneous_values()
        return float(properties['current'])

    def active_voltage(self):
        """Get current active Voltage."""
        if not self.is_local_active:
            return
        properties = self.instantaneous_values()
        return float(properties['voltage'])

    def load_instantaneous(self):
        """LoadInstantaneous."""
        if not self.is_local_active:
            return
        try:
            return self._localsmappy.load_instantaneous()
        except RequestException as error:
            _LOGGER.error(
                "Error getting data from Local Smappee unit. (%s)",
                error)
| homeassistant/components/smappee.py | 12,733 | Stores data retrieved from Smappee sensor.
Initialize the data.
Get the average of all instantaneous cosfi values.
Get current active Amps.
Get sum of all instantaneous active power values from local hub.
Get current active Voltage.
Turn off actuator.
Turn on actuator.
Update data from Smappee.
Update data from Smappee.
Get switches from local Smappee.
ReportInstantaneousValues.
Return true if Smappe local device is configured and working.
Return true if Smappe server is configured and working.
LoadInstantaneous.
Set up the Smapee component.
Update data from Smappee API.
Support for Smappee energy monitor.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/smappee/
Start & End accept epoch (in milliseconds), datetime and pandas timestamps Aggregation: 1 = 5 min values (only available for the last 14 days), 2 = hourly values, 3 = daily values, 4 = monthly values, 5 = quarterly values Start & End accept epoch (in milliseconds), datetime and pandas timestamps Aggregation: 1 = 5 min values (only available for the last 14 days), 2 = hourly values, 3 = daily values, 4 = monthly values, 5 = quarterly values Duration = 300,900,1800,3600 or any other value for an undetermined period of time. The comport plugs have a tendency to ignore the on/off signal. And because you can't read the status of a plug, it's more reliable to execute the command twice. Duration = 300,900,1800,3600 or any other value for an undetermined period of time. The comport plugs have a tendency to ignore the on/off signal. And because you can't read the status of a plug, it's more reliable to execute the command twice. | 1,678 | en | 0.817099 |
import os
import math
from decimal import Decimal
import utility
import torch
import torch.nn.utils as utils
from tqdm import tqdm
class Trainer():
    """Drives training and evaluation of a super-resolution model.

    Wires together the data loaders, model, loss and optimizer built
    elsewhere, and implements the per-epoch train/test loops, optional
    AMP mixed precision, and an optional auto-encoder loss branch.
    """

    def __init__(self, args, loader, my_model, my_loss, ckp):
        self.args = args
        self.scale = args.scale

        self.ckp = ckp                      # checkpoint/logging helper
        self.loader_train = loader.loader_train
        self.loader_test = loader.loader_test
        self.model = my_model
        self.loss = my_loss
        self.optimizer = utility.make_optimizer(args, self.model)
        # True when the configured loss contains an auto-encoder ('ae') term.
        self.flag_ae_loss = args.loss.lower().find('ae') >= 0
        if self.args.precision == 'amp':
            self.scaler = torch.cuda.amp.GradScaler()

        if self.args.load != '':
            # To avoid "UserWarning: Detected call of `lr_scheduler.step()`
            # before `optimizer.step()`." The 0 gradient value will not
            # update any parameter of the model to train.
            self.optimizer.zero_grad()
            self.optimizer.step()
            self.optimizer.load(ckp.dir, epoch=len(ckp.log))

        self.error_last = 1e8

    def train(self):
        """Run one training epoch over loader_train."""
        self.loss.step()
        epoch = self.optimizer.get_last_epoch() + 1
        lr = self.optimizer.get_lr()

        self.ckp.write_log(
            '[Epoch {}]\tLearning rate: {:.2e}'.format(epoch, Decimal(lr))
        )
        self.loss.start_log()
        self.model.train()

        timer_data, timer_model = utility.timer(), utility.timer()
        # TEMP
        self.loader_train.dataset.set_scale(0)
        for batch, (lr, hr, _,) in enumerate(self.loader_train):
            lr, hr = self.prepare(lr, hr)
            if self.flag_ae_loss:
                # HR tensor carries the AE target stacked on the channel dim.
                hr, hr_ae = hr[:, :self.args.n_colors, ...], hr[:, self.args.n_colors:, ...]
            else:
                hr_ae = None

            timer_data.hold()
            timer_model.tic()

            self.optimizer.zero_grad()
            if self.args.precision == 'amp':
                with torch.cuda.amp.autocast():
                    sr = self.model(lr, 0)
                    if self.flag_ae_loss:
                        sr_ae = self._forward_auto_encoder(hr_ae, 0)
                    else:
                        sr_ae = None
                    loss = self.loss(sr, hr, sr_ae, hr_ae)
                self.scaler.scale(loss).backward()
            else:
                sr = self.model(lr, 0)
                if self.flag_ae_loss:
                    sr_ae = self._forward_auto_encoder(hr_ae, 0)
                else:
                    sr_ae = None
                loss = self.loss(sr, hr, sr_ae, hr_ae)
                loss.backward()

            if self.args.gclip > 0:
                if self.args.precision == 'amp':
                    # Fix: under AMP the gradients are still multiplied by
                    # the loss scale here; unscale them first so the clip
                    # threshold applies to the true gradient magnitudes.
                    self.scaler.unscale_(self.optimizer)
                utils.clip_grad_value_(
                    self.model.parameters(),
                    self.args.gclip
                )

            if self.args.precision == 'amp':
                self.scaler.step(self.optimizer)
                self.scaler.update()
            else:
                self.optimizer.step()

            timer_model.hold()

            if (batch + 1) % self.args.print_every == 0:
                self.ckp.write_log('[{}/{}]\t{}\t{:.1f}+{:.1f}s'.format(
                    (batch + 1) * self.args.batch_size,
                    len(self.loader_train.dataset),
                    self.loss.display_loss(batch),
                    timer_model.release(),
                    timer_data.release()))

            timer_data.tic()

        self.loss.end_log(len(self.loader_train))
        self.error_last = self.loss.log[-1, -1]
        self.optimizer.schedule()

    def test(self):
        """Evaluate on every test set / scale, log PSNR and save results."""
        torch.set_grad_enabled(False)

        epoch = self.optimizer.get_last_epoch()
        self.ckp.write_log('\nEvaluation:')
        self.ckp.add_log(
            torch.zeros(1, len(self.loader_test), len(self.scale))
        )
        self.model.eval()

        timer_test = utility.timer()
        if self.args.save_results: self.ckp.begin_background()
        for idx_data, d in enumerate(self.loader_test):
            for idx_scale, scale in enumerate(self.scale):
                d.dataset.set_scale(idx_scale)
                for lr, hr, filename in tqdm(d, ncols=80):
                    lr, hr = self.prepare(lr, hr)
                    sr = self.model(lr, idx_scale)
                    sr = utility.quantize(sr, self.args.rgb_range)

                    save_list = [sr]
                    self.ckp.log[-1, idx_data, idx_scale] += utility.calc_psnr(
                        sr, hr, scale, self.args.rgb_range, dataset=d
                    )
                    if self.args.save_gt:
                        save_list.extend([lr, hr])

                    if self.args.save_results:
                        self.ckp.save_results(d, filename[0], save_list, scale)

                self.ckp.log[-1, idx_data, idx_scale] /= len(d)
                best = self.ckp.log.max(0)
                self.ckp.write_log(
                    '[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
                        d.dataset.name,
                        scale,
                        self.ckp.log[-1, idx_data, idx_scale],
                        best[0][idx_data, idx_scale],
                        best[1][idx_data, idx_scale] + 1
                    )
                )

        self.ckp.write_log('Forward: {:.2f}s\n'.format(timer_test.toc()))
        self.ckp.write_log('Saving...')

        if self.args.save_results:
            self.ckp.end_background()

        if not self.args.test_only:
            self.ckp.save(self, epoch, is_best=(best[1][0, 0] + 1 == epoch))

        self.ckp.write_log(
            'Total: {:.2f}s\n'.format(timer_test.toc()), refresh=True
        )

        torch.set_grad_enabled(True)

    def prepare(self, *args):
        """Move tensors to the target device (and half precision if asked)."""
        device = torch.device('cpu' if self.args.cpu else 'cuda')
        def _prepare(tensor):
            if self.args.precision == 'half': tensor = tensor.half()
            return tensor.to(device)

        return [_prepare(a) for a in args]

    def terminate(self):
        """Return True when the run should stop (test-only or epochs done)."""
        if self.args.test_only:
            self.test()
            return True
        else:
            epoch = self.optimizer.get_last_epoch() + 1
            return epoch > self.args.epochs
            # return epoch >= self.args.epochs

    def _forward_auto_encoder(self, x, idx_scale):
        # Temporarily switch the model into AE-forward mode for this pass.
        self.model.set_forward_ae_loss(True)
        x = self.model(x, idx_scale)
        self.model.set_forward_ae_loss(False)
        return x
#
# PySNMP MIB module NOKIA-HWM-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/NOKIA-HWM-MIB
# Produced by pysmi-0.3.4 at Wed May 1 14:23:29 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsUnion, ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint")
entPhysicalIndex, = mibBuilder.importSymbols("ENTITY-MIB", "entPhysicalIndex")
ntcHWMibs, ntcHWReqs, ntcCommonModules = mibBuilder.importSymbols("NOKIA-COMMON-MIB-OID-REGISTRATION-MIB", "ntcHWMibs", "ntcHWReqs", "ntcCommonModules")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
ObjectIdentity, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, Integer32, IpAddress, TimeTicks, ModuleIdentity, MibIdentifier, Unsigned32, Counter32, NotificationType, iso, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "Integer32", "IpAddress", "TimeTicks", "ModuleIdentity", "MibIdentifier", "Unsigned32", "Counter32", "NotificationType", "iso", "Bits")
AutonomousType, TextualConvention, TimeStamp, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "AutonomousType", "TextualConvention", "TimeStamp", "DisplayString")
# --- Module identity -------------------------------------------------------
# Registration node, revision history and contact information for the Nokia
# hardware-management MIB. NOTE(review): this looks like pysmi-generated
# code — prefer regenerating from the ASN.1 source over hand-editing.
ntcHWModule = ModuleIdentity((1, 3, 6, 1, 4, 1, 94, 1, 16, 5, 1))
ntcHWModule.setRevisions(('1998-08-24 00:00', '1998-09-03 00:00', '1998-09-24 00:00', '1998-10-04 00:00', '1999-01-08 00:00', '1999-08-05 00:00', '1999-10-25 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: ntcHWModule.setRevisionsDescriptions(('Rev 0.1 August 24, 1998 Initial version - ready for review', 'Rev 0.2 September 3, 1998 Initial review by Tero Soukko whose comments have been incorporated.', 'Rev 0.3 September 24, 1998 ready for initial review.', 'Rev 0.4 Updated anchors to use values registered by Mika Kiikkila.', 'Rev 1.0 Syntax of ntcHWLastChangedTime changed from DateAndTime to TimeStamp. Traps commented out because they are part of Nokia Common Alarm MIB.', 'Rev 1.01 Those IMPORTS which are not used are removed. Groups ntcHWSlots and ntcHWEventGroup which are not defined in this module are removed. The name NokiaHwmSlotEntry is changed to NtcHWSlotEntry on account of convenience. All notification definions before out-commented removed. Some esthetic modifications made.', "Comment 'The NMS is not allowed to set the value of ntcHWAdminstate to missing.' added to the ntcHWAdminstate's description.",))
if mibBuilder.loadTexts: ntcHWModule.setLastUpdated('9901080000Z')
if mibBuilder.loadTexts: ntcHWModule.setOrganization('Nokia')
if mibBuilder.loadTexts: ntcHWModule.setContactInfo('Anna-Kaisa Lindfors Nokia Telecommunications Oy Hiomotie 5, FIN-00380 Helsinki +358-9-51121 anna-kaisa.lindfors@nokia.com')
if mibBuilder.loadTexts: ntcHWModule.setDescription('The MIB module that is used to control the Hardware Management information.')
# --- OID subtrees ----------------------------------------------------------
# Anchor points under which objects, events, conformance groups and
# compliance statements of this MIB are registered.
ntcHWObjs = MibIdentifier((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1))
ntcHWEvents = MibIdentifier((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 2, 0))
ntcHWGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 94, 1, 16, 8, 1, 1))
ntcHWCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 94, 1, 16, 8, 1, 2))
# --- ntcHWUnitTable --------------------------------------------------------
# One conceptual row per pluggable circuit board ('unit'), created
# automatically by the hardware-management software and indexed by the
# ENTITY-MIB entPhysicalIndex.
ntcHWUnitTable = MibTable((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 1), )
if mibBuilder.loadTexts: ntcHWUnitTable.setStatus('current')
if mibBuilder.loadTexts: ntcHWUnitTable.setDescription("A table which contains an entry for each pluggable circuit board (in this MIB a 'unit' is the same as a pluggable circuit board.) Entries of this table are automatically created by the hardware management software.")
ntcHWUnitEntry = MibTableRow((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 1, 1), ).setIndexNames((0, "ENTITY-MIB", "entPhysicalIndex"))
if mibBuilder.loadTexts: ntcHWUnitEntry.setStatus('current')
if mibBuilder.loadTexts: ntcHWUnitEntry.setDescription('A conceptual row in the ntcHWUnitTable. Rows are created automatically by the Hardware Management software.')
# --- Columns of ntcHWUnitEntry --------------------------------------------
# Administrative/operational state, availability, restart control, LED
# colour and factory data of each unit.
ntcHWAdminState = MibTableColumn((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("inService", 1), ("outOfService", 2), ("inTest", 3), ("missing", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntcHWAdminState.setStatus('current')
if mibBuilder.loadTexts: ntcHWAdminState.setDescription('Represents the desired state of the unit. inService indicates that the unit is intended to be operating normally. outOfService indicates that the unit should be taken out of normal operating mode and no data traffic should appear in this unit. inTest indicates that the unit should be placed into a selftest mode. missing indicates that the unit is expected to be present but has been detected as not being physically present. The NMS is not allowed to set the value of ntcHWAdminstate to missing.')
ntcHWOperState = MibTableColumn((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("down", 1), ("up", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntcHWOperState.setStatus('current')
if mibBuilder.loadTexts: ntcHWOperState.setDescription('Indicates the current state of the unit. down indicates that the unit is in a non-functional state. up indicates that the unit is functioning normally.')
ntcHWAvailabilityStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))).clone(namedValues=NamedValues(("inCharge", 1), ("applicationStarting", 2), ("applicationShutdown", 3), ("platformStarting", 4), ("resetting", 5), ("separated", 6), ("unconfigured", 7), ("testing", 8), ("standby", 9), ("dormant", 10), ("unavailable", 11)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntcHWAvailabilityStatus.setStatus('current')
if mibBuilder.loadTexts: ntcHWAvailabilityStatus.setDescription("Provides more specific information on the state of the unit in this conceptual row. The status column has eleven defined values: inCharge = the unit is fully operational and ready to perform its desired tasks; applicationStarting = the application software is starting up; applicationShutdown = the application software is shutting down; platformStarting = Basic platform software is starting up; resetting = the disk files are closed and hardware reset is forced; separated = Only basic OS software is running. The unit can start application software on request; unconfigured = The administrative state of the unit is 'missing', disk files are closed and only basic OS software is running. The unit refuses to start application software; testing = Selftests can be performed, only basic OS are running; standby = The unit is redundant and is fully operational but not in charge of operations. It is ready to move to 'inCharge' state when necessary; dormant = All connections are physically inactive to enable removal of the unit without electric disturbance in the backplane. Only watchdog software is running for a short duration of time; unavailable = The unit is not physically present or cannot be contacted.")
ntcHWRestart = MibTableColumn((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("reset", 1), ("hotRestart", 2), ("detach", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntcHWRestart.setStatus('current')
if mibBuilder.loadTexts: ntcHWRestart.setDescription('Provides the ability to reset or perform a hot restart the unit represented by this conceptual row. reset = the Unit is shutdown in an orderly manner and restarted again via hardware reset; hotRestart = only the software in a unit is restarted, a hardware reset is not initiated; detach = all electrical connections of the unit are forced to an inactive state to enable removal of the unit without electrical disturbance in the backplane.')
ntcHWLedState = MibTableColumn((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("red", 1), ("yellow", 2), ("black", 3), ("green", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntcHWLedState.setStatus('current')
if mibBuilder.loadTexts: ntcHWLedState.setDescription('Indicates the current LED color of the unit represented by this conceptual row.')
ntcHWSerialNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 1, 1, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntcHWSerialNumber.setStatus('current')
if mibBuilder.loadTexts: ntcHWSerialNumber.setDescription('The units serial number in displayable format.')
ntcHWProductionDate = MibTableColumn((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 1, 1, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntcHWProductionDate.setStatus('current')
if mibBuilder.loadTexts: ntcHWProductionDate.setDescription('The units production date in displayable format.')
ntcHWUnitEntryChanged = MibTableColumn((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 1, 1, 8), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntcHWUnitEntryChanged.setStatus('current')
if mibBuilder.loadTexts: ntcHWUnitEntryChanged.setDescription('Represents the value of sysUpTime at the instant that this conceptual row entry has changed.')
ntcHWSlotTable = MibTable((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 2), )
if mibBuilder.loadTexts: ntcHWSlotTable.setStatus('current')
if mibBuilder.loadTexts: ntcHWSlotTable.setDescription('Table whose entries represent the expected circuit board type. The entries are created automatically by the hardware management software.')
ntcHWSlotEntry = MibTableRow((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 2, 1), ).setIndexNames((0, "ENTITY-MIB", "entPhysicalIndex"))
if mibBuilder.loadTexts: ntcHWSlotEntry.setStatus('current')
if mibBuilder.loadTexts: ntcHWSlotEntry.setDescription('The logical row describing the expected circiut board type of a slot.')
ntcHWDesiredUnitType = MibTableColumn((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 2, 1, 2), AutonomousType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntcHWDesiredUnitType.setStatus('current')
if mibBuilder.loadTexts: ntcHWDesiredUnitType.setDescription("The unit type which is expected to be inserted or present in the current slot. An indication of the vendor-specific hardware type of the HWM entity. Note that this is different from the definition of MIB-II's sysObjectID. An agent should set this object to a enterprise-specific registration identifier value indicating the specific equipment type in detail. If no vendor-specific registration identifier exists for this entity, or the value is unknown by this agent, then the value { 0 0 } is returned.")
ntcHWLastChangedTime = MibScalar((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 3), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ntcHWLastChangedTime.setStatus('current')
if mibBuilder.loadTexts: ntcHWLastChangedTime.setDescription('The value of sysUpTime at the time any of these events occur: * any instance in the following object changes value: - hwmUnitEntryChanged This object shall be set to value 0 in startup.')
ntcHWLoadInventoryContainer = MibScalar((1, 3, 6, 1, 4, 1, 94, 1, 16, 7, 1, 1, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ntcHWLoadInventoryContainer.setStatus('current')
if mibBuilder.loadTexts: ntcHWLoadInventoryContainer.setDescription('Writing any value to this object will cause the hardware management software to reread its configuration file from disk.')
ntcHWUnits = ObjectGroup((1, 3, 6, 1, 4, 1, 94, 1, 16, 8, 1, 1, 1)).setObjects(("NOKIA-HWM-MIB", "ntcHWAdminState"), ("NOKIA-HWM-MIB", "ntcHWOperState"), ("NOKIA-HWM-MIB", "ntcHWAvailabilityStatus"), ("NOKIA-HWM-MIB", "ntcHWRestart"), ("NOKIA-HWM-MIB", "ntcHWLedState"), ("NOKIA-HWM-MIB", "ntcHWSerialNumber"), ("NOKIA-HWM-MIB", "ntcHWProductionDate"), ("NOKIA-HWM-MIB", "ntcHWUnitEntryChanged"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ntcHWUnits = ntcHWUnits.setStatus('current')
if mibBuilder.loadTexts: ntcHWUnits.setDescription('A collection of objects representing the status of a unit.')
ntcHWCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 94, 1, 16, 8, 1, 2, 1)).setObjects(("ENTITY-MIB", "entityPhysicalGroup"), ("NOKIA-HWM-MIB", "ntcHWUnits"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ntcHWCompliance = ntcHWCompliance.setStatus('current')
if mibBuilder.loadTexts: ntcHWCompliance.setDescription('The compliance statement Hardware Management.')
mibBuilder.exportSymbols("NOKIA-HWM-MIB", ntcHWCompliance=ntcHWCompliance, ntcHWLedState=ntcHWLedState, ntcHWDesiredUnitType=ntcHWDesiredUnitType, ntcHWLastChangedTime=ntcHWLastChangedTime, ntcHWSlotEntry=ntcHWSlotEntry, ntcHWUnits=ntcHWUnits, ntcHWUnitEntry=ntcHWUnitEntry, ntcHWUnitEntryChanged=ntcHWUnitEntryChanged, ntcHWUnitTable=ntcHWUnitTable, ntcHWProductionDate=ntcHWProductionDate, ntcHWLoadInventoryContainer=ntcHWLoadInventoryContainer, ntcHWGroups=ntcHWGroups, ntcHWCompliances=ntcHWCompliances, ntcHWModule=ntcHWModule, ntcHWOperState=ntcHWOperState, ntcHWRestart=ntcHWRestart, ntcHWEvents=ntcHWEvents, ntcHWAvailabilityStatus=ntcHWAvailabilityStatus, ntcHWAdminState=ntcHWAdminState, ntcHWSlotTable=ntcHWSlotTable, ntcHWSerialNumber=ntcHWSerialNumber, ntcHWObjs=ntcHWObjs, PYSNMP_MODULE_ID=ntcHWModule)
| pysnmp-with-texts/NOKIA-HWM-MIB.py | 14,112 | PySNMP MIB module NOKIA-HWM-MIB (http://snmplabs.com/pysmi) ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/NOKIA-HWM-MIB Produced by pysmi-0.3.4 at Wed May 1 14:23:29 2019 On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) | 320 | en | 0.402725 |
class NoNodeData(Exception):
    """Raised when an operation needs node data but the node holds none."""
class AVLNode(object):
    """A single node of an AVL tree holding one key/value pair.

    Attributes:
        key: lookup key for this node (``None`` means "no data").
        value: payload stored under ``key``.
        left: left child node, or ``None``.
        right: right child node, or ``None``.
        height: height of the subtree rooted here; a fresh node is 1.
    """

    def __init__(self, key=None, value=None) -> None:
        """Initializes the AVL Node.

        Args:
            key (optional): lookup key for this node. Defaults to None.
            value (optional): value stored under ``key``. Defaults to None.
        """
        super().__init__()
        self.key = key
        self.value = value
        self.left = None
        self.right = None
        self.height = 1

    def __str__(self) -> str:
        """Prints single AVL Node to stdout

        Raises:
            NoNodeData: If no data is present in the node

        Returns:
            str: output string
        """
        # Compare against None rather than truthiness so falsy keys such as
        # 0 or "" are still rendered instead of raising NoNodeData.
        if self.key is not None:
            out = "data: {0}\nleft: {1}\nright: {2}\n".format(
                (self.key, self.value), self.left.__str__(), self.right.__str__())
            return out
        raise NoNodeData

    def get_key(self):
        """returns the key of the node

        Returns:
            the key in (key, value) pair
        """
        return self.key

    def get_value(self):
        """returns the value of the key

        Returns:
            the value in (key, value) pair
        """
        return self.value
| avltree/AVLNode.py | 1,178 | Initializes the AVL Node.
Args:
data (dict, optional): {Key:Value} pair. Defaults to None.
Prints single AVL Node to stdout
Raises:
NoNodeData: If no data is present in the node
Returns:
str: output string
returns the key of the node
Returns:
str: the key in (key, value) pair
returns the value of the key
Returns:
str: the value in (key, value) pair | 375 | en | 0.432373 |
# -*- coding: utf-8 -*-
from benedict.core import clone as _clone
from benedict.core import traverse as _traverse
import unittest
class traverse_test_case(unittest.TestCase):
    """Unit tests for benedict.core.traverse."""

    def test_traverse(self):
        # Every top-level branch shares the same shape; build branches via a
        # helper so the input and the expected output stay visibly in sync.
        def branch(x, y, ok):
            return {'x': x, 'y': y, 'z': {'ok': ok}}

        source = {
            'a': branch(2, 3, 5),
            'b': branch(7, 11, 13),
            'c': branch(17, 19, 23),
        }
        working = _clone(source)

        # A non-callable visitor must be rejected.
        with self.assertRaises(ValueError):
            _traverse(working, True)

        def increment_leaves(parent, key, value):
            if not isinstance(value, dict):
                parent[key] = value + 1

        _traverse(working, increment_leaves)

        expected = {
            'a': branch(3, 4, 6),
            'b': branch(8, 12, 14),
            'c': branch(18, 20, 24),
        }
        self.assertEqual(working, expected)
| tests/core/test_traverse.py | 1,452 | -*- coding: utf-8 -*- | 21 | en | 0.767281 |
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
#--------------------------------------------------------------------------
from logging import getLogger
from .onnx_model import OnnxModel
from typing import Tuple
from onnx import helper, TensorProto
logger = getLogger(__name__)


class FusionUtils:
    """Helpers shared by ONNX graph-fusion passes, mainly for inserting and
    removing Cast-to-int32 nodes around graph inputs."""

    def __init__(self, model: OnnxModel):
        self.model: OnnxModel = model

    def cast_graph_input_to_int32(self, input_name: str) -> Tuple[bool, str]:
        """Ensure the graph input `input_name` is consumed as int32.

        Returns:
            Tuple[bool, str]: ``(True, cast_output_name)`` when a Cast node was
            inserted; ``(False, input_name)`` when the input was not found or
            is already int32.
        """
        graph_input = self.model.find_graph_input(input_name)
        if graph_input is not None and graph_input.type.tensor_type.elem_type != TensorProto.INT32:
            cast_output, cast_node = self.cast_input_to_int32(input_name)
            logger.debug(f"Casted graph input {input_name} to int32")
            return True, cast_output
        logger.debug(f"Did not cast graph input {input_name} to int32: found {graph_input is not None}")
        return False, input_name

    def cast_input_to_int32(self, input_name: str):
        """Insert a ``Cast(to=int32)`` node reading `input_name` and return
        ``(cast_output_name, cast_node)``."""
        cast_output = input_name + '_int32'
        # Avoid consequent Cast nodes: if the input is itself produced by a
        # Cast, cast directly from that node's source instead of chaining.
        inputs = [input_name]
        output_name_to_node = self.model.output_name_to_node()
        if input_name in output_name_to_node:
            parent_node = output_name_to_node[input_name]
            if parent_node and parent_node.op_type == 'Cast':
                inputs = [parent_node.input[0]]
        cast_node = helper.make_node('Cast', inputs=inputs, outputs=[cast_output])
        cast_node.attribute.extend([helper.make_attribute("to", int(TensorProto.INT32))])
        self.model.add_node(cast_node)
        return cast_output, cast_node

    def remove_cast_int32(self, input_name: str):
        """Remove every ``Cast(to=int32)`` node fed by `input_name`, rewiring
        the casts' consumers to read `input_name` directly."""
        input_name_to_nodes = self.model.input_name_to_nodes()
        nodes = input_name_to_nodes[input_name]
        for node in nodes:
            if node.op_type == "Cast":
                # Only remove casts whose target type is int32.
                is_int32 = False
                for att in node.attribute:
                    if att.name == 'to' and att.i == int(TensorProto.INT32):
                        is_int32 = True
                        break
                if is_int32:
                    output_name = node.output[0]
                    self.model.remove_node(node)
                    self.model.replace_input_of_all_nodes(output_name, input_name)
| examples/fastformers/onnx_graph_optimizer/fusion_utils.py | 2,421 | ------------------------------------------------------------------------- Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License.-------------------------------------------------------------------------- Avoid consequent Cast nodes. | 267 | en | 0.360903 |
import re
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from collections import OrderedDict
__all__ = ['DenseNet', 'densenet121', 'densenet169', 'densenet201', 'densenet161']
# Download locations of the ImageNet-pretrained checkpoints for each variant.
model_urls = {
    'densenet121': 'https://download.pytorch.org/models/densenet121-a639ec97.pth',
    'densenet169': 'https://download.pytorch.org/models/densenet169-b2777c0a.pth',
    'densenet201': 'https://download.pytorch.org/models/densenet201-c1103571.pth',
    'densenet161': 'https://download.pytorch.org/models/densenet161-8d451a50.pth',
}
def densenet121(pretrained=False, **kwargs):
    r"""Densenet-121 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = DenseNet(num_init_features=64, growth_rate=32,
                     block_config=(6, 12, 24, 16), **kwargs)
    if pretrained:
        # '.'s are no longer allowed in module names, but previous _DenseLayer
        # versions used keys such as 'norm.1'; the checkpoints in model_urls
        # still contain them, so rewrite e.g. 'norm.1' -> 'norm1' before loading.
        pattern = re.compile(
            r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
        state_dict = model_zoo.load_url(model_urls['densenet121'])
        for old_key in list(state_dict.keys()):
            match = pattern.match(old_key)
            if match is None:
                continue
            state_dict[match.group(1) + match.group(2)] = state_dict.pop(old_key)
        model.load_state_dict(state_dict)
    return model
def densenet169(pretrained=False, **kwargs):
    r"""Densenet-169 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = DenseNet(num_init_features=64, growth_rate=32,
                     block_config=(6, 12, 32, 32), **kwargs)
    if pretrained:
        # '.'s are no longer allowed in module names, but previous _DenseLayer
        # versions used keys such as 'norm.1'; the checkpoints in model_urls
        # still contain them, so rewrite e.g. 'norm.1' -> 'norm1' before loading.
        pattern = re.compile(
            r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
        state_dict = model_zoo.load_url(model_urls['densenet169'])
        for old_key in list(state_dict.keys()):
            match = pattern.match(old_key)
            if match is None:
                continue
            state_dict[match.group(1) + match.group(2)] = state_dict.pop(old_key)
        model.load_state_dict(state_dict)
    return model
def densenet201(pretrained=False, **kwargs):
    r"""Densenet-201 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = DenseNet(num_init_features=64, growth_rate=32,
                     block_config=(6, 12, 48, 32), **kwargs)
    if pretrained:
        # '.'s are no longer allowed in module names, but previous _DenseLayer
        # versions used keys such as 'norm.1'; the checkpoints in model_urls
        # still contain them, so rewrite e.g. 'norm.1' -> 'norm1' before loading.
        pattern = re.compile(
            r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
        state_dict = model_zoo.load_url(model_urls['densenet201'])
        for old_key in list(state_dict.keys()):
            match = pattern.match(old_key)
            if match is None:
                continue
            state_dict[match.group(1) + match.group(2)] = state_dict.pop(old_key)
        model.load_state_dict(state_dict)
    return model
def densenet161(pretrained=False, **kwargs):
    r"""Densenet-161 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = DenseNet(num_init_features=96, growth_rate=48,
                     block_config=(6, 12, 36, 24), **kwargs)
    if pretrained:
        # '.'s are no longer allowed in module names, but previous _DenseLayer
        # versions used keys such as 'norm.1'; the checkpoints in model_urls
        # still contain them, so rewrite e.g. 'norm.1' -> 'norm1' before loading.
        pattern = re.compile(
            r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
        state_dict = model_zoo.load_url(model_urls['densenet161'])
        for old_key in list(state_dict.keys()):
            match = pattern.match(old_key)
            if match is None:
                continue
            state_dict[match.group(1) + match.group(2)] = state_dict.pop(old_key)
        model.load_state_dict(state_dict)
    return model
class _DenseLayer(nn.Sequential):
def __init__(self, num_input_features, growth_rate, bn_size, drop_rate):
super(_DenseLayer, self).__init__()
self.add_module('norm1', nn.BatchNorm2d(num_input_features)),
self.add_module('relu1', nn.ReLU(inplace=True)),
self.add_module('conv1', nn.Conv2d(num_input_features, bn_size *
growth_rate, kernel_size=1, stride=1, bias=False)),
self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate)),
self.add_module('relu2', nn.ReLU(inplace=True)),
self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate,
kernel_size=3, stride=1, padding=1, bias=False)),
self.drop_rate = drop_rate
def forward(self, x):
new_features = super(_DenseLayer, self).forward(x)
if self.drop_rate > 0:
new_features = F.dropout(new_features, p=self.drop_rate, training=self.training)
return torch.cat([x, new_features], 1)
class _DenseBlock(nn.Sequential):
    """A stack of ``num_layers`` dense layers; layer *i* receives the block
    input plus the ``growth_rate`` features produced by each earlier layer."""

    def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate):
        super(_DenseBlock, self).__init__()
        for index in range(num_layers):
            in_features = num_input_features + index * growth_rate
            self.add_module(
                'denselayer%d' % (index + 1),
                _DenseLayer(in_features, growth_rate, bn_size, drop_rate))
class _Transition(nn.Sequential):
def __init__(self, num_input_features, num_output_features):
super(_Transition, self).__init__()
self.add_module('norm', nn.BatchNorm2d(num_input_features))
self.add_module('relu', nn.ReLU(inplace=True))
self.add_module('conv', nn.Conv2d(num_input_features, num_output_features,
kernel_size=1, stride=1, bias=False))
self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))
class DenseNet(nn.Module):
    r"""Densenet-BC model class, based on
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_

    Args:
        growth_rate (int) - how many filters to add each layer (`k` in paper)
        block_config (list of 4 ints) - how many layers in each pooling block
        num_init_features (int) - the number of filters to learn in the first convolution layer
        bn_size (int) - multiplicative factor for number of bottle neck layers
          (i.e. bn_size * k features in the bottleneck layer)
        drop_rate (float) - dropout rate after each dense layer
        num_classes (int) - number of classification classes
    """

    def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),
                 num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000):
        super(DenseNet, self).__init__()

        # Stem: 7x7 strided convolution plus max pooling.
        self.features = nn.Sequential(OrderedDict([
            ('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)),
            ('norm0', nn.BatchNorm2d(num_init_features)),
            ('relu0', nn.ReLU(inplace=True)),
            ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
        ]))

        # Dense blocks, each followed by a channel-halving transition except
        # the last one.
        channels = num_init_features
        last = len(block_config) - 1
        for index, num_layers in enumerate(block_config):
            self.features.add_module(
                'denseblock%d' % (index + 1),
                _DenseBlock(num_layers=num_layers, num_input_features=channels,
                            bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate))
            channels = channels + num_layers * growth_rate
            if index != last:
                self.features.add_module(
                    'transition%d' % (index + 1),
                    _Transition(num_input_features=channels,
                                num_output_features=channels // 2))
                channels = channels // 2

        # Final batch norm before the classifier.
        self.features.add_module('norm5', nn.BatchNorm2d(channels))

        # Linear classification head.
        self.classifier = nn.Linear(channels, num_classes)

        # Official init from torch repo.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        feature_maps = self.features(x)
        activated = F.relu(feature_maps, inplace=True)
        pooled = F.avg_pool2d(activated, kernel_size=7, stride=1)
        return self.classifier(pooled.view(feature_maps.size(0), -1))
| cvlib/models/densenet.py | 9,993 | Densenet-BC model class, based on
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
growth_rate (int) - how many filters to add each layer (`k` in paper)
block_config (list of 4 ints) - how many layers in each pooling block
num_init_features (int) - the number of filters to learn in the first convolution layer
bn_size (int) - multiplicative factor for number of bottle neck layers
(i.e. bn_size * k features in the bottleneck layer)
drop_rate (float) - dropout rate after each dense layer
num_classes (int) - number of classification classes
Densenet-121 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
Densenet-161 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
Densenet-169 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
Densenet-201 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
'.'s are no longer allowed in module names, but pervious _DenseLayer has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'. They are also in the checkpoints in model_urls. This pattern is used to find such keys. '.'s are no longer allowed in module names, but pervious _DenseLayer has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'. They are also in the checkpoints in model_urls. This pattern is used to find such keys. '.'s are no longer allowed in module names, but pervious _DenseLayer has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'. They are also in the checkpoints in model_urls. This pattern is used to find such keys. '.'s are no longer allowed in module names, but pervious _DenseLayer has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'. They are also in the checkpoints in model_urls. This pattern is used to find such keys. First convolution Each denseblock Final batch norm Linear layer Official init from torch repo. | 2,368 | en | 0.782025 |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: storyboard_node.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from container_sdk.model.next_builder import storyboard_brick_pb2 as container__sdk_dot_model_dot_next__builder_dot_storyboard__brick__pb2
from container_sdk.model.next_builder import storyboard_route_pb2 as container__sdk_dot_model_dot_next__builder_dot_storyboard__route__pb2
from container_sdk.model.next_builder import micro_app_project_pb2 as container__sdk_dot_model_dot_next__builder_dot_micro__app__project__pb2
# File-level descriptor for storyboard_node.proto (package "next_builder").
# serialized_pb is the wire-encoded FileDescriptorProto emitted by protoc;
# never edit it by hand.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='storyboard_node.proto',
  package='next_builder',
  syntax='proto3',
  serialized_options=_b('ZFgo.easyops.local/contracts/protorepo-models/easyops/model/next_builder'),
  serialized_pb=_b('\n\x15storyboard_node.proto\x12\x0cnext_builder\x1a\x37\x63ontainer_sdk/model/next_builder/storyboard_brick.proto\x1a\x37\x63ontainer_sdk/model/next_builder/storyboard_route.proto\x1a\x38\x63ontainer_sdk/model/next_builder/micro_app_project.proto\"\xe8\x02\n\x0eStoryboardNode\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\r\n\x05\x61lias\x18\x02 \x01(\t\x12\r\n\x05\x61ppId\x18\x03 \x01(\t\x12\n\n\x02id\x18\x04 \x01(\t\x12\x12\n\nmountPoint\x18\x05 \x01(\t\x12\x0c\n\x04sort\x18\x06 \x01(\x05\x12\x0c\n\x04type\x18\x07 \x01(\t\x12,\n\x05\x62rick\x18\x08 \x01(\x0b\x32\x1d.next_builder.StoryboardBrick\x12,\n\x05route\x18\t \x01(\x0b\x32\x1d.next_builder.StoryboardRoute\x12.\n\x07project\x18\n \x01(\x0b\x32\x1d.next_builder.MicroAppProject\x12,\n\x06parent\x18\x0b \x01(\x0b\x32\x1c.next_builder.StoryboardNode\x12.\n\x08\x63hildren\x18\x0c \x03(\x0b\x32\x1c.next_builder.StoryboardNodeBHZFgo.easyops.local/contracts/protorepo-models/easyops/model/next_builderb\x06proto3')
  ,
  dependencies=[container__sdk_dot_model_dot_next__builder_dot_storyboard__brick__pb2.DESCRIPTOR,container__sdk_dot_model_dot_next__builder_dot_storyboard__route__pb2.DESCRIPTOR,container__sdk_dot_model_dot_next__builder_dot_micro__app__project__pb2.DESCRIPTOR,])
# Message descriptor for next_builder.StoryboardNode. Fields 1-7 are scalar
# (string/int32); fields 8-12 are message-typed and get their message_type
# wired up after all descriptors exist (see below).
_STORYBOARDNODE = _descriptor.Descriptor(
  name='StoryboardNode',
  full_name='next_builder.StoryboardNode',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='instanceId', full_name='next_builder.StoryboardNode.instanceId', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='alias', full_name='next_builder.StoryboardNode.alias', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='appId', full_name='next_builder.StoryboardNode.appId', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='id', full_name='next_builder.StoryboardNode.id', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='mountPoint', full_name='next_builder.StoryboardNode.mountPoint', index=4,
      number=5, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='sort', full_name='next_builder.StoryboardNode.sort', index=5,
      number=6, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='type', full_name='next_builder.StoryboardNode.type', index=6,
      number=7, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='brick', full_name='next_builder.StoryboardNode.brick', index=7,
      number=8, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='route', full_name='next_builder.StoryboardNode.route', index=8,
      number=9, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='project', full_name='next_builder.StoryboardNode.project', index=9,
      number=10, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # 'parent' and 'children' are self-referential StoryboardNode links;
    # 'children' is repeated (label=3).
    _descriptor.FieldDescriptor(
      name='parent', full_name='next_builder.StoryboardNode.parent', index=10,
      number=11, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='children', full_name='next_builder.StoryboardNode.children', index=11,
      number=12, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=212,
  serialized_end=572,
)
# Wire up the message-typed fields now that every descriptor exists; 'parent'
# and 'children' point back at StoryboardNode itself.
_STORYBOARDNODE.fields_by_name['brick'].message_type = container__sdk_dot_model_dot_next__builder_dot_storyboard__brick__pb2._STORYBOARDBRICK
_STORYBOARDNODE.fields_by_name['route'].message_type = container__sdk_dot_model_dot_next__builder_dot_storyboard__route__pb2._STORYBOARDROUTE
_STORYBOARDNODE.fields_by_name['project'].message_type = container__sdk_dot_model_dot_next__builder_dot_micro__app__project__pb2._MICROAPPPROJECT
_STORYBOARDNODE.fields_by_name['parent'].message_type = _STORYBOARDNODE
_STORYBOARDNODE.fields_by_name['children'].message_type = _STORYBOARDNODE
DESCRIPTOR.message_types_by_name['StoryboardNode'] = _STORYBOARDNODE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Concrete message class generated from the descriptor by the proto metaclass.
StoryboardNode = _reflection.GeneratedProtocolMessageType('StoryboardNode', (_message.Message,), {
  'DESCRIPTOR' : _STORYBOARDNODE,
  '__module__' : 'storyboard_node_pb2'
  # @@protoc_insertion_point(class_scope:next_builder.StoryboardNode)
  })
_sym_db.RegisterMessage(StoryboardNode)

DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| container_sdk/model/next_builder/storyboard_node_pb2.py | 8,372 | -*- coding: utf-8 -*- Generated by the protocol buffer compiler. DO NOT EDIT! source: storyboard_node.proto @@protoc_insertion_point(imports) @@protoc_insertion_point(class_scope:next_builder.StoryboardNode) @@protoc_insertion_point(module_scope) | 247 | en | 0.509459 |
#!/usr/bin/env python3
import asyncio
import logging
from collections import defaultdict
from functools import partial
from box import Box
_l = logging.getLogger(__name__)

# Process-wide registries shared by every Component:
_instances = dict()  # (type, id) -> Component; enforces unique IDs per type
_events = defaultdict(asyncio.Event)  # event id -> asyncio.Event (set == fired)
_event_queues = list()  # queues that receive a copy of every emitted event
_event_callbacks = defaultdict(list)  # event id -> one-shot async callbacks
class Component:
"""A stateful element in a workflow that can be configured, run, and uniquely named."""
    def __init__(self, *args, id=None, workflow=None, parent=None, logger=_l, **kwargs):
        """Create a component, optionally registering it under a unique ID.

        Args:
            id: optional component name; uniqueness is enforced per concrete type.
            workflow: owning workflow; defaults to the component itself when
                logging the initial settings below.
            parent: parent component; this instance is appended to its children.
            logger: logger instance (module logger by default).
            **kwargs: forwarded to :meth:`configure`.

        Raises:
            ValueError: if another instance of the same type already uses *id*.
        """
        self.id = id
        if id:
            # Register globally so (type, id) pairs stay unique process-wide.
            key = (type(self), id)
            if key in _instances:
                raise ValueError(
                    f'{key[0].__name__} with ID "{id}" already exists: {_instances[key]}')
            _instances[key] = self
        self.workflow = workflow
        self.parent = parent
        self.children = list()
        if parent:
            parent.children.append(self)
        self.logger = logger
        self.loop = asyncio.get_event_loop()
        self._event_lock = set()  # statuses currently being emitted (re-entrancy guard)
        self._debug = {'events'}  # debug categories enabled for this instance
        # configure() may return None; fall back to an empty mapping.
        self._settings = Box(self.configure(**kwargs) or dict())
        if not workflow:
            workflow = self
        # Log the redacted settings; safe_settings lets subclasses hide secrets.
        # NOTE(review): self.debug is defined outside this view — presumably a
        # thin wrapper around self.logger; confirm.
        settings = [f'{k}={v}' for k, v in workflow.safe_settings(self._settings).items()]
        self.debug(f'Initialized {" ".join(settings)}')
def configure(self, **settings):
return settings
def settings(self, **override):
return Box(self._settings, **override)
def safe_settings(self, settings):
return settings
@property
def type(self):
return type(self).__name__
@property
def status(self):
return getattr(self, '_status', None)
    @status.setter
    def status(self, status):
        # Emit only once per status, and guard against re-entrant emission of
        # the same status while its callbacks are still running.
        if not (self.hasstatus(status) or status in self._event_lock):
            self._event_lock.add(status)
            try:
                self._status_setter(status)
            finally:
                self._event_lock.remove(status)

    # Statuses that only fire once every Component child has reached them too.
    _dependent_statuses = {'processing-finished', 'finished', 'exited'}
def _status_setter(self, status):
event = status if isinstance(status, ComponentEvent) else ComponentEvent(status, self)
if event.status in self._dependent_statuses:
children = set(filter(lambda c: isinstance(c, Component), self.children))
ready = set(filter(lambda c: c.hasstatus(event.status), children))
if len(children) > len(ready):
if 'events' in self._debug:
pending = ", ".join(c.id for c in children.difference(ready))
self.debug(f'Status "{event.status}" waiting on {pending}')
return
if self.hasstatus('aborted') and event.status != 'exited':
if 'events' in self._debug:
self.debug(f'Ignoring status "{event.status}" because the component is '
'in aborted state')
return
# event.id = self._fqevent(status)
if 'events' in self._debug:
self.debug(f'Emitting event "{event.id}"')
self._status = event.status
_events[event.id].set()
for queue in _event_queues:
queue.put_nowait(event)
if self.parent and event.status != 'aborted' and not isinstance(self, LocalEvents):
self.parent.status = event.status
for callback in _event_callbacks[event.id]:
asyncio.ensure_future(callback())
_event_callbacks[event.id].clear()
def hasstatus(self, status):
"""Return `True` if given status was set."""
if isinstance(status, ComponentEvent):
event = status.id
elif ':' in status:
event = status
else:
event = ComponentEvent(status, self).id
return _events[event].is_set()
async def waiton(self, event):
if 'events' in self._debug:
self.debug(f'Waiting on event "{event}"')
await _events[event].wait()
if 'events' in self._debug:
self.debug(f'Received event "{event}"')
@property
def running(self):
"""Return `True` if in one of the running states."""
if not self.stopped:
for status in ['started', 'running']:
if self.hasstatus(status):
return True
@property
def stopped(self):
"""Return `True` if in one of the stopped states."""
for status in ['aborted', 'finished']:
if self.hasstatus(status):
return True
@property
def aborted(self):
"""Return `True` if the aborted event was emitted."""
return self.hasstatus('aborted')
def start(self):
self.status = 'started'
return self.run()
def stop(self):
self.debug('Stopping')
def abort(self, exception=None):
if self.hasstatus('aborted'):
return
self.status = ComponentEvent('aborted', self, exception)
for child in self.children:
if child.settings().get('error-propagation') in ('none', 'up'):
if 'events' in self._debug:
self.debug(f'Suppressing error propagation to child {child.id}')
elif not child.hasstatus('aborted'):
if 'events' in self._debug:
self.debug(f'Propagating error to child {child.id}')
child.abort()
if self.parent:
if self.parent.settings().get('error-propagation') in ('none', 'down'):
if 'events' in self._debug:
self.debug(f'Suppressing error propagation to parent {self.parent.id}')
elif not self.parent.hasstatus('aborted'):
if 'events' in self._debug:
self.debug(f'Propagating error to parent {self.parent.id}')
self.parent.abort(exception)
def __getattr__(self, name):
if name not in ('critical', 'error', 'warning', 'info', 'debug', 'exception'):
raise AttributeError(f"'{self.type}' object has no attribute '{name}'")
return partial(self._proxied_logging_method, name)
def _proxied_logging_method(self, method, *args, **kwargs):
if method == 'debug':
if logging in (self.workflow or self).settings():
debug = (self.workflow or self).settings().logging.debug
else:
debug = []
if not ('all' in debug or self.type in debug or (self.id in debug)):
return lambda *a, **kw: None
return getattr(self.logger, method)(*self._log_formatted(*args), **kwargs)
def _log_formatted(self, msg, *args):
"""Return the msg prefixed with this component's ID and type."""
prefix = f'{self.id} ' if self.id else ''
msg = f'{prefix}({self.type}) {msg}'
return (msg,) + args
async def run(self):
self.status = 'running'
async def try_while_running(self, callable, timeout=0.5):
"""Return result of `callable`, or raise `ComponentInterrupted` if component is stopped."""
while self.running:
coro = callable()
try:
return await asyncio.wait_for(coro, timeout)
except asyncio.TimeoutError:
pass
raise ComponentInterrupted
class ComponentEvent:
    """Value object pairing a status string with the component that emitted it."""

    def __init__(self, status, component, exception=None):
        self.status = status
        self.component = component
        self.exception = exception

    @property
    def id(self):
        """Return a fully qualified ID string representing this event."""
        return ':'.join((str(self.component.id), self.status))
class LocalEvents:
    """Marker mixin: components inheriting this keep their status events local,
    i.e. ``Component._status_setter`` does not bubble their statuses to the parent."""
    pass
class ComponentInterrupted(Exception):
    """Raised by ``Component.try_while_running`` when the component leaves the
    running state before the awaited callable completes."""
    pass
def get_event_listener():
    """Return a new `Queue` object that will see all events."""
    listener = asyncio.Queue()
    _event_queues.append(listener)
    return listener
def add_event_callback(event, callable, *args, **kwargs):
    """Register a callback that will be called upon the given event."""
    callback = partial(callable, *args, **kwargs)
    _event_callbacks[event].append(callback)
| pipekit/component.py | 8,113 | A stateful element in a workflow that can be configured, run, and uniquely named.
Return the msg prefixed with this component's ID and type.
Return `True` if the aborted event was emitted.
Register a callback that will be called upon the given event.
Return a new `Queue` object that will see all events.
Return `True` if given status was set.
Return a fully qualified ID string representing this event.
Return `True` if in one of the running states.
Return `True` if in one of the stopped states.
!/usr/bin/env python3 event.id = self._fqevent(status) | 553 | en | 0.841527 |
# sqlite/base.py
# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: sqlite
:name: SQLite
:full_support: 3.21, 3.28+
:normal_support: 3.12+
:best_effort: 3.7.16+
.. _sqlite_datetime:
Date and Time Types
-------------------
SQLite does not have built-in DATE, TIME, or DATETIME types, and pysqlite does
not provide out of the box functionality for translating values between Python
`datetime` objects and a SQLite-supported format. SQLAlchemy's own
:class:`~sqlalchemy.types.DateTime` and related types provide date formatting
and parsing functionality when SQLite is used. The implementation classes are
:class:`_sqlite.DATETIME`, :class:`_sqlite.DATE` and :class:`_sqlite.TIME`.
These types represent dates and times as ISO formatted strings, which also
nicely support ordering. There's no reliance on typical "libc" internals for
these functions so historical dates are fully supported.
Ensuring Text affinity
^^^^^^^^^^^^^^^^^^^^^^
The DDL rendered for these types is the standard ``DATE``, ``TIME``
and ``DATETIME`` indicators. However, custom storage formats can also be
applied to these types. When the
storage format is detected as containing no alpha characters, the DDL for
these types is rendered as ``DATE_CHAR``, ``TIME_CHAR``, and ``DATETIME_CHAR``,
so that the column continues to have textual affinity.
.. seealso::
`Type Affinity <https://www.sqlite.org/datatype3.html#affinity>`_ -
in the SQLite documentation
.. _sqlite_autoincrement:
SQLite Auto Incrementing Behavior
----------------------------------
Background on SQLite's autoincrement is at: https://sqlite.org/autoinc.html
Key concepts:
* SQLite has an implicit "auto increment" feature that takes place for any
non-composite primary-key column that is specifically created using
"INTEGER PRIMARY KEY" for the type + primary key.
* SQLite also has an explicit "AUTOINCREMENT" keyword, that is **not**
equivalent to the implicit autoincrement feature; this keyword is not
recommended for general use. SQLAlchemy does not render this keyword
unless a special SQLite-specific directive is used (see below). However,
it still requires that the column's type is named "INTEGER".
Using the AUTOINCREMENT Keyword
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To specifically render the AUTOINCREMENT keyword on the primary key column
when rendering DDL, add the flag ``sqlite_autoincrement=True`` to the Table
construct::
Table('sometable', metadata,
Column('id', Integer, primary_key=True),
sqlite_autoincrement=True)
Allowing autoincrement behavior SQLAlchemy types other than Integer/INTEGER
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
SQLite's typing model is based on naming conventions. Among other things, this
means that any type name which contains the substring ``"INT"`` will be
determined to be of "integer affinity". A type named ``"BIGINT"``,
``"SPECIAL_INT"`` or even ``"XYZINTQPR"``, will be considered by SQLite to be
of "integer" affinity. However, **the SQLite autoincrement feature, whether
implicitly or explicitly enabled, requires that the name of the column's type
is exactly the string "INTEGER"**. Therefore, if an application uses a type
like :class:`.BigInteger` for a primary key, on SQLite this type will need to
be rendered as the name ``"INTEGER"`` when emitting the initial ``CREATE
TABLE`` statement in order for the autoincrement behavior to be available.
One approach to achieve this is to use :class:`.Integer` on SQLite
only using :meth:`.TypeEngine.with_variant`::
table = Table(
"my_table", metadata,
Column("id", BigInteger().with_variant(Integer, "sqlite"), primary_key=True)
)
Another is to use a subclass of :class:`.BigInteger` that overrides its DDL
name to be ``INTEGER`` when compiled against SQLite::
from sqlalchemy import BigInteger
from sqlalchemy.ext.compiler import compiles
class SLBigInteger(BigInteger):
pass
@compiles(SLBigInteger, 'sqlite')
def bi_c(element, compiler, **kw):
return "INTEGER"
@compiles(SLBigInteger)
def bi_c(element, compiler, **kw):
return compiler.visit_BIGINT(element, **kw)
table = Table(
"my_table", metadata,
Column("id", SLBigInteger(), primary_key=True)
)
.. seealso::
:meth:`.TypeEngine.with_variant`
:ref:`sqlalchemy.ext.compiler_toplevel`
`Datatypes In SQLite Version 3 <https://sqlite.org/datatype3.html>`_
.. _sqlite_concurrency:
Database Locking Behavior / Concurrency
---------------------------------------
SQLite is not designed for a high level of write concurrency. The database
itself, being a file, is locked completely during write operations within
transactions, meaning exactly one "connection" (in reality a file handle)
has exclusive access to the database during this period - all other
"connections" will be blocked during this time.
The Python DBAPI specification also calls for a connection model that is
always in a transaction; there is no ``connection.begin()`` method,
only ``connection.commit()`` and ``connection.rollback()``, upon which a
new transaction is to be begun immediately. This may seem to imply
that the SQLite driver would in theory allow only a single filehandle on a
particular database file at any time; however, there are several
factors both within SQLite itself as well as within the pysqlite driver
which loosen this restriction significantly.
However, no matter what locking modes are used, SQLite will still always
lock the database file once a transaction is started and DML (e.g. INSERT,
UPDATE, DELETE) has at least been emitted, and this will block
other transactions at least at the point that they also attempt to emit DML.
By default, the length of time on this block is very short before it times out
with an error.
This behavior becomes more critical when used in conjunction with the
SQLAlchemy ORM. SQLAlchemy's :class:`.Session` object by default runs
within a transaction, and with its autoflush model, may emit DML preceding
any SELECT statement. This may lead to a SQLite database that locks
more quickly than is expected. The locking mode of SQLite and the pysqlite
driver can be manipulated to some degree, however it should be noted that
achieving a high degree of write-concurrency with SQLite is a losing battle.
For more information on SQLite's lack of write concurrency by design, please
see
`Situations Where Another RDBMS May Work Better - High Concurrency
<https://www.sqlite.org/whentouse.html>`_ near the bottom of the page.
The following subsections introduce areas that are impacted by SQLite's
file-based architecture and additionally will usually require workarounds to
work when using the pysqlite driver.
.. _sqlite_isolation_level:
Transaction Isolation Level / Autocommit
----------------------------------------
SQLite supports "transaction isolation" in a non-standard way, along two
axes. One is that of the
`PRAGMA read_uncommitted <https://www.sqlite.org/pragma.html#pragma_read_uncommitted>`_
instruction. This setting can essentially switch SQLite between its
default mode of ``SERIALIZABLE`` isolation, and a "dirty read" isolation
mode normally referred to as ``READ UNCOMMITTED``.
SQLAlchemy ties into this PRAGMA statement using the
:paramref:`_sa.create_engine.isolation_level` parameter of
:func:`_sa.create_engine`.
Valid values for this parameter when used with SQLite are ``"SERIALIZABLE"``
and ``"READ UNCOMMITTED"`` corresponding to a value of 0 and 1, respectively.
SQLite defaults to ``SERIALIZABLE``, however its behavior is impacted by
the pysqlite driver's default behavior.
When using the pysqlite driver, the ``"AUTOCOMMIT"`` isolation level is also
available, which will alter the pysqlite connection using the ``.isolation_level``
attribute on the DBAPI connection and set it to None for the duration
of the setting.
.. versionadded:: 1.3.16 added support for SQLite AUTOCOMMIT isolation level
when using the pysqlite / sqlite3 SQLite driver.
The other axis along which SQLite's transactional locking is impacted is
via the nature of the ``BEGIN`` statement used. The three varieties
are "deferred", "immediate", and "exclusive", as described at
`BEGIN TRANSACTION <https://sqlite.org/lang_transaction.html>`_. A straight
``BEGIN`` statement uses the "deferred" mode, where the database file is
not locked until the first read or write operation, and read access remains
open to other transactions until the first write operation. But again,
it is critical to note that the pysqlite driver interferes with this behavior
by *not even emitting BEGIN* until the first write operation.
.. warning::
SQLite's transactional scope is impacted by unresolved
issues in the pysqlite driver, which defers BEGIN statements to a greater
degree than is often feasible. See the section :ref:`pysqlite_serializable`
for techniques to work around this behavior.
.. seealso::
:ref:`dbapi_autocommit`
SAVEPOINT Support
----------------------------
SQLite supports SAVEPOINTs, which only function once a transaction is
begun. SQLAlchemy's SAVEPOINT support is available using the
:meth:`_engine.Connection.begin_nested` method at the Core level, and
:meth:`.Session.begin_nested` at the ORM level. However, SAVEPOINTs
won't work at all with pysqlite unless workarounds are taken.
.. warning::
SQLite's SAVEPOINT feature is impacted by unresolved
issues in the pysqlite driver, which defers BEGIN statements to a greater
degree than is often feasible. See the section :ref:`pysqlite_serializable`
for techniques to work around this behavior.
Transactional DDL
----------------------------
The SQLite database supports transactional :term:`DDL` as well.
In this case, the pysqlite driver is not only failing to start transactions,
it also is ending any existing transaction when DDL is detected, so again,
workarounds are required.
.. warning::
SQLite's transactional DDL is impacted by unresolved issues
in the pysqlite driver, which fails to emit BEGIN and additionally
forces a COMMIT to cancel any transaction when DDL is encountered.
See the section :ref:`pysqlite_serializable`
for techniques to work around this behavior.
.. _sqlite_foreign_keys:
Foreign Key Support
-------------------
SQLite supports FOREIGN KEY syntax when emitting CREATE statements for tables,
however by default these constraints have no effect on the operation of the
table.
Constraint checking on SQLite has three prerequisites:
* At least version 3.6.19 of SQLite must be in use
* The SQLite library must be compiled *without* the SQLITE_OMIT_FOREIGN_KEY
or SQLITE_OMIT_TRIGGER symbols enabled.
* The ``PRAGMA foreign_keys = ON`` statement must be emitted on all
connections before use -- including the initial call to
:meth:`sqlalchemy.schema.MetaData.create_all`.
SQLAlchemy allows for the ``PRAGMA`` statement to be emitted automatically for
new connections through the usage of events::
from sqlalchemy.engine import Engine
from sqlalchemy import event
@event.listens_for(Engine, "connect")
def set_sqlite_pragma(dbapi_connection, connection_record):
cursor = dbapi_connection.cursor()
cursor.execute("PRAGMA foreign_keys=ON")
cursor.close()
.. warning::
When SQLite foreign keys are enabled, it is **not possible**
to emit CREATE or DROP statements for tables that contain
mutually-dependent foreign key constraints;
to emit the DDL for these tables requires that ALTER TABLE be used to
create or drop these constraints separately, for which SQLite has
no support.
.. seealso::
`SQLite Foreign Key Support <https://www.sqlite.org/foreignkeys.html>`_
- on the SQLite web site.
:ref:`event_toplevel` - SQLAlchemy event API.
:ref:`use_alter` - more information on SQLAlchemy's facilities for handling
mutually-dependent foreign key constraints.
.. _sqlite_on_conflict_ddl:
ON CONFLICT support for constraints
-----------------------------------
.. seealso:: This section describes the :term:`DDL` version of "ON CONFLICT" for
SQLite, which occurs within a CREATE TABLE statement. For "ON CONFLICT" as
applied to an INSERT statement, see :ref:`sqlite_on_conflict_insert`.
SQLite supports a non-standard DDL clause known as ON CONFLICT which can be applied
to primary key, unique, check, and not null constraints. In DDL, it is
rendered either within the "CONSTRAINT" clause or within the column definition
itself depending on the location of the target constraint. To render this
clause within DDL, the extension parameter ``sqlite_on_conflict`` can be
specified with a string conflict resolution algorithm within the
:class:`.PrimaryKeyConstraint`, :class:`.UniqueConstraint`,
:class:`.CheckConstraint` objects. Within the :class:`_schema.Column` object,
there
are individual parameters ``sqlite_on_conflict_not_null``,
``sqlite_on_conflict_primary_key``, ``sqlite_on_conflict_unique`` which each
correspond to the three types of relevant constraint types that can be
indicated from a :class:`_schema.Column` object.
.. seealso::
`ON CONFLICT <https://www.sqlite.org/lang_conflict.html>`_ - in the SQLite
documentation
.. versionadded:: 1.3
The ``sqlite_on_conflict`` parameters accept a string argument which is just
the resolution name to be chosen, which on SQLite can be one of ROLLBACK,
ABORT, FAIL, IGNORE, and REPLACE. For example, to add a UNIQUE constraint
that specifies the IGNORE algorithm::
some_table = Table(
'some_table', metadata,
Column('id', Integer, primary_key=True),
Column('data', Integer),
UniqueConstraint('id', 'data', sqlite_on_conflict='IGNORE')
)
The above renders CREATE TABLE DDL as::
CREATE TABLE some_table (
id INTEGER NOT NULL,
data INTEGER,
PRIMARY KEY (id),
UNIQUE (id, data) ON CONFLICT IGNORE
)
When using the :paramref:`_schema.Column.unique`
flag to add a UNIQUE constraint
to a single column, the ``sqlite_on_conflict_unique`` parameter can
be added to the :class:`_schema.Column` as well, which will be added to the
UNIQUE constraint in the DDL::
some_table = Table(
'some_table', metadata,
Column('id', Integer, primary_key=True),
Column('data', Integer, unique=True,
sqlite_on_conflict_unique='IGNORE')
)
rendering::
CREATE TABLE some_table (
id INTEGER NOT NULL,
data INTEGER,
PRIMARY KEY (id),
UNIQUE (data) ON CONFLICT IGNORE
)
To apply the FAIL algorithm for a NOT NULL constraint,
``sqlite_on_conflict_not_null`` is used::
some_table = Table(
'some_table', metadata,
Column('id', Integer, primary_key=True),
Column('data', Integer, nullable=False,
sqlite_on_conflict_not_null='FAIL')
)
this renders the column inline ON CONFLICT phrase::
CREATE TABLE some_table (
id INTEGER NOT NULL,
data INTEGER NOT NULL ON CONFLICT FAIL,
PRIMARY KEY (id)
)
Similarly, for an inline primary key, use ``sqlite_on_conflict_primary_key``::
some_table = Table(
'some_table', metadata,
Column('id', Integer, primary_key=True,
sqlite_on_conflict_primary_key='FAIL')
)
SQLAlchemy renders the PRIMARY KEY constraint separately, so the conflict
resolution algorithm is applied to the constraint itself::
CREATE TABLE some_table (
id INTEGER NOT NULL,
PRIMARY KEY (id) ON CONFLICT FAIL
)
.. _sqlite_on_conflict_insert:
INSERT...ON CONFLICT (Upsert)
-----------------------------------
.. seealso:: This section describes the :term:`DML` version of "ON CONFLICT" for
SQLite, which occurs within an INSERT statement. For "ON CONFLICT" as
applied to a CREATE TABLE statement, see :ref:`sqlite_on_conflict_ddl`.
From version 3.24.0 onwards, SQLite supports "upserts" (update or insert)
of rows into a table via the ``ON CONFLICT`` clause of the ``INSERT``
statement. A candidate row will only be inserted if that row does not violate
any unique or primary key constraints. In the case of a unique constraint violation, a
secondary action can occur which can be either "DO UPDATE", indicating that
the data in the target row should be updated, or "DO NOTHING", which indicates
to silently skip this row.
Conflicts are determined using columns that are part of existing unique
constraints and indexes. These constraints are identified by stating the
columns and conditions that comprise the indexes.
SQLAlchemy provides ``ON CONFLICT`` support via the SQLite-specific
:func:`_sqlite.insert()` function, which provides
the generative methods :meth:`_sqlite.Insert.on_conflict_do_update`
and :meth:`_sqlite.Insert.on_conflict_do_nothing`:
.. sourcecode:: pycon+sql
>>> from sqlalchemy.dialects.sqlite import insert
>>> insert_stmt = insert(my_table).values(
... id='some_existing_id',
... data='inserted value')
>>> do_update_stmt = insert_stmt.on_conflict_do_update(
... index_elements=['id'],
... set_=dict(data='updated value')
... )
>>> print(do_update_stmt)
{opensql}INSERT INTO my_table (id, data) VALUES (?, ?)
ON CONFLICT (id) DO UPDATE SET data = ?{stop}
>>> do_nothing_stmt = insert_stmt.on_conflict_do_nothing(
... index_elements=['id']
... )
>>> print(do_nothing_stmt)
{opensql}INSERT INTO my_table (id, data) VALUES (?, ?)
ON CONFLICT (id) DO NOTHING
.. versionadded:: 1.4
.. seealso::
`Upsert
<https://sqlite.org/lang_UPSERT.html>`_
- in the SQLite documentation.
Specifying the Target
^^^^^^^^^^^^^^^^^^^^^
Both methods supply the "target" of the conflict using column inference:
* The :paramref:`_sqlite.Insert.on_conflict_do_update.index_elements` argument
specifies a sequence containing string column names, :class:`_schema.Column`
objects, and/or SQL expression elements, which would identify a unique index
or unique constraint.
* When using :paramref:`_sqlite.Insert.on_conflict_do_update.index_elements`
to infer an index, a partial index can be inferred by also specifying the
:paramref:`_sqlite.Insert.on_conflict_do_update.index_where` parameter:
.. sourcecode:: pycon+sql
>>> stmt = insert(my_table).values(user_email='a@b.com', data='inserted data')
>>> do_update_stmt = stmt.on_conflict_do_update(
... index_elements=[my_table.c.user_email],
... index_where=my_table.c.user_email.like('%@gmail.com'),
... set_=dict(data=stmt.excluded.data)
... )
>>> print(do_update_stmt)
{opensql}INSERT INTO my_table (data, user_email) VALUES (?, ?)
ON CONFLICT (user_email)
WHERE user_email LIKE '%@gmail.com'
DO UPDATE SET data = excluded.data
>>>
The SET Clause
^^^^^^^^^^^^^^^
``ON CONFLICT...DO UPDATE`` is used to perform an update of the already
existing row, using any combination of new values as well as values
from the proposed insertion. These values are specified using the
:paramref:`_sqlite.Insert.on_conflict_do_update.set_` parameter. This
parameter accepts a dictionary which consists of direct values
for UPDATE:
.. sourcecode:: pycon+sql
>>> stmt = insert(my_table).values(id='some_id', data='inserted value')
>>> do_update_stmt = stmt.on_conflict_do_update(
... index_elements=['id'],
... set_=dict(data='updated value')
... )
>>> print(do_update_stmt)
{opensql}INSERT INTO my_table (id, data) VALUES (?, ?)
ON CONFLICT (id) DO UPDATE SET data = ?
.. warning::
The :meth:`_sqlite.Insert.on_conflict_do_update` method does **not** take
into account Python-side default UPDATE values or generation functions,
e.g. those specified using :paramref:`_schema.Column.onupdate`. These
values will not be exercised for an ON CONFLICT style of UPDATE, unless
they are manually specified in the
:paramref:`_sqlite.Insert.on_conflict_do_update.set_` dictionary.
Updating using the Excluded INSERT Values
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In order to refer to the proposed insertion row, the special alias
:attr:`~.sqlite.Insert.excluded` is available as an attribute on
the :class:`_sqlite.Insert` object; this object creates an "excluded." prefix
on a column, that informs the DO UPDATE to update the row with the value that
would have been inserted had the constraint not failed:
.. sourcecode:: pycon+sql
>>> stmt = insert(my_table).values(
... id='some_id',
... data='inserted value',
... author='jlh'
... )
>>> do_update_stmt = stmt.on_conflict_do_update(
... index_elements=['id'],
... set_=dict(data='updated value', author=stmt.excluded.author)
... )
>>> print(do_update_stmt)
{opensql}INSERT INTO my_table (id, data, author) VALUES (?, ?, ?)
ON CONFLICT (id) DO UPDATE SET data = ?, author = excluded.author
Additional WHERE Criteria
^^^^^^^^^^^^^^^^^^^^^^^^^
The :meth:`_sqlite.Insert.on_conflict_do_update` method also accepts
a WHERE clause using the :paramref:`_sqlite.Insert.on_conflict_do_update.where`
parameter, which will limit those rows which receive an UPDATE:
.. sourcecode:: pycon+sql
>>> stmt = insert(my_table).values(
... id='some_id',
... data='inserted value',
... author='jlh'
... )
>>> on_update_stmt = stmt.on_conflict_do_update(
... index_elements=['id'],
... set_=dict(data='updated value', author=stmt.excluded.author),
... where=(my_table.c.status == 2)
... )
>>> print(on_update_stmt)
{opensql}INSERT INTO my_table (id, data, author) VALUES (?, ?, ?)
ON CONFLICT (id) DO UPDATE SET data = ?, author = excluded.author
WHERE my_table.status = ?
Skipping Rows with DO NOTHING
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
``ON CONFLICT`` may be used to skip inserting a row entirely
if any conflict with a unique constraint occurs; below this is illustrated
using the :meth:`_sqlite.Insert.on_conflict_do_nothing` method:
.. sourcecode:: pycon+sql
>>> stmt = insert(my_table).values(id='some_id', data='inserted value')
>>> stmt = stmt.on_conflict_do_nothing(index_elements=['id'])
>>> print(stmt)
{opensql}INSERT INTO my_table (id, data) VALUES (?, ?) ON CONFLICT (id) DO NOTHING
If ``DO NOTHING`` is used without specifying any columns or constraint,
it has the effect of skipping the INSERT for any unique violation which
occurs:
.. sourcecode:: pycon+sql
>>> stmt = insert(my_table).values(id='some_id', data='inserted value')
>>> stmt = stmt.on_conflict_do_nothing()
>>> print(stmt)
{opensql}INSERT INTO my_table (id, data) VALUES (?, ?) ON CONFLICT DO NOTHING
.. _sqlite_type_reflection:
Type Reflection
---------------
SQLite types are unlike those of most other database backends, in that
the string name of the type usually does not correspond to a "type" in a
one-to-one fashion. Instead, SQLite links per-column typing behavior
to one of five so-called "type affinities" based on a string matching
pattern for the type.
SQLAlchemy's reflection process, when inspecting types, uses a simple
lookup table to link the keywords returned to provided SQLAlchemy types.
This lookup table is present within the SQLite dialect as it is for all
other dialects. However, the SQLite dialect has a different "fallback"
routine for when a particular type name is not located in the lookup map;
it instead implements the SQLite "type affinity" scheme located at
https://www.sqlite.org/datatype3.html section 2.1.
The provided typemap will make direct associations from an exact string
name match for the following types:
:class:`_types.BIGINT`, :class:`_types.BLOB`,
:class:`_types.BOOLEAN`, :class:`_types.BOOLEAN`,
:class:`_types.CHAR`, :class:`_types.DATE`,
:class:`_types.DATETIME`, :class:`_types.FLOAT`,
:class:`_types.DECIMAL`, :class:`_types.FLOAT`,
:class:`_types.INTEGER`, :class:`_types.INTEGER`,
:class:`_types.NUMERIC`, :class:`_types.REAL`,
:class:`_types.SMALLINT`, :class:`_types.TEXT`,
:class:`_types.TIME`, :class:`_types.TIMESTAMP`,
:class:`_types.VARCHAR`, :class:`_types.NVARCHAR`,
:class:`_types.NCHAR`
When a type name does not match one of the above types, the "type affinity"
lookup is used instead:
* :class:`_types.INTEGER` is returned if the type name includes the
string ``INT``
* :class:`_types.TEXT` is returned if the type name includes the
string ``CHAR``, ``CLOB`` or ``TEXT``
* :class:`_types.NullType` is returned if the type name includes the
string ``BLOB``
* :class:`_types.REAL` is returned if the type name includes the string
``REAL``, ``FLOA`` or ``DOUB``.
* Otherwise, the :class:`_types.NUMERIC` type is used.
.. versionadded:: 0.9.3 Support for SQLite type affinity rules when reflecting
columns.
.. _sqlite_partial_index:
Partial Indexes
---------------
A partial index, e.g. one which uses a WHERE clause, can be specified
with the DDL system using the argument ``sqlite_where``::
tbl = Table('testtbl', m, Column('data', Integer))
idx = Index('test_idx1', tbl.c.data,
sqlite_where=and_(tbl.c.data > 5, tbl.c.data < 10))
The index will be rendered at create time as::
CREATE INDEX test_idx1 ON testtbl (data)
WHERE data > 5 AND data < 10
.. versionadded:: 0.9.9
.. _sqlite_dotted_column_names:
Dotted Column Names
-------------------
Using table or column names that explicitly have periods in them is
**not recommended**. While this is generally a bad idea for relational
databases in general, as the dot is a syntactically significant character,
the SQLite driver up until version **3.10.0** of SQLite has a bug which
requires that SQLAlchemy filter out these dots in result sets.
.. versionchanged:: 1.1
The following SQLite issue has been resolved as of version 3.10.0
of SQLite. SQLAlchemy as of **1.1** automatically disables its internal
workarounds based on detection of this version.
The bug, entirely outside of SQLAlchemy, can be illustrated thusly::
import sqlite3
assert sqlite3.sqlite_version_info < (3, 10, 0), "bug is fixed in this version"
conn = sqlite3.connect(":memory:")
cursor = conn.cursor()
cursor.execute("create table x (a integer, b integer)")
cursor.execute("insert into x (a, b) values (1, 1)")
cursor.execute("insert into x (a, b) values (2, 2)")
cursor.execute("select x.a, x.b from x")
assert [c[0] for c in cursor.description] == ['a', 'b']
cursor.execute('''
select x.a, x.b from x where a=1
union
select x.a, x.b from x where a=2
''')
assert [c[0] for c in cursor.description] == ['a', 'b'], \
[c[0] for c in cursor.description]
The second assertion fails::
Traceback (most recent call last):
File "test.py", line 19, in <module>
[c[0] for c in cursor.description]
AssertionError: ['x.a', 'x.b']
Where above, the driver incorrectly reports the names of the columns
including the name of the table, which is entirely inconsistent vs.
when the UNION is not present.
SQLAlchemy relies upon column names being predictable in how they match
to the original statement, so the SQLAlchemy dialect has no choice but
to filter these out::
from sqlalchemy import create_engine
eng = create_engine("sqlite://")
conn = eng.connect()
conn.exec_driver_sql("create table x (a integer, b integer)")
conn.exec_driver_sql("insert into x (a, b) values (1, 1)")
conn.exec_driver_sql("insert into x (a, b) values (2, 2)")
result = conn.exec_driver_sql("select x.a, x.b from x")
assert result.keys() == ["a", "b"]
result = conn.exec_driver_sql('''
select x.a, x.b from x where a=1
union
select x.a, x.b from x where a=2
''')
assert result.keys() == ["a", "b"]
Note that above, even though SQLAlchemy filters out the dots, *both
names are still addressable*::
>>> row = result.first()
>>> row["a"]
1
>>> row["x.a"]
1
>>> row["b"]
1
>>> row["x.b"]
1
Therefore, the workaround applied by SQLAlchemy only impacts
:meth:`_engine.CursorResult.keys` and :meth:`.Row.keys()` in the public API. In
the very specific case where an application is forced to use column names that
contain dots, and the functionality of :meth:`_engine.CursorResult.keys` and
:meth:`.Row.keys()` is required to return these dotted names unmodified,
the ``sqlite_raw_colnames`` execution option may be provided, either on a
per-:class:`_engine.Connection` basis::
result = conn.execution_options(sqlite_raw_colnames=True).exec_driver_sql('''
select x.a, x.b from x where a=1
union
select x.a, x.b from x where a=2
''')
assert result.keys() == ["x.a", "x.b"]
or on a per-:class:`_engine.Engine` basis::
engine = create_engine("sqlite://", execution_options={"sqlite_raw_colnames": True})
When using the per-:class:`_engine.Engine` execution option, note that
**Core and ORM queries that use UNION may not function properly**.
SQLite-specific table options
-----------------------------
One option for CREATE TABLE is supported directly by the SQLite
dialect in conjunction with the :class:`_schema.Table` construct:
* ``WITHOUT ROWID``::
Table("some_table", metadata, ..., sqlite_with_rowid=False)
.. seealso::
`SQLite CREATE TABLE options
<https://www.sqlite.org/lang_createtable.html>`_
""" # noqa
import datetime
import numbers
import re
from .json import JSON
from .json import JSONIndexType
from .json import JSONPathType
from ... import exc
from ... import schema as sa_schema
from ... import sql
from ... import types as sqltypes
from ... import util
from ...engine import default
from ...engine import processors
from ...engine import reflection
from ...sql import coercions
from ...sql import ColumnElement
from ...sql import compiler
from ...sql import elements
from ...sql import roles
from ...sql import schema
from ...types import BLOB # noqa
from ...types import BOOLEAN # noqa
from ...types import CHAR # noqa
from ...types import DECIMAL # noqa
from ...types import FLOAT # noqa
from ...types import INTEGER # noqa
from ...types import NUMERIC # noqa
from ...types import REAL # noqa
from ...types import SMALLINT # noqa
from ...types import TEXT # noqa
from ...types import TIMESTAMP # noqa
from ...types import VARCHAR # noqa
class _SQliteJson(JSON):
    """JSON type for SQLite.

    Wraps the base JSON result processor so that a bare numeric value
    coming back from a JSON column is passed through unchanged instead
    of raising ``TypeError`` during deserialization.
    """

    def result_processor(self, dialect, coltype):
        base_process = super(_SQliteJson, self).result_processor(
            dialect, coltype
        )

        def process(value):
            try:
                return base_process(value)
            except TypeError:
                # a plain number can't be JSON-deserialized; return it as-is
                if not isinstance(value, numbers.Number):
                    raise
                return value

        return process
class _DateTimeMixin:
_reg = None
_storage_format = None
def __init__(self, storage_format=None, regexp=None, **kw):
super(_DateTimeMixin, self).__init__(**kw)
if regexp is not None:
self._reg = re.compile(regexp)
if storage_format is not None:
self._storage_format = storage_format
@property
def format_is_text_affinity(self):
"""return True if the storage format will automatically imply
a TEXT affinity.
If the storage format contains no non-numeric characters,
it will imply a NUMERIC storage format on SQLite; in this case,
the type will generate its DDL as DATE_CHAR, DATETIME_CHAR,
TIME_CHAR.
.. versionadded:: 1.0.0
"""
spec = self._storage_format % {
"year": 0,
"month": 0,
"day": 0,
"hour": 0,
"minute": 0,
"second": 0,
"microsecond": 0,
}
return bool(re.search(r"[^0-9]", spec))
def adapt(self, cls, **kw):
if issubclass(cls, _DateTimeMixin):
if self._storage_format:
kw["storage_format"] = self._storage_format
if self._reg:
kw["regexp"] = self._reg
return super(_DateTimeMixin, self).adapt(cls, **kw)
def literal_processor(self, dialect):
bp = self.bind_processor(dialect)
def process(value):
return "'%s'" % bp(value)
return process
class DATETIME(_DateTimeMixin, sqltypes.DateTime):
    r"""Represent a Python datetime object in SQLite using a string.

    The default string storage format is::

        "%(year)04d-%(month)02d-%(day)02d %(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"

    e.g.::

        2021-03-15 12:05:57.105542

    The storage format can be customized to some degree using the
    ``storage_format`` and ``regexp`` parameters, such as::

        import re
        from sqlalchemy.dialects.sqlite import DATETIME

        dt = DATETIME(storage_format="%(year)04d/%(month)02d/%(day)02d "
                                     "%(hour)02d:%(minute)02d:%(second)02d",
                      regexp=r"(\d+)/(\d+)/(\d+) (\d+)-(\d+)-(\d+)"
        )

    :param storage_format: format string which will be applied to the dict
     with keys year, month, day, hour, minute, second, and microsecond.

    :param regexp: regular expression which will be applied to incoming result
     rows. If the regexp contains named groups, the resulting match dict is
     applied to the Python datetime() constructor as keyword arguments.
     Otherwise, if positional groups are used, the datetime() constructor
     is called with positional arguments via
     ``*map(int, match_obj.groups(0))``.

    """  # noqa

    _storage_format = (
        "%(year)04d-%(month)02d-%(day)02d "
        "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
    )

    def __init__(self, *args, **kwargs):
        truncate_microseconds = kwargs.pop("truncate_microseconds", False)
        super(DATETIME, self).__init__(*args, **kwargs)
        if truncate_microseconds:
            # truncation works by substituting a storage format without
            # the microsecond field; it therefore can't be combined with
            # an explicit storage_format or regexp
            assert "storage_format" not in kwargs, (
                "You can specify only "
                "one of truncate_microseconds or storage_format."
            )
            assert "regexp" not in kwargs, (
                "You can specify only one of "
                "truncate_microseconds or regexp."
            )
            self._storage_format = (
                "%(year)04d-%(month)02d-%(day)02d "
                "%(hour)02d:%(minute)02d:%(second)02d"
            )

    def bind_processor(self, dialect):
        datetime_datetime = datetime.datetime
        datetime_date = datetime.date
        format_ = self._storage_format

        def process(value):
            if value is None:
                return None
            if isinstance(value, datetime_datetime):
                fields = {
                    "year": value.year,
                    "month": value.month,
                    "day": value.day,
                    "hour": value.hour,
                    "minute": value.minute,
                    "second": value.second,
                    "microsecond": value.microsecond,
                }
            elif isinstance(value, datetime_date):
                # a plain date is stored with a zeroed time portion
                fields = {
                    "year": value.year,
                    "month": value.month,
                    "day": value.day,
                    "hour": 0,
                    "minute": 0,
                    "second": 0,
                    "microsecond": 0,
                }
            else:
                raise TypeError(
                    "SQLite DateTime type only accepts Python "
                    "datetime and date objects as input."
                )
            return format_ % fields

        return process

    def result_processor(self, dialect, coltype):
        if not self._reg:
            return processors.str_to_datetime
        return processors.str_to_datetime_processor_factory(
            self._reg, datetime.datetime
        )
class DATE(_DateTimeMixin, sqltypes.Date):
    r"""Represent a Python date object in SQLite using a string.

    The default string storage format is::

        "%(year)04d-%(month)02d-%(day)02d"

    e.g.::

        2011-03-15

    The storage format can be customized to some degree using the
    ``storage_format`` and ``regexp`` parameters, such as::

        import re
        from sqlalchemy.dialects.sqlite import DATE

        d = DATE(
            storage_format="%(month)02d/%(day)02d/%(year)04d",
            regexp=re.compile("(?P<month>\d+)/(?P<day>\d+)/(?P<year>\d+)")
        )

    :param storage_format: format string which will be applied to the
     dict with keys year, month, and day.

    :param regexp: regular expression which will be applied to
     incoming result rows. If the regexp contains named groups, the
     resulting match dict is applied to the Python date() constructor
     as keyword arguments. Otherwise, if positional groups are used, the
     date() constructor is called with positional arguments via
     ``*map(int, match_obj.groups(0))``.

    """

    _storage_format = "%(year)04d-%(month)02d-%(day)02d"

    def bind_processor(self, dialect):
        datetime_date = datetime.date
        format_ = self._storage_format

        def process(value):
            if value is None:
                return None
            if isinstance(value, datetime_date):
                return format_ % {
                    "year": value.year,
                    "month": value.month,
                    "day": value.day,
                }
            raise TypeError(
                "SQLite Date type only accepts Python "
                "date objects as input."
            )

        return process

    def result_processor(self, dialect, coltype):
        if not self._reg:
            return processors.str_to_date
        return processors.str_to_datetime_processor_factory(
            self._reg, datetime.date
        )
class TIME(_DateTimeMixin, sqltypes.Time):
    r"""Represent a Python time object in SQLite using a string.

    The default string storage format is::

        "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"

    e.g.::

        12:05:57.105542

    The storage format can be customized to some degree using the
    ``storage_format`` and ``regexp`` parameters, such as::

        import re
        from sqlalchemy.dialects.sqlite import TIME

        t = TIME(storage_format="%(hour)02d-%(minute)02d-"
                                "%(second)02d-%(microsecond)06d",
                 regexp=re.compile("(\d+)-(\d+)-(\d+)-(?:-(\d+))?")
        )

    :param storage_format: format string which will be applied to the dict
     with keys hour, minute, second, and microsecond.

    :param regexp: regular expression which will be applied to incoming result
     rows. If the regexp contains named groups, the resulting match dict is
     applied to the Python time() constructor as keyword arguments. Otherwise,
     if positional groups are used, the time() constructor is called with
     positional arguments via ``*map(int, match_obj.groups(0))``.

    """

    _storage_format = "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"

    def __init__(self, *args, **kwargs):
        truncate_microseconds = kwargs.pop("truncate_microseconds", False)
        super(TIME, self).__init__(*args, **kwargs)
        if truncate_microseconds:
            # truncation works by substituting a storage format without
            # the microsecond field; it therefore can't be combined with
            # an explicit storage_format or regexp
            assert "storage_format" not in kwargs, (
                "You can specify only "
                "one of truncate_microseconds or storage_format."
            )
            assert "regexp" not in kwargs, (
                "You can specify only one of "
                "truncate_microseconds or regexp."
            )
            self._storage_format = "%(hour)02d:%(minute)02d:%(second)02d"

    def bind_processor(self, dialect):
        datetime_time = datetime.time
        format_ = self._storage_format

        def process(value):
            if value is None:
                return None
            if isinstance(value, datetime_time):
                return format_ % {
                    "hour": value.hour,
                    "minute": value.minute,
                    "second": value.second,
                    "microsecond": value.microsecond,
                }
            raise TypeError(
                "SQLite Time type only accepts Python "
                "time objects as input."
            )

        return process

    def result_processor(self, dialect, coltype):
        if not self._reg:
            return processors.str_to_time
        return processors.str_to_datetime_processor_factory(
            self._reg, datetime.time
        )
# Map generic SQLAlchemy types to the SQLite-specific implementations
# defined above (string-stored date/time types, numeric-tolerant JSON).
colspecs = {
    sqltypes.Date: DATE,
    sqltypes.DateTime: DATETIME,
    sqltypes.JSON: _SQliteJson,
    sqltypes.JSON.JSONIndexType: JSONIndexType,
    sqltypes.JSON.JSONPathType: JSONPathType,
    sqltypes.Time: TIME,
}

# Type names recognized during reflection; the *_CHAR entries are the
# TEXT-affinity aliases emitted by SQLiteTypeCompiler.
ischema_names = {
    "BIGINT": sqltypes.BIGINT,
    "BLOB": sqltypes.BLOB,
    "BOOL": sqltypes.BOOLEAN,
    "BOOLEAN": sqltypes.BOOLEAN,
    "CHAR": sqltypes.CHAR,
    "DATE": sqltypes.DATE,
    "DATE_CHAR": sqltypes.DATE,
    "DATETIME": sqltypes.DATETIME,
    "DATETIME_CHAR": sqltypes.DATETIME,
    "DOUBLE": sqltypes.DOUBLE,
    "DECIMAL": sqltypes.DECIMAL,
    "FLOAT": sqltypes.FLOAT,
    "INT": sqltypes.INTEGER,
    "INTEGER": sqltypes.INTEGER,
    "JSON": JSON,
    "NUMERIC": sqltypes.NUMERIC,
    "REAL": sqltypes.REAL,
    "SMALLINT": sqltypes.SMALLINT,
    "TEXT": sqltypes.TEXT,
    "TIME": sqltypes.TIME,
    "TIME_CHAR": sqltypes.TIME,
    "TIMESTAMP": sqltypes.TIMESTAMP,
    "VARCHAR": sqltypes.VARCHAR,
    "NVARCHAR": sqltypes.NVARCHAR,
    "NCHAR": sqltypes.NCHAR,
}
class SQLiteCompiler(compiler.SQLCompiler):
    """SQL statement compiler with SQLite-specific renderings."""

    # EXTRACT is implemented via strftime(); map each field name to its
    # strftime() format code.
    extract_map = util.update_copy(
        compiler.SQLCompiler.extract_map,
        {
            "month": "%m",
            "day": "%d",
            "year": "%Y",
            "second": "%S",
            "hour": "%H",
            "doy": "%j",
            "minute": "%M",
            "epoch": "%s",
            "dow": "%w",
            "week": "%W",
        },
    )

    def visit_truediv_binary(self, binary, operator, **kw):
        # adding 0.0 to the divisor forces a floating-point division,
        # since SQLite's "/" is integer division for integer operands
        return (
            self.process(binary.left, **kw)
            + " / "
            + "(%s + 0.0)" % self.process(binary.right, **kw)
        )

    def visit_now_func(self, fn, **kw):
        return "CURRENT_TIMESTAMP"

    def visit_localtimestamp_func(self, func, **kw):
        return 'DATETIME(CURRENT_TIMESTAMP, "localtime")'

    def visit_true(self, expr, **kw):
        # render booleans as 1 / 0 integer literals
        return "1"

    def visit_false(self, expr, **kw):
        return "0"

    def visit_char_length_func(self, fn, **kw):
        return "length%s" % self.function_argspec(fn)

    def visit_cast(self, cast, **kwargs):
        if self.dialect.supports_cast:
            return super(SQLiteCompiler, self).visit_cast(cast, **kwargs)
        else:
            # very old SQLite: emit the bare expression without CAST
            return self.process(cast.clause, **kwargs)

    def visit_extract(self, extract, **kw):
        # EXTRACT(field FROM expr) -> CAST(STRFTIME(code, expr) AS INTEGER)
        try:
            return "CAST(STRFTIME('%s', %s) AS INTEGER)" % (
                self.extract_map[extract.field],
                self.process(extract.expr, **kw),
            )
        except KeyError as err:
            raise exc.CompileError(
                "%s is not a valid extract argument." % extract.field
            ) from err

    def limit_clause(self, select, **kw):
        # NOTE(review): appears to assume it is only invoked when a limit
        # and/or offset is present, since the no-offset branch emits
        # " OFFSET 0" unconditionally — confirm against the base compiler.
        text = ""
        if select._limit_clause is not None:
            text += "\n LIMIT " + self.process(select._limit_clause, **kw)
        if select._offset_clause is not None:
            if select._limit_clause is None:
                # SQLite requires a LIMIT when OFFSET is used; -1 = no limit
                text += "\n LIMIT " + self.process(sql.literal(-1))
            text += " OFFSET " + self.process(select._offset_clause, **kw)
        else:
            text += " OFFSET " + self.process(sql.literal(0), **kw)
        return text

    def for_update_clause(self, select, **kw):
        # sqlite has no "FOR UPDATE" AFAICT
        return ""

    def visit_is_distinct_from_binary(self, binary, operator, **kw):
        # SQLite's IS / IS NOT already have IS DISTINCT FROM semantics
        return "%s IS NOT %s" % (
            self.process(binary.left),
            self.process(binary.right),
        )

    def visit_is_not_distinct_from_binary(self, binary, operator, **kw):
        return "%s IS %s" % (
            self.process(binary.left),
            self.process(binary.right),
        )

    def visit_json_getitem_op_binary(self, binary, operator, **kw):
        # JSON_QUOTE re-wraps a JSON-typed result so it round-trips as JSON
        if binary.type._type_affinity is sqltypes.JSON:
            expr = "JSON_QUOTE(JSON_EXTRACT(%s, %s))"
        else:
            expr = "JSON_EXTRACT(%s, %s)"

        return expr % (
            self.process(binary.left, **kw),
            self.process(binary.right, **kw),
        )

    def visit_json_path_getitem_op_binary(self, binary, operator, **kw):
        # identical to the plain getitem case; JSON_EXTRACT accepts a
        # JSON path expression as its second argument
        if binary.type._type_affinity is sqltypes.JSON:
            expr = "JSON_QUOTE(JSON_EXTRACT(%s, %s))"
        else:
            expr = "JSON_EXTRACT(%s, %s)"

        return expr % (
            self.process(binary.left, **kw),
            self.process(binary.right, **kw),
        )

    def visit_empty_set_op_expr(self, type_, expand_op):
        # slightly old SQLite versions don't seem to be able to handle
        # the empty set impl
        return self.visit_empty_set_expr(type_)

    def visit_empty_set_expr(self, element_types):
        # a SELECT guaranteed to return zero rows, with one column per
        # element type
        return "SELECT %s FROM (SELECT %s) WHERE 1!=1" % (
            ", ".join("1" for type_ in element_types or [INTEGER()]),
            ", ".join("1" for type_ in element_types or [INTEGER()]),
        )

    def visit_regexp_match_op_binary(self, binary, operator, **kw):
        return self._generate_generic_binary(binary, " REGEXP ", **kw)

    def visit_not_regexp_match_op_binary(self, binary, operator, **kw):
        return self._generate_generic_binary(binary, " NOT REGEXP ", **kw)

    def _on_conflict_target(self, clause, **kw):
        """Render the conflict-target portion of an ON CONFLICT clause."""
        if clause.constraint_target is not None:
            target_text = "(%s)" % clause.constraint_target
        elif clause.inferred_target_elements is not None:
            target_text = "(%s)" % ", ".join(
                (
                    self.preparer.quote(c)
                    if isinstance(c, str)
                    else self.process(c, include_table=False, use_schema=False)
                )
                for c in clause.inferred_target_elements
            )
            if clause.inferred_target_whereclause is not None:
                # partial-index conflict target
                target_text += " WHERE %s" % self.process(
                    clause.inferred_target_whereclause,
                    include_table=False,
                    use_schema=False,
                    literal_binds=True,
                )
        else:
            target_text = ""

        return target_text

    def visit_on_conflict_do_nothing(self, on_conflict, **kw):
        target_text = self._on_conflict_target(on_conflict, **kw)

        if target_text:
            return "ON CONFLICT %s DO NOTHING" % target_text
        else:
            return "ON CONFLICT DO NOTHING"

    def visit_on_conflict_do_update(self, on_conflict, **kw):
        clause = on_conflict

        target_text = self._on_conflict_target(on_conflict, **kw)

        action_set_ops = []

        set_parameters = dict(clause.update_values_to_set)
        # create a list of column assignment clauses as tuples

        insert_statement = self.stack[-1]["selectable"]
        cols = insert_statement.table.c
        for c in cols:
            col_key = c.key

            if col_key in set_parameters:
                value = set_parameters.pop(col_key)
            elif c in set_parameters:
                value = set_parameters.pop(c)
            else:
                continue

            if coercions._is_literal(value):
                # plain Python value: render as a bind typed to the column
                value = elements.BindParameter(None, value, type_=c.type)

            else:
                if (
                    isinstance(value, elements.BindParameter)
                    and value.type._isnull
                ):
                    # untyped bind: clone and give it the column's type
                    value = value._clone()
                    value.type = c.type
            value_text = self.process(value.self_group(), use_schema=False)

            key_text = self.preparer.quote(col_key)
            action_set_ops.append("%s = %s" % (key_text, value_text))

        # check for names that don't match columns
        if set_parameters:
            util.warn(
                "Additional column names not matching "
                "any column keys in table '%s': %s"
                % (
                    self.current_executable.table.name,
                    (", ".join("'%s'" % c for c in set_parameters)),
                )
            )
            for k, v in set_parameters.items():
                key_text = (
                    self.preparer.quote(k)
                    if isinstance(k, str)
                    else self.process(k, use_schema=False)
                )
                value_text = self.process(
                    coercions.expect(roles.ExpressionElementRole, v),
                    use_schema=False,
                )
                action_set_ops.append("%s = %s" % (key_text, value_text))

        action_text = ", ".join(action_set_ops)
        if clause.update_whereclause is not None:
            action_text += " WHERE %s" % self.process(
                clause.update_whereclause, include_table=True, use_schema=False
            )

        return "ON CONFLICT %s DO UPDATE SET %s" % (target_text, action_text)
class SQLiteDDLCompiler(compiler.DDLCompiler):
    """DDL compiler adding SQLite-specific clauses: per-column/constraint
    ON CONFLICT, AUTOINCREMENT, partial indexes and WITHOUT ROWID."""

    def get_column_specification(self, column, **kwargs):
        coltype = self.dialect.type_compiler.process(
            column.type, type_expression=column
        )
        colspec = self.preparer.format_column(column) + " " + coltype
        default = self.get_column_default_string(column)
        if default is not None:
            if isinstance(column.server_default.arg, ColumnElement):
                # SQL-expression defaults must be parenthesized
                default = "(" + default + ")"
            colspec += " DEFAULT " + default

        if not column.nullable:
            colspec += " NOT NULL"

            # sqlite_on_conflict_not_null column option
            on_conflict_clause = column.dialect_options["sqlite"][
                "on_conflict_not_null"
            ]
            if on_conflict_clause is not None:
                colspec += " ON CONFLICT " + on_conflict_clause

        if column.primary_key:
            if (
                column.autoincrement is True
                and len(column.table.primary_key.columns) != 1
            ):
                raise exc.CompileError(
                    "SQLite does not support autoincrement for "
                    "composite primary keys"
                )

            if (
                column.table.dialect_options["sqlite"]["autoincrement"]
                and len(column.table.primary_key.columns) == 1
                and issubclass(column.type._type_affinity, sqltypes.Integer)
                and not column.foreign_keys
            ):
                # AUTOINCREMENT requires the PRIMARY KEY inline on the
                # column; the table-level constraint is suppressed in
                # visit_primary_key_constraint
                colspec += " PRIMARY KEY"

                # sqlite_on_conflict_primary_key column option
                on_conflict_clause = column.dialect_options["sqlite"][
                    "on_conflict_primary_key"
                ]
                if on_conflict_clause is not None:
                    colspec += " ON CONFLICT " + on_conflict_clause

                colspec += " AUTOINCREMENT"

        if column.computed is not None:
            colspec += " " + self.process(column.computed)

        return colspec

    def visit_primary_key_constraint(self, constraint):
        # for columns with sqlite_autoincrement=True,
        # the PRIMARY KEY constraint can only be inline
        # with the column itself.
        if len(constraint.columns) == 1:
            c = list(constraint)[0]
            if (
                c.primary_key
                and c.table.dialect_options["sqlite"]["autoincrement"]
                and issubclass(c.type._type_affinity, sqltypes.Integer)
                and not c.foreign_keys
            ):
                # suppressed; rendered inline by get_column_specification
                return None

        text = super(SQLiteDDLCompiler, self).visit_primary_key_constraint(
            constraint
        )

        on_conflict_clause = constraint.dialect_options["sqlite"][
            "on_conflict"
        ]
        if on_conflict_clause is None and len(constraint.columns) == 1:
            # fall back to the single column's own option
            on_conflict_clause = list(constraint)[0].dialect_options["sqlite"][
                "on_conflict_primary_key"
            ]

        if on_conflict_clause is not None:
            text += " ON CONFLICT " + on_conflict_clause

        return text

    def visit_unique_constraint(self, constraint):
        text = super(SQLiteDDLCompiler, self).visit_unique_constraint(
            constraint
        )

        on_conflict_clause = constraint.dialect_options["sqlite"][
            "on_conflict"
        ]
        if on_conflict_clause is None and len(constraint.columns) == 1:
            col1 = list(constraint)[0]
            if isinstance(col1, schema.SchemaItem):
                # fall back to the single column's own option
                on_conflict_clause = list(constraint)[0].dialect_options[
                    "sqlite"
                ]["on_conflict_unique"]

        if on_conflict_clause is not None:
            text += " ON CONFLICT " + on_conflict_clause

        return text

    def visit_check_constraint(self, constraint):
        text = super(SQLiteDDLCompiler, self).visit_check_constraint(
            constraint
        )

        on_conflict_clause = constraint.dialect_options["sqlite"][
            "on_conflict"
        ]

        if on_conflict_clause is not None:
            text += " ON CONFLICT " + on_conflict_clause

        return text

    def visit_column_check_constraint(self, constraint):
        text = super(SQLiteDDLCompiler, self).visit_column_check_constraint(
            constraint
        )

        if constraint.dialect_options["sqlite"]["on_conflict"] is not None:
            raise exc.CompileError(
                "SQLite does not support on conflict clause for "
                "column check constraint"
            )

        return text

    def visit_foreign_key_constraint(self, constraint):
        local_table = constraint.elements[0].parent.table
        remote_table = constraint.elements[0].column.table

        if local_table.schema != remote_table.schema:
            # cross-schema (cross-database) FKs are not rendered
            return None
        else:
            return super(SQLiteDDLCompiler, self).visit_foreign_key_constraint(
                constraint
            )

    def define_constraint_remote_table(self, constraint, table, preparer):
        """Format the remote table clause of a CREATE CONSTRAINT clause."""

        return preparer.format_table(table, use_schema=False)

    def visit_create_index(
        self, create, include_schema=False, include_table_schema=True
    ):
        index = create.element
        self._verify_index_table(index)
        preparer = self.preparer
        text = "CREATE "
        if index.unique:
            text += "UNIQUE "

        text += "INDEX "

        if create.if_not_exists:
            text += "IF NOT EXISTS "

        text += "%s ON %s (%s)" % (
            self._prepared_index_name(index, include_schema=True),
            preparer.format_table(index.table, use_schema=False),
            ", ".join(
                self.sql_compiler.process(
                    expr, include_table=False, literal_binds=True
                )
                for expr in index.expressions
            ),
        )

        # sqlite_where index option -> partial index
        whereclause = index.dialect_options["sqlite"]["where"]
        if whereclause is not None:
            where_compiled = self.sql_compiler.process(
                whereclause, include_table=False, literal_binds=True
            )
            text += " WHERE " + where_compiled

        return text

    def post_create_table(self, table):
        # sqlite_with_rowid=False table option
        if table.dialect_options["sqlite"]["with_rowid"] is False:
            return "\n WITHOUT ROWID"
        return ""
class SQLiteTypeCompiler(compiler.GenericTypeCompiler):
    """Type-name compiler that emits the ``*_CHAR`` aliases for the
    string-based date/time types whose storage format would otherwise
    imply NUMERIC affinity."""

    def visit_large_binary(self, type_, **kw):
        # LargeBinary maps straight to BLOB
        return self.visit_BLOB(type_)

    def visit_DATETIME(self, type_, **kw):
        if (
            isinstance(type_, _DateTimeMixin)
            and not type_.format_is_text_affinity
        ):
            # all-numeric storage format: the _CHAR alias forces TEXT
            # affinity on the column
            return "DATETIME_CHAR"
        return super(SQLiteTypeCompiler, self).visit_DATETIME(type_)

    def visit_DATE(self, type_, **kw):
        if (
            isinstance(type_, _DateTimeMixin)
            and not type_.format_is_text_affinity
        ):
            return "DATE_CHAR"
        return super(SQLiteTypeCompiler, self).visit_DATE(type_)

    def visit_TIME(self, type_, **kw):
        if (
            isinstance(type_, _DateTimeMixin)
            and not type_.format_is_text_affinity
        ):
            return "TIME_CHAR"
        return super(SQLiteTypeCompiler, self).visit_TIME(type_)

    def visit_JSON(self, type_, **kw):
        # note this name provides NUMERIC affinity, not TEXT.
        # should not be an issue unless the JSON value consists of a single
        # numeric value. JSONTEXT can be used if this case is required.
        return "JSON"
class SQLiteIdentifierPreparer(compiler.IdentifierPreparer):
    """Identifier preparer carrying SQLite's reserved-word list."""

    reserved_words = set(
        """
        add after all alter analyze and as asc attach autoincrement
        before begin between by cascade case cast check collate column
        commit conflict constraint create cross current_date current_time
        current_timestamp database default deferrable deferred delete
        desc detach distinct drop each else end escape except exclusive
        exists explain false fail for foreign from full glob group having
        if ignore immediate in index indexed initially inner insert
        instead intersect into is isnull join key left like limit match
        natural not notnull null of offset on or order outer plan pragma
        primary query raise references reindex rename replace restrict
        right rollback row select set table temp temporary then to
        transaction trigger true union unique update using vacuum values
        view virtual when where
        """.split()
    )
class SQLiteExecutionContext(default.DefaultExecutionContext):
    """Execution context that strips SQLite's spurious table-qualified
    column names from cursor descriptions (pre-3.10.0 drivers)."""

    @util.memoized_property
    def _preserve_raw_colnames(self):
        # keep the raw names when the driver isn't affected by the bug,
        # or when the user opted in via the sqlite_raw_colnames option
        if not self.dialect._broken_dotted_colnames:
            return True
        return self.execution_options.get("sqlite_raw_colnames", False)

    def _translate_colname(self, colname):
        # TODO: detect SQLite version 3.10.0 or greater;
        # see [ticket:3633]

        # adjust for dotted column names. SQLite
        # in the case of UNION may store col names as
        # "tablename.colname", or if using an attached database,
        # "database.tablename.colname", in cursor.description
        if self._preserve_raw_colnames or "." not in colname:
            return colname, None
        return colname.rsplit(".", 1)[-1], colname
class SQLiteDialect(default.DefaultDialect):
    """Base dialect for SQLite; drivers subclass this."""

    name = "sqlite"
    supports_alter = False

    # SQLite supports "DEFAULT VALUES" but *does not* support
    # "VALUES (DEFAULT)"
    supports_default_values = True
    supports_default_metavalue = False

    supports_empty_insert = False
    supports_cast = True
    supports_multivalues_insert = True
    tuple_in_values = True
    supports_statement_cache = True

    default_paramstyle = "qmark"
    execution_ctx_cls = SQLiteExecutionContext
    statement_compiler = SQLiteCompiler
    ddl_compiler = SQLiteDDLCompiler
    type_compiler = SQLiteTypeCompiler
    preparer = SQLiteIdentifierPreparer
    ischema_names = ischema_names
    colspecs = colspecs

    # dialect-specific keyword arguments accepted on schema constructs,
    # e.g. sqlite_autoincrement, sqlite_with_rowid, sqlite_where,
    # sqlite_on_conflict_*
    construct_arguments = [
        (
            sa_schema.Table,
            {
                "autoincrement": False,
                "with_rowid": True,
            },
        ),
        (sa_schema.Index, {"where": None}),
        (
            sa_schema.Column,
            {
                "on_conflict_primary_key": None,
                "on_conflict_not_null": None,
                "on_conflict_unique": None,
            },
        ),
        (sa_schema.Constraint, {"on_conflict": None}),
    ]

    # version-dependent workaround flags; recalculated in __init__ from
    # the DBAPI's sqlite_version_info
    _broken_fk_pragma_quotes = False
    _broken_dotted_colnames = False
    @util.deprecated_params(
        _json_serializer=(
            "1.3.7",
            "The _json_serializer argument to the SQLite dialect has "
            "been renamed to the correct name of json_serializer.  The old "
            "argument name will be removed in a future release.",
        ),
        _json_deserializer=(
            "1.3.7",
            "The _json_deserializer argument to the SQLite dialect has "
            "been renamed to the correct name of json_deserializer.  The old "
            "argument name will be removed in a future release.",
        ),
    )
    def __init__(
        self,
        native_datetime=False,
        json_serializer=None,
        json_deserializer=None,
        _json_serializer=None,
        _json_deserializer=None,
        **kwargs,
    ):
        """Construct the dialect, probing the DBAPI's SQLite library
        version to enable or disable version-dependent features."""
        default.DefaultDialect.__init__(self, **kwargs)

        # legacy underscore-prefixed argument names take precedence
        # when supplied (deprecated, see decorator above)
        if _json_serializer:
            json_serializer = _json_serializer
        if _json_deserializer:
            json_deserializer = _json_deserializer
        self._json_serializer = json_serializer
        self._json_deserializer = json_deserializer

        # this flag used by pysqlite dialect, and perhaps others in the
        # future, to indicate the driver is handling date/timestamp
        # conversions (and perhaps datetime/time as well on some hypothetical
        # driver ?)
        self.native_datetime = native_datetime

        if self.dbapi is not None:
            if self.dbapi.sqlite_version_info < (3, 7, 16):
                util.warn(
                    "SQLite version %s is older than 3.7.16, and will not "
                    "support right nested joins, as are sometimes used in "
                    "more complex ORM scenarios.  SQLAlchemy 1.4 and above "
                    "no longer tries to rewrite these joins."
                    % (self.dbapi.sqlite_version_info,)
                )

            # NOTE: python 3.7 on fedora for me has SQLite 3.34.1.  These
            # version checks are getting very stale.
            self._broken_dotted_colnames = self.dbapi.sqlite_version_info < (
                3,
                10,
                0,
            )
            self.supports_default_values = self.dbapi.sqlite_version_info >= (
                3,
                3,
                8,
            )
            self.supports_cast = self.dbapi.sqlite_version_info >= (3, 2, 3)
            self.supports_multivalues_insert = (
                # https://www.sqlite.org/releaselog/3_7_11.html
                self.dbapi.sqlite_version_info
                >= (3, 7, 11)
            )
            # see https://www.sqlalchemy.org/trac/ticket/2568
            # as well as https://www.sqlite.org/src/info/600482d161
            self._broken_fk_pragma_quotes = self.dbapi.sqlite_version_info < (
                3,
                6,
                14,
            )
    # PRAGMA read_uncommitted value corresponding to each supported
    # isolation level name
    _isolation_lookup = util.immutabledict(
        {"READ UNCOMMITTED": 1, "SERIALIZABLE": 0}
    )

    def get_isolation_level_values(self, dbapi_connection):
        # the keys of _isolation_lookup are the supported level names
        return list(self._isolation_lookup)

    def set_isolation_level(self, dbapi_connection, level):
        # map the level name to its pragma value; an unsupported level
        # raises KeyError here
        isolation_level = self._isolation_lookup[level]

        cursor = dbapi_connection.cursor()
        cursor.execute(f"PRAGMA read_uncommitted = {isolation_level}")
        cursor.close()
def get_isolation_level(self, dbapi_connection):
cursor = dbapi_connection.cursor()
cursor.execute("PRAGMA read_uncommitted")
res = cursor.fetchone()
if res:
value = res[0]
else:
# https://www.sqlite.org/changes.html#version_3_3_3
# "Optional READ UNCOMMITTED isolation (instead of the
# default isolation level of SERIALIZABLE) and
# table level locking when database connections
# share a common cache.""
# pre-SQLite 3.3.0 default to 0
value = 0
cursor.close()
if value == 0:
return "SERIALIZABLE"
elif value == 1:
return "READ UNCOMMITTED"
else:
assert False, "Unknown isolation level %s" % value
@reflection.cache
def get_schema_names(self, connection, **kw):
s = "PRAGMA database_list"
dl = connection.exec_driver_sql(s)
return [db[1] for db in dl if db[1] != "temp"]
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
if schema is not None:
qschema = self.identifier_preparer.quote_identifier(schema)
master = "%s.sqlite_master" % qschema
else:
master = "sqlite_master"
s = ("SELECT name FROM %s " "WHERE type='table' ORDER BY name") % (
master,
)
rs = connection.exec_driver_sql(s)
return [row[0] for row in rs]
@reflection.cache
def get_temp_table_names(self, connection, **kw):
s = (
"SELECT name FROM sqlite_temp_master "
"WHERE type='table' ORDER BY name "
)
rs = connection.exec_driver_sql(s)
return [row[0] for row in rs]
@reflection.cache
def get_temp_view_names(self, connection, **kw):
s = (
"SELECT name FROM sqlite_temp_master "
"WHERE type='view' ORDER BY name "
)
rs = connection.exec_driver_sql(s)
return [row[0] for row in rs]
def has_table(self, connection, table_name, schema=None):
self._ensure_has_table_connection(connection)
info = self._get_table_pragma(
connection, "table_info", table_name, schema=schema
)
return bool(info)
    def _get_default_schema_name(self, connection):
        # SQLite's primary (non-attached) database is always named "main"
        return "main"
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
if schema is not None:
qschema = self.identifier_preparer.quote_identifier(schema)
master = "%s.sqlite_master" % qschema
else:
master = "sqlite_master"
s = ("SELECT name FROM %s " "WHERE type='view' ORDER BY name") % (
master,
)
rs = connection.exec_driver_sql(s)
return [row[0] for row in rs]
    @reflection.cache
    def get_view_definition(self, connection, view_name, schema=None, **kw):
        """Return the CREATE VIEW statement text for the given view."""
        if schema is not None:
            qschema = self.identifier_preparer.quote_identifier(schema)
            master = "%s.sqlite_master" % qschema
            s = ("SELECT sql FROM %s WHERE name = ? AND type='view'") % (
                master,
            )
            rs = connection.exec_driver_sql(s, (view_name,))
        else:
            try:
                # search both permanent and temporary views
                s = (
                    "SELECT sql FROM "
                    " (SELECT * FROM sqlite_master UNION ALL "
                    " SELECT * FROM sqlite_temp_master) "
                    "WHERE name = ? "
                    "AND type='view'"
                )
                rs = connection.exec_driver_sql(s, (view_name,))
            except exc.DBAPIError:
                # NOTE(review): fallback when the UNION query fails —
                # presumably for SQLite versions that reject it; confirm
                s = (
                    "SELECT sql FROM sqlite_master WHERE name = ? "
                    "AND type='view'"
                )
                rs = connection.exec_driver_sql(s, (view_name,))

        result = rs.fetchall()
        if result:
            return result[0].sql
    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        """Reflect column metadata via PRAGMA table_info / table_xinfo."""
        pragma = "table_info"
        # computed columns are threaded as hidden, they require table_xinfo
        if self.server_version_info >= (3, 31):
            pragma = "table_xinfo"
        info = self._get_table_pragma(
            connection, pragma, table_name, schema=schema
        )
        columns = []
        tablesql = None
        for row in info:
            # pragma row layout:
            # (cid, name, type, notnull, dflt_value, pk[, hidden])
            name = row[1]
            type_ = row[2].upper()
            nullable = not row[3]
            default = row[4]
            primary_key = row[5]
            hidden = row[6] if pragma == "table_xinfo" else 0

            # hidden has value 0 for normal columns, 1 for hidden columns,
            # 2 for computed virtual columns and 3 for computed stored columns
            # https://www.sqlite.org/src/info/069351b85f9a706f60d3e98fbc8aaf40c374356b967c0464aede30ead3d9d18b
            if hidden == 1:
                continue

            generated = bool(hidden)
            persisted = hidden == 3

            if tablesql is None and generated:
                # the generation expression is only recoverable from the
                # CREATE TABLE source; fetch it lazily, at most once
                tablesql = self._get_table_sql(
                    connection, table_name, schema, **kw
                )

            columns.append(
                self._get_column_info(
                    name,
                    type_,
                    nullable,
                    default,
                    primary_key,
                    generated,
                    persisted,
                    tablesql,
                )
            )
        return columns
def _get_column_info(
self,
name,
type_,
nullable,
default,
primary_key,
generated,
persisted,
tablesql,
):
if generated:
# the type of a column "cc INTEGER GENERATED ALWAYS AS (1 + 42)"
# somehow is "INTEGER GENERATED ALWAYS"
type_ = re.sub("generated", "", type_, flags=re.IGNORECASE)
type_ = re.sub("always", "", type_, flags=re.IGNORECASE).strip()
coltype = self._resolve_type_affinity(type_)
if default is not None:
default = str(default)
colspec = {
"name": name,
"type": coltype,
"nullable": nullable,
"default": default,
"autoincrement": "auto",
"primary_key": primary_key,
}
if generated:
sqltext = ""
if tablesql:
pattern = r"[^,]*\s+AS\s+\(([^,]*)\)\s*(?:virtual|stored)?"
match = re.search(
re.escape(name) + pattern, tablesql, re.IGNORECASE
)
if match:
sqltext = match.group(1)
colspec["computed"] = {"sqltext": sqltext, "persisted": persisted}
return colspec
def _resolve_type_affinity(self, type_):
"""Return a data type from a reflected column, using affinity rules.
SQLite's goal for universal compatibility introduces some complexity
during reflection, as a column's defined type might not actually be a
type that SQLite understands - or indeed, my not be defined *at all*.
Internally, SQLite handles this with a 'data type affinity' for each
column definition, mapping to one of 'TEXT', 'NUMERIC', 'INTEGER',
'REAL', or 'NONE' (raw bits). The algorithm that determines this is
listed in https://www.sqlite.org/datatype3.html section 2.1.
This method allows SQLAlchemy to support that algorithm, while still
providing access to smarter reflection utilities by recognizing
column definitions that SQLite only supports through affinity (like
DATE and DOUBLE).
"""
match = re.match(r"([\w ]+)(\(.*?\))?", type_)
if match:
coltype = match.group(1)
args = match.group(2)
else:
coltype = ""
args = ""
if coltype in self.ischema_names:
coltype = self.ischema_names[coltype]
elif "INT" in coltype:
coltype = sqltypes.INTEGER
elif "CHAR" in coltype or "CLOB" in coltype or "TEXT" in coltype:
coltype = sqltypes.TEXT
elif "BLOB" in coltype or not coltype:
coltype = sqltypes.NullType
elif "REAL" in coltype or "FLOA" in coltype or "DOUB" in coltype:
coltype = sqltypes.REAL
else:
coltype = sqltypes.NUMERIC
if args is not None:
args = re.findall(r"(\d+)", args)
try:
coltype = coltype(*[int(a) for a in args])
except TypeError:
util.warn(
"Could not instantiate type %s with "
"reflected arguments %s; using no arguments."
% (coltype, args)
)
coltype = coltype()
else:
coltype = coltype()
return coltype
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
constraint_name = None
table_data = self._get_table_sql(connection, table_name, schema=schema)
if table_data:
PK_PATTERN = r"CONSTRAINT (\w+) PRIMARY KEY"
result = re.search(PK_PATTERN, table_data, re.I)
constraint_name = result.group(1) if result else None
cols = self.get_columns(connection, table_name, schema, **kw)
cols.sort(key=lambda col: col.get("primary_key"))
pkeys = []
for col in cols:
if col["primary_key"]:
pkeys.append(col["name"])
return {"constrained_columns": pkeys, "name": constraint_name}
    @reflection.cache
    def get_foreign_keys(self, connection, table_name, schema=None, **kw):
        """Reflect FOREIGN KEY constraints for ``table_name``.

        Combines two sources of information: ``PRAGMA foreign_key_list``
        (authoritative columns/tables, but no constraint names or ON
        UPDATE / ON DELETE options) and a regex parse of the original
        CREATE TABLE text (names and options).  Parsed constraints are
        matched back to the pragma-derived ones by the signature of their
        (constrained columns, referred table, referred columns).
        """
        # sqlite makes this *extremely difficult*.
        # First, use the pragma to get the actual FKs.
        pragma_fks = self._get_table_pragma(
            connection, "foreign_key_list", table_name, schema=schema
        )
        fks = {}
        # each pragma row is one column pair of some constraint; rows of
        # the same constraint share row[0] (the numeric constraint id)
        for row in pragma_fks:
            (numerical_id, rtbl, lcol, rcol) = (row[0], row[2], row[3], row[4])
            if not rcol:
                # no referred column, which means it was not named in the
                # original DDL. The referred columns of the foreign key
                # constraint are therefore the primary key of the referred
                # table.
                referred_pk = self.get_pk_constraint(
                    connection, rtbl, schema=schema, **kw
                )
                # note that if table doesn't exist, we still get back a record,
                # just it has no columns in it
                referred_columns = referred_pk["constrained_columns"]
            else:
                # note we use this list only if this is the first column
                # in the constraint. for subsequent columns we ignore the
                # list and append "rcol" if present.
                referred_columns = []
            if self._broken_fk_pragma_quotes:
                # older SQLite leaves quoting characters in the referred
                # table name; strip leading/trailing quote chars
                rtbl = re.sub(r"^[\"\[`\']|[\"\]`\']$", "", rtbl)
            if numerical_id in fks:
                fk = fks[numerical_id]
            else:
                fk = fks[numerical_id] = {
                    "name": None,
                    "constrained_columns": [],
                    "referred_schema": schema,
                    "referred_table": rtbl,
                    "referred_columns": referred_columns,
                    "options": {},
                }
                fks[numerical_id] = fk
            fk["constrained_columns"].append(lcol)
            if rcol:
                fk["referred_columns"].append(rcol)
        def fk_sig(constrained_columns, referred_table, referred_columns):
            # hashable signature used to match DDL-parsed constraints to
            # pragma-derived ones
            return (
                tuple(constrained_columns)
                + (referred_table,)
                + tuple(referred_columns)
            )
        # then, parse the actual SQL and attempt to find DDL that matches
        # the names as well. SQLite saves the DDL in whatever format
        # it was typed in as, so need to be liberal here.
        keys_by_signature = dict(
            (
                fk_sig(
                    fk["constrained_columns"],
                    fk["referred_table"],
                    fk["referred_columns"],
                ),
                fk,
            )
            for fk in fks.values()
        )
        table_data = self._get_table_sql(connection, table_name, schema=schema)
        if table_data is None:
            # system tables, etc.
            return []
        def parse_fks():
            # generator yielding one (name, constrained cols, referred
            # table, referred cols, options) tuple per FOREIGN KEY clause
            # found in the CREATE TABLE text
            FK_PATTERN = (
                r"(?:CONSTRAINT (\w+) +)?"
                r"FOREIGN KEY *\( *(.+?) *\) +"
                r'REFERENCES +(?:(?:"(.+?)")|([a-z0-9_]+)) *\((.+?)\) *'
                r"((?:ON (?:DELETE|UPDATE) "
                r"(?:SET NULL|SET DEFAULT|CASCADE|RESTRICT|NO ACTION) *)*)"
            )
            for match in re.finditer(FK_PATTERN, table_data, re.I):
                (
                    constraint_name,
                    constrained_columns,
                    referred_quoted_name,
                    referred_name,
                    referred_columns,
                    onupdatedelete,
                ) = match.group(1, 2, 3, 4, 5, 6)
                constrained_columns = list(
                    self._find_cols_in_sig(constrained_columns)
                )
                if not referred_columns:
                    # unnamed referred columns default to the constrained
                    # column names, mirroring SQLite's own behavior
                    referred_columns = constrained_columns
                else:
                    referred_columns = list(
                        self._find_cols_in_sig(referred_columns)
                    )
                referred_name = referred_quoted_name or referred_name
                options = {}
                # split the trailing "ON DELETE ... ON UPDATE ..." text
                # into individual tokens; "NO ACTION" is the default and
                # therefore not recorded
                for token in re.split(r" *\bON\b *", onupdatedelete.upper()):
                    if token.startswith("DELETE"):
                        ondelete = token[6:].strip()
                        if ondelete and ondelete != "NO ACTION":
                            options["ondelete"] = ondelete
                    elif token.startswith("UPDATE"):
                        onupdate = token[6:].strip()
                        if onupdate and onupdate != "NO ACTION":
                            options["onupdate"] = onupdate
                yield (
                    constraint_name,
                    constrained_columns,
                    referred_name,
                    referred_columns,
                    options,
                )
        fkeys = []
        for (
            constraint_name,
            constrained_columns,
            referred_name,
            referred_columns,
            options,
        ) in parse_fks():
            sig = fk_sig(constrained_columns, referred_name, referred_columns)
            if sig not in keys_by_signature:
                util.warn(
                    "WARNING: SQL-parsed foreign key constraint "
                    "'%s' could not be located in PRAGMA "
                    "foreign_keys for table %s" % (sig, table_name)
                )
                continue
            key = keys_by_signature.pop(sig)
            key["name"] = constraint_name
            key["options"] = options
            fkeys.append(key)
        # assume the remainders are the unnamed, inline constraints, just
        # use them as is as it's extremely difficult to parse inline
        # constraints
        fkeys.extend(keys_by_signature.values())
        return fkeys
def _find_cols_in_sig(self, sig):
for match in re.finditer(r'(?:"(.+?)")|([a-z0-9_]+)', sig, re.I):
yield match.group(1) or match.group(2)
    @reflection.cache
    def get_unique_constraints(
        self, connection, table_name, schema=None, **kw
    ):
        """Reflect UNIQUE constraints by parsing the CREATE TABLE text.

        SQLite only exposes unique constraints as implicit
        ``sqlite_autoindex`` indexes, which are indistinguishable from
        explicitly CREATEd unique indexes; the original DDL text is
        parsed to determine which auto-indexes correspond to table-level
        or inline UNIQUE constraints.
        """
        auto_index_by_sig = {}
        # collect the implicit auto-indexes keyed by their column-name
        # tuple; each corresponds to a UNIQUE or PRIMARY KEY constraint
        for idx in self.get_indexes(
            connection,
            table_name,
            schema=schema,
            include_auto_indexes=True,
            **kw,
        ):
            if not idx["name"].startswith("sqlite_autoindex"):
                continue
            sig = tuple(idx["column_names"])
            auto_index_by_sig[sig] = idx
        table_data = self._get_table_sql(
            connection, table_name, schema=schema, **kw
        )
        if not table_data:
            return []
        unique_constraints = []
        def parse_uqs():
            # yield (name, column list) for each UNIQUE constraint found:
            # table-level "CONSTRAINT x UNIQUE (...)" clauses first, then
            # inline column-level "col TYPE UNIQUE" declarations
            UNIQUE_PATTERN = r'(?:CONSTRAINT "?(.+?)"? +)?UNIQUE *\((.+?)\)'
            INLINE_UNIQUE_PATTERN = (
                r'(?:(".+?")|(?:[\[`])?([a-z0-9_]+)(?:[\]`])?) '
                r"+[a-z0-9_ ]+? +UNIQUE"
            )
            for match in re.finditer(UNIQUE_PATTERN, table_data, re.I):
                name, cols = match.group(1, 2)
                yield name, list(self._find_cols_in_sig(cols))
            # we need to match inlines as well, as we seek to differentiate
            # a UNIQUE constraint from a UNIQUE INDEX, even though these
            # are kind of the same thing :)
            for match in re.finditer(INLINE_UNIQUE_PATTERN, table_data, re.I):
                cols = list(
                    self._find_cols_in_sig(match.group(1) or match.group(2))
                )
                yield None, cols
        # only constraints whose column signature matches an auto-index
        # are real UNIQUE constraints; others are ignored
        for name, cols in parse_uqs():
            sig = tuple(cols)
            if sig in auto_index_by_sig:
                auto_index_by_sig.pop(sig)
                parsed_constraint = {"name": name, "column_names": cols}
                unique_constraints.append(parsed_constraint)
        # NOTE: auto_index_by_sig might not be empty here,
        # the PRIMARY KEY may have an entry.
        return unique_constraints
@reflection.cache
def get_check_constraints(self, connection, table_name, schema=None, **kw):
table_data = self._get_table_sql(
connection, table_name, schema=schema, **kw
)
if not table_data:
return []
CHECK_PATTERN = r"(?:CONSTRAINT (\w+) +)?" r"CHECK *\( *(.+) *\),? *"
check_constraints = []
# NOTE: we aren't using re.S here because we actually are
# taking advantage of each CHECK constraint being all on one
# line in the table definition in order to delineate. This
# necessarily makes assumptions as to how the CREATE TABLE
# was emitted.
for match in re.finditer(CHECK_PATTERN, table_data, re.I):
check_constraints.append(
{"sqltext": match.group(2), "name": match.group(1)}
)
return check_constraints
@reflection.cache
def get_indexes(self, connection, table_name, schema=None, **kw):
pragma_indexes = self._get_table_pragma(
connection, "index_list", table_name, schema=schema
)
indexes = []
include_auto_indexes = kw.pop("include_auto_indexes", False)
for row in pragma_indexes:
# ignore implicit primary key index.
# https://www.mail-archive.com/sqlite-users@sqlite.org/msg30517.html
if not include_auto_indexes and row[1].startswith(
"sqlite_autoindex"
):
continue
indexes.append(dict(name=row[1], column_names=[], unique=row[2]))
# loop thru unique indexes to get the column names.
for idx in list(indexes):
pragma_index = self._get_table_pragma(
connection, "index_info", idx["name"]
)
for row in pragma_index:
if row[2] is None:
util.warn(
"Skipped unsupported reflection of "
"expression-based index %s" % idx["name"]
)
indexes.remove(idx)
break
else:
idx["column_names"].append(row[2])
return indexes
@reflection.cache
def _get_table_sql(self, connection, table_name, schema=None, **kw):
if schema:
schema_expr = "%s." % (
self.identifier_preparer.quote_identifier(schema)
)
else:
schema_expr = ""
try:
s = (
"SELECT sql FROM "
" (SELECT * FROM %(schema)ssqlite_master UNION ALL "
" SELECT * FROM %(schema)ssqlite_temp_master) "
"WHERE name = ? "
"AND type = 'table'" % {"schema": schema_expr}
)
rs = connection.exec_driver_sql(s, (table_name,))
except exc.DBAPIError:
s = (
"SELECT sql FROM %(schema)ssqlite_master "
"WHERE name = ? "
"AND type = 'table'" % {"schema": schema_expr}
)
rs = connection.exec_driver_sql(s, (table_name,))
return rs.scalar()
def _get_table_pragma(self, connection, pragma, table_name, schema=None):
quote = self.identifier_preparer.quote_identifier
if schema is not None:
statements = ["PRAGMA %s." % quote(schema)]
else:
# because PRAGMA looks in all attached databases if no schema
# given, need to specify "main" schema, however since we want
# 'temp' tables in the same namespace as 'main', need to run
# the PRAGMA twice
statements = ["PRAGMA main.", "PRAGMA temp."]
qtable = quote(table_name)
for statement in statements:
statement = "%s%s(%s)" % (statement, pragma, qtable)
cursor = connection.exec_driver_sql(statement)
if not cursor._soft_closed:
# work around SQLite issue whereby cursor.description
# is blank when PRAGMA returns no rows:
# https://www.sqlite.org/cvstrac/tktview?tn=1884
result = cursor.fetchall()
else:
result = []
if result:
return result
else:
return []
Represent a Python date object in SQLite using a string.
The default string storage format is::
"%(year)04d-%(month)02d-%(day)02d"
e.g.::
2011-03-15
The storage format can be customized to some degree using the
``storage_format`` and ``regexp`` parameters, such as::
import re
from sqlalchemy.dialects.sqlite import DATE
d = DATE(
storage_format="%(month)02d/%(day)02d/%(year)04d",
regexp=re.compile("(?P<month>\d+)/(?P<day>\d+)/(?P<year>\d+)")
)
:param storage_format: format string which will be applied to the
dict with keys year, month, and day.
:param regexp: regular expression which will be applied to
incoming result rows. If the regexp contains named groups, the
resulting match dict is applied to the Python date() constructor
as keyword arguments. Otherwise, if positional groups are used, the
date() constructor is called with positional arguments via
``*map(int, match_obj.groups(0))``.
Represent a Python datetime object in SQLite using a string.
The default string storage format is::
"%(year)04d-%(month)02d-%(day)02d %(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
e.g.::
2021-03-15 12:05:57.105542
The storage format can be customized to some degree using the
``storage_format`` and ``regexp`` parameters, such as::
import re
from sqlalchemy.dialects.sqlite import DATETIME
dt = DATETIME(storage_format="%(year)04d/%(month)02d/%(day)02d "
"%(hour)02d:%(minute)02d:%(second)02d",
regexp=r"(\d+)/(\d+)/(\d+) (\d+)-(\d+)-(\d+)"
)
:param storage_format: format string which will be applied to the dict
with keys year, month, day, hour, minute, second, and microsecond.
:param regexp: regular expression which will be applied to incoming result
rows. If the regexp contains named groups, the resulting match dict is
applied to the Python datetime() constructor as keyword arguments.
Otherwise, if positional groups are used, the datetime() constructor
is called with positional arguments via
``*map(int, match_obj.groups(0))``.
Represent a Python time object in SQLite using a string.
The default string storage format is::
"%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
e.g.::
12:05:57.10558
The storage format can be customized to some degree using the
``storage_format`` and ``regexp`` parameters, such as::
import re
from sqlalchemy.dialects.sqlite import TIME
t = TIME(storage_format="%(hour)02d-%(minute)02d-"
"%(second)02d-%(microsecond)06d",
regexp=re.compile("(\d+)-(\d+)-(\d+)-(?:-(\d+))?")
)
:param storage_format: format string which will be applied to the dict
with keys hour, minute, second, and microsecond.
:param regexp: regular expression which will be applied to incoming result
rows. If the regexp contains named groups, the resulting match dict is
applied to the Python time() constructor as keyword arguments. Otherwise,
if positional groups are used, the time() constructor is called with
positional arguments via ``*map(int, match_obj.groups(0))``.
Return a data type from a reflected column, using affinity rules.
SQLite's goal for universal compatibility introduces some complexity
during reflection, as a column's defined type might not actually be a
type that SQLite understands - or indeed, may not be defined *at all*.
Internally, SQLite handles this with a 'data type affinity' for each
column definition, mapping to one of 'TEXT', 'NUMERIC', 'INTEGER',
'REAL', or 'NONE' (raw bits). The algorithm that determines this is
listed in https://www.sqlite.org/datatype3.html section 2.1.
This method allows SQLAlchemy to support that algorithm, while still
providing access to smarter reflection utilities by recognizing
column definitions that SQLite only supports through affinity (like
DATE and DOUBLE).
Format the remote table clause of a CREATE CONSTRAINT clause.
return True if the storage format will automatically imply
a TEXT affinity.
If the storage format contains no non-numeric characters,
it will imply a NUMERIC storage format on SQLite; in this case,
the type will generate its DDL as DATE_CHAR, DATETIME_CHAR,
TIME_CHAR.
.. versionadded:: 1.0.0
.. dialect:: sqlite
:name: SQLite
:full_support: 3.21, 3.28+
:normal_support: 3.12+
:best_effort: 3.7.16+
.. _sqlite_datetime:
Date and Time Types
-------------------
SQLite does not have built-in DATE, TIME, or DATETIME types, and pysqlite does
not provide out of the box functionality for translating values between Python
`datetime` objects and a SQLite-supported format. SQLAlchemy's own
:class:`~sqlalchemy.types.DateTime` and related types provide date formatting
and parsing functionality when SQLite is used. The implementation classes are
:class:`_sqlite.DATETIME`, :class:`_sqlite.DATE` and :class:`_sqlite.TIME`.
These types represent dates and times as ISO formatted strings, which also
nicely support ordering. There's no reliance on typical "libc" internals for
these functions so historical dates are fully supported.
Ensuring Text affinity
^^^^^^^^^^^^^^^^^^^^^^
The DDL rendered for these types is the standard ``DATE``, ``TIME``
and ``DATETIME`` indicators. However, custom storage formats can also be
applied to these types. When the
storage format is detected as containing no alpha characters, the DDL for
these types is rendered as ``DATE_CHAR``, ``TIME_CHAR``, and ``DATETIME_CHAR``,
so that the column continues to have textual affinity.
.. seealso::
`Type Affinity <https://www.sqlite.org/datatype3.html#affinity>`_ -
in the SQLite documentation
.. _sqlite_autoincrement:
SQLite Auto Incrementing Behavior
----------------------------------
Background on SQLite's autoincrement is at: https://sqlite.org/autoinc.html
Key concepts:
* SQLite has an implicit "auto increment" feature that takes place for any
non-composite primary-key column that is specifically created using
"INTEGER PRIMARY KEY" for the type + primary key.
* SQLite also has an explicit "AUTOINCREMENT" keyword, that is **not**
equivalent to the implicit autoincrement feature; this keyword is not
recommended for general use. SQLAlchemy does not render this keyword
unless a special SQLite-specific directive is used (see below). However,
it still requires that the column's type is named "INTEGER".
Using the AUTOINCREMENT Keyword
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To specifically render the AUTOINCREMENT keyword on the primary key column
when rendering DDL, add the flag ``sqlite_autoincrement=True`` to the Table
construct::
Table('sometable', metadata,
Column('id', Integer, primary_key=True),
sqlite_autoincrement=True)
Allowing autoincrement behavior in SQLAlchemy types other than Integer/INTEGER
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
SQLite's typing model is based on naming conventions. Among other things, this
means that any type name which contains the substring ``"INT"`` will be
determined to be of "integer affinity". A type named ``"BIGINT"``,
``"SPECIAL_INT"`` or even ``"XYZINTQPR"``, will be considered by SQLite to be
of "integer" affinity. However, **the SQLite autoincrement feature, whether
implicitly or explicitly enabled, requires that the name of the column's type
is exactly the string "INTEGER"**. Therefore, if an application uses a type
like :class:`.BigInteger` for a primary key, on SQLite this type will need to
be rendered as the name ``"INTEGER"`` when emitting the initial ``CREATE
TABLE`` statement in order for the autoincrement behavior to be available.
One approach to achieve this is to use :class:`.Integer` on SQLite
only using :meth:`.TypeEngine.with_variant`::
table = Table(
"my_table", metadata,
Column("id", BigInteger().with_variant(Integer, "sqlite"), primary_key=True)
)
Another is to use a subclass of :class:`.BigInteger` that overrides its DDL
name to be ``INTEGER`` when compiled against SQLite::
from sqlalchemy import BigInteger
from sqlalchemy.ext.compiler import compiles
class SLBigInteger(BigInteger):
pass
@compiles(SLBigInteger, 'sqlite')
def bi_c(element, compiler, **kw):
return "INTEGER"
@compiles(SLBigInteger)
def bi_c(element, compiler, **kw):
return compiler.visit_BIGINT(element, **kw)
table = Table(
"my_table", metadata,
Column("id", SLBigInteger(), primary_key=True)
)
.. seealso::
:meth:`.TypeEngine.with_variant`
:ref:`sqlalchemy.ext.compiler_toplevel`
`Datatypes In SQLite Version 3 <https://sqlite.org/datatype3.html>`_
.. _sqlite_concurrency:
Database Locking Behavior / Concurrency
---------------------------------------
SQLite is not designed for a high level of write concurrency. The database
itself, being a file, is locked completely during write operations within
transactions, meaning exactly one "connection" (in reality a file handle)
has exclusive access to the database during this period - all other
"connections" will be blocked during this time.
The Python DBAPI specification also calls for a connection model that is
always in a transaction; there is no ``connection.begin()`` method,
only ``connection.commit()`` and ``connection.rollback()``, upon which a
new transaction is to be begun immediately. This may seem to imply
that the SQLite driver would in theory allow only a single filehandle on a
particular database file at any time; however, there are several
factors both within SQLite itself as well as within the pysqlite driver
which loosen this restriction significantly.
However, no matter what locking modes are used, SQLite will still always
lock the database file once a transaction is started and DML (e.g. INSERT,
UPDATE, DELETE) has at least been emitted, and this will block
other transactions at least at the point that they also attempt to emit DML.
By default, the length of time on this block is very short before it times out
with an error.
This behavior becomes more critical when used in conjunction with the
SQLAlchemy ORM. SQLAlchemy's :class:`.Session` object by default runs
within a transaction, and with its autoflush model, may emit DML preceding
any SELECT statement. This may lead to a SQLite database that locks
more quickly than is expected. The locking mode of SQLite and the pysqlite
driver can be manipulated to some degree, however it should be noted that
achieving a high degree of write-concurrency with SQLite is a losing battle.
For more information on SQLite's lack of write concurrency by design, please
see
`Situations Where Another RDBMS May Work Better - High Concurrency
<https://www.sqlite.org/whentouse.html>`_ near the bottom of the page.
The following subsections introduce areas that are impacted by SQLite's
file-based architecture and additionally will usually require workarounds to
work when using the pysqlite driver.
.. _sqlite_isolation_level:
Transaction Isolation Level / Autocommit
----------------------------------------
SQLite supports "transaction isolation" in a non-standard way, along two
axes. One is that of the
`PRAGMA read_uncommitted <https://www.sqlite.org/pragma.html#pragma_read_uncommitted>`_
instruction. This setting can essentially switch SQLite between its
default mode of ``SERIALIZABLE`` isolation, and a "dirty read" isolation
mode normally referred to as ``READ UNCOMMITTED``.
SQLAlchemy ties into this PRAGMA statement using the
:paramref:`_sa.create_engine.isolation_level` parameter of
:func:`_sa.create_engine`.
Valid values for this parameter when used with SQLite are ``"SERIALIZABLE"``
and ``"READ UNCOMMITTED"`` corresponding to a value of 0 and 1, respectively.
SQLite defaults to ``SERIALIZABLE``, however its behavior is impacted by
the pysqlite driver's default behavior.
When using the pysqlite driver, the ``"AUTOCOMMIT"`` isolation level is also
available, which will alter the pysqlite connection using the ``.isolation_level``
attribute on the DBAPI connection and set it to None for the duration
of the setting.
.. versionadded:: 1.3.16 added support for SQLite AUTOCOMMIT isolation level
when using the pysqlite / sqlite3 SQLite driver.
The other axis along which SQLite's transactional locking is impacted is
via the nature of the ``BEGIN`` statement used. The three varieties
are "deferred", "immediate", and "exclusive", as described at
`BEGIN TRANSACTION <https://sqlite.org/lang_transaction.html>`_. A straight
``BEGIN`` statement uses the "deferred" mode, where the database file is
not locked until the first read or write operation, and read access remains
open to other transactions until the first write operation. But again,
it is critical to note that the pysqlite driver interferes with this behavior
by *not even emitting BEGIN* until the first write operation.
.. warning::
SQLite's transactional scope is impacted by unresolved
issues in the pysqlite driver, which defers BEGIN statements to a greater
degree than is often feasible. See the section :ref:`pysqlite_serializable`
for techniques to work around this behavior.
.. seealso::
:ref:`dbapi_autocommit`
SAVEPOINT Support
----------------------------
SQLite supports SAVEPOINTs, which only function once a transaction is
begun. SQLAlchemy's SAVEPOINT support is available using the
:meth:`_engine.Connection.begin_nested` method at the Core level, and
:meth:`.Session.begin_nested` at the ORM level. However, SAVEPOINTs
won't work at all with pysqlite unless workarounds are taken.
.. warning::
SQLite's SAVEPOINT feature is impacted by unresolved
issues in the pysqlite driver, which defers BEGIN statements to a greater
degree than is often feasible. See the section :ref:`pysqlite_serializable`
for techniques to work around this behavior.
Transactional DDL
----------------------------
The SQLite database supports transactional :term:`DDL` as well.
In this case, the pysqlite driver is not only failing to start transactions,
it also is ending any existing transaction when DDL is detected, so again,
workarounds are required.
.. warning::
SQLite's transactional DDL is impacted by unresolved issues
in the pysqlite driver, which fails to emit BEGIN and additionally
forces a COMMIT to cancel any transaction when DDL is encountered.
See the section :ref:`pysqlite_serializable`
for techniques to work around this behavior.
.. _sqlite_foreign_keys:
Foreign Key Support
-------------------
SQLite supports FOREIGN KEY syntax when emitting CREATE statements for tables,
however by default these constraints have no effect on the operation of the
table.
Constraint checking on SQLite has three prerequisites:
* At least version 3.6.19 of SQLite must be in use
* The SQLite library must be compiled *without* the SQLITE_OMIT_FOREIGN_KEY
or SQLITE_OMIT_TRIGGER symbols enabled.
* The ``PRAGMA foreign_keys = ON`` statement must be emitted on all
connections before use -- including the initial call to
:meth:`sqlalchemy.schema.MetaData.create_all`.
SQLAlchemy allows for the ``PRAGMA`` statement to be emitted automatically for
new connections through the usage of events::
from sqlalchemy.engine import Engine
from sqlalchemy import event
@event.listens_for(Engine, "connect")
def set_sqlite_pragma(dbapi_connection, connection_record):
cursor = dbapi_connection.cursor()
cursor.execute("PRAGMA foreign_keys=ON")
cursor.close()
.. warning::
When SQLite foreign keys are enabled, it is **not possible**
to emit CREATE or DROP statements for tables that contain
mutually-dependent foreign key constraints;
to emit the DDL for these tables requires that ALTER TABLE be used to
create or drop these constraints separately, for which SQLite has
no support.
.. seealso::
`SQLite Foreign Key Support <https://www.sqlite.org/foreignkeys.html>`_
- on the SQLite web site.
:ref:`event_toplevel` - SQLAlchemy event API.
:ref:`use_alter` - more information on SQLAlchemy's facilities for handling
mutually-dependent foreign key constraints.
.. _sqlite_on_conflict_ddl:
ON CONFLICT support for constraints
-----------------------------------
.. seealso:: This section describes the :term:`DDL` version of "ON CONFLICT" for
SQLite, which occurs within a CREATE TABLE statement. For "ON CONFLICT" as
applied to an INSERT statement, see :ref:`sqlite_on_conflict_insert`.
SQLite supports a non-standard DDL clause known as ON CONFLICT which can be applied
to primary key, unique, check, and not null constraints. In DDL, it is
rendered either within the "CONSTRAINT" clause or within the column definition
itself depending on the location of the target constraint. To render this
clause within DDL, the extension parameter ``sqlite_on_conflict`` can be
specified with a string conflict resolution algorithm within the
:class:`.PrimaryKeyConstraint`, :class:`.UniqueConstraint`,
:class:`.CheckConstraint` objects. Within the :class:`_schema.Column` object,
there
are individual parameters ``sqlite_on_conflict_not_null``,
``sqlite_on_conflict_primary_key``, ``sqlite_on_conflict_unique`` which each
correspond to the three types of relevant constraint types that can be
indicated from a :class:`_schema.Column` object.
.. seealso::
`ON CONFLICT <https://www.sqlite.org/lang_conflict.html>`_ - in the SQLite
documentation
.. versionadded:: 1.3
The ``sqlite_on_conflict`` parameters accept a string argument which is just
the resolution name to be chosen, which on SQLite can be one of ROLLBACK,
ABORT, FAIL, IGNORE, and REPLACE. For example, to add a UNIQUE constraint
that specifies the IGNORE algorithm::
some_table = Table(
'some_table', metadata,
Column('id', Integer, primary_key=True),
Column('data', Integer),
UniqueConstraint('id', 'data', sqlite_on_conflict='IGNORE')
)
The above renders CREATE TABLE DDL as::
CREATE TABLE some_table (
id INTEGER NOT NULL,
data INTEGER,
PRIMARY KEY (id),
UNIQUE (id, data) ON CONFLICT IGNORE
)
When using the :paramref:`_schema.Column.unique`
flag to add a UNIQUE constraint
to a single column, the ``sqlite_on_conflict_unique`` parameter can
be added to the :class:`_schema.Column` as well, which will be added to the
UNIQUE constraint in the DDL::
some_table = Table(
'some_table', metadata,
Column('id', Integer, primary_key=True),
Column('data', Integer, unique=True,
sqlite_on_conflict_unique='IGNORE')
)
rendering::
CREATE TABLE some_table (
id INTEGER NOT NULL,
data INTEGER,
PRIMARY KEY (id),
UNIQUE (data) ON CONFLICT IGNORE
)
To apply the FAIL algorithm for a NOT NULL constraint,
``sqlite_on_conflict_not_null`` is used::
some_table = Table(
'some_table', metadata,
Column('id', Integer, primary_key=True),
Column('data', Integer, nullable=False,
sqlite_on_conflict_not_null='FAIL')
)
this renders the column inline ON CONFLICT phrase::
CREATE TABLE some_table (
id INTEGER NOT NULL,
data INTEGER NOT NULL ON CONFLICT FAIL,
PRIMARY KEY (id)
)
Similarly, for an inline primary key, use ``sqlite_on_conflict_primary_key``::
some_table = Table(
'some_table', metadata,
Column('id', Integer, primary_key=True,
sqlite_on_conflict_primary_key='FAIL')
)
SQLAlchemy renders the PRIMARY KEY constraint separately, so the conflict
resolution algorithm is applied to the constraint itself::
CREATE TABLE some_table (
id INTEGER NOT NULL,
PRIMARY KEY (id) ON CONFLICT FAIL
)
.. _sqlite_on_conflict_insert:
INSERT...ON CONFLICT (Upsert)
-----------------------------------
.. seealso:: This section describes the :term:`DML` version of "ON CONFLICT" for
SQLite, which occurs within an INSERT statement. For "ON CONFLICT" as
applied to a CREATE TABLE statement, see :ref:`sqlite_on_conflict_ddl`.
From version 3.24.0 onwards, SQLite supports "upserts" (update or insert)
of rows into a table via the ``ON CONFLICT`` clause of the ``INSERT``
statement. A candidate row will only be inserted if that row does not violate
any unique or primary key constraints. In the case of a unique constraint violation, a
secondary action can occur which can be either "DO UPDATE", indicating that
the data in the target row should be updated, or "DO NOTHING", which indicates
to silently skip this row.
Conflicts are determined using columns that are part of existing unique
constraints and indexes. These constraints are identified by stating the
columns and conditions that comprise the indexes.
SQLAlchemy provides ``ON CONFLICT`` support via the SQLite-specific
:func:`_sqlite.insert()` function, which provides
the generative methods :meth:`_sqlite.Insert.on_conflict_do_update`
and :meth:`_sqlite.Insert.on_conflict_do_nothing`:
.. sourcecode:: pycon+sql
>>> from sqlalchemy.dialects.sqlite import insert
>>> insert_stmt = insert(my_table).values(
... id='some_existing_id',
... data='inserted value')
>>> do_update_stmt = insert_stmt.on_conflict_do_update(
... index_elements=['id'],
... set_=dict(data='updated value')
... )
>>> print(do_update_stmt)
{opensql}INSERT INTO my_table (id, data) VALUES (?, ?)
ON CONFLICT (id) DO UPDATE SET data = ?{stop}
>>> do_nothing_stmt = insert_stmt.on_conflict_do_nothing(
... index_elements=['id']
... )
>>> print(do_nothing_stmt)
{opensql}INSERT INTO my_table (id, data) VALUES (?, ?)
ON CONFLICT (id) DO NOTHING
.. versionadded:: 1.4
.. seealso::
`Upsert
<https://sqlite.org/lang_UPSERT.html>`_
- in the SQLite documentation.
Specifying the Target
^^^^^^^^^^^^^^^^^^^^^
Both methods supply the "target" of the conflict using column inference:
* The :paramref:`_sqlite.Insert.on_conflict_do_update.index_elements` argument
specifies a sequence containing string column names, :class:`_schema.Column`
objects, and/or SQL expression elements, which would identify a unique index
or unique constraint.
* When using :paramref:`_sqlite.Insert.on_conflict_do_update.index_elements`
to infer an index, a partial index can be inferred by also specifying the
:paramref:`_sqlite.Insert.on_conflict_do_update.index_where` parameter:
.. sourcecode:: pycon+sql
>>> stmt = insert(my_table).values(user_email='a@b.com', data='inserted data')
>>> do_update_stmt = stmt.on_conflict_do_update(
... index_elements=[my_table.c.user_email],
... index_where=my_table.c.user_email.like('%@gmail.com'),
... set_=dict(data=stmt.excluded.data)
... )
>>> print(do_update_stmt)
{opensql}INSERT INTO my_table (data, user_email) VALUES (?, ?)
ON CONFLICT (user_email)
WHERE user_email LIKE '%@gmail.com'
DO UPDATE SET data = excluded.data
>>>
The SET Clause
^^^^^^^^^^^^^^^
``ON CONFLICT...DO UPDATE`` is used to perform an update of the already
existing row, using any combination of new values as well as values
from the proposed insertion. These values are specified using the
:paramref:`_sqlite.Insert.on_conflict_do_update.set_` parameter. This
parameter accepts a dictionary which consists of direct values
for UPDATE:
.. sourcecode:: pycon+sql
>>> stmt = insert(my_table).values(id='some_id', data='inserted value')
>>> do_update_stmt = stmt.on_conflict_do_update(
... index_elements=['id'],
... set_=dict(data='updated value')
... )
>>> print(do_update_stmt)
{opensql}INSERT INTO my_table (id, data) VALUES (?, ?)
ON CONFLICT (id) DO UPDATE SET data = ?
.. warning::
The :meth:`_sqlite.Insert.on_conflict_do_update` method does **not** take
into account Python-side default UPDATE values or generation functions,
e.g. those specified using :paramref:`_schema.Column.onupdate`. These
values will not be exercised for an ON CONFLICT style of UPDATE, unless
they are manually specified in the
:paramref:`_sqlite.Insert.on_conflict_do_update.set_` dictionary.
Updating using the Excluded INSERT Values
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In order to refer to the proposed insertion row, the special alias
:attr:`~.sqlite.Insert.excluded` is available as an attribute on
the :class:`_sqlite.Insert` object; this object creates an "excluded." prefix
on a column, that informs the DO UPDATE to update the row with the value that
would have been inserted had the constraint not failed:
.. sourcecode:: pycon+sql
>>> stmt = insert(my_table).values(
... id='some_id',
... data='inserted value',
... author='jlh'
... )
>>> do_update_stmt = stmt.on_conflict_do_update(
... index_elements=['id'],
... set_=dict(data='updated value', author=stmt.excluded.author)
... )
>>> print(do_update_stmt)
{opensql}INSERT INTO my_table (id, data, author) VALUES (?, ?, ?)
ON CONFLICT (id) DO UPDATE SET data = ?, author = excluded.author
Additional WHERE Criteria
^^^^^^^^^^^^^^^^^^^^^^^^^
The :meth:`_sqlite.Insert.on_conflict_do_update` method also accepts
a WHERE clause using the :paramref:`_sqlite.Insert.on_conflict_do_update.where`
parameter, which will limit those rows which receive an UPDATE:
.. sourcecode:: pycon+sql
>>> stmt = insert(my_table).values(
... id='some_id',
... data='inserted value',
... author='jlh'
... )
>>> on_update_stmt = stmt.on_conflict_do_update(
... index_elements=['id'],
... set_=dict(data='updated value', author=stmt.excluded.author),
... where=(my_table.c.status == 2)
... )
>>> print(on_update_stmt)
{opensql}INSERT INTO my_table (id, data, author) VALUES (?, ?, ?)
ON CONFLICT (id) DO UPDATE SET data = ?, author = excluded.author
WHERE my_table.status = ?
Skipping Rows with DO NOTHING
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
``ON CONFLICT`` may be used to skip inserting a row entirely
if any conflict with a unique constraint occurs; below this is illustrated
using the :meth:`_sqlite.Insert.on_conflict_do_nothing` method:
.. sourcecode:: pycon+sql
>>> stmt = insert(my_table).values(id='some_id', data='inserted value')
>>> stmt = stmt.on_conflict_do_nothing(index_elements=['id'])
>>> print(stmt)
{opensql}INSERT INTO my_table (id, data) VALUES (?, ?) ON CONFLICT (id) DO NOTHING
If ``DO NOTHING`` is used without specifying any columns or constraint,
it has the effect of skipping the INSERT for any unique violation which
occurs:
.. sourcecode:: pycon+sql
>>> stmt = insert(my_table).values(id='some_id', data='inserted value')
>>> stmt = stmt.on_conflict_do_nothing()
>>> print(stmt)
{opensql}INSERT INTO my_table (id, data) VALUES (?, ?) ON CONFLICT DO NOTHING
.. _sqlite_type_reflection:
Type Reflection
---------------
SQLite types are unlike those of most other database backends, in that
the string name of the type usually does not correspond to a "type" in a
one-to-one fashion. Instead, SQLite links per-column typing behavior
to one of five so-called "type affinities" based on a string matching
pattern for the type.
SQLAlchemy's reflection process, when inspecting types, uses a simple
lookup table to link the keywords returned to provided SQLAlchemy types.
This lookup table is present within the SQLite dialect as it is for all
other dialects. However, the SQLite dialect has a different "fallback"
routine for when a particular type name is not located in the lookup map;
it instead implements the SQLite "type affinity" scheme located at
https://www.sqlite.org/datatype3.html section 2.1.
The provided typemap will make direct associations from an exact string
name match for the following types:
:class:`_types.BIGINT`, :class:`_types.BLOB`,
:class:`_types.BOOLEAN`, :class:`_types.CHAR`,
:class:`_types.DATE`, :class:`_types.DATETIME`,
:class:`_types.DECIMAL`, :class:`_types.FLOAT`,
:class:`_types.INTEGER`, :class:`_types.NUMERIC`,
:class:`_types.REAL`, :class:`_types.SMALLINT`,
:class:`_types.TEXT`, :class:`_types.TIME`,
:class:`_types.TIMESTAMP`, :class:`_types.VARCHAR`,
:class:`_types.NVARCHAR`, :class:`_types.NCHAR`
When a type name does not match one of the above types, the "type affinity"
lookup is used instead:
* :class:`_types.INTEGER` is returned if the type name includes the
string ``INT``
* :class:`_types.TEXT` is returned if the type name includes the
string ``CHAR``, ``CLOB`` or ``TEXT``
* :class:`_types.NullType` is returned if the type name includes the
string ``BLOB``
* :class:`_types.REAL` is returned if the type name includes the string
``REAL``, ``FLOA`` or ``DOUB``.
* Otherwise, the :class:`_types.NUMERIC` type is used.
.. versionadded:: 0.9.3 Support for SQLite type affinity rules when reflecting
columns.
.. _sqlite_partial_index:
Partial Indexes
---------------
A partial index, e.g. one which uses a WHERE clause, can be specified
with the DDL system using the argument ``sqlite_where``::
tbl = Table('testtbl', m, Column('data', Integer))
idx = Index('test_idx1', tbl.c.data,
sqlite_where=and_(tbl.c.data > 5, tbl.c.data < 10))
The index will be rendered at create time as::
CREATE INDEX test_idx1 ON testtbl (data)
WHERE data > 5 AND data < 10
.. versionadded:: 0.9.9
.. _sqlite_dotted_column_names:
Dotted Column Names
-------------------
Using table or column names that explicitly have periods in them is
**not recommended**. While this is generally a bad idea for relational
databases in general, as the dot is a syntactically significant character,
the SQLite driver up until version **3.10.0** of SQLite has a bug which
requires that SQLAlchemy filter out these dots in result sets.
.. versionchanged:: 1.1
The following SQLite issue has been resolved as of version 3.10.0
of SQLite. SQLAlchemy as of **1.1** automatically disables its internal
workarounds based on detection of this version.
The bug, entirely outside of SQLAlchemy, can be illustrated thusly::
import sqlite3
assert sqlite3.sqlite_version_info < (3, 10, 0), "bug is fixed in this version"
conn = sqlite3.connect(":memory:")
cursor = conn.cursor()
cursor.execute("create table x (a integer, b integer)")
cursor.execute("insert into x (a, b) values (1, 1)")
cursor.execute("insert into x (a, b) values (2, 2)")
cursor.execute("select x.a, x.b from x")
assert [c[0] for c in cursor.description] == ['a', 'b']
cursor.execute('''
select x.a, x.b from x where a=1
union
select x.a, x.b from x where a=2
''')
assert [c[0] for c in cursor.description] == ['a', 'b'], \
[c[0] for c in cursor.description]
The second assertion fails::
Traceback (most recent call last):
File "test.py", line 19, in <module>
[c[0] for c in cursor.description]
AssertionError: ['x.a', 'x.b']
Where above, the driver incorrectly reports the names of the columns
including the name of the table, which is entirely inconsistent vs.
when the UNION is not present.
SQLAlchemy relies upon column names being predictable in how they match
to the original statement, so the SQLAlchemy dialect has no choice but
to filter these out::
from sqlalchemy import create_engine
eng = create_engine("sqlite://")
conn = eng.connect()
conn.exec_driver_sql("create table x (a integer, b integer)")
conn.exec_driver_sql("insert into x (a, b) values (1, 1)")
conn.exec_driver_sql("insert into x (a, b) values (2, 2)")
result = conn.exec_driver_sql("select x.a, x.b from x")
assert result.keys() == ["a", "b"]
result = conn.exec_driver_sql('''
select x.a, x.b from x where a=1
union
select x.a, x.b from x where a=2
''')
assert result.keys() == ["a", "b"]
Note that above, even though SQLAlchemy filters out the dots, *both
names are still addressable*::
>>> row = result.first()
>>> row["a"]
1
>>> row["x.a"]
1
>>> row["b"]
1
>>> row["x.b"]
1
Therefore, the workaround applied by SQLAlchemy only impacts
:meth:`_engine.CursorResult.keys` and :meth:`.Row.keys()` in the public API. In
the very specific case where an application is forced to use column names that
contain dots, and the functionality of :meth:`_engine.CursorResult.keys` and
:meth:`.Row.keys()` is required to return these dotted names unmodified,
the ``sqlite_raw_colnames`` execution option may be provided, either on a
per-:class:`_engine.Connection` basis::
result = conn.execution_options(sqlite_raw_colnames=True).exec_driver_sql('''
select x.a, x.b from x where a=1
union
select x.a, x.b from x where a=2
''')
assert result.keys() == ["x.a", "x.b"]
or on a per-:class:`_engine.Engine` basis::
engine = create_engine("sqlite://", execution_options={"sqlite_raw_colnames": True})
When using the per-:class:`_engine.Engine` execution option, note that
**Core and ORM queries that use UNION may not function properly**.
SQLite-specific table options
-----------------------------
One option for CREATE TABLE is supported directly by the SQLite
dialect in conjunction with the :class:`_schema.Table` construct:
* ``WITHOUT ROWID``::
Table("some_table", metadata, ..., sqlite_with_rowid=False)
.. seealso::
`SQLite CREATE TABLE options
<https://www.sqlite.org/lang_createtable.html>`_
sqlite/base.py Copyright (C) 2005-2022 the SQLAlchemy authors and contributors <see AUTHORS file> This module is part of SQLAlchemy and is released under the MIT License: https://www.opensource.org/licenses/mit-license.php noqa noqa noqa noqa noqa noqa noqa noqa noqa noqa noqa noqa noqa noqa sqlite has no "FOR UPDATE" AFAICT slightly old SQLite versions don't seem to be able to handle the empty set impl create a list of column assignment clauses as tuples check for names that don't match columns for columns with sqlite_autoincrement=True, the PRIMARY KEY constraint can only be inline with the column itself. note this name provides NUMERIC affinity, not TEXT. should not be an issue unless the JSON value consists of a single numeric value. JSONTEXT can be used if this case is required. TODO: detect SQLite version 3.10.0 or greater; see [ticket:3633] adjust for dotted column names. SQLite in the case of UNION may store col names as "tablename.colname", or if using an attached database, "database.tablename.colname", in cursor.description SQlite supports "DEFAULT VALUES" but *does not* support "VALUES (DEFAULT)" this flag used by pysqlite dialect, and perhaps others in the future, to indicate the driver is handling date/timestamp conversions (and perhaps datetime/time as well on some hypothetical driver ?) NOTE: python 3.7 on fedora for me has SQLite 3.34.1. These version checks are getting very stale. 
https://www.sqlite.org/releaselog/3_7_11.html see https://www.sqlalchemy.org/trac/ticket/2568 as well as https://www.sqlite.org/src/info/600482d161 https://www.sqlite.org/changes.html#version_3_3_3 "Optional READ UNCOMMITTED isolation (instead of the default isolation level of SERIALIZABLE) and table level locking when database connections share a common cache." pre-SQLite 3.3.0 default to 0 computed columns are treated as hidden, they require table_xinfo hidden has value 0 for normal columns, 1 for hidden columns, 2 for computed virtual columns and 3 for computed stored columns https://www.sqlite.org/src/info/069351b85f9a706f60d3e98fbc8aaf40c374356b967c0464aede30ead3d9d18b the type of a column "cc INTEGER GENERATED ALWAYS AS (1 + 42)" somehow is "INTEGER GENERATED ALWAYS" sqlite makes this *extremely difficult*. First, use the pragma to get the actual FKs. no referred column, which means it was not named in the original DDL. The referred columns of the foreign key constraint are therefore the primary key of the referred table. note that if table doesn't exist, we still get back a record, just it has no columns in it note we use this list only if this is the first column in the constraint. for subsequent columns we ignore the list and append "rcol" if present. then, parse the actual SQL and attempt to find DDL that matches the names as well. SQLite saves the DDL in whatever format it was typed in as, so need to be liberal here. system tables, etc. assume the remainders are the unnamed, inline constraints, just use them as is as it's extremely difficult to parse inline constraints we need to match inlines as well, as we seek to differentiate a UNIQUE constraint from a UNIQUE INDEX, even though these are kind of the same thing :) NOTE: auto_index_by_sig might not be empty here, the PRIMARY KEY may have an entry.
NOTE: we aren't using re.S here because we actually are taking advantage of each CHECK constraint being all on one line in the table definition in order to delineate. This necessarily makes assumptions as to how the CREATE TABLE was emitted. ignore implicit primary key index. https://www.mail-archive.com/sqlite-users@sqlite.org/msg30517.html loop thru unique indexes to get the column names. because PRAGMA looks in all attached databases if no schema given, need to specify "main" schema, however since we want 'temp' tables in the same namespace as 'main', need to run the PRAGMA twice work around SQLite issue whereby cursor.description is blank when PRAGMA returns no rows: https://www.sqlite.org/cvstrac/tktview?tn=1884 | 37,816 | en | 0.709143 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.