id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
/gwydion-0.1.zip/gwydion-0.1/gwydion/base.py | from abc import ABC, abstractmethod
from inspect import getfullargspec
import numpy as np
import matplotlib.pyplot as plt
from gwydion.exceptions import GwydionError
class Base(ABC):
    """
    Base ABC object to be subclassed in making Gwydion classes.
    Cannot be used as a class by itself, must be subclassed.
    Parameters
    ----------
    N : Integer
        Length of arrays to be returned via the data method.
    xlim : Tuple of floats or integers.
        (Min, Max) values for the x-data.
    rand : Boolean.
        Choose whether the y values should have some random numbers added to them. Defaults to True.
    rand_factor : Float or integer.
        The amplitude of random numbers added to the y-data. If rand=False, has no use. Defaults to 0.5.
    seed : Integer or None.
        Used to seed the RNG if repeatable results are required. Defaults to None (and thus no seeding).
    """
    # NOTE(review): the docstring documents a ``rand`` parameter, but
    # __init__ only accepts N, xlim, rand_factor and seed -- presumably
    # ``rand`` is translated to rand_factor by subclasses; confirm.

    def __init__(self, N, xlim, rand_factor, seed):
        super().__init__()
        self.N = N
        self.seed = seed
        # Private RandomState so draws are reproducible when ``seed`` is given.
        try:
            self.random = np.random.RandomState(self.seed)
        except Exception as e:
            raise GwydionError('Setting the random seed has failed.') from e
        self.xlim = xlim
        # A rand_factor of None means "no noise".
        self.rand_factor = rand_factor if rand_factor is not None else 0

    @property
    def r(self):
        # Uniform noise in [-rand_factor, +rand_factor], one value per sample.
        try:
            rand = self.rand_factor * (2 * self.random.rand(self.N) - 1)
        except Exception as e:
            raise GwydionError('Unable to create randomised data.') from e
        return rand

    @property
    def data(self):
        # x is linearly spaced over xlim; y = func(x) plus the noise ``r``.
        try:
            x = np.linspace(*self.xlim, num=self.N)
        except Exception as e:
            raise GwydionError('Unable to create x-data.') from e
        try:
            y = self.func(x)
        except Exception as e:
            raise GwydionError('Unable to create y-data.') from e
        r = self.r
        return x, y + r

    def plot(self, *args, ax=None, **kwargs):
        # Plot onto the given axes; create a fresh figure when ax is None.
        x, y = self.data
        if ax is None:
            fig, ax = plt.subplots()
        ax.plot(x, y, *args, **kwargs)
        return ax

    @abstractmethod
    def set_variables(self, *args):
        # Subclasses set their function-specific parameters here.
        pass

    @abstractmethod
    def func(self):
        # Subclasses implement y = f(x) here.
        pass

    def __str__(self):
        s = '<{s.__class__.__name__} : N={s.N}, rand_factor={s.rand_factor}>'
        return s.format(s=self)

    def __repr__(self):
        # Constructor-like representation built from instance attributes,
        # restricted to names appearing in the subclass __init__ signature.
        v = vars(self)
        spec = getfullargspec(self.__class__)
        s = '{}(' + ', '.join(['{}={}'.format(key, val) for key, val in v.items() if key in spec.args]) + ')'
        return s.format(self.__class__.__name__) | PypiClean
/Mcdp-0.2.1.tar.gz/Mcdp-0.2.1/mcdp/context.py | import asyncio
import warnings
from pathlib import Path
from collections import ChainMap, UserList, defaultdict
from typing import Any, ClassVar, DefaultDict, Dict, List, Literal, Optional, Callable, Union, Type
from .typings import McdpBaseModel, McdpVar
from .config import get_config, get_version
from .aio_stream import Stream, T_Path, wraps
from .counter import get_counter
from .exceptions import McdpError, McdpValueError
class StackCache(UserList):
    """A capacity-bounded stack of Context objects.

    Appending activates the new context; once the configured capacity is
    exceeded the oldest entries are deactivated (their streams closed).
    Popping deactivates the removed context and reopens the new top in
    append mode if its stream is closed.
    """

    __slots__ = ("_capacity",)

    def __init__(self, capacity: int = 12):
        # Reject capacities outside the supported 2..128 window.
        if capacity <= 1 or capacity > 128:
            raise ValueError(
                f"expect the capacity ranging from 2 to 128, but get {capacity}"
            )
        self._capacity = capacity
        super().__init__()

    async def append(self, env: "Context") -> None:
        """Push ``env`` and activate it; deactivate overflowing entries."""
        super().append(env)
        await env.activate()
        excess = len(self) - self._capacity
        if excess > 0:
            # Entries beyond the capacity are deactivated but remain in the
            # list, matching the original semantics.
            for stale in self[:excess]:
                await stale.deactivate()

    async def pop(self) -> "Context":
        """Pop and deactivate the top; reopen the new top when necessary."""
        top: "Context" = super().pop()
        await top.deactivate()
        if self and not self[-1].writable():
            await self[-1].activate(True)
        return top

    async def clear(self) -> None:
        """Deactivate every stacked context, then empty the list."""
        for env in self:
            await env.deactivate()
        super().clear()
class EnvMethod:
    """Descriptor turning a function into an "environment method".

    Accessed through an instance it behaves like an ordinary bound method.
    Accessed through the class it binds to ``<class>.top`` (the currently
    active context) instead, raising when no context is active.
    """

    __slots__ = ("__func__", "use_async")

    def __init__(self, func: Callable):
        self.__func__ = func
        # Remember whether the wrapped function must be awaited.
        self.use_async = asyncio.iscoroutinefunction(func)

    def __get__(self, instance: Any, owner: "Context") -> Callable:
        if instance is None:
            # Class-level access: substitute the current top context.
            instance = owner.top
            if not instance:
                raise McdpContextError("invalid current context")
        if not self.use_async:
            @wraps(self.__func__)
            def wrapper(*args, **kw):
                return self.__func__(instance, *args, **kw)
            return wrapper

        @wraps(self.__func__)
        async def wrapper(*args, **kw):
            return await self.__func__(instance, *args, **kw)
        return wrapper
class ContextEnv(McdpVar):
    """Describes one environment attached to a Context (mcfunction file)."""

    __slots__ = ["env_type"]
    # Directory under which environment files are created; assigned by
    # ContextMeta.init().
    path: ClassVar[Path]
    # Per-env-type counter used to generate unique file names.
    env_counter: DefaultDict[str, int] = defaultdict(lambda:0)

    def __init__(self, env_type: str) -> None:
        self.env_type = env_type

    def init(self) -> None:
        # Write a standard header into the freshly opened function file.
        config = get_config()
        comment(
            f"Datapack {config.name} built by Mcdp.",
            f"Supported Minecraft version: {config.version}({get_version(config.version)})",
        )
        newline()

    def decorate_command(self, cmd: str) -> str:
        # Hook for subclasses; the base environment leaves commands untouched.
        return cmd

    def creat_stream(self) -> "Context":
        # NOTE(review): "creat_stream" looks like a typo for "create_stream",
        # but the name is part of the public interface, so it is kept.
        file_name = self.env_type + hex(self.env_counter[self.env_type])
        self.env_counter[self.env_type] += 1
        return Context(file_name, root_path=self.path, envs=self)
class ContextMeta(type):
    """
    Metaclass of Context.
    Support for the sentence of 'with' and the initialization
    of Context.
    """

    MAX_OPENED: int = 8
    stack: StackCache
    environments: list
    enter: staticmethod
    leave: staticmethod

    def init(self, path: T_Path) -> None:
        """Resolve the functions directory and the environment directory."""
        self.path = Path(path, "functions").resolve()
        ContextEnv.path = self.path / "envs"

    @property
    def top(self) -> "Context":
        """Return the currently active Context (top of the stack)."""
        if len(self.stack) < 1:
            raise McdpContextError("Class 'Context' should be inited before used.")
        return self.stack[-1]

    async def __aenter__(self) -> "ContextMeta":
        """
        Init the datapack.
        """
        default_env = self("__init__", root_path=self.path, envs=ContextEnv("__init__"))
        await self.stack.append(default_env)
        comment("This is the initize function.")
        newline(2)
        self.enter()
        insert("tag @e[tag=Mcdp_stack] add Mcdp_home")
        TagManager("functions", namespace="minecraft")
        TagManager("functions", namespace=get_namespace())
        insert(f"function {get_namespace()}:__init_score__")
        return self

    async def __aexit__(self, exc_type, exc_ins, traceback) -> None:
        """Close every open context and forget the datapack path."""
        await self.stack.clear()
        del self.path

    def __str__(self) -> str:
        # Bug fix: the original f-string opened with '<' but never closed
        # with '>', yielding e.g. "<Context with env foo".
        return f"<{self.__name__} with env {self.top.name}>"
class Context(McdpVar, metaclass=ContextMeta):
    """
    Set for async file IO.

    Each Context wraps one .mcfunction output stream plus the list of
    environments that decorate commands written through it.
    """

    __slots__ = ["name", "stream", "environments"]

    stack = StackCache(ContextMeta.MAX_OPENED)
    file_suffix = ".mcfunction"
    path: Path
    top: "Context"
    enter: ClassVar[staticmethod]
    leave: ClassVar[staticmethod]

    def __init__(
            self,
            name: str,
            *,
            root_path: Optional[T_Path] = None,
            envs: Union[ContextEnv, List[ContextEnv], None] = None
    ) -> None:
        self.name = name
        self.stream: Stream = Stream(name + self.file_suffix, root=root_path or self.path)
        # Bug fix: the original default was a mutable ``envs=[]``, so every
        # Context created without envs shared one list and add_env() leaked
        # environments across instances. ``None`` now stands for "no envs";
        # behavior for callers passing an env or a list is unchanged.
        if envs is None:
            envs = []
        elif not isinstance(envs, list):
            envs = [envs, ]
        self.environments = envs

    def write(self, content: str) -> None:
        """Write raw text to the underlying stream."""
        self.stream.write(content)

    def writable(self) -> bool:
        """Whether the underlying stream is currently open for writing."""
        return self.stream.writable()

    async def activate(self, append: bool = False) -> None:
        """Open the stream ('w', or 'a' when ``append``) and run env headers."""
        if not append:
            await self.stream.open()
        else:
            await self.stream.open("a")
        # NOTE(review): headers are written on every activation, including
        # append-reactivation after a pop -- confirm this is intended.
        for env in self.environments:
            env.init()

    async def deactivate(self) -> None:
        """Close the underlying stream."""
        await self.stream.close()

    async def __aenter__(self) -> "Context":
        await self.__class__.stack.append(self)
        return self

    async def __aexit__(self, exc_type, exc_ins, traceback) -> None:
        if (await self.__class__.stack.pop()).name == "__init__":
            raise McdpContextError("Cannot leave the static stack '__init__'.")

    @EnvMethod
    def insert(self, *content: str) -> None:
        """Write one or more commands, each decorated by the environments."""
        if not self.writable():
            raise McdpContextError("fail to insert command.")
        counter = get_counter().commands
        for command in content:
            # Unary plus increments the command counter.
            +counter
            if not command.endswith('\n'):
                command += '\n'
            if command.count('\n') > 1:
                # Multi-line payload: decorate and write line by line.
                l_cmd = command.split('\n')[:-1]
                for c in l_cmd:
                    for env in self.environments:
                        c = env.decorate_command(c)
                    self.write(c + '\n')
            else:
                for env in self.environments:
                    command = env.decorate_command(command)
                self.write(command)

    @EnvMethod
    def comment(self, *content: str) -> None:
        """Write '#'-prefixed comment lines, splitting embedded newlines."""
        if not self.writable():
            raise McdpContextError("fail to add comments.")
        com = []
        for c in content:
            if '\n' in c:
                lc = c.split('\n')
                com.extend(lc)
            else:
                com.append(c)
        self.write("# " + "\n# ".join(com) + '\n')

    @EnvMethod
    def newline(self, line: int = 1) -> None:
        """Write ``line`` blank lines."""
        self.write('\n' * line)

    @EnvMethod
    def add_env(self, env: ContextEnv) -> None:
        """Attach another environment to the active context."""
        self.environments.append(env)

    @EnvMethod
    def pop_env(self) -> ContextEnv:
        """Detach and return the most recently attached environment."""
        return self.environments.pop()

    @classmethod
    def enter_space(cls, name: str) -> None:
        """Descend into a sub-directory for subsequent function files."""
        cls.path = cls.path / name

    @classmethod
    def exit_space(cls) -> None:
        """Ascend one directory level."""
        cls.path = cls.path.parent

    @classmethod
    def get_relative_path(cls) -> Path:
        """Current path relative to the namespace's functions directory."""
        return cls.path.relative_to(Path(get_namespace(), "functions").resolve())

    def __str__(self) -> str:
        return f"<env {self.name} in the context>"
T_tag = Literal["blocks", "entity_types", "items", "fluids", "functions"]
class TagManager(McdpVar):
    """Collects tag -> item lists for one (namespace, tag type) pair and
    writes them out as Minecraft tag JSON files."""

    __slots__ = ["name", "type", "replace", "root_path", "cache"]
    __accessible__ = ["type", "replace", "@item"]

    # Registry of every TagManager, keyed by "namespace:type".
    collection: Dict[str, "TagManager"] = {}

    def __init__(self, type: T_tag, *, namespace: Optional[str] = None, replace: bool = False) -> None:
        self.type = type
        self.replace = replace
        if not namespace:
            namespace = get_namespace()
        self.root_path = Path(namespace, "tags", type).resolve()
        # tag name -> list of namespaced items, flushed by apply_tag().
        self.cache = defaultdict(list)
        self.name = f"{namespace}:{type}"
        self.collect()

    def add(self, tag: str, item: str, *, namaspace: Optional[str] = None) -> None:
        # NOTE(review): "namaspace" is a typo for "namespace", but renaming
        # it would break keyword callers, so the name is kept.
        if not ":" in item:
            # Qualify bare items with the default namespace.
            if not namaspace:
                namaspace = get_namespace()
            item = f"{namaspace}:{item}"
        if item in self.cache[tag]:
            # Item already registered under this tag; warn instead of duplicating.
            warnings.warn(f"Try to add the tag '{tag}' twice.")
        else:
            self.cache[tag].append(item)

    def __getitem__(self, key: str) -> List[str]:
        return self.cache[key]

    def __setitem__(self, key: str, item: str) -> None:
        self.add(key, item)

    def get_tag_data(self, tag: str, replace: bool = False) -> dict:
        """Build the JSON-serializable payload for ``tag``."""
        if not tag in self.cache:
            raise McdpContextError(f"Cannot find tag {tag} in the cache.")
        values = self.cache[tag]
        if not replace:
            replace = self.replace
        return {"replace": replace, "values": values}

    async def apply_tag(self, tag: str, *, replace: bool = False) -> None:
        """Write one tag's JSON file and drop it from the cache."""
        if not tag in self.cache:
            raise McdpContextError(f"Tag {tag} did not defined.")
        async with Stream(tag + ".json", root=self.root_path) as stream:
            await stream.adump(self.get_tag_data(tag, replace))
        del self.cache[tag]

    def apply(self) -> List[asyncio.Task]:
        """Schedule apply_tag for every cached tag; returns the tasks."""
        tl = []
        for tag in self.cache:
            tl.append(asyncio.ensure_future(self.apply_tag(tag)))
        return tl

    def collect(self) -> None:
        # Register this manager in the global collection.
        self.collection[self.name] = self

    @classmethod
    def apply_all(cls) -> asyncio.Future:
        """Schedule every registered manager's pending tags; gather the tasks."""
        tl = []
        for i in cls.collection.values():
            tl.extend(i.apply())
        return asyncio.gather(*tl)

    def __del__(self) -> None:
        # NOTE(review): scheduling tasks from __del__ requires a running
        # event loop at GC time; this may fail at interpreter shutdown.
        if self.cache:
            self.apply()
def insert(*content: str) -> None:
    """Write commands into the currently active context."""
    Context.insert(*content)
def comment(*content: str) -> None:
    """Write comment lines into the active context, when enabled in config."""
    if get_config().pydp.add_comments:
        Context.comment(*content)
def newline(line: int = 1) -> None:
    """Write blank lines into the active context.

    Gated on the same ``add_comments`` switch as comment(), since blank
    lines only matter for readability of commented output.
    """
    if get_config().pydp.add_comments:
        Context.newline(line)
def add_tag(
        tag: str,
        value: Optional[str] = None,
        *,
        namespace: Optional[str] = None,
        type: T_tag = "functions"
) -> None:
    """Register ``value`` under ``tag`` in the matching TagManager.

    A "ns:tag" string overrides ``namespace``. When no value is given and
    the type is "functions", the active context's function path is used.
    """
    if ':' in tag:
        # NOTE(review): maxsplit=2 silently drops anything after a second
        # ':' -- presumably tags contain at most one colon; confirm.
        nt = tag.split(':', 2)
        namespace = nt[0]
        tag = nt[1]
    elif not namespace:
        namespace = get_namespace()
    if not value:
        if type == "functions":
            c = Context.top
            value = str(c.get_relative_path() / c.name)
        else:
            raise McdpError("no value input.")
    m_tag: TagManager = TagManager.collection[f"{namespace}:{type}"]
    m_tag.add(tag, value)
def get_namespace() -> str:
    """Return the datapack namespace from the global config."""
    return get_config().namespace
def enter_stack_ops(func: Callable[[],None]) -> Callable:
    """Decorator registering ``func`` as Context's stack-enter hook."""
    Context.enter = staticmethod(func)
    return func
def leave_stack_ops(func: Callable[[], None]) -> Callable:
    """Decorator registering ``func`` as Context's stack-leave hook."""
    Context.leave = staticmethod(func)
    return func
class McdpContextError(McdpError):
    """Raised for invalid operations on the context stack."""

    __slots__ = ["context", ]

    def __init__(self, *arg: str) -> None:
        # Keep a reference to the Context class for debugging.
        self.context = Context
        # NOTE(review): super(McdpError, self) deliberately skips
        # McdpError.__init__ in the MRO -- confirm this is intended.
        super(McdpError, self).__init__(*arg) | PypiClean
/CustomPipeline-0.0.3-py3-none-any.whl/rpcore/gui/render_mode_selector.py | from __future__ import division
from functools import partial
from panda3d.core import Vec3
from rplibs.yaml import load_yaml_file
from rpcore.native import NATIVE_CXX_LOADED
from rpcore.gui.draggable_window import DraggableWindow
from rpcore.gui.labeled_checkbox import LabeledCheckbox
from rpcore.gui.checkbox_collection import CheckboxCollection
class RenderModeSelector(DraggableWindow):
    """ Window which offers the user to select a render mode to apply """

    def __init__(self, pipeline, parent):
        DraggableWindow.__init__(self, width=690, height=340, parent=parent,
                                 title="Select render mode")
        self._pipeline = pipeline
        # Currently selected render-mode id; "" means the default mode.
        self._selected_mode = ""
        self._create_components()
        self.hide()

    def _create_components(self):
        """ Internal method to init the components """
        DraggableWindow._create_components(self)
        self._content_node = self._node.attach_new_node("content")
        self._populate_content()

    def _populate_content(self):
        """ Populates the windows content """
        self._content_node.node().remove_all_children()
        # Reload config each time the window is opened so its easy to add new
        # render modes
        config = load_yaml_file("/$$rpconfig/debugging.yaml")
        debugger_content = self._content_node.attach_new_node("RenderModes")
        debugger_content.set_z(-20)
        debugger_content.set_x(20)
        # Entries are (label, mode id, cxx only, required plugin, special).
        render_modes = [("Default", "", False, "", False)]
        # Read modes from configuration
        for mode in config["render_modes"]:
            data = [mode["name"], mode["key"]]
            data.append(mode.get("cxx_only", False))
            data.append(mode.get("requires", ""))
            data.append(mode.get("special", False))
            render_modes.append(data)
        collection = CheckboxCollection()
        # Lay checkboxes out in columns of at most 9 entries.
        max_column_height = 9
        for idx, (mode, mode_id, requires_cxx, requires_plugin, special) in enumerate(render_modes):
            offs_y = (idx % max_column_height) * 24 + 35
            offs_x = (idx // max_column_height) * 220
            # Disable modes whose prerequisites (C++ modules / plugins) are
            # not available.
            enabled = True
            if requires_cxx and not NATIVE_CXX_LOADED:
                enabled = False
            if requires_plugin:
                if not self._pipeline.plugin_mgr.is_plugin_enabled(requires_plugin):
                    enabled = False
            box = LabeledCheckbox(
                parent=debugger_content, x=offs_x, y=offs_y, text=mode.upper(),
                text_color=Vec3(0.4), radio=True, chb_checked=(mode_id == self._selected_mode),
                chb_callback=partial(self._set_render_mode, mode_id, special),
                text_size=14, expand_width=230, enabled=enabled)
            collection.add(box.checkbox)

    def _set_render_mode(self, mode_id, special, value):
        """ Callback which gets called when a render mode got selected """
        # Radio-style: ignore the "unchecked" notification of the old box.
        if not value:
            return
        # Remove every previously set _RM_* define before applying the new one.
        to_remove = []
        for define in self._pipeline.stage_mgr.defines:
            if define.startswith("_RM_"):
                to_remove.append(define)
        for define in to_remove:
            del self._pipeline.stage_mgr.defines[define]
        if mode_id == "":
            self._pipeline.stage_mgr.defines["ANY_DEBUG_MODE"] = 0
        else:
            # Don't activate the generic debugging mode for special modes. This
            # is for modes like luminance which expect the scene to be rendered
            # unaltered.
            self._pipeline.stage_mgr.defines["ANY_DEBUG_MODE"] = 0 if special else 1
            self._pipeline.stage_mgr.defines["_RM_" + mode_id] = 1
        self._selected_mode = mode_id
        # Defines only take effect after a shader reload.
        self._pipeline.reload_shaders()

    def toggle(self):
        """ Toggles the visibility of this window """
        if self._visible:
            self.hide()
        else:
            self._populate_content()
            self.show() | PypiClean
/MParT-2.0.2.tar.gz/MParT-2.0.2/docs/source/api/templateconcepts.rst | ==================
Template Concepts
==================
Many of the lower-level classes in MParT are templated to allow for generic implementations. Using templates instead of other programming techniques, like virtual inheritance, makes it simpler to copy these classes to/from a GPU and can sometimes even result in more efficient CPU code. For example, the :code:`MonotoneComponent` class, which uses a generic function :math:`f(x)` to define a monotone function :math:`T_d(x)`, is templated on the type of the :math:`f` function. It is therefore possible to construct a monotone function from any class defining :math:`f(x)`, as long as the class contains the functions (i.e., the interface) expected by :code:`MonotoneComponent`. In the language of generic programming, the necessary interface is a specific `concept <https://en.wikipedia.org/wiki/Concept_(generic_programming)>`_.
.. topic:: Concept
A concept is a set of requirements defining the interface expected by a templated function or class.
Specific concepts used throughout MParT can be found on the following pages.
.. toctree::
concepts/cachedparameterization
concepts/parameterizedfunction
| PypiClean |
/Hopsworks_Integration-0.0.2-py3-none-any.whl/src/service/SimpleFeatureService.py | import json
import logging
import pandas as pd
from sqlalchemy.sql import text
from pathlib import Path
import sys
import os
parent_dir = os.path.dirname(os.getcwd())
sys.path.insert(0,parent_dir)
from src.service import FeatureStoreService as fss, ComplexFeatureService as cfs
from src.database import DbTables
logger = logging.getLogger('logger')
class SimpleFeatureService:
    """Facade over FeatureStoreService / ComplexFeatureService for the
    common feature-group and feature-view operations (build, fetch,
    export, drop)."""

    def __init__(self, config):
        self.conf = config
        # NOTE(review): the parsed JSON is read into a local and discarded;
        # presumably this only validates that the config file is readable.
        with open(self.conf, "r") as jsonfile:
            data = json.load(jsonfile)

    def get_group(self, group_name: str, version: int):
        """Fetch a feature group by name and version (None on error)."""
        try:
            fss_obj = fss.FeatureStoreService(self.conf)
            fg = fss_obj.get_group(group_name, version)
            return fg
        except Exception as e:
            logger.error(e)

    def get_group_features(self, group_name: str, version: int, features):
        """
        :param group_name: Name of the feature group required to be fetched
        :param version: Version of the feature group required to be fetched
        :param features: Comma-separated list of features of the feature group required to be fetched
        :return: returns dataframe of required features
        """
        try:
            fss_obj = fss.FeatureStoreService(self.conf)
            fg = fss_obj.get_group(group_name, version)
            query = fg.select(features)
            df = query.read()
            print(df.head())
            return df
        except Exception as e:
            logger.error(e)

    def build_group_from_db(self, group_name: str, version: int, description: str, table_name: str, primary_key: [],
                            partition_key: [], features: [], derived_features: []):
        """
        :param group_name: Name of the feature group to be built
        :param version: Version of the feature group to be built
        :param description: Short description of the feature group to be built
        :param table_name: Name of the table name which would be used to create the feature group
        :param primary_key: List of columns that would be designated as prime attributes in the feature group
        :param partition_key: List of columns that would be designated as partition key in the feature group
        :param features: List of table columns that would be used as features in the feature group
        :param derived_features: List of derived columns from the table that would be used as features in the feature group
        :return: returns nothing
        """
        try:
            dbtables_obj = DbTables.DbTables(self.conf)
            conn = dbtables_obj.get_db_conn()
            logger.info('Getting table details and data from the database')
            query = text('SELECT ' + ','.join(features + derived_features).strip(',') + ' FROM ' + table_name)
            df = dbtables_obj.run_query(conn, query)
            fss_obj = fss.FeatureStoreService(self.conf)
            fss_obj.build_group(group_name, version, description, primary_key, partition_key, features, df)
        except Exception as e:
            logger.error(e)
        finally:
            # NOTE(review): if DbTables() itself raises, dbtables_obj/conn are
            # unbound here and this line raises NameError -- confirm/guard.
            dbtables_obj.close_db_conn(conn)

    def build_group_from_view(self, group_name: str, group_version: int, description: str, view_name: str,
                              view_version: int, features: [],
                              primary_key: [], partition_key: []):
        """Build a feature group from a subset of an existing feature view."""
        try:
            logger.info('Getting details from the feature view')
            fss_obj = fss.FeatureStoreService(self.conf)
            fv = fss_obj.get_view(view_name, view_version)
            df = fv.query.read()
            # View columns are lower-cased by the store.
            df_filtered = df[[x.lower() for x in features]]
            fss_obj.build_group(group_name, group_version, description, primary_key, partition_key, df_filtered.columns.tolist(),
                                df_filtered)
        except Exception as e:
            logger.error(e)
        finally:
            pass

    def build_group_from_file(self, group_name: str, group_version: int, description: str, file_path: str,
                              file_name: str, sep: str, index: str, header: str, quote_char: str, escape_char: str,
                              primary_key: [], partition_key: [], features: []):
        """Build a feature group from a delimited text file."""
        try:
            logger.info('Getting details from the file')
            fss_obj = fss.FeatureStoreService(self.conf)
            # NOTE(review): ``index`` is eval()'d to get the index_col value;
            # only trusted configuration should reach this parameter.
            df = pd.read_csv(file_path + file_name, sep=sep, index_col=eval(index), header=header, quotechar=quote_char,
                             escapechar=escape_char)
            df_filtered = df[[x.lower() for x in features]]
            fss_obj.build_group(group_name, group_version, description, primary_key, partition_key, df_filtered.columns.tolist(),
                                df_filtered)
        except Exception as e:
            logger.error(e)
        finally:
            pass

    def export_group_to_file(self, group_name: str, version: int, file_path: str, file_name: str):
        """Dump a feature group to a '|'-separated CSV; returns the group."""
        try:
            fss_obj = fss.FeatureStoreService(self.conf)
            fg = fss_obj.get_group(group_name, version)
            query = fg.select_all()
            df = query.read()
            out_file = file_name
            out_dir = Path(file_path)
            out_dir.mkdir(parents=True, exist_ok=True)
            df.to_csv(out_dir / out_file, sep='|', index=False, header=True, quotechar='"', escapechar="\"")
            return fg
        except Exception as e:
            logger.error(e)

    def drop_group(self, group_name: str, version: int):
        """Delete a feature group by name and version."""
        try:
            fss_obj = fss.FeatureStoreService(self.conf)
            fss_obj.drop_group(group_name, version)
        except Exception as e:
            logger.error(e)

    def get_view(self, view_name: str, version: int):
        """Fetch a feature view by name and version (None on error)."""
        try:
            fss_obj = fss.FeatureStoreService(self.conf)
            fv = fss_obj.get_view(view_name, version)
            return fv
        except Exception as e:
            logger.error(e)

    def get_view_features(self, view_name: str, version, features):
        """Read a feature view and return only the requested columns."""
        try:
            fss_obj = fss.FeatureStoreService(self.conf)
            fv = fss_obj.get_view(view_name, version)
            df = fv.query.read()
            df_filtered = df[features]
            print(df_filtered.head())
            return df_filtered
        except Exception as e:
            logger.error(e)

    def build_view(self, view_name, view_version, description, view_json):
        """Dispatch to a view-specific builder (placeholders to implement)."""
        try:
            cfs_obj = cfs.ComplexFeatureService(self.conf)
            if (view_name == 'view_name1'):
                ##TODO: Implement logic for view 1
                pass
            elif (view_name == 'view_name2'):
                ##TODO: Implement logic for view 2
                pass
            elif (view_name == 'view_name3'):
                ##TODO: Implement logic for view 3
                pass
            else:
                logger.error('Feature view function not found. Please implement the function in '
                             'ComplexFeatureService class.')
        except Exception as e:
            logger.error(e)
        finally:
            pass

    def export_view_to_file(self, view_name: str, version: int, file_path: str, file_name: str):
        """Dump a feature view to a '|'-separated CSV; returns the view."""
        try:
            fss_obj = fss.FeatureStoreService(self.conf)
            fv = fss_obj.get_view(view_name, version)
            df = fv.query.read()
            out_file = file_name
            out_dir = Path(file_path)
            out_dir.mkdir(parents=True, exist_ok=True)
            df.to_csv(out_dir / out_file, sep='|', index=False, header=True, quotechar='"', escapechar="\"")
            return fv
        except Exception as e:
            logger.error(e)

    def drop_view(self, view_name: str, version: int):
        """Delete a feature view by name and version."""
        try:
            fss_obj = fss.FeatureStoreService(self.conf)
            fss_obj.drop_view(view_name, version)
        except Exception as e:
            logger.error(e) | PypiClean
/Adafruit_Blinka-8.20.1-py3-none-any.whl/adafruit_blinka/microcontroller/tegra/t234/pin.py |
"""Tegra T234 pin names"""
import atexit
from Jetson import GPIO
GPIO.setmode(GPIO.TEGRA_SOC)
GPIO.setwarnings(False) # shh!
class Pin:
    """Pins dont exist in CPython so...lets make our own!"""

    # Pin direction constants.
    IN = 0
    OUT = 1
    # Logic levels.
    LOW = 0
    HIGH = 1
    # Pull resistor configurations.
    PULL_NONE = 0
    PULL_UP = 1
    PULL_DOWN = 2

    id = None
    _value = LOW
    _mode = IN

    def __init__(self, bcm_number):
        # ``bcm_number`` is the Tegra SOC pin name string here, since
        # GPIO.setmode(GPIO.TEGRA_SOC) is applied at module import.
        self.id = bcm_number

    def __repr__(self):
        return str(self.id)

    def __eq__(self, other):
        # NOTE(review): defining __eq__ without __hash__ makes Pin
        # unhashable; fine as long as pins are never dict/set keys.
        return self.id == other

    def init(self, mode=IN, pull=None):
        """Initialize the Pin"""
        if mode is not None:
            if mode == self.IN:
                self._mode = self.IN
                GPIO.setup(self.id, GPIO.IN)
            elif mode == self.OUT:
                self._mode = self.OUT
                GPIO.setup(self.id, GPIO.OUT)
            else:
                raise RuntimeError("Invalid mode for pin: %s" % self.id)
        if pull is not None:
            # Pull resistors only make sense on inputs.
            if self._mode != self.IN:
                raise RuntimeError("Cannot set pull resistor on output")
            if pull == self.PULL_UP:
                GPIO.setup(self.id, GPIO.IN, pull_up_down=GPIO.PUD_UP)
            elif pull == self.PULL_DOWN:
                GPIO.setup(self.id, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
            else:
                raise RuntimeError("Invalid pull for pin: %s" % self.id)

    def value(self, val=None):
        """Set or return the Pin Value"""
        if val is not None:
            if val == self.LOW:
                self._value = val
                GPIO.output(self.id, val)
                return None
            if val == self.HIGH:
                self._value = val
                GPIO.output(self.id, val)
                return None
            raise RuntimeError("Invalid value for pin")
        # No value supplied: read the current level.
        return GPIO.input(self.id)

    # pylint: disable=no-method-argument
    @atexit.register
    def cleanup():
        """Clean up pins"""
        # Registered at class-creation time; runs once at interpreter exit.
        print("Exiting... \nCleaning up pins")
        GPIO.cleanup()
    # pylint: enable=no-method-argument
# Cannot be used as GPIO
SDA = Pin("GP16_I2C8_DAT") # I2C4
SCL = Pin("GP81_I2C9_CLK")
SDA_1 = Pin("GP14_I2C2_DAT") # I2C2
SCL_1 = Pin("GP13_I2C2_CLK")
# Jetson AGX Orin
Q06 = Pin("GP66")
R04 = Pin("GP72_UART1_RTS_N")
H07 = Pin("GP122")
R00 = Pin("GP68")
N01 = Pin("GP88_PWM1")
BB00 = Pin("GP25")
H00 = Pin("GP115")
Z05 = Pin("GP49_SPI1_MOSI")
Z04 = Pin("GP48_SPI1_MISO")
P04 = Pin("GP56")
Z03 = Pin("GP47_SPI1_CLK")
Z06 = Pin("GP50_SPI1_CS0_N")
Z07 = Pin("GP51_SPI1_CS1_N")
AA01 = Pin("GP18_CAN0_DIN")
AA00 = Pin("GP17_CAN0_DOUT")
BB01 = Pin("GP26")
AA02 = Pin("GP19_CAN1_DOUT")
I02 = Pin("GP125")
R05 = Pin("GP73_UART1_CTS_N")
AA03 = Pin("GP20_CAN1_DIN")
I01 = Pin("GP124")
I00 = Pin("GP123")
AC06 = Pin("GP167")
Y00 = Pin("SPI1_SCK")
# NOTE(review): N01 is assigned a second time with the same pin name below;
# the duplicate is harmless but presumably unintended.
N01 = Pin("GP88_PWM1")
Y04 = Pin("GP40_SPI3_CS1_N")
Y03 = Pin("GP39_SPI3_CS0_N")
Y01 = Pin("GP37_SPI3_MISO")
Q05 = Pin("GP65")
G06 = Pin("GP113_PWM7")
Y02 = Pin("GP38_SPI3_MOSI")
# (busId, sclPin, sdaPin) tuples for the available I2C ports.
i2cPorts = (
    (7, SCL, SDA),
    (1, SCL_1, SDA_1),
)
# ordered as spiId, sckId, mosiId, misoId
spiPorts = ((0, Z03, Z05, Z04),) | PypiClean
/CaMo-0.0.5-py3-none-any.whl/camo/discover/ica_lingam.py | from itertools import permutations
from typing import Optional
import numpy as np
import pandas as pd
from scipy.optimize import linear_sum_assignment as hungarian
from sklearn.decomposition import FastICA
from sklearn.linear_model import LassoLarsIC, LinearRegression
from ..structure import LinearNonGaussianSCM
class ICALiNGAM:
    # Define Adaptive Lasso regression.
    def _apply_adaptive_lasso(self, data, target, regressors, gamma=1.0):
        """Adaptive-lasso coefficients of column ``target`` of ``data``
        regressed on the ``regressors`` columns.

        An OLS fit provides the adaptive weights |w|^gamma; a BIC-tuned
        LassoLars is then fit on the reweighted regressors and its
        coefficients are rescaled back by w.
        """
        target, regressors = data.iloc[:, target], data.iloc[:, regressors]
        w = LinearRegression().fit(regressors, target).coef_
        w = LassoLarsIC(criterion="bic").fit(
            regressors * np.power(np.abs(w), gamma),
            target
        ).coef_ * w
        return w
    def fit(self, data: pd.DataFrame, seed: Optional[int] = None):
        """Estimate a causal order of the columns of ``data`` via ICA-LiNGAM.

        Returns a permutation array K such that K[i] is the column index at
        position i of the estimated causal ordering.
        """
        # Given a d-dimensional random vector x and its (d,n) observed data matrix X,
        # apply an ICA algorithm to obtain an estimate of A.
        d = len(data.columns)
        B = FastICA(random_state=seed).fit(data).components_
        # Find the unique permutation of the rows of W = A^-1 that yields a matrix W'
        # without any zeros on the main diagonal. The permutation is sought by minimizing
        # sum_i (1/|W'_ii|). This minimization problem is the classical linear assignment
        # problem, and here the Hungarian algorithm (Kuhn, 1955) is used.
        _, K = hungarian(1 / np.abs(B))
        B = B.take(K, 0)
        # Divide each row of W' by its corresponding diagonal element in order to
        # yield a new matrix W'' with a diagonal consisting entirely of 1s.
        B /= B.diagonal()[..., None]
        # Compute an estimate B' of B by using B' = I - W''.
        B = np.identity(d) - B
        # Finally, to estimate a causal order k(i), determine the permutation matrix
        # K of B', obtaining the matrix B' = PB'K^T that is as close as possible
        # to having a strictly lower triangular structure.
        K = None
        if d < 8:
            # For a small number of variables, i.e., fewer than 8, the lower triangularity
            # of B' can be measured by using the sum of squared bij in its upper triangular
            # section sum_i<=j (b'_ij^2). In addition, an exhaustive search over all possible
            # permutations is feasible and is hence performed.
            vmin = np.inf
            for p in permutations(range(d)):
                score = np.sum(np.square(np.triu(B.take(p, 0))))
                if score < vmin:
                    vmin = score
                    K = p
            K = np.array(K)
        else:
            # For higher-dimensional data, the following approximate algorithm is used,
            # which sets small absolute valued elements in B' to zero, and whereby it can be
            # determined whether it is possible to permute the resulting matrix to become
            # strictly lower triangular:
            # (a) Set the d(d+1)/2 smallest (in absolute value) elements of B' to zero.
            i = round(d*(d+1)/2)
            pmin = np.argsort(np.abs(np.ravel(B)))
            B.flat[pmin[:i]] = 0
            # (b) Repeat
            while K is None:
                # i. Determine whether B' can be permuted to become strictly lower triangular.
                # If this is possible, stop and return the permuted B'.
                K, A, L = np.zeros(d, int), np.arange(d), B
                while len(A) > 0:
                    # Find a row where all elements are zero, if any.
                    j = np.where(np.sum(np.abs(L), axis=1) == 0)
                    # If there is no row with zero elements, exit.
                    if len(j[0]) == 0:
                        K = None
                        break
                    # Select the first row with zero elements.
                    j = j[0][0]
                    # Add original index to permutation matrix.
                    K[d-len(A)] = A[j]
                    A = np.delete(A, j)
                    # Remove selected row and columns.
                    mask = np.delete(np.arange(len(L)), j)
                    L = L[mask][:, mask]
                # ii. In addition, set the next smallest (in absolute value) element of Bb to zero.
                if K is None:
                    B.flat[pmin[i]] = 0
                    i += 1
        return K
    def fit_transform(self, data: pd.DataFrame, seed: Optional[int] = None):
        """Estimate a causal order and fit the linear SCM in one step."""
        return self.transform(data, self.fit(data, seed))
def transform(self, data: pd.DataFrame, K):
# Estimate B applying Adaptive Lasso over the causal order K incrementally.
d = len(K)
B = np.zeros((d, d))
for i in range(1, d):
B[K[i], K[:i]] = self._apply_adaptive_lasso(data, K[i], K[:i])
# Workaround to remove subnormal numbers.
EPS = np.finfo(float).eps
B[B < EPS] = 0
return LinearNonGaussianSCM(data.columns, B) | PypiClean |
/Nosyd-0.0.5.tar.gz/Nosyd-0.0.5/README | -------
Summary
-------
Nosyd is a _minimalist_ personal command line friendly CI server. It is primarily meant to run on your developer machine.
Nosyd tracks multiple projects and automatically runs your build whenever one of the monitored files of the monitored projects has changed.
------------
How it works
------------
Nosyd is an auto-testing tool, also called a personal Continuous Integration server. It is a daemonization of Jeff Winkler's original nosy script [1].
Nosyd sits in the background and can monitor multiple projects. This means you only need one instance of nosyd on your desktop.
Today nosyd rebuilds projects if one of the files it monitors has changed. After it has built the project, nosyd tries to notify you
of the build results using Desktop notifications. When it can, nosyd uses information from the build to report accurate information (number of failed tests, etc).
Nosyd then goes back waiting for a project to be rebuilt.
Nosyd has a command line interface, configuration files, and support for multiple builders and desktop notifiers.
--------
Features
--------
* automatically run your build whenever one of the monitored files of the monitored projects has changed
* filesets to select files monitored
* support multiple builders (nosetests, maven2, trial, django)
* notify using Gnome's notification library on Linux, Growl on Mac OS X
* per project configuration to override defaults
* command line interface to add/remove/list jobs
* persists daemon configuration using files and links
* logging
-----
Usage
-----
* install in path and run nosyd in a terminal to start the daemon. The terminal should be kept open, you will see the build output there
* optionally create a ~/.nosyd/config to override some of the configuration
* optionaly create a .nosy file in your project's directory. If that file changes, the file will be reloaded at next build.
* add/remove the monitored project by using --add/--remove [path] options
** you can also do it manually by adding/removing symlinks into the .nosyd/jobs/ directory
ln -s /path/to/your/project ~/.nosyd/jobs/
------
Layout
------
~/.nosyd/config optional main nosyd configuration
~/.nosyd/stop temporary file created to indicate nosyd should stop
~/.nosyd/jobs/a_link link to a directory on your file system representing a monitored project
/path/to/your/project/.nosy optional project specific configuration for nosyd
--------------
Configurations
--------------
Default values for the various configuration files:
~/.nosyd/config
[nosyd]
#logging=warning
#check_period=1
/path/to/your/project/.nosy
[nosy]
#type=default
#monitor_paths (project specific)
#logging=warning (for the project and associated builder and notifier)
#check_period (when ran standalone, i.e. with --local)
----
Help
----
Usage: nosyd [options]
Options:
--version show program's version number and exit
-h, --help show this help message and exit
-a, --add Start monitoring the specified or current directory
-r, --remove Stop monitoring the specified or current directory
-l, --list List the monitored projects
-c, --clean Clean the projects nosyd can't track anymore (links point to
nowhere)
-1, --local Run the standalone nosyd on the specified or current directory
-s, --stop Stops the running server, if any
Default behavior:
Start nosyd
Comments & bugs to <jerome.lacoste@gmail.com>
-----
Links
-----
[1] http://jeffwinkler.net/2006/04/27/keeping-your-nose-green/
[2] http://douglatornell.ca/software/python/Nosy-1.0.tar.gz | PypiClean |
/FEV_KEGG-1.1.4.tar.gz/FEV_KEGG-1.1.4/FEV_KEGG/Experiments/19.py | from FEV_KEGG.KEGG.File import cache
import FEV_KEGG.KEGG.Organism
from FEV_KEGG.Statistics.Percent import getPercentSentence
@cache(folder_path = 'experiments/19', file_name = 'enterobacteriales_SubstanceEcGraph')
def enterobacterialesEcGraph():
    """Return the consensus substance-EC graph for a sample of Order Enterobacteriales.

    The graph keeps only EC numbers present in every organism of the group.
    Results are cached on disk via the @cache decorator.
    """
    # Ten representative organisms of Order Enterobacteriales (KEGG abbreviations).
    abbreviations = ['eco', 'ses', 'sfl', 'ent', 'esa', 'kpn', 'cko', 'ype', 'spe', 'buc']
    group = FEV_KEGG.KEGG.Organism.Group(organismAbbreviations = abbreviations)
    return group.consensusEcGraph()
@cache(folder_path = 'experiments/19', file_name = 'gammaproteobacteria_SubstanceEcGraph')
def gammaproteobacteriaEcGraph():
    """Return the consensus substance-EC graph for a sample of Class Gammaproteobacteria.

    The sample deliberately includes the Enterobacteriales organisms, because
    Enterobacteriales is a sub-taxon of Gammaproteobacteria and its members
    therefore belong to this group as well.
    """
    enterobacteriales = ['eco', 'ses', 'sfl', 'ent', 'esa', 'kpn', 'cko', 'ype', 'spe', 'buc']
    gammaproteobacteria = ['hin', 'mht', 'xcc', 'vch', 'pae', 'acb', 'son', 'pha', 'amc', 'lpn', 'ftu', 'aha']
    # Same membership and order as the original extend(): the wider group's
    # organisms first, then the Enterobacteriales sub-set.
    all_abbreviations = gammaproteobacteria + enterobacteriales
    group = FEV_KEGG.KEGG.Organism.Group(organismAbbreviations = all_abbreviations)
    return group.consensusEcGraph()
if __name__ == '__main__':
    #- Calculate consensus substance-ec graphs for both groups. Leaving only EC numbers which occur in all organisms of the group.
    enterobacteriales_EC_graph = enterobacterialesEcGraph()
    gammaproteobacteria_EC_graph = gammaproteobacteriaEcGraph()

    #- Calculate the difference of the two sets of consensus EC numbers, leaving only the EC numbers which occur in Enterobacteriales consensus, but not in Gammaproteobacteria consensus.
    enterobacteriales_EC_set = enterobacteriales_EC_graph.getECs()
    gammaproteobacteria_EC_set = gammaproteobacteria_EC_graph.getECs()
    only_enterobacteriales_EC_set = enterobacteriales_EC_set.difference(gammaproteobacteria_EC_set)

    #- Print these EC numbers and their percentage of all EC numbers in Enterobacteriales, ie. how many of the EC numbers in Enterobacteriales do not exist in Gammaproteobacteria consensus.
    # Idiom fix: str(ec) instead of ec.__str__(), and a sorted() comprehension
    # instead of the manual append/sort loop. Output is unchanged.
    output = sorted(str(ec) for ec in only_enterobacteriales_EC_set)
    print(str(len(output)) + ' results')
    for line in output:
        print(line)
    print( getPercentSentence(len(only_enterobacteriales_EC_set), len(enterobacteriales_EC_set)) + ' of EC numbers in Enterobacteriales are new, compared to Gammaproteobacteria consensus' )
/Impression-CMS-0.2.0.tar.gz/Impression-CMS-0.2.0/impression/themes/admin/static/js/plugins/flot/jquery.flot.resize.js | * jQuery resize event - v1.1 - 3/14/2010
* http://benalman.com/projects/jquery-resize-plugin/
*
* Copyright (c) 2010 "Cowboy" Ben Alman
* Dual licensed under the MIT and GPL licenses.
* http://benalman.com/about/license/
*/
// Minified copy of Ben Alman's "jQuery resize event" plugin (v1.1; see the
// license header above). It registers a "resize" special event for arbitrary
// elements (not just window): bound elements' width/height are polled via
// setTimeout at a configurable interval ($.resize.delay, set to 250ms here),
// and the "resize" event is triggered whenever the stored dimensions change.
(function($,h,c){var a=$([]),e=$.resize=$.extend($.resize,{}),i,k="setTimeout",j="resize",d=j+"-special-event",b="delay",f="throttleWindow";e[b]=250;e[f]=true;$.event.special[j]={setup:function(){if(!e[f]&&this[k]){return false}var l=$(this);a=a.add(l);$.data(this,d,{w:l.width(),h:l.height()});if(a.length===1){g()}},teardown:function(){if(!e[f]&&this[k]){return false}var l=$(this);a=a.not(l);l.removeData(d);if(!a.length){clearTimeout(i)}},add:function(l){if(!e[f]&&this[k]){return false}var n;function m(s,o,p){var q=$(this),r=$.data(this,d);r.w=o!==c?o:q.width();r.h=p!==c?p:q.height();n.apply(this,arguments)}if($.isFunction(l)){n=l;return m}else{n=l.handler;l.handler=m}}};function g(){i=h[k](function(){a.each(function(){var n=$(this),m=n.width(),l=n.height(),o=$.data(this,d);if(m!==o.w||l!==o.h){n.trigger(j,[o.w=m,o.h=l])}});g()},e[b])}})(jQuery,this);
(function ($) {
    // This plugin exposes no user-configurable options.
    var options = {};

    function init(plot) {

        // Re-fit and redraw the plot whenever its placeholder is resized.
        function handleResize() {
            var container = plot.getPlaceholder();

            // A hidden placeholder reports zero dimensions; drawing is
            // impossible in that state, so bail out.
            if (container.width() == 0 || container.height() == 0)
                return;

            plot.resize();
            plot.setupGrid();
            plot.draw();
        }

        function bindEvents(plot, eventHolder) {
            plot.getPlaceholder().resize(handleResize);
        }

        function shutdown(plot, eventHolder) {
            plot.getPlaceholder().unbind("resize", handleResize);
        }

        plot.hooks.bindEvents.push(bindEvents);
        plot.hooks.shutdown.push(shutdown);
    }

    $.plot.plugins.push({
        init: init,
        options: options,
        name: 'resize',
        version: '1.0'
    });
})(jQuery);
/Ngoto-0.0.39-py3-none-any.whl/ngoto/core/util/task_controller.py | from ngoto.core.util.interface import show_tasks, clear_screen
class TaskController:
    """Registry and scheduler for periodic background tasks.

    Tasks are plain objects expected to expose ``id``, ``active``, ``delay``,
    ``last_run``, ``os`` (list of platform names), ``iteration`` and ``name``
    attributes. Due tasks are submitted to an executor; the resulting futures
    are harvested and their results logged once they complete.
    """

    # Kept as a class attribute for backward compatibility with any code
    # that reads/sets it on the class directly.
    logger = None

    def __init__(self):
        # BUGFIX: these were mutable class attributes, so every
        # TaskController instance shared the same task lists.
        self.tasks = []
        self.tasks_running = []

    def add_task(self, task) -> None:
        """Register a task for scheduling."""
        self.tasks.append(task)

    def enable_task(self, task_id: str, logger) -> None:
        """Enable the task whose id matches ``task_id`` ('all' enables every task)."""
        found = False
        for task in self.tasks:
            if task.id == task_id or task_id == 'all':
                task.active = True
                found = True
                logger.info('Enabled task: ' + task.id)
        # BUGFIX: the original logged 'Task not found' unconditionally,
        # even after successfully enabling a task.
        if not found:
            logger.info('Task not found: ' + task_id)

    def disable_task(self, task_id: str, logger) -> None:
        """Disable the task whose id matches ``task_id`` ('all' disables every task)."""
        found = False
        for task in self.tasks:
            if task.id == task_id or task_id == 'all':
                task.active = False
                found = True
                logger.info('Disabled task: ' + task.id)
        # BUGFIX: only report a miss when nothing matched.
        if not found:
            logger.info('Task not found: ' + task_id)

    def set_delay(self, task_id: str, delay: int, logger) -> None:
        """Set the re-run delay of the matching task(s) ('all' targets every task)."""
        found = False
        for task in self.tasks:
            if task.id == task_id or task_id == 'all':
                task.delay = delay
                found = True
                logger.info(
                    'Set delay of: ' + task.id + ' to ' + str(delay))
        # BUGFIX: only report a miss when nothing matched.
        if not found:
            logger.info('Task not found: ' + task_id)

    @staticmethod
    def update_task(self, task, curr_time, os: str) -> bool:
        """Return True when ``task`` is due to run: it is active, supported on
        ``os``, and its delay has elapsed since the last run.

        NOTE(review): the historical signature (a staticmethod explicitly
        receiving ``self``) is preserved because callers pass it positionally.
        """
        # The original evaluated the elapsed-delay condition twice; once suffices.
        return bool(
            task.active
            and os in task.os
            and (curr_time - task.last_run) > task.delay
        )

    def check_available_tasks(self, executor, curr_time, os: str) -> None:
        """Submit every due task to ``executor`` and record its future."""
        for task in self.tasks:
            if self.update_task(self, task, curr_time, os):
                tmp_task = executor.submit(task)
                tmp_task.name = task.name
                self.tasks_running.append(tmp_task)
                task.iteration += 1
                task.last_run = curr_time

    def check_running_tasks(self, logger):
        """Log the result of, and drop, every finished task future."""
        # BUGFIX: iterate over a copy — removing from the list while
        # iterating it skips elements.
        for task in list(self.tasks_running):
            if task.done():
                logger.info(task.result(), program=task.name)
                self.tasks_running.remove(task)

    def run_command(self, options: list, os: str, logger):
        """Dispatch a task sub-command: delay/enable/disable, or show status."""
        if len(options) == 4 and options[1] == 'delay':
            self.set_delay(options[2], int(options[3]), logger)
        elif len(options) == 3 and options[1] in ['e', 'enable']:
            self.enable_task(options[2], logger)
        elif len(options) == 3 and options[1] in ['d', 'disable']:
            self.disable_task(options[2], logger)
        else:  # show tasks status
            try:
                clear_screen()
                show_tasks(self.tasks, os)
            except Exception as e:
                print(e)
/src/models/trainer.py | import os
import logging
logging.getLogger().setLevel(logging.INFO)
import numpy as np
import time
import shutil
from argparse import ArgumentParser
from pathlib import Path
from tensorboardX import SummaryWriter
import torch
from src.models.models import PretrainedModel, AdapterModel
from src.models.optimization import AdamW, WarmupLinearSchedule
logger = logging.getLogger(__name__)
import collections
import inspect
import math
import random
import re
import sys
import warnings
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
import transformers
from tqdm.auto import tqdm
# Integrations must be imported before ML frameworks:
from transformers.integrations import ( # isort: split
default_hp_search_backend,
get_reporting_integration_callbacks,
hp_params,
is_fairscale_available,
is_optuna_available,
is_ray_tune_available,
is_sigopt_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
)
import numpy as np
import torch
from packaging import version
from torch import nn
from torch.utils.data import DataLoader, Dataset, IterableDataset, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from huggingface_hub import Repository
from transformers import __version__
from transformers.configuration_utils import PretrainedConfig
from transformers.data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator
from transformers.debug_utils import DebugOption, DebugUnderflowOverflow
from transformers.deepspeed import deepspeed_init, is_deepspeed_zero3_enabled
from transformers.dependency_versions_check import dep_version_check
from transformers.file_utils import (
CONFIG_NAME,
WEIGHTS_NAME,
get_full_repo_name,
is_apex_available,
is_datasets_available,
is_in_notebook,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_torch_tpu_available,
)
from transformers.modelcard import TrainingSummary
from transformers.modeling_utils import PreTrainedModel, unwrap_model
from transformers.models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
from transformers.optimization import Adafactor, AdamW, get_scheduler
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
from transformers.trainer_pt_utils import (
DistributedLengthGroupedSampler,
DistributedSamplerWithLoop,
DistributedTensorGatherer,
IterableDatasetShard,
LabelSmoother,
LengthGroupedSampler,
SequentialDistributedSampler,
ShardSampler,
distributed_broadcast_scalars,
distributed_concat,
find_batch_size,
get_parameter_names,
nested_concat,
nested_detach,
nested_numpify,
nested_truncate,
nested_xla_mesh_reduce,
reissue_pt_warnings,
)
from transformers.trainer_utils import (
PREFIX_CHECKPOINT_DIR,
BestRun,
EvalLoopOutput,
EvalPrediction,
HPSearchBackend,
HubStrategy,
IntervalStrategy,
PredictionOutput,
ShardedDDPOption,
TrainerMemoryTracker,
TrainOutput,
default_compute_objective,
default_hp_space,
denumpify_detensorize,
get_last_checkpoint,
number_of_arguments,
set_seed,
speed_metrics,
)
from transformers.training_args import ParallelMode, TrainingArguments
from transformers.utils import logging
_is_torch_generator_available = False
_is_native_amp_available = False
DEFAULT_CALLBACKS = [DefaultFlowCallback]
DEFAULT_PROGRESS_CALLBACK = ProgressCallback
if is_in_notebook():
from transformers.utils.notebook import NotebookProgressCallback
DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback
if is_apex_available():
from apex import amp
if version.parse(torch.__version__) >= version.parse("1.6"):
_is_torch_generator_available = True
_is_native_amp_available = True
from torch.cuda.amp import autocast
if is_datasets_available():
import datasets
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
import torch_xla.distributed.parallel_loader as pl
if is_fairscale_available():
dep_version_check("fairscale")
import fairscale
from fairscale.nn.data_parallel import FullyShardedDataParallel as FullyShardedDDP
from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP
from fairscale.nn.wrap import auto_wrap
from fairscale.optim import OSS
from fairscale.optim.grad_scaler import ShardedGradScaler
if is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.distributed as dist
from smdistributed.dataparallel.torch.parallel.distributed import DistributedDataParallel as DDP
else:
import torch.distributed as dist
if is_sagemaker_mp_enabled():
import smdistributed.modelparallel.torch as smp
from transformers.trainer_pt_utils import smp_forward_backward, smp_forward_only, smp_gather, smp_nested_concat
if TYPE_CHECKING:
import optuna
logger = logging.get_logger(__name__)
# Name of the files used for checkpointing
TRAINING_ARGS_NAME = "training_args.bin"
TRAINER_STATE_NAME = "trainer_state.json"
OPTIMIZER_NAME = "optimizer.pt"
SCHEDULER_NAME = "scheduler.pt"
SCALER_NAME = "scaler.pt"
class Trainer:
"""
Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers.
Args:
model (:class:`~transformers.PreTrainedModel` or :obj:`torch.nn.Module`, `optional`):
The model to train, evaluate or use for predictions. If not provided, a ``model_init`` must be passed.
.. note::
:class:`~transformers.Trainer` is optimized to work with the :class:`~transformers.PreTrainedModel`
provided by the library. You can still use your own models defined as :obj:`torch.nn.Module` as long as
they work the same way as the 🤗 Transformers models.
args (:class:`~transformers.TrainingArguments`, `optional`):
The arguments to tweak for training. Will default to a basic instance of
:class:`~transformers.TrainingArguments` with the ``output_dir`` set to a directory named `tmp_trainer` in
the current directory if not provided.
data_collator (:obj:`DataCollator`, `optional`):
The function to use to form a batch from a list of elements of :obj:`train_dataset` or :obj:`eval_dataset`.
Will default to :func:`~transformers.default_data_collator` if no ``tokenizer`` is provided, an instance of
:func:`~transformers.DataCollatorWithPadding` otherwise.
train_dataset (:obj:`torch.utils.data.Dataset` or :obj:`torch.utils.data.IterableDataset`, `optional`):
The dataset to use for training. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed.
Note that if it's a :obj:`torch.utils.data.IterableDataset` with some randomization and you are training in
a distributed fashion, your iterable dataset should either use a internal attribute :obj:`generator` that
is a :obj:`torch.Generator` for the randomization that must be identical on all processes (and the Trainer
will manually set the seed of this :obj:`generator` at each epoch) or have a :obj:`set_epoch()` method that
internally sets the seed of the RNGs used.
eval_dataset (:obj:`torch.utils.data.Dataset`, `optional`):
The dataset to use for evaluation. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed.
tokenizer (:class:`PreTrainedTokenizerBase`, `optional`):
The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs the
maximum length when batching inputs, and it will be saved along the model to make it easier to rerun an
interrupted training or reuse the fine-tuned model.
model_init (:obj:`Callable[[], PreTrainedModel]`, `optional`):
A function that instantiates the model to be used. If provided, each call to
:meth:`~transformers.Trainer.train` will start from a new instance of the model as given by this function.
The function may have zero argument, or a single one containing the optuna/Ray Tune/SigOpt trial object, to
be able to choose different architectures according to hyper parameters (such as layer count, sizes of
inner layers, dropout probabilities etc).
compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
The function that will be used to compute metrics at evaluation. Must take a
:class:`~transformers.EvalPrediction` and return a dictionary string to metric values.
callbacks (List of :obj:`~transformers.TrainerCallback`, `optional`):
A list of callbacks to customize the training loop. Will add those to the list of default callbacks
detailed in :doc:`here <callback>`.
If you want to remove one of the default callbacks used, use the :meth:`Trainer.remove_callback` method.
optimizers (:obj:`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`, `optional`): A tuple
containing the optimizer and the scheduler to use. Will default to an instance of
:class:`~transformers.AdamW` on your model and a scheduler given by
:func:`~transformers.get_linear_schedule_with_warmup` controlled by :obj:`args`.
Important attributes:
- **model** -- Always points to the core model. If using a transformers model, it will be a
:class:`~transformers.PreTrainedModel` subclass.
- **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the
original model. This is the model that should be used for the forward pass. For example, under ``DeepSpeed``,
the inner model is wrapped in ``DeepSpeed`` and then again in ``torch.nn.DistributedDataParallel``. If the
inner model hasn't been wrapped, then ``self.model_wrapped`` is the same as ``self.model``.
- **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from
data parallelism, this means some of the model layers are split on different GPUs).
- **place_model_on_device** -- Whether or not to automatically place the model on the device - it will be set
to :obj:`False` if model parallel or deepspeed is used, or if the default
``TrainingArguments.place_model_on_device`` is overridden to return :obj:`False` .
- **is_in_train** -- Whether or not a model is currently running ``train`` (e.g. when ``evaluate`` is called
while in ``train``)
"""
from transformers.trainer_pt_utils import _get_learning_rate, log_metrics, metrics_format, save_metrics, save_state
def __init__(
    self,
    model: Union[PreTrainedModel, nn.Module] = None,
    args: TrainingArguments = None,
    data_collator: Optional[DataCollator] = None,
    train_dataset: Optional[Dataset] = None,
    eval_dataset: Optional[Dataset] = None,
    stable_dataset: Optional[Dataset] = None,
    tokenizer: Optional[PreTrainedTokenizerBase] = None,
    model_init: Callable[[], PreTrainedModel] = None,
    compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
    callbacks: Optional[List[TrainerCallback]] = None,
    optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
):
    """Build the trainer: resolve args/model, configure sharded-DDP and
    mixed-precision backends, wire datasets, callbacks and optimizers.

    See the class docstring for the meaning of each parameter.
    NOTE(review): ``stable_dataset`` is an addition over the upstream
    Trainer signature; in this part of the file it is only stored on
    ``self`` — confirm where it is consumed downstream.
    """
    if args is None:
        # No arguments given: fall back to defaults writing into ./tmp_trainer.
        output_dir = "tmp_trainer"
        logger.info(f"No `TrainingArguments` passed, using `output_dir={output_dir}`.")
        args = TrainingArguments(output_dir=output_dir)
    self.args = args
    # Seed must be set before instantiating the model when using model
    set_seed(self.args.seed)
    self.hp_name = None
    self.deepspeed = None
    self.is_in_train = False
    # memory metrics - must set up as early as possible
    self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
    self._memory_tracker.start()
    # set the correct log level depending on the node
    log_level = args.get_process_log_level()
    logging.set_verbosity(log_level)
    # force device and distributed setup init explicitly
    args._setup_devices
    if model is None:
        if model_init is not None:
            self.model_init = model_init
            model = self.call_model_init()
        else:
            raise RuntimeError("`Trainer` requires either a `model` or `model_init` argument")
    else:
        if model_init is not None:
            # Both given: model_init wins at train() time, so warn loudly.
            warnings.warn(
                "`Trainer` requires either a `model` or `model_init` argument, but not both. "
                "`model_init` will overwrite your model when calling the `train` method. This will become a fatal error in the next release.",
                FutureWarning,
            )
        self.model_init = model_init
    if hasattr(model, "is_parallelizable") and model.is_parallelizable and model.model_parallel:
        self.is_model_parallel = True
    else:
        self.is_model_parallel = False
    # Setup Sharded DDP training
    self.sharded_ddp = None
    if len(args.sharded_ddp) > 0:
        if args.deepspeed:
            raise ValueError(
                "Using --sharded_ddp xxx together with --deepspeed is not possible, deactivate one of those flags."
            )
        if args.local_rank == -1:
            raise ValueError("Using sharded DDP only works in distributed training.")
        elif not is_fairscale_available():
            raise ImportError("Sharded DDP training requires fairscale: `pip install fairscale`.")
        elif ShardedDDPOption.SIMPLE not in args.sharded_ddp and FullyShardedDDP is None:
            raise ImportError(
                "Sharded DDP in a mode other than simple training requires fairscale version >= 0.3, found "
                f"{fairscale.__version__}. Upgrade your fairscale library: `pip install --upgrade fairscale`."
            )
        elif ShardedDDPOption.SIMPLE in args.sharded_ddp:
            self.sharded_ddp = ShardedDDPOption.SIMPLE
        elif ShardedDDPOption.ZERO_DP_2 in args.sharded_ddp:
            self.sharded_ddp = ShardedDDPOption.ZERO_DP_2
        elif ShardedDDPOption.ZERO_DP_3 in args.sharded_ddp:
            self.sharded_ddp = ShardedDDPOption.ZERO_DP_3
    # one place to sort out whether to place the model on device or not
    # postpone switching model to cuda when:
    # 1. MP - since we are trying to fit a much bigger than 1 gpu model
    # 2. fp16-enabled DeepSpeed loads the model in half the size and it doesn't need .to() anyway,
    #    and we only use deepspeed for training at the moment
    # 3. full fp16 eval - since the model needs to be half'ed first
    # 4. Sharded DDP - same as MP
    self.place_model_on_device = args.place_model_on_device
    if (
        self.is_model_parallel
        or args.deepspeed
        or (args.fp16_full_eval and not args.do_train)
        or (self.sharded_ddp in [ShardedDDPOption.ZERO_DP_2, ShardedDDPOption.ZERO_DP_3])
    ):
        self.place_model_on_device = False
    default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer)
    self.data_collator = data_collator if data_collator is not None else default_collator
    self.train_dataset = train_dataset
    self.eval_dataset = eval_dataset
    self.stable_dataset = stable_dataset
    self.tokenizer = tokenizer
    if self.place_model_on_device:
        self._move_model_to_device(model, args.device)
    # Force n_gpu to 1 to avoid DataParallel as MP will manage the GPUs
    if self.is_model_parallel:
        self.args._n_gpu = 1
    # later use `self.model is self.model_wrapped` to check if it's wrapped or not
    self.model_wrapped = model
    self.model = model
    self.compute_metrics = compute_metrics
    self.optimizer, self.lr_scheduler = optimizers
    if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None):
        raise RuntimeError(
            "Passing a `model_init` is incompatible with providing the `optimizers` argument. "
            "You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
        )
    default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)
    callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks
    self.callback_handler = CallbackHandler(
        callbacks, self.model, self.tokenizer, self.optimizer, self.lr_scheduler
    )
    self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK)
    # Will be set to True by `self._setup_loggers()` on first call to `self.log()`.
    self._loggers_initialized = False
    # Create clone of distant repo and output directory if needed
    if self.args.push_to_hub:
        self.init_git_repo()
        # In case of pull, we need to make sure every process has the latest.
        if is_torch_tpu_available():
            xm.rendezvous("init git repo")
        elif args.local_rank != -1:
            dist.barrier()
    if self.args.should_save:
        os.makedirs(self.args.output_dir, exist_ok=True)
    if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)):
        raise ValueError("The `data_collator` should be a simple callable (function, class with `__call__`).")
    if args.max_steps > 0:
        logger.info("max_steps is given, it will override any value given in num_train_epochs")
    if train_dataset is not None and not isinstance(train_dataset, collections.abc.Sized) and args.max_steps <= 0:
        raise ValueError("train_dataset does not implement __len__, max_steps has to be specified")
    self._signature_columns = None
    # Mixed precision setup
    self.use_apex = False
    self.use_amp = False
    self.fp16_backend = None
    if args.fp16:
        if args.fp16_backend == "auto":
            self.fp16_backend = "amp" if _is_native_amp_available else "apex"
        else:
            self.fp16_backend = args.fp16_backend
        logger.info(f"Using {self.fp16_backend} fp16 backend")
    if args.fp16 and not args.deepspeed:  # deepspeed manages its own fp16
        if self.fp16_backend == "amp":
            self.use_amp = True
            # Pick the gradient scaler matching the parallelism backend.
            if is_sagemaker_mp_enabled():
                self.scaler = smp.amp.GradScaler()
            elif self.sharded_ddp is not None:
                self.scaler = ShardedGradScaler()
            else:
                self.scaler = torch.cuda.amp.GradScaler()
        else:
            if not is_apex_available():
                raise ImportError(
                    "Using FP16 with APEX but APEX is not installed, please refer to https://www.github.com/nvidia/apex."
                )
            self.use_apex = True
    # FP16 + model parallelism in SageMaker: gradient clipping does not work for now so we raise a helpful error.
    if is_sagemaker_mp_enabled() and self.use_amp and args.max_grad_norm is not None and args.max_grad_norm > 0:
        raise ValueError(
            "SageMaker Model Parallelism in mixed precision mode does not support gradient clipping yet. Pass "
            "along 'max_grad_norm': 0 in your hyperparameters."
        )
    # Label smoothing
    if self.args.label_smoothing_factor != 0:
        self.label_smoother = LabelSmoother(epsilon=self.args.label_smoothing_factor)
    else:
        self.label_smoother = None
    self.state = TrainerState()
    self.control = TrainerControl()
    # Internal variable to count flos in each process, will be accumulated in `self.state.total_flos` then
    # returned to 0 every time flos need to be logged
    self.current_flos = 0
    self.hp_search_backend = None
    self.use_tune_checkpoints = False
    default_label_names = (
        ["start_positions", "end_positions"]
        if type(self.model).__name__ in MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES.values()
        else ["labels"]
    )
    self.label_names = default_label_names if self.args.label_names is None else self.args.label_names
    # Let callbacks react to (and possibly alter) the finished initialization.
    self.control = self.callback_handler.on_init_end(self.args, self.state, self.control)
    # very last
    self._memory_tracker.stop_and_update_metrics()
def add_callback(self, callback):
    """
    Register *callback* with the trainer's callback handler.

    Args:
        callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
            Either an instance of :class:`~transformer.TrainerCallback` or the
            class itself; a class is instantiated before being stored.
    """
    self.callback_handler.add_callback(callback)
def pop_callback(self, callback):
    """
    Detach and return the first matching callback from the handler.

    Args:
        callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
            Either an instance of :class:`~transformer.TrainerCallback` or the
            class itself; for a class, the first registered member of that
            class is removed.

    Returns:
        :class:`~transformer.TrainerCallback`: the removed callback, or
        :obj:`None` when no match exists (no error is raised).
    """
    return self.callback_handler.pop_callback(callback)
def remove_callback(self, callback):
    """
    Drop a callback from the handler without returning it.

    Args:
        callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
            Either an instance of :class:`~transformer.TrainerCallback` or the
            class itself; for a class, the first registered member of that
            class is removed.
    """
    self.callback_handler.remove_callback(callback)
def _move_model_to_device(self, model, device):
    """Move *model* onto *device*, re-tying weights after an XLA transfer."""
    model = model.to(device)
    # Moving a model to an XLA device disconnects the tied weights, so
    # they must be re-tied afterwards.
    on_tpu = self.args.parallel_mode == ParallelMode.TPU
    if on_tpu and hasattr(model, "tie_weights"):
        model.tie_weights()
def _remove_unused_columns(self, dataset: "datasets.Dataset", description: Optional[str] = None):
    """Drop dataset columns that ``model.forward`` does not accept.

    Args:
        dataset: the :obj:`datasets.Dataset` to filter.
        description: optional set name ("training", "evaluation", ...) used
            only in the informational log message.

    Returns:
        The dataset restricted to the model's signature columns (plus
        ``label``/``label_ids``), or the dataset unchanged when
        ``args.remove_unused_columns`` is False.
    """
    if not self.args.remove_unused_columns:
        return dataset
    if self._signature_columns is None:
        # Inspect model forward signature to keep only the arguments it accepts.
        signature = inspect.signature(self.model.forward)
        self._signature_columns = list(signature.parameters.keys())
        # Labels may be named label or label_ids, the default data collator handles that.
        self._signature_columns += ["label", "label_ids"]
    columns = [k for k in self._signature_columns if k in dataset.column_names]
    ignored_columns = list(set(dataset.column_names) - set(self._signature_columns))
    if len(ignored_columns) > 0:
        dset_description = "" if description is None else f"in the {description} set "
        logger.info(
            f"The following columns {dset_description} don't have a corresponding argument in "
            f"`{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}."
        )
    if version.parse(datasets.__version__) < version.parse("1.4.0"):
        # datasets < 1.4.0 has no `remove_columns`: restrict visible columns
        # via `set_format` instead, mutating the dataset in place.
        dataset.set_format(
            type=dataset.format["type"], columns=columns, format_kwargs=dataset.format["format_kwargs"]
        )
        return dataset
    else:
        return dataset.remove_columns(ignored_columns)
def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
    """Pick the training sampler matching the current run configuration.

    Returns :obj:`None` for unsized datasets; otherwise chooses between
    length-grouped and plain random sampling, each in a single-process or
    distributed variant depending on ``args.world_size``.
    """
    if not isinstance(self.train_dataset, collections.abc.Sized):
        # Unsized (e.g. iterable) datasets cannot be sampled by index.
        return None
    generator = None
    if self.args.world_size <= 1 and _is_torch_generator_available:
        # Dedicated generator so shuffling is reproducible per epoch.
        generator = torch.Generator()
        generator.manual_seed(int(torch.empty((), dtype=torch.int64).random_().item()))
    # Build the sampler.
    if self.args.group_by_length:
        # Group samples of roughly equal length to minimize padding.
        if is_datasets_available() and isinstance(self.train_dataset, datasets.Dataset):
            lengths = (
                self.train_dataset[self.args.length_column_name]
                if self.args.length_column_name in self.train_dataset.column_names
                else None
            )
        else:
            lengths = None
        model_input_name = self.tokenizer.model_input_names[0] if self.tokenizer is not None else None
        if self.args.world_size <= 1:
            return LengthGroupedSampler(
                self.args.train_batch_size,
                dataset=self.train_dataset,
                lengths=lengths,
                model_input_name=model_input_name,
                generator=generator,
            )
        else:
            return DistributedLengthGroupedSampler(
                self.args.train_batch_size,
                dataset=self.train_dataset,
                num_replicas=self.args.world_size,
                rank=self.args.process_index,
                lengths=lengths,
                model_input_name=model_input_name,
                seed=self.args.seed,
            )
    else:
        if self.args.world_size <= 1:
            if _is_torch_generator_available:
                return RandomSampler(self.train_dataset, generator=generator)
            return RandomSampler(self.train_dataset)
        elif (
            self.args.parallel_mode in [ParallelMode.TPU, ParallelMode.SAGEMAKER_MODEL_PARALLEL]
            and not self.args.dataloader_drop_last
        ):
            # Use a loop for TPUs when drop_last is False to have all batches have the same size.
            return DistributedSamplerWithLoop(
                self.train_dataset,
                batch_size=self.args.per_device_train_batch_size,
                num_replicas=self.args.world_size,
                rank=self.args.process_index,
                seed=self.args.seed,
            )
        else:
            return DistributedSampler(
                self.train_dataset,
                num_replicas=self.args.world_size,
                rank=self.args.process_index,
                seed=self.args.seed,
            )
def get_train_dataloader(self) -> DataLoader:
    """
    Returns the training :class:`~torch.utils.data.DataLoader`.
    Will use no sampler if :obj:`self.train_dataset` does not implement :obj:`__len__`, a random sampler (adapted
    to distributed training if necessary) otherwise.
    Subclass and override this method if you want to inject some custom behavior.
    """
    if self.train_dataset is None:
        raise ValueError("Trainer: training requires a train_dataset.")
    train_dataset = self.train_dataset
    if is_datasets_available() and isinstance(train_dataset, datasets.Dataset):
        train_dataset = self._remove_unused_columns(train_dataset, description="training")
    if isinstance(train_dataset, torch.utils.data.IterableDataset):
        # Iterable datasets define their own ordering, so no sampler is used.
        if self.args.world_size > 1:
            # Shard the stream so each distributed process sees a distinct slice.
            train_dataset = IterableDatasetShard(
                train_dataset,
                batch_size=self.args.train_batch_size,
                drop_last=self.args.dataloader_drop_last,
                num_processes=self.args.world_size,
                process_index=self.args.process_index,
            )
        return DataLoader(
            train_dataset,
            batch_size=self.args.train_batch_size,
            collate_fn=self.data_collator,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
        )
    # Map-style dataset: delegate ordering to the configured sampler.
    train_sampler = self._get_train_sampler()
    return DataLoader(
        train_dataset,
        batch_size=self.args.train_batch_size,
        sampler=train_sampler,
        collate_fn=self.data_collator,
        drop_last=self.args.dataloader_drop_last,
        num_workers=self.args.dataloader_num_workers,
        pin_memory=self.args.dataloader_pin_memory,
    )
def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.Sampler]:
# Deprecated code
if self.args.use_legacy_prediction_loop:
if is_torch_tpu_available():
return SequentialDistributedSampler(
eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal()
)
elif is_sagemaker_mp_enabled():
return SequentialDistributedSampler(
eval_dataset,
num_replicas=smp.dp_size(),
rank=smp.dp_rank(),
batch_size=self.args.per_device_eval_batch_size,
)
elif self.args.local_rank != -1:
return SequentialDistributedSampler(eval_dataset)
else:
return SequentialSampler(eval_dataset)
if self.args.world_size <= 1:
return SequentialSampler(eval_dataset)
else:
return ShardSampler(
eval_dataset,
batch_size=self.args.per_device_eval_batch_size,
num_processes=self.args.world_size,
process_index=self.args.process_index,
)
def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
"""
Returns the evaluation :class:`~torch.utils.data.DataLoader`.
Subclass and override this method if you want to inject some custom behavior.
Args:
eval_dataset (:obj:`torch.utils.data.Dataset`, `optional`):
If provided, will override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`, columns not
accepted by the ``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
"""
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
if is_datasets_available() and isinstance(eval_dataset, datasets.Dataset):
eval_dataset = self._remove_unused_columns(eval_dataset, description="evaluation")
if isinstance(eval_dataset, torch.utils.data.IterableDataset):
if self.args.world_size > 1:
eval_dataset = IterableDatasetShard(
eval_dataset,
batch_size=self.args.eval_batch_size,
drop_last=self.args.dataloader_drop_last,
num_processes=self.args.world_size,
process_index=self.args.process_index,
)
return DataLoader(
eval_dataset,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
eval_sampler = self._get_eval_sampler(eval_dataset)
return DataLoader(
eval_dataset,
sampler=eval_sampler,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
"""
Returns the test :class:`~torch.utils.data.DataLoader`.
Subclass and override this method if you want to inject some custom behavior.
Args:
test_dataset (:obj:`torch.utils.data.Dataset`, `optional`):
The test dataset to use. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
"""
if is_datasets_available() and isinstance(test_dataset, datasets.Dataset):
test_dataset = self._remove_unused_columns(test_dataset, description="test")
if isinstance(test_dataset, torch.utils.data.IterableDataset):
if self.args.world_size > 1:
test_dataset = IterableDatasetShard(
test_dataset,
batch_size=self.args.eval_batch_size,
drop_last=self.args.dataloader_drop_last,
num_processes=self.args.world_size,
process_index=self.args.process_index,
)
return DataLoader(
test_dataset,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
test_sampler = self._get_eval_sampler(test_dataset)
# We use the same batch_size as for eval.
return DataLoader(
test_dataset,
sampler=test_sampler,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
pin_memory=self.args.dataloader_pin_memory,
)
def create_optimizer_and_scheduler(self, num_training_steps: int):
"""
Setup the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
Trainer's init through :obj:`optimizers`, or subclass and override this method (or :obj:`create_optimizer`
and/or :obj:`create_scheduler`) in a subclass.
"""
self.create_optimizer()
self.create_scheduler(num_training_steps=num_training_steps, optimizer=self.optimizer)
def create_optimizer(self):
"""
Setup the optimizer.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
Trainer's init through :obj:`optimizers`, or subclass and override this method in a subclass.
"""
if self.optimizer is None:
decay_parameters = get_parameter_names(self.model, [nn.LayerNorm])
decay_parameters = [name for name in decay_parameters if "bias" not in name]
# print("*"*20,self.args.learning_rate)
# for n, p in self.model.named_parameters():
# if "ex" in n:
# print(n)
# input()
optimizer_grouped_parameters = [
{
"params": [p for n, p in self.model.named_parameters() if n in decay_parameters and "ex" not in n],
"weight_decay": self.args.weight_decay,
'lr': 0,
},
{
"params": [p for n, p in self.model.named_parameters() if n not in decay_parameters and "ex" not in n],
"weight_decay": 0.0,
'lr': 0,
},
{
"params": [p for n, p in self.model.named_parameters() if n in decay_parameters and "ex" in n],
"weight_decay": self.args.weight_decay,
'lr': self.args.learning_rate
},
{
"params": [p for n, p in self.model.named_parameters() if n not in decay_parameters and "ex" in n],
"weight_decay": 0.0,
'lr': self.args.learning_rate
},
]
optimizer_cls = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
optimizer_cls = Adafactor
optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
else:
optimizer_cls = AdamW
optimizer_kwargs = {
"betas": (self.args.adam_beta1, self.args.adam_beta2),
"eps": self.args.adam_epsilon,
}
optimizer_kwargs["lr"] = self.args.learning_rate
if self.sharded_ddp == ShardedDDPOption.SIMPLE:
self.optimizer = OSS(
params=optimizer_grouped_parameters,
optim=optimizer_cls,
**optimizer_kwargs,
)
else:
self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
if is_sagemaker_mp_enabled():
self.optimizer = smp.DistributedOptimizer(self.optimizer)
return self.optimizer
def create_scheduler(self, num_training_steps: int, optimizer: torch.optim.Optimizer = None):
"""
Setup the scheduler. The optimizer of the trainer must have been set up either before this method is called or
passed as an argument.
Args:
num_training_steps (int): The number of training steps to do.
"""
if self.lr_scheduler is None:
self.lr_scheduler = get_scheduler(
self.args.lr_scheduler_type,
optimizer=self.optimizer if optimizer is None else optimizer,
num_warmup_steps=self.args.get_warmup_steps(num_training_steps),
num_training_steps=num_training_steps,
)
return self.lr_scheduler
def num_examples(self, dataloader: DataLoader) -> int:
"""
Helper to get number of samples in a :class:`~torch.utils.data.DataLoader` by accessing its dataset.
Will raise an exception if the underlying dataset does not implement method :obj:`__len__`
"""
return len(dataloader.dataset)
def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]):
"""HP search setup code"""
self._trial = trial
if self.hp_search_backend is None or trial is None:
return
if self.hp_search_backend == HPSearchBackend.OPTUNA:
params = self.hp_space(trial)
elif self.hp_search_backend == HPSearchBackend.RAY:
params = trial
params.pop("wandb", None)
elif self.hp_search_backend == HPSearchBackend.SIGOPT:
params = {k: int(v) if isinstance(v, str) else v for k, v in trial.assignments.items()}
for key, value in params.items():
if not hasattr(self.args, key):
logger.warn(
f"Trying to set {key} in the hyperparameter search but there is no corresponding field in `TrainingArguments`."
)
continue
old_attr = getattr(self.args, key, None)
# Casting value to the proper type
if old_attr is not None:
value = type(old_attr)(value)
setattr(self.args, key, value)
if self.hp_search_backend == HPSearchBackend.OPTUNA:
logger.info("Trial:", trial.params)
if self.hp_search_backend == HPSearchBackend.SIGOPT:
logger.info(f"SigOpt Assignments: {trial.assignments}")
if self.args.deepspeed:
# Rebuild the deepspeed config to reflect the updated training parameters
from transformers.deepspeed import HfDeepSpeedConfig
self.args.hf_deepspeed_config = HfDeepSpeedConfig(self.args)
def _report_to_hp_search(
self, trial: Union["optuna.Trial", Dict[str, Any]], epoch: int, metrics: Dict[str, float]
):
if self.hp_search_backend is None or trial is None:
return
self.objective = self.compute_objective(metrics.copy())
if self.hp_search_backend == HPSearchBackend.OPTUNA:
import optuna
trial.report(self.objective, epoch)
if trial.should_prune():
raise optuna.TrialPruned()
elif self.hp_search_backend == HPSearchBackend.RAY:
from ray import tune
if self.control.should_save:
self._tune_save_checkpoint()
tune.report(objective=self.objective, **metrics)
    def _tune_save_checkpoint(self):
        """Save a checkpoint into the Ray Tune trial directory (Ray HP-search only)."""
        from ray import tune
        # Opt-in only: skip entirely unless Tune checkpointing was requested.
        if not self.use_tune_checkpoints:
            return
        with tune.checkpoint_dir(step=self.state.global_step) as checkpoint_dir:
            # Folder name mirrors the regular Trainer checkpoints: "checkpoint-<step>".
            output_dir = os.path.join(checkpoint_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}")
            self.save_model(output_dir)
            # `args.should_save` gates persistence of trainer state / optimizer / scheduler
            # to the process(es) responsible for saving.
            if self.args.should_save:
                self.state.save_to_json(os.path.join(output_dir, TRAINER_STATE_NAME))
                torch.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME))
                torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME))
def call_model_init(self, trial=None):
model_init_argcount = number_of_arguments(self.model_init)
if model_init_argcount == 0:
model = self.model_init()
elif model_init_argcount == 1:
model = self.model_init(trial)
else:
raise RuntimeError("model_init should have 0 or 1 argument.")
if model is None:
raise RuntimeError("model_init should not return None.")
return model
    def _wrap_model(self, model, training=True):
        """
        Wrap ``model`` for the current distributed / mixed-precision setup (SageMaker MP,
        DeepSpeed, apex AMP, DataParallel, Sharded DDP / FullyShardedDDP, or plain DDP).

        The branch order below is significant: pre-wrapped engines return early, apex
        initialization must precede any multi-GPU wrapping, and DDP wrapping is skipped
        entirely for evaluation.

        Args:
            model: The model to wrap (usually ``self.model_wrapped``).
            training (:obj:`bool`): Whether the wrapper is for training; evaluation skips DDP.

        Returns:
            The (possibly) wrapped model.
        """
        if is_sagemaker_mp_enabled():
            # Wrapping the base model twice in a DistributedModel will raise an error.
            if isinstance(self.model_wrapped, smp.model.DistributedModel):
                return self.model_wrapped
            return smp.DistributedModel(model, backward_passes_per_step=self.args.gradient_accumulation_steps)
        # already initialized its own DDP and AMP
        if self.deepspeed:
            return self.deepspeed
        # train/eval could be run multiple-times - if already wrapped, don't re-wrap it again
        if unwrap_model(model) is not model:
            return model
        # Mixed precision training with apex (torch < 1.6)
        if self.use_apex and training:
            model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level)
        # Multi-gpu training (should be after apex fp16 initialization)
        if self.args.n_gpu > 1:
            model = nn.DataParallel(model)
        # Note: in torch.distributed mode, there's no point in wrapping the model
        # inside a DistributedDataParallel as we'll be under `no_grad` anyways.
        if not training:
            return model
        # Distributed training (should be after apex fp16 initialization)
        if self.sharded_ddp is not None:
            # Sharded DDP!
            if self.sharded_ddp == ShardedDDPOption.SIMPLE:
                model = ShardedDDP(model, self.optimizer)
            else:
                mixed_precision = self.args.fp16
                cpu_offload = ShardedDDPOption.OFFLOAD in self.args.sharded_ddp
                zero_3 = self.sharded_ddp == ShardedDDPOption.ZERO_DP_3
                # XXX: Breaking the self.model convention but I see no way around it for now.
                if ShardedDDPOption.AUTO_WRAP in self.args.sharded_ddp:
                    model = auto_wrap(model)
                self.model = model = FullyShardedDDP(
                    model,
                    mixed_precision=mixed_precision,
                    reshard_after_forward=zero_3,
                    cpu_offload=cpu_offload,
                ).to(self.args.device)
        elif is_sagemaker_dp_enabled():
            model = DDP(model, device_ids=[dist.get_local_rank()], broadcast_buffers=False)
        elif self.args.local_rank != -1:
            if self.args.ddp_find_unused_parameters is not None:
                find_unused_parameters = self.args.ddp_find_unused_parameters
            elif isinstance(model, PreTrainedModel):
                # find_unused_parameters breaks checkpointing as per
                # https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021
                find_unused_parameters = not model.is_gradient_checkpointing
            else:
                find_unused_parameters = True
            model = nn.parallel.DistributedDataParallel(
                model,
                device_ids=[self.args.local_rank] if self.args._n_gpu != 0 else None,
                output_device=self.args.local_rank if self.args._n_gpu != 0 else None,
                find_unused_parameters=find_unused_parameters,
            )
        return model
def train(
self,
resume_from_checkpoint: Optional[Union[str, bool]] = None,
trial: Union["optuna.Trial", Dict[str, Any]] = None,
ignore_keys_for_eval: Optional[List[str]] = None,
**kwargs,
):
"""
Main training entry point.
Args:
resume_from_checkpoint (:obj:`str` or :obj:`bool`, `optional`):
If a :obj:`str`, local path to a saved checkpoint as saved by a previous instance of
:class:`~transformers.Trainer`. If a :obj:`bool` and equals `True`, load the last checkpoint in
`args.output_dir` as saved by a previous instance of :class:`~transformers.Trainer`. If present,
training will resume from the model/optimizer/scheduler states loaded here.
trial (:obj:`optuna.Trial` or :obj:`Dict[str, Any]`, `optional`):
The trial run or the hyperparameter dictionary for hyperparameter search.
ignore_keys_for_eval (:obj:`List[str]`, `optional`)
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions for evaluation during the training.
kwargs:
Additional keyword arguments used to hide deprecated arguments
"""
resume_from_checkpoint = None if not resume_from_checkpoint else resume_from_checkpoint
# memory metrics - must set up as early as possible
self._memory_tracker.start()
args = self.args
self.is_in_train = True
# do_train is not a reliable argument, as it might not be set and .train() still called, so
# the following is a workaround:
if args.fp16_full_eval and not args.do_train:
self._move_model_to_device(self.model, args.device)
if "model_path" in kwargs:
resume_from_checkpoint = kwargs.pop("model_path")
warnings.warn(
"`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` "
"instead.",
FutureWarning,
)
if len(kwargs) > 0:
raise TypeError(f"train() received got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.")
# This might change the seed so needs to run first.
self._hp_search_setup(trial)
# Model re-init
model_reloaded = False
if self.model_init is not None:
# Seed must be set before instantiating the model when using model_init.
set_seed(args.seed)
self.model = self.call_model_init(trial)
model_reloaded = True
# Reinitializes optimizer and scheduler
self.optimizer, self.lr_scheduler = None, None
# Load potential model checkpoint
if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint:
resume_from_checkpoint = get_last_checkpoint(args.output_dir)
if resume_from_checkpoint is None:
raise ValueError(f"No valid checkpoint found in output directory ({args.output_dir})")
if resume_from_checkpoint is not None:
if not os.path.isfile(os.path.join(resume_from_checkpoint, WEIGHTS_NAME)):
raise ValueError(f"Can't find a valid checkpoint at {resume_from_checkpoint}")
logger.info(f"Loading model from {resume_from_checkpoint}).")
if os.path.isfile(os.path.join(resume_from_checkpoint, CONFIG_NAME)):
config = PretrainedConfig.from_json_file(os.path.join(resume_from_checkpoint, CONFIG_NAME))
checkpoint_version = config.transformers_version
if checkpoint_version is not None and checkpoint_version != __version__:
logger.warn(
f"You are resuming training from a checkpoint trained with {checkpoint_version} of "
f"Transformers but your current version is {__version__}. This is not recommended and could "
"yield to errors or unwanted behaviors."
)
if args.deepspeed:
# will be resumed in deepspeed_init
pass
else:
# We load the model state dict on the CPU to avoid an OOM error.
state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME), map_location="cpu")
# If the model is on the GPU, it still works!
self._load_state_dict_in_model(state_dict)
# release memory
del state_dict
# If model was re-initialized, put it on the right device and update self.model_wrapped
if model_reloaded:
if self.place_model_on_device:
self._move_model_to_device(self.model, args.device)
self.model_wrapped = self.model
# Keeping track whether we can can len() on the dataset or not
train_dataset_is_sized = isinstance(self.train_dataset, collections.abc.Sized)
# Data loader and number of training steps
train_dataloader = self.get_train_dataloader()
# Setting up training control variables:
# number of training epochs: num_train_epochs
# number of training steps per epoch: num_update_steps_per_epoch
# total number of training steps to execute: max_steps
total_train_batch_size = args.train_batch_size * args.gradient_accumulation_steps * args.world_size
if train_dataset_is_sized:
num_update_steps_per_epoch = len(train_dataloader) // args.gradient_accumulation_steps
num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
if args.max_steps > 0:
max_steps = args.max_steps
num_train_epochs = args.max_steps // num_update_steps_per_epoch + int(
args.max_steps % num_update_steps_per_epoch > 0
)
# May be slightly incorrect if the last batch in the training datalaoder has a smaller size but it's
# the best we can do.
num_train_samples = args.max_steps * total_train_batch_size
else:
max_steps = math.ceil(args.num_train_epochs * num_update_steps_per_epoch)
num_train_epochs = math.ceil(args.num_train_epochs)
num_train_samples = len(self.train_dataset) * args.num_train_epochs
else:
# see __init__. max_steps is set when the dataset has no __len__
max_steps = args.max_steps
# Setting a very large number of epochs so we go as many times as necessary over the iterator.
num_train_epochs = sys.maxsize
num_update_steps_per_epoch = max_steps
num_train_samples = args.max_steps * total_train_batch_size
if DebugOption.UNDERFLOW_OVERFLOW in self.args.debug:
if self.args.n_gpu > 1:
# nn.DataParallel(model) replicates the model, creating new variables and module
# references registered here no longer work on other gpus, breaking the module
raise ValueError(
"Currently --debug underflow_overflow is not supported under DP. Please use DDP (torch.distributed.launch)."
)
else:
debug_overflow = DebugUnderflowOverflow(self.model) # noqa
delay_optimizer_creation = self.sharded_ddp is not None and self.sharded_ddp != ShardedDDPOption.SIMPLE
if args.deepspeed:
deepspeed_engine, optimizer, lr_scheduler = deepspeed_init(
self, num_training_steps=max_steps, resume_from_checkpoint=resume_from_checkpoint
)
self.model = deepspeed_engine.module
self.model_wrapped = deepspeed_engine
self.deepspeed = deepspeed_engine
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
elif not delay_optimizer_creation:
self.create_optimizer_and_scheduler(num_training_steps=max_steps)
self.state = TrainerState()
self.state.is_hyper_param_search = trial is not None
# Activate gradient checkpointing if needed
if args.gradient_checkpointing:
self.model.gradient_checkpointing_enable()
model = self._wrap_model(self.model_wrapped)
# for the rest of this function `model` is the outside model, whether it was wrapped or not
if model is not self.model:
self.model_wrapped = model
if delay_optimizer_creation:
self.create_optimizer_and_scheduler(num_training_steps=max_steps)
# Check if saved optimizer or scheduler states exist
self._load_optimizer_and_scheduler(resume_from_checkpoint)
# important: at this point:
# self.model is the Transformers Model
# self.model_wrapped is DDP(Transformers Model), Deepspeed(Transformers Model), etc.
# Train!
num_examples = (
self.num_examples(train_dataloader) if train_dataset_is_sized else total_train_batch_size * args.max_steps
)
logger.info("***** Running training *****")
logger.info(f" Num examples = {num_examples}")
logger.info(f" Num Epochs = {num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {max_steps}")
self.state.epoch = 0
start_time = time.time()
epochs_trained = 0
steps_trained_in_current_epoch = 0
steps_trained_progress_bar = None
# Check if continuing training from a checkpoint
if resume_from_checkpoint is not None and os.path.isfile(
os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME)
):
self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME))
epochs_trained = self.state.global_step // num_update_steps_per_epoch
if not args.ignore_data_skip:
steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)
steps_trained_in_current_epoch *= args.gradient_accumulation_steps
else:
steps_trained_in_current_epoch = 0
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(f" Continuing training from epoch {epochs_trained}")
logger.info(f" Continuing training from global step {self.state.global_step}")
if not args.ignore_data_skip:
logger.info(
f" Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} "
"batches in the first epoch. If this takes a lot of time, you can add the `--ignore_data_skip` "
"flag to your launch command, but you will resume the training on data already seen by your model."
)
if self.is_local_process_zero() and not args.disable_tqdm:
steps_trained_progress_bar = tqdm(total=steps_trained_in_current_epoch)
steps_trained_progress_bar.set_description("Skipping the first batches")
# Update the references
self.callback_handler.model = self.model
self.callback_handler.optimizer = self.optimizer
self.callback_handler.lr_scheduler = self.lr_scheduler
self.callback_handler.train_dataloader = train_dataloader
self.state.trial_name = self.hp_name(trial) if self.hp_name is not None else None
if trial is not None:
assignments = trial.assignments if self.hp_search_backend == HPSearchBackend.SIGOPT else trial
self.state.trial_params = hp_params(assignments)
else:
self.state.trial_params = None
# This should be the same if the state has been saved but in case the training arguments changed, it's safer
# to set this after the load.
self.state.max_steps = max_steps
self.state.num_train_epochs = num_train_epochs
self.state.is_local_process_zero = self.is_local_process_zero()
self.state.is_world_process_zero = self.is_world_process_zero()
# tr_loss is a tensor to avoid synchronization of TPUs through .item()
tr_loss = torch.tensor(0.0).to(args.device)
# _total_loss_scalar is updated everytime .item() has to be called on tr_loss and stores the sum of all losses
self._total_loss_scalar = 0.0
self._globalstep_last_logged = self.state.global_step
model.zero_grad()
self.control = self.callback_handler.on_train_begin(args, self.state, self.control)
# Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.
if not args.ignore_data_skip:
for epoch in range(epochs_trained):
# We just need to begin an iteration to create the randomization of the sampler.
for _ in train_dataloader:
break
for epoch in range(epochs_trained, num_train_epochs):
if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
train_dataloader.sampler.set_epoch(epoch)
elif isinstance(train_dataloader.dataset, IterableDatasetShard):
train_dataloader.dataset.set_epoch(epoch)
if is_torch_tpu_available():
parallel_loader = pl.ParallelLoader(train_dataloader, [args.device]).per_device_loader(args.device)
epoch_iterator = parallel_loader
else:
epoch_iterator = train_dataloader
# Reset the past mems state at the beginning of each epoch if necessary.
if args.past_index >= 0:
self._past = None
steps_in_epoch = (
len(epoch_iterator) if train_dataset_is_sized else args.max_steps * args.gradient_accumulation_steps
)
self.control = self.callback_handler.on_epoch_begin(args, self.state, self.control)
# step = -1
for step, inputs in enumerate(epoch_iterator):
inputs = {k: v.to('cuda') for k, v in inputs.items()}
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
if steps_trained_progress_bar is not None:
steps_trained_progress_bar.update(1)
if steps_trained_in_current_epoch == 0:
self._load_rng_state(resume_from_checkpoint)
continue
elif steps_trained_progress_bar is not None:
steps_trained_progress_bar.close()
steps_trained_progress_bar = None
if step % args.gradient_accumulation_steps == 0:
self.control = self.callback_handler.on_step_begin(args, self.state, self.control)
if (
((step + 1) % args.gradient_accumulation_steps != 0)
and args.local_rank != -1
and args._no_sync_in_gradient_accumulation
):
# Avoid unnecessary DDP synchronization since there will be no backward pass on this example.
with model.no_sync():
tr_loss_step = self.training_step(model, inputs)
else:
tr_loss_step = self.training_step(model, inputs)
if (
args.logging_nan_inf_filter
and not is_torch_tpu_available()
and (torch.isnan(tr_loss_step) or torch.isinf(tr_loss_step))
):
# if loss is nan or inf simply add the average of previous logged losses
tr_loss += tr_loss / (1 + self.state.global_step - self._globalstep_last_logged)
else:
tr_loss += tr_loss_step
self.current_flos += float(self.floating_point_ops(inputs))
# Optimizer step for deepspeed must be called on every step regardless of the value of gradient_accumulation_steps
if self.deepspeed:
self.deepspeed.step()
if (self.state.global_step) % args.eval_steps == 0:
metrics = self.evaluate(eval_dataset = self.eval_dataset,ignore_keys=ignore_keys_for_eval)
if (step + 1) % args.gradient_accumulation_steps == 0 or (
# last step in epoch but step is always smaller than gradient_accumulation_steps
steps_in_epoch <= args.gradient_accumulation_steps
and (step + 1) == steps_in_epoch
):
# Gradient clipping
if args.max_grad_norm is not None and args.max_grad_norm > 0 and not self.deepspeed:
# deepspeed does its own clipping
if self.use_amp:
# AMP: gradients need unscaling
self.scaler.unscale_(self.optimizer)
if hasattr(self.optimizer, "clip_grad_norm"):
# Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping
self.optimizer.clip_grad_norm(args.max_grad_norm)
elif hasattr(model, "clip_grad_norm_"):
# Some models (like FullyShardedDDP) have a specific way to do gradient clipping
model.clip_grad_norm_(args.max_grad_norm)
else:
# Revert to normal clipping otherwise, handling Apex or full precision
nn.utils.clip_grad_norm_(
amp.master_params(self.optimizer) if self.use_apex else model.parameters(),
args.max_grad_norm,
)
# Optimizer step
optimizer_was_run = True
if self.deepspeed:
pass # called outside the loop
elif is_torch_tpu_available():
xm.optimizer_step(self.optimizer)
elif self.use_amp:
scale_before = self.scaler.get_scale()
self.scaler.step(self.optimizer)
self.scaler.update()
scale_after = self.scaler.get_scale()
optimizer_was_run = scale_before <= scale_after
else:
self.optimizer.step()
if optimizer_was_run and not self.deepspeed:
self.lr_scheduler.step()
model.zero_grad()
self.state.global_step += 1
self.state.epoch = epoch + (step + 1) / steps_in_epoch
self.control = self.callback_handler.on_step_end(args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval)
else:
self.control = self.callback_handler.on_substep_end(args, self.state, self.control)
if self.control.should_epoch_stop or self.control.should_training_stop:
break
if step < 0:
logger.warning(
f"There seems to be not a single sample in your epoch_iterator, stopping training at step"
f" {self.state.global_step}! This is expected if you're using an IterableDataset and set"
f" num_steps ({max_steps}) higher than the number of available samples."
)
self.control.should_training_stop = True
self.control = self.callback_handler.on_epoch_end(args, self.state, self.control)
# self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval)
if DebugOption.TPU_METRICS_DEBUG in self.args.debug:
if is_torch_tpu_available():
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
else:
logger.warning(
"You enabled PyTorch/XLA debug metrics but you don't have a TPU "
"configured. Check your training configuration if this is unexpected."
)
if self.control.should_training_stop:
break
if args.past_index and hasattr(self, "_past"):
# Clean the state at the end of training
delattr(self, "_past")
logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
if args.load_best_model_at_end and self.state.best_model_checkpoint is not None:
# Wait for everyone to get here so we are sur the model has been saved by process 0.
if is_torch_tpu_available():
xm.rendezvous("load_best_model_at_end")
elif args.local_rank != -1:
dist.barrier()
logger.info(
f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric})."
)
best_model_path = os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME)
if os.path.exists(best_model_path):
# We load the model state dict on the CPU to avoid an OOM error.
state_dict = torch.load(best_model_path, map_location="cpu")
# If the model is on the GPU, it still works!
self._load_state_dict_in_model(state_dict)
else:
logger.warn(
f"Could not locate the best model at {best_model_path}, if you are running a distributed training "
"on multiple nodes, you should activate `--save_on_each_node`."
)
if self.deepspeed:
self.deepspeed.load_checkpoint(
self.state.best_model_checkpoint, load_optimizer_states=False, load_lr_scheduler_states=False
)
# add remaining tr_loss
self._total_loss_scalar += tr_loss.item()
train_loss = self._total_loss_scalar / self.state.global_step
metrics = speed_metrics("train", start_time, num_samples=num_train_samples, num_steps=self.state.max_steps)
self.store_flos()
metrics["total_flos"] = self.state.total_flos
metrics["train_loss"] = train_loss
self.is_in_train = False
self._memory_tracker.stop_and_update_metrics(metrics)
self.log(metrics)
self.control = self.callback_handler.on_train_end(args, self.state, self.control)
return TrainOutput(self.state.global_step, train_loss, metrics)
def _load_state_dict_in_model(self, state_dict):
load_result = self.model.load_state_dict(state_dict, strict=False)
if len(load_result.missing_keys) != 0:
if self.model._keys_to_ignore_on_save is not None and set(load_result.missing_keys) == set(
self.model._keys_to_ignore_on_save
):
self.model.tie_weights()
else:
logger.warn(f"There were missing keys in the checkpoint model loaded: {load_result.missing_keys}.")
if len(load_result.unexpected_keys) != 0:
logger.warn(f"There were unexpected keys in the checkpoint model loaded: {load_result.unexpected_keys}.")
    def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch, ignore_keys_for_eval):
        """
        Depending on ``self.control`` flags: log the averaged training loss, run evaluation
        (reporting the result to any HP-search backend), and/or save a checkpoint.
        """
        if self.control.should_log:
            logs: Dict[str, float] = {}
            # all_gather + mean() to get average loss over all processes
            tr_loss_scalar = self._nested_gather(tr_loss).mean().item()
            # reset tr_loss to zero — done in place so the caller's tensor is zeroed too
            tr_loss -= tr_loss
            # Average the accumulated loss over the steps since the last log.
            logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)
            logs["learning_rate"] = self._get_learning_rate()
            self._total_loss_scalar += tr_loss_scalar
            self._globalstep_last_logged = self.state.global_step
            self.store_flos()
            self.log(logs)
        metrics = None
        if self.control.should_evaluate:
            metrics = self.evaluate(ignore_keys=ignore_keys_for_eval)
            self._report_to_hp_search(trial, epoch, metrics)
        if self.control.should_save:
            # `metrics` may be None here if no evaluation was run this round.
            self._save_checkpoint(model, trial, metrics=metrics)
            self.control = self.callback_handler.on_save(self.args, self.state, self.control)
def _load_rng_state(self, checkpoint):
# Load RNG states from `checkpoint`
if checkpoint is None:
return
local_rank = xm.get_local_ordinal() if is_torch_tpu_available() else self.args.local_rank
if local_rank != -1:
rng_file = os.path.join(checkpoint, f"rng_state_{local_rank}.pth")
if not os.path.isfile(os.path.join(checkpoint, rng_file)):
logger.info(
f"Didn't find an RNG file for process {local_rank}, if you are resuming a training that "
"wasn't launched in a distributed fashion, reproducibility is not guaranteed."
)
return
else:
rng_file = os.path.join(checkpoint, "rng_state.pth")
if not os.path.isfile(rng_file):
logger.info(
"Didn't find an RNG file, if you are resuming a training that was launched in a distributed "
"fashion, reproducibility is not guaranteed."
)
return
checkpoint_rng_state = torch.load(rng_file)
random.setstate(checkpoint_rng_state["python"])
np.random.set_state(checkpoint_rng_state["numpy"])
torch.random.set_rng_state(checkpoint_rng_state["cpu"])
if torch.cuda.is_available():
if self.args.local_rank != -1:
torch.cuda.random.set_rng_state(checkpoint_rng_state["cuda"])
else:
torch.cuda.random.set_rng_state_all(checkpoint_rng_state["cuda"])
if is_torch_tpu_available():
xm.set_rng_state(checkpoint_rng_state["xla"])
    def _save_checkpoint(self, model, trial, metrics=None):
        """Save a full training checkpoint.

        Writes model weights, optimizer/scheduler state, AMP scaler, Trainer
        state and RNG states to ``{run_dir}/checkpoint-{global_step}``, then
        updates best-metric bookkeeping, optionally pushes to the hub and
        rotates old checkpoints.

        ``trial`` is only used to derive a per-run directory during a
        hyperparameter search; ``metrics`` (if given) feed the best-model
        tracking.
        """
        # In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we
        # want to save except FullyShardedDDP.
        # assert unwrap_model(model) is self.model, "internal model should be a reference to self.model"
        # Save model checkpoint
        checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"
        if self.hp_search_backend is not None and trial is not None:
            # During an HP search, nest the checkpoint under a per-run directory.
            if self.hp_search_backend == HPSearchBackend.OPTUNA:
                run_id = trial.number
            elif self.hp_search_backend == HPSearchBackend.RAY:
                from ray import tune
                run_id = tune.get_trial_id()
            elif self.hp_search_backend == HPSearchBackend.SIGOPT:
                run_id = trial.id
            run_name = self.hp_name(trial) if self.hp_name is not None else f"run-{run_id}"
            run_dir = os.path.join(self.args.output_dir, run_name)
        else:
            run_dir = self.args.output_dir
        self.store_flos()
        output_dir = os.path.join(run_dir, checkpoint_folder)
        self.save_model(output_dir)
        if self.deepspeed:
            # under zero3 model file itself doesn't get saved since it's bogus! Unless deepspeed
            # config `stage3_gather_fp16_weights_on_model_save` is True
            self.deepspeed.save_checkpoint(output_dir)
        # Save optimizer and scheduler
        if self.sharded_ddp == ShardedDDPOption.SIMPLE:
            # Sharded optimizer states must be gathered onto one process before saving.
            self.optimizer.consolidate_state_dict()
        if is_torch_tpu_available():
            xm.rendezvous("saving_optimizer_states")
            xm.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME))
            with warnings.catch_warnings(record=True) as caught_warnings:
                xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME))
            reissue_pt_warnings(caught_warnings)
        elif is_sagemaker_mp_enabled():
            if smp.dp_rank() == 0:
                # Consolidate the state dict on all processed of dp_rank 0
                opt_state_dict = self.optimizer.state_dict()
                # Save it and the scheduler on the main process
                if self.args.should_save:
                    torch.save(opt_state_dict, os.path.join(output_dir, OPTIMIZER_NAME))
                    with warnings.catch_warnings(record=True) as caught_warnings:
                        torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME))
                    reissue_pt_warnings(caught_warnings)
                    if self.use_amp:
                        torch.save(self.scaler.state_dict(), os.path.join(output_dir, SCALER_NAME))
        elif self.args.should_save and not self.deepspeed:
            # deepspeed.save_checkpoint above saves model/optim/sched
            torch.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME))
            with warnings.catch_warnings(record=True) as caught_warnings:
                torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME))
            reissue_pt_warnings(caught_warnings)
            if self.use_amp:
                torch.save(self.scaler.state_dict(), os.path.join(output_dir, SCALER_NAME))
        # Determine the new best metric / best model checkpoint
        if metrics is not None and self.args.metric_for_best_model is not None:
            metric_to_check = self.args.metric_for_best_model
            if not metric_to_check.startswith("eval_"):
                metric_to_check = f"eval_{metric_to_check}"
            metric_value = metrics[metric_to_check]
            operator = np.greater if self.args.greater_is_better else np.less
            if (
                self.state.best_metric is None
                or self.state.best_model_checkpoint is None
                or operator(metric_value, self.state.best_metric)
            ):
                self.state.best_metric = metric_value
                self.state.best_model_checkpoint = output_dir
        # Save the Trainer state
        if self.args.should_save:
            self.state.save_to_json(os.path.join(output_dir, TRAINER_STATE_NAME))
        # Save RNG state in non-distributed training
        rng_states = {
            "python": random.getstate(),
            "numpy": np.random.get_state(),
            "cpu": torch.random.get_rng_state(),
        }
        if torch.cuda.is_available():
            if self.args.local_rank == -1:
                # In non distributed, we save the global CUDA RNG state (will take care of DataParallel)
                rng_states["cuda"] = torch.cuda.random.get_rng_state_all()
            else:
                # Distributed: each process saves the state of its own GPU.
                rng_states["cuda"] = torch.cuda.random.get_rng_state()
        if is_torch_tpu_available():
            rng_states["xla"] = xm.get_rng_state()
        # A process can arrive here before the process 0 has a chance to save the model, in which case output_dir may
        # not yet exist.
        os.makedirs(output_dir, exist_ok=True)
        local_rank = xm.get_local_ordinal() if is_torch_tpu_available() else self.args.local_rank
        if local_rank == -1:
            torch.save(rng_states, os.path.join(output_dir, "rng_state.pth"))
        else:
            # Distributed: one RNG-state file per process.
            torch.save(rng_states, os.path.join(output_dir, f"rng_state_{local_rank}.pth"))
        if self.args.push_to_hub:
            self._push_from_checkpoint(output_dir)
        # Maybe delete some older checkpoints.
        if self.args.should_save:
            self._rotate_checkpoints(use_mtime=True, output_dir=run_dir)
    def _load_optimizer_and_scheduler(self, checkpoint):
        """If optimizer and scheduler states exist, load them.

        ``checkpoint`` is a directory expected to contain the optimizer and
        scheduler state files; both must be present for anything to be loaded.
        The AMP gradient scaler is restored independently when available.
        """
        if checkpoint is None:
            return
        if self.deepspeed:
            # deepspeed loads optimizer/lr_scheduler together with the model in deepspeed_init
            return
        if os.path.isfile(os.path.join(checkpoint, OPTIMIZER_NAME)) and os.path.isfile(
            os.path.join(checkpoint, SCHEDULER_NAME)
        ):
            # Load in optimizer and scheduler states
            if is_torch_tpu_available():
                # On TPU we have to take some extra precautions to properly load the states on the right device.
                optimizer_state = torch.load(os.path.join(checkpoint, OPTIMIZER_NAME), map_location="cpu")
                with warnings.catch_warnings(record=True) as caught_warnings:
                    lr_scheduler_state = torch.load(os.path.join(checkpoint, SCHEDULER_NAME), map_location="cpu")
                reissue_pt_warnings(caught_warnings)
                # Move the CPU-loaded states onto the XLA device before loading them.
                xm.send_cpu_data_to_device(optimizer_state, self.args.device)
                xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device)
                self.optimizer.load_state_dict(optimizer_state)
                self.lr_scheduler.load_state_dict(lr_scheduler_state)
            else:
                # Under SageMaker MP the optimizer state stays on CPU; otherwise
                # load straight onto the training device.
                map_location = "cpu" if is_sagemaker_mp_enabled() else self.args.device
                self.optimizer.load_state_dict(
                    torch.load(os.path.join(checkpoint, OPTIMIZER_NAME), map_location=map_location)
                )
                with warnings.catch_warnings(record=True) as caught_warnings:
                    self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, SCHEDULER_NAME)))
                reissue_pt_warnings(caught_warnings)
        # Restore the mixed-precision gradient scaler if one was saved.
        if self.use_amp and os.path.isfile(os.path.join(checkpoint, SCALER_NAME)):
            self.scaler.load_state_dict(torch.load(os.path.join(checkpoint, SCALER_NAME)))
    def hyperparameter_search(
        self,
        hp_space: Optional[Callable[["optuna.Trial"], Dict[str, float]]] = None,
        compute_objective: Optional[Callable[[Dict[str, float]], float]] = None,
        n_trials: int = 20,
        direction: str = "minimize",
        backend: Optional[Union["str", HPSearchBackend]] = None,
        hp_name: Optional[Callable[["optuna.Trial"], str]] = None,
        **kwargs,
    ) -> BestRun:
        """
        Launch an hyperparameter search using ``optuna`` or ``Ray Tune`` or ``SigOpt``. The optimized quantity is
        determined by :obj:`compute_objective`, which defaults to a function returning the evaluation loss when no
        metric is provided, the sum of all metrics otherwise.

        .. warning::

            To use this method, you need to have provided a ``model_init`` when initializing your
            :class:`~transformers.Trainer`: we need to reinitialize the model at each new run. This is incompatible
            with the ``optimizers`` argument, so you need to subclass :class:`~transformers.Trainer` and override the
            method :meth:`~transformers.Trainer.create_optimizer_and_scheduler` for custom optimizer/scheduler.

        Args:
            hp_space (:obj:`Callable[["optuna.Trial"], Dict[str, float]]`, `optional`):
                A function that defines the hyperparameter search space. Will default to
                :func:`~transformers.trainer_utils.default_hp_space_optuna` or
                :func:`~transformers.trainer_utils.default_hp_space_ray` or
                :func:`~transformers.trainer_utils.default_hp_space_sigopt` depending on your backend.
            compute_objective (:obj:`Callable[[Dict[str, float]], float]`, `optional`):
                A function computing the objective to minimize or maximize from the metrics returned by the
                :obj:`evaluate` method. Will default to :func:`~transformers.trainer_utils.default_compute_objective`.
            n_trials (:obj:`int`, `optional`, defaults to 20):
                The number of trial runs to test.
            direction(:obj:`str`, `optional`, defaults to :obj:`"minimize"`):
                Whether to optimize greater or lower objects. Can be :obj:`"minimize"` or :obj:`"maximize"`, you should
                pick :obj:`"minimize"` when optimizing the validation loss, :obj:`"maximize"` when optimizing one or
                several metrics.
            backend(:obj:`str` or :class:`~transformers.training_utils.HPSearchBackend`, `optional`):
                The backend to use for hyperparameter search. Will default to optuna or Ray Tune or SigOpt, depending
                on which one is installed. If all are installed, will default to optuna.
            hp_name (:obj:`Callable[["optuna.Trial"], str]`, `optional`):
                A function that computes the name of a run from the trial, used to name the run's output directory.
            kwargs:
                Additional keyword arguments passed along to :obj:`optuna.create_study` or :obj:`ray.tune.run`. For
                more information see:

                - the documentation of `optuna.create_study
                  <https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html>`__
                - the documentation of `tune.run
                  <https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run>`__
                - the documentation of `sigopt <https://app.sigopt.com/docs/endpoints/experiments/create>`__

        Returns:
            :class:`transformers.trainer_utils.BestRun`: All the information about the best run.
        """
        # Resolve the backend: explicit argument wins, otherwise the first installed one.
        if backend is None:
            backend = default_hp_search_backend()
            if backend is None:
                raise RuntimeError(
                    "At least one of optuna or ray should be installed. "
                    "To install optuna run `pip install optuna`. "
                    "To install ray run `pip install ray[tune]`. "
                    "To install sigopt run `pip install sigopt`."
                )
        backend = HPSearchBackend(backend)
        if backend == HPSearchBackend.OPTUNA and not is_optuna_available():
            raise RuntimeError("You picked the optuna backend, but it is not installed. Use `pip install optuna`.")
        if backend == HPSearchBackend.RAY and not is_ray_tune_available():
            raise RuntimeError(
                "You picked the Ray Tune backend, but it is not installed. Use `pip install 'ray[tune]'`."
            )
        if backend == HPSearchBackend.SIGOPT and not is_sigopt_available():
            raise RuntimeError("You picked the sigopt backend, but it is not installed. Use `pip install sigopt`.")
        self.hp_search_backend = backend
        if self.model_init is None:
            raise RuntimeError(
                "To use hyperparameter search, you need to pass your model through a model_init function."
            )
        self.hp_space = default_hp_space[backend] if hp_space is None else hp_space
        self.hp_name = hp_name
        self.compute_objective = default_compute_objective if compute_objective is None else compute_objective
        # Dispatch to the backend-specific search runner.
        backend_dict = {
            HPSearchBackend.OPTUNA: run_hp_search_optuna,
            HPSearchBackend.RAY: run_hp_search_ray,
            HPSearchBackend.SIGOPT: run_hp_search_sigopt,
        }
        best_run = backend_dict[backend](self, n_trials, direction, **kwargs)
        # Clear the marker so subsequent `train` calls behave normally.
        self.hp_search_backend = None
        return best_run
def log(self, logs: Dict[str, float]) -> None:
"""
Log :obj:`logs` on the various objects watching training.
Subclass and override this method to inject custom behavior.
Args:
logs (:obj:`Dict[str, float]`):
The values to log.
"""
if self.state.epoch is not None:
logs["epoch"] = round(self.state.epoch, 2)
output = {**logs, **{"step": self.state.global_step}}
self.state.log_history.append(output)
self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs)
def _prepare_input(self, data: Union[torch.Tensor, Any]) -> Union[torch.Tensor, Any]:
"""
Prepares one :obj:`data` before feeding it to the model, be it a tensor or a nested list/dictionary of tensors.
"""
if isinstance(data, dict):
return type(data)(**{k: self._prepare_input(v) for k, v in data.items()})
elif isinstance(data, (tuple, list)):
return type(data)(self._prepare_input(v) for v in data)
elif isinstance(data, torch.Tensor):
kwargs = dict(device=self.args.device)
if self.deepspeed and data.dtype != torch.int64:
# NLP models inputs are int64 and those get adjusted to the right dtype of the
# embedding. Other models such as wav2vec2's inputs are already float and thus
# may need special handling to match the dtypes of the model
kwargs.update(dict(dtype=self.args.hf_deepspeed_config.dtype()))
return data.to(**kwargs)
return data
def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]:
"""
Prepare :obj:`inputs` before feeding them to the model, converting them to tensors if they are not already and
handling potential state.
"""
inputs = self._prepare_input(inputs)
if self.args.past_index >= 0 and self._past is not None:
inputs["mems"] = self._past
return inputs
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """
        Perform a training step on a batch of inputs.

        Subclass and override to inject custom behavior.

        Args:
            model (:obj:`nn.Module`):
                The model to train.
            inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
                The inputs and targets of the model.

                The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
                argument :obj:`labels`. Check your model's documentation for all accepted arguments.

        Return:
            :obj:`torch.Tensor`: The tensor with training loss on this batch.
        """
        model.train()
        inputs = self._prepare_inputs(inputs)
        if is_sagemaker_mp_enabled():
            # SageMaker Model Parallel runs forward and backward in one helper call.
            scaler = self.scaler if self.use_amp else None
            loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps, scaler=scaler)
            return loss_mb.reduce_mean().detach().to(self.args.device)
        if self.use_amp:
            # Mixed-precision forward pass under autocast.
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)
        if self.args.n_gpu > 1:
            loss = loss.mean()  # mean() to average on multi-gpu parallel training
        if self.args.gradient_accumulation_steps > 1 and not self.deepspeed:
            # deepspeed handles loss scaling by gradient_accumulation_steps in its `backward`
            loss = loss / self.args.gradient_accumulation_steps
        # Backward pass: exactly one of the four paths below runs.
        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            # loss gets scaled under gradient_accumulation_steps in deepspeed
            loss = self.deepspeed.backward(loss)
        else:
            loss.backward()
        return loss.detach()
def eval_compute_loss(self, model, inputs, return_outputs=False):
"""
How the loss is computed by Trainer. By default, all models return the loss in the first element.
Subclass and override for custom behavior.
"""
if self.label_smoother is not None and "labels" in inputs:
labels = inputs.pop("labels")
else:
labels = None
outputs = model(**inputs)
# Save past state if it exists
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
if labels is not None:
loss = self.label_smoother(outputs, labels)
else:
# We don't use .loss here since the model may return tuples instead of ModelOutput.
loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
return (loss, outputs) if return_outputs else loss
def compute_loss(self, model, inputs, return_outputs=False):
"""
How the loss is computed by Trainer. By default, all models return the loss in the first element.
Subclass and override for custom behavior.
"""
if self.label_smoother is not None and "labels" in inputs:
labels = inputs.pop("labels")
else:
labels = None
inputs = {'input_ids': inputs['input_ids'][:-self.stable_batch_size, :],
'attention_mask': inputs['attention_mask'][:-self.stable_batch_size, :],
'labels': inputs['labels'][:-self.stable_batch_size],
}
outputs = model(**inputs)
# Save past state if it exists
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
if labels is not None:
loss = self.label_smoother(outputs, labels)
else:
# We don't use .loss here since the model may return tuples instead of ModelOutput.
loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
return (loss, outputs) if return_outputs else loss
def is_local_process_zero(self) -> bool:
"""
Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several
machines) main process.
"""
return self.args.local_process_index == 0
def is_world_process_zero(self) -> bool:
"""
Whether or not this process is the global main process (when training in a distributed fashion on several
machines, this is only going to be :obj:`True` for one process).
"""
# Special case for SageMaker ModelParallel since there process_index is dp_process_index, not the global
# process index.
if is_sagemaker_mp_enabled():
return smp.rank() == 0
else:
return self.args.process_index == 0
    def save_model(self, output_dir: Optional[str] = None):
        """
        Will save the model, so you can reload it using :obj:`from_pretrained()`.

        Will only save from the main process. The actual saving path depends on
        the runtime environment: TPU, SageMaker MP, sharded DDP, DeepSpeed, or
        plain single/multi-GPU.
        """
        if output_dir is None:
            output_dir = self.args.output_dir
        if is_torch_tpu_available():
            self._save_tpu(output_dir)
        elif is_sagemaker_mp_enabled():
            # Calling the state_dict needs to be done on the wrapped model and on all processes.
            state_dict = self.model_wrapped.state_dict()
            if self.args.should_save:
                self._save(output_dir, state_dict=state_dict)
        elif (
            ShardedDDPOption.ZERO_DP_2 in self.args.sharded_ddp or ShardedDDPOption.ZERO_DP_3 in self.args.sharded_ddp
        ):
            # Sharded DDP: every process participates in building the state dict,
            # but only the main process writes it out.
            state_dict = self.model.state_dict()
            if self.args.should_save:
                self._save(output_dir, state_dict=state_dict)
        elif self.deepspeed:
            # this takes care of everything as long as we aren't under zero3
            if self.args.should_save:
                self._save(output_dir)
            if is_deepspeed_zero3_enabled():
                # It's too complicated to try to override different places where the weights dump gets
                # saved, so since under zero3 the file is bogus, simply delete it. The user should
                # either user deepspeed checkpoint to resume or to recover full weights use
                # zero_to_fp32.py stored in the checkpoint.
                if self.args.should_save:
                    file = os.path.join(output_dir, WEIGHTS_NAME)
                    if os.path.isfile(file):
                        # logger.info(f"deepspeed zero3: removing {file}, see zero_to_fp32.py to recover weights")
                        os.remove(file)
                # now save the real model if stage3_gather_fp16_weights_on_model_save=True
                # if false it will not be saved.
                # This must be called on all ranks
                self.deepspeed.save_fp16_model(output_dir, WEIGHTS_NAME)
        elif self.args.should_save:
            self._save(output_dir)
    def _save_tpu(self, output_dir: Optional[str] = None):
        """Save model, tokenizer and training args on TPU through ``xm.save``."""
        output_dir = output_dir if output_dir is not None else self.args.output_dir
        logger.info(f"Saving model checkpoint to {output_dir}")
        if xm.is_master_ordinal():
            # Only the master ordinal creates the directory and saves the args.
            os.makedirs(output_dir, exist_ok=True)
            torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME))
        # Save a trained model and configuration using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        xm.rendezvous("saving_checkpoint")
        if not isinstance(self.model, PreTrainedModel):
            if isinstance(unwrap_model(self.model), PreTrainedModel):
                # The model is wrapped: save through the unwrapped `PreTrainedModel`
                # using the wrapper's state dict and TPU-aware save function.
                unwrap_model(self.model).save_pretrained(
                    output_dir,
                    save_config=self.args.should_save,
                    state_dict=self.model.state_dict(),
                    save_function=xm.save,
                )
            else:
                logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
                state_dict = self.model.state_dict()
                xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
        else:
            self.model.save_pretrained(output_dir, save_config=self.args.should_save, save_function=xm.save)
        if self.tokenizer is not None and self.args.should_save:
            self.tokenizer.save_pretrained(output_dir)
    def _save(self, output_dir: Optional[str] = None, state_dict=None):
        """Write model weights, tokenizer and training args to ``output_dir``.

        Assumes it runs on the main process only. ``state_dict`` may be
        supplied by callers (e.g. sharded/SageMaker paths) that had to gather
        it on all processes beforehand.
        """
        # If we are executing this function, we are the process zero, so we don't check for that.
        output_dir = output_dir if output_dir is not None else self.args.output_dir
        os.makedirs(output_dir, exist_ok=True)
        logger.info(f"Saving model checkpoint to {output_dir}")
        # Save a trained model and configuration using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        if not isinstance(self.model, PreTrainedModel):
            if isinstance(unwrap_model(self.model), PreTrainedModel):
                if state_dict is None:
                    state_dict = self.model.state_dict()
                unwrap_model(self.model).save_pretrained(output_dir, state_dict=state_dict)
            else:
                logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
                if state_dict is None:
                    state_dict = self.model.state_dict()
                torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
        else:
            self.model.save_pretrained(output_dir, state_dict=state_dict)
        if self.tokenizer is not None:
            self.tokenizer.save_pretrained(output_dir)
        # Good practice: save your training arguments together with the trained model
        torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME))
def store_flos(self):
# Storing the number of floating-point operations that went into the model
if self.args.local_rank != -1:
self.state.total_flos += (
distributed_broadcast_scalars([self.current_flos], device=self.args.device).sum().item()
)
self.current_flos = 0
else:
self.state.total_flos += self.current_flos
self.current_flos = 0
    def _sorted_checkpoints(
        self, output_dir=None, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False
    ) -> List[str]:
        """Return checkpoint directories under ``output_dir``, oldest first.

        Ordering is by the global step parsed from the folder name, or by file
        modification time when ``use_mtime`` is True. If a best model
        checkpoint is tracked, it is shuffled towards the end of the list so
        checkpoint rotation deletes it last.
        """
        ordering_and_checkpoint_path = []
        glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{checkpoint_prefix}-*")]
        for path in glob_checkpoints:
            if use_mtime:
                ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
            else:
                # Extract the step number from names like "checkpoint-1234".
                regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path)
                if regex_match is not None and regex_match.groups() is not None:
                    ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
        checkpoints_sorted = sorted(ordering_and_checkpoint_path)
        checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
        # Make sure we don't delete the best model.
        if self.state.best_model_checkpoint is not None:
            best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint)))
            # Bubble the best checkpoint towards the newest end of the list
            # (one slot before the last), swapping one position at a time.
            for i in range(best_model_index, len(checkpoints_sorted) - 2):
                checkpoints_sorted[i], checkpoints_sorted[i + 1] = checkpoints_sorted[i + 1], checkpoints_sorted[i]
        return checkpoints_sorted
def _rotate_checkpoints(self, use_mtime=False, output_dir=None) -> None:
if self.args.save_total_limit is None or self.args.save_total_limit <= 0:
return
# Check if we should delete older checkpoint(s)
checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime, output_dir=output_dir)
if len(checkpoints_sorted) <= self.args.save_total_limit:
return
# If save_total_limit=1 with load_best_model_at_end=True, we could end up deleting the last checkpoint, which
# we don't do to allow resuming.
save_total_limit = self.args.save_total_limit
if (
self.state.best_model_checkpoint is not None
and self.args.save_total_limit == 1
and checkpoints_sorted[-1] != self.state.best_model_checkpoint
):
save_total_limit = 2
number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - save_total_limit)
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info(f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit")
shutil.rmtree(checkpoint)
    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
    ) -> Dict[str, float]:
        """
        Run evaluation and returns metrics.

        The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
        (pass it to the init :obj:`compute_metrics` argument).

        You can also subclass and override this method to inject custom behavior.

        Args:
            eval_dataset (:obj:`Dataset`, `optional`):
                Pass a dataset if you wish to override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`,
                columns not accepted by the ``model.forward()`` method are automatically removed. It must implement the
                :obj:`__len__` method.
            ignore_keys (:obj:`Lst[str]`, `optional`):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions.
            metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`):
                An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
                "eval_bleu" if the prefix is "eval" (default)

        Returns:
            A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
            dictionary also contains the epoch number which comes from the training state.
        """
        # memory metrics - must set up as early as possible
        self._memory_tracker.start()
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        output = eval_loop(
            eval_dataloader,
            description="Evaluation",
            # No point gathering the predictions if there are no metrics, otherwise we defer to
            # self.args.prediction_loss_only
            # NOTE(review): hard-coded to False here (stock Trainer derives it
            # from compute_metrics) — confirm predictions are always wanted.
            prediction_loss_only=False,
            ignore_keys=ignore_keys,
            metric_key_prefix=metric_key_prefix,
        )
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        # NOTE(review): speed metrics and their logging are commented out below,
        # diverging from the stock Trainer.evaluate — confirm this is intentional.
        # output.metrics.update(
        #     speed_metrics(
        #         metric_key_prefix,
        #         start_time,
        #         num_samples=output.num_samples,
        #         num_steps=math.ceil(output.num_samples / total_batch_size),
        #     )
        # )
        # self.log(output.metrics)
        if DebugOption.TPU_METRICS_DEBUG in self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics)
        self._memory_tracker.stop_and_update_metrics(output.metrics)
        return output.metrics
    def predict(
        self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "test"
    ) -> PredictionOutput:
        """
        Run prediction and returns predictions and potential metrics.

        Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
        will also return metrics, like in :obj:`evaluate()`.

        Args:
            test_dataset (:obj:`Dataset`):
                Dataset to run the predictions on. If it is an :obj:`datasets.Dataset`, columns not accepted by the
                ``model.forward()`` method are automatically removed. Has to implement the method :obj:`__len__`
            ignore_keys (:obj:`Lst[str]`, `optional`):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions.
            metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"test"`):
                An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
                "test_bleu" if the prefix is "test" (default)

        .. note::

            If your predictions or labels have different sequence length (for instance because you're doing dynamic
            padding in a token classification task) the predictions will be padded (on the right) to allow for
            concatenation into one array. The padding index is -100.

        Returns: `NamedTuple` A namedtuple with the following keys:

            - predictions (:obj:`np.ndarray`): The predictions on :obj:`test_dataset`.
            - label_ids (:obj:`np.ndarray`, `optional`): The labels (if the dataset contained some).
            - metrics (:obj:`Dict[str, float]`, `optional`): The potential dictionary of metrics (if the dataset
              contained labels).
        """
        # memory metrics - must set up as early as possible
        self._memory_tracker.start()
        test_dataloader = self.get_test_dataloader(test_dataset)
        start_time = time.time()
        # Use the legacy prediction loop only when explicitly requested.
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        output = eval_loop(
            test_dataloader, description="Prediction", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix
        )
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        # Add throughput metrics (runtime, samples/sec, steps/sec) for the run.
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        self._memory_tracker.stop_and_update_metrics(output.metrics)
        return PredictionOutput(predictions=output.predictions, label_ids=output.label_ids, metrics=output.metrics)
    def evaluation_loop(
        self,
        dataloader: DataLoader,
        description: str,
        prediction_loss_only: Optional[bool] = None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
    ) -> EvalLoopOutput:
        """
        Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.
        Works both with or without labels.

        Customized version of the stock HF loop for link prediction: for every batch it
        additionally ranks the gold entity among the entity slice
        ``[self.entity_id_st:self.entity_id_ed]`` of the vocabulary logits at the
        ``[MASK]`` position, splits each batch's ranks into an "edit" part and a trailing
        "stable" part of size ``self.stable_batch_size``, and finally calls
        ``self.compute_metrics(all_edit_ranks, all_stable_ranks)`` instead of passing an
        ``EvalPrediction``.
        """
        prediction_loss_only = (
            prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
        )
        # if eval is called w/o train init deepspeed here
        if self.args.deepspeed and not self.deepspeed:
            # XXX: eval doesn't have `resume_from_checkpoint` arg but we should be able to do eval
            # from the checkpoint eventually
            deepspeed_engine, _, _ = deepspeed_init(self, num_training_steps=0, resume_from_checkpoint=None)
            self.model = deepspeed_engine.module
            self.model_wrapped = deepspeed_engine
            self.deepspeed = deepspeed_engine
            # XXX: we don't need optim/sched for inference, but this needs to be sorted out, since
            # for example the Z3-optimizer is a must for zero3 to work even for inference - what we
            # don't need is the deepspeed basic optimizer which is self.optimizer.optimizer
            deepspeed_engine.optimizer.optimizer = None
            deepspeed_engine.lr_scheduler = None
        model = self._wrap_model(self.model, training=False)
        # if full fp16 is wanted on eval and this ``evaluation`` or ``predict`` isn't called while
        # ``train`` is running, halve it first and then put on device
        if not self.is_in_train and self.args.fp16_full_eval:
            model = model.half().to(self.args.device)
        batch_size = dataloader.batch_size
        logger.info(f"***** Running {description} *****")
        if isinstance(dataloader.dataset, collections.abc.Sized):
            logger.info(f"  Num examples = {self.num_examples(dataloader)}")
        else:
            logger.info("  Num examples: Unknown")
        logger.info(f"  Batch size = {batch_size}")
        model.eval()
        self.callback_handler.eval_dataloader = dataloader
        # Do this before wrapping.
        eval_dataset = dataloader.dataset
        if is_torch_tpu_available():
            dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)
        if self.args.past_index >= 0:
            self._past = None
        # Initialize containers
        # losses/preds/labels on GPU/TPU (accumulated for eval_accumulation_steps)
        losses_host = None
        preds_host = None
        labels_host = None
        # losses/preds/labels on CPU (final containers)
        all_losses = None
        all_preds = None
        all_labels = None
        # Per-example 1-based ranks of the gold entity, split per batch into an "edit"
        # group and a trailing "stable" group (see the batch loop below).
        all_edit_ranks = []
        all_stable_ranks = []
        # Will be useful when we have an iterable dataset so don't know its length.
        observed_num_examples = 0
        # Main evaluation loop
        for step, inputs in enumerate(dataloader):
            # Update the observed num examples
            observed_batch_size = find_batch_size(inputs)
            if observed_batch_size is not None:
                observed_num_examples += observed_batch_size
                # For batch samplers, batch_size is not known by the dataloader in advance.
                if batch_size is None:
                    batch_size = observed_batch_size
            # Keep only the keys the model's forward expects; any other collated fields
            # are dropped here.
            inputs = {
                'input_ids': inputs['input_ids'],
                'attention_mask': inputs['attention_mask'],
                'labels': inputs['labels']
            }
            # Prediction step
            loss, logits, labels = self.eval_prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
            # Restrict the vocabulary logits to the entity-token range.
            logits = logits[:, :, self.entity_id_st:self.entity_id_ed]
            # Positions of the [MASK] token in each sequence (assumes exactly one mask
            # per example so that mask_idx has one entry per row — TODO confirm).
            _, mask_idx = (inputs["input_ids"] == self.tokenizer.mask_token_id).nonzero(as_tuple=True)
            bsz = inputs["input_ids"].shape[0]
            logits = logits[torch.arange(bsz), mask_idx]
            # Double argsort: first sort scores descending, then sorting the resulting
            # permutation yields each entity's 0-based rank; +1 makes ranks 1-based.
            _, outputs = torch.sort(logits, dim=1, descending=True)
            _, outputs = torch.sort(outputs, dim=1)
            ranks = outputs[torch.arange(bsz), inputs["labels"]].detach().cpu() + 1
            # The last ``stable_batch_size`` examples of the batch form the "stable"
            # set; everything before them is the "edit" set.
            edit_ranks = ranks[:-self.stable_batch_size]
            stable_ranks = ranks[-self.stable_batch_size:]
            all_edit_ranks += (edit_ranks.tolist())
            all_stable_ranks += (stable_ranks.tolist())
            # Update containers on host
            if loss is not None:
                losses = self._nested_gather(loss.repeat(batch_size))
                losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
            if logits is not None:
                logits = self._pad_across_processes(logits)
                logits = self._nested_gather(logits)
                preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
            if labels is not None:
                labels = self._pad_across_processes(labels)
                labels = self._nested_gather(labels)
                labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
            self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control)
            # Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
            if self.args.eval_accumulation_steps is not None and (step + 1) % self.args.eval_accumulation_steps == 0:
                if losses_host is not None:
                    losses = nested_numpify(losses_host)
                    all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0)
                if preds_host is not None:
                    logits = nested_numpify(preds_host)
                    all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
                if labels_host is not None:
                    labels = nested_numpify(labels_host)
                    all_labels = (
                        labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100)
                    )
                # Set back to None to begin a new accumulation
                losses_host, preds_host, labels_host = None, None, None
        if self.args.past_index and hasattr(self, "_past"):
            # Clean the state at the end of the evaluation loop
            delattr(self, "_past")
        # Gather all remaining tensors and put them back on the CPU
        if losses_host is not None:
            losses = nested_numpify(losses_host)
            all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0)
        if preds_host is not None:
            logits = nested_numpify(preds_host)
            all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
        if labels_host is not None:
            labels = nested_numpify(labels_host)
            all_labels = labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100)
        # Number of samples
        if not isinstance(eval_dataset, IterableDataset):
            num_samples = len(eval_dataset)
        # The instance check is weird and does not actually check for the type, but whether the dataset has the right
        # methods. Therefore we need to make sure it also has the attribute.
        elif isinstance(eval_dataset, IterableDatasetShard) and hasattr(eval_dataset, "num_examples"):
            num_samples = eval_dataset.num_examples
        else:
            num_samples = observed_num_examples
        # Number of losses has been rounded to a multiple of batch_size and in a distributed training, the number of
        # samplers has been rounded to a multiple of batch_size, so we truncate.
        if all_losses is not None:
            all_losses = all_losses[:num_samples]
        if all_preds is not None:
            all_preds = nested_truncate(all_preds, num_samples)
        if all_labels is not None:
            all_labels = nested_truncate(all_labels, num_samples)
        # Metrics!
        # NOTE: unlike stock HF, compute_metrics receives the two rank lists, not an
        # EvalPrediction — any compute_metrics passed to this trainer must match that.
        if self.compute_metrics is not None and all_preds is not None and all_labels is not None:
            metrics = self.compute_metrics(all_edit_ranks, all_stable_ranks)
        else:
            metrics = {}
        # To be JSON-serializable, we need to remove numpy types or zero-d tensors
        # metrics = None
        # if all_losses is not None:
        #     metrics[f"{metric_key_prefix}_loss"] = all_losses.mean().item()
        # # Prefix all keys with metric_key_prefix + '_'
        # for key in list(metrics.keys()):
        #     if not key.startswith(f"{metric_key_prefix}_"):
        #         metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        return EvalLoopOutput(predictions=all_preds, label_ids=all_labels, metrics=metrics, num_samples=num_samples)
def _nested_gather(self, tensors, name=None):
"""
Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before
concatenating them to `gathered`
"""
if tensors is None:
return
if is_torch_tpu_available():
if name is None:
name = "nested_gather"
tensors = nested_xla_mesh_reduce(tensors, name)
elif is_sagemaker_mp_enabled():
tensors = smp_gather(tensors)
elif self.args.local_rank != -1:
tensors = distributed_concat(tensors)
return tensors
# Copied from Accelerate.
def _pad_across_processes(self, tensor, pad_index=-100):
"""
Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so
they can safely be gathered.
"""
if isinstance(tensor, (list, tuple)):
return type(tensor)(self._pad_across_processes(t, pad_index=pad_index) for t in tensor)
elif isinstance(tensor, dict):
return type(tensor)({k: self._pad_across_processes(v, pad_index=pad_index) for k, v in tensor.items()})
elif not isinstance(tensor, torch.Tensor):
raise TypeError(
f"Can't pad the values of type {type(tensor)}, only of nested list/tuple/dicts of tensors."
)
if len(tensor.shape) < 2:
return tensor
# Gather all sizes
size = torch.tensor(tensor.shape, device=tensor.device)[None]
sizes = self._nested_gather(size).cpu()
max_size = max(s[1] for s in sizes)
if tensor.shape[1] == max_size:
return tensor
# Then pad to the maximum size
old_size = tensor.shape
new_size = list(old_size)
new_size[1] = max_size
new_tensor = tensor.new_zeros(tuple(new_size)) + pad_index
new_tensor[:, : old_size[1]] = tensor
return new_tensor
    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]:
        """
        Perform an evaluation step on :obj:`model` using obj:`inputs`.
        Subclass and override to inject custom behavior.
        Args:
            model (:obj:`nn.Module`):
                The model to evaluate.
            inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
                The inputs and targets of the model.
                The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
                argument :obj:`labels`. Check your model's documentation for all accepted arguments.
            prediction_loss_only (:obj:`bool`):
                Whether or not to return the loss only.
            ignore_keys (:obj:`List[str]`, `optional`):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions.
        Return:
            Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss,
            logits and labels (each being optional).
        """
        # Labels are only considered present when every name in self.label_names is supplied.
        has_labels = all(inputs.get(k) is not None for k in self.label_names)
        inputs = self._prepare_inputs(inputs)
        if ignore_keys is None:
            # Default to the output keys the model config declares as non-predictions.
            if hasattr(self.model, "config"):
                ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", [])
            else:
                ignore_keys = []
        # labels may be popped when computing the loss (label smoothing for instance) so we grab them first.
        if has_labels:
            labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))
            if len(labels) == 1:
                # Unwrap a single-label tuple to a bare tensor.
                labels = labels[0]
        else:
            labels = None
        with torch.no_grad():
            if is_sagemaker_mp_enabled():
                # SageMaker model parallelism: forward-only pass whose per-microbatch
                # outputs are reduced/concatenated below.
                raw_outputs = smp_forward_only(model, inputs)
                if has_labels:
                    if isinstance(raw_outputs, dict):
                        loss_mb = raw_outputs["loss"]
                        logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys + ["loss"])
                    else:
                        loss_mb = raw_outputs[0]
                        logits_mb = raw_outputs[1:]
                    loss = loss_mb.reduce_mean().detach().cpu()
                    logits = smp_nested_concat(logits_mb)
                else:
                    loss = None
                    if isinstance(raw_outputs, dict):
                        logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys)
                    else:
                        logits_mb = raw_outputs
                    logits = smp_nested_concat(logits_mb)
            else:
                if has_labels:
                    # compute_loss also returns the model outputs so the logits can be kept.
                    if self.use_amp:
                        with autocast():
                            loss, outputs = self.compute_loss(model, inputs, return_outputs=True)
                    else:
                        loss, outputs = self.compute_loss(model, inputs, return_outputs=True)
                    loss = loss.mean().detach()
                    if isinstance(outputs, dict):
                        logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"])
                    else:
                        # Tuple outputs: position 0 is the loss, the rest are logits & co.
                        logits = outputs[1:]
                else:
                    loss = None
                    if self.use_amp:
                        with autocast():
                            outputs = model(**inputs)
                    else:
                        outputs = model(**inputs)
                    if isinstance(outputs, dict):
                        logits = tuple(v for k, v in outputs.items() if k not in ignore_keys)
                    else:
                        logits = outputs
                    # TODO: this needs to be fixed and made cleaner later.
                    if self.args.past_index >= 0:
                        # Keep the model's past state for the next step (XLNet-style models).
                        self._past = outputs[self.args.past_index - 1]
        if prediction_loss_only:
            return (loss, None, None)
        logits = nested_detach(logits)
        if len(logits) == 1:
            logits = logits[0]
        return (loss, logits, labels)
    def eval_prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]:
        """
        Perform an evaluation step on :obj:`model` using obj:`inputs`.
        Subclass and override to inject custom behavior.

        Identical to :meth:`prediction_step` except that, when labels are present and
        SageMaker MP is not enabled, the loss is computed via ``self.eval_compute_loss``
        instead of ``self.compute_loss``.
        Args:
            model (:obj:`nn.Module`):
                The model to evaluate.
            inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
                The inputs and targets of the model.
                The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
                argument :obj:`labels`. Check your model's documentation for all accepted arguments.
            prediction_loss_only (:obj:`bool`):
                Whether or not to return the loss only.
            ignore_keys (:obj:`List[str]`, `optional`):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions.
        Return:
            Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss,
            logits and labels (each being optional).
        """
        # Labels are only considered present when every name in self.label_names is supplied.
        has_labels = all(inputs.get(k) is not None for k in self.label_names)
        inputs = self._prepare_inputs(inputs)
        if ignore_keys is None:
            # Default to the output keys the model config declares as non-predictions.
            if hasattr(self.model, "config"):
                ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", [])
            else:
                ignore_keys = []
        # labels may be popped when computing the loss (label smoothing for instance) so we grab them first.
        if has_labels:
            labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))
            if len(labels) == 1:
                # Unwrap a single-label tuple to a bare tensor.
                labels = labels[0]
        else:
            labels = None
        with torch.no_grad():
            if is_sagemaker_mp_enabled():
                # SageMaker model parallelism: forward-only pass whose per-microbatch
                # outputs are reduced/concatenated below.
                raw_outputs = smp_forward_only(model, inputs)
                if has_labels:
                    if isinstance(raw_outputs, dict):
                        loss_mb = raw_outputs["loss"]
                        logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys + ["loss"])
                    else:
                        loss_mb = raw_outputs[0]
                        logits_mb = raw_outputs[1:]
                    loss = loss_mb.reduce_mean().detach().cpu()
                    logits = smp_nested_concat(logits_mb)
                else:
                    loss = None
                    if isinstance(raw_outputs, dict):
                        logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys)
                    else:
                        logits_mb = raw_outputs
                    logits = smp_nested_concat(logits_mb)
            else:
                if has_labels:
                    # Evaluation-specific loss; also returns the outputs to keep the logits.
                    if self.use_amp:
                        with autocast():
                            loss, outputs = self.eval_compute_loss(model, inputs, return_outputs=True)
                    else:
                        loss, outputs = self.eval_compute_loss(model, inputs, return_outputs=True)
                    loss = loss.mean().detach()
                    if isinstance(outputs, dict):
                        logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"])
                    else:
                        # Tuple outputs: position 0 is the loss, the rest are logits & co.
                        logits = outputs[1:]
                else:
                    loss = None
                    if self.use_amp:
                        with autocast():
                            outputs = model(**inputs)
                    else:
                        outputs = model(**inputs)
                    if isinstance(outputs, dict):
                        logits = tuple(v for k, v in outputs.items() if k not in ignore_keys)
                    else:
                        logits = outputs
                    # TODO: this needs to be fixed and made cleaner later.
                    if self.args.past_index >= 0:
                        # Keep the model's past state for the next step (XLNet-style models).
                        self._past = outputs[self.args.past_index - 1]
        if prediction_loss_only:
            return (loss, None, None)
        logits = nested_detach(logits)
        if len(logits) == 1:
            logits = logits[0]
        return (loss, logits, labels)
def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]):
"""
For models that inherit from :class:`~transformers.PreTrainedModel`, uses that method to compute the number of
floating point operations for every backward + forward pass. If using another model, either implement such a
method in the model or subclass and override this method.
Args:
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
Returns:
:obj:`int`: The number of floating-point operations.
"""
if hasattr(self.model, "floating_point_ops"):
return self.model.floating_point_ops(inputs)
else:
return 0
    def init_git_repo(self):
        """
        Initializes a git repo in :obj:`self.args.hub_model_id`.

        Only the world-zero process touches the filesystem/hub. The repo is cloned into
        ``self.args.output_dir``; when cloning fails and ``overwrite_output_dir`` is set,
        the output dir is wiped and the clone retried once. Checkpoint folders are
        git-ignored unless the hub strategy pushes all checkpoints.
        """
        if not self.is_world_process_zero():
            return
        # True lets huggingface_hub fall back to the locally stored token.
        use_auth_token = True if self.args.hub_token is None else self.args.hub_token
        if self.args.hub_model_id is None:
            # Default the repo name to the output directory's basename.
            repo_name = Path(self.args.output_dir).absolute().name
        else:
            repo_name = self.args.hub_model_id
        if "/" not in repo_name:
            # Bare names get qualified with the authenticated user/org namespace.
            repo_name = get_full_repo_name(repo_name, token=self.args.hub_token)
        try:
            self.repo = Repository(
                self.args.output_dir,
                clone_from=repo_name,
                use_auth_token=use_auth_token,
            )
        except EnvironmentError:
            if self.args.overwrite_output_dir:
                # Try again after wiping output_dir
                shutil.rmtree(self.args.output_dir)
                self.repo = Repository(
                    self.args.output_dir,
                    clone_from=repo_name,
                    use_auth_token=use_auth_token,
                )
            else:
                raise
        self.repo.git_pull()
        # By default, ignore the checkpoint folders
        if (
            not os.path.exists(os.path.join(self.args.output_dir, ".gitignore"))
            and self.args.hub_strategy != HubStrategy.ALL_CHECKPOINTS
        ):
            with open(os.path.join(self.args.output_dir, ".gitignore"), "w", encoding="utf-8") as writer:
                writer.writelines(["checkpoint-*/"])
        # No asynchronous push is running yet.
        self.push_in_progress = None
def create_model_card(
self,
language: Optional[str] = None,
license: Optional[str] = None,
tags: Optional[str] = None,
model_name: Optional[str] = None,
finetuned_from: Optional[str] = None,
tasks: Optional[str] = None,
dataset_tags: Optional[Union[str, List[str]]] = None,
dataset: Optional[Union[str, List[str]]] = None,
dataset_args: Optional[Union[str, List[str]]] = None,
):
training_summary = TrainingSummary.from_trainer(
self,
language=language,
license=license,
tags=tags,
model_name=model_name,
finetuned_from=finetuned_from,
tasks=tasks,
dataset_tags=dataset_tags,
dataset=dataset,
dataset_args=dataset_args,
)
model_card = training_summary.to_model_card()
with open(os.path.join(self.args.output_dir, "README.md"), "w") as f:
f.write(model_card)
    def _push_from_checkpoint(self, checkpoint_folder):
        """
        Push the content of ``checkpoint_folder`` to the hub asynchronously.

        Model config/weights are copied from the checkpoint into ``output_dir`` (avoiding
        a fresh serialization of the weights); the tokenizer and training args are resaved
        there directly. No-op on non-zero ranks, when ``hub_strategy`` is END, or while a
        previous asynchronous push is still in flight.
        """
        # Only push from one node.
        if not self.is_world_process_zero() or self.args.hub_strategy == HubStrategy.END:
            return
        # If we haven't finished the last push, we don't do this one.
        if self.push_in_progress is not None and not self.push_in_progress.is_done:
            return
        output_dir = self.args.output_dir
        # To avoid a new synchronization of all model weights, we just copy the file from the checkpoint folder
        modeling_files = [CONFIG_NAME, WEIGHTS_NAME]
        for modeling_file in modeling_files:
            if os.path.isfile(os.path.join(checkpoint_folder, modeling_file)):
                shutil.copy(os.path.join(checkpoint_folder, modeling_file), os.path.join(output_dir, modeling_file))
        # Saving the tokenizer is fast and we don't know how many files it may have spawned, so we resave it to be sure.
        if self.tokenizer is not None:
            self.tokenizer.save_pretrained(output_dir)
        # Same for the training arguments
        torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME))
        try:
            if self.args.hub_strategy == HubStrategy.CHECKPOINT:
                # Temporarily move the checkpoint just saved for the push
                tmp_checkpoint = os.path.join(output_dir, "last-checkpoint")
                # We have to remove the "last-checkpoint" dir if it exists, otherwise the checkpoint is moved as a
                # subfolder.
                if os.path.isdir(tmp_checkpoint):
                    shutil.rmtree(tmp_checkpoint)
                shutil.move(checkpoint_folder, tmp_checkpoint)
            if self.args.save_strategy == IntervalStrategy.STEPS:
                commit_message = f"Training in progress, step {self.state.global_step}"
            else:
                commit_message = f"Training in progress, epoch {int(self.state.epoch)}"
            # blocking=False: the push runs in the background; progress tracked in
            # self.push_in_progress and checked on the next call.
            _, self.push_in_progress = self.repo.push_to_hub(
                commit_message=commit_message, blocking=False, auto_lfs_prune=True
            )
        finally:
            if self.args.hub_strategy == HubStrategy.CHECKPOINT:
                # Move back the checkpoint to its place
                shutil.move(tmp_checkpoint, checkpoint_folder)
    def push_to_hub(self, commit_message: Optional[str] = "End of training", blocking: bool = True, **kwargs) -> str:
        """
        Upload `self.model` and `self.tokenizer` to the 🤗 model hub on the repo `self.args.hub_model_id`.
        Parameters:
            commit_message (:obj:`str`, `optional`, defaults to :obj:`"End of training"`):
                Message to commit while pushing.
            blocking (:obj:`bool`, `optional`, defaults to :obj:`True`):
                Whether the function should return only when the :obj:`git push` has finished.
            kwargs:
                Additional keyword arguments passed along to :meth:`~transformers.Trainer.create_model_card`.
        Returns:
            The url of the commit of your model in the given repository if :obj:`blocking=False`, a tuple with the url
            of the commit and an object to track the progress of the commit if :obj:`blocking=True`.
            Returns ``None`` on processes other than world-zero.
        """
        if self.args.should_save:
            if self.args.hub_model_id is None:
                model_name = Path(self.args.output_dir).name
            else:
                model_name = self.args.hub_model_id.split("/")[-1]
        # Needs to be executed on all processes for TPU training, but will only save on the processed determined by
        # self.args.should_save.
        self.save_model()
        # Only push from one node.
        if not self.is_world_process_zero():
            return
        git_head_commit_url = self.repo.push_to_hub(
            commit_message=commit_message, blocking=blocking, auto_lfs_prune=True
        )
        # push separately the model card to be independant from the rest of the model
        if self.args.should_save:
            # model_name was set above under the same should_save guard.
            self.create_model_card(model_name=model_name, **kwargs)
            try:
                self.repo.push_to_hub(
                    commit_message="update model card README.md", blocking=blocking, auto_lfs_prune=True
                )
            except EnvironmentError as exc:
                # Best-effort: a failed model-card push must not fail the whole upload.
                logger.error(f"Error pushing update to the model card. Please read logs and retry.\n${exc}")
        return git_head_commit_url
#
# Deprecated code
#
    def prediction_loop(
        self,
        dataloader: DataLoader,
        description: str,
        prediction_loss_only: Optional[bool] = None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
    ) -> PredictionOutput:
        """
        Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.
        Works both with or without labels.

        Deprecated legacy loop, kept for ``args.use_legacy_prediction_loop``; requires a
        sized dataset and gathers results with :class:`DistributedTensorGatherer`.
        """
        if not isinstance(dataloader.dataset, collections.abc.Sized):
            raise ValueError("dataset must implement __len__")
        prediction_loss_only = (
            prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
        )
        # if eval is called w/o train init deepspeed here
        if self.args.deepspeed and not self.deepspeed:
            # XXX: eval doesn't have `resume_from_checkpoint` arg but we should be able to do eval
            # from the checkpoint eventually
            deepspeed_engine, _, _ = deepspeed_init(self, num_training_steps=0, resume_from_checkpoint=None)
            self.model = deepspeed_engine.module
            self.model_wrapped = deepspeed_engine
            self.deepspeed = deepspeed_engine
            # XXX: we don't need optim/sched for inference, but this needs to be sorted out, since
            # for example the Z3-optimizer is a must for zero3 to work even for inference - what we
            # don't need is the deepspeed basic optimizer which is self.optimizer.optimizer
            deepspeed_engine.optimizer.optimizer = None
            deepspeed_engine.lr_scheduler = None
        model = self._wrap_model(self.model, training=False)
        # if full fp16 is wanted on eval and this ``evaluation`` or ``predict`` isn't called while
        # ``train`` is running, halve it first and then put on device
        if not self.is_in_train and self.args.fp16_full_eval:
            model = model.half().to(self.args.device)
        batch_size = dataloader.batch_size
        num_examples = self.num_examples(dataloader)
        logger.info(f"***** Running {description} *****")
        logger.info(f"  Num examples = {num_examples}")
        logger.info(f"  Batch size = {batch_size}")
        # On-device accumulators, periodically flushed into the gatherers.
        losses_host: Optional[torch.Tensor] = None
        preds_host: Optional[Union[torch.Tensor, List[torch.Tensor]]] = None
        labels_host: Optional[Union[torch.Tensor, List[torch.Tensor]]] = None
        world_size = max(1, self.args.world_size)
        eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size)
        if not prediction_loss_only:
            # The actual number of eval_sample can be greater than num_examples in distributed settings (when we pass
            # a batch size to the sampler)
            make_multiple_of = None
            if hasattr(dataloader, "sampler") and isinstance(dataloader.sampler, SequentialDistributedSampler):
                make_multiple_of = dataloader.sampler.batch_size
            preds_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)
            labels_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)
        model.eval()
        if is_torch_tpu_available():
            dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)
        if self.args.past_index >= 0:
            self._past = None
        self.callback_handler.eval_dataloader = dataloader
        for step, inputs in enumerate(dataloader):
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
            if loss is not None:
                losses = loss.repeat(batch_size)
                losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
            if logits is not None:
                preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
            if labels is not None:
                labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
            self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control)
            # Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
            if self.args.eval_accumulation_steps is not None and (step + 1) % self.args.eval_accumulation_steps == 0:
                eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
                if not prediction_loss_only:
                    preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
                    labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
                # Set back to None to begin a new accumulation
                losses_host, preds_host, labels_host = None, None, None
        if self.args.past_index and hasattr(self, "_past"):
            # Clean the state at the end of the evaluation loop
            delattr(self, "_past")
        # Gather all remaining tensors and put them back on the CPU
        eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
        if not prediction_loss_only:
            preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
            labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
        eval_loss = eval_losses_gatherer.finalize()
        preds = preds_gatherer.finalize() if not prediction_loss_only else None
        label_ids = labels_gatherer.finalize() if not prediction_loss_only else None
        if self.compute_metrics is not None and preds is not None and label_ids is not None:
            metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
        else:
            metrics = {}
        # To be JSON-serializable, we need to remove numpy types or zero-d tensors
        metrics = denumpify_detensorize(metrics)
        if eval_loss is not None:
            metrics[f"{metric_key_prefix}_loss"] = eval_loss.mean().item()
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
def _gather_and_numpify(self, tensors, name):
"""
Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before
concatenating them to `gathered`
"""
if tensors is None:
return
if is_torch_tpu_available():
tensors = nested_xla_mesh_reduce(tensors, name)
elif is_sagemaker_mp_enabled():
tensors = smp_gather(tensors)
elif self.args.local_rank != -1:
tensors = distributed_concat(tensors)
return nested_numpify(tensors)
class AdapterTrainer:
@staticmethod
def add_argparse_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument("--data_dir", default=None, type=str, required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--kge_model_type", default='K-Adapter', type=str,
help="Model type selected in the list")
parser.add_argument("--model_type", default='bert', type=str,
help="Model type selected in the list")
parser.add_argument("--model_name_or_path", default='bert-base-uncased', type=str, required=True,
help="Path to tokenizer or shortcut name selected in the list: ")
parser.add_argument("--pretrain_model_checkpoint", default='models/KGC', type=str, required=True,
help="Path to pre-trained model or shortcut name selected in the list: ")
parser.add_argument("--task_name", default=None, type=str, required=True,
help="The name of the task to train.")
parser.add_argument("--comment", default='', type=str,
help="The comment")
parser.add_argument('--output_dir', type=Path, default="output")
parser.add_argument('--num_workers', type=int, default=32)
parser.add_argument('--batch_size', type=int, default=8)
parser.add_argument('--stable_batch_size', type=int, default=8)
parser.add_argument('--pretrain', type=int, default=0)
parser.add_argument('--eval_batch_size', type=int, default=8)
parser.add_argument("--restore", type=bool, default=True,
help="Whether restore from the last checkpoint, is nochenckpoints, start from scartch")
parser.add_argument("--max_seq_length", type=int, default=64, help="max lenght of token sequence")
parser.add_argument("--do_train", action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--evaluate_during_training", type=bool, default=False,
help="Rul evaluation during training at each logging step.")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--adapter_transformer_layers", default=2, type=int,
help="The transformer layers of adapter.")
parser.add_argument("--adapter_size", default=128, type=int,
help="The hidden size of adapter.")
parser.add_argument("--adapter_list", default="0,11,23", type=str,
help="The layer where add an adapter")
parser.add_argument("--adapter_skip_layers", default=6, type=int,
help="The skip_layers of adapter according to bert layers")
parser.add_argument('--meta_adapter_model', type=str, help='the pretrained adapter model')
parser.add_argument("--per_gpu_train_batch_size", default=32, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--per_gpu_eval_batch_size", default=64, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--learning_rate", default=3e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="Weight deay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--num_train_epochs", default=3, type=int,
help="Total number of training epochs to perform.")
parser.add_argument("--max_steps", default=-1, type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--warmup_steps", default=0, type=int,
help="Linear warmup over warmup_steps.")
parser.add_argument('--logging_steps', type=int, default=10,
help="How often do we snapshot losses, for inclusion in the progress dump? (0 = disable)")
parser.add_argument('--save_steps', type=int, default=1000,
help="Save checkpoint every X updates steps.")
parser.add_argument('--eval_steps', type=int, default=None,
help="eval every X updates steps.")
parser.add_argument('--max_save_checkpoints', type=int, default=500,
help="The max amounts of checkpoint saving. Bigger than it will delete the former checkpoints")
parser.add_argument("--eval_all_checkpoints", action='store_true',
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number")
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
parser.add_argument('--overwrite_output_dir', action='store_true',
help="Overwrite the content of the output directory")
parser.add_argument('--overwrite_cache', action='store_true',
help="Overwrite the cached training and evaluation sets")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument("--local_rank", type=int, default=-1,
help="For distributed training: local_rank")
parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
parser.add_argument('--negative_sample', type=int, default=0, help='how many negative samples to select')
return parser
def __init__(self, args, pretrained_model, kgc_data):
kgc_data.setup()
self.data = kgc_data
self.tokenizer = kgc_data.tokenizer
self.args = args
pretrained_model.config.entity_id_st = kgc_data.entity_id_st
pretrained_model.config.entity_id_ed = kgc_data.entity_id_ed
pretrained_model.config.mask_token_id = self.tokenizer.mask_token_id
num_labels = pretrained_model.config.vocab_size
adapter_model = AdapterModel(args, pretrained_model.config, num_labels)
adapter_model.out_proj = pretrained_model.cls
if args.meta_adapter_model:
model_dict = adapter_model.state_dict()
logger.info('Adapter model weight:')
logger.info(adapter_model.state_dict().keys())
logger.info('Load model state dict from {}'.format(args.meta_adapter_model))
adapter_meta_dict = torch.load(args.meta_adapter_model, map_location=lambda storage, loc: storage)
logger.info('Load pretraiend adapter model state dict ')
logger.info(adapter_meta_dict.keys())
changed_adapter_meta = {}
for key in adapter_meta_dict.keys():
changed_adapter_meta[key.replace('encoder.','adapter.encoder.')] = adapter_meta_dict[key]
changed_adapter_meta = {k: v for k, v in changed_adapter_meta.items() if k in model_dict.keys()}
model_dict.update(changed_adapter_meta)
adapter_model.load_state_dict(model_dict)
self.model = (pretrained_model, adapter_model)
    def train(self):
        """Train the adapter on top of the frozen pretrained model.

        The pretrained backbone stays in eval mode and is never optimized;
        only adapter parameters receive gradients.  Supports gradient
        accumulation, fp16 via apex, DataParallel / DistributedDataParallel,
        checkpoint save/restore and TensorBoard logging.

        Returns:
            Tuple of (global_step, mean training loss per optimization step).
        """
        args = self.args
        pretrained_model = self.model[0].to(args.device)
        adapter_model = self.model[1].to(args.device)
        tokenizer = self.tokenizer
        self.args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
        train_dataloader = self.get_train_dataloader()
        # Either run for a fixed number of optimizer steps (max_steps) or
        # derive the step budget from the epoch count.
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
        # Prepare optimizer and schedule (linear warmup and decay).
        # Weight decay is disabled for biases and LayerNorm weights, the
        # usual convention for transformer fine-tuning.
        no_decay = ['bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {'params': [p for n, p in adapter_model.named_parameters() if not any(nd in n for nd in no_decay)],
             'weight_decay': args.weight_decay},
            {'params': [p for n, p in adapter_model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
        if args.fp16:
            try:
                from apex import amp
            except ImportError:
                raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
            adapter_model, optimizer = amp.initialize(adapter_model, optimizer, opt_level=args.fp16_opt_level)
        # multi-gpu training (should be after apex fp16 initialization)
        if args.n_gpu > 1:
            pretrained_model = torch.nn.DataParallel(pretrained_model)
            adapter_model = torch.nn.DataParallel(adapter_model)
        # Distributed training (should be after apex fp16 initialization)
        if args.local_rank != -1:
            pretrained_model = torch.nn.parallel.DistributedDataParallel(pretrained_model, device_ids=[args.local_rank],
                                                                         output_device=args.local_rank)
            adapter_model = torch.nn.parallel.DistributedDataParallel(adapter_model, device_ids=[args.local_rank],
                                                                      output_device=args.local_rank)
        # Train!
        logger.info("***** Running training *****")
        # NOTE(review): this logs the number of *batches*, not examples.
        logger.info(" Num train examples = %d",
                    len(train_dataloader))  # logging.info(f" Num train_examples = {len(train_examples)}")
        logger.info(" Num Epochs = %d", args.num_train_epochs)
        logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
        logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
                    args.train_batch_size * args.gradient_accumulation_steps * (
                        torch.distributed.get_world_size() if args.local_rank != -1 else 1))
        logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
        logger.info(" Total optimization steps = %d", t_total)
        logger.info("Try resume from checkpoint")
        if args.restore:
            if os.path.exists(os.path.join(args.output_dir, 'global_step.bin')):
                # Resume: restore optimizer/scheduler/adapter state and derive
                # the epoch/step position from the saved global_step.
                logger.info("Load last checkpoint data")
                global_step = torch.load(os.path.join(args.output_dir, 'global_step.bin'))
                output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
                logger.info("Load from output_dir {}".format(output_dir))
                optimizer.load_state_dict(torch.load(os.path.join(output_dir, 'optimizer.bin')))
                scheduler.load_state_dict(torch.load(os.path.join(output_dir, 'scheduler.bin')))
                # args = torch.load(os.path.join(output_dir, 'training_args.bin'))
                if hasattr(adapter_model, 'module'):
                    adapter_model.module.load_state_dict(torch.load(os.path.join(output_dir, 'pytorch_model.bin')))
                else:  # Take care of distributed/parallel training
                    adapter_model.load_state_dict(torch.load(os.path.join(output_dir, 'pytorch_model.bin')))
                global_step += 1
                start_epoch = int(global_step / len(train_dataloader))
                start_step = global_step - start_epoch * len(train_dataloader) - 1
                logger.info("Start from global_step={} epoch={} step={}".format(global_step, start_epoch, start_step))
                if args.local_rank in [-1, 0]:
                    tb_writer = SummaryWriter(log_dir=args.output_dir + "/runs/" + args.my_model_name, purge_step=global_step)
            else:
                # --restore was requested but no checkpoint exists.
                # NOTE(review): this branch is byte-identical to the outer
                # ``else`` below; the duplication looks unintentional.
                global_step = 0
                start_epoch = 0
                start_step = 0
                if args.local_rank in [-1, 0]:
                    tb_writer = SummaryWriter(log_dir=args.output_dir + "/runs/" + args.my_model_name, purge_step=global_step)
                logger.info("Start from scratch")
        else:
            global_step = 0
            start_epoch = 0
            start_step = 0
            if args.local_rank in [-1, 0]:
                tb_writer = SummaryWriter(log_dir=args.output_dir + "/runs/" + args.my_model_name, purge_step=global_step)
            logger.info("Start from scratch")
        tr_loss, logging_loss = 0.0, 0.0
        pretrained_model.zero_grad()
        adapter_model.zero_grad()
        for epoch in range(start_epoch, int(args.num_train_epochs)):
            for step, batch in enumerate(train_dataloader):
                start = time.time()
                # When resuming, skip batches already consumed in this epoch.
                if args.restore and (step < start_step):
                    continue
                # if args.restore and (flag_count < global_step):
                #     flag_count+=1
                #     continue
                # Backbone stays frozen (eval mode); only the adapter trains.
                pretrained_model.eval()
                adapter_model.train()
                inputs = {'input_ids': batch['input_ids'],
                          'attention_mask': batch['attention_mask'],
                          'labels': batch['labels'],
                          }
                inputs = {k: v.to(args.device) for k, v in inputs.items()}
                pretrained_model_outputs = pretrained_model(**inputs)
                outputs = adapter_model(pretrained_model_outputs,**inputs)
                loss = outputs[0]  # model outputs are always tuple in pytorch-transformers (see doc)
                if args.n_gpu > 1:
                    loss = loss.mean()  # mean() to average on multi-gpu parallel training
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps
                # epoch_iterator.set_description("loss {}".format(loss))
                logger.info("Epoch {}/{} - Iter {} / {}, loss = {:.5f}, time used = {:.3f}s".format(epoch, int(
                    args.num_train_epochs), step,
                                                                                                    len(train_dataloader),
                                                                                                    loss.item(),
                                                                                                    time.time() - start))
                if args.fp16:
                    with amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    loss.backward()
                    torch.nn.utils.clip_grad_norm_(adapter_model.parameters(), args.max_grad_norm)
                tr_loss += loss.item()
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    # NOTE(review): scheduler.step() is called before
                    # optimizer.step(); recent PyTorch expects the opposite
                    # order.  This follows old pytorch-transformers examples —
                    # confirm it is intended.
                    scheduler.step()  # Update learning rate schedule
                    optimizer.step()
                    # model.zero_grad()
                    pretrained_model.zero_grad()
                    adapter_model.zero_grad()
                    global_step += 1
                    if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                        # Log metrics
                        tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
                        tb_writer.add_scalar('loss', (tr_loss - logging_loss) / args.logging_steps, global_step)
                        logging_loss = tr_loss
                    if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
                        # Save model checkpoint
                        output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
                        if not os.path.exists(output_dir):
                            os.makedirs(output_dir)
                        model_to_save = adapter_model.module if hasattr(adapter_model,
                                                                        'module') else adapter_model  # Take care of distributed/parallel training
                        model_to_save.save_pretrained(output_dir)  # save to pytorch_model.bin  model.state_dict()
                        torch.save(optimizer.state_dict(), os.path.join(output_dir, 'optimizer.bin'))
                        torch.save(scheduler.state_dict(), os.path.join(output_dir, 'scheduler.bin'))
                        torch.save(args, os.path.join(output_dir, 'training_args.bin'))
                        torch.save(global_step, os.path.join(args.output_dir, 'global_step.bin'))
                        logger.info("Saving model checkpoint, optimizer, global_step to %s", output_dir)
                        # Keep at most max_save_checkpoints checkpoints on
                        # disk; drop the oldest one.
                        if (global_step / args.save_steps) > args.max_save_checkpoints:
                            try:
                                shutil.rmtree(os.path.join(args.output_dir, 'checkpoint-{}'.format(
                                    global_step - args.max_save_checkpoints * args.save_steps)))
                            except OSError as e:
                                print(e)
                    # NOTE(review): --eval_steps defaults to None, so enabling
                    # evaluate_during_training without --eval_steps would raise
                    # a TypeError on the modulo below — confirm callers always
                    # set it.
                    if args.local_rank == -1 and args.evaluate_during_training and global_step % args.eval_steps == 0:  # Only evaluate when single GPU otherwise metrics may not average well
                        results = self.evaluate()
                        for key, value in results.items():
                            tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
                if args.max_steps > 0 and global_step > args.max_steps:
                    break
            if args.max_steps > 0 and global_step > args.max_steps:
                break
        if args.local_rank in [-1, 0]:
            tb_writer.close()
        return global_step, tr_loss / global_step
def evaluate(self, mode="Eval"):
args = self.args
pretrained_model = self.model[0].to(args.device)
adapter_model = self.model[1].to(args.device)
tokenizer = self.tokenizer
results = {}
self.args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
val_dataloader = self.get_eval_dataloader()
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
prediction = []
gold_result = []
start = time.time()
all_edit_ranks = []
all_loc_ranks = []
for step, batch in enumerate(val_dataloader):
pretrained_model.eval()
adapter_model.eval()
batch = {k: v.to(args.device) for k, v in batch.items()}
with torch.no_grad():
inputs = {'input_ids': batch['input_ids'],
'attention_mask': batch['attention_mask'],
'labels': batch['labels'],
}
pretrained_model_outputs = pretrained_model(**inputs)
outputs = adapter_model(pretrained_model_outputs, **inputs)
_, logits = outputs[:2]
edit_bsz = inputs['input_ids'][:self.args.eval_batch_size].shape[0]
loc_bsz = inputs['input_ids'][self.args.eval_batch_size:].shape[0]
_, outputs = torch.sort(logits, dim=1, descending=True)
_, outputs = torch.sort(outputs, dim=1)
# ranks = outputs[torch.arange(bsz), batch['labels'][:args.eval_batch_size]].detach().cpu() + 1
edit_ranks = outputs[torch.arange(edit_bsz), batch['labels'][:self.args.eval_batch_size]].detach().cpu() + 1
loc_ranks = outputs[torch.arange(loc_bsz), batch['labels'][self.args.eval_batch_size:]].detach().cpu() + 1
all_edit_ranks.append(edit_ranks)
all_loc_ranks.append(loc_ranks)
nb_eval_steps += 1
edit_ranks = np.concatenate(all_edit_ranks)
loc_ranks = np.concatenate(all_loc_ranks)
edit_hits20 = (edit_ranks<=20).mean()
edit_hits10 = (edit_ranks<=10).mean()
edit_hits5 = (edit_ranks<=5).mean()
edit_hits3 = (edit_ranks<=3).mean()
edit_hits1 = (edit_ranks<=1).mean()
loc_hits20 = (loc_ranks<=20).mean()
loc_hits10 = (loc_ranks<=10).mean()
loc_hits5 = (loc_ranks<=5).mean()
loc_hits3 = (loc_ranks<=3).mean()
loc_hits1 = (loc_ranks<=1).mean()
logger.info("***** %s results *****" % mode)
logger.info("Eval/hits1: %f", edit_hits1)
logger.info("Eval/hits3: %f", edit_hits3)
logger.info("Eval/hits5: %f", edit_hits5)
logger.info("Loc/hits1: %f", loc_hits1)
logger.info("Loc/hits3: %f", loc_hits3)
logger.info("Loc/hits5: %f", loc_hits5)
return results
def get_train_dataloader(self):
return DataLoader(self.data.data_train,
num_workers=self.args.num_workers,
pin_memory=True,
collate_fn=self.data.sampler,
batch_size=self.args.train_batch_size,
shuffle=True)
def get_eval_dataloader(self, eval_dataset = None):
return DataLoader(self.data.data_val,
num_workers=self.args.num_workers,
pin_memory=True,
collate_fn=self.data.sampler,
batch_size=self.args.eval_batch_size,
shuffle=False)
def get_test_dataloader(self, test_dataset):
return DataLoader(self.data.data_test,
num_workers=self.args.num_workers,
pin_memory=True,
collate_fn=self.data.sampler,
batch_size=self.args.eval_batch_size,
shuffle=False)
class CaliNetTrainer(Trainer):
    """Trainer wrapper for CaliNet runs.

    Adds a ``stable_dataset`` / ``stable_batch_size`` pair on top of the
    base ``Trainer`` and replaces the dataloader factories with plain
    ``DataLoader`` construction.
    """
    def __init__(
        self,
        model = None,
        args = None,
        data_collator = None,
        train_dataset = None,
        eval_dataset = None,
        stable_dataset = None,
        tokenizer = None,
        model_init = None,
        compute_metrics = None,
        callbacks = None,
        optimizers = (None, None),
        stable_batch_size = None,
    ):
        # NOTE(review): arguments are forwarded positionally.  If the base
        # class is the stock HF Trainer (model, args, data_collator,
        # train_dataset, eval_dataset, tokenizer, ...), ``stable_dataset``
        # lands in the ``tokenizer`` slot and ``tokenizer`` in the next one.
        # Confirm the base Trainer here actually accepts ``stable_dataset``
        # in this position.
        super().__init__(model, args, data_collator, train_dataset, eval_dataset, stable_dataset, tokenizer, compute_metrics=compute_metrics)
        self.stable_batch_size = stable_batch_size
    # Pure delegation; kept as an explicit extension point.
    def prediction_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[List[str]] = None) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]:
        return super().prediction_step(model, inputs, prediction_loss_only, ignore_keys)
    # Pure delegation; kept as an explicit extension point.
    def evaluate(self, eval_dataset: Optional[Dataset] = None, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "eval") -> Dict[str, float]:
        return super().evaluate(eval_dataset, ignore_keys, metric_key_prefix)
    # Pure delegation; kept as an explicit extension point.
    def predict(self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "test") -> PredictionOutput:
        return super().predict(test_dataset, ignore_keys, metric_key_prefix)
    def get_train_dataloader(self):
        # Shuffled loader over the training split.
        return DataLoader(self.train_dataset,
                          num_workers=self.args.dataloader_num_workers,
                          pin_memory=True,
                          collate_fn=self.data_collator,
                          batch_size=self.args.train_batch_size,
                          shuffle=True)
    def get_eval_dataloader(self, eval_dataset = None):
        # NOTE(review): the ``eval_dataset`` argument is ignored and
        # ``self.eval_dataset`` is always used — confirm intended.
        return DataLoader(self.eval_dataset,
                          num_workers=self.args.dataloader_num_workers,
                          pin_memory=True,
                          collate_fn=self.data_collator,
                          batch_size=self.args.eval_batch_size,
                          shuffle=False)
    def get_test_dataloader(self, test_dataset):
        # Unshuffled loader over the provided test dataset.
        return DataLoader(test_dataset,
                          num_workers=self.args.dataloader_num_workers,
                          pin_memory=True,
                          collate_fn=self.data_collator,
                          batch_size=self.args.eval_batch_size,
                          shuffle=False)
# Registry mapping the knowledge-injection method name (as selected on the
# command line) to the trainer class implementing it.
trainer_mapping = {
    'CaliNet': CaliNetTrainer,
    'K-Adapter': AdapterTrainer,
}
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/models/ssd_mobilenet_v3_feature_extractor.py | """SSDFeatureExtractor for MobileNetV3 features."""
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.utils import context_manager
from object_detection.utils import ops
from object_detection.utils import shape_utils
from nets.mobilenet import mobilenet
from nets.mobilenet import mobilenet_v3
class SSDMobileNetV3FeatureExtractorBase(ssd_meta_arch.SSDFeatureExtractor):
  """Base class of SSD feature extractor using MobilenetV3 features."""

  def __init__(self,
               conv_defs,
               from_layer,
               is_training,
               depth_multiplier,
               min_depth,
               pad_to_multiple,
               conv_hyperparams_fn,
               reuse_weights=None,
               use_explicit_padding=False,
               use_depthwise=False,
               override_base_feature_extractor_hyperparams=False,
               scope_name='MobilenetV3'):
    """MobileNetV3 Feature Extractor for SSD Models.

    MobileNet v3. Details found in:
    https://arxiv.org/abs/1905.02244

    Args:
      conv_defs: MobileNetV3 conv defs for backbone.
      from_layer: A cell of two layer names (string) to connect to the 1st and
        2nd inputs of the SSD head.
      is_training: whether the network is in training mode.
      depth_multiplier: float depth multiplier for feature extractor.
      min_depth: minimum feature extractor depth.
      pad_to_multiple: the nearest multiple to zero pad the input height and
        width dimensions to.
      conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
        and separable_conv2d ops in the layers that are added on top of the base
        feature extractor.
      reuse_weights: Whether to reuse variables. Default is None.
      use_explicit_padding: Whether to use explicit padding when extracting
        features. Default is False.
      use_depthwise: Whether to use depthwise convolutions. Default is False.
      override_base_feature_extractor_hyperparams: Whether to override
        hyperparameters of the base feature extractor with the one from
        `conv_hyperparams_fn`.
      scope_name: scope name (string) of network variables.
    """
    super(SSDMobileNetV3FeatureExtractorBase, self).__init__(
        is_training=is_training,
        depth_multiplier=depth_multiplier,
        min_depth=min_depth,
        pad_to_multiple=pad_to_multiple,
        conv_hyperparams_fn=conv_hyperparams_fn,
        reuse_weights=reuse_weights,
        use_explicit_padding=use_explicit_padding,
        use_depthwise=use_depthwise,
        override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams
    )
    # Subclasses select the backbone variant (Large/Small) via these.
    self._conv_defs = conv_defs
    self._from_layer = from_layer
    self._scope_name = scope_name

  def preprocess(self, resized_inputs):
    """SSD preprocessing.

    Maps pixel values to the range [-1, 1].

    Args:
      resized_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.

    Returns:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.
    """
    return (2.0 / 255.0) * resized_inputs - 1.0

  def extract_features(self, preprocessed_inputs):
    """Extract features from preprocessed inputs.

    Args:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.

    Returns:
      feature_maps: a list of tensors where the ith tensor has shape
        [batch, height_i, width_i, depth_i]

    Raises:
      ValueError if conv_defs is not provided or from_layer does not meet the
      size requirement.
    """
    if not self._conv_defs:
      raise ValueError('Must provide backbone conv defs.')
    if len(self._from_layer) != 2:
      raise ValueError('SSD input feature names are not provided.')
    preprocessed_inputs = shape_utils.check_min_image_dim(
        33, preprocessed_inputs)
    # The first two maps come straight from the backbone (depth -1 keeps the
    # native depth); four extra SSD layers of fixed depth are generated on
    # top of them.
    feature_map_layout = {
        'from_layer': [
            self._from_layer[0], self._from_layer[1], '', '', '', ''
        ],
        'layer_depth': [-1, -1, 512, 256, 256, 128],
        'use_depthwise': self._use_depthwise,
        'use_explicit_padding': self._use_explicit_padding,
    }
    # Run the truncated MobilenetV3 base, then build the extra feature maps
    # under the same variable scope.
    with tf.variable_scope(
        self._scope_name, reuse=self._reuse_weights) as scope:
      with slim.arg_scope(
          mobilenet_v3.training_scope(is_training=None, bn_decay=0.9997)), \
          slim.arg_scope(
              [mobilenet.depth_multiplier], min_depth=self._min_depth):
        with (slim.arg_scope(self._conv_hyperparams_fn())
              if self._override_base_feature_extractor_hyperparams else
              context_manager.IdentityContextManager()):
          _, image_features = mobilenet_v3.mobilenet_base(
              ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
              conv_defs=self._conv_defs,
              final_endpoint=self._from_layer[1],
              depth_multiplier=self._depth_multiplier,
              use_explicit_padding=self._use_explicit_padding,
              scope=scope)
      with slim.arg_scope(self._conv_hyperparams_fn()):
        feature_maps = feature_map_generators.multi_resolution_feature_maps(
            feature_map_layout=feature_map_layout,
            depth_multiplier=self._depth_multiplier,
            min_depth=self._min_depth,
            insert_1x1_conv=True,
            image_features=image_features)
    return list(feature_maps.values())
class SSDMobileNetV3LargeFeatureExtractor(SSDMobileNetV3FeatureExtractorBase):
  """SSD feature extractor backed by the MobileNet V3-Large backbone."""

  def __init__(self,
               is_training,
               depth_multiplier,
               min_depth,
               pad_to_multiple,
               conv_hyperparams_fn,
               reuse_weights=None,
               use_explicit_padding=False,
               use_depthwise=False,
               override_base_feature_extractor_hyperparams=False,
               scope_name='MobilenetV3'):
    """See SSDMobileNetV3FeatureExtractorBase for argument documentation."""
    shared_kwargs = dict(
        is_training=is_training,
        depth_multiplier=depth_multiplier,
        min_depth=min_depth,
        pad_to_multiple=pad_to_multiple,
        conv_hyperparams_fn=conv_hyperparams_fn,
        reuse_weights=reuse_weights,
        use_explicit_padding=use_explicit_padding,
        use_depthwise=use_depthwise,
        override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams,
        scope_name=scope_name)
    # V3-Large: SSD heads attach to layer_14's expansion output and layer_17.
    super(SSDMobileNetV3LargeFeatureExtractor, self).__init__(
        conv_defs=mobilenet_v3.V3_LARGE_DETECTION,
        from_layer=['layer_14/expansion_output', 'layer_17'],
        **shared_kwargs)
class SSDMobileNetV3SmallFeatureExtractor(SSDMobileNetV3FeatureExtractorBase):
  """SSD feature extractor backed by the MobileNet V3-Small backbone."""

  def __init__(self,
               is_training,
               depth_multiplier,
               min_depth,
               pad_to_multiple,
               conv_hyperparams_fn,
               reuse_weights=None,
               use_explicit_padding=False,
               use_depthwise=False,
               override_base_feature_extractor_hyperparams=False,
               scope_name='MobilenetV3'):
    """See SSDMobileNetV3FeatureExtractorBase for argument documentation."""
    shared_kwargs = dict(
        is_training=is_training,
        depth_multiplier=depth_multiplier,
        min_depth=min_depth,
        pad_to_multiple=pad_to_multiple,
        conv_hyperparams_fn=conv_hyperparams_fn,
        reuse_weights=reuse_weights,
        use_explicit_padding=use_explicit_padding,
        use_depthwise=use_depthwise,
        override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams,
        scope_name=scope_name)
    # V3-Small: SSD heads attach to layer_10's expansion output and layer_13.
    super(SSDMobileNetV3SmallFeatureExtractor, self).__init__(
        conv_defs=mobilenet_v3.V3_SMALL_DETECTION,
        from_layer=['layer_10/expansion_output', 'layer_13'],
        **shared_kwargs)
/Aston-0.7.1.tar.gz/Aston-0.7.1/aston/tracefile/bruker.py | import struct
import numpy as np
import scipy.sparse
from aston.trace import Chromatogram, Trace
from aston.tracefile import TraceFile
class BrukerMSMS(TraceFile):
    """Reader for Bruker MS/MS binary scan files.

    Parses the file into a sparse (scan x ion) matrix wrapped in a
    :class:`Chromatogram`.
    """
    mime = 'application/vnd-bruker-msms'
    traces = ['#ms']

    @property
    def data(self):
        """Read the binary file and return all scans as a Chromatogram.

        Returns:
            Chromatogram with one row per scan (times in minutes) and one
            column per distinct integer ion m/z; an empty Trace if the file
            contains no scans.
        """
        def rd(f, st):
            # Read and unpack one struct format string from the stream.
            return struct.unpack(st, f.read(struct.calcsize(st)))

        # Context manager so the handle is always closed (the original
        # never closed the file on the normal path).
        with open(self.filename, 'rb') as f:
            nscans = rd(f, 'ii')[1]
            if nscans == 0:
                # BUG FIX: the original assigned to this read-only property
                # (raising AttributeError) and fell through returning None.
                return Trace(np.array([]), np.array([]), [])
            # Scan times are stored in seconds; convert to minutes.
            times = np.array(rd(f, nscans * 'd')) / 60.0
            f.seek(f.tell() + 4)  # number of scans repeated; skip it
            # CSR row-pointer array: indptr[i] is the data offset of scan i.
            indptr = np.empty(nscans + 1, dtype=int)
            indptr[0] = 0
            # First pass: count points per scan to size the CSR arrays.
            dpos = f.tell()
            tot_pts = 0
            for scn in range(nscans):
                npts = rd(f, 'i')[0]
                # skip npts floats (ions) + one int (count) + npts floats (abun)
                f.seek(f.tell() + 8 * npts + 4)
                tot_pts += npts
                indptr[scn + 1] = tot_pts
            f.seek(dpos)
            # Second pass: collect ion m/z values and abundances per scan.
            ions = []
            i_lkup = {}
            idxs = np.empty(tot_pts, dtype=int)
            vals = np.empty(tot_pts, dtype=float)
            for scn in range(nscans):
                npts = rd(f, 'i')[0]
                rd_ions = rd(f, npts * 'f')
                f.seek(f.tell() + 4)  # number of points repeated; skip it
                abun = rd(f, npts * 'f')
                # Assign a column index to each ion the first time it appears.
                nions = set([int(i) for i in rd_ions if int(i) not in i_lkup])
                i_lkup.update(dict((ion, i + len(ions) - 1)
                                   for i, ion in enumerate(nions)))
                ions += nions
                idxs[indptr[scn]:indptr[scn + 1]] = \
                    [i_lkup[int(i)] for i in rd_ions]
                vals[indptr[scn]:indptr[scn + 1]] = \
                    abun
        idxs += 1
        data = scipy.sparse.csr_matrix((vals, idxs, indptr),
                                       shape=(nscans, len(ions)), dtype=float)
        return Chromatogram(data, times, ions)
class BrukerBAF(TraceFile):
    """Stub for Bruker BAF files: format recognized, parsing unimplemented."""
    mime = 'application/vnd-bruker-baf'
    pass
    # TODO: implement this
    # Reverse-engineering notes gathered from sample files:
    #  0x000c - q - 230 or 242
    ###############################################
    # file 1 - 230 ("Carolina")
    #  0xFFFFFFFFFFFF at 0x678D, 0x6825, 0xC459, 0x491AD,
    #    0x500E7, 0x57C39, and 25+ others
    #  text section starts 0x018E, ends 0x6708
    #  0x409BF - (d- -1.000) then a ton of doubles
    #  3000 scans?, 2371 ions
    ###############################################
    # file 2 - 230 ("Short")
    ###############################################
    # file 3 - 242
    #  0xFFFFFFFFFFFF at 0x6BEF, 0x6CB7, 0x4044A
    #  text section starts 0x0186, ends 0x6B3B
    #  some CHP records at 0xAE9B til 0xBF5d (?)
    #  0x42a1e - (d- -1.000) then a ton of doubles
    #    til 0x23DFFF56
    #  0x23FA916A - 0xFFFFFFFF before last data chunk?
/Jaspion-0.3.7.1.tar.gz/Jaspion-0.3.7.1/jaspion/cli.py | import os
import sys
import importlib
import click
from greenswitch.esl import NotConnectedError
from jaspion import Jaspion
@click.group()
def main():
    """Jaspion CLI to manipulate and execute projects."""
@main.command()
@click.option(
    "--host",
    envvar="FSHOST",
    show_default=True,
    default="127.0.0.1",
    help="Address of FreeSwitch.",
)
@click.option(
    "--port",
    type=int,
    default=8021,
    envvar="FSPORT",
    show_default=True,
    help="Port to ESL connect.",
)
@click.option(
    "--password",
    default="ClueCon",
    show_default=True,
    envvar="FSPASSWD",
    help="Password to ESL connect.",
)
@click.option("--debug/--no-debug", default=False)
def runserver(host, port, password, debug):
    """Connect in freeswitch and start a listner."""
    # Created lazily; checked in the KeyboardInterrupt handler below.
    app = None
    try:
        module = os.environ.get("JASPION_APP", None)
        sketch = "app"
        # BUG FIX: `":" in module` raised TypeError when JASPION_APP was
        # unset, which the (KeyError, TypeError) handler then reported as
        # "Invalid listener configured." instead of "No application
        # configured."
        if module and ":" in module:
            module, sketch = module.split(":", 1)
        if module:
            path = os.getcwd()
            importlib.invalidate_caches()
            sys.path.append(os.path.abspath(path))
            click.echo("Try to connect in esl://{}:{}".format(host, port))
            if debug:
                click.echo(
                    click.style(f"Read file {path}/{module}.py", fg="blue")
                )
            mod = importlib.import_module(module)
            result = getattr(mod, sketch)
            if debug:
                click.echo(click.style(f"Listner: {result}", fg="blue"))
            app = Jaspion(host, port, password)
            # The sketch may be either a factory returning a listener or
            # the listener itself.
            if callable(result):
                listner = result()
            else:
                listner = result
            app.update(listner)
            app.run()
        else:
            click.echo("No application configured.")
    except ImportError:
        click.echo(click.style("Failed to load listener.", fg="red"))
    except (KeyError, TypeError):
        click.echo(click.style("Invalid listener configured.", fg="red"))
    except (NotConnectedError, ConnectionRefusedError):
        click.echo(click.style("Failed to connect with freeswitch.", fg="red"))
    except KeyboardInterrupt:
        click.echo("Stoping...")
        # BUG FIX: an interrupt before the Jaspion app was created used to
        # raise UnboundLocalError here.
        if app is not None:
            app.stop()
/Distributions_GauBino-0.1.tar.gz/Distributions_GauBino-0.1/Distributions_GauBino/Binomialdistribution.py | import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Binomial(Distribution):
    """ Binomial distribution class for calculating and
    visualizing a Binomial distribution.

    Attributes:
        mean (float) representing the mean value of the distribution
        stdev (float) representing the standard deviation of the distribution
        data_list (list of floats) a list of floats to be extracted from the data file
        p (float) representing the probability of an event occurring
        n (int) number of trials
    """

    def __init__(self, prob=.5, size=20):
        self.n = size
        self.p = prob
        Distribution.__init__(self, self.calculate_mean(), self.calculate_stdev())

    def calculate_mean(self):
        """Calculate the mean (n * p) and store it on the instance.

        Returns:
            float: mean of the distribution
        """
        self.mean = self.p * self.n
        return self.mean

    def calculate_stdev(self):
        """Calculate the standard deviation sqrt(n * p * (1 - p)) and store it.

        Returns:
            float: standard deviation of the distribution
        """
        self.stdev = math.sqrt(self.n * self.p * (1 - self.p))
        return self.stdev

    def replace_stats_with_data(self):
        """Recompute n, p, mean and stdev from the loaded data set, in place.

        n becomes the number of trials (length of the data) and p the
        observed success rate.  Updates the instance attributes and
        returns None.
        """
        self.n = len(self.data)
        self.p = 1.0 * sum(self.data) / len(self.data)
        self.mean = self.calculate_mean()
        self.stdev = self.calculate_stdev()

    def plot_bar(self):
        """Plot a bar chart of expected failure (0) and success (1) counts
        using matplotlib.
        """
        plt.bar(x=['0', '1'], height=[(1 - self.p) * self.n, self.p * self.n])
        plt.title('Bar Chart of Data')
        plt.xlabel('outcome')
        plt.ylabel('count')

    def pdf(self, k):
        """Probability mass function of the binomial distribution.

        Args:
            k (int): number of successes

        Returns:
            float: probability of exactly k successes in n trials
        """
        # C(n, k) * p^k * (1 - p)^(n - k)
        a = math.factorial(self.n) / (math.factorial(k) * (math.factorial(self.n - k)))
        b = (self.p ** k) * (1 - self.p) ** (self.n - k)
        return a * b

    def plot_bar_pdf(self):
        """Plot the full pmf for k = 0..n as a bar chart.

        Returns:
            list: x values (k) for the pdf plot
            list: y values (probabilities) for the pdf plot
        """
        x = []
        y = []
        # calculate the x values to visualize
        for k in range(self.n + 1):
            x.append(k)
            y.append(self.pdf(k))
        # make the plots
        plt.bar(x, y)
        plt.title('Distribution of Outcomes')
        plt.ylabel('Probability')
        plt.xlabel('Outcome')
        plt.show()
        return x, y

    def __add__(self, other):
        """Add together two Binomial distributions with equal p.

        The sum of independent Binomial(n1, p) and Binomial(n2, p) is
        Binomial(n1 + n2, p); this only holds when both share the same p.

        Args:
            other (Binomial): Binomial instance to add

        Returns:
            Binomial: Binomial distribution with n = n1 + n2

        Raises:
            AssertionError: if the two p values differ
        """
        # The original wrapped this assert in a try/except that immediately
        # re-raised; the bare assert is equivalent.
        assert self.p == other.p, 'p values are not equal'
        result = Binomial()
        result.n = self.n + other.n
        result.p = self.p
        result.calculate_mean()
        result.calculate_stdev()
        return result

    def __repr__(self):
        """Return the characteristics of this Binomial instance.

        Returns:
            string: characteristics of the distribution
        """
        return "mean {}, standard deviation {}, p {}, n {}".\
            format(self.mean, self.stdev, self.p, self.n)
/MapProxy-1.16.0.tar.gz/MapProxy-1.16.0/mapproxy/util/ext/tempita/__init__.py | from __future__ import print_function
import re
import sys
import os
import tokenize
from io import StringIO, BytesIO
from mapproxy.compat import iteritems, PY2, text_type
from mapproxy.compat.modules import escape
from mapproxy.util.py import reraise
from mapproxy.util.ext.tempita._looper import looper
from mapproxy.util.ext.tempita.compat3 import bytes, basestring_, next, is_unicode, coerce_text
if PY2:
from urllib import quote as url_quote
else:
from urllib.parse import quote as url_quote
__all__ = ['TemplateError', 'Template', 'sub', 'HTMLTemplate',
           'sub_html', 'html', 'bunch']

# Matches the default {{ / }} delimiters when scanning template text.
token_re = re.compile(r'\{\{|\}\}')
# Matches the " in " separator of a {{for x in seq}} directive.
in_re = re.compile(r'\s+in\s+')
# A valid Python identifier (case-insensitive).
var_re = re.compile(r'^[a-z_][a-z0-9_]*$', re.I)
class TemplateError(Exception):
    """Raised when template text cannot be parsed.

    Carries the (line, column) position of the error and, optionally, the
    name of the template in which it occurred; both are included in the
    string form.
    """

    def __init__(self, message, position, name=None):
        Exception.__init__(self, message)
        self.position = position
        self.name = name

    def __str__(self):
        pieces = [' '.join(self.args)]
        if self.position:
            pieces.append('at line %s column %s'
                          % (self.position[0], self.position[1]))
        if self.name:
            pieces.append('in %s' % self.name)
        return ' '.join(pieces)
class _TemplateContinue(Exception):
    # Internal control-flow signal raised by {{continue}} inside a {{for}}
    # loop and caught by the loop interpreter.
    pass
class _TemplateBreak(Exception):
    # Internal control-flow signal raised by {{break}} inside a {{for}}
    # loop and caught by the loop interpreter.
    pass
def get_file_template(name, from_template):
    """Resolve *name* relative to *from_template*'s directory and load it.

    Used as the default ``get_template`` hook for file-based template
    inheritance.
    """
    base_dir = os.path.dirname(from_template.name)
    template_cls = from_template.__class__
    return template_cls.from_filename(
        os.path.join(base_dir, name),
        namespace=from_template.namespace,
        get_template=from_template.get_template)
class Template(object):
    """Compiles template text and renders it with {{...}} substitutions."""

    # Names available inside every template, plus the delimiter strings
    # used by the parser.
    default_namespace = {
        'start_braces': '{{',
        'end_braces': '}}',
        'looper': looper,
        }
    # Encoding used when coercing bytes during substitution.
    default_encoding = 'utf8'
    # Template to inherit from when the source has no {{inherit}} directive.
    default_inherit = None
    def __init__(self, content, name=None, namespace=None, stacklevel=None,
                 get_template=None, default_inherit=None, line_offset=0):
        """Parse *content* into an executable template.

        Args:
            content: template source text (str or bytes).
            name: template name used in error messages; if omitted and
                *stacklevel* is given, a name is derived from the caller's
                frame.
            namespace: default substitution values merged into every render.
            stacklevel: how many frames up to look when deriving a name.
            get_template: hook used to resolve {{inherit}} targets.
            default_inherit: template to inherit from when the source has no
                {{inherit}} directive.
            line_offset: added to reported line numbers (useful when the
                template is embedded inside a larger file).
        """
        self.content = content
        self._unicode = is_unicode(content)
        # Best effort: name the template after the caller's file:line so
        # error messages point somewhere useful.
        if name is None and stacklevel is not None:
            try:
                caller = sys._getframe(stacklevel)
            except ValueError:
                pass
            else:
                globals = caller.f_globals
                lineno = caller.f_lineno
                if '__file__' in globals:
                    name = globals['__file__']
                    if name.endswith('.pyc') or name.endswith('.pyo'):
                        # Point at the .py source rather than the bytecode.
                        name = name[:-1]
                elif '__name__' in globals:
                    name = globals['__name__']
                else:
                    name = '<string>'
                if lineno:
                    name += ':%s' % lineno
        self.name = name
        self._parsed = parse(content, name=name, line_offset=line_offset)
        if namespace is None:
            namespace = {}
        self.namespace = namespace
        self.get_template = get_template
        if default_inherit is not None:
            self.default_inherit = default_inherit
def from_filename(cls, filename, namespace=None, encoding=None,
default_inherit=None, get_template=get_file_template):
f = open(filename, 'rb')
c = f.read()
f.close()
if encoding:
c = c.decode(encoding)
return cls(content=c, name=filename, namespace=namespace,
default_inherit=default_inherit, get_template=get_template)
from_filename = classmethod(from_filename)
def __repr__(self):
return '<%s %s name=%r>' % (
self.__class__.__name__,
hex(id(self))[2:], self.name)
def substitute(self, *args, **kw):
if args:
if kw:
raise TypeError(
"You can only give positional *or* keyword arguments")
if len(args) > 1:
raise TypeError(
"You can only give one positional argument")
if not hasattr(args[0], 'items'):
raise TypeError(
"If you pass in a single argument, you must pass in a dictionary-like object (with a .items() method); you gave %r"
% (args[0],))
kw = args[0]
ns = kw
ns['__template_name__'] = self.name
if self.namespace:
ns.update(self.namespace)
result, defs, inherit = self._interpret(ns)
if not inherit:
inherit = self.default_inherit
if inherit:
result = self._interpret_inherit(result, defs, inherit, ns)
return result
def _interpret(self, ns):
__traceback_hide__ = True
parts = []
defs = {}
self._interpret_codes(self._parsed, ns, out=parts, defs=defs)
if '__inherit__' in defs:
inherit = defs.pop('__inherit__')
else:
inherit = None
return ''.join(parts), defs, inherit
def _interpret_inherit(self, body, defs, inherit_template, ns):
__traceback_hide__ = True
if not self.get_template:
raise TemplateError(
'You cannot use inheritance without passing in get_template',
position=None, name=self.name)
templ = self.get_template(inherit_template, self)
self_ = TemplateObject(self.name)
for name, value in iteritems(defs):
setattr(self_, name, value)
self_.body = body
ns = ns.copy()
ns['self'] = self_
return templ.substitute(ns)
def _interpret_codes(self, codes, ns, out, defs):
__traceback_hide__ = True
for item in codes:
if isinstance(item, basestring_):
out.append(item)
else:
self._interpret_code(item, ns, out, defs)
def _interpret_code(self, code, ns, out, defs):
__traceback_hide__ = True
name, pos = code[0], code[1]
if name == 'py':
self._exec(code[2], ns, pos)
elif name == 'continue':
raise _TemplateContinue()
elif name == 'break':
raise _TemplateBreak()
elif name == 'for':
vars, expr, content = code[2], code[3], code[4]
expr = self._eval(expr, ns, pos)
self._interpret_for(vars, expr, content, ns, out, defs)
elif name == 'cond':
parts = code[2:]
self._interpret_if(parts, ns, out, defs)
elif name == 'expr':
parts = code[2].split('|')
base = self._eval(parts[0], ns, pos)
for part in parts[1:]:
func = self._eval(part, ns, pos)
base = func(base)
out.append(self._repr(base, pos))
elif name == 'default':
var, expr = code[2], code[3]
if var not in ns:
result = self._eval(expr, ns, pos)
ns[var] = result
elif name == 'inherit':
expr = code[2]
value = self._eval(expr, ns, pos)
defs['__inherit__'] = value
elif name == 'def':
name = code[2]
signature = code[3]
parts = code[4]
ns[name] = defs[name] = TemplateDef(self, name, signature, body=parts, ns=ns,
pos=pos)
elif name == 'comment':
return
else:
assert 0, "Unknown code: %r" % name
def _interpret_for(self, vars, expr, content, ns, out, defs):
__traceback_hide__ = True
if expr is None:
return
for item in expr:
if len(vars) == 1:
ns[vars[0]] = item
else:
if len(vars) != len(item):
raise ValueError(
'Need %i items to unpack (got %i items)'
% (len(vars), len(item)))
for name, value in zip(vars, item):
ns[name] = value
try:
self._interpret_codes(content, ns, out, defs)
except _TemplateContinue:
continue
except _TemplateBreak:
break
def _interpret_if(self, parts, ns, out, defs):
__traceback_hide__ = True
# @@: if/else/else gets through
for part in parts:
assert not isinstance(part, basestring_)
name, pos = part[0], part[1]
if name == 'else':
result = True
else:
result = self._eval(part[2], ns, pos)
if result:
self._interpret_codes(part[3], ns, out, defs)
break
def _eval(self, code, ns, pos):
__traceback_hide__ = True
try:
try:
value = eval(code, self.default_namespace, ns)
except SyntaxError as e:
raise SyntaxError(
'invalid syntax in expression: %s' % code)
return value
except:
exc_info = sys.exc_info()
e = exc_info[1]
if getattr(e, 'args', None):
arg0 = e.args[0]
else:
arg0 = coerce_text(e)
e.args = (self._add_line_info(arg0, pos),)
reraise((exc_info[0], e, exc_info[2]))
def _exec(self, code, ns, pos):
__traceback_hide__ = True
try:
exec(code, self.default_namespace, ns)
except:
exc_info = sys.exc_info()
e = exc_info[1]
if e.args:
e.args = (self._add_line_info(e.args[0], pos),)
else:
e.args = (self._add_line_info(None, pos),)
reraise((exc_info[0], e, exc_info[2]))
def _repr(self, value, pos):
__traceback_hide__ = True
try:
if value is None:
return ''
if self._unicode:
try:
value = text_type(value)
except UnicodeDecodeError:
value = bytes(value)
else:
if not isinstance(value, basestring_):
value = coerce_text(value)
if (is_unicode(value)
and self.default_encoding):
value = value.encode(self.default_encoding)
except:
exc_info = sys.exc_info()
e = exc_info[1]
e.args = (self._add_line_info(e.args[0], pos),)
reraise((exc_info[0], e, exc_info[2]))
else:
if self._unicode and isinstance(value, bytes):
if not self.default_encoding:
raise UnicodeDecodeError(
'Cannot decode bytes value %r into unicode '
'(no default_encoding provided)' % value)
try:
value = value.decode(self.default_encoding)
except UnicodeDecodeError as e:
raise UnicodeDecodeError(
e.encoding,
e.object,
e.start,
e.end,
e.reason + ' in string %r' % value)
elif not self._unicode and is_unicode(value):
if not self.default_encoding:
raise UnicodeEncodeError(
'Cannot encode unicode value %r into bytes '
'(no default_encoding provided)' % value)
value = value.encode(self.default_encoding)
return value
def _add_line_info(self, msg, pos):
msg = "%s at line %s column %s" % (
msg, pos[0], pos[1])
if self.name:
msg += " in file %s" % self.name
return msg
def sub(content, **kw):
    """One-shot substitution: render *content* with *kw* as the namespace.

    The special ``__name`` keyword, if present, names the template for
    error messages.
    """
    template_name = kw.get('__name')
    return Template(content, name=template_name).substitute(kw)
def paste_script_template_renderer(content, vars, filename=None):
    """Renderer entry point compatible with paste.script's template API."""
    template = Template(content, name=filename)
    return template.substitute(vars)
class bunch(dict):
    """A dict whose items double as attributes.

    If a ``'default'`` key is present, lookups of missing keys return
    that value instead of raising KeyError.
    """

    def __init__(self, **kw):
        for key, val in iteritems(kw):
            setattr(self, key, val)

    def __setattr__(self, name, value):
        # Attribute assignment stores into the dict itself.
        self[name] = value

    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

    def __getitem__(self, key):
        try:
            return dict.__getitem__(self, key)
        except KeyError:
            if 'default' in self:
                return dict.__getitem__(self, 'default')
            raise

    def __repr__(self):
        pairs = sorted(iteritems(self))
        body = ' '.join(['%s=%r' % (k, v) for k, v in pairs])
        return '<%s %s>' % (self.__class__.__name__, body)
############################################################
## HTML Templating
############################################################
class html(object):
    """Wrapper marking a string as already-safe HTML (via ``__html__``)."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return self.value

    def __html__(self):
        # Already escaped: return the stored markup verbatim.
        return self.value

    def __repr__(self):
        cls_name = self.__class__.__name__
        return '<%s %r>' % (cls_name, self.value)
def html_quote(value, force=True):
    """HTML-escape *value*.

    Objects providing ``__html__`` are trusted when *force* is false;
    ``None`` becomes the empty string.  Bytes input is escaped through a
    latin1 round-trip so raw byte values survive unchanged.
    """
    if not force and hasattr(value, '__html__'):
        return value.__html__()
    if value is None:
        return ''
    if not isinstance(value, basestring_):
        value = coerce_text(value)
    if sys.version >= "3" and isinstance(value, bytes):
        value = escape(value.decode('latin1'), 1).encode('latin1')
    else:
        value = escape(value, 1)
        if sys.version < "3" and is_unicode(value):
            # Python 2: force ASCII output with character references.
            value = value.encode('ascii', 'xmlcharrefreplace')
    return value
def url(v):
    """URL-quote *v*, encoding unicode text as UTF-8 first."""
    text = coerce_text(v)
    if is_unicode(text):
        text = text.encode('utf8')
    return url_quote(text)
def attr(**kw):
    """Build an ``html()`` fragment of ``name="value"`` attribute pairs.

    ``None`` values are skipped.  A trailing underscore in a keyword
    (e.g. ``class_``) is stripped so Python reserved words can be used
    as attribute names.  Pairs are emitted in sorted order, with both
    names and values HTML-quoted.
    """
    # Bug fix: dict.iteritems() does not exist on Python 3; sorted(items())
    # works on both 2 and 3 and replaces the list()/sort() dance.
    parts = []
    for name, value in sorted(kw.items()):
        if value is None:
            continue
        if name.endswith('_'):
            name = name[:-1]
        parts.append('%s="%s"' % (html_quote(name), html_quote(value)))
    return html(' '.join(parts))
class HTMLTemplate(Template):
    """Template subclass that HTML-escapes substituted values by default."""

    default_namespace = Template.default_namespace.copy()
    default_namespace.update(dict(
        html=html,
        attr=attr,
        url=url,
        html_quote=html_quote,
    ))

    def _repr(self, value, pos):
        # Objects advertising __html__ are trusted and inserted verbatim;
        # everything else is escaped after the base-class conversion.
        needs_quoting = not hasattr(value, '__html__')
        if not needs_quoting:
            value = value.__html__()
        plain = Template._repr(self, value, pos)
        if needs_quoting:
            return html_quote(plain)
        return plain
def sub_html(content, **kw):
    """One-shot HTML substitution using HTMLTemplate.

    The special ``__name`` keyword, if present, names the template.
    """
    template_name = kw.get('__name')
    return HTMLTemplate(content, name=template_name).substitute(kw)
class TemplateDef(object):
    """Callable wrapper around a ``{{def name(signature)}}`` block.

    Instances are placed into the rendering namespace so template code
    can call the defined function; calling one binds the arguments
    against the parsed signature and interprets the stored body.
    """

    def __init__(self, template, func_name, func_signature,
                 body, ns, pos, bound_self=None):
        self._template = template
        self._func_name = func_name
        self._func_signature = func_signature
        self._body = body
        self._ns = ns
        self._pos = pos
        self._bound_self = bound_self

    def __repr__(self):
        return '<tempita function %s(%s) at %s:%s>' % (
            self._func_name, self._func_signature,
            self._template.name, self._pos)

    def __str__(self):
        # Rendering a def with no arguments is the same as calling it.
        return self()

    def __call__(self, *args, **kw):
        """Interpret the stored body with *args*/*kw* bound; return text."""
        values = self._parse_signature(args, kw)
        ns = self._ns.copy()
        ns.update(values)
        if self._bound_self is not None:
            ns['self'] = self._bound_self
        out = []
        subdefs = {}
        self._template._interpret_codes(self._body, ns, out, subdefs)
        return ''.join(out)

    def __get__(self, obj, type=None):
        # Descriptor protocol: accessing a def through an object binds it,
        # mirroring how ordinary Python methods bind ``self``.
        if obj is None:
            return self
        return self.__class__(
            self._template, self._func_name, self._func_signature,
            self._body, self._ns, self._pos, bound_self=obj)

    def _parse_signature(self, args, kw):
        """Bind *args*/*kw* against the stored signature.

        Returns a dict mapping parameter name -> value.  Raises
        TypeError on unexpected, duplicate-missing, or extra arguments,
        like a normal Python call would.  Default expressions are
        evaluated lazily in the def's namespace.
        """
        values = {}
        sig_args, var_args, var_kw, defaults = self._func_signature
        extra_kw = {}
        for name, value in iteritems(kw):
            if not var_kw and name not in sig_args:
                raise TypeError(
                    'Unexpected argument %s' % name)
            if name in sig_args:
                # Bug fix: this was ``values[sig_args] = value``, which used
                # the whole (unhashable) signature list as the key and broke
                # every keyword call; store under the argument's own name.
                values[name] = value
            else:
                extra_kw[name] = value
        args = list(args)
        sig_args = list(sig_args)
        while args:
            # Skip parameters already satisfied by keyword arguments.
            while sig_args and sig_args[0] in values:
                sig_args.pop(0)
            if sig_args:
                name = sig_args.pop(0)
                values[name] = args.pop(0)
            elif var_args:
                values[var_args] = tuple(args)
                break
            else:
                raise TypeError(
                    'Extra position arguments: %s'
                    % ', '.join(repr(v) for v in args))
        for name, value_expr in iteritems(defaults):
            if name not in values:
                values[name] = self._template._eval(
                    value_expr, self._ns, self._pos)
        for name in sig_args:
            if name not in values:
                raise TypeError(
                    'Missing argument: %s' % name)
        if var_kw:
            values[var_kw] = extra_kw
        return values
class TemplateObject(object):
    """Namespace object exposed to a parent template as ``self``."""

    def __init__(self, name):
        self.__name = name
        # .get mirrors attribute access but yields Empty when missing.
        self.get = TemplateObjectGetter(self)

    def __repr__(self):
        cls_name = self.__class__.__name__
        return '<%s %s>' % (cls_name, self.__name)
class TemplateObjectGetter(object):
    """Attribute proxy returning ``Empty`` instead of AttributeError."""

    def __init__(self, template_obj):
        self.__template_obj = template_obj

    def __getattr__(self, attr):
        # Missing attributes fall back to the Empty singleton.
        return getattr(self.__template_obj, attr, Empty)

    def __repr__(self):
        return '<%s around %r>' % (
            self.__class__.__name__, self.__template_obj)
class _Empty(object):
    """Singleton placeholder: falsy, empty, and absorbs any call."""

    def __call__(self, *args, **kw):
        # Calling Empty with anything yields Empty again.
        return self

    def __str__(self):
        return ''

    def __repr__(self):
        return 'Empty'

    def __unicode__(self):
        return u''

    def __iter__(self):
        return iter(())

    def __bool__(self):
        return False

    if sys.version < "3":
        # Python 2 spells the truth-test hook __nonzero__.
        __nonzero__ = __bool__

Empty = _Empty()
del _Empty
############################################################
## Lexing and Parsing
############################################################
def lex(s, name=None, trim_whitespace=True, line_offset=0):
    """
    Lex a string into chunks:

        >>> lex('hey')
        ['hey']
        >>> lex('hey {{you}}')
        ['hey ', ('you', (1, 7))]
        >>> lex('hey {{') # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            ...
        TemplateError: No }} to finish last expression at line 1 column 7
        >>> lex('hey }}') # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            ...
        TemplateError: }} outside expression at line 1 column 7
        >>> lex('hey {{ {{') # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            ...
        TemplateError: {{ inside expression at line 1 column 10
    """
    # Plain text becomes str chunks; expressions become (text, (line, col))
    # tuples.  in_expr tracks whether we are between {{ and }}.
    in_expr = False
    chunks = []
    last = 0
    last_pos = (1, 1)
    for match in token_re.finditer(s):
        expr = match.group(0)
        pos = find_position(s, match.end(), line_offset)
        # Braces must strictly alternate: {{ may not nest, }} must close.
        if expr == '{{' and in_expr:
            raise TemplateError('{{ inside expression', position=pos,
                                name=name)
        elif expr == '}}' and not in_expr:
            raise TemplateError('}} outside expression', position=pos,
                                name=name)
        if expr == '{{':
            # Text preceding the opening braces is emitted as-is.
            part = s[last:match.start()]
            if part:
                chunks.append(part)
            in_expr = True
        else:
            # Closing braces: emit the expression with its start position.
            chunks.append((s[last:match.start()], last_pos))
            in_expr = False
        last = match.end()
        last_pos = pos
    if in_expr:
        raise TemplateError('No }} to finish last expression',
                            name=name, position=last_pos)
    part = s[last:]
    if part:
        chunks.append(part)
    if trim_whitespace:
        chunks = trim_lex(chunks)
    return chunks
# Directives that take an expression after the keyword (used by trim_lex
# to decide which chunks count as statements).
statement_re = re.compile(r'^(?:if |elif |for |def |inherit |default |py:)')
# Directives that stand alone with no expression.
single_statements = ['else', 'endif', 'endfor', 'enddef', 'continue', 'break']
# Whitespace patterns around a directive that sits on a line by itself.
trail_whitespace_re = re.compile(r'\n\r?[\t ]*$')
lead_whitespace_re = re.compile(r'^[\t ]*\n')
def trim_lex(tokens):
    r"""
    Takes a lexed set of tokens, and removes whitespace when there is
    a directive on a line by itself:

       >>> tokens = lex('{{if x}}\nx\n{{endif}}\ny', trim_whitespace=False)
       >>> tokens
       [('if x', (1, 3)), '\nx\n', ('endif', (3, 3)), '\ny']
       >>> trim_lex(tokens)
       [('if x', (1, 3)), 'x\n', ('endif', (3, 3)), 'y']
    """
    # last_trim remembers the index of the previous trimmed directive so
    # consecutive directive-only lines can chain their trimming.
    last_trim = None
    for i in range(len(tokens)):
        current = tokens[i]
        if isinstance(tokens[i], basestring_):
            # we don't trim this
            continue
        item = current[0]
        if not statement_re.search(item) and item not in single_statements:
            # Plain expression substitutions keep their whitespace.
            continue
        if not i:
            prev = ''
        else:
            prev = tokens[i - 1]
        if i + 1 >= len(tokens):
            next_chunk = ''
        else:
            next_chunk = tokens[i + 1]
        if (not isinstance(next_chunk, basestring_)
                or not isinstance(prev, basestring_)):
            # Directives adjacent to other directives are left alone.
            continue
        # The directive is "on a line by itself" when the preceding text
        # ends with a newline (plus indentation) and the following text
        # starts with one.
        prev_ok = not prev or trail_whitespace_re.search(prev)
        if i == 1 and not prev.strip():
            prev_ok = True
        if last_trim is not None and last_trim + 2 == i and not prev.strip():
            prev_ok = 'last'
        if (prev_ok
                and (not next_chunk or lead_whitespace_re.search(next_chunk)
                     or (i == len(tokens) - 2 and not next_chunk.strip()))):
            if prev:
                if ((i == 1 and not prev.strip())
                        or prev_ok == 'last'):
                    tokens[i - 1] = ''
                else:
                    m = trail_whitespace_re.search(prev)
                    # +1 to leave the leading \n on:
                    prev = prev[:m.start() + 1]
                    tokens[i - 1] = prev
            if next_chunk:
                last_trim = i
                if i == len(tokens) - 2 and not next_chunk.strip():
                    tokens[i + 1] = ''
                else:
                    m = lead_whitespace_re.search(next_chunk)
                    next_chunk = next_chunk[m.end():]
                    tokens[i + 1] = next_chunk
    return tokens
def find_position(string, index, line_offset):
    """Given a string and index, return the 1-based (line, column)."""
    before = string[:index].splitlines()
    line = len(before) + line_offset
    column = len(before[-1]) + 1
    return (line, column)
def parse(s, name=None, line_offset=0):
    r"""
    Parses a string into a kind of AST

        >>> parse('{{x}}')
        [('expr', (1, 3), 'x')]
        >>> parse('foo')
        ['foo']
        >>> parse('{{if x}}test{{endif}}')
        [('cond', (1, 3), ('if', (1, 3), 'x', ['test']))]
        >>> parse('series->{{for x in y}}x={{x}}{{endfor}}')
        ['series->', ('for', (1, 11), ('x',), 'y', ['x=', ('expr', (1, 27), 'x')])]
        >>> parse('{{for x, y in z:}}{{continue}}{{endfor}}')
        [('for', (1, 3), ('x', 'y'), 'z', [('continue', (1, 21))])]
        >>> parse('{{py:x=1}}')
        [('py', (1, 3), 'x=1')]
        >>> parse('{{if x}}a{{elif y}}b{{else}}c{{endif}}')
        [('cond', (1, 3), ('if', (1, 3), 'x', ['a']), ('elif', (1, 12), 'y', ['b']), ('else', (1, 23), None, ['c']))]

    Some exceptions::

        >>> parse('{{continue}}') # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            ...
        TemplateError: continue outside of for loop at line 1 column 3
        >>> parse('{{if x}}foo') # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            ...
        TemplateError: No {{endif}} at line 1 column 3
        >>> parse('{{else}}') # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            ...
        TemplateError: else outside of an if block at line 1 column 3
        >>> parse('{{if x}}{{for x in y}}{{endif}}{{endfor}}') # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            ...
        TemplateError: Unexpected endif at line 1 column 25
        >>> parse('{{if}}{{endif}}') # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            ...
        TemplateError: if with no expression at line 1 column 3
        >>> parse('{{for x y}}{{endfor}}') # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            ...
        TemplateError: Bad for (no "in") in 'x y' at line 1 column 3
        >>> parse('{{py:x=1\ny=2}}') # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            ...
        TemplateError: Multi-line py blocks must start with a newline at line 1 column 3
    """
    # Tokenize first, then repeatedly peel one statement/expression off
    # the front of the token stream until it is exhausted.
    tokens = lex(s, name=name, line_offset=line_offset)
    result = []
    while tokens:
        next_chunk, tokens = parse_expr(tokens, name)
        result.append(next_chunk)
    return result
def parse_expr(tokens, name, context=()):
    """Parse one statement or expression from the front of *tokens*.

    Returns ``(node, remaining_tokens)``.  *context* tracks the
    enclosing directives (``'for'``, ``'if'``, ``'def'``) so that loop
    control and stray end-markers can be validated.
    """
    if isinstance(tokens[0], basestring_):
        # Literal text passes straight through.
        return tokens[0], tokens[1:]
    expr, pos = tokens[0]
    expr = expr.strip()
    if expr.startswith('py:'):
        expr = expr[3:].lstrip(' \t')
        if expr.startswith('\n') or expr.startswith('\r'):
            # Multi-line code block: normalize newlines, ensure trailing \n.
            expr = expr.lstrip('\r\n')
            if '\r' in expr:
                expr = expr.replace('\r\n', '\n')
                expr = expr.replace('\r', '')
            expr += '\n'
        else:
            if '\n' in expr:
                raise TemplateError(
                    'Multi-line py blocks must start with a newline',
                    position=pos, name=name)
        return ('py', pos, expr), tokens[1:]
    elif expr in ('continue', 'break'):
        if 'for' not in context:
            # Bug fix: this message previously always said 'continue',
            # even when the offending directive was {{break}}.
            raise TemplateError(
                '%s outside of for loop' % expr,
                position=pos, name=name)
        return (expr, pos), tokens[1:]
    elif expr.startswith('if '):
        return parse_cond(tokens, name, context)
    elif (expr.startswith('elif ')
          or expr == 'else'):
        raise TemplateError(
            '%s outside of an if block' % expr.split()[0],
            position=pos, name=name)
    elif expr in ('if', 'elif', 'for'):
        raise TemplateError(
            '%s with no expression' % expr,
            position=pos, name=name)
    elif expr in ('endif', 'endfor', 'enddef'):
        raise TemplateError(
            'Unexpected %s' % expr,
            position=pos, name=name)
    elif expr.startswith('for '):
        return parse_for(tokens, name, context)
    elif expr.startswith('default '):
        return parse_default(tokens, name, context)
    elif expr.startswith('inherit '):
        return parse_inherit(tokens, name, context)
    elif expr.startswith('def '):
        return parse_def(tokens, name, context)
    elif expr.startswith('#'):
        # {{# ...}} is a template comment; keep the raw text for position.
        return ('comment', pos, tokens[0][0]), tokens[1:]
    return ('expr', pos, tokens[0][0]), tokens[1:]
def parse_cond(tokens, name, context):
    """Parse an if/elif/else chain, consuming through {{endif}}."""
    start = tokens[0][1]
    pieces = []
    context = context + ('if',)
    while True:
        if not tokens:
            raise TemplateError(
                'Missing {{endif}}',
                position=start, name=name)
        head = tokens[0]
        if isinstance(head, tuple) and head[0] == 'endif':
            # All branches collected; wrap them in a single 'cond' node.
            return ('cond', start) + tuple(pieces), tokens[1:]
        piece, tokens = parse_one_cond(tokens, name, context)
        pieces.append(piece)
def parse_one_cond(tokens, name, context):
    """Parse a single if/elif/else branch and its body.

    Returns (branch_node, remaining_tokens); stops (without consuming)
    at the next elif/else/endif so parse_cond can continue.
    """
    (first, pos), tokens = tokens[0], tokens[1:]
    content = []
    # An optional trailing colon is tolerated: {{if x:}}.
    if first.endswith(':'):
        first = first[:-1]
    if first.startswith('if '):
        part = ('if', pos, first[3:].lstrip(), content)
    elif first.startswith('elif '):
        part = ('elif', pos, first[5:].lstrip(), content)
    elif first == 'else':
        part = ('else', pos, None, content)
    else:
        assert 0, "Unexpected token %r at %s" % (first, pos)
    while 1:
        if not tokens:
            raise TemplateError(
                'No {{endif}}',
                position=pos, name=name)
        if (isinstance(tokens[0], tuple)
                and (tokens[0][0] == 'endif'
                     or tokens[0][0].startswith('elif ')
                     or tokens[0][0] == 'else')):
            # Branch body finished; leave the terminator for the caller.
            return part, tokens
        next_chunk, tokens = parse_expr(tokens, name, context)
        content.append(next_chunk)
def parse_for(tokens, name, context):
    """Parse a {{for vars in expr}} ... {{endfor}} block.

    Returns (('for', pos, vars_tuple, expr, content), remaining_tokens).
    """
    first, pos = tokens[0]
    tokens = tokens[1:]
    context = ('for',) + context
    content = []
    assert first.startswith('for ')
    # An optional trailing colon is tolerated: {{for x in y:}}.
    if first.endswith(':'):
        first = first[:-1]
    first = first[3:].strip()
    match = in_re.search(first)
    if not match:
        raise TemplateError(
            'Bad for (no "in") in %r' % first,
            position=pos, name=name)
    vars = first[:match.start()]
    if '(' in vars:
        raise TemplateError(
            'You cannot have () in the variable section of a for loop (%r)'
            % vars, position=pos, name=name)
    # Re-derive the variable names as a tuple (the string form above is
    # only used for the parenthesis check).
    vars = tuple([
        v.strip() for v in first[:match.start()].split(',')
        if v.strip()])
    expr = first[match.end():]
    while 1:
        if not tokens:
            raise TemplateError(
                'No {{endfor}}',
                position=pos, name=name)
        if (isinstance(tokens[0], tuple)
                and tokens[0][0] == 'endfor'):
            return ('for', pos, vars, expr, content), tokens[1:]
        next_chunk, tokens = parse_expr(tokens, name, context)
        content.append(next_chunk)
def parse_default(tokens, name, context):
    """Parse {{default var=expr}} into a ('default', pos, var, expr) node.

    Only a single, valid identifier may appear on the left of ``=``.
    """
    first, pos = tokens[0]
    assert first.startswith('default ')
    first = first.split(None, 1)[1]
    parts = first.split('=', 1)
    if len(parts) == 1:
        raise TemplateError(
            "Expression must be {{default var=value}}; no = found in %r" % first,
            position=pos, name=name)
    var = parts[0].strip()
    if ',' in var:
        raise TemplateError(
            "{{default x, y = ...}} is not supported",
            position=pos, name=name)
    if not var_re.search(var):
        raise TemplateError(
            "Not a valid variable name for {{default}}: %r"
            % var, position=pos, name=name)
    expr = parts[1].strip()
    return ('default', pos, var, expr), tokens[1:]
def parse_inherit(tokens, name, context):
    """Parse an {{inherit expr}} directive into an AST node."""
    directive, pos = tokens[0]
    assert directive.startswith('inherit ')
    # Everything after the keyword is the parent-template expression.
    _, expr = directive.split(None, 1)
    return ('inherit', pos, expr), tokens[1:]
def parse_def(tokens, name, context):
    """Parse {{def name(signature)}} ... {{enddef}} into a 'def' node.

    A bare ``{{def name}}`` (no parentheses) gets an empty signature.
    """
    first, start = tokens[0]
    tokens = tokens[1:]
    assert first.startswith('def ')
    first = first.split(None, 1)[1]
    # An optional trailing colon is tolerated: {{def f(x):}}.
    if first.endswith(':'):
        first = first[:-1]
    if '(' not in first:
        func_name = first
        # (positional args, *args name, **kw name, defaults dict)
        sig = ((), None, None, {})
    elif not first.endswith(')'):
        raise TemplateError("Function definition doesn't end with ): %s" % first,
                            position=start, name=name)
    else:
        first = first[:-1]
        func_name, sig_text = first.split('(', 1)
        sig = parse_signature(sig_text, name, start)
    context = context + ('def',)
    content = []
    while 1:
        if not tokens:
            raise TemplateError(
                'Missing {{enddef}}',
                position=start, name=name)
        if (isinstance(tokens[0], tuple)
                and tokens[0][0] == 'enddef'):
            return ('def', start, func_name, sig, content), tokens[1:]
        next_chunk, tokens = parse_expr(tokens, name, context)
        content.append(next_chunk)
def parse_signature(sig_text, name, pos):
    """Parse a {{def}} signature string with Python's tokenizer.

    Returns (sig_args, var_arg, var_kw, defaults): the positional
    argument names, the *args name (or None), the **kw name (or None),
    and a dict of name -> default-value source expression (evaluated
    later, at call time).
    """
    if PY2 and isinstance(sig_text, str):
        lines = BytesIO(sig_text).readline
    else:
        lines = StringIO(sig_text).readline
    tokens = tokenize.generate_tokens(lines)
    sig_args = []
    var_arg = None
    var_kw = None
    defaults = {}

    def get_token(pos=False):
        # Thin wrapper over the token stream; ENDMARKER on exhaustion.
        try:
            tok_type, tok_string, (srow, scol), (erow, ecol), line = next(tokens)
        except StopIteration:
            return tokenize.ENDMARKER, ''
        if pos:
            return tok_type, tok_string, (srow, scol), (erow, ecol)
        else:
            return tok_type, tok_string
    while 1:
        var_arg_type = None
        tok_type, tok_string = get_token()
        if tok_type == tokenize.ENDMARKER:
            break
        if tok_type == tokenize.OP and (tok_string == '*' or tok_string == '**'):
            # Remember whether the next name is *args or **kw.
            var_arg_type = tok_string
            tok_type, tok_string = get_token()
        if tok_type != tokenize.NAME:
            raise TemplateError('Invalid signature: (%s)' % sig_text,
                                position=pos, name=name)
        var_name = tok_string
        tok_type, tok_string = get_token()
        if tok_type == tokenize.ENDMARKER or (tok_type == tokenize.OP and tok_string == ','):
            # Argument without a default value.
            if var_arg_type == '*':
                var_arg = var_name
            elif var_arg_type == '**':
                var_kw = var_name
            else:
                sig_args.append(var_name)
            if tok_type == tokenize.ENDMARKER:
                break
            continue
        if var_arg_type is not None:
            # *args/**kw cannot carry default values.
            raise TemplateError('Invalid signature: (%s)' % sig_text,
                                position=pos, name=name)
        if tok_type == tokenize.OP and tok_string == '=':
            # Scan the default-value expression, tracking bracket nesting
            # so commas inside (), [], {} do not end the expression.
            nest_type = None
            unnest_type = None
            nest_count = 0
            start_pos = end_pos = None
            parts = []
            while 1:
                tok_type, tok_string, s, e = get_token(True)
                if start_pos is None:
                    start_pos = s
                end_pos = e
                if tok_type == tokenize.ENDMARKER and nest_count:
                    raise TemplateError('Invalid signature: (%s)' % sig_text,
                                        position=pos, name=name)
                if (not nest_count and
                        (tok_type == tokenize.ENDMARKER or (tok_type == tokenize.OP and tok_string == ','))):
                    # Default expression complete; keep its source text.
                    default_expr = isolate_expression(sig_text, start_pos, end_pos)
                    defaults[var_name] = default_expr
                    sig_args.append(var_name)
                    break
                parts.append((tok_type, tok_string))
                if nest_count and tok_type == tokenize.OP and tok_string == nest_type:
                    nest_count += 1
                elif nest_count and tok_type == tokenize.OP and tok_string == unnest_type:
                    nest_count -= 1
                    if not nest_count:
                        nest_type = unnest_type = None
                elif not nest_count and tok_type == tokenize.OP and tok_string in ('(', '[', '{'):
                    nest_type = tok_string
                    nest_count = 1
                    unnest_type = {'(': ')', '[': ']', '{': '}'}[nest_type]
    return sig_args, var_arg, var_kw, defaults
def isolate_expression(string, start_pos, end_pos):
    """Extract the substring between two tokenizer (row, col) positions.

    Rows are 1-based (as produced by the tokenize module); columns are
    0-based.
    """
    srow, scol = start_pos
    erow, ecol = end_pos
    srow -= 1
    erow -= 1
    lines = string.splitlines(True)
    if srow == erow:
        # Whole expression lives on a single line.
        return lines[srow][scol:ecol]
    pieces = [lines[srow][scol:]]
    pieces.extend(lines[srow + 1:erow])
    if erow < len(lines):
        # It'll sometimes give (end_row_past_finish, 0)
        pieces.append(lines[erow][:ecol])
    return ''.join(pieces)
# optparse usage text for the fill_command console entry point.
_fill_command_usage = """\
%prog [OPTIONS] TEMPLATE arg=value
Use py:arg=value to set a Python value; otherwise all values are
strings.
"""
def fill_command(args=None):
    """Command-line entry point: fill a template file with arg=value pairs.

    ``py:name=value`` arguments are evaluated as Python expressions;
    everything else is passed through as a string.  Reads the template
    from stdin when TEMPLATE is '-'; writes to --output or stdout.
    """
    import sys
    import optparse
    import pkg_resources
    import os
    if args is None:
        args = sys.argv[1:]
    dist = pkg_resources.get_distribution('Paste')
    parser = optparse.OptionParser(
        version=coerce_text(dist),
        usage=_fill_command_usage)
    parser.add_option(
        '-o', '--output',
        dest='output',
        metavar="FILENAME",
        help="File to write output to (default stdout)")
    parser.add_option(
        '--html',
        dest='use_html',
        action='store_true',
        help="Use HTML style filling (including automatic HTML quoting)")
    parser.add_option(
        '--env',
        dest='use_env',
        action='store_true',
        help="Put the environment in as top-level variables")
    options, args = parser.parse_args(args)
    if len(args) < 1:
        print('You must give a template filename')
        sys.exit(2)
    template_name = args[0]
    args = args[1:]
    vars = {}
    if options.use_env:
        vars.update(os.environ)
    for value in args:
        if '=' not in value:
            print('Bad argument: %r' % value)
            sys.exit(2)
        name, value = value.split('=', 1)
        if name.startswith('py:'):
            # Bug fix: strip the 'py:' prefix.  This was ``name = name[:3]``,
            # which kept the literal prefix and threw away the real name.
            name = name[3:]
            # NOTE: eval() of a command-line value is deliberate here, but
            # it is only acceptable because the input comes from the user
            # invoking the command — never feed untrusted input through it.
            value = eval(value)
        vars[name] = value
    if template_name == '-':
        template_content = sys.stdin.read()
        template_name = '<stdin>'
    else:
        with open(template_name, 'rb') as f:
            template_content = f.read()
    if options.use_html:
        TemplateClass = HTMLTemplate
    else:
        TemplateClass = Template
    template = TemplateClass(template_content, name=template_name)
    result = template.substitute(vars)
    if options.output:
        with open(options.output, 'wb') as f:
            f.write(result)
    else:
        sys.stdout.write(result)
if __name__ == '__main__':
    # Allow running this module directly as the template-filling CLI.
    fill_command()
/ocn-xmlchecker.env.tar.gz/env (copy)/lib/python2.7/encodings/mac_centeuro.py |
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless codec built on the module's charmap tables."""

    def encode(self, input, errors='strict'):
        return codecs.charmap_encode(input, errors, encoding_table)

    def decode(self, input, errors='strict'):
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        # charmap encoding is stateless, so *final* needs no handling;
        # charmap_encode returns (data, length_consumed).
        data, _consumed = codecs.charmap_encode(input, self.errors, encoding_table)
        return data
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        # charmap decoding is stateless, so *final* needs no handling;
        # charmap_decode returns (text, length_consumed).
        text, _consumed = codecs.charmap_decode(input, self.errors, decoding_table)
        return text
class StreamWriter(Codec, codecs.StreamWriter):
    """Stream writer for this charmap codec (all logic inherited)."""
class StreamReader(Codec, codecs.StreamReader):
    """Stream reader for this charmap codec (all logic inherited)."""
### encodings module API
def getregentry():
    """Return the :class:`codecs.CodecInfo` registration entry for this codec.

    Called by the ``encodings`` package machinery when the codec is looked
    up by name.
    """
    # One stateless Codec instance serves both directions.
    codec = Codec()
    return codecs.CodecInfo(
        name='mac-centeuro',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
# 256-character string: index == byte value, value == Unicode character.
# Consumed by codecs.charmap_decode and by charmap_build (encoding_table).
decoding_table = (
    u'\x00' # 0x00 -> CONTROL CHARACTER
    u'\x01' # 0x01 -> CONTROL CHARACTER
    u'\x02' # 0x02 -> CONTROL CHARACTER
    u'\x03' # 0x03 -> CONTROL CHARACTER
    u'\x04' # 0x04 -> CONTROL CHARACTER
    u'\x05' # 0x05 -> CONTROL CHARACTER
    u'\x06' # 0x06 -> CONTROL CHARACTER
    u'\x07' # 0x07 -> CONTROL CHARACTER
    u'\x08' # 0x08 -> CONTROL CHARACTER
    u'\t' # 0x09 -> CONTROL CHARACTER
    u'\n' # 0x0A -> CONTROL CHARACTER
    u'\x0b' # 0x0B -> CONTROL CHARACTER
    u'\x0c' # 0x0C -> CONTROL CHARACTER
    u'\r' # 0x0D -> CONTROL CHARACTER
    u'\x0e' # 0x0E -> CONTROL CHARACTER
    u'\x0f' # 0x0F -> CONTROL CHARACTER
    u'\x10' # 0x10 -> CONTROL CHARACTER
    u'\x11' # 0x11 -> CONTROL CHARACTER
    u'\x12' # 0x12 -> CONTROL CHARACTER
    u'\x13' # 0x13 -> CONTROL CHARACTER
    u'\x14' # 0x14 -> CONTROL CHARACTER
    u'\x15' # 0x15 -> CONTROL CHARACTER
    u'\x16' # 0x16 -> CONTROL CHARACTER
    u'\x17' # 0x17 -> CONTROL CHARACTER
    u'\x18' # 0x18 -> CONTROL CHARACTER
    u'\x19' # 0x19 -> CONTROL CHARACTER
    u'\x1a' # 0x1A -> CONTROL CHARACTER
    u'\x1b' # 0x1B -> CONTROL CHARACTER
    u'\x1c' # 0x1C -> CONTROL CHARACTER
    u'\x1d' # 0x1D -> CONTROL CHARACTER
    u'\x1e' # 0x1E -> CONTROL CHARACTER
    u'\x1f' # 0x1F -> CONTROL CHARACTER
    u' ' # 0x20 -> SPACE
    u'!' # 0x21 -> EXCLAMATION MARK
    u'"' # 0x22 -> QUOTATION MARK
    u'#' # 0x23 -> NUMBER SIGN
    u'$' # 0x24 -> DOLLAR SIGN
    u'%' # 0x25 -> PERCENT SIGN
    u'&' # 0x26 -> AMPERSAND
    u"'" # 0x27 -> APOSTROPHE
    u'(' # 0x28 -> LEFT PARENTHESIS
    u')' # 0x29 -> RIGHT PARENTHESIS
    u'*' # 0x2A -> ASTERISK
    u'+' # 0x2B -> PLUS SIGN
    u',' # 0x2C -> COMMA
    u'-' # 0x2D -> HYPHEN-MINUS
    u'.' # 0x2E -> FULL STOP
    u'/' # 0x2F -> SOLIDUS
    u'0' # 0x30 -> DIGIT ZERO
    u'1' # 0x31 -> DIGIT ONE
    u'2' # 0x32 -> DIGIT TWO
    u'3' # 0x33 -> DIGIT THREE
    u'4' # 0x34 -> DIGIT FOUR
    u'5' # 0x35 -> DIGIT FIVE
    u'6' # 0x36 -> DIGIT SIX
    u'7' # 0x37 -> DIGIT SEVEN
    u'8' # 0x38 -> DIGIT EIGHT
    u'9' # 0x39 -> DIGIT NINE
    u':' # 0x3A -> COLON
    u';' # 0x3B -> SEMICOLON
    u'<' # 0x3C -> LESS-THAN SIGN
    u'=' # 0x3D -> EQUALS SIGN
    u'>' # 0x3E -> GREATER-THAN SIGN
    u'?' # 0x3F -> QUESTION MARK
    u'@' # 0x40 -> COMMERCIAL AT
    u'A' # 0x41 -> LATIN CAPITAL LETTER A
    u'B' # 0x42 -> LATIN CAPITAL LETTER B
    u'C' # 0x43 -> LATIN CAPITAL LETTER C
    u'D' # 0x44 -> LATIN CAPITAL LETTER D
    u'E' # 0x45 -> LATIN CAPITAL LETTER E
    u'F' # 0x46 -> LATIN CAPITAL LETTER F
    u'G' # 0x47 -> LATIN CAPITAL LETTER G
    u'H' # 0x48 -> LATIN CAPITAL LETTER H
    u'I' # 0x49 -> LATIN CAPITAL LETTER I
    u'J' # 0x4A -> LATIN CAPITAL LETTER J
    u'K' # 0x4B -> LATIN CAPITAL LETTER K
    u'L' # 0x4C -> LATIN CAPITAL LETTER L
    u'M' # 0x4D -> LATIN CAPITAL LETTER M
    u'N' # 0x4E -> LATIN CAPITAL LETTER N
    u'O' # 0x4F -> LATIN CAPITAL LETTER O
    u'P' # 0x50 -> LATIN CAPITAL LETTER P
    u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
    u'R' # 0x52 -> LATIN CAPITAL LETTER R
    u'S' # 0x53 -> LATIN CAPITAL LETTER S
    u'T' # 0x54 -> LATIN CAPITAL LETTER T
    u'U' # 0x55 -> LATIN CAPITAL LETTER U
    u'V' # 0x56 -> LATIN CAPITAL LETTER V
    u'W' # 0x57 -> LATIN CAPITAL LETTER W
    u'X' # 0x58 -> LATIN CAPITAL LETTER X
    u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
    u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
    u'[' # 0x5B -> LEFT SQUARE BRACKET
    u'\\' # 0x5C -> REVERSE SOLIDUS
    u']' # 0x5D -> RIGHT SQUARE BRACKET
    u'^' # 0x5E -> CIRCUMFLEX ACCENT
    u'_' # 0x5F -> LOW LINE
    u'`' # 0x60 -> GRAVE ACCENT
    u'a' # 0x61 -> LATIN SMALL LETTER A
    u'b' # 0x62 -> LATIN SMALL LETTER B
    u'c' # 0x63 -> LATIN SMALL LETTER C
    u'd' # 0x64 -> LATIN SMALL LETTER D
    u'e' # 0x65 -> LATIN SMALL LETTER E
    u'f' # 0x66 -> LATIN SMALL LETTER F
    u'g' # 0x67 -> LATIN SMALL LETTER G
    u'h' # 0x68 -> LATIN SMALL LETTER H
    u'i' # 0x69 -> LATIN SMALL LETTER I
    u'j' # 0x6A -> LATIN SMALL LETTER J
    u'k' # 0x6B -> LATIN SMALL LETTER K
    u'l' # 0x6C -> LATIN SMALL LETTER L
    u'm' # 0x6D -> LATIN SMALL LETTER M
    u'n' # 0x6E -> LATIN SMALL LETTER N
    u'o' # 0x6F -> LATIN SMALL LETTER O
    u'p' # 0x70 -> LATIN SMALL LETTER P
    u'q' # 0x71 -> LATIN SMALL LETTER Q
    u'r' # 0x72 -> LATIN SMALL LETTER R
    u's' # 0x73 -> LATIN SMALL LETTER S
    u't' # 0x74 -> LATIN SMALL LETTER T
    u'u' # 0x75 -> LATIN SMALL LETTER U
    u'v' # 0x76 -> LATIN SMALL LETTER V
    u'w' # 0x77 -> LATIN SMALL LETTER W
    u'x' # 0x78 -> LATIN SMALL LETTER X
    u'y' # 0x79 -> LATIN SMALL LETTER Y
    u'z' # 0x7A -> LATIN SMALL LETTER Z
    u'{' # 0x7B -> LEFT CURLY BRACKET
    u'|' # 0x7C -> VERTICAL LINE
    u'}' # 0x7D -> RIGHT CURLY BRACKET
    u'~' # 0x7E -> TILDE
    u'\x7f' # 0x7F -> CONTROL CHARACTER
    u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
    u'\u0100' # 0x81 -> LATIN CAPITAL LETTER A WITH MACRON
    u'\u0101' # 0x82 -> LATIN SMALL LETTER A WITH MACRON
    u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
    u'\u0104' # 0x84 -> LATIN CAPITAL LETTER A WITH OGONEK
    u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
    u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
    u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
    u'\u0105' # 0x88 -> LATIN SMALL LETTER A WITH OGONEK
    u'\u010c' # 0x89 -> LATIN CAPITAL LETTER C WITH CARON
    u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
    u'\u010d' # 0x8B -> LATIN SMALL LETTER C WITH CARON
    u'\u0106' # 0x8C -> LATIN CAPITAL LETTER C WITH ACUTE
    u'\u0107' # 0x8D -> LATIN SMALL LETTER C WITH ACUTE
    u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
    u'\u0179' # 0x8F -> LATIN CAPITAL LETTER Z WITH ACUTE
    u'\u017a' # 0x90 -> LATIN SMALL LETTER Z WITH ACUTE
    u'\u010e' # 0x91 -> LATIN CAPITAL LETTER D WITH CARON
    u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
    u'\u010f' # 0x93 -> LATIN SMALL LETTER D WITH CARON
    u'\u0112' # 0x94 -> LATIN CAPITAL LETTER E WITH MACRON
    u'\u0113' # 0x95 -> LATIN SMALL LETTER E WITH MACRON
    u'\u0116' # 0x96 -> LATIN CAPITAL LETTER E WITH DOT ABOVE
    u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
    u'\u0117' # 0x98 -> LATIN SMALL LETTER E WITH DOT ABOVE
    u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
    u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
    u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
    u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
    u'\u011a' # 0x9D -> LATIN CAPITAL LETTER E WITH CARON
    u'\u011b' # 0x9E -> LATIN SMALL LETTER E WITH CARON
    u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
    u'\u2020' # 0xA0 -> DAGGER
    u'\xb0' # 0xA1 -> DEGREE SIGN
    u'\u0118' # 0xA2 -> LATIN CAPITAL LETTER E WITH OGONEK
    u'\xa3' # 0xA3 -> POUND SIGN
    u'\xa7' # 0xA4 -> SECTION SIGN
    u'\u2022' # 0xA5 -> BULLET
    u'\xb6' # 0xA6 -> PILCROW SIGN
    u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
    u'\xae' # 0xA8 -> REGISTERED SIGN
    u'\xa9' # 0xA9 -> COPYRIGHT SIGN
    u'\u2122' # 0xAA -> TRADE MARK SIGN
    u'\u0119' # 0xAB -> LATIN SMALL LETTER E WITH OGONEK
    u'\xa8' # 0xAC -> DIAERESIS
    u'\u2260' # 0xAD -> NOT EQUAL TO
    u'\u0123' # 0xAE -> LATIN SMALL LETTER G WITH CEDILLA
    u'\u012e' # 0xAF -> LATIN CAPITAL LETTER I WITH OGONEK
    u'\u012f' # 0xB0 -> LATIN SMALL LETTER I WITH OGONEK
    u'\u012a' # 0xB1 -> LATIN CAPITAL LETTER I WITH MACRON
    u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
    u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
    u'\u012b' # 0xB4 -> LATIN SMALL LETTER I WITH MACRON
    u'\u0136' # 0xB5 -> LATIN CAPITAL LETTER K WITH CEDILLA
    u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
    u'\u2211' # 0xB7 -> N-ARY SUMMATION
    u'\u0142' # 0xB8 -> LATIN SMALL LETTER L WITH STROKE
    u'\u013b' # 0xB9 -> LATIN CAPITAL LETTER L WITH CEDILLA
    u'\u013c' # 0xBA -> LATIN SMALL LETTER L WITH CEDILLA
    u'\u013d' # 0xBB -> LATIN CAPITAL LETTER L WITH CARON
    u'\u013e' # 0xBC -> LATIN SMALL LETTER L WITH CARON
    u'\u0139' # 0xBD -> LATIN CAPITAL LETTER L WITH ACUTE
    u'\u013a' # 0xBE -> LATIN SMALL LETTER L WITH ACUTE
    u'\u0145' # 0xBF -> LATIN CAPITAL LETTER N WITH CEDILLA
    u'\u0146' # 0xC0 -> LATIN SMALL LETTER N WITH CEDILLA
    u'\u0143' # 0xC1 -> LATIN CAPITAL LETTER N WITH ACUTE
    u'\xac' # 0xC2 -> NOT SIGN
    u'\u221a' # 0xC3 -> SQUARE ROOT
    u'\u0144' # 0xC4 -> LATIN SMALL LETTER N WITH ACUTE
    u'\u0147' # 0xC5 -> LATIN CAPITAL LETTER N WITH CARON
    u'\u2206' # 0xC6 -> INCREMENT
    u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
    u'\xa0' # 0xCA -> NO-BREAK SPACE
    u'\u0148' # 0xCB -> LATIN SMALL LETTER N WITH CARON
    u'\u0150' # 0xCC -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
    u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
    u'\u0151' # 0xCE -> LATIN SMALL LETTER O WITH DOUBLE ACUTE
    u'\u014c' # 0xCF -> LATIN CAPITAL LETTER O WITH MACRON
    u'\u2013' # 0xD0 -> EN DASH
    u'\u2014' # 0xD1 -> EM DASH
    u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
    u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
    u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
    u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
    u'\xf7' # 0xD6 -> DIVISION SIGN
    u'\u25ca' # 0xD7 -> LOZENGE
    u'\u014d' # 0xD8 -> LATIN SMALL LETTER O WITH MACRON
    u'\u0154' # 0xD9 -> LATIN CAPITAL LETTER R WITH ACUTE
    u'\u0155' # 0xDA -> LATIN SMALL LETTER R WITH ACUTE
    u'\u0158' # 0xDB -> LATIN CAPITAL LETTER R WITH CARON
    u'\u2039' # 0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
    u'\u203a' # 0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
    u'\u0159' # 0xDE -> LATIN SMALL LETTER R WITH CARON
    u'\u0156' # 0xDF -> LATIN CAPITAL LETTER R WITH CEDILLA
    u'\u0157' # 0xE0 -> LATIN SMALL LETTER R WITH CEDILLA
    u'\u0160' # 0xE1 -> LATIN CAPITAL LETTER S WITH CARON
    u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
    u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
    u'\u0161' # 0xE4 -> LATIN SMALL LETTER S WITH CARON
    u'\u015a' # 0xE5 -> LATIN CAPITAL LETTER S WITH ACUTE
    u'\u015b' # 0xE6 -> LATIN SMALL LETTER S WITH ACUTE
    u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
    u'\u0164' # 0xE8 -> LATIN CAPITAL LETTER T WITH CARON
    u'\u0165' # 0xE9 -> LATIN SMALL LETTER T WITH CARON
    u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
    u'\u017d' # 0xEB -> LATIN CAPITAL LETTER Z WITH CARON
    u'\u017e' # 0xEC -> LATIN SMALL LETTER Z WITH CARON
    u'\u016a' # 0xED -> LATIN CAPITAL LETTER U WITH MACRON
    u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
    u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
    u'\u016b' # 0xF0 -> LATIN SMALL LETTER U WITH MACRON
    u'\u016e' # 0xF1 -> LATIN CAPITAL LETTER U WITH RING ABOVE
    u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
    u'\u016f' # 0xF3 -> LATIN SMALL LETTER U WITH RING ABOVE
    u'\u0170' # 0xF4 -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
    u'\u0171' # 0xF5 -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
    u'\u0172' # 0xF6 -> LATIN CAPITAL LETTER U WITH OGONEK
    u'\u0173' # 0xF7 -> LATIN SMALL LETTER U WITH OGONEK
    u'\xdd' # 0xF8 -> LATIN CAPITAL LETTER Y WITH ACUTE
    u'\xfd' # 0xF9 -> LATIN SMALL LETTER Y WITH ACUTE
    u'\u0137' # 0xFA -> LATIN SMALL LETTER K WITH CEDILLA
    u'\u017b' # 0xFB -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
    u'\u0141' # 0xFC -> LATIN CAPITAL LETTER L WITH STROKE
    u'\u017c' # 0xFD -> LATIN SMALL LETTER Z WITH DOT ABOVE
    u'\u0122' # 0xFE -> LATIN CAPITAL LETTER G WITH CEDILLA
    u'\u02c7' # 0xFF -> CARON
)
### Encoding table
# Inverse mapping (Unicode character -> byte value) derived from the
# decoding table above.
encoding_table = codecs.charmap_build(decoding_table)
/ClusterShell-1.9.1.tar.gz/ClusterShell-1.9.1/doc/sphinx/release.rst | .. highlight:: console
Release Notes
=============
Version 1.9
-----------
We are pleased to announce the availability of this new release, which comes
with some exciting new features and improvements. We would like to thank
everyone who participated in this release in one way or another.
Version 1.9.1
^^^^^^^^^^^^^
This version contains a few bug fixes and improvements over 1.9, mostly
affecting packaging:
* Allow ``clustershell`` to be installed as user in a ``venv`` using
``pip install`` or using ``pip install --user`` with man pages. Root
installation using pip is now discouraged. If done, ``/usr/local`` is
likely to be used as the install prefix. See :ref:`install-python` for
more information.
* :ref:`clush-tool`: ``$CFGDIR`` was broken if ``/etc/clustershell`` did not
exist
* Add support for negative ranges in :class:`.RangeSet`.
For more details, please have a look at `GitHub Issues for 1.9.1 milestone`_.
Main changes in 1.9
^^^^^^^^^^^^^^^^^^^
Python support
""""""""""""""
.. warning:: Support for Python 2.6 has been dropped in this version.
Upgrading to Python 3 is highly recommended as Python 2 reached end of
life in 2020. See :ref:`install-requirements`.
clush
"""""
* :ref:`clush-tool` has now support for :ref:`clush-modes` to support more
authentication use cases. A run mode has pre-defined
:ref:`clush.conf <clush-config>` settings with a given name, and can then
be activated with ``--mode=MODE``. We also added the new options
``command_prefix`` and ``password_prompt`` (see
:ref:`clush.conf <clush-config>`). Two examples of run modes are included
and can be easily enabled:
* :ref:`password-based ssh authentication with sshpass <clush-sshpass>`
* :ref:`sudo password forwarding over stdin <clush-sudo>`
.. note:: ``clush.conf`` comes with a new variable
:ref:`confdir <clush-config>` to specify where to look for run mode
configuration files. If you upgrade from 1.8.4 and want to use run modes,
make sure ``confdir`` is present in your ``clush.conf``.
* :ref:`clush-tool`: add arguments ``--outdir=OUTDIR`` and
``--errdir=ERRDIR``; similar to *pssh(1)*, it allows to save the standard
output (stdout) and/or error (stderr) of all remote commands to local
files. See :ref:`clush-outdir`.
Node sets and node groups
"""""""""""""""""""""""""
.. warning:: To support mixed-length 0-padding ranges, version 1.9 introduces
changes in :class:`.RangeSet`'s API that might break existing code. If you
use :class:`.RangeSet` directly, see below for more information.
* :class:`.NodeSet`, :class:`.RangeSet` and :class:`.RangeSetND` objects now
support sets with mixed length zero padding, meaning you can safely mix
ranges like ``2-3``, ``03-09`` and ``005-123``.
The following example with :ref:`nodeset-tool` shows that not only ``01``
and ``001`` are now seen as separate indexes, but it is also possible to mix
non-padded indexes like ``1`` with zero-padded indexes::
$ nodeset --fold node001 node1 node01
node[1,01,001]
See ``nodeset``'s :ref:`zero padding <nodeset-zeropadding>` for more examples.
:class:`.RangeSet` now internally manages indexes as strings with the zero
padding included. Prior to v1.9, indexes were stored as integers and zero
padding was a simple display feature of fixed length per :class:`.RangeSet`
object. If you are using this class directly in your code, please see the
:ref:`class-RangeSet` in the Programming Guide section for portability
recommendations (especially the new method :meth:`.RangeSet.intiter()`).
.. note:: The :class:`.NodeSet` class API has NOT changed so as long as you do
not use :class:`.RangeSet` directly, you may safely upgrade to 1.9.
* :ref:`nodeset-rawgroupnames`: the **@@** operator may be used in any node
set expression to manipulate group names as a node set::
$ nodeset -l -s rack
@rack:J1
@rack:J2
@rack:J3
$ nodeset -f @@rack
J[1-3]
* :class:`.RangeSet`: multidimensional folding performance optimization,
useful for "xnames" on HPE Cray EX supercomputers that encode up to 5
dimensions.
* :ref:`Slurm group bindings <group-slurm-bindings>`: filter out more Slurm
node state flags
Configuration
"""""""""""""
* Introduce ``$CLUSTERSHELL_CFGDIR`` as an alternate location for
configuration files; useful on a cluster where ClusterShell is provided
as a user-facing tool installed on a shared file system (see
:ref:`clush-config`, :ref:`groups_config_conf` and :ref:`defaults-config`).
Tree mode
"""""""""
* Fix start by implementing a proper asynchronous start for :class:`.TreeWorker`,
which is now only triggered when the engine actually starts.
* Fix error with intermediate gateways
For more details, please have a look at `GitHub Issues for 1.9 milestone`_.
Version 1.8
-----------
This adaptive major release is now compatible with both Python 2 and Python 3.
We hope this release will help you manage your clusters, server farms or cloud
farms! Special thanks to the many of you that have sent us feedback on GitHub!
.. warning:: Support for Python 2.5 and below has been dropped in this version.
Version 1.8.4
^^^^^^^^^^^^^
This version contains a few bug fixes and improvements:
* allow out-of-tree worker modules
* use default local_worker and allow overriding :ref:`defaults-config` (tree mode)
* return maxrc properly in the case of the Rsh Worker
* :ref:`clush-tool`: improve stdin support with Python 3
* :ref:`clush-tool`: add maxrc option to :ref:`clush.conf <clush-config>`
* :ref:`clush-tool`: add support for NO_COLOR and CLICOLOR
For more details, please have a look at `GitHub Issues for 1.8.4 milestone`_.
Version 1.8.3
^^^^^^^^^^^^^
This version contains a few bug fixes and improvements, mostly affecting the
:ref:`tree mode <clush-tree>`:
* propagate ``CLUSTERSHELL_GW_PYTHON_EXECUTABLE`` environment variable to
remote gateways (see :ref:`clush-tree-python`)
* fix defect to properly close gateway channel when worker has aborted
* improve error reporting from gateways
* :ref:`clush-tool`: now properly handles ``--worker=ssh`` when
:ref:`topology.conf <clush-tree-enabling>` is present to explicitly disable
:ref:`tree mode <clush-tree>`
* use safe yaml load variant to avoid warning from :class:`.YAMLGroupLoader`
For more details, please have a look at `GitHub Issues for 1.8.3 milestone`_.
We also added a :ref:`Python support matrix <install-python-support-overview>`
for the main Linux distributions.
Version 1.8.2
^^^^^^^^^^^^^
This version contains a few minor fixes:
* :ref:`clush-tool`: support UTF-8 string encoding with
:ref:`--diff <clush-diff>`
* in some cases, :ref:`timers <configuring-a-timer>` were too fast due to an
issue in :class:`.EngineTimer`
* fix issue in the :ref:`Slurm group bindings <group-slurm-bindings>` where job
ids were used instead of user names
* performance update for :ref:`xCAT group bindings <group-xcat-bindings>`
For more details, please have a look at `GitHub Issues for 1.8.2 milestone`_.
Python support
""""""""""""""
Version 1.8.2 adds support for Python 3.7.
.. note:: This version still supports Python 2.6 and thus also RHEL/CentOS
6, but please note that ClusterShell 1.9 is expected to require at least
Python 2.7.
OS support
""""""""""
Version 1.8.2 adds support for RHEL 8/CentOS 8 and Fedora 31+, where only the
Python 3 package is provided. The ``clustershell`` packages will be made
available in EPEL-8 as soon as possible.
No packaging changes were made to ``clustershell`` in RHEL/CentOS 6 or 7.
Version 1.8.1
^^^^^^^^^^^^^
This update contains a few bug fixes and some performance improvements of the
:class:`.NodeSet` class.
The :ref:`tree mode <clush-tree>` has been fixed to properly support offline
gateways.
We added the following command line options:
* ``--conf`` to specify alternative clush.conf (clush only)
* ``--groupsconf`` to specify alternative groups.conf (all CLIs)
In :class:`.EventHandler`, we reinstated :meth:`.EventHandler.ev_error` and
:meth:`.EventHandler.ev_timeout` (as deprecated) for compatibility purposes.
Please see below for more details about important :class:`.EventHandler`
changes in 1.8.
Finally, :ref:`cluset <cluset-tool>`/:ref:`nodeset <nodeset-tool>` have been
improved by adding support for:
* literal new line in ``-S``
* multiline shell variables in options
For more details, please have a look at `GitHub Issues for 1.8.1 milestone`_.
Main changes in 1.8
^^^^^^^^^^^^^^^^^^^
For more details, please have a look at `GitHub Issues for 1.8 milestone`_.
CLI (command line interface)
""""""""""""""""""""""""""""
If you use the :ref:`clush <clush-tool>` or
:ref:`cluset <cluset-tool>`/:ref:`nodeset <nodeset-tool>` tools, there are no
major changes since 1.7, though a few bug fixes and improvements have been
done:
* It is now possible to work with numeric node names with cluset/nodeset::
$ nodeset --fold 6704 6705 r931 r930
[6704-6705],r[930-931]
$ squeue -h -o '%i' -u $USER | cluset -f
[680240-680245,680310]
As a reminder, cluset/nodeset has always had an option to switch to numerical
cluster ranges (only), using ``-R/--rangeset``::
$ squeue -h -o '%i' -u $USER | cluset -f -R
680240-680245,680310
* Node group configuration is now loaded and processed only when required.
This is actually an improvement of the :class:`.NodeSet` class that the
tools readily benefit from. This should improve both usability and performance.
* YAML group files are now ignored for users that don't have the permission
to read them (see :ref:`group-file-based` for more info about group files).
* :ref:`clush <clush-tool>` now use slightly different colors that are legible
on dark backgrounds.
* :ref:`clush-tree`:
+ Better detection of the Python executable, and, if needed, we added a new
environment variable to override it, see :ref:`clush-tree-python`.
+ You must use the same major version of Python on the gateways and the root
node.
.. highlight:: python
Python library
""""""""""""""
If you're a developer and use the ClusterShell Python library, please read
below.
Python 3 support
++++++++++++++++
Starting in 1.8, the library can also be used with Python 3. The code is
compatible with both Python 2 and 3 at the same time. To make it possible,
we performed a full code refactoring (without changing the behavior).
.. note:: When using Python 3, we recommend Python 3.4 or any more recent
version.
Improved Event API
++++++++++++++++++
We've made some changes to :class:`.EventHandler`, a class that defines a
simple interface to handle events generated by :class:`.Worker`,
:class:`.EventTimer` and :class:`.EventPort` objects.
Please note that all programs already based on :class:`.EventHandler` should
work with this new version of ClusterShell without any code change (backward
API compatibility across 1.x versions is enforced). We use object
*introspection*, the ability to determine the type of an object at runtime,
to make the Event API evolve smoothly. We do still recommend to change your
code as soon as possible as we'll break backward compatibility in the future
major release 2.0.
The signatures of the following :class:`.EventHandler` methods **changed** in
1.8:
* :meth:`.EventHandler.ev_pickup`: new ``node`` argument
* :meth:`.EventHandler.ev_read`: new ``node``, ``sname`` and ``msg`` arguments
* :meth:`.EventHandler.ev_hup`: new ``node``, ``rc`` argument
* :meth:`.EventHandler.ev_close`: new ``timedout`` argument
Both old and new signatures are supported in 1.8. The old signatures will
be deprecated in a future 1.x release and **removed** in version 2.0.
The new methods aim to be more convenient to use by avoiding the need of
accessing context-specific :class:`.Worker` attributes like
``worker.current_node`` (replaced with the ``node`` argument in that case).
Also, please note that the following :class:`.EventHandler` methods will be
removed in 2.0:
* ``EventHandler.ev_error()``: its use should be replaced with
:meth:`.EventHandler.ev_read` by comparing the stream name ``sname``
with :attr:`.Worker.SNAME_STDERR`, like in the example below::
class MyEventHandler(EventHandler):
def ev_read(self, worker, node, sname, msg):
if sname == worker.SNAME_STDERR:
print('error from %s: %s' % (node, msg))
* ``EventHandler.ev_timeout()``: its use should be replaced with
:meth:`.EventHandler.ev_close` by checking for the new ``timedout``
argument, which is set to ``True`` when a timeout occurred.
We recommend developers to start using the improved :mod:`.Event` API now.
Please don't forget to update your packaging requirements to use ClusterShell
1.8 or later.
Task and standard input (stdin)
+++++++++++++++++++++++++++++++
:meth:`.Task.shell` and :meth:`.Task.run` have a new ``stdin`` boolean
argument which if set to ``False`` prevents the use of stdin by sending
EOF at first read, like if it is connected to /dev/null.
If not specified, its value is managed by the :ref:`defaults-config`.
Its default value in :class:`.Defaults` is set to ``True`` for backward
compatibility, but could change in a future major release.
If your program doesn't plan to listen to stdin, it is recommended to set
``stdin=False`` when calling these two methods.
.. highlight:: console
Packaging changes
"""""""""""""""""
We recommend that package maintainers use separate subpackages for Python 2
and Python 3, to install ClusterShell modules and related command line tools.
The Python 2 and Python 3 stacks should be fully installable in parallel.
For the RPM packaging, there is now two subpackages
``python2-clustershell`` and ``python3-clustershell`` (or
``python34-clustershell`` in EPEL), each providing
the library and tools for the corresponding version of Python.
The ``clustershell`` package includes the common configuration files and
documentation and requires ``python2-clustershell``, mainly because
Python 2 is still the default interpreter on most operating systems.
``vim-clustershell`` was confusing so we removed it and added the vim
extensions to the main ``clustershell`` subpackage.
Version 1.8 should be readily available as RPMs in the following
distributions or RPM repositories:
* EPEL 6 and 7
* Fedora 26 and 27
* openSUSE Factory and Leap
On a supported environment, you can expect a smooth upgrade from version 1.6+.
We also expect the packaging to be updated for Debian.
Version 1.7
-----------
It's just a small version bump from the well-known 1.6 version, but
ClusterShell 1.7 comes with some nice new features that we hope you'll enjoy!
Most of these features have already been tested on some very large Linux
production systems.
Version 1.7 and possible future minor versions 1.7.x are compatible with
Python 2.4 up to Python 2.7 (for example: from RedHat EL5 to EL7). Upgrade
from version 1.6 to 1.7 should be painless and is fully supported.
Version 1.7.3
^^^^^^^^^^^^^
This update contains a few bug fixes and some interesting performance
improvements. This is also the first release published under the
GNU Lesser General Public License, version 2.1 or later (`LGPL v2.1+`_).
Previous releases were published under the `CeCILL-C V1`_.
Quite a bit of work has been done on the *fanout* of processes that the library
uses to execute commands. We implemented a basic per-worker *fanout* to fix
the broken behaviour in tree mode. Thanks to this, it is now possible to use
fanout=1 with gateways. The :ref:`documentation <clush-tree-fanout>` has also
been clarified.
An issue that led to broken pipe errors but also affected performance has been
fixed in :ref:`tree mode <clush-tree>` when copying files.
An issue with :ref:`clush-tool` -L where nodes weren't always properly sorted
has been fixed.
The performance of :class:`.MsgTree`, the class used by the library to
aggregate identical command outputs, has been improved. We have seen up to 75%
speed improvement in some cases.
Finally, a :ref:`cluset <cluset-tool>` command has been added to avoid a
conflict with `xCAT`_ nodeset command. It is the same command as
:ref:`nodeset-tool`.
For more details, please have a look at `GitHub Issues for 1.7.3 milestone`_.
ClusterShell 1.7.3 is compatible with Python 2.4 up to Python 2.7 (for
example: from RedHat EL5 to EL7). Upgrades from versions 1.6 or 1.7 are
supported.
Version 1.7.2
^^^^^^^^^^^^^
This minor version fixes a defect in :ref:`tree mode <clush-tree>` that led
to broken pipe errors or unwanted backtraces.
The :class:`.NodeSet` class now supports the empty string as input. In
practice, you may now safely reuse the output of a
:ref:`nodeset <nodeset-tool>` command as input argument for another
:ref:`nodeset <nodeset-tool>` command, even if the result is an empty string.
A new option ``--pick`` is available for :ref:`clush <clush-pick>` and
:ref:`nodeset <nodeset-pick>` to pick N node(s) at random from the resulting
node set.
For more details, please have a look at `GitHub Issues for 1.7.2 milestone`_.
ClusterShell 1.7.2 is compatible with Python 2.4 up to Python 2.7 (for
example: from RedHat EL5 to EL7). Upgrades from versions 1.6 or 1.7 are
supported.
Version 1.7.1
^^^^^^^^^^^^^
This minor version contains a few bug fixes, mostly related to
:ref:`guide-NodeSet`.
This version also contains bug fixes and performance improvements in tree
propagation mode.
For more details, please have a look at `GitHub Issues for 1.7.1 milestone`_.
ClusterShell 1.7.1 is compatible with Python 2.4 up to Python 2.7 (for
example: from RedHat EL5 to EL7). Upgrades from versions 1.6 or 1.7 are
supported.
Main changes in 1.7
^^^^^^^^^^^^^^^^^^^
This new version comes with a refreshed documentation, based on the Sphinx
documentation generator, available on http://clustershell.readthedocs.org.
The main new features of version 1.7 are described below.
Multidimensional nodesets
"""""""""""""""""""""""""
The :class:`.NodeSet` class and :ref:`nodeset <nodeset-tool>` command-line
have been improved to support multidimentional node sets with folding
capability. The use of nD naming scheme is sometimes used to map node names to
physical location like ``name-<rack>-<position>`` or node position within the
cluster interconnect network topology.
A first example of 3D nodeset expansion is a good way to start::
$ nodeset -e gpu-[1,3]-[4-5]-[0-6/2]
gpu-1-4-0 gpu-1-4-2 gpu-1-4-4 gpu-1-4-6 gpu-1-5-0 gpu-1-5-2 gpu-1-5-4
gpu-1-5-6 gpu-3-4-0 gpu-3-4-2 gpu-3-4-4 gpu-3-4-6 gpu-3-5-0 gpu-3-5-2
gpu-3-5-4 gpu-3-5-6
You've probably noticed the ``/2`` notation of the last dimension. It's called
a step and behaves as one would expect, and is fully supported with nD
nodesets.
All other :ref:`nodeset <nodeset-tool>` commands and options are supported
with nD nodesets. For example, it's always useful to have a quick way to count
the number of nodes in a nodeset::
$ nodeset -c gpu-[1,3]-[4-5]-[0-6/2]
16
Then to show the most interesting new capability of the underlying
:class:`.NodeSet` class in version 1.7, a folding example is probably
appropriate::
$ nodeset -f compute-1-[1-34] compute-2-[1-34]
compute-[1-2]-[1-34]
In the above example, nodeset will try to find a very compact nodesets
representation whenever possible. ClusterShell is probably the first and only
cluster tool capable of doing such complex nodeset folding.
Note that not all cluster tools support this kind of complex nodeset, even
for nodeset expansion, so we added an ``--axis`` option to select the
dimension(s) to fold along::
$ nodeset --axis 2 -f compute-[1-2]-[1-34]
compute-1-[1-34],compute-2-[1-34]
The last dimension can also be selected using ``-1``::
$ nodeset --axis -1 -f compute-[1-2]-[1-34]
compute-1-[1-34],compute-2-[1-34]
All set-like operations are also supported with several dimensions, for
example *difference* (``-x``)::
$ nodeset -f c-[1-10]-[1-44] -x c-[5-10]-[1-34]
c-[1-4]-[1-44],c-[5-10]-[35-44]
Hard to follow? Don't worry, ClusterShell does it for you!
File-based node groups
""""""""""""""""""""""
Cluster node groups have been a great success of previous version of
ClusterShell and are now widely adopted. So we worked on improving it even
more for version 1.7.
For those of you who use the file ``/etc/clustershell/group`` to describe
node groups, that is still supported in 1.7 and upgrade from your 1.6 setup
should work just fine. However, for new 1.7 installations, we have put this
file in a different location by default::
$ vim /etc/clustershell/groups.d/local.cfg
Especially if you're starting a new setup, you have also the choice to switch
to a more advanced groups YAML configuration file that can define multiple
*sources* in a single file (equivalent to separate namespaces for node
groups). The YAML format possibly allows you to edit the file content with
YAML tools but it's also a file format convenient to edit just using the vim
editor. To enable the example file, you need to rename it first as it needs to
have the **.yaml** extension::
$ cd /etc/clustershell/groups.d
$ mv cluster.yaml.example cluster.yaml
You can make the first dictionary found on this file (named *roles*) to be the
**default** source by changing ``default: local`` to ``default: roles`` in
``/etc/clustershell/groups.conf`` (main config file for groups).
For more info about the YAML group files, please see :ref:`group-file-based`.
Please also see :ref:`node groups configuration <groups-config>` for node
groups configuration in general.
nodeset -L/--list-all option
""""""""""""""""""""""""""""
Additionally, the :ref:`nodeset <nodeset-tool>` command also has a new option
``-L`` or ``--list-all`` to list groups from all sources (``-l`` only lists
groups from the **default** source). This can be useful when configuring
ClusterShell and/or troubleshooting node group sources::
$ nodeset -LL
@adm example0
@all example[2,4-5,32-159]
@compute example[32-159]
@gpu example[156-159]
@io example[2,4-5]
@racks:new example[4-5,156-159]
@racks:old example[0,2,32-159]
@racks:rack1 example[0,2]
@racks:rack2 example[4-5]
@racks:rack3 example[32-159]
@racks:rack4 example[156-159]
@cpu:hsw example[64-159]
@cpu:ivy example[32-63]
Special group @*
""""""""""""""""
The special group syntax ``@*`` (or ``@source:*`` if using explicit source
selection) has been added and can be used in configuration files or with
command line tools. This special group is always available for file-based node
groups (return the content of the **all** group, or all groups from the source
otherwise). For external sources, it is available when either the **all**
upcall is defined or both **map** and **list** upcalls are defined. The all
special group is also used by ``clush -a`` and ``nodeset -a``. For example,
the two following commands are equivalent::
$ nodeset -a -f
example[2,4-5,32-159]
$ nodeset -f @*
example[2,4-5,32-159]
Exec worker
"""""""""""
Version 1.7 introduces a new generic execution worker named
:class:`.ExecWorker` as the new base class for most exec()-based worker
classes. In practice with :ref:`clush-tool`, you can now specify the worker in
command line using ``--worker`` or ``-R`` and use **exec**. It also supports
special placeholders for the node (**%h**) or rank (**%n**). For example, the
following command will execute *ping* commands in parallel, each with a
different host from hosts *cs01*, etc. to *cs05* as argument and then
aggregate the results::
$ clush -R exec -w cs[01-05] -bL 'ping -c1 %h >/dev/null && echo ok'
cs[01-04]: ok
clush: cs05: exited with exit code 1
This feature allows the system administrator to use non cluster-aware tools in
a more efficient way. You may also want to explicitly set the fanout (using
``-f``) to limit the number of parallel local commands launched.
Please see also :ref:`clush worker selection <clush-worker>`.
Rsh worker
""""""""""
Version 1.7 adds support for ``rsh`` or any of its variants like ``krsh`` or
``mrsh``.
``rsh`` and ``ssh`` also share a lot of common mechanisms. Worker Rsh was
added moving a lot of Worker Ssh code into it.
For ``clush``, please see :ref:`clush worker selection <clush-worker>` to
enable ``rsh``.
To use ``rsh`` by default instead of ``ssh`` at the library level, install the
provided example file named ``defaults.conf-rsh`` to
``/etc/clustershell/defaults.conf``.
Tree Propagation Mode
"""""""""""""""""""""
The ClusterShell Tree Mode allows you to send commands to target nodes through
a set of predefined gateways (using ssh by default). It can be useful to
access servers that are behind some other servers like bastion hosts, or to
scale on very large clusters when the flat mode (eg. sliding window of ssh
commands) is not enough anymore.
The tree mode is now :ref:`documented <clush-tree>`, it has been improved and
is enabled by default when a ``topology.conf`` file is found. While it is still
a work in progress, the tree mode is known to work pretty well when all gateways
are online. We'll continue to improve it and make it more robust in the next
versions.
Configuration files
"""""""""""""""""""
When ``$CLUSTERSHELL_CFGDIR`` or ``$XDG_CONFIG_HOME`` are defined,
ClusterShell will use them to search for additional configuration files.
If ``$CLUSTERSHELL_CFGDIR`` is not defined, the global configuration files will
be searched for in ``/etc/clustershell``.
PIP user installation support
"""""""""""""""""""""""""""""
ClusterShell 1.7 is now fully compatible with PIP and supports user
configuration files::
$ pip install --user clustershell
Please see :ref:`install-pip-user`.
.. _GitHub Issues for 1.7.1 milestone: https://github.com/cea-hpc/clustershell/issues?utf8=%E2%9C%93&q=is%3Aissue+milestone%3A1.7.1
.. _GitHub Issues for 1.7.2 milestone: https://github.com/cea-hpc/clustershell/issues?utf8=%E2%9C%93&q=is%3Aissue+milestone%3A1.7.2
.. _GitHub Issues for 1.7.3 milestone: https://github.com/cea-hpc/clustershell/issues?utf8=%E2%9C%93&q=is%3Aissue+milestone%3A1.7.3
.. _GitHub Issues for 1.8 milestone: https://github.com/cea-hpc/clustershell/issues?utf8=%E2%9C%93&q=is%3Aissue+milestone%3A1.8
.. _GitHub Issues for 1.8.1 milestone: https://github.com/cea-hpc/clustershell/issues?utf8=%E2%9C%93&q=is%3Aissue+milestone%3A1.8.1
.. _GitHub Issues for 1.8.2 milestone: https://github.com/cea-hpc/clustershell/issues?utf8=%E2%9C%93&q=is%3Aissue+milestone%3A1.8.2
.. _GitHub Issues for 1.8.3 milestone: https://github.com/cea-hpc/clustershell/issues?utf8=%E2%9C%93&q=is%3Aissue+milestone%3A1.8.3
.. _GitHub Issues for 1.8.4 milestone: https://github.com/cea-hpc/clustershell/issues?utf8=%E2%9C%93&q=is%3Aissue+milestone%3A1.8.4
.. _GitHub Issues for 1.9 milestone: https://github.com/cea-hpc/clustershell/issues?utf8=%E2%9C%93&q=is%3Aissue+milestone%3A1.9
.. _GitHub Issues for 1.9.1 milestone: https://github.com/cea-hpc/clustershell/issues?q=milestone%3A1.9.1
.. _LGPL v2.1+: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
.. _CeCILL-C V1: http://www.cecill.info/licences/Licence_CeCILL-C_V1-en.html
.. _xCAT: https://xcat.org/
| PypiClean |
/GautamX-6.1-py3-none-any.whl/bot/modules/watch.py | from telegram.ext import CommandHandler
from telegram import Bot, Update
from bot import Interval, DOWNLOAD_DIR, DOWNLOAD_STATUS_UPDATE_INTERVAL, dispatcher, LOGGER
from bot.helper.ext_utils.bot_utils import setInterval
from bot.helper.telegram_helper.message_utils import update_all_messages, sendMessage, sendStatusMessage
from .mirror import MirrorListener
from bot.helper.mirror_utils.download_utils.youtube_dl_download_helper import YoutubeDLHelper
from bot.helper.telegram_helper.bot_commands import BotCommands
from bot.helper.telegram_helper.filters import CustomFilters
import threading
def _watch(bot: Bot, update, isTar=False):
    """Parse a /watch command and hand the link to youtube-dl in a thread.

    Expected message shape: ``/watch <link> [quality] |[CustomName]``.
    When no link is given, a usage/help message is sent back instead.
    """
    text = update.message.text
    space_parts = text.split(' ')
    pipe_parts = text.split('|')
    # A link is mandatory; without one, reply with the usage help and stop.
    try:
        link = space_parts[1]
    except IndexError:
        help_msg = f"/{BotCommands.WatchCommand} [yt_dl supported link] [quality] |[CustomName] to mirror with youtube_dl.\n\n"
        help_msg += "<b>Note :- Quality and custom name are optional</b>\n\nExample of quality :- audio, 144, 240, 360, 480, 720, 1080, 2160."
        help_msg += "\n\nIf you want to use custom filename, plz enter it after |"
        help_msg += f"\n\nExample :-\n<code>/{BotCommands.WatchCommand} https://youtu.be/ocX2FN1nguA 720 |My video bro</code>\n\n"
        help_msg += "This file will be downloaded in 720p quality and it's name will be <b>My video bro</b>"
        sendMessage(help_msg, bot, update)
        return
    # Work out the requested quality; any parse failure falls back to
    # "best video + best audio".
    try:
        if "|" in text:
            qual = pipe_parts[0].split(" ")[2]
            if qual == "":
                raise IndexError
        else:
            qual = space_parts[2]
        if qual != "audio":
            qual = f'bestvideo[height<={qual}]+bestaudio/best[height<={qual}]'
    except IndexError:
        qual = "bestvideo+bestaudio/best"
    # Optional custom file name appears after the first '|'.
    name = pipe_parts[1] if len(pipe_parts) > 1 else ""
    reply_to = update.message.reply_to_message
    tag = reply_to.from_user.username if reply_to is not None else None
    pswd = ""
    listener = MirrorListener(bot, update, pswd, isTar, tag)
    ydl = YoutubeDLHelper(listener)
    # Download runs in a background thread so the handler returns promptly.
    threading.Thread(
        target=ydl.add_download,
        args=(link, f'{DOWNLOAD_DIR}{listener.uid}', qual, name)
    ).start()
    sendStatusMessage(update, bot)
    # Start the periodic status-message refresher once, for the first job.
    if len(Interval) == 0:
        Interval.append(setInterval(DOWNLOAD_STATUS_UPDATE_INTERVAL, update_all_messages))
def watchTar(update, context):
    # /tarwatch handler: download via youtube-dl, then upload as a tar archive.
    _watch(context.bot, update, isTar=True)
def watch(update, context):
    # /watch handler: download via youtube-dl and mirror the result as-is.
    _watch(context.bot, update, isTar=False)
# Register the /watch and /tarwatch commands with the dispatcher.  Both are
# restricted to authorized chats/users and run asynchronously.
mirror_handler = CommandHandler(BotCommands.WatchCommand, watch,
                                filters=CustomFilters.authorized_chat | CustomFilters.authorized_user, run_async=True)
tar_mirror_handler = CommandHandler(BotCommands.TarWatchCommand, watchTar,
                                    filters=CustomFilters.authorized_chat | CustomFilters.authorized_user, run_async=True)
dispatcher.add_handler(mirror_handler)
dispatcher.add_handler(tar_mirror_handler)
/OBP_reliability_pillar_2-0.0.13.tar.gz/OBP_reliability_pillar_2-0.0.13/OBP_reliability_pillar_2/dynamodb/dynamodb_autoscaling_enabled.py | import botocore
import logging
from OBP_reliability_pillar_2.dynamodb.utils import list_dynamodb_tables
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()  # module-level logger shared by this check
# Compliance check: is DynamoDB auto-scaling (or On-Demand) enabled?
def dynamodb_autoscaling_enabled(self) -> dict:
    """
    Check whether Application Auto Scaling is registered for every DynamoDB
    table (and its global secondary indexes) in every available region.

    :param self: compliance-checker instance exposing a boto3 ``session``
    :return: dict with the overall result, failure reason, offenders list
             and check metadata
    """
    logger.info(" ---Inside dynamodb :: dynamodb_autoscaling_enabled()")
    result = True
    failReason = ''
    offenders = []
    compliance_type = "Dynamodb autoscaling enabled"
    description = "Checks if Auto Scaling or On-Demand is enabled on your DynamoDB tables"
    resource_type = "Dynamodb"
    risk_level = 'Medium'
    regions = self.session.get_available_regions('dynamodb')
    for region in regions:
        try:
            client = self.session.client('dynamodb', region_name=region)
            # describe_scalable_targets belongs to the Application Auto
            # Scaling API, not to the DynamoDB API (the original code called
            # it on the dynamodb client, which has no such method).
            autoscaling_client = self.session.client(
                'application-autoscaling', region_name=region)
            dynamodb_tables = list_dynamodb_tables(client)
            for table in dynamodb_tables:
                response = client.describe_table(
                    TableName=table
                )
                # describe_table returns its payload under 'Table' (capital
                # T) and omits 'GlobalSecondaryIndexes' entirely for tables
                # without GSIs, so use .get() with a default.
                table_desc = response['Table']
                global_secondary_index = [
                    index['IndexName']
                    for index in table_desc.get('GlobalSecondaryIndexes', [])
                ]
                # Application Auto Scaling resource ids use the
                # 'table/<name>' and 'table/<name>/index/<index>' formats.
                # (The original built this with list.extend(), which returns
                # None, so ResourceIds was always None.)
                resource_ids = ['table/{}'.format(table)] + [
                    'table/{}/index/{}'.format(table, index)
                    for index in global_secondary_index
                ]
                response_scalable_targets = autoscaling_client.describe_scalable_targets(
                    ServiceNamespace='dynamodb',
                    ResourceIds=resource_ids
                )
                if len(response_scalable_targets['ScalableTargets']) == 0:
                    result = False
                    failReason = 'AWS DynamoDB Auto Scaling is not enabled for the table and/or its global secondary index.'
                    offenders.append(table)
        except botocore.exceptions.ClientError as e:
            logger.error("Something went wrong with region {}: {}".format(region, e))
    return {
        'Result': result,
        'failReason': failReason,
        'resource_type': resource_type,
        'Offenders': offenders,
        'Compliance_type': compliance_type,
        'Description': description,
        'Risk Level': risk_level
    }
/Elephantoplasty-0.1.zip/Elephantoplasty-0.1/doc/basics/objects.rst | ---------------------------------------------------
Introduction to Elephantoplasty objects
---------------------------------------------------
As the name suggests, object relational mapper is about objects which map to
data in the relational database. Objects are representation of table rows while
classes represent entire tables. In Elephantoplasty objects that store
persistent data inherit from `py:eplasty.Object` class. This class itself is
abstract and cannot be instantiated. To create an object you have to first
create a child class with several `py:eplasty.Field` attributes. These fields
mostly represent columns of the table, however sometimes they have different
meaning.
As an example, let us create a class and some instances of it::
>>> import eplasty as ep
    >>> class Bird(ep.Object):
... species = ep.f.CharacterVarying(length=30)
... voltage = ep.f.Integer(default=0)
... sound = ep.f.CharacterVarying(length=30)
...
>>> parrot = Bird(species='Parrot', voltage='2000', sound='voom')
>>> swallow = Bird()
    >>> swallow.species = 'Swallow'
As you can see, there are two ways of setting the field values. One is by
passing arguments to constructor, the other - to set them as attributes.
Unlike SQLAlchemy_ which has two modes - one with explicit table creation and
one called 'declarative', Elephantoplasty features only 'declarative'. The
database schema is created based on the class declaration. Two things are
deduced:
.. _SQLAlchemy: http://sqlalchemy.org/
* The table name can be specified with ``__table_name__``. It defaults to the
pluralization of the class name (here it would be "birds").
* The default primary key is called ``id`` and is a column of type serial_. You
can also provide a different primary key.
| PypiClean |
/BlueWhale3-3.31.3.tar.gz/BlueWhale3-3.31.3/Orange/widgets/model/owloadmodel.py | import os
import pickle
from typing import Any, Dict
from AnyQt.QtWidgets import QSizePolicy, QStyle, QFileDialog
from AnyQt.QtCore import QTimer
from orangewidget.workflow.drophandler import SingleFileDropHandler
from Orange.base import Model
from Orange.widgets import widget, gui
from Orange.widgets.model import owsavemodel
from Orange.widgets.utils.filedialogs import RecentPathsWComboMixin, RecentPath
from Orange.widgets.utils import stdpaths
from Orange.widgets.utils.widgetpreview import WidgetPreview
from Orange.widgets.widget import Msg, Output
from Orange.i18n_config import *
def __(key):
    """Translate an i18n key scoped to this widget's message catalogue."""
    return i18n.t("widget.model.model.owloadmodel." + key)
class OWLoadModel(widget.OWWidget, RecentPathsWComboMixin):
    """Widget that loads a pickled Orange model from disk and outputs it."""
    name = __("name")
    description = __("desc")
    priority = 3050
    replaces = ["Orange.widgets.classify.owloadclassifier.OWLoadClassifier"]
    icon = "icons/LoadModel.svg"
    keywords = ["file", "open", "model"]
    class Outputs:
        # The unpickled model object (or None on load failure).
        model = Output("Model", Model, label=i18n.t("common.general.model"))
    class Error(widget.OWWidget.Error):
        # Shown when the selected file cannot be unpickled/read.
        load_error = Msg(__("msg_reading_error"))
    # File-dialog filter string built from the save-model widget's formats.
    FILTER = ";;".join(owsavemodel.OWSaveModel.filters)
    want_main_area = False
    buttons_area_orientation = None
    resizing_enabled = False
    def __init__(self):
        """Build the file-picker UI and schedule loading of the last file."""
        super().__init__()
        RecentPathsWComboMixin.__init__(self)
        self.loaded_file = ""
        vbox = gui.vBox(self.controlArea, __("box_file"))
        box = gui.hBox(vbox)
        self.file_combo.setMinimumWidth(300)
        box.layout().addWidget(self.file_combo)
        self.file_combo.activated[int].connect(self.select_file)
        # '...' button opens the file browser.
        button = gui.button(box, self, '...', callback=self.browse_file)
        button.setIcon(self.style().standardIcon(QStyle.SP_DirOpenIcon))
        button.setSizePolicy(
            QSizePolicy.Maximum, QSizePolicy.Fixed)
        # Reload button re-reads the currently selected file.
        button = gui.button(
            box, self, i18n.t("common.btn.reload"), callback=self.reload, default=True)
        button.setIcon(self.style().standardIcon(QStyle.SP_BrowserReload))
        button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        self.set_file_list()
        # Defer the initial load until the event loop is running.
        QTimer.singleShot(0, self.open_file)
    def browse_file(self):
        """Show a file-open dialog and load the chosen model file."""
        start_file = self.last_path() or stdpaths.Documents
        filename, _ = QFileDialog.getOpenFileName(
            self, __("tooltip_open_distance_file"), start_file, self.FILTER)
        if not filename:
            return
        self.add_path(filename)
        self.open_file()
    def select_file(self, n):
        """Handle selection of entry *n* in the recent-files combo box."""
        super().select_file(n)
        self.open_file()
    def reload(self):
        """Re-read the currently selected file."""
        self.open_file()
    def open_file(self):
        """Unpickle the selected file and send the model (None on failure)."""
        self.clear_messages()
        fn = self.last_path()
        if not fn:
            return
        try:
            with open(fn, "rb") as f:
                model = pickle.load(f)
        except (pickle.UnpicklingError, OSError, EOFError):
            # Report only the base file name in the error message.
            self.Error.load_error(os.path.split(fn)[-1])
            self.Outputs.model.send(None)
        else:
            self.Outputs.model.send(model)
class OWLoadModelDropHandler(SingleFileDropHandler):
    """Drop handler: dropping a ``.pkcls`` file creates an OWLoadModel."""

    WIDGET = OWLoadModel

    def canDropFile(self, path: str) -> bool:
        # Only pickled-model files carry the .pkcls suffix.
        return path.endswith(".pkcls")

    def parametersFromFile(self, path: str) -> Dict[str, Any]:
        # Seed the widget's recent-paths list with the dropped file.
        recent = RecentPath(os.path.abspath(path), None, None,
                            os.path.basename(path))
        return {"recent_paths": [recent]}
# Manual preview entry point for widget development.
if __name__ == "__main__": # pragma: no cover
    WidgetPreview(OWLoadModel).run()
/FormBuild-4.0.0.tar.gz/FormBuild-4.0.0/formbuild/__init__.py | import logging
import re
from cgi import escape
from markupsafe import Markup
from bn import HTMLFragment
log = logging.getLogger(__name__)
# Use the stdlib OrderedDict when available; otherwise fall back to a
# vendored pure-Python backport (ActiveState recipe 576693, left unmodified).
try:
    from collections import OrderedDict
except ImportError: # Python 2.5 and below
    ## {{{ http://code.activestate.com/recipes/576693/ (r6)
    from UserDict import DictMixin
    class OrderedDict(dict, DictMixin):
        # Insertion order is tracked with a circular doubly linked list of
        # [key, prev, next] cells; dict stores the actual key -> value data.
        def __init__(self, *args, **kwds):
            if len(args) > 1:
                raise TypeError('expected at most 1 arguments, got %d' % len(args))
            try:
                self.__end
            except AttributeError:
                self.clear()
            self.update(*args, **kwds)
        def clear(self):
            self.__end = end = []
            end += [None, end, end] # sentinel node for doubly linked list
            self.__map = {} # key --> [key, prev, next]
            dict.clear(self)
        def __setitem__(self, key, value):
            if key not in self:
                end = self.__end
                curr = end[1]
                curr[2] = end[1] = self.__map[key] = [key, curr, end]
            dict.__setitem__(self, key, value)
        def __delitem__(self, key):
            dict.__delitem__(self, key)
            key, prev, next = self.__map.pop(key)
            prev[2] = next
            next[1] = prev
        def __iter__(self):
            end = self.__end
            curr = end[2]
            while curr is not end:
                yield curr[0]
                curr = curr[2]
        def __reversed__(self):
            end = self.__end
            curr = end[1]
            while curr is not end:
                yield curr[0]
                curr = curr[1]
        def popitem(self, last=True):
            if not self:
                raise KeyError('dictionary is empty')
            if last:
                key = reversed(self).next()
            else:
                key = iter(self).next()
            value = self.pop(key)
            return key, value
        def __reduce__(self):
            # Pickle support: temporarily strip the linked-list attributes
            # so only real instance state is captured.
            items = [[k, self[k]] for k in self]
            tmp = self.__map, self.__end
            del self.__map, self.__end
            inst_dict = vars(self).copy()
            self.__map, self.__end = tmp
            if inst_dict:
                return (self.__class__, (items,), inst_dict)
            return self.__class__, (items,)
        def keys(self):
            return list(self)
        setdefault = DictMixin.setdefault
        update = DictMixin.update
        pop = DictMixin.pop
        values = DictMixin.values
        items = DictMixin.items
        iterkeys = DictMixin.iterkeys
        itervalues = DictMixin.itervalues
        iteritems = DictMixin.iteritems
        def __repr__(self):
            if not self:
                return '%s()' % (self.__class__.__name__,)
            return '%s(%r)' % (self.__class__.__name__, self.items())
        def copy(self):
            return self.__class__(self)
        @classmethod
        def fromkeys(cls, iterable, value=None):
            d = cls()
            for key in iterable:
                d[key] = value
            return d
        def __eq__(self, other):
            # Order-sensitive comparison against other OrderedDicts only.
            if isinstance(other, OrderedDict):
                return len(self)==len(other) and self.items() == other.items()
            return dict.__eq__(self, other)
        def __ne__(self, other):
            return not self == other
    ## end of http://code.activestate.com/recipes/576693/ }}}
class XHTMLBuilder(object):
    """Tag builder producing XHTML output (empty tags end with ``/>``)."""

    close = Markup('/>')

    def html_open(self, name, close, attributes=None):
        """\
        Returns an HTML open tag for ``name`` with everything properly escaped.
        """
        tag = Markup('<') + name
        if attributes is not None:
            for attr_name, attr_value in attributes.items():
                # The Markup template escapes interpolated non-Markup values.
                tag += Markup(' %s="%s"') % (attr_name, attr_value)
        if close and self.close:
            tag += Markup(' %s') % self.close
        else:
            tag += Markup('>')
        return tag

    def html_close(self, name):
        """\
        Returns an HTML close tag for ``name``
        """
        return Markup('</%s>') % (name,)
class HTMLBuilder(XHTMLBuilder):
    # Plain-HTML variant: ``close`` is None, so html_open() never appends the
    # XHTML-style '/>' terminator to empty tags.
    close = None
def check_attributes(attributes, to_exclude):
    """\
    Normalise a user-supplied attribute mapping for HTML rendering.

    Values are coerced to ``unicode`` (integers are wrapped in ``Markup``),
    and an exception is raised if any key named in ``to_exclude`` is
    present -- those attributes are managed by the field helpers
    themselves.  Returns an ``OrderedDict`` so the attribute order is
    preserved in the rendered tag (or ``{}`` when ``attributes`` is None).
    """
    if attributes is None:
        return {}
    final = OrderedDict()
    attribute_keys = []
    for key in attributes:
        # Track a lower-cased unicode copy of each key for the
        # case-insensitive exclusion check below.
        if not isinstance(key, unicode):
            try:
                attribute_keys.append(unicode(key.lower()))
            except: # bare except as in original: any coercion failure is fatal
                raise Exception('Attribute keys should be unicode values, so %r is an invalid value'%key)
        else:
            attribute_keys.append(key.lower())
        if isinstance(attributes[key], (int, long)):
            final[key] = Markup(attributes[key])
        elif not isinstance(attributes[key], unicode):
            try:
                final[key] = unicode(attributes[key])
            except: # bare except as in original: any coercion failure is fatal
                raise Exception('Attribute values should be unicode values, so %r is an invalid value'%attributes[key])
        else:
            final[key] = attributes[key]
    for attribute in to_exclude:
        if attribute in attribute_keys:
            raise Exception(
                "You cannot directly specify %r as a field attribute, "
                "instead use the correct API for the field you are trying "
                "to create" % (
                    attribute,
                )
            )
    return final
def _handle_input(type, name, value, attributes, builder):
    """Render a generic ``<input>`` tag of the given ``type``.

    A ``value`` of ``None`` means no value="" attribute is rendered at all.
    """
    attrs = check_attributes(attributes, ['type', 'name', 'value'])
    attrs.update(dict(type=type, name=name))
    if value is not None:
        attrs['value'] = value
    return builder.html_open('input', True, attrs)
def _split(name):
parsed_options = []
for part in name.split('.'):
parts = part.split('[')
name = parts[0]
number = None
if len(parts):
number = parts[1].replace(']', '')
parsed_options.append((name, number))
return parsed_options
def group(
    name,
    selected_values,
    options,
    group_type,
    align='horiz',
    cols=4,
    sub_name=None,
    builder=None
):
    """\
    Render a group of ``checkbox`` or ``radio`` ``<input>`` elements.

    ``align`` may be ``'horiz'`` (items separated by newlines), ``'vert'``
    (one item per line via ``<br />``) or ``'table'`` (a table layout with
    ``cols`` columns).  ``sub_name`` produces nested field names of the
    form ``name[i].sub_name``; ``selected_values`` holds the values that
    should be rendered as checked.
    """
    if builder is None:
        builder = XHTMLBuilder()
    if not group_type in ['checkbox', 'radio']:
        raise Exception('Invalid group type %s'%group_type)
    if selected_values is None:
        raise Exception(selected_values)
    fragment = HTMLFragment()
    item_counter = 0
    if len(options) > 0:
        if align <> 'table':
            for option in options:
                v = option['id']
                k = option['label']
                checked=u''
                # This isn't a good enough check
                if unicode(v) in selected_values:
                    checked=' checked="checked"'
                break_ = u'\n'
                if align == 'vert':
                    break_='<br />\n'#builder.html_open('br', close=True)+'\n'
                fragment.safe('<input type="')
                # It was checked earlier, so it is safe
                fragment.safe(group_type)
                fragment.safe('" name="')
                if sub_name:
                    fragment.write(name)
                    fragment.safe('[%s].'%(item_counter))
                    fragment.write(sub_name)
                else:
                    fragment.write(name)
                fragment.safe('" value="')
                fragment.write(unicode(v))
                fragment.safe('"'+checked+' /> ')
                fragment.write(unicode(k))
                fragment.safe(break_)
                item_counter += 1
        else:
            fragment.safe(
                u'<table border="0" width="100%" cellpadding="0" '
                u'cellspacing="0">\n <tr>\n'
            )
            counter = -1
            for option in options:
                counter += 1
                if ((counter % cols) == 0) and (counter <> 0):
                    fragment.safe(u' </tr>\n <tr>\n')
                fragment.safe(' <td>')
                checked=u''
                # NOTE(review): 'align' is reset here but is not read again in
                # this branch -- looks like leftover code; confirm.
                align=u''
                v = option['id']
                k = option['label']
                if unicode(v) in selected_values:
                    checked=' checked="checked"'
                break_ = u'</td>\n <td> </td>\n'
                fragment.safe('<input type="')
                # It was checked earlier, so it is safe
                fragment.safe(group_type)
                fragment.safe('" name="')
                if sub_name:
                    fragment.write(name)
                    fragment.safe('[%s].'%(item_counter))
                    fragment.write(sub_name)
                else:
                    fragment.write(name)
                fragment.safe('" value="')
                fragment.write(unicode(v))
                fragment.safe('"'+checked+' /> ')
                fragment.write(unicode(k))
                fragment.safe(break_)
                item_counter += 1
                # NOTE(review): 'counter' is also incremented at the top of
                # this loop; the double increment per option looks suspicious
                # (each row would hold cols/2 items) -- confirm against the
                # callers' expected table layout before changing.
                counter += 1
            while (counter % cols):
                counter += 1
                fragment.safe(
                    u' <td></td>\n '
                    u'<td> </td>\n'
                )
            fragment.safe(u' </tr>\n</table>\n')
    # Drop the trailing separator/newline added after the last item.
    return Markup(fragment.getvalue()[:-1])
def _checkable(checked, type, name, value, attributes=None, builder=None):
    """Render a ``checkbox``/``radio`` ``<input>``, adding
    ``checked="checked"`` when the ``checked`` mapping says so for ``name``.
    """
    html_builder = builder if builder is not None else XHTMLBuilder()
    attrs = check_attributes(
        attributes,
        ['type', 'name', 'checked', 'value'],
    )
    attrs.update(dict(type=type, name=name, value=value))
    # Only an explicit True marks the box as checked.
    if checked.get(name, False) is True:
        attrs['checked'] = u'checked'
    return html_builder.html_open('input', True, attrs)
def _select(
    value,
    options,
    multiple,
    name,
    attributes=None,
    get_option_attributes=None,
    self=None,
):
    """\
    Private method for generating ``<select>`` fields.
    You should use ``dropdown()`` for single value selects or ``combo()``
    for multi-value selects.

    ``self`` is the owning ``Field`` instance whose ``builder`` renders the
    tags; ``get_option_attributes``, when given, is called as
    ``get_option_attributes(self, id, label)`` to supply extra attributes
    for each ``<option>``.
    """
    attributes = check_attributes(attributes, ['name', 'multiple'])
    if multiple:
        attributes['multiple'] = 'multiple'
    attributes['name'] = name
    fragment = Markup(u'')
    fragment += self.builder.html_open(u'select', False, attributes)+Markup(u'\n')
    # NOTE(review): 'counter' is assigned but never used.
    counter = 0
    for option in options:
        v = option['id']
        k = option['label']
        if get_option_attributes:
            option_attr = get_option_attributes(self, v, k)
        else:
            option_attr = {}
        option_attr = check_attributes(option_attr, ['value', 'selected'])
        # Selection compares the unicode form of the id against the
        # supplied value(s).
        if unicode(v) in value:
            option_attr['selected'] = 'selected'
        option_attr['value'] = v
        fragment += self.builder.html_open(u'option', False, option_attr)
        fragment += k
        fragment += self.builder.html_close('option')+Markup(u'\n')
    fragment += self.builder.html_close('select')
    return fragment
class Field(object):
    def __init__(
        self,
        value=None,
        option=None,
        checked=None,
        builder=None,
    ):
        """\
        ``value``
            a dictionary of field values where the name represents the
            field name and the value is a Unicode string representing the
            field value.

        ``option``
            an iterable of ``(value, label)`` pairs. The value is what's
            returned to the application if this option is chosen; the label
            is what's shown in the form. You can also pass an iterable of
            strings in which case the labels will be identical to the
            values.

        ``checked``
            a dictionary where the keys are field names and the values are
            ``True`` or ``False`` depending on whether the box should be
            checked or not. The value of a checked checkbox comes from the
            ``value`` argument. Checkbox groups are handled differently, so
            this argument only applies to single checkboxes.

        ``builder``
            a custom HTML builder to use. Defaults to ``XHTMLBuilder`` if
            not specified.
        """
        self.builder = builder or XHTMLBuilder()
        _ensure_flat_and_set(self, 'value', value)
        # Normalise/validate 'option' into {field_name: [option dicts]}.
        option_final = {}
        if option is not None:
            if not isinstance(option, dict):
                raise Exception("Expected the 'option' argument to be a dictionary")
            else:
                for k in option:
                    options = []
                    if not isinstance(option[k], (list, tuple)):
                        raise Exception("Expected the value of the 'option' argument's %r key to be a list or tuple of dictionaries"%(k,))
                    for i, item in enumerate(option[k]):
                        if not isinstance(item, dict):
                            error = (
                                "Expected item %s in the list of options for the "
                                "%r field to be a dictionary not %s with value "
                                "%s..." % (
                                    i,
                                    k,
                                    str(type(item))[1:-1],
                                    item,
                                )
                            )
                            # Legacy (value, label) pairs are accepted with a
                            # warning and converted to the dict form.
                            if isinstance(item, (list, tuple)) and len(item) == 2 and isinstance(item[0], (str, unicode)) and isinstance(item[1], (str, unicode)):
                                log.warning(error+'. Converting to a dict')
                                options.append({u'id': item[0], u'label': item[1]})
                            else:
                                raise Exception(error)
                        else:
                            # Proper dict options must carry unicode 'id'
                            # and 'label' entries.
                            for key in ['id', 'label']:
                                if not item.has_key(key):
                                    raise Exception("Expected item %s in the list of options for the %r field to be dictionary with a key named %r"%(i, k, key))
                                if not isinstance(item[key], unicode):
                                    raise Exception("Expected item %s in the list of options for the %r field to be dictionary where the key named %r has a value which is a unicode string, not %r"%(i, k, key, item[key]))
                            options.append(item)
                    option_final[k] = options
            self.option = option_final
        else:
            self.option = {}
        # 'checked' must map field names to plain booleans.
        if checked is not None:
            if not isinstance(checked, dict):
                raise Exception("Expected the 'checked' argument to be a dictionary")
            else:
                for k in checked:
                    if not isinstance(checked[k], bool):
                        raise Exception("Expected the values of the 'checked' argument to be True or False, but the %s key has a value %s"%(k, checked[k]))
            self.checked = checked
        else:
            self.checked = {}
#
# Single value fields
#
def password(self, name=u"password", attributes=None, populate=True):
"""\
Creates a password field
``name``
Defaults to ``password``.
>>> field = Field(value=dict(name=u'James>'))
>>> print field.password(u'name')
<input type="password" name="name" value="James>" />
>>> print field.password(u'name', populate=False)
<input type="password" name="name" value="" />
"""
return _handle_input(
'password',
name,
populate and self.value.get(name) or u'',
attributes,
self.builder,
)
def hidden(self, name, attributes=None):
"""\
Creates a hidden field.
Note: You can also add hidden fields to a ``Form`` instance in the ``end()`` or
``end_with_layout()`` fields by specifying the names of the all the
hidden fields you want added.
>>> field = Field(value=dict(name=u'value'))
>>> print field.hidden(u'name')
<input type="hidden" name="name" value="value" />
"""
return _handle_input(
'hidden',
name,
self.value.get(name),
attributes,
self.builder,
)
def text(self, name, attributes=None):
"""\
Create a text input field.
>>> field = Field()
>>> print field.text('name')
<input type="text" name="name" />
>>> field = Field(value=dict(name=u'James>'))
>>> print field.text('name')
<input type="text" name="name" value="James>" />
"""
return _handle_input(
'text',
name,
self.value.get(name),
attributes,
self.builder,
)
def textarea(self, name, attributes=None):
"""\
Creates a textarea field.
>>> field = Field(value=dict(name=u'James>'))
>>> print field.textarea('name')
<textarea name="name">James></textarea>
"""
attributes = check_attributes(attributes, ['name'])
attributes["name"] = name
return self.builder.html_open('textarea', False, attributes)+\
(self.value.get(name) or u'')+self.builder.html_close(u'textarea')
#
# Zero Value fields
#
def static(self, name):
"""\
Return the static value instead of an HTML field.
>>> field = Field(value=dict(name=u'James>'))
>>> print field.static('name')
James>
"""
value = self.value.get(name)
return escape(unicode(value))
def file(self, name, attributes=None):
"""\
Creates a file upload field.
If you are using file uploads then you will also need to set the
form's ``enctype`` attribute to ``"multipart/form-data"`` and
ensure the ``method`` attribute is set to ``POST``.
Example:
>>> field = Field()
>>> print field.file('myfile')
<input type="file" name="myfile" />
Note: File fields cannot have a ``value`` attribute.
"""
return _handle_input(
'file',
name,
None,
attributes,
self.builder,
)
#
# Single value fields with read-only values set at desgin time
#
def image_button(self, name, value, src, alt=None, attributes=None):
"""\
Create a submit button with an image background
``value``
The value of the field. Also used as the ``alt`` text if ``alt``
is not also specified.
``src``
The URL of the image to use as the button
``alt``
The text to use as the alt text
>>> field = Field()
>>> print field.image_button('go', 'Next', '../go.png', alt='Go')
<input src="../go.png" alt="Go" type="image" name="go" value="Next" />
"""
if alt is None:
alt=value
attributes = check_attributes(
attributes,
['type', 'name', 'value', 'src', 'alt']
)
attributes.update(
dict(
type='image',
name=name,
value=value,
src=src,
alt=alt,
)
)
return self.builder.html_open('input', True, attributes)
def submit(self, name='sumbit', value='Submit', attributes=None):
"""\
Creates a submit button with the text ``<tt>value</tt>`` as the
caption.
>>> field = Field()
>>> print field.submit(u'name', u'Submit >')
<input type="submit" name="name" value="Submit >" />
"""
return _handle_input(
'submit',
name,
value,
attributes,
self.builder,
)
#
# Single value fields whose value is set at constuct time but should not
# be allowed to change
#
def checkbox(self, name, value, attributes=None):
"""\
Creates a check box.
>>> field = Field()
>>> print field.checkbox('name', 'James >')
<input type="checkbox" name="name" value="James >" />
>>> field = Field(value={u'name': u'Set at runtime'})
>>> print field.checkbox(u'name', u'Set at design time')
<input type="checkbox" name="name" value="Set at design time" />
>>> field = Field(checked={'name': True})
>>> print field.checkbox('name', 'J>')
<input checked="checked" type="checkbox" name="name" value="J>" />
"""
return _checkable(self.checked, 'checkbox', name, value, attributes, self.builder)
def radio(self, name, value, attributes=None):
"""\
Creates a radio button.
>>> field = Field()
>>> print field.radio('name', 'James >')
<input type="radio" name="name" value="James >" />
>>> field = Field(value={u'name': u'Set at runtime'})
>>> print field.radio(u'name', u'Set at design time')
<input type="radio" name="name" value="Set at design time" />
>>> field = Field(checked={'name': True})
>>> print field.radio('name', 'J>')
<input checked="checked" type="radio" name="name" value="J>" />
"""
return _checkable(self.checked, 'radio', name, value, attributes, self.builder)
#
# Single value fields with options
#
    def dropdown(self, name, option=None, attributes=None, get_option_attributes=None):
        """\
        Create a single-valued <select> field

        >>> field = Field(
        ...     value={u'fruit': u'1'},
        ...     option={
        ...         u'fruit': [
        ...             (u'1', u'Bananas'),
        ...             (u'2>', u'Apples <>'),
        ...             (u'3', u'Pears'),
        ...         ]
        ...     }
        ... )
        >>> print field.dropdown('fruit')
        <select name="fruit">
        <option selected="selected" value="1">Bananas</option>
        <option value="2>">Apples <></option>
        <option value="3">Pears</option>
        </select>

        If no options for the select field are specified in the ``Field``
        constructor, no options will be rendered:

        >>> field = Field(
        ...     value={u'fruit': u'1'},
        ...     option={}
        ... )
        >>> print field.dropdown(u'fruit')
        <select name="fruit">
        </select>

        Create a single-valued <select> field from nested data with shared
        options:

        >>> field = Field(
        ...     value={u'fruit[0].id': u'1', u'fruit[1].id': u'3'},
        ...     option={
        ...         u'fruit[*].id': [
        ...             (u'1', u'Bananas'),
        ...             (u'2>', u'Apples <>'),
        ...             (u'3', u'Pears'),
        ...         ]
        ...     }
        ... )
        >>> print field.dropdown('fruit[0].id')
        <select name="fruit[0].id">
        <option selected="selected" value="1">Bananas</option>
        <option value="2>">Apples <></option>
        <option value="3">Pears</option>
        </select>
        >>> print field.dropdown('fruit[1].id')
        <select name="fruit[1].id">
        <option value="1">Bananas</option>
        <option value="2>">Apples <></option>
        <option selected="selected" value="3">Pears</option>
        </select>
        """
        value = self.value.get(name, u'')
        # Nested names like 'fruit[0].id' resolve their options through the
        # shared 'fruit[*].id' declaration via _get_option().
        if '.' in name or '[' in name:
            parts = name.split('.')
            name_ = '.'.join(parts[:-1])
            sub_name = parts[-1]
            real_option = _get_option(self, option, name_, sub_name=sub_name)
        else:
            # NOTE(review): for flat names the 'option' argument is ignored
            # and only constructor-supplied options are used -- confirm this
            # asymmetry is intended.
            real_option = self.option.get(name, [])
        if not isinstance(value, (str, unicode)):
            raise Exception(
                'The value for a dropdown should be a unicode '
                'string, not %r'%(
                    type(value),
                )
            )
        return _select(self.value.get(name, []), real_option, False, name, attributes, get_option_attributes, self)
def radio_group(self, name, option=None, align='horiz', cols=4, sub_name=None):
    """Radio Group Field.

    ``value``
        The value of the selected option, or ``None`` if no radio button
        is selected

    ``align``
        can be ``'horiz'`` (default), ``'vert'`` or ``table``. If table layout is
        chosen then you can also use the ``cols`` argument to specify the number
        of columns in the table, the default is 4.

    Examples (deliberately including some '>' characters to check they are properly escaped)

    >>> field = Field(
    ...     value={u'fruit': u'1'},
    ...     option={
    ...         u'fruit': [
    ...             (u'1', u'Bananas'),
    ...             (u'2>', u'Apples <>'),
    ...             (u'3', u'Pears'),
    ...         ]
    ...     }
    ... )
    >>> print field.radio_group('fruit')
    <input type="radio" name="fruit" value="1" checked="checked" /> Bananas
    <input type="radio" name="fruit" value="2>" /> Apples <>
    <input type="radio" name="fruit" value="3" /> Pears
    >>> print field.radio_group('fruit', align='vert')
    <input type="radio" name="fruit" value="1" checked="checked" /> Bananas<br />
    <input type="radio" name="fruit" value="2>" /> Apples <><br />
    <input type="radio" name="fruit" value="3" /> Pears<br />
    >>> print field.radio_group('fruit', align='table', cols=2)
    <table border="0" width="100%" cellpadding="0" cellspacing="0">
    <tr>
    <td><input type="radio" name="fruit" value="1" checked="checked" /> Bananas</td>
    <td> </td>
    <td><input type="radio" name="fruit" value="2>" /> Apples <></td>
    <td> </td>
    </tr>
    <tr>
    <td><input type="radio" name="fruit" value="3" /> Pears</td>
    <td> </td>
    <td></td>
    <td> </td>
    </tr>
    </table>

    If no options are present in the ``Field`` constructor, none will be
    rendered:

    >>> field = Field(
    ...     value={u'fruit': u'1'},
    ...     option={}
    ... )
    >>> field.radio_group('fruit')
    Markup(u'')
    >>> field.radio_group('fruit', align='table')
    Markup(u'')

    Here's an example with nested variables:

    >>> field = Field(
    ...     value={u'fruit[0].id': u'1'},
    ...     option={
    ...         u'fruit[*].id': [
    ...             (u'1', u'Bananas'),
    ...             (u'2>', u'Apples <>'),
    ...             (u'3', u'Pears'),
    ...         ]
    ...     }
    ... )
    >>> print field.radio_group('fruit[0].id')
    <input type="radio" name="fruit[0].id" value="1" checked="checked" /> Bananas
    <input type="radio" name="fruit[1].id" value="2>" /> Apples <>
    <input type="radio" name="fruit[2].id" value="3" /> Pears
    """
    if '.' in name or '[' in name:
        # Nested name: strip the trailing index from the prefix (eg
        # 'fruit[0].id' -> base name 'fruit') so that group() can
        # regenerate the indexed names itself, and resolve the shared
        # options via _get_option().  Note this overwrites any
        # ``sub_name`` argument supplied by the caller.
        parts = name.split('.')
        name_ = '['.join('.'.join(parts[:-1]).split('[')[:-1])
        sub_name = parts[-1]
        real_option = _get_option(self, option, name_, sub_name=sub_name)
    else:
        # NOTE(review): the ``option`` argument is ignored on this
        # (non-nested) branch -- confirm whether that is intended.
        name_ = name
        real_option = self.option.get(name, [])
    # A radio group holds at most one selected value.
    if self.value.get(name, []):
        selected_values = [self.value[name]]
    else:
        selected_values = []
    return group(
        name_,
        selected_values,
        real_option,
        'radio',
        align,
        cols,
        sub_name
    )
#
# Multi-valued fields
#
def combo(self, name, attributes=None, sub_name=None, get_option_attributes=None):
    """\
    Create a multi-valued <select> field

    ``sub_name`` is required: the selected values are gathered from the
    flattened keys ``name[0].sub_name``, ``name[1].sub_name``, etc.

    >>> field = Field(
    ...     value={u'fruit[0].id': u'1', u'fruit[1].id': u'3'},
    ...     option={
    ...         u'fruit[*].id': [
    ...             (u'1', u'Bananas'),
    ...             (u'2>', u'Apples <>'),
    ...             (u'3', u'Pears'),
    ...         ]}
    ... )
    >>> print field.combo('fruit', sub_name='id')
    <select multiple="multiple" name="fruit">
    <option selected="selected" value="1">Bananas</option>
    <option value="2>">Apples <></option>
    <option selected="selected" value="3">Pears</option>
    </select>

    If no options for the select field are specified in the ``Field``
    constructor, no options will be rendered:

    >>> field = Field(
    ...     value={u'fruit[0].id': u'1', u'fruit[1].id': u'3'},
    ...     option={}
    ... )
    >>> print field.combo('fruit', sub_name='id')
    <select multiple="multiple" name="fruit">
    </select>

    Note that a combo box submits multiple values for the same field name
    so is tricky to handle because it doesn't fit a NORM model (see docs for a
    definition). Instead it is recommended you use a multi-value autocomplete field
    if there are lots of options or a checkbox group if there aren't too many.
    """
    if not sub_name:
        raise Exception('No sub_name specified')
    # Collect the selected values from the flattened, indexed keys, eg
    # 'fruit[0].id', 'fruit[1].id', ... for name='fruit', sub_name='id'.
    selected_values = []
    for k, v in self.value.items():
        if k.startswith(name+'[') and k.endswith('].'+sub_name):
            selected_values.append(v)
    return _select(
        selected_values,
        #self.option.get(name, []),
        _get_option(self, None, name, sub_name),
        True,
        name,
        attributes,
        get_option_attributes,
        self,
    )
def checkbox_group(self, name, align='horiz', cols=4, sub_name=None):
    """Check Box Group Field.

    ``align``
        can be ``'horiz'`` (default), ``'vert'`` or ``table``. If table layout is
        chosen then you can also use the ``cols`` argument to specify the number
        of columns in the table, the default is 4.

    Examples (deliberately including some '>' characters to check they are properly escaped)

    If no options are present in the ``Field`` constructor, none will be
    rendered:

    >>> field = Field(
    ...     value={
    ...         u'fruit[0].id': u'1',
    ...         u'fruit[1].id': u'3',
    ...     },
    ...     option={}
    ... )
    >>> field.checkbox_group('fruit', sub_name='id')
    Markup(u'')

    Let's have some values:

    >>> field = Field(
    ...     value={u'fruit[0].id': u'1', u'fruit[1].id': u'3'},
    ...     option={
    ...         u'fruit[*].id': [
    ...             (u'1', u'Bananas'),
    ...             (u'2>', u'Apples <>'),
    ...             (u'3', u'Pears'),
    ...         ]
    ...     }
    ... )
    >>> print field.checkbox_group('fruit', sub_name='id')
    <input type="checkbox" name="fruit[0].id" value="1" checked="checked" /> Bananas
    <input type="checkbox" name="fruit[1].id" value="2>" /> Apples <>
    <input type="checkbox" name="fruit[2].id" value="3" checked="checked" /> Pears
    >>> print field.checkbox_group('fruit', sub_name='id', align='vert')
    <input type="checkbox" name="fruit[0].id" value="1" checked="checked" /> Bananas<br />
    <input type="checkbox" name="fruit[1].id" value="2>" /> Apples <><br />
    <input type="checkbox" name="fruit[2].id" value="3" checked="checked" /> Pears<br />
    >>> print field.checkbox_group('fruit', sub_name='id', align='table', cols=2)
    <table border="0" width="100%" cellpadding="0" cellspacing="0">
    <tr>
    <td><input type="checkbox" name="fruit[0].id" value="1" checked="checked" /> Bananas</td>
    <td> </td>
    <td><input type="checkbox" name="fruit[1].id" value="2>" /> Apples <></td>
    <td> </td>
    </tr>
    <tr>
    <td><input type="checkbox" name="fruit[2].id" value="3" checked="checked" /> Pears</td>
    <td> </td>
    <td></td>
    <td> </td>
    </tr>
    </table>

    This also works with more deeply nested fields:

    >>> field = Field(
    ...     value={
    ...         u'person[0].fruit[0].id': u'1',
    ...         u'person[0].fruit[1].id': u'3',
    ...     },
    ...     option={
    ...         u'person[*].fruit[*].id': [
    ...             (u'1', u'Bananas'),
    ...             (u'2>', u'Apples <>'),
    ...             (u'3', u'Pears'),
    ...         ]
    ...     }
    ... )
    >>> print field.checkbox_group('person[0].fruit', sub_name='id')
    <input type="checkbox" name="person[0].fruit[0].id" value="1" checked="checked" /> Bananas
    <input type="checkbox" name="person[0].fruit[1].id" value="2>" /> Apples <>
    <input type="checkbox" name="person[0].fruit[2].id" value="3" checked="checked" /> Pears
    """
    if sub_name is None:
        raise Exception('Expected a sub_name argument')
    if name.endswith(']') and sub_name:
        raise Exception('The name should not end with %r when using sub_name'%name[name.rfind('['):])
    # Format the selected values into the correct flattened structure
    if sub_name:
        # Gather the values stored under the indexed keys, eg
        # 'fruit[0].id', 'fruit[1].id', ...
        selected_values = []
        for k, v in self.value.items():
            if k.startswith(name+'[') and k.endswith('].'+sub_name):
                selected_values.append(v)
    else:
        # Only reachable when sub_name is an explicit empty string,
        # since None was already rejected above.
        selected_values = self.value.get(name) or []
    return group(
        name,
        selected_values,
        _get_option(self, None, name, sub_name),
        'checkbox',
        align,
        cols,
        sub_name
    )
def _get_option(form, option, name, sub_name=None):
if option is None:
if not sub_name:
real_options = form.option.get(name, [])
else:
# First see if there is an exact match for this key
real_options = None
for option in form.option:
if option == name+'.'+sub_name:
real_options = form.option[option]
# Otherwise treat all the keys as regexes and merge the options
# of any matching keys
found = []
if real_options is None:
for option in form.option:
key = name+'.'+sub_name
match = re.match(option.replace('[', '\[').replace(']', '\]'), key)
if match is None:
if found:
raise Exception('The option keys %r and %r both match this checkbox group %r'%(found[0], option, key))
else:
found.append(option)
if found:
real_options = form.option[found[0]]
else:
real_options = option
return real_options or []
#
# Layout Methods
#
def _ensure_flat_and_set(self, name, value):
if value is None:
setattr(self, name, {})
else:
if not isinstance(value, dict):
raise Exception('Expected the %s argument to be a dictionary, not %s'%(name, type(value)))
for k in value:
if not isinstance(value[k], unicode):
try:
value[k] = unicode(value[k])
except Exception, e:
raise Exception(
'Values of the %r dict must always be a unicode '
'string, the key %r is %r, type %r and could not '
'be automatically converted to unicode because of '
'the following error: %s'%(
name,
k,
value[k],
type(value[k]),
e,
)
)
setattr(self, name, value)
class Form(Field):
    """A ``Field`` subclass which can also render the <form> and
    <fieldset> tags themselves, and which carries per-field ``error``
    and ``label`` dictionaries in addition to the field values.
    """
    def __init__(
        self,
        value=None,
        option=None,
        checked=None,
        error=None,
        label=None,
    ):
        Field.__init__(self, value, option, checked)
        # Module-level helper: validates each dict, coerces its values
        # to unicode, and stores it as self.error / self.label.
        _ensure_flat_and_set(self, 'error', error)
        _ensure_flat_and_set(self, 'label', label)

    #
    # Form Methods
    #

    def start(self, action="", method="post", enctype=None, attributes=None):
        """\
        Open a form tag which submits via the POST method. You must close the
        form yourself.

        ``action``
            The URL the form will submit to. Defaults to ``''`` so that the
            form submits to the current URL

        ``method``
            Can be ``post`` or ``get`` and affects the HTTP method used to
            submit the form.

        ``enctype``
            The encoding type, only usually set if your form contains fields
            for uploading a file.

        ``attributes``
            A dictionary containing other HTML attributes (apart from
            ``action``, ``method`` and ``enctype``)

        Here are some examples:

        >>> from formbuild import Form
        >>> form = Form()
        >>> print form.start("/submit")
        <form action="/submit" method="post">
        >>> print form.start("/submit", method="get")
        <form action="/submit" method="get">

        If your form contains file fields you must use ``method='post`` (the
        default) and also set the ``enctype`` attribute to contain the value
        ``"multipart/form-data"`` otherwise your browser will submit the
        filename instead of the file content. Here's an example:

        >>> print form.start(
        ...     "/submit",
        ...     "post",
        ...     enctype="multipart/form-data",
        ... )
        <form action="/submit" method="post" enctype="multipart/form-data">
        """
        attributes = check_attributes(attributes, ['method', 'enctype', 'action'])
        # NOTE(review): an unrecognised method is silently dropped here
        # rather than rejected -- confirm that is intended.
        if method.lower() in ['post', 'get']:
            attributes['method'] = method
        if enctype is not None:
            attributes['enctype'] = enctype
        attributes["action"] = action
        return self.builder.html_open('form', False, attributes or {})

    def end(self, hidden_field_names=None):
        """\
        End a form, adding hidden fields for any values with names in the
        ``hidden_field_names`` list.

        >>> form = Form()
        >>> print form.end()
        </form>
        >>> form = Form(value={'firstname': u'James', 'surname': u'Gardner'})
        >>> print form.end(hidden_field_names=['firstname', 'surname'])
        <input type="hidden" name="firstname" value="James" />
        <input type="hidden" name="surname" value="Gardner" />
        </form>
        """
        if hidden_field_names:
            # NOTE(review): the names and values are interpolated into
            # the hidden inputs without HTML-escaping -- looks unsafe if
            # a value can contain a double quote; confirm whether the
            # caller guarantees clean values.
            return Markup('\n'.join([
                '<input type="hidden" name="'+field+'" value="'+self.value.get(field, '')+'" />' for field in hidden_field_names
            ])+u'\n</form>')
        else:
            return Markup(u'</form>')

    #
    # Fieldset methods
    #

    def fieldset_start(self, legend=None, name=None):
        """\
        >>> form = Form(error=dict(person=u"This is an error message"))
        >>> print form.fieldset_start(),
        <fieldset>
        >>> print form.fieldset_end(),
        </fieldset>
        >>> print form.fieldset_start(u'People'),
        <fieldset>
        <legend>People</legend>
        >>> print form.fieldset_start(u'People', u'person'),
        <fieldset>
        <legend>People</legend>
        <span class="error">This is an error message</span>
        """
        html = Markup(u'<fieldset>\n')
        if legend is not None:
            html += self.builder.html_open(u'legend', False, dict())+legend+self.builder.html_close(u'legend')
        # When a name is given and an error is registered for it, render
        # the error message just after the legend.
        if name and self.error.get(name) is not None:
            html += Markup('\n<span class="error">')+self.error.get(name)+Markup('</span>')
        return html

    def fieldset_end(self):
        """\
        >>> form = Form()
        >>> print form.fieldset_end(),
        </fieldset>
        """
        return Markup(u'</fieldset>\n')

    def _get_field(self, name, type, args):
        # Render the field by dispatching to the method named ``type``
        # (eg self.text(), self.checkbox_group(), ...).
        # NOTE(review): both branches below are identical, so the
        # checkbox_group/radio_group special-case is redundant --
        # presumably a left-over from an earlier implementation.
        if type in ['checkbox_group', 'radio_group']:
            field_html = getattr(self, type)(
                name,
                **(args or {})
            )
        else:
            field_html = getattr(self, type)(
                name,
                **(args or {})
            )
        return field_html

    def field(
        self,
        name,
        type,
        label='',
        error='',
        required=False,
        field_desc='',
        field_pre='',
        args=None,
        colon=True,
        required_position='before',
    ):
        """\
        Generate a field with a label and any error.

        ``label`` and ``error`` default to the values registered in the
        constructor for ``name``; ``args`` is a dict of keyword arguments
        passed on to the field-rendering method named by ``type``.
        """
        if required and required_position not in ['before', 'after']:
            raise Exception("The required_position argument can either be 'before' or 'after', not %r"%required_position)
        html = Markup('')
        # Point the <label> at the field's id when one was supplied in
        # the field arguments.
        if args and 'attributes' in args and 'id' in args['attributes']:
            html += self.builder.html_open('label', close=False, attributes={'for': args['attributes']['id']})
        else:
            html += self.builder.html_open('label', close=False)
        if required and required_position=='before':
            html += '*'
        # Fall back to the label registered in the constructor.
        html += label or self.label.get(name, '')
        if required and required_position=='after':
            html += '*'
        if colon:
            html += ':'
        html += self.builder.html_close('label')
        if error or self.error.get(name):
            html += Markup('<span class="error">%s</span>')%(error or self.error.get(name))
        if field_pre:
            html += field_pre + Markup('<br />')
        html += self._get_field(name, type, args)+u'\n'
        if field_desc:
            html += Markup('<br />') + field_desc
        html += Markup('<br />')
        return html

    def action_bar(self, actions):
        """\
        Enter some HTML into the form layout starting at the same level as the
        fields.

        This is useful for generating an action bar containing submit buttons.

        ``actions``
            A ``Markup()`` object representing the HTML for the actions

        >>> form = Form()
        >>> print form.action_bar(
        ...     Markup('\\n'.join([
        ...         form.submit('submit', '< Back'),
        ...         form.submit('submit', 'Forward >')
        ...     ]))
        ... ),
        <input type="submit" name="submit" value="< Back" />
        <input type="submit" name="submit" value="Forward >" />
        """
        # NOTE(review): joining with an empty Markup iterates ``actions``
        # character by character when it is a single Markup string --
        # works, but presumably relies on Markup.join escaping rules;
        # confirm.
        return Markup(u'').join(actions)+Markup(u'\n')
class TableForm(Form):
    """A ``Form`` which lays its fields out in a three-column HTML table
    (label, field, help).
    """
    def __init__(
        self,
        value=None,
        option=None,
        checked=None,
        error=None,
        label=None,
        table_class='formbuild'
    ):
        # CSS class applied to the layout <table> unless overridden per
        # call in start_layout()/start_with_layout().
        self.table_class = table_class
        Form.__init__(self, value, option, checked, error, label)

    def start_layout(self, table_class=None):
        """\
        Start a layout without adding the form tag

        >>> form=TableForm()
        >>> print form.start_layout()
        <table>
        >>> print form.start_layout(table_class='form')
        <table class="form">
        """
        if table_class is None:
            return Markup(u'<table>')
        else:
            return Markup(u'<table class="%s">')%(table_class)

    def end_layout(self):
        """\
        End a layout without adding the end form tag

        >>> form = TableForm()
        >>> print form.end_layout()
        </table>
        """
        return Markup(u'</table>')

    def start_with_layout(
        self,
        action='',
        method="post",
        enctype=None,
        table_class=None,
        attributes=None
    ):
        """\
        Start a form the way you would with ``start_form()`` but include the
        HTML necessary for the use of the ``fields()`` helper.

        >>> form=TableForm()
        >>> print form.start_with_layout('/action', method='post')
        <form action="/action" method="post"><table class="formbuild">
        >>> print form.start_with_layout('/action', method='post', table_class='form')
        <form action="/action" method="post"><table class="form">
        """
        # Same attribute handling as Form.start(), then immediately open
        # the layout table.
        attributes = check_attributes(attributes, ['method', 'enctype', 'action'])
        if method.lower() in ['post', 'get']:
            attributes['method'] = method
        if enctype is not None:
            attributes['enctype'] = enctype
        attributes["action"] = action
        html = self.builder.html_open('form', False, attributes or {})
        html += self.start_layout(table_class or self.table_class)
        return html

    def end_with_layout(self, hidden_field_names=None):
        """\
        End a form started with ``start_with_layout()``

        >>> form = TableForm()
        >>> print form.end_with_layout()
        </table></form>
        """
        html = ''
        html += '</table>'
        if hidden_field_names:
            # Hidden inputs for the named values, as in Form.end().
            html += '\n'.join([
                '<input type="hidden" name="'+field+'" value="'+self.value.get(field, '')+'" />' for field in hidden_field_names
            ])+u'\n</form>'
        else:
            html += u'</form>'
        # XXX Really bad, not guaranteed safe
        return Markup(html)

    def action_bar(self, escaped_html):
        """\
        Enter some HTML into the form layout starting at the same level as the
        fields.

        This is useful for generating an action bar containing submit buttons.

        ``escaped_html``
            An HTML string, properly escaped, containing all the fields to
            appear in the action bar

        >>> form = TableForm()
        >>> print form.action_bar(
        ...     '\\n '.join([
        ...         form.submit('submit', '< Back'),
        ...         form.submit('submit', 'Forward >')
        ...     ])
        ... )
        <tr>
        <td></td>
        <td colspan="2">
        <input type="submit" name="submit" value="< Back" />
        <input type="submit" name="submit" value="Forward >" />
        </td>
        </tr>
        """
        if isinstance(escaped_html, (list, tuple)):
            escaped_html = '\n'.join(escaped_html)
        # XXX This is really bad, not really guaranteed escaped
        return Markup(u'<tr>\n <td></td>\n <td colspan="2">\n %s\n </td>\n</tr>'%(
            escaped_html,
        ))

    def row(self, escaped_html):
        """\
        Enter some HTML into the form layout as a new row.

        This is useful for form sections. For example:

        >>> form = TableForm()
        >>> print form.row('<h2>Extra Fields</h2>')
        <tr><td colspan="3"><h2>Extra Fields</h2></td></tr>
        """
        # NOTE(review): unlike the other helpers this returns a plain
        # string rather than a Markup instance -- confirm intended.
        return '<tr><td colspan="3">'+escaped_html+'</td></tr>'

    def field(
        self,
        name,
        type,
        label='',
        required=False,
        label_desc='',
        field_desc='',
        help='',
        field_pre='',
        attributes=None,
        args=None,
        side=True,
        colon=True,
        required_position='before',
    ):
        """\
        Format a field with a label.

        ``label``
            The label for the field

        ``field``
            The HTML representing the field, wrapped in ``literal()``

        ``required``
            Can be ``True`` or ``False`` depending on whether the label
            should be formatted as required or not. By default required
            fields have an asterix.

        ``label_desc``
            Any text to appear underneath the label, level with ``field_desc``

        ``field_desc``
            Any text to appear underneath the field

        ``help``
            Any HTML or JavaScript to appear imediately to the right of the
            field which could be used to implement a help system on the form

        ``field_pre``
            Any HTML to appear immediately above the field.

        ``side``
            Whether the label goes at the side of the field or above it.
            Defaults to ``True``, putting the label at the side.

        TIP: For future compatibility, always specify arguments explicitly
        and do not rely on their order in the function definition.

        Here are some examples:

        >>> form = TableForm(value=dict(test=u''))
        >>> print form.start_with_layout()
        <form action="" method="post"><table class="formbuild">
        >>> print form.field('test', 'text', 'email >', required=True)
        <tr class="field">
        <td class="label" valign="top" height="10">
        <span class="required">*</span><label for="test">email >:</label>
        </td>
        <td class="field" valign="top">
        <input type="text" name="test" value="" />
        </td>
        <td rowspan="2" valign="top"></td>
        </tr>
        >>> print form.field(
        ...     'test',
        ...     'text',
        ...     label='email >',
        ...     label_desc='including the @ sign',
        ...     field_desc='Please type your email carefully',
        ...     help = 'No help available for this field',
        ...     required=True,
        ... )
        ...
        <tr class="field">
        <td class="label" valign="top" height="10">
        <span class="required">*</span><label for="test">email >:</label>
        </td>
        <td class="field" valign="top">
        <input type="text" name="test" value="" />
        </td>
        <td rowspan="2" valign="top">No help available for this field
        </td>
        </tr>
        <tr class="description">
        <td class="label_desc" valign="top">
        <span class="small">including the @ sign</span>
        </td>
        <td class="field_desc" valign="top">
        <span class="small">Please type your email carefully</span>
        </td>
        </tr>
        >>> print form.end_with_layout()
        </table></form>

        An appropriate stylesheet to use to style forms generated with field() when
        the table class is specified as "formbuild" would be::

            table.formbuild span.error-message, table.formbuild div.error, table.formbuild span.required {
                font-weight: bold;
                color: #f00;
            }
            table.formbuild span.small {
                font-size: 85%;
            }
            table.formbuild form {
                margin-top: 20px;
            }
            table.formbuild form table td {
                padding-bottom: 3px;
            }
        """
        # NOTE(review): the ``attributes`` argument is accepted but never
        # used in this method -- confirm whether it should be merged
        # into ``args``.
        field_html = self._get_field(name, type, args)
        error = self.error.get(name)
        error_html = error and u'<div class="error">'+escape(error)+'</div>\n' or u''
        if side == True:
            # Label at the side: label cell, field cell and a help cell
            # spanning the optional description row below.
            html = """\
<tr class="field">
<td class="label" valign="top" height="10">
%(required_html_before)s<label for="%(label_for)s">%(label_html)s%(colon)s</label>%(required_html_after)s
</td>
<td class="field" valign="top">
%(field_pre_html)s%(error_html)s%(field_html)s
</td>
<td rowspan="2" valign="top">%(help_html)s</td>
</tr>""" %dict(
                required_html_after = required_position == 'after' and (required and u'<span class="required">*</span>' \
                    or u'<span style="visibility: hidden">*</span>') or '',
                required_html_before = required_position == 'before' and (required and u'<span class="required">*</span>' \
                    or u'<span style="visibility: hidden">*</span>') or '',
                label_for = name,
                label_html = escape(label),
                error_html = error_html,
                field_html = field_html,
                help_html = help and escape(help)+'\n ' or '',
                field_pre_html = field_pre and escape(field_pre) or '',
                colon = colon and ":" or "",
            )
            if label_desc or field_desc:
                html += """
<tr class="description">
<td class="label_desc" valign="top">
<span class="small">%(label_desc_html)s</span>
</td>
<td class="field_desc" valign="top">
<span class="small">%(field_desc_html)s</span>
</td>
</tr>""" %dict(
                    label_desc_html = label_desc,
                    field_desc_html = field_desc,
                )
        else:
            # Label above the field: three stacked rows (label row,
            # field row, description row) with the help cell spanning
            # all three.
            html = """\
<tr><td></td>
<td valign="top">
<table border="0">
<tr>
<td><label for="%(label_for)s">%(label_html)s%(colon)s</label></td><td>%(required_html)s</td><td><span class="small label_desc">%(label_desc_html)s</span></td>
</tr>
</table>
</td>
<td valign="top" rowspan="3">%(help_html)s</td>
</tr>
<tr><td></td>
<td valign="top">%(field_pre_html)s%(error_html)s%(field_html)s</td>
</tr>
<tr><td></td>
<td class="small field_desc" valign="top">%(field_desc_html)s</td>
</tr>"""% dict(
                label_for = name,
                label_html = escape(label),
                help_html = help and escape(help)+'\n ' or '',
                required_html = required and u'<span class="required">*</span>' \
                    or u'<span style="visibility: hidden">*</span>',
                error_html = error_html,
                field_html = field_html,
                field_pre_html = field_pre and escape(field_pre) or '',
                label_desc_html = label_desc,
                field_desc_html = field_desc,
                colon = colon and ":" or "",
            )
        # XXX This is completely wrong, we haven't tested it
        return Markup(html)
#def start_fieldset(self, legend=None, name=None):
# """\
# >>> form = Form(error=dict(person="This is an error message"))
# >>> print form.start_fieldset()
# <fieldset>
# >>> print form.end_fieldset()
# </fieldset>
# >>> print form.start_fieldset(u'People')
# <fieldset>
# <legend>People</legend>
# >>> print form.start_fieldset(u'People', 'person')
# <fieldset>
# <legend>People</legend>
# <p class="error">This is an error message</p>
# """
# html = u'<fieldset>'
# if legend is not None:
# html += '\n'+self.builder.html_open(u'legend', False, dict())+legend+self.builder.html_close(u'legend')
# if name and self.error.get(name) is not None:
# html += '\n<p class="error">'+self.error.get(name)+'</p>'
# return html
#def end_fieldset(self, name):
# """\
# >>> form = Form()
# >>> print form.end_fieldset()
# </fieldset>
# """
# return u'</fieldset>' | PypiClean |
/Muntjac-1.1.2.tar.gz/Muntjac-1.1.2/muntjac/public/VAADIN/widgetsets/org.muntiacus.MuntjacWidgetSet/mode/clojure/clojure.js | CodeMirror.defineMode("clojure", function (config, mode) {
var BUILTIN = "builtin", COMMENT = "comment", STRING = "string", TAG = "tag",
ATOM = "atom", NUMBER = "number", BRACKET = "bracket", KEYWORD="keyword";
var INDENT_WORD_SKIP = 2, KEYWORDS_SKIP = 1;
function makeKeywords(str) {
var obj = {}, words = str.split(" ");
for (var i = 0; i < words.length; ++i) obj[words[i]] = true;
return obj;
}
var atoms = makeKeywords("true false nil");
var keywords = makeKeywords(
// Control structures
"defn defn- def def- defonce defmulti defmethod defmacro defstruct deftype defprotocol defrecord deftest slice defalias defhinted defmacro- defn-memo defnk defnk defonce- defunbound defunbound- defvar defvar- let letfn do case cond condp for loop recur when when-not when-let when-first if if-let if-not . .. -> ->> doto and or dosync doseq dotimes dorun doall load import unimport ns in-ns refer try catch finally throw with-open with-local-vars binding gen-class gen-and-load-class gen-and-save-class handler-case handle" +
// Built-ins
"* *1 *2 *3 *agent* *allow-unresolved-vars* *assert *clojure-version* *command-line-args* *compile-files* *compile-path* *e *err* *file* *flush-on-newline* *in* *macro-meta* *math-context* *ns* *out* *print-dup* *print-length* *print-level* *print-meta* *print-readably* *read-eval* *source-path* *use-context-classloader* *warn-on-reflection* + - / < <= = == > >= accessor aclone agent agent-errors aget alength alias all-ns alter alter-meta! alter-var-root amap ancestors and apply areduce array-map aset aset-boolean aset-byte aset-char aset-double aset-float aset-int aset-long aset-short assert assoc assoc! assoc-in associative? atom await await-for await1 bases bean bigdec bigint binding bit-and bit-and-not bit-clear bit-flip bit-not bit-or bit-set bit-shift-left bit-shift-right bit-test bit-xor boolean boolean-array booleans bound-fn bound-fn* butlast byte byte-array bytes case cast char char-array char-escape-string char-name-string char? chars chunk chunk-append chunk-buffer chunk-cons chunk-first chunk-next chunk-rest chunked-seq? class class? clear-agent-errors clojure-version coll? comment commute comp comparator compare compare-and-set! compile complement concat cond condp conj conj! cons constantly construct-proxy contains? count counted? create-ns create-struct cycle dec decimal? declare definline defmacro defmethod defmulti defn defn- defonce defstruct delay delay? deliver deref derive descendants destructure disj disj! dissoc dissoc! distinct distinct? doall doc dorun doseq dosync dotimes doto double double-array doubles drop drop-last drop-while empty empty? ensure enumeration-seq eval even? every? extend extend-protocol extend-type extends? extenders false? ffirst file-seq filter find find-doc find-ns find-var first float float-array float? floats flush fn fn? fnext for force format future future-call future-cancel future-cancelled? future-done? future? 
gen-class gen-interface gensym get get-in get-method get-proxy-class get-thread-bindings get-validator hash hash-map hash-set identical? identity if-let if-not ifn? import in-ns inc init-proxy instance? int int-array integer? interleave intern interpose into into-array ints io! isa? iterate iterator-seq juxt key keys keyword keyword? last lazy-cat lazy-seq let letfn line-seq list list* list? load load-file load-reader load-string loaded-libs locking long long-array longs loop macroexpand macroexpand-1 make-array make-hierarchy map map? mapcat max max-key memfn memoize merge merge-with meta method-sig methods min min-key mod name namespace neg? newline next nfirst nil? nnext not not-any? not-empty not-every? not= ns ns-aliases ns-imports ns-interns ns-map ns-name ns-publics ns-refers ns-resolve ns-unalias ns-unmap nth nthnext num number? odd? or parents partial partition pcalls peek persistent! pmap pop pop! pop-thread-bindings pos? pr pr-str prefer-method prefers primitives-classnames print print-ctor print-doc print-dup print-method print-namespace-doc print-simple print-special-doc print-str printf println println-str prn prn-str promise proxy proxy-call-with-super proxy-mappings proxy-name proxy-super push-thread-bindings pvalues quot rand rand-int range ratio? rational? rationalize re-find re-groups re-matcher re-matches re-pattern re-seq read read-line read-string reify reduce ref ref-history-count ref-max-history ref-min-history ref-set refer refer-clojure release-pending-sends rem remove remove-method remove-ns repeat repeatedly replace replicate require reset! reset-meta! resolve rest resultset-seq reverse reversible? rseq rsubseq satisfies? second select-keys send send-off seq seq? seque sequence sequential? set set-validator! set? short short-array shorts shutdown-agents slurp some sort sort-by sorted-map sorted-map-by sorted-set sorted-set-by sorted? special-form-anchor special-symbol? split-at split-with str stream? string? 
struct struct-map subs subseq subvec supers swap! symbol symbol? sync syntax-symbol-anchor take take-last take-nth take-while test the-ns time to-array to-array-2d trampoline transient tree-seq true? type unchecked-add unchecked-dec unchecked-divide unchecked-inc unchecked-multiply unchecked-negate unchecked-remainder unchecked-subtract underive unquote unquote-splicing update-in update-proxy use val vals var-get var-set var? vary-meta vec vector vector? when when-first when-let when-not while with-bindings with-bindings* with-in-str with-loading-context with-local-vars with-meta with-open with-out-str with-precision xml-seq");
var indentKeys = makeKeywords(
// Built-ins
"ns fn def defn defmethod bound-fn if if-not case condp when while when-not when-first do future comment doto locking proxy with-open with-precision reify deftype defrecord defprotocol extend extend-protocol extend-type try catch" +
// Binding forms
"let letfn binding loop for doseq dotimes when-let if-let" +
// Data structures
"defstruct struct-map assoc" +
// clojure.test
"testing deftest" +
// contrib
"handler-case handle dotrace deftrace");
var tests = {
digit: /\d/,
digit_or_colon: /[\d:]/,
hex: /[0-9a-fA-F]/,
sign: /[+-]/,
exponent: /[eE]/,
keyword_char: /[^\s\(\[\;\)\]]/,
basic: /[\w\$_\-]/,
lang_keyword: /[\w*+!\-_?:\/]/
};
function stateStack(indent, type, prev) { // represents a state stack object
this.indent = indent;
this.type = type;
this.prev = prev;
}
function pushStack(state, indent, type) {
state.indentStack = new stateStack(indent, type, state.indentStack);
}
function popStack(state) {
state.indentStack = state.indentStack.prev;
}
function isNumber(ch, stream){
// hex
if ( ch === '0' && 'x' == stream.peek().toLowerCase() ) {
stream.eat('x');
stream.eatWhile(tests.hex);
return true;
}
// leading sign
if ( ch == '+' || ch == '-' ) {
stream.eat(tests.sign);
ch = stream.next();
}
if ( tests.digit.test(ch) ) {
stream.eat(ch);
stream.eatWhile(tests.digit);
if ( '.' == stream.peek() ) {
stream.eat('.');
stream.eatWhile(tests.digit);
}
if ( 'e' == stream.peek().toLowerCase() ) {
stream.eat(tests.exponent);
stream.eat(tests.sign);
stream.eatWhile(tests.digit);
}
return true;
}
return false;
}
return {
startState: function () {
return {
indentStack: null,
indentation: 0,
mode: false,
};
},
token: function (stream, state) {
if (state.indentStack == null && stream.sol()) {
// update indentation, but only if indentStack is empty
state.indentation = stream.indentation();
}
// skip spaces
if (stream.eatSpace()) {
return null;
}
var returnType = null;
switch(state.mode){
case "string": // multi-line string parsing mode
var next, escaped = false;
while ((next = stream.next()) != null) {
if (next == "\"" && !escaped) {
state.mode = false;
break;
}
escaped = !escaped && next == "\\";
}
returnType = STRING; // continue on in string mode
break;
default: // default parsing mode
var ch = stream.next();
if (ch == "\"") {
state.mode = "string";
returnType = STRING;
} else if (ch == "'" && !( tests.digit_or_colon.test(stream.peek()) )) {
returnType = ATOM;
} else if (ch == ";") { // comment
stream.skipToEnd(); // rest of the line is a comment
returnType = COMMENT;
} else if (isNumber(ch,stream)){
returnType = NUMBER;
} else if (ch == "(" || ch == "[") {
var keyWord = ''; var indentTemp = stream.column();
/**
Either
(indent-word ..
(non-indent-word ..
(;something else, bracket, etc.
*/
while ((letter = stream.eat(tests.keyword_char)) != null) {
keyWord += letter;
}
if (keyWord.length > 0 && indentKeys.propertyIsEnumerable(keyWord)) { // indent-word
pushStack(state, indentTemp + INDENT_WORD_SKIP, ch);
} else { // non-indent word
// we continue eating the spaces
stream.eatSpace();
if (stream.eol() || stream.peek() == ";") {
// nothing significant after
// we restart indentation 1 space after
pushStack(state, indentTemp + 1, ch);
} else {
pushStack(state, indentTemp + stream.current().length, ch); // else we match
}
}
stream.backUp(stream.current().length - 1); // undo all the eating
returnType = BRACKET;
} else if (ch == ")" || ch == "]") {
returnType = BRACKET;
if (state.indentStack != null && state.indentStack.type == (ch == ")" ? "(" : "[")) {
popStack(state);
}
} else if ( ch == ":" ) {
stream.eatWhile(tests.lang_keyword);
return TAG;
} else {
stream.eatWhile(tests.basic);
if (keywords && keywords.propertyIsEnumerable(stream.current())) {
returnType = BUILTIN;
} else if ( atoms && atoms.propertyIsEnumerable(stream.current()) ) {
returnType = ATOM;
} else returnType = null;
}
}
return returnType;
},
indent: function (state, textAfter) {
if (state.indentStack == null) return state.indentation;
return state.indentStack.indent;
}
};
});
CodeMirror.defineMIME("text/x-clojure", "clojure"); | PypiClean |
/EGL-ML-CHALLENGE-0.1.0.tar.gz/EGL-ML-CHALLENGE-0.1.0/README.md | # ML - Engineering Challenge
## Build a machine learning system
Welcome to the endeavour machine learning challenge! This challenge is designed to test a large variety of skills that a machine learning engineer would use in their day to day work. There are no restrictions in terms of technology required for this challenge other than the use of Python 3. You are free to use whichever technology or cloud provider you like. It's important to note that everyone has strong points and weak points. If you are strong in one or more areas, try to make that area shine.
The challenge description is as follows:
#### 0. Take the code provided and upload it to a git repository of your choice.
After you complete the challenge, please add our team members as viewers to your repo.
#### 1. Please provide a high level overview of your systems design and its key components.
This could be a one pager, a readme.md or an architecture diagram. We will leave the implementation up to you.
#### 2. Create a simple linear regression model.
You will have to fill in the gaps in the `SimpleLinearRegression` class so that the code will run successfully.
The following functions need to be filled:
- `__loss`: This function defines the loss function of your choice.
- `__sgd`: We will use the Stochastic Gradient Descent Algorithm to optimise the slope and the intercept of our linear function. There are many resources online about SGD, However
the most important formulas are :

Where `n` is the number of samples in the training dataset.
Do your best to vectorize the formulas.
- `__predict`: uses our linear function to predict the outcome. The function of a simple line is defined as `y = wX + b`
We have provided the benchmark code `benchmark.py`. Execute it and you should get the Coefficient of determination around `0.42`.
A good implementation should return about the same Coefficient of determination or slightly higher. During the interview we could explore the time and memory complexity of your code.
**PS: If you are struggling implementing the above, consider using scikit-learn to progress to the next stages (but this is not encouraged).**
3. Update `main.py` to make it an API for inference. Make the API invokable from a http request. The choice of web framework is up to you.
The API should have two endpoints:
- `POST /stream` : takes a payload of one record and returns the prediction for that record.
- `POST /batch` : takes an array of multiple records and returns an array of predictions.
Think about what other features an enterprise machine learning system would have.
#### 4. Package your code into a python package to make it easily installable and testable for developers.
#### 5. Package your code into a container and deploy it to a container registry of your choice.
#### 6. Create a CICD pipeline using the technology of your choice to deploy your code to production.
Think about what stages might be required in a full CICD pipeline. Your code should be invokable from a public URL.
#### 7. Document what components an enterprise machine learning system would have if you had the time to add them.
What are some things that are critical to have versus nice to have?
## Assessment Criterion
We are not looking for a highly performant model. The criterion for this exercise is centered on a complete system that works well together and your ability to apply a machine learning inference to a real world use case. The following diagram speaks volumes about the reality of a machine learning engineer.

We are more interested in how your overall system works and the ancillary systems and components that are considered and better yet, implemented. As you complete the challenge, try to think about the following assessment criterion:
- Does your solution work end to end?
- Are there any unit tests or integration tests?
- Has security/monitoring been considered?
- How is your solution documented? Is it easy to build on and for other developers to understand?
- How performant is your solution both from a code perspective and a scalability perspective as a service
- Has due consideration been given to what a production ML system would require? This could be interactions or dependencies with other systems.
Good luck & have fun!
| PypiClean |
/GeneXpress-0.0.1.1.tar.gz/GeneXpress-0.0.1.1/AnalysisTools/limma_de.py | from ExpressionTools import pyEset, pyXset
from rpy2.robjects.packages import importr
from pandas import DataFrame
r_base = importr('base')
limma = importr('limma')
stats = importr('stats')
class LimmaDiffEx:
    """Differential-expression analysis driven by the R ``limma`` package (via rpy2).

    Typical workflow: construct with expression data, then
    ``make_contrasts`` -> ``run_contrast_fit`` -> ``run_ebayes`` -> ``run_toptable``.

    Reference:
    https://www.bioconductor.org/help/course-materials/2009/BioC2009/labs/limma/limma.pdf
    """

    def __init__(self, data, model: DataFrame = None):
        """
        :param data: a ``pyEset.PyEset``, a ``pyXset.Xset`` subclass, or a raw
            expression matrix (genes x samples).
        :param model: design matrix; only used when ``data`` is a raw matrix
            (otherwise the design is derived from ``data.groups``).
        """
        if isinstance(data, pyEset.PyEset):
            self.data = data.exprs
            self.model = data.groups.astype(int)
        elif issubclass(type(data), pyXset.Xset):
            self.data = data.exprs.to_numpy()
            self.model = data.groups.astype(int).T
        else:
            # Raw normalized-counts matrix plus an explicit design matrix.
            self.data = data
            self.model = model.T
        # self.model_mat = self.make_model_matrix()
        self.lm = self.run_lmfit()
        self.contrasts = None
        self.contrast_fit = None
        self.ebayes = None

    def run_lmfit(self):
        """Fit a per-gene linear model with limma's ``lmFit``."""
        return limma.lmFit(self.data, self.model)

    def run_ebayes(self):
        """Apply empirical-Bayes moderation to the fitted contrasts.

        Requires ``run_contrast_fit`` to have been called first.
        """
        self.ebayes = limma.ebayes(self.contrast_fit)
        return self.ebayes

    def make_contrasts(self, experimental: list, control: str, comps=None):
        """Build and store the contrast matrix.

        :param experimental: experimental group names; each is contrasted
            against ``control`` when ``comps`` is None.
        :param control: control/reference group name.
        :param comps: optional explicit contrast specification passed straight
            to ``makeContrasts``.
        """
        if comps is None:
            self.contrasts = limma.makeContrasts(
                *[experiment + "-" + control for experiment in experimental],
                levels=self.model,
            )
        else:
            self.contrasts = limma.makeContrasts(comps, levels=self.model)
        return self.contrasts

    def run_contrast_fit(self):
        """Fit the stored contrasts against the linear model.

        Returns None when no contrasts have been defined yet.
        """
        if self.contrasts is None:
            return None
        # Bug fix: the fitted contrasts were previously returned but never
        # stored, so a later run_ebayes() always operated on
        # ``contrast_fit=None``.
        self.contrast_fit = limma.contrasts_fit(self.lm, self.contrasts)
        return self.contrast_fit

    def run_toptable(self, sort_by, n=None, coef=1):
        """Return limma's ``topTable`` ranking with FDR adjustment.

        :param sort_by: column to sort by (passed to ``topTable``).
        :param n: number of rows to return; defaults to all genes.
        :param coef: contrast coefficient to report.
        """
        if n is None:
            # len() of the first axis works for both DataFrame and ndarray
            # expression data; the previous ``self.data.index`` failed for
            # ndarray input (e.g. the Xset branch of __init__).
            n = len(self.data)
        tops = limma.topTable(self.ebayes, adjust='fdr', sort_by=sort_by, n=n, coef=coef)
        return tops

    # (Original commented-out make_model_matrix retained for reference.)
    # def make_model_matrix(self, intercept=True):
    #     mat = stats.model_matrix(self.model.to_array())
    #     colnames = r("`colnames<-`")
    #     mat = colnames(mat, StrVector(self.model.columns.to_array()))
    #     return mat
/APAV-1.4.0-cp311-cp311-win_amd64.whl/apav/analysis/spatial.py | from typing import Sequence, Tuple, List, Dict, Any, Union, Type, Optional, TYPE_CHECKING
from numbers import Real, Number
from numpy import ndarray
from apav.analysis.base import AnalysisBase
from apav.utils import validate
from apav import Roi, RangeCollection, Ion
from apav.core.histogram import histogram2d_binwidth
from apav.core.multipleevent import get_mass_indices
from apav.core.isotopic import Element
from scipy.ndimage import gaussian_filter
import numpy as n
import multiprocessing as mp
from apav.analysis.grid_transfer import transfer as _transfer
def ion_transfer(X: n.ndarray, Y: n.ndarray, Z: n.ndarray, pos: n.ndarray, stddev3: Number) -> ndarray:
    """
    Transfer an array of ion positions to a binned grid.

    When ``stddev3`` is numerically zero, positions are simply histogrammed
    into the nearest bin; otherwise a gaussian transfer function of the given
    3-sigma width is applied (delegated to the compiled ``_transfer`` routine).

    :param X: 3D array of x-coordinates of grid
    :param Y: 3D array of y-coordinates of grid
    :param Z: 3D array of z-coordinates of grid
    :param pos: 2D array of positions
    :param stddev3: 3-sigma standard deviation of gaussian distribution
    :return: 3D array of counts
    """
    if len(pos.shape) != 2:
        raise ValueError("Positions must be a 2D array")
    if pos.size == 0:
        raise ValueError("At least one ion position must be provided")
    if any(len(i.shape) != 3 for i in [X, Y, Z]):
        raise ValueError("All grid coordinate arrays must be three-dimensional")
    validate.positive_number(stddev3)

    if n.isclose(stddev3, 0):
        # No smoothing: plain nearest-bin histogram.  Bin widths are inferred
        # from the grid spacing — assumes uniformly spaced, "ij"-indexed
        # center coordinates as produced by make_coordinate_grids.
        binx = X[1, 0, 0] - X[0, 0, 0]
        biny = Y[0, 1, 0] - Y[0, 0, 0]
        binz = Z[0, 0, 1] - Z[0, 0, 0]
        # Convert bin centers to bin edges for histogramdd.
        x_edge = n.concatenate([X[:, 0, 0] - binx / 2, [X[-1, 0, 0] + binx / 2]])
        y_edge = n.concatenate([Y[0, :, 0] - biny / 2, [Y[0, -1, 0] + biny / 2]])
        z_edge = n.concatenate([Z[0, 0, :] - binz / 2, [Z[0, 0, -1] + binz / 2]])
        counts, _ = n.histogramdd(pos, bins=(x_edge, y_edge, z_edge))
        return counts
    else:
        return _transfer(
            X.astype(n.double), Y.astype(n.double), Z.astype(n.double), pos.astype(n.double), float(stddev3)
        )
def make_coordinate_grids(
    extents: Sequence[Tuple[Number, Number]], bin_width: Union[Sequence[Number], Number], edges=False
) -> Tuple[ndarray, ndarray, ndarray]:
    """
    Generate 3D x/y/z coordinate arrays for indexing into compositional grids

    :param extents: The x/y/z extent to generate the grids for
    :param bin_width: The bin width of each bin, a single number or sequence of numbers for each dimension
    :param edges: Whether the coordinates represent the edges of the bins or centers
    :return: three 3D arrays (X, Y, Z) from ``meshgrid(..., indexing="ij")``
    """
    # NOTE: assert-based validation is stripped when running under ``python -O``.
    assert len(extents) == 3
    for i in extents:
        validate.interval(i)
    assert all(len(i) == 2 for i in extents)
    if hasattr(bin_width, "__iter__"):
        assert len(bin_width) == 3
    if isinstance(bin_width, (float, int)):
        # Broadcast a scalar width to all three dimensions.
        bin_width = [
            bin_width,
        ] * 3
    bin_width = [float(i) for i in bin_width]
    validate.all_positive_nonzero(bin_width)

    ext_x, ext_y, ext_z = extents
    # Number of bins needed to cover each axis extent.
    dx = n.abs(n.diff(ext_x)[0])
    dy = n.abs(n.diff(ext_y)[0])
    dz = n.abs(n.diff(ext_z)[0])
    nx = int(n.ceil(dx / bin_width[0]))
    ny = int(n.ceil(dy / bin_width[1]))
    nz = int(n.ceil(dz / bin_width[2]))

    # Bin-center coordinates starting at the lower extent of each axis.
    x = n.array([ext_x[0] + i * bin_width[0] for i in range(nx)])
    y = n.array([ext_y[0] + i * bin_width[1] for i in range(ny)])
    z = n.array([ext_z[0] + i * bin_width[2] for i in range(nz)])

    # NOTE(review): the grid is extended by one extra bin only when the last
    # x-center happens to be integral, and the test inspects x alone while
    # extending x, y AND z — this looks suspicious; confirm intended behavior
    # before relying on exact grid sizes.
    if x[-1] % 1 == 0:
        x = n.concatenate([x, x[-1] + [bin_width[0]]])
        y = n.concatenate([y, y[-1] + [bin_width[1]]])
        z = n.concatenate([z, z[-1] + [bin_width[2]]])

    if edges is True:
        # Shift centers to lower edges and append the final upper edge.
        x -= bin_width[0] / 2
        y -= bin_width[1] / 2
        z -= bin_width[2] / 2
        x = n.concatenate([x, [x[-1] + bin_width[0]]])
        y = n.concatenate([y, [y[-1] + bin_width[1]]])
        z = n.concatenate([z, [z[-1] + bin_width[2]]])

    return n.meshgrid(x, y, z, indexing="ij")
class RangedGrid(AnalysisBase):
    """
    Compute the ionic and elemental composition spatially distributed among a structured grid
    """
    def __init__(
        self,
        roi: Roi,
        ranges: RangeCollection,
        bin_width: Number = 1,
        first_pass: bool = True,
        delocalization: Union[Number, Sequence[Number]] = n.array([3, 3, 1.5]),
        gauss_trunc: Number = 4,
    ):
        """
        :param roi: Parent Roi the RangedGrid is computed on
        :param ranges: RangeCollection defining the ranges
        :param bin_width: symmetric bin width size
        :param first_pass: Whether the first pass delocalization is computed using a gaussian transfer function.
        :param delocalization: The delocalization distances (as 3 standard deviations of a normal distribution)
        :param gauss_trunc: Number of standard deviations to truncate the gaussian kernel for second pass delocalization
        """
        super().__init__(roi)
        self._ranges = validate.is_type(ranges, RangeCollection)
        self._voxel = float(validate.positive_nonzero_number(bin_width))

        # Accept a scalar or a 3-sequence; a scalar is broadcast to all axes.
        if isinstance(delocalization, Real):
            self._delocalization = n.array([delocalization])
        else:
            self._delocalization = n.array(delocalization)

        if len(self._delocalization.shape) == 1 and self._delocalization.shape[0] == 1:
            self._delocalization = n.ones(3) * self._delocalization[0]

        if not all(i > 0 for i in self._delocalization):
            raise ValueError("Delocalization distances must be positive and non-zero")

        if self._delocalization.shape[0] != 3:
            raise ValueError(f"Unexpected delocalization shape, expected 3 got {self._delocalization.shape[0]}")

        self._gauss_trunc = validate.positive_nonzero_number(gauss_trunc)

        self._X = ndarray([])
        self._Y = ndarray([])
        self._Z = ndarray([])
        self._ion_counts = {}
        self._elem_counts_array = ndarray([])
        self._elem_frac = {}
        self._elem_counts = {}
        self._elem_cum_counts = None
        self._first_pass = first_pass

        self._calculate()

    @property
    def ranges(self) -> RangeCollection:
        """
        The ranges used for ranging the mass spectrum
        """
        return self._ranges

    @property
    def extents(self) -> Tuple[Tuple[float, float], Tuple[float, float], Tuple[float, float]]:
        """
        Get the spatial extents (by center positions) of the grids
        """
        return (
            (self._X.min(), self._X.max()),
            (self._Y.min(), self._Y.max()),
            (self._Z.min(), self._Z.max()),
        )

    @property
    def first_pass(self) -> bool:
        """
        Whether to compute first pass delocalization
        """
        return self._first_pass

    @property
    def centers(self) -> Tuple[ndarray, ndarray, ndarray]:
        """
        The center positions of the structured grids

        For MxNxP voxels this returns 3 arrays of dimensions: Mx1x1, 1xNx1, 1x1xP
        """
        return self._X, self._Y, self._Z

    @property
    def bin_width(self) -> float:
        """
        Bin width of the voxels
        """
        return self._voxel

    @property
    def delocalization(self) -> ndarray:
        """
        Amount of smoothing used during the delocalization process
        """
        return self._delocalization

    @property
    def gauss_trunc(self) -> Number:
        """
        Where to truncate the gaussian kernel for second pass delocalization
        """
        return self._gauss_trunc

    @property
    def all_ionic_counts(self) -> Dict[Ion, ndarray]:
        """
        Get all ionic count grids in a dict
        """
        return self._ion_counts

    @property
    def all_elemental_frac(self) -> Dict[Element, ndarray]:
        """
        Get all elemental fraction grids as a dict
        """
        return self._elem_frac

    @property
    def all_elemental_frac_str(self) -> Dict[str, ndarray]:
        """
        Get all elemental fraction grids as a dictionary (using elemental symbols)
        """
        return {i.symbol: j for i, j in self._elem_frac.items()}

    @property
    def elemental_counts_total(self) -> Number:
        """
        Get the total (sum) of all elemental counts
        """
        return self._elem_cum_counts

    @property
    def elemental_counts_grid(self) -> ndarray:
        """
        Get an array of the cumulative elemental counts in each bin
        """
        return self._elem_counts_array

    def ionic_counts(self, ion: Ion) -> ndarray:
        """
        Get a single ionic counts grid

        :param ion: The ion of the grid to return
        """
        if ion not in self.all_ionic_counts.keys():
            raise ValueError("Ion {} does not exist in the RangedGrid".format(ion.hill_formula))
        return self.all_ionic_counts[ion]

    def elemental_frac(self, element: Union[str, Element]) -> ndarray:
        """
        Get a single elemental fraction grid

        :param element: the element of the grid to return (Element or str symbol)
        """
        if isinstance(element, str):
            for elem in self.all_elemental_frac.keys():
                if elem.symbol == element:
                    return self.all_elemental_frac[elem]
            # Bug fix: an unknown symbol previously fell through to a
            # confusing ``KeyError: None``; raise a clear error instead.
            raise ValueError("Element {} does not exist in the RangedGrid".format(element))
        elif isinstance(element, Element):
            return self.all_elemental_frac[element]
        else:
            raise TypeError("Expected elemental symbol string or Element type, got {} instead".format(type(element)))

    def _calculate(self):
        """
        Compute the ranged grids
        """
        dims = self.roi.dimensions
        n_voxels = n.ceil(dims / self.bin_width).ravel().astype(int)

        range_elems = self.ranges.elements()
        # Dict comprehension de-duplicates ions shared by multiple ranges;
        # the += below accumulates each range's contribution per ion.
        self._ion_counts = {i.ion: n.zeros(n_voxels) for i in self.ranges.ranges}

        X, Y, Z = make_coordinate_grids(self.roi.xyz_extents, self.bin_width)
        self._X = X
        self._Y = Y
        self._Z = Z

        if not self.first_pass:
            pass1_3sigma = 0
            stddev = self.delocalization / 3
        else:
            # First pass smears each ion over neighboring bins (half a bin
            # at 3 sigma); the gaussian filter below applies the remainder so
            # the total smoothing matches self.delocalization.
            pass1_3sigma = self.bin_width / 2
            stddev = n.sqrt((self.delocalization / 3) ** 2 - n.tile(pass1_3sigma / 3, 3) ** 2)

        stddev_vox = stddev / self.bin_width

        def ranged_xyz(rng):
            # Positions of all ions whose mass falls within the range interval.
            low, up = rng.interval
            idx = n.argwhere((self.roi.mass >= low) & (self.roi.mass < up)).ravel()
            return self.roi.xyz[idx].astype(float)

        if self.first_pass:
            result = [ion_transfer(X, Y, Z, ranged_xyz(i), pass1_3sigma) for i in self.ranges]
        else:
            result = []
            for rng in self.ranges:
                counts, _ = n.histogramdd(ranged_xyz(rng), bins=n_voxels)
                result.append(counts)

        for i, data in zip(self.ranges, result):
            nan = n.count_nonzero(n.isnan(data))
            if nan > 0:
                raise ArithmeticError(
                    "NaNs encountered during first pass delocalization, try picking a different bin width"
                )
            self._ion_counts[i.ion] += gaussian_filter(
                data,
                sigma=stddev_vox,
                truncate=self.gauss_trunc,
            )

        self._elem_frac = {i: 0 for i in range_elems}
        self._elem_counts = {i: 0 for i in range_elems}
        elem_counts = self._elem_counts

        # Decompose each ion's counts into its constituent elements,
        # weighted by stoichiometric multiplicity.
        for ion, counts in self._ion_counts.items():
            for elem, mult in ion.comp_dict.items():
                elem_counts[elem] += mult * counts

        self._elem_counts_array = sum(array for array in elem_counts.values())
        norm = self._elem_counts_array
        self._elem_cum_counts = norm

        for key in elem_counts.keys():
            ary = elem_counts[key]
            # Bug fix: n.divide with ``where=`` but no ``out=`` leaves the
            # unselected entries uninitialized (arbitrary memory); supply a
            # zeroed output so zero-count bins report a fraction of 0.
            self._elem_frac[key] = n.divide(ary, norm, out=n.zeros_like(ary), where=ary > 0)
class DensityHistogram(AnalysisBase):
    """
    Compute a 2D density histogram of an Roi, projected along one axis
    """

    def __init__(self, roi: Roi, bin_width=0.3, axis="z", multiplicity="all"):
        """
        :param roi: region of interest
        :param bin_width: width of each (square) histogram bin
        :param axis: axis to project the histogram along ("x", "y", or "z")
        :param multiplicity: the multiplicity order to compute the histogram with
        """
        super().__init__(roi)
        self.bin_width = validate.positive_nonzero_number(bin_width)
        self._multiplicity = validate.multiplicity_any(multiplicity)
        if multiplicity != "all":
            roi.require_multihit_info()
        self._histogram = None
        self._histogram_extents = None
        self._axis = validate.choice(axis, ("x", "y", "z"))
        self._bin_vol = None
        self._calculate_histogram()

    @property
    def multiplicity(self) -> Union[str, int]:
        """Multiplicity order the histogram was computed with."""
        return self._multiplicity

    @property
    def bin_vol(self) -> Number:
        """Volume of a single histogram bin."""
        return self._bin_vol

    @property
    def axis(self) -> str:
        """Projection axis ("x", "y", or "z")."""
        return self._axis

    @property
    def histogram(self) -> ndarray:
        """The computed 2D histogram of counts."""
        return self._histogram

    @property
    def histogram_extents(self) -> ndarray:
        """Extents of the two in-plane axes, as ((min1, max1), (min2, max2))."""
        return self._histogram_extents

    def _calculate_histogram(self):
        # Column indices of the two axes perpendicular to the projection
        # axis, preserving x -> y -> z order.
        axis_cols = {"x": 0, "y": 1, "z": 2}
        keep = [col for name, col in axis_cols.items() if name != self.axis]
        ax1, ax2 = (self.roi.xyz[:, col] for col in keep)
        ext = tuple(self.roi.xyz_extents[col] for col in keep)
        if self.multiplicity != "all":
            # Restrict to hits of the requested multiplicity order.
            idx = get_mass_indices(self.roi.misc["ipp"], self.multiplicity)
            ax1, ax2 = ax1[idx], ax2[idx]
        self._histogram = histogram2d_binwidth(ax1, ax2, ext, self.bin_width)
        self._histogram_extents = ext
/NVDA-addonTemplate-0.5.2.zip/NVDA-addonTemplate-0.5.2/NVDAAddonTemplate/data/{{cookiecutter.project_slug}}/scons-local-2.5.0/SCons/Node/Python.py |
__revision__ = "src/engine/SCons/Node/Python.py rel_2.5.0:3543:937e55cd78f7 2016/04/09 11:29:54 bdbaddog"
import SCons.Node
class ValueNodeInfo(SCons.Node.NodeInfoBase):
    """NodeInfo record for Value nodes.

    Only the content signature (csig) is tracked, since a Value has no
    file timestamp or other filesystem metadata.
    """
    __slots__ = ('csig',)
    current_version_id = 2  # bump when the pickled layout changes

    field_list = ['csig']

    def str_to_node(self, s):
        # Re-create a Value node from its pickled string form.
        return Value(s)

    def __getstate__(self):
        """
        Return all fields that shall be pickled. Walk the slots in the class
        hierarchy and add those to the state dictionary. If a '__dict__' slot is
        available, copy all entries to the dictionary. Also include the version
        id, which is fixed for all instances of a class.
        """
        state = getattr(self, '__dict__', {}).copy()
        for obj in type(self).mro():
            for name in getattr(obj,'__slots__',()):
                if hasattr(self, name):
                    state[name] = getattr(self, name)
        state['_version_id'] = self.current_version_id
        try:
            # Weakrefs cannot (and need not) be pickled.
            del state['__weakref__']
        except KeyError:
            pass
        return state

    def __setstate__(self, state):
        """
        Restore the attributes from a pickled state.
        """
        # TODO check or discard version
        del state['_version_id']
        for key, value in state.items():
            if key not in ('__weakref__',):
                setattr(self, key, value)
class ValueBuildInfo(SCons.Node.BuildInfoBase):
    # Build-info record for Value nodes; no fields beyond the base class.
    __slots__ = ()
    current_version_id = 2  # bump when the pickled layout changes
class Value(SCons.Node.Node):
    """A class for Python variables, typically passed on the command line
    or generated by a script, but not from a file or some other source.
    """

    NodeInfo = ValueNodeInfo
    BuildInfo = ValueBuildInfo

    def __init__(self, value, built_value=None):
        SCons.Node.Node.__init__(self)
        self.value = value
        # NOTE(review): these look like indices into SCons.Node's decider /
        # store-info dispatch maps rather than booleans — confirm against
        # SCons.Node before changing either value.
        self.changed_since_last_build = 6
        self.store_info = 0

        if built_value is not None:
            self.built_value = built_value

    def str_for_display(self):
        # repr() so that string values show their quotes in display output.
        return repr(self.value)

    def __str__(self):
        return str(self.value)

    def make_ready(self):
        # Compute the content signature up front so dependents can use it.
        self.get_csig()

    def build(self, **kw):
        # Only build when a built value has not been supplied already.
        if not hasattr(self, 'built_value'):
            SCons.Node.Node.build(self, **kw)

    # A Value is up to date exactly when all of its children are.
    is_up_to_date = SCons.Node.Node.children_are_up_to_date

    def is_under(self, dir):
        # Make Value nodes get built regardless of
        # what directory scons was run from. Value nodes
        # are outside the filesystem:
        return 1

    def write(self, built_value):
        """Set the value of the node."""
        self.built_value = built_value

    def read(self):
        """Return the value. If necessary, the value is built."""
        self.build()
        if not hasattr(self, 'built_value'):
            self.built_value = self.value
        return self.built_value

    def get_text_contents(self):
        """By the assumption that the node.built_value is a
        deterministic product of the sources, the contents of a Value
        are the concatenation of all the contents of its sources. As
        the value need not be built when get_contents() is called, we
        cannot use the actual node.built_value."""
        ###TODO: something reasonable about universal newlines
        contents = str(self.value)
        for kid in self.children(None):
            contents = contents + kid.get_contents()
        return contents

    get_contents = get_text_contents    ###TODO should return 'bytes' value

    def changed_since_last_build(self, target, prev_ni):
        # Compare the current content signature against the stored one;
        # a missing previous signature counts as "changed".
        cur_csig = self.get_csig()
        try:
            return cur_csig != prev_ni.csig
        except AttributeError:
            return 1

    def get_csig(self, calc=None):
        """Because we're a Python value node and don't have a real
        timestamp, we get to ignore the calculator and just use the
        value contents."""
        try:
            # Reuse a previously computed signature when available.
            return self.ninfo.csig
        except AttributeError:
            pass
        contents = self.get_contents()
        self.get_ninfo().csig = contents
        return contents
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: | PypiClean |
/Files.com-1.0.1051-py3-none-any.whl/files_sdk/models/group_user.py | import builtins
import datetime
from files_sdk.api import Api
from files_sdk.list_obj import ListObj
from files_sdk.exceptions import InvalidParameterError, MissingParameterError, NotImplementedError
class GroupUser:
    """A single user's membership in a group (Files.com ``group_users`` resource)."""

    default_attributes = {
        'group_name': None, # string - Group name
        'group_id': None, # int64 - Group ID
        'user_id': None, # int64 - User ID
        'admin': None, # boolean - Is this user an administrator of this group?
        'usernames': None, # array - A list of usernames for users in this group
        'id': None, # int64 - Group User ID.
    }

    def __init__(self, attributes=None, options=None):
        if not isinstance(attributes, dict):
            attributes = {}
        if not isinstance(options, dict):
            options = {}
        self.set_attributes(attributes)
        self.options = options

    def set_attributes(self, attributes):
        # Populate every known attribute, falling back to its default.
        for (attribute, default_value) in GroupUser.default_attributes.items():
            setattr(self, attribute, attributes.get(attribute, default_value))

    def get_attributes(self):
        # Only attributes that have been set (non-None) are returned.
        return {k: getattr(self, k, None) for k in GroupUser.default_attributes if getattr(self, k, None) is not None}

    # Parameters:
    #   group_id (required) - int64 - Group ID to add user to.
    #   user_id (required) - int64 - User ID to add to group.
    #   admin - boolean - Is the user a group administrator?
    def update(self, params = None):
        """Update this membership via PATCH; requires the record to have an id."""
        if not isinstance(params, dict):
            params = {}
        if hasattr(self, "id") and self.id:
            params['id'] = self.id
        else:
            raise MissingParameterError("Current object doesn't have a id")
        if "id" not in params:
            raise MissingParameterError("Parameter missing: id")
        if "group_id" not in params:
            raise MissingParameterError("Parameter missing: group_id")
        if "user_id" not in params:
            raise MissingParameterError("Parameter missing: user_id")
        if "id" in params and not isinstance(params["id"], int):
            raise InvalidParameterError("Bad parameter: id must be an int")
        if "group_id" in params and not isinstance(params["group_id"], int):
            raise InvalidParameterError("Bad parameter: group_id must be an int")
        if "user_id" in params and not isinstance(params["user_id"], int):
            raise InvalidParameterError("Bad parameter: user_id must be an int")
        response, _options = Api.send_request("PATCH", "/group_users/{id}".format(id=params['id']), params, self.options)
        return response.data

    # Parameters:
    #   group_id (required) - int64 - Group ID from which to remove user.
    #   user_id (required) - int64 - User ID to remove from group.
    def delete(self, params = None):
        """Delete this membership via DELETE; requires the record to have an id."""
        if not isinstance(params, dict):
            params = {}
        if hasattr(self, "id") and self.id:
            params['id'] = self.id
        else:
            raise MissingParameterError("Current object doesn't have a id")
        if "id" not in params:
            raise MissingParameterError("Parameter missing: id")
        if "group_id" not in params:
            raise MissingParameterError("Parameter missing: group_id")
        if "user_id" not in params:
            raise MissingParameterError("Parameter missing: user_id")
        if "id" in params and not isinstance(params["id"], int):
            raise InvalidParameterError("Bad parameter: id must be an int")
        if "group_id" in params and not isinstance(params["group_id"], int):
            raise InvalidParameterError("Bad parameter: group_id must be an int")
        if "user_id" in params and not isinstance(params["user_id"], int):
            raise InvalidParameterError("Bad parameter: user_id must be an int")
        response, _options = Api.send_request("DELETE", "/group_users/{id}".format(id=params['id']), params, self.options)
        return response.data

    def destroy(self, params = None):
        # Alias of delete(); bug fix: the API response was previously discarded.
        return self.delete(params)

    def save(self):
        # Update when the record already has an id, otherwise create it and
        # refresh this object's attributes from the server response.
        if hasattr(self, "id") and self.id:
            self.update(self.get_attributes())
        else:
            new_obj = create(self.get_attributes(), self.options)
            self.set_attributes(new_obj.get_attributes())
# Parameters:
# user_id - int64 - User ID. If provided, will return group_users of this user.
# cursor - string - Used for pagination. When a list request has more records available, cursors are provided in the response headers `X-Files-Cursor-Next` and `X-Files-Cursor-Prev`. Send one of those cursor value here to resume an existing list from the next available record. Note: many of our SDKs have iterator methods that will automatically handle cursor-based pagination.
# per_page - int64 - Number of records to show per page. (Max: 10,000, 1,000 or less is recommended).
# group_id - int64 - Group ID. If provided, will return group_users of this group.
def list(params = None, options = None):
    """Return a paginated listing of group users as a ListObj.

    Supported params: user_id, cursor, per_page, group_id (see the
    parameter comments above).
    """
    params = params if isinstance(params, dict) else {}
    options = options if isinstance(options, dict) else {}
    # Validate parameter types in a fixed order so the first offending
    # parameter is the one reported.
    checks = (
        ("user_id", int, "int"),
        ("cursor", str, "str"),
        ("per_page", int, "int"),
        ("group_id", int, "int"),
    )
    for name, expected_type, label in checks:
        if name in params and not isinstance(params[name], expected_type):
            raise InvalidParameterError("Bad parameter: {} must be an {}".format(name, label))
    return ListObj(GroupUser,"GET", "/group_users", params, options)
def all(params = None, options = None):
    """Alias of list().

    Bug fix: the result of list() was previously discarded, so all()
    always returned None; propagate the ListObj to the caller.
    """
    return list(params, options)
# Parameters:
# group_id (required) - int64 - Group ID to add user to.
# user_id (required) - int64 - User ID to add to group.
# admin - boolean - Is the user a group administrator?
def create(params = None, options = None):
    """Create a group-user membership; ``group_id`` and ``user_id`` are required."""
    params = params if isinstance(params, dict) else {}
    options = options if isinstance(options, dict) else {}
    required = ("group_id", "user_id")
    # Type checks first (matching the original validation order) ...
    for name in required:
        if name in params and not isinstance(params[name], int):
            raise InvalidParameterError("Bad parameter: {} must be an int".format(name))
    # ... then presence checks.
    for name in required:
        if name not in params:
            raise MissingParameterError("Parameter missing: {}".format(name))
    response, options = Api.send_request("POST", "/group_users", params, options)
    return GroupUser(response.data, options)
# Parameters:
# group_id (required) - int64 - Group ID to add user to.
# user_id (required) - int64 - User ID to add to group.
# admin - boolean - Is the user a group administrator?
def update(id, params = None, options = None):
    """Update a group-user membership by id; ``group_id`` and ``user_id`` are required."""
    params = params if isinstance(params, dict) else {}
    options = options if isinstance(options, dict) else {}
    params["id"] = id
    required = ("id", "group_id", "user_id")
    # Type checks first (matching the original validation order) ...
    for name in required:
        if name in params and not isinstance(params[name], int):
            raise InvalidParameterError("Bad parameter: {} must be an int".format(name))
    # ... then presence checks.
    for name in required:
        if name not in params:
            raise MissingParameterError("Parameter missing: {}".format(name))
    response, options = Api.send_request("PATCH", "/group_users/{id}".format(id=params['id']), params, options)
    return GroupUser(response.data, options)
# Parameters:
# group_id (required) - int64 - Group ID from which to remove user.
# user_id (required) - int64 - User ID to remove from group.
def delete(id, params = None, options = None):
    """Remove a group-user membership by id; ``group_id`` and ``user_id`` are required."""
    params = params if isinstance(params, dict) else {}
    options = options if isinstance(options, dict) else {}
    params["id"] = id
    required = ("id", "group_id", "user_id")
    # Type checks first (matching the original validation order) ...
    for name in required:
        if name in params and not isinstance(params[name], int):
            raise InvalidParameterError("Bad parameter: {} must be an int".format(name))
    # ... then presence checks.
    for name in required:
        if name not in params:
            raise MissingParameterError("Parameter missing: {}".format(name))
    response, _options = Api.send_request("DELETE", "/group_users/{id}".format(id=params['id']), params, options)
    return response.data
def destroy(id, params = None, options = None):
    """Alias of delete().

    Bug fix: the API response from delete() was previously discarded, so
    destroy() always returned None; propagate it to the caller.
    """
    return delete(id, params, options)
def new(*args, **kwargs):
    # Convenience constructor mirroring the other files_sdk model modules.
    return GroupUser(*args, **kwargs)
/Ion-0.6.4.tar.gz/Ion-0.6.4/ion/settings.py |
# Copyright Bernardo Heynemann <heynemann@gmail.com>
# Licensed under the Open Software License ("OSL") v. 3.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.opensource.org/licenses/osl-3.0.php
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os.path import abspath, join, exists
from ConfigParser import ConfigParser, NoSectionError, NoOptionError
class Settings(object):
    """Load an ini configuration file and expose its sections as attributes.

    Usage: ``Settings(root).load(); Settings(root).some_section.some_option``.
    """

    def __init__(self, root_dir):
        # Directory the config filename is resolved against.
        self.root_dir = root_dir
        # Parser instance; stays None until load() succeeds.
        self.config = None

    def load(self, filename="config.ini"):
        """Parse *filename* (relative to root_dir) into ``self.config``.

        Raises ValueError when the resolved path does not exist.
        """
        path = abspath(join(self.root_dir, filename))
        if not exists(path):
            raise ValueError("The specified path (%s) was not found!" % filename)
        parser = ConfigParser()
        parser.read(path)
        self.config = parser

    def __getattr__(self, name):
        """Return a SettingsSection proxy for section *name*.

        Raises RuntimeError when load() has not been called yet.
        """
        if self.config is None:
            raise RuntimeError("You can't use any settings before loading a config file. Please use the load method.")
        return SettingsSection(self, name, self.config)
class SettingsSection(object):
    """Proxy for one ini section: attribute access reads options as strings.

    Fixes: strips the scraper residue (`| PypiClean |`) fused onto the final
    return line (a syntax error), and merges the two identical except clauses
    in __getattr__.
    """

    def __init__(self, settings, name, config):
        # Owning Settings object (kept for back-reference only).
        self.settings = settings
        # Section name looked up in the parser.
        self.name = name
        # The ConfigParser instance holding the parsed file.
        self.config = config

    def as_int(self, config_name):
        """Return option *config_name* converted to an int."""
        return int(getattr(self, config_name))

    def as_bool(self, config_name):
        """Map a 'true'/'false' option (any case) to a bool; missing -> False.

        NOTE(review): values other than 'true'/'false' yield None (dict.get
        with no default) — confirm whether that is intended before changing.
        """
        value = getattr(self, config_name)
        if value is None:
            return False
        return {'true': True, 'false': False}.get(value.lower())

    def __getattr__(self, config_name):
        """Return the raw option string, or None when section/option is absent."""
        try:
            return self.config.get(self.name, config_name)
        except (NoSectionError, NoOptionError):
            return None
/BGT_Client-1.0.2-py3-none-any.whl/dgt_sdk/protobuf/client_batch_submit_pb2.py |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from dgt_sdk.protobuf import batch_pb2 as dgt__sdk_dot_protobuf_dot_batch__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='dgt_sdk/protobuf/client_batch_submit.proto',
package='',
syntax='proto3',
serialized_options=_b('\n\025sawtooth.sdk.protobufP\001Z\027client_batch_submit_pb2'),
serialized_pb=_b('\n*dgt_sdk/protobuf/client_batch_submit.proto\x1a\x1c\x64gt_sdk/protobuf/batch.proto\"\xbd\x02\n\x11\x43lientBatchStatus\x12\x10\n\x08\x62\x61tch_id\x18\x01 \x01(\t\x12)\n\x06status\x18\x02 \x01(\x0e\x32\x19.ClientBatchStatus.Status\x12\x43\n\x14invalid_transactions\x18\x03 \x03(\x0b\x32%.ClientBatchStatus.InvalidTransaction\x1aT\n\x12InvalidTransaction\x12\x16\n\x0etransaction_id\x18\x01 \x01(\t\x12\x0f\n\x07message\x18\x02 \x01(\t\x12\x15\n\rextended_data\x18\x03 \x01(\x0c\"P\n\x06Status\x12\x10\n\x0cSTATUS_UNSET\x10\x00\x12\r\n\tCOMMITTED\x10\x01\x12\x0b\n\x07INVALID\x10\x02\x12\x0b\n\x07PENDING\x10\x03\x12\x0b\n\x07UNKNOWN\x10\x04\"3\n\x18\x43lientBatchSubmitRequest\x12\x17\n\x07\x62\x61tches\x18\x01 \x03(\x0b\x32\x06.Batch\"\xa9\x01\n\x19\x43lientBatchSubmitResponse\x12\x31\n\x06status\x18\x01 \x01(\x0e\x32!.ClientBatchSubmitResponse.Status\"Y\n\x06Status\x12\x10\n\x0cSTATUS_UNSET\x10\x00\x12\x06\n\x02OK\x10\x01\x12\x12\n\x0eINTERNAL_ERROR\x10\x02\x12\x11\n\rINVALID_BATCH\x10\x03\x12\x0e\n\nQUEUE_FULL\x10\x04\"L\n\x18\x43lientBatchStatusRequest\x12\x11\n\tbatch_ids\x18\x01 \x03(\t\x12\x0c\n\x04wait\x18\x02 \x01(\x08\x12\x0f\n\x07timeout\x18\x03 \x01(\r\"\xd3\x01\n\x19\x43lientBatchStatusResponse\x12\x31\n\x06status\x18\x01 \x01(\x0e\x32!.ClientBatchStatusResponse.Status\x12*\n\x0e\x62\x61tch_statuses\x18\x02 \x03(\x0b\x32\x12.ClientBatchStatus\"W\n\x06Status\x12\x10\n\x0cSTATUS_UNSET\x10\x00\x12\x06\n\x02OK\x10\x01\x12\x12\n\x0eINTERNAL_ERROR\x10\x02\x12\x0f\n\x0bNO_RESOURCE\x10\x05\x12\x0e\n\nINVALID_ID\x10\x08\x42\x32\n\x15sawtooth.sdk.protobufP\x01Z\x17\x63lient_batch_submit_pb2b\x06proto3')
,
dependencies=[dgt__sdk_dot_protobuf_dot_batch__pb2.DESCRIPTOR,])
_CLIENTBATCHSTATUS_STATUS = _descriptor.EnumDescriptor(
name='Status',
full_name='ClientBatchStatus.Status',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='STATUS_UNSET', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='COMMITTED', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVALID', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PENDING', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=4, number=4,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=314,
serialized_end=394,
)
_sym_db.RegisterEnumDescriptor(_CLIENTBATCHSTATUS_STATUS)
_CLIENTBATCHSUBMITRESPONSE_STATUS = _descriptor.EnumDescriptor(
name='Status',
full_name='ClientBatchSubmitResponse.Status',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='STATUS_UNSET', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OK', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INTERNAL_ERROR', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVALID_BATCH', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='QUEUE_FULL', index=4, number=4,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=530,
serialized_end=619,
)
_sym_db.RegisterEnumDescriptor(_CLIENTBATCHSUBMITRESPONSE_STATUS)
_CLIENTBATCHSTATUSRESPONSE_STATUS = _descriptor.EnumDescriptor(
name='Status',
full_name='ClientBatchStatusResponse.Status',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='STATUS_UNSET', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OK', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INTERNAL_ERROR', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NO_RESOURCE', index=3, number=5,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVALID_ID', index=4, number=8,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=824,
serialized_end=911,
)
_sym_db.RegisterEnumDescriptor(_CLIENTBATCHSTATUSRESPONSE_STATUS)
_CLIENTBATCHSTATUS_INVALIDTRANSACTION = _descriptor.Descriptor(
name='InvalidTransaction',
full_name='ClientBatchStatus.InvalidTransaction',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='transaction_id', full_name='ClientBatchStatus.InvalidTransaction.transaction_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='message', full_name='ClientBatchStatus.InvalidTransaction.message', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='extended_data', full_name='ClientBatchStatus.InvalidTransaction.extended_data', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=228,
serialized_end=312,
)
_CLIENTBATCHSTATUS = _descriptor.Descriptor(
name='ClientBatchStatus',
full_name='ClientBatchStatus',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='batch_id', full_name='ClientBatchStatus.batch_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status', full_name='ClientBatchStatus.status', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='invalid_transactions', full_name='ClientBatchStatus.invalid_transactions', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_CLIENTBATCHSTATUS_INVALIDTRANSACTION, ],
enum_types=[
_CLIENTBATCHSTATUS_STATUS,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=77,
serialized_end=394,
)
_CLIENTBATCHSUBMITREQUEST = _descriptor.Descriptor(
name='ClientBatchSubmitRequest',
full_name='ClientBatchSubmitRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='batches', full_name='ClientBatchSubmitRequest.batches', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=396,
serialized_end=447,
)
_CLIENTBATCHSUBMITRESPONSE = _descriptor.Descriptor(
name='ClientBatchSubmitResponse',
full_name='ClientBatchSubmitResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='ClientBatchSubmitResponse.status', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_CLIENTBATCHSUBMITRESPONSE_STATUS,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=450,
serialized_end=619,
)
_CLIENTBATCHSTATUSREQUEST = _descriptor.Descriptor(
name='ClientBatchStatusRequest',
full_name='ClientBatchStatusRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='batch_ids', full_name='ClientBatchStatusRequest.batch_ids', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='wait', full_name='ClientBatchStatusRequest.wait', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='timeout', full_name='ClientBatchStatusRequest.timeout', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=621,
serialized_end=697,
)
_CLIENTBATCHSTATUSRESPONSE = _descriptor.Descriptor(
name='ClientBatchStatusResponse',
full_name='ClientBatchStatusResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='ClientBatchStatusResponse.status', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batch_statuses', full_name='ClientBatchStatusResponse.batch_statuses', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_CLIENTBATCHSTATUSRESPONSE_STATUS,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=700,
serialized_end=911,
)
# Machine-generated by protoc -- do not hand-edit.
# Wire up the cross-references protoc emits after the descriptor definitions:
# nested-type and enum containment, field message/enum types, and registration
# of every top-level message type on the file DESCRIPTOR.
_CLIENTBATCHSTATUS_INVALIDTRANSACTION.containing_type = _CLIENTBATCHSTATUS
_CLIENTBATCHSTATUS.fields_by_name['status'].enum_type = _CLIENTBATCHSTATUS_STATUS
_CLIENTBATCHSTATUS.fields_by_name['invalid_transactions'].message_type = _CLIENTBATCHSTATUS_INVALIDTRANSACTION
_CLIENTBATCHSTATUS_STATUS.containing_type = _CLIENTBATCHSTATUS
# 'batches' is a repeated Batch message imported from batch_pb2.
_CLIENTBATCHSUBMITREQUEST.fields_by_name['batches'].message_type = dgt__sdk_dot_protobuf_dot_batch__pb2._BATCH
_CLIENTBATCHSUBMITRESPONSE.fields_by_name['status'].enum_type = _CLIENTBATCHSUBMITRESPONSE_STATUS
_CLIENTBATCHSUBMITRESPONSE_STATUS.containing_type = _CLIENTBATCHSUBMITRESPONSE
_CLIENTBATCHSTATUSRESPONSE.fields_by_name['status'].enum_type = _CLIENTBATCHSTATUSRESPONSE_STATUS
_CLIENTBATCHSTATUSRESPONSE.fields_by_name['batch_statuses'].message_type = _CLIENTBATCHSTATUS
_CLIENTBATCHSTATUSRESPONSE_STATUS.containing_type = _CLIENTBATCHSTATUSRESPONSE
DESCRIPTOR.message_types_by_name['ClientBatchStatus'] = _CLIENTBATCHSTATUS
DESCRIPTOR.message_types_by_name['ClientBatchSubmitRequest'] = _CLIENTBATCHSUBMITREQUEST
DESCRIPTOR.message_types_by_name['ClientBatchSubmitResponse'] = _CLIENTBATCHSUBMITRESPONSE
DESCRIPTOR.message_types_by_name['ClientBatchStatusRequest'] = _CLIENTBATCHSTATUSREQUEST
DESCRIPTOR.message_types_by_name['ClientBatchStatusResponse'] = _CLIENTBATCHSTATUSRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ClientBatchStatus = _reflection.GeneratedProtocolMessageType('ClientBatchStatus', (_message.Message,), dict(
InvalidTransaction = _reflection.GeneratedProtocolMessageType('InvalidTransaction', (_message.Message,), dict(
DESCRIPTOR = _CLIENTBATCHSTATUS_INVALIDTRANSACTION,
__module__ = 'dgt_sdk.protobuf.client_batch_submit_pb2'
# @@protoc_insertion_point(class_scope:ClientBatchStatus.InvalidTransaction)
))
,
DESCRIPTOR = _CLIENTBATCHSTATUS,
__module__ = 'dgt_sdk.protobuf.client_batch_submit_pb2'
# @@protoc_insertion_point(class_scope:ClientBatchStatus)
))
_sym_db.RegisterMessage(ClientBatchStatus)
_sym_db.RegisterMessage(ClientBatchStatus.InvalidTransaction)
ClientBatchSubmitRequest = _reflection.GeneratedProtocolMessageType('ClientBatchSubmitRequest', (_message.Message,), dict(
DESCRIPTOR = _CLIENTBATCHSUBMITREQUEST,
__module__ = 'dgt_sdk.protobuf.client_batch_submit_pb2'
# @@protoc_insertion_point(class_scope:ClientBatchSubmitRequest)
))
_sym_db.RegisterMessage(ClientBatchSubmitRequest)
ClientBatchSubmitResponse = _reflection.GeneratedProtocolMessageType('ClientBatchSubmitResponse', (_message.Message,), dict(
DESCRIPTOR = _CLIENTBATCHSUBMITRESPONSE,
__module__ = 'dgt_sdk.protobuf.client_batch_submit_pb2'
# @@protoc_insertion_point(class_scope:ClientBatchSubmitResponse)
))
_sym_db.RegisterMessage(ClientBatchSubmitResponse)
ClientBatchStatusRequest = _reflection.GeneratedProtocolMessageType('ClientBatchStatusRequest', (_message.Message,), dict(
DESCRIPTOR = _CLIENTBATCHSTATUSREQUEST,
__module__ = 'dgt_sdk.protobuf.client_batch_submit_pb2'
# @@protoc_insertion_point(class_scope:ClientBatchStatusRequest)
))
_sym_db.RegisterMessage(ClientBatchStatusRequest)
ClientBatchStatusResponse = _reflection.GeneratedProtocolMessageType('ClientBatchStatusResponse', (_message.Message,), dict(
DESCRIPTOR = _CLIENTBATCHSTATUSRESPONSE,
__module__ = 'dgt_sdk.protobuf.client_batch_submit_pb2'
# @@protoc_insertion_point(class_scope:ClientBatchStatusResponse)
))
_sym_db.RegisterMessage(ClientBatchStatusResponse)
# Clear the serialized options now that they have been applied.
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
HitBTC Library
-------------------
A library for better communication with HitBTC Exchange API
(`HitBTC API Documentation <https://api.hitbtc.com>`_)
Installation:
~~~~~~~~~~~~~~~
.. code:: bash
pip install HitBTCMonster
Example:
~~~~~~~~~
API
**********
.. code:: python
from HitBTCMonster.api.core import HitBTC
from HitBTCMonster.api.market import Market
from HitBTCMonster.api.trading import Trading
CORE = HitBTC(
public='YOUR_PUBLIC_KEY_HERE',
secret='YOUR_SECRET_KEY_HERE',
)
MARKET = Market(CORE)
TRADING = Trading(CORE)
# do stuff
WebSocket
****************
.. code:: python
from HitBTCMonster.wss.core import HitBTC
from HitBTCMonster.wss.market import Market
from HitBTCMonster.wss.trading import Trading
CORE = HitBTC(
public='YOUR_PUBLIC_KEY_HERE',
secret='YOUR_SECRET_KEY_HERE',
)
MARKET = Market(CORE)
TRADING = Trading(CORE)
    # do stuff
# MilkyWay
MilkyWay is an open source API for Robotics Path Planning
## Example of using the lib:
```python
# Import the main classes
from milkyway import Waypoint, Spline
# Create all Waypoints
a = Waypoint(0, 0, angle=0,k=2)
b = Waypoint(1, 1, points=20, der2=0)
c = Waypoint(1, 2, angle=90)
# Make them into a spline
spline = Spline(a, b, c)
# Plot them
spline.plot()
```
Easy!
## Main classes in MilkyWay:
MilkyWay lets you use two main classes:
- `Waypoint` - A class representing a point in 2D space with some more info
- `Spline` - A class representing a path between Waypoints
### Waypoints:
Waypoints are the base for every trajectory, you can create them like so:
```python
point = Waypoint(1, 0, angle=90, points=20, der2=0, k=2)
```
The arguments for a Waypoints are the following:
- We specify location (1, 0).
- You can specify the angle with the `angle` parameter.
- In degrees.
- If you don't specify, MilkyWay will auto configure the angle to a continuous one.
- You can specify the sample points for the following trajectory (Until the next Waypoint) with the `points` parameter.
- You can specify the second derivative (advanced, don't touch or put 0) for better control of the spline, with the `der2` parameter.
- You can specify the "curviness" of the parametric function with the `k` parameter.
### Spline:
Splines are the "glue" for Waypoints, Spline is an easy to use class:
```python
spline = Spline(point1, point2, point3, ...)
```
The parameters for the Spline are the Waypoints, the class does the rest.
Some functions you might use:
`get_linear_points` will return all points between all Waypoints.
```python
spline.get_linear_points()
```
The `plot` function lets you view the spline; the `scatter` parameter sets the point scattering
```python
spline.plot(scatter=False)
```
| PypiClean |
/Django_patch-2.2.19-py3-none-any.whl/django/middleware/csrf.py | import logging
import re
import string
from urllib.parse import urlparse
from django.conf import settings
from django.core.exceptions import DisallowedHost, ImproperlyConfigured
from django.urls import get_callable
from django.utils.cache import patch_vary_headers
from django.utils.crypto import constant_time_compare, get_random_string
from django.utils.deprecation import MiddlewareMixin
from django.utils.http import is_same_domain
from django.utils.log import log_response
logger = logging.getLogger('django.security.csrf')
REASON_NO_REFERER = "Referer checking failed - no Referer."
REASON_BAD_REFERER = "Referer checking failed - %s does not match any trusted origins."
REASON_NO_CSRF_COOKIE = "CSRF cookie not set."
REASON_BAD_TOKEN = "CSRF token missing or incorrect."
REASON_MALFORMED_REFERER = "Referer checking failed - Referer is malformed."
REASON_INSECURE_REFERER = "Referer checking failed - Referer is insecure while host is secure."
CSRF_SECRET_LENGTH = 32
CSRF_TOKEN_LENGTH = 2 * CSRF_SECRET_LENGTH
CSRF_ALLOWED_CHARS = string.ascii_letters + string.digits
CSRF_SESSION_KEY = '_csrftoken'
def _get_failure_view():
"""Return the view to be used for CSRF rejections."""
return get_callable(settings.CSRF_FAILURE_VIEW)
def _get_new_csrf_string():
return get_random_string(CSRF_SECRET_LENGTH, allowed_chars=CSRF_ALLOWED_CHARS)
def _salt_cipher_secret(secret):
"""
Given a secret (assumed to be a string of CSRF_ALLOWED_CHARS), generate a
token by adding a salt and using it to encrypt the secret.
"""
salt = _get_new_csrf_string()
chars = CSRF_ALLOWED_CHARS
pairs = zip((chars.index(x) for x in secret), (chars.index(x) for x in salt))
cipher = ''.join(chars[(x + y) % len(chars)] for x, y in pairs)
return salt + cipher
def _unsalt_cipher_token(token):
"""
Given a token (assumed to be a string of CSRF_ALLOWED_CHARS, of length
CSRF_TOKEN_LENGTH, and that its first half is a salt), use it to decrypt
the second half to produce the original secret.
"""
salt = token[:CSRF_SECRET_LENGTH]
token = token[CSRF_SECRET_LENGTH:]
chars = CSRF_ALLOWED_CHARS
pairs = zip((chars.index(x) for x in token), (chars.index(x) for x in salt))
secret = ''.join(chars[x - y] for x, y in pairs) # Note negative values are ok
return secret
def _get_new_csrf_token():
return _salt_cipher_secret(_get_new_csrf_string())
def get_token(request):
    """
    Return the CSRF token required for a POST form, creating and storing a
    new secret in request.META["CSRF_COOKIE"] when none exists yet.

    Calling this marks the request (CSRF_COOKIE_USED) so that the
    csrf_protect decorator / CsrfViewMiddleware emit the CSRF cookie and a
    'Vary: Cookie' header on the response; use it lazily (as the csrf
    context processor does) when that side effect must be deferred.
    """
    if "CSRF_COOKIE" in request.META:
        csrf_secret = _unsalt_cipher_token(request.META["CSRF_COOKIE"])
    else:
        csrf_secret = _get_new_csrf_string()
        request.META["CSRF_COOKIE"] = _salt_cipher_secret(csrf_secret)
    request.META["CSRF_COOKIE_USED"] = True
    # Re-salt on every call so the form token differs from the cookie value.
    return _salt_cipher_secret(csrf_secret)
def rotate_token(request):
    """
    Replace the CSRF token in use for a request - should be done on login
    for security purposes.
    """
    request.META["CSRF_COOKIE"] = _get_new_csrf_token()
    request.META["CSRF_COOKIE_USED"] = True
    # Force the middleware to resend the cookie with the new token.
    request.csrf_cookie_needs_reset = True
def _sanitize_token(token):
    """Return *token* if it is a well-formed CSRF token, else a replacement."""
    # Allow only ASCII alphanumerics; anything else gets a brand new token.
    if re.search('[^a-zA-Z0-9]', token):
        return _get_new_csrf_token()
    token_length = len(token)
    if token_length == CSRF_TOKEN_LENGTH:
        return token
    if token_length == CSRF_SECRET_LENGTH:
        # Older Django versions set cookies to values of CSRF_SECRET_LENGTH
        # alphanumeric characters. For backwards compatibility, accept
        # such values as unsalted secrets; salting here keeps the later
        # checks uniform rather than adding separate code paths.
        return _salt_cipher_secret(token)
    return _get_new_csrf_token()
def _compare_salted_tokens(request_csrf_token, csrf_token):
    """Constant-time comparison of two salted tokens via their secrets.

    Both arguments are assumed sanitized: CSRF_TOKEN_LENGTH characters drawn
    from CSRF_ALLOWED_CHARS.
    """
    left = _unsalt_cipher_token(request_csrf_token)
    right = _unsalt_cipher_token(csrf_token)
    return constant_time_compare(left, right)
class CsrfViewMiddleware(MiddlewareMixin):
    """
    Require a present and correct csrfmiddlewaretoken for POST requests that
    have a CSRF cookie, and set an outgoing CSRF cookie.

    This middleware should be used in conjunction with the {% csrf_token %}
    template tag.

    Fix: stripped the scraper residue (`| PypiClean |`) fused onto the final
    line of process_response, which made the module unparseable.
    """
    # The _accept and _reject methods currently only exist for the sake of the
    # requires_csrf_token decorator.
    def _accept(self, request):
        """Mark the request as checked and let it through."""
        # Avoid checking the request twice by adding a custom attribute to
        # request. This will be relevant when both decorator and middleware
        # are used.
        request.csrf_processing_done = True
        return None

    def _reject(self, request, reason):
        """Render the configured CSRF failure view and log the rejection."""
        response = _get_failure_view()(request, reason=reason)
        log_response(
            'Forbidden (%s): %s', reason, request.path,
            response=response,
            request=request,
            logger=logger,
        )
        return response

    def _get_token(self, request):
        """Return the sanitized CSRF token from the session or cookie, or None."""
        if settings.CSRF_USE_SESSIONS:
            try:
                return request.session.get(CSRF_SESSION_KEY)
            except AttributeError:
                raise ImproperlyConfigured(
                    'CSRF_USE_SESSIONS is enabled, but request.session is not '
                    'set. SessionMiddleware must appear before CsrfViewMiddleware '
                    'in MIDDLEWARE%s.' % ('_CLASSES' if settings.MIDDLEWARE is None else '')
                )
        else:
            try:
                cookie_token = request.COOKIES[settings.CSRF_COOKIE_NAME]
            except KeyError:
                return None

            csrf_token = _sanitize_token(cookie_token)
            if csrf_token != cookie_token:
                # Cookie token needed to be replaced;
                # the cookie needs to be reset.
                request.csrf_cookie_needs_reset = True
            return csrf_token

    def _set_token(self, request, response):
        """Persist request.META['CSRF_COOKIE'] into the session or a cookie."""
        if settings.CSRF_USE_SESSIONS:
            if request.session.get(CSRF_SESSION_KEY) != request.META['CSRF_COOKIE']:
                request.session[CSRF_SESSION_KEY] = request.META['CSRF_COOKIE']
        else:
            response.set_cookie(
                settings.CSRF_COOKIE_NAME,
                request.META['CSRF_COOKIE'],
                max_age=settings.CSRF_COOKIE_AGE,
                domain=settings.CSRF_COOKIE_DOMAIN,
                path=settings.CSRF_COOKIE_PATH,
                secure=settings.CSRF_COOKIE_SECURE,
                httponly=settings.CSRF_COOKIE_HTTPONLY,
                samesite=settings.CSRF_COOKIE_SAMESITE,
            )
            # Set the Vary header since content varies with the CSRF cookie.
            patch_vary_headers(response, ('Cookie',))

    def process_request(self, request):
        """Stash the incoming CSRF token in request.META for later checks."""
        csrf_token = self._get_token(request)
        if csrf_token is not None:
            # Use same token next time.
            request.META['CSRF_COOKIE'] = csrf_token

    def process_view(self, request, callback, callback_args, callback_kwargs):
        """Run the CSRF checks for unsafe methods; accept or reject the request."""
        if getattr(request, 'csrf_processing_done', False):
            return None

        # Wait until request.META["CSRF_COOKIE"] has been manipulated before
        # bailing out, so that get_token still works
        if getattr(callback, 'csrf_exempt', False):
            return None

        # Assume that anything not defined as 'safe' by RFC7231 needs protection
        if request.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):
            if getattr(request, '_dont_enforce_csrf_checks', False):
                # Mechanism to turn off CSRF checks for test suite.
                # It comes after the creation of CSRF cookies, so that
                # everything else continues to work exactly the same
                # (e.g. cookies are sent, etc.), but before any
                # branches that call reject().
                return self._accept(request)

            if request.is_secure():
                # Suppose user visits http://example.com/
                # An active network attacker (man-in-the-middle, MITM) sends a
                # POST form that targets https://example.com/detonate-bomb/ and
                # submits it via JavaScript.
                #
                # The attacker will need to provide a CSRF cookie and token, but
                # that's no problem for a MITM and the session-independent
                # secret we're using. So the MITM can circumvent the CSRF
                # protection. This is true for any HTTP connection, but anyone
                # using HTTPS expects better! For this reason, for
                # https://example.com/ we need additional protection that treats
                # http://example.com/ as completely untrusted. Under HTTPS,
                # Barth et al. found that the Referer header is missing for
                # same-domain requests in only about 0.2% of cases or less, so
                # we can use strict Referer checking.
                referer = request.META.get('HTTP_REFERER')
                if referer is None:
                    return self._reject(request, REASON_NO_REFERER)

                referer = urlparse(referer)

                # Make sure we have a valid URL for Referer.
                if '' in (referer.scheme, referer.netloc):
                    return self._reject(request, REASON_MALFORMED_REFERER)

                # Ensure that our Referer is also secure.
                if referer.scheme != 'https':
                    return self._reject(request, REASON_INSECURE_REFERER)

                # If there isn't a CSRF_COOKIE_DOMAIN, require an exact match
                # match on host:port. If not, obey the cookie rules (or those
                # for the session cookie, if CSRF_USE_SESSIONS).
                good_referer = (
                    settings.SESSION_COOKIE_DOMAIN
                    if settings.CSRF_USE_SESSIONS
                    else settings.CSRF_COOKIE_DOMAIN
                )
                if good_referer is not None:
                    server_port = request.get_port()
                    if server_port not in ('443', '80'):
                        good_referer = '%s:%s' % (good_referer, server_port)
                else:
                    try:
                        # request.get_host() includes the port.
                        good_referer = request.get_host()
                    except DisallowedHost:
                        pass

                # Create a list of all acceptable HTTP referers, including the
                # current host if it's permitted by ALLOWED_HOSTS.
                good_hosts = list(settings.CSRF_TRUSTED_ORIGINS)
                if good_referer is not None:
                    good_hosts.append(good_referer)

                if not any(is_same_domain(referer.netloc, host) for host in good_hosts):
                    reason = REASON_BAD_REFERER % referer.geturl()
                    return self._reject(request, reason)

            csrf_token = request.META.get('CSRF_COOKIE')
            if csrf_token is None:
                # No CSRF cookie. For POST requests, we insist on a CSRF cookie,
                # and in this way we can avoid all CSRF attacks, including login
                # CSRF.
                return self._reject(request, REASON_NO_CSRF_COOKIE)

            # Check non-cookie token for match.
            request_csrf_token = ""
            if request.method == "POST":
                try:
                    request_csrf_token = request.POST.get('csrfmiddlewaretoken', '')
                except IOError:
                    # Handle a broken connection before we've completed reading
                    # the POST data. process_view shouldn't raise any
                    # exceptions, so we'll ignore and serve the user a 403
                    # (assuming they're still listening, which they probably
                    # aren't because of the error).
                    pass

            if request_csrf_token == "":
                # Fall back to X-CSRFToken, to make things easier for AJAX,
                # and possible for PUT/DELETE.
                request_csrf_token = request.META.get(settings.CSRF_HEADER_NAME, '')

            request_csrf_token = _sanitize_token(request_csrf_token)
            if not _compare_salted_tokens(request_csrf_token, csrf_token):
                return self._reject(request, REASON_BAD_TOKEN)

        return self._accept(request)

    def process_response(self, request, response):
        """Send (or renew) the CSRF cookie when the token was used or reset."""
        if not getattr(request, 'csrf_cookie_needs_reset', False):
            if getattr(response, 'csrf_cookie_set', False):
                return response

        if not request.META.get("CSRF_COOKIE_USED", False):
            return response

        # Set the CSRF cookie even if it's already set, so we renew
        # the expiry timer.
        self._set_token(request, response)
        response.csrf_cookie_set = True
        return response
/FastCNN2-1.23.425.1716.tar.gz/FastCNN2-1.23.425.1716/FastCNN/prx/TrainProxy.py | from FastCNN.prx.DatasetProxy import DatasetProxy
from FastCNN.prx.PathProxy import PathProxy2 as PathProxy
from FastCNN.nn.neuralnets import getNeuralNet
from FastCNN.utils.CallBacks import MACallBack2 as MACallBack
from IutyLib.file.files import CsvFile
from IutyLib.commonutil.config import JConfig
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"]='3'
import tensorflow as tf
from PIL import Image,ImageStat,ImageEnhance
import numpy as np
import random
import shutil
import time
import json
import datetime
np.set_printoptions(threshold=np.inf)
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
from tensorflow.compat.v1 import GPUOptions
# Configure the TF1-compat GPU session: allow up to the whole GPU but grow
# allocations on demand instead of grabbing all memory up front.
tfconfig = ConfigProto()
tfconfig.gpu_options.per_process_gpu_memory_fraction = 1
tfconfig.gpu_options.allow_growth = True
# Module-level interactive session shared by the training code below.
session = InteractiveSession(config=tfconfig)
def getImageBright(img):
    """Return the mean luminance of a PIL image via grayscale conversion."""
    grayscale = img.convert('L')
    return ImageStat.Stat(grayscale).mean[0]
def readBmp(files, imgw=None, imgh=None, imgf=None):
    """Load, resize, brightness-normalize and scale a batch of image files.

    Cleanup: removed the commented-out debug code and the no-op triple-quoted
    string statement that littered the loop body; behavior is unchanged.

    Args:
        files: iterable of image file paths readable by PIL.
        imgw: target width in pixels (falsy -> 1000).
        imgh: target height in pixels (falsy -> 1000).
        imgf: image format hint; currently unused by this loader.

    Returns:
        np.ndarray stacking one array per file, values scaled to [0, 1];
        grayscale images get an explicit single-channel axis.
    """
    if not imgw:
        imgw = 1000
    if not imgh:
        imgh = 1000
    if not imgf:
        imgf = "bmp"
    image_group = []
    for file in files:
        img = Image.open(file)
        img = img.resize([imgw, imgh])
        # Normalize so every image ends up with a mean luminance of ~100.
        brightness = getImageBright(img)
        img = ImageEnhance.Brightness(img).enhance(100.0 / brightness)
        arr = np.array(img) / 255.
        if len(arr.shape) == 2:
            # Grayscale image: add a channel axis.
            # NOTE(review): np.array(img) is (height, width); reshaping to
            # (imgw, imgh, 1) only labels the axes correctly when
            # imgw == imgh -- confirm for non-square target sizes.
            arr = np.reshape(arr, (imgw, imgh, 1))
        image_group.append(arr)
    return np.array(image_group)
def get_train_batch(X_train, y_train, batch_size, img_w, img_h,img_f,endless = True):
    '''
    Infinite generator yielding (images, labels) batches for fit_generator.

    Args:
        X_train: list of all image file paths.
        y_train: labels aligned index-for-index with X_train.
        batch_size: number of samples per yielded batch.
        img_w: target image width passed to readBmp.
        img_h: target image height passed to readBmp.
        img_f: image format hint passed to readBmp.
        endless: NOTE(review): currently unused -- the generator always
            loops forever regardless of this flag; confirm intent.

    Yields:
        (np.ndarray, np.ndarray): decoded image batch and matching labels.
    '''
    queue_x = []
    queue_y = []
    # Shuffle paths and labels with the same seed so pairs stay aligned.
    seed = random.randint(1,30)
    random.seed(seed)
    random.shuffle(X_train)
    random.seed(seed)
    random.shuffle(y_train)
    queue_x += X_train
    queue_y += y_train
    while 1:
        # Refill the queue with the (already shuffled) data until a full
        # batch is available; the same order repeats every pass.
        while (len(queue_x) < batch_size):
            queue_x += X_train
            queue_y += y_train
        x = queue_x[0:batch_size]
        x = readBmp(x,img_w,img_h,img_f)
        y = queue_y[0:batch_size]
        queue_x = queue_x[batch_size:]
        queue_y = queue_y[batch_size:]
        yield(np.array(x), np.array(y))
class TrainProxy:
    """Namespace of static helpers that load a project's configuration and
    launch training of its neural-network model.

    The class is used purely as a namespace: every method is static and the
    class-level attributes below are legacy placeholders that nothing reads.
    """

    projectid = ""  # legacy placeholder, unused by the static methods
    modelid = ""    # legacy placeholder, unused by the static methods
    config = None   # legacy placeholder, unused by the static methods

    @staticmethod
    def getConfig(projectid, modelid):
        """Return the model-structure configuration loaded from the
        project's config file."""
        cfgpath = PathProxy.getConfigPath(projectid, modelid)
        return JConfig(cfgpath).get()

    @staticmethod
    def getSuperParam(projectid, modelid):
        """Return the hyper-parameter settings (epochs, batch size,
        learning rate, ...) loaded from the project's super-param file."""
        cfgpath = PathProxy.getSuperParamConfigPath(projectid, modelid)
        return JConfig(cfgpath).get()

    @staticmethod
    def startTrainModel(projectid, modelid):
        """Build the model described by the project configuration and train
        it with ``fit_generator``, resuming from a checkpoint when one
        exists on disk.

        Raises KeyError/ValueError when required configuration entries are
        missing or non-numeric.
        """
        config = TrainProxy.getConfig(projectid, modelid)
        trainset, validset, traintag, validtag = DatasetProxy.getData(projectid, modelid)
        superparam = TrainProxy.getSuperParam(projectid, modelid)

        epchos = int(superparam["Epcho"])
        batch = int(superparam["Batch"])
        width = int(config["Width"])
        height = int(config["Height"])
        learnrate = float(superparam["LearnRate"])
        formatter = config["Formatter"]

        # Validation batches are roughly a quarter of a training batch;
        # the "+ 1" guarantees a non-empty batch for small batch sizes.
        test_batch = get_train_batch(validset, validtag, int(batch / 4) + 1,
                                     width, height, formatter)

        model = getNeuralNet(config, superparam)

        # Resume from the training checkpoint when one was saved earlier.
        train_ckpt = PathProxy.getTrainCKPT(projectid, modelid)
        valid_ckpt = PathProxy.getValidCKPT(projectid, modelid)  # kept: call may ensure the path exists -- TODO confirm
        if os.path.exists(train_ckpt + '.index'):
            print('-------------load the model-----------------')
            model.load_weights(train_ckpt)

        adam = tf.keras.optimizers.Adam(lr=learnrate)
        model.compile(optimizer=adam,
                      loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
                      metrics=['sparse_categorical_accuracy'])

        ma_callback = MACallBack()
        ma_callback.init(projectid, modelid, model)

        model.fit_generator(
            generator=get_train_batch(trainset, traintag, batch, width, height, formatter),
            steps_per_epoch=10,
            epochs=epchos, verbose=1,
            validation_data=test_batch,
            validation_steps=1,
            callbacks=[ma_callback],
            max_queue_size=128,
            workers=1)
if __name__ == "__main__":
pass | PypiClean |
/Kamaelia-0.6.0.tar.gz/Kamaelia-0.6.0/Examples/SoC2006/RJL/TorrentGUI/TorrentTkGUI.py | import Tkinter, time
from Kamaelia.UI.Tk.TkWindow import TkWindow
from Axon.Ipc import producerFinished, shutdown
from Kamaelia.Protocol.Torrent.TorrentPatron import TorrentPatron
from Kamaelia.Protocol.Torrent.TorrentIPC import TIPCNewTorrentCreated, TIPCTorrentStartFail, TIPCTorrentAlreadyDownloading, TIPCTorrentStatusUpdate
class TorrentTkWindow(TkWindow):
    """Tkinter BitTorrent client GUI.

    A Kamaelia component presenting a single window with a URL entry box,
    an "Add Torrent" button, and one progress label per torrent started by
    the TorrentPatron backend this component is wired to.
    """

    Inboxes = {
        "inbox"   : "From TorrentPatron backend",
        "control" : "Tell me to shutdown",
    }
    Outboxes = {
        "outbox"        : "To TorrentPatron backend",
        "fetcher"       : "To TorrentPatron backend via a resource fetcher, e.g. file reader or HTTP client",
        "fetchersignal" : "Shutdown resource fetcher",
        "signal"        : "When I've shutdown"
    }

    def __init__(self):
        super(TorrentTkWindow, self).__init__()

        # torrents that the user has requested be downloaded,
        # but which TorrentPatron has not yet confirmed
        self.pendingtorrents = []

        # torrents that have been started by TorrentPatron
        # (an associative array of torrentid -> (torrentname, label, labeltext))
        self.torrents = {}

    def setupWindow(self):
        "Create the GUI controls and window for this application"
        # THIS FUNCTION IS CALLED BY THE PARENT CLASS - TorrentTkWindow during __init__

        # Create the URL entry text box
        self.entry = Tkinter.Entry(self.window)

        # Create a button labelled "Add Torrent" which causes self.requestNewTorrent
        # to be called when clicked (a callback function)
        self.addtorrentbutton = Tkinter.Button(self.window, text="Add Torrent", command=self.requestNewTorrent)

        # Set the caption of our window
        self.window.title("Kamaelia BitTorrent Client")

        # Layout the window like a table so it resizes
        # the widgets automatically when it resizes
        # it will look something like: (without the grid lines)
        # +---------------------------------------+--------------+
        # | ENTRY (75% width)                     | BUTTON (25%) |
        # +---------------------------------------+--------------+

        # set the position of the text box in the 'table'
        self.entry.grid(row=0, column=0, sticky=Tkinter.N+Tkinter.E+Tkinter.W+Tkinter.S)

        # set the button of the text box in the 'table'
        self.addtorrentbutton.grid(row=0, column=1, sticky=Tkinter.N+Tkinter.E+Tkinter.W+Tkinter.S)

        # setup the row they are both in
        self.window.rowconfigure(0, weight=1)

        # make the left-most column three times the width of the right-most one
        self.window.columnconfigure(0, weight=3)
        self.window.columnconfigure(1, weight=1)

    def requestNewTorrent(self):
        "Request the addition of a new torrent"
        # get the contents of the text box (which should be a URL of a .torrent)
        torrenturl = self.entry.get()

        # add it to our list of torrents pending confirmation from TorrentPatron
        # (keyed by the last path component of the URL, i.e. the filename)
        self.pendingtorrents.append(torrenturl.rsplit("/", 1)[-1])

        # send the URL of this .torrent to the fetcher
        self.send(torrenturl, "fetcher")

        # clear the text box - make its contents ""
        self.entry.delete(0, Tkinter.END)

    def addTorrentToList(self, msg):
        "Add a new torrent to the list onscreen"
        # this torrent is the oldest one we requested that has not yet been added
        torrentname = self.pendingtorrents.pop(0)

        # using a StringVar allows us to change the label's text on the fly
        labeltext = Tkinter.StringVar()

        # create a new label for this torrent
        newlabel = Tkinter.Label(self.window, textvariable=labeltext)
        self.torrents[msg.torrentid] = (torrentname, newlabel, labeltext)
        labeltext.set(torrentname + " - 0%")

        # setup the layout 'table' so that the label spans
        # the entire width of the window
        newlabel.grid(row=len(self.torrents), column=0, columnspan=2, sticky=Tkinter.N+Tkinter.E+Tkinter.W+Tkinter.S)
        self.window.rowconfigure(len(self.torrents), weight=1)

    def main(self):
        """Component event loop: poll the Tk window and our inboxes until
        the window is destroyed, then signal the backend components.

        This is a Kamaelia generator -- it must yield regularly so the
        scheduler can run other components.
        """
        while not self.isDestroyed():
            time.sleep(0.05)  # reduces CPU usage but a separate timer component would be better
            yield 1

            while self.dataReady("control"):
                msg = self.recv("control")
                if isinstance(msg, producerFinished) or isinstance(msg, shutdown):
                    # close this window, causing us to exit the main loop
                    # (it makes self.isDestroyed() == True)
                    self.window.destroy()

            while self.dataReady("inbox"):
                msg = self.recv("inbox")
                if isinstance(msg, TIPCNewTorrentCreated):
                    self.addTorrentToList(msg)
                elif isinstance(msg, TIPCTorrentStartFail) or isinstance(msg, TIPCTorrentAlreadyDownloading):
                    # the oldest torrent not yet started failed to start so
                    # remove it from the list of pending torrents
                    print "Oops - torrent start failed!\n"
                    self.pendingtorrents.pop(0)
                elif isinstance(msg, TIPCTorrentStatusUpdate):
                    # change the label for that torrent to show the new percentage completion
                    # newlabelcaption = "{the torrent name} - {the percentage completion of the download}%"
                    # NOTE(review): the .get() default "0" is a *string*; "0" * 100
                    # would be string repetition, not arithmetic -- presumably the
                    # backend always supplies a float "fractionDone". TODO confirm.
                    newlabelcaption = self.torrents[msg.torrentid][0] + " - " + str(int(msg.statsdictionary.get("fractionDone","0") * 100)) + "%"
                    self.torrents[msg.torrentid][2].set(newlabelcaption)

            # update the screen
            self.tkupdate()

        # shutdown the TorrentPatron
        self.send(shutdown(), "signal")

        # and tell the HTTP client that we've finished which should cause
        # it to terminate gracefully, of its own accord
        self.send(producerFinished(self), "fetchersignal")
# Names exported as Kamaelia components from this module.
__kamaelia_components__ = ( TorrentTkWindow, )

if __name__ == "__main__":
    from Kamaelia.Chassis.Graphline import Graphline
    from Kamaelia.Protocol.HTTP.HTTPClient import SimpleHTTPClient

    # Wire the GUI, the HTTP fetcher and the BitTorrent backend together
    # and run the resulting component graph until shutdown.
    Graphline(
        gui = TorrentTkWindow(),          # our GUI
        httpclient = SimpleHTTPClient(),  # used to download .torrent files
        backend = TorrentPatron(),        # our BitTorrent client backend
        linkages = {
            ("backend", "outbox")    : ("gui", "inbox"),
            ("gui", "outbox")        : ("backend", "inbox"),
            ("gui", "signal")        : ("backend", "control"),
            ("gui", "fetchersignal") : ("httpclient", "control"),
            ("gui", "fetcher")       : ("httpclient", "inbox"),
            ("httpclient", "outbox") : ("backend", "inbox"),
        }
    ).run()

    # BASIC TOPOLOGY
    # -------------------------------
    #
    #  httpclient <-- gui <--> backend
    #       \                    /
    #        '--->---->---->--->'
/Office365-REST-Python-Client-2.4.3.tar.gz/Office365-REST-Python-Client-2.4.3/office365/onedrive/termstore/store.py | from office365.entity import Entity
from office365.entity_collection import EntityCollection
from office365.onedrive.termstore.groups.group import Group
from office365.onedrive.termstore.groups.collection import GroupCollection
from office365.onedrive.termstore.sets.set import Set
from office365.onedrive.termstore.sets.collection import SetCollection
from office365.runtime.paths.resource_path import ResourcePath
from office365.runtime.types.collections import StringCollection
class Store(Entity):
    """Represents a taxonomy term store."""

    def get_all_term_sets(self):
        """Returns a collection containing a flat list of all TermSet objects.

        Loading is deferred: each group's ``sets`` collection is only queued
        for loading *after* the ``groups`` collection itself has loaded, so
        the returned collection is populated once the client's pending
        request queue has been executed.
        """
        return_type = EntityCollection(self.context, Set)

        def _sets_loaded(group_sets):
            """Append every set of one loaded group to the flat result.

            :type group_sets: EntityCollection
            """
            [return_type.add_child(s) for s in group_sets]

        def _groups_loaded(groups):
            """Queue a load of each group's sets once the groups are known.

            :type groups: EntityCollection
            """
            for g in groups:  # type: Group
                self.context.load(g.sets, after_loaded=_sets_loaded)

        self.context.load(self.groups, after_loaded=_groups_loaded)
        return return_type

    @property
    def default_language_tag(self):
        """Default language of the term store.

        :rtype: str or None
        """
        return self.properties.get("defaultLanguageTag", None)

    @property
    def language_tags(self):
        """List of languages for the term store.

        :rtype: StringCollection
        """
        return self.properties.get("languageTags", StringCollection())

    @property
    def groups(self):
        """Collection of all groups available in the term store.

        :rtype: GroupCollection
        """
        return self.properties.get('groups',
                                   GroupCollection(self.context, ResourcePath("groups", self.resource_path)))

    @property
    def sets(self):
        """Collection of all sets available in the term store.

        :rtype: SetCollection
        """
        return self.properties.get('sets',
                                   SetCollection(self.context, ResourcePath("sets", self.resource_path)))

    def get_property(self, name, default_value=None):
        # Map server-side property names to the typed defaults exposed by
        # this class before delegating to the generic Entity lookup.
        if default_value is None:
            property_mapping = {
                "languageTags": self.language_tags
            }
            default_value = property_mapping.get(name, None)
        return super(Store, self).get_property(name, default_value)
/BlueWhale3-Timeseries-0.3.13.tar.gz/BlueWhale3-Timeseries-0.3.13/orangecontrib/timeseries/widgets/highcharts/_highcharts/map.js | (function(h){typeof module==="object"&&module.exports?module.exports=h:h(Highcharts)})(function(h){function H(a){if(a)a.preventDefault&&a.preventDefault(),a.stopPropagation&&a.stopPropagation(),a.cancelBubble=!0}function M(a,b){var c,d,e,f,g=!1,i=a.x,k=a.y;for(c=0,d=b.length-1;c<b.length;d=c++)e=b[c][1]>k,f=b[d][1]>k,e!==f&&i<(b[d][0]-b[c][0])*(k-b[c][1])/(b[d][1]-b[c][1])+b[c][0]&&(g=!g);return g}function N(a,b,c,d,e,f,g,i){return["M",a+e,b,"L",a+c-f,b,"C",a+c-f/2,b,a+c,b+f/2,a+c,b+f,"L",a+c,b+d-
g,"C",a+c,b+d-g/2,a+c-g/2,b+d,a+c-g,b+d,"L",a+i,b+d,"C",a+i/2,b+d,a,b+d-i/2,a,b+d-i,"L",a,b+e,"C",a,b+e/2,a+e/2,b,a+e,b,"Z"]}var T=h.animObject,o=h.Axis,q=h.Chart,w=h.Color,s=h.Point,I=h.Pointer,D=h.Legend,J=h.LegendSymbolMixin,U=h.Renderer,y=h.Series,K=h.SVGRenderer,O=h.VMLRenderer,A=h.win,P=A.document,L=h.addEvent,l=h.each,E=h.error,n=h.extend,t=h.extendClass,Q=h.format,V=h.map,F=h.isNumber,p=h.merge,m=h.pick,B=h.getOptions(),j=h.seriesTypes,v=B.plotOptions,u=h.wrap,r=function(){};u(o.prototype,
"getSeriesExtremes",function(a){var b=this.isXAxis,c,d,e=[],f;b&&l(this.series,function(a,b){if(a.useMapGeometry)e[b]=a.xData,a.xData=[]});a.call(this);if(b&&(c=m(this.dataMin,Number.MAX_VALUE),d=m(this.dataMax,-Number.MAX_VALUE),l(this.series,function(a,b){if(a.useMapGeometry)c=Math.min(c,m(a.minX,c)),d=Math.max(d,m(a.maxX,c)),a.xData=e[b],f=!0}),f))this.dataMin=c,this.dataMax=d});u(o.prototype,"setAxisTranslation",function(a){var b=this.chart,c=b.plotWidth/b.plotHeight,b=b.xAxis[0],d;a.call(this);
this.coll==="yAxis"&&b.transA!==void 0&&l(this.series,function(a){a.preserveAspectRatio&&(d=!0)});if(d&&(this.transA=b.transA=Math.min(this.transA,b.transA),a=c/((b.max-b.min)/(this.max-this.min)),a=a<1?this:b,c=(a.max-a.min)*a.transA,a.pixelPadding=a.len-c,a.minPixelPadding=a.pixelPadding/2,c=a.fixTo)){c=c[1]-a.toValue(c[0],!0);c*=a.transA;if(Math.abs(c)>a.minPixelPadding||a.min===a.dataMin&&a.max===a.dataMax)c=0;a.minPixelPadding-=c}});u(o.prototype,"render",function(a){a.call(this);this.fixTo=
null});var C=h.ColorAxis=function(){this.isColorAxis=!0;this.init.apply(this,arguments)};n(C.prototype,o.prototype);n(C.prototype,{defaultColorAxisOptions:{lineWidth:0,minPadding:0,maxPadding:0,gridLineWidth:1,tickPixelInterval:72,startOnTick:!0,endOnTick:!0,offset:0,marker:{animation:{duration:50},color:"gray",width:0.01},labels:{overflow:"justify"},minColor:"#EFEFFF",maxColor:"#003875",tickLength:5},init:function(a,b){var c=a.options.legend.layout!=="vertical",d;d=p(this.defaultColorAxisOptions,
{side:c?2:1,reversed:!c},b,{opposite:!c,showEmpty:!1,title:null,isColor:!0});o.prototype.init.call(this,a,d);b.dataClasses&&this.initDataClasses(b);this.initStops(b);this.horiz=c;this.zoomEnabled=!1},tweenColors:function(a,b,c){var d;!b.rgba.length||!a.rgba.length?a=b.input||"none":(a=a.rgba,b=b.rgba,d=b[3]!==1||a[3]!==1,a=(d?"rgba(":"rgb(")+Math.round(b[0]+(a[0]-b[0])*(1-c))+","+Math.round(b[1]+(a[1]-b[1])*(1-c))+","+Math.round(b[2]+(a[2]-b[2])*(1-c))+(d?","+(b[3]+(a[3]-b[3])*(1-c)):"")+")");return a},
initDataClasses:function(a){var b=this,c=this.chart,d,e=0,f=this.options,g=a.dataClasses.length;this.dataClasses=d=[];this.legendItems=[];l(a.dataClasses,function(a,k){var h,a=p(a);d.push(a);if(!a.color)f.dataClassColor==="category"?(h=c.options.colors,a.color=h[e++],e===h.length&&(e=0)):a.color=b.tweenColors(w(f.minColor),w(f.maxColor),g<2?0.5:k/(g-1))})},initStops:function(a){this.stops=a.stops||[[0,this.options.minColor],[1,this.options.maxColor]];l(this.stops,function(a){a.color=w(a[1])})},setOptions:function(a){o.prototype.setOptions.call(this,
a);this.options.crosshair=this.options.marker;this.coll="colorAxis"},setAxisSize:function(){var a=this.legendSymbol,b=this.chart,c,d,e;if(a)this.left=c=a.attr("x"),this.top=d=a.attr("y"),this.width=e=a.attr("width"),this.height=a=a.attr("height"),this.right=b.chartWidth-c-e,this.bottom=b.chartHeight-d-a,this.len=this.horiz?e:a,this.pos=this.horiz?c:d},toColor:function(a,b){var c,d=this.stops,e,f=this.dataClasses,g,i;if(f)for(i=f.length;i--;){if(g=f[i],e=g.from,d=g.to,(e===void 0||a>=e)&&(d===void 0||
a<=d)){c=g.color;if(b)b.dataClass=i;break}}else{this.isLog&&(a=this.val2lin(a));c=1-(this.max-a)/(this.max-this.min||1);for(i=d.length;i--;)if(c>d[i][0])break;e=d[i]||d[i+1];d=d[i+1]||e;c=1-(d[0]-c)/(d[0]-e[0]||1);c=this.tweenColors(e.color,d.color,c)}return c},getOffset:function(){var a=this.legendGroup,b=this.chart.axisOffset[this.side];if(a){this.axisParent=a;o.prototype.getOffset.call(this);if(!this.added)this.added=!0,this.labelLeft=0,this.labelRight=this.width;this.chart.axisOffset[this.side]=
b}},setLegendColor:function(){var a,b=this.options,c=this.reversed;a=c?1:0;c=c?0:1;a=this.horiz?[a,0,c,0]:[0,c,0,a];this.legendColor={linearGradient:{x1:a[0],y1:a[1],x2:a[2],y2:a[3]},stops:b.stops||[[0,b.minColor],[1,b.maxColor]]}},drawLegendSymbol:function(a,b){var c=a.padding,d=a.options,e=this.horiz,f=m(d.symbolWidth,e?200:12),g=m(d.symbolHeight,e?12:200),i=m(d.labelPadding,e?16:30),d=m(d.itemDistance,10);this.setLegendColor();b.legendSymbol=this.chart.renderer.rect(0,a.baseline-11,f,g).attr({zIndex:1}).add(b.legendGroup);
this.legendItemWidth=f+c+(e?d:i);this.legendItemHeight=g+c+(e?i:0)},setState:r,visible:!0,setVisible:r,getSeriesExtremes:function(){var a;if(this.series.length)a=this.series[0],this.dataMin=a.valueMin,this.dataMax=a.valueMax},drawCrosshair:function(a,b){var c=b&&b.plotX,d=b&&b.plotY,e,f=this.pos,g=this.len;if(b)e=this.toPixels(b[b.series.colorKey]),e<f?e=f-2:e>f+g&&(e=f+g+2),b.plotX=e,b.plotY=this.len-e,o.prototype.drawCrosshair.call(this,a,b),b.plotX=c,b.plotY=d,this.cross&&this.cross.attr({fill:this.crosshair.color}).add(this.legendGroup)},
getPlotLinePath:function(a,b,c,d,e){return F(e)?this.horiz?["M",e-4,this.top-6,"L",e+4,this.top-6,e,this.top,"Z"]:["M",this.left,e,"L",this.left-6,e+6,this.left-6,e-6,"Z"]:o.prototype.getPlotLinePath.call(this,a,b,c,d)},update:function(a,b){var c=this.chart,d=c.legend;l(this.series,function(a){a.isDirtyData=!0});if(a.dataClasses&&d.allItems)l(d.allItems,function(a){a.isDataClass&&a.legendGroup.destroy()}),c.isDirtyLegend=!0;c.options[this.coll]=p(this.userOptions,a);o.prototype.update.call(this,a,
b);this.legendItem&&(this.setLegendColor(),d.colorizeItem(this,!0))},getDataClassLegendSymbols:function(){var a=this,b=this.chart,c=this.legendItems,d=b.options.legend,e=d.valueDecimals,f=d.valueSuffix||"",g;c.length||l(this.dataClasses,function(d,k){var x=!0,z=d.from,m=d.to;g="";z===void 0?g="< ":m===void 0&&(g="> ");z!==void 0&&(g+=h.numberFormat(z,e)+f);z!==void 0&&m!==void 0&&(g+=" - ");m!==void 0&&(g+=h.numberFormat(m,e)+f);c.push(n({chart:b,name:g,options:{},drawLegendSymbol:J.drawRectangle,
visible:!0,setState:r,isDataClass:!0,setVisible:function(){x=this.visible=!x;l(a.series,function(a){l(a.points,function(a){a.dataClass===k&&a.setVisible(x)})});b.legend.colorizeItem(this,x)}},d))});return c},name:""});l(["fill","stroke"],function(a){h.Fx.prototype[a+"Setter"]=function(){this.elem.attr(a,C.prototype.tweenColors(w(this.start),w(this.end),this.pos))}});u(q.prototype,"getAxes",function(a){var b=this.options.colorAxis;a.call(this);this.colorAxis=[];b&&new C(this,b)});u(D.prototype,"getAllItems",
function(a){var b=[],c=this.chart.colorAxis[0];c&&(c.options.dataClasses?b=b.concat(c.getDataClassLegendSymbols()):b.push(c),l(c.series,function(a){a.options.showInLegend=!1}));return b.concat(a.call(this))});var D={setVisible:function(a){var b=this,c=a?"show":"hide";l(["graphic","dataLabel"],function(a){if(b[a])b[a][c]()})}},R={pointAttrToOptions:{stroke:"borderColor","stroke-width":"borderWidth",fill:"color",dashstyle:"dashStyle"},pointArrayMap:["value"],axisTypes:["xAxis","yAxis","colorAxis"],
optionalAxis:"colorAxis",trackerGroups:["group","markerGroup","dataLabelsGroup"],getSymbol:r,parallelArrays:["x","y","value"],colorKey:"value",translateColors:function(){var a=this,b=this.options.nullColor,c=this.colorAxis,d=this.colorKey;l(this.data,function(e){var f=e[d];if(f=e.options.color||(f===null?b:c&&f!==void 0?c.toColor(f,e):e.color||a.color))e.color=f})}};n(q.prototype,{renderMapNavigation:function(){var a=this,b=this.options.mapNavigation,c=b.buttons,d,e,f,g,i=function(b){this.handler.call(a,
b);H(b)};if(m(b.enableButtons,b.enabled)&&!a.renderer.forExport)for(d in a.mapNavButtons=[],c)if(c.hasOwnProperty(d))f=p(b.buttonOptions,c[d]),e=f.theme,e.style=p(f.theme.style,f.style),g=e.states,e=a.renderer.button(f.text,0,0,i,e,g&&g.hover,g&&g.select,0,d==="zoomIn"?"topbutton":"bottombutton").attr({width:f.width,height:f.height,title:a.options.lang[d],zIndex:5}).add(),e.handler=f.onclick,e.align(n(f,{width:e.width,height:2*e.height}),null,f.alignTo),L(e.element,"dblclick",H),a.mapNavButtons.push(e)},
fitToBox:function(a,b){l([["x","width"],["y","height"]],function(c){var d=c[0],c=c[1];a[d]+a[c]>b[d]+b[c]&&(a[c]>b[c]?(a[c]=b[c],a[d]=b[d]):a[d]=b[d]+b[c]-a[c]);a[c]>b[c]&&(a[c]=b[c]);a[d]<b[d]&&(a[d]=b[d])});return a},mapZoom:function(a,b,c,d,e){var f=this.xAxis[0],g=f.max-f.min,i=m(b,f.min+g/2),k=g*a,g=this.yAxis[0],h=g.max-g.min,z=m(c,g.min+h/2);h*=a;i=this.fitToBox({x:i-k*(d?(d-f.pos)/f.len:0.5),y:z-h*(e?(e-g.pos)/g.len:0.5),width:k,height:h},{x:f.dataMin,y:g.dataMin,width:f.dataMax-f.dataMin,
height:g.dataMax-g.dataMin});if(d)f.fixTo=[d-f.pos,b];if(e)g.fixTo=[e-g.pos,c];a!==void 0?(f.setExtremes(i.x,i.x+i.width,!1),g.setExtremes(i.y,i.y+i.height,!1)):(f.setExtremes(void 0,void 0,!1),g.setExtremes(void 0,void 0,!1));this.redraw()}});u(q.prototype,"render",function(a){var b=this,c=b.options.mapNavigation;b.renderMapNavigation();a.call(b);(m(c.enableDoubleClickZoom,c.enabled)||c.enableDoubleClickZoomTo)&&L(b.container,"dblclick",function(a){b.pointer.onContainerDblClick(a)});m(c.enableMouseWheelZoom,
c.enabled)&&L(b.container,P.onmousewheel===void 0?"DOMMouseScroll":"mousewheel",function(a){b.pointer.onContainerMouseWheel(a);H(a);return!1})});n(I.prototype,{onContainerDblClick:function(a){var b=this.chart,a=this.normalize(a);b.options.mapNavigation.enableDoubleClickZoomTo?b.pointer.inClass(a.target,"highcharts-tracker")&&b.hoverPoint&&b.hoverPoint.zoomTo():b.isInsidePlot(a.chartX-b.plotLeft,a.chartY-b.plotTop)&&b.mapZoom(0.5,b.xAxis[0].toValue(a.chartX),b.yAxis[0].toValue(a.chartY),a.chartX,a.chartY)},
onContainerMouseWheel:function(a){var b=this.chart,c,a=this.normalize(a);c=a.detail||-(a.wheelDelta/120);b.isInsidePlot(a.chartX-b.plotLeft,a.chartY-b.plotTop)&&b.mapZoom(Math.pow(b.options.mapNavigation.mouseWheelSensitivity,c),b.xAxis[0].toValue(a.chartX),b.yAxis[0].toValue(a.chartY),a.chartX,a.chartY)}});u(I.prototype,"init",function(a,b,c){a.call(this,b,c);if(m(c.mapNavigation.enableTouchZoom,c.mapNavigation.enabled))this.pinchX=this.pinchHor=this.pinchY=this.pinchVert=this.hasZoom=!0});u(I.prototype,
"pinchTranslate",function(a,b,c,d,e,f,g){a.call(this,b,c,d,e,f,g);this.chart.options.chart.type==="map"&&this.hasZoom&&(a=d.scaleX>d.scaleY,this.pinchTranslateDirection(!a,b,c,d,e,f,g,a?d.scaleX:d.scaleY))});var G=P.documentElement.style.vectorEffect!==void 0;v.map=p(v.scatter,{allAreas:!0,animation:!1,nullColor:"#F8F8F8",borderColor:"silver",borderWidth:1,marker:null,stickyTracking:!1,dataLabels:{formatter:function(){return this.point.value},inside:!0,verticalAlign:"middle",crop:!1,overflow:!1,padding:0},
turboThreshold:0,tooltip:{followPointer:!0,pointFormat:"{point.name}: {point.value}<br/>"},states:{normal:{animation:!0},hover:{brightness:0.2,halo:null}}});var S=t(s,n({applyOptions:function(a,b){var c=s.prototype.applyOptions.call(this,a,b),d=this.series,e=d.joinBy;if(d.mapData)if(e=c[e[1]]!==void 0&&d.mapMap[c[e[1]]]){if(d.xyFromShape)c.x=e._midX,c.y=e._midY;n(c,e)}else c.value=c.value||null;return c},onMouseOver:function(a){clearTimeout(this.colorInterval);if(this.value!==null)s.prototype.onMouseOver.call(this,
a);else this.series.onMouseOut(a)},onMouseOut:function(){var a=this,b=+new Date,c=w(a.color),d=w(a.pointAttr.hover.fill),e=T(a.series.options.states.normal.animation).duration,f;if(e&&c.rgba.length===4&&d.rgba.length===4&&a.state!=="select")f=a.pointAttr[""].fill,delete a.pointAttr[""].fill,clearTimeout(a.colorInterval),a.colorInterval=setInterval(function(){var g=(new Date-b)/e,f=a.graphic;g>1&&(g=1);f&&f.attr("fill",C.prototype.tweenColors.call(0,d,c,g));g>=1&&clearTimeout(a.colorInterval)},13);
s.prototype.onMouseOut.call(a);if(f)a.pointAttr[""].fill=f},zoomTo:function(){var a=this.series;a.xAxis.setExtremes(this._minX,this._maxX,!1);a.yAxis.setExtremes(this._minY,this._maxY,!1);a.chart.redraw()}},D));j.map=t(j.scatter,p(R,{type:"map",pointClass:S,supportsDrilldown:!0,getExtremesFromAll:!0,useMapGeometry:!0,forceDL:!0,searchPoint:r,directTouch:!0,preserveAspectRatio:!0,getBox:function(a){var b=Number.MAX_VALUE,c=-b,d=b,e=-b,f=b,g=b,i=this.xAxis,k=this.yAxis,x;l(a||[],function(a){if(a.path){if(typeof a.path===
"string")a.path=h.splitPath(a.path);var i=a.path||[],k=i.length,l=!1,j=-b,p=b,n=-b,o=b,q=a.properties;if(!a._foundBox){for(;k--;)F(i[k])&&(l?(j=Math.max(j,i[k]),p=Math.min(p,i[k])):(n=Math.max(n,i[k]),o=Math.min(o,i[k])),l=!l);a._midX=p+(j-p)*(a.middleX||q&&q["hc-middle-x"]||0.5);a._midY=o+(n-o)*(a.middleY||q&&q["hc-middle-y"]||0.5);a._maxX=j;a._minX=p;a._maxY=n;a._minY=o;a.labelrank=m(a.labelrank,(j-p)*(n-o));a._foundBox=!0}c=Math.max(c,a._maxX);d=Math.min(d,a._minX);e=Math.max(e,a._maxY);f=Math.min(f,
a._minY);g=Math.min(a._maxX-a._minX,a._maxY-a._minY,g);x=!0}});if(x){this.minY=Math.min(f,m(this.minY,b));this.maxY=Math.max(e,m(this.maxY,-b));this.minX=Math.min(d,m(this.minX,b));this.maxX=Math.max(c,m(this.maxX,-b));if(i&&i.options.minRange===void 0)i.minRange=Math.min(5*g,(this.maxX-this.minX)/5,i.minRange||b);if(k&&k.options.minRange===void 0)k.minRange=Math.min(5*g,(this.maxY-this.minY)/5,k.minRange||b)}},getExtremes:function(){y.prototype.getExtremes.call(this,this.valueData);this.chart.hasRendered&&
this.isDirtyData&&this.getBox(this.options.data);this.valueMin=this.dataMin;this.valueMax=this.dataMax;this.dataMin=this.minY;this.dataMax=this.maxY},translatePath:function(a){var b=!1,c=this.xAxis,d=this.yAxis,e=c.min,f=c.transA,c=c.minPixelPadding,g=d.min,i=d.transA,d=d.minPixelPadding,k,h=[];if(a)for(k=a.length;k--;)F(a[k])?(h[k]=b?(a[k]-e)*f+c:(a[k]-g)*i+d,b=!b):h[k]=a[k];return h},setData:function(a,b,c,d){var e=this.options,f=e.mapData,g=e.joinBy,i=g===null,k=[],m={},j,n,o;i&&(g="_i");g=this.joinBy=
h.splat(g);g[1]||(g[1]=g[0]);a&&l(a,function(b,c){F(b)&&(a[c]={value:b});if(i)a[c]._i=c});this.getBox(a);if(f){if(f.type==="FeatureCollection"){if(f["hc-transform"])for(j in this.chart.mapTransforms=n=f["hc-transform"],n)if(n.hasOwnProperty(j)&&j.rotation)j.cosAngle=Math.cos(j.rotation),j.sinAngle=Math.sin(j.rotation);f=h.geojson(f,this.type,this)}this.mapData=f;for(o=0;o<f.length;o++)j=f[o],n=j.properties,j._i=o,g[0]&&n&&n[g[0]]&&(j[g[0]]=n[g[0]]),m[j[g[0]]]=j;this.mapMap=m;a&&g[1]&&l(a,function(a){m[a[g[1]]]&&
k.push(m[a[g[1]]])});e.allAreas?(this.getBox(f),a=a||[],k="|"+V(k,function(a){return a[g[0]]}).join("|")+"|",l(f,function(b){if(!g[0]||k.indexOf("|"+b[g[0]]+"|")===-1)a.push(p(b,{value:null})),d=!1})):this.getBox(k)}y.prototype.setData.call(this,a,b,c,d)},drawGraph:r,drawDataLabels:r,doFullTranslate:function(){return this.isDirtyData||this.chart.isResizing||this.chart.renderer.isVML||!this.baseTrans},translate:function(){var a=this,b=a.xAxis,c=a.yAxis,d=a.doFullTranslate();a.generatePoints();l(a.data,
function(e){e.plotX=b.toPixels(e._midX,!0);e.plotY=c.toPixels(e._midY,!0);if(d)e.shapeType="path",e.shapeArgs={d:a.translatePath(e.path)},G&&(e.shapeArgs["vector-effect"]="non-scaling-stroke")});a.translateColors()},drawPoints:function(){var a=this,b=a.xAxis,c=a.yAxis,d=a.group,e=a.chart,f=e.renderer,g,i=this.baseTrans;if(!a.transformGroup)a.transformGroup=f.g().attr({scaleX:1,scaleY:1}).add(d),a.transformGroup.survive=!0;a.doFullTranslate()?(e.hasRendered&&a.pointAttrToOptions.fill==="color"&&l(a.points,
function(a){if(a.shapeArgs)a.shapeArgs.fill=a.pointAttr[m(a.state,"")].fill}),G||l(a.points,function(b){b=b.pointAttr[""];b["stroke-width"]===a.pointAttr[""]["stroke-width"]&&(b["stroke-width"]="inherit")}),a.group=a.transformGroup,j.column.prototype.drawPoints.apply(a),a.group=d,l(a.points,function(a){a.graphic&&(a.name&&a.graphic.addClass("highcharts-name-"+a.name.replace(" ","-").toLowerCase()),a.properties&&a.properties["hc-key"]&&a.graphic.addClass("highcharts-key-"+a.properties["hc-key"].toLowerCase()),
G||(a.graphic["stroke-widthSetter"]=r))}),this.baseTrans={originX:b.min-b.minPixelPadding/b.transA,originY:c.min-c.minPixelPadding/c.transA+(c.reversed?0:c.len/c.transA),transAX:b.transA,transAY:c.transA},this.transformGroup.animate({translateX:0,translateY:0,scaleX:1,scaleY:1})):(g=b.transA/i.transAX,d=c.transA/i.transAY,b=b.toPixels(i.originX,!0),c=c.toPixels(i.originY,!0),g>0.99&&g<1.01&&d>0.99&&d<1.01&&(d=g=1,b=Math.round(b),c=Math.round(c)),this.transformGroup.animate({translateX:b,translateY:c,
scaleX:g,scaleY:d}));G||a.group.element.setAttribute("stroke-width",a.options.borderWidth/(g||1));this.drawMapDataLabels()},drawMapDataLabels:function(){y.prototype.drawDataLabels.call(this);this.dataLabelsGroup&&this.dataLabelsGroup.clip(this.chart.clipRect)},render:function(){var a=this,b=y.prototype.render;a.chart.renderer.isVML&&a.data.length>3E3?setTimeout(function(){b.call(a)}):b.call(a)},animate:function(a){var b=this.options.animation,c=this.group,d=this.xAxis,e=this.yAxis,f=d.pos,g=e.pos;
if(this.chart.renderer.isSVG)b===!0&&(b={duration:1E3}),a?c.attr({translateX:f+d.len/2,translateY:g+e.len/2,scaleX:0.001,scaleY:0.001}):(c.animate({translateX:f,translateY:g,scaleX:1,scaleY:1},b),this.animate=null)},animateDrilldown:function(a){var b=this.chart.plotBox,c=this.chart.drilldownLevels[this.chart.drilldownLevels.length-1],d=c.bBox,e=this.chart.options.drilldown.animation;if(!a)a=Math.min(d.width/b.width,d.height/b.height),c.shapeArgs={scaleX:a,scaleY:a,translateX:d.x,translateY:d.y},l(this.points,
function(a){a.graphic&&a.graphic.attr(c.shapeArgs).animate({scaleX:1,scaleY:1,translateX:0,translateY:0},e)}),this.animate=null},drawLegendSymbol:J.drawRectangle,animateDrillupFrom:function(a){j.column.prototype.animateDrillupFrom.call(this,a)},animateDrillupTo:function(a){j.column.prototype.animateDrillupTo.call(this,a)}}));v.mapline=p(v.map,{lineWidth:1,fillColor:"none"});j.mapline=t(j.map,{type:"mapline",pointAttrToOptions:{stroke:"color","stroke-width":"lineWidth",fill:"fillColor",dashstyle:"dashStyle"},
drawLegendSymbol:j.line.prototype.drawLegendSymbol});v.mappoint=p(v.scatter,{dataLabels:{enabled:!0,formatter:function(){return this.point.name},crop:!1,defer:!1,overflow:!1,style:{color:"#000000"}}});j.mappoint=t(j.scatter,{type:"mappoint",forceDL:!0,pointClass:t(s,{applyOptions:function(a,b){var c=s.prototype.applyOptions.call(this,a,b);a.lat!==void 0&&a.lon!==void 0&&(c=n(c,this.series.chart.fromLatLonToPoint(c)));return c}})});if(j.bubble)v.mapbubble=p(v.bubble,{animationLimit:500,tooltip:{pointFormat:"{point.name}: {point.z}"}}),
j.mapbubble=t(j.bubble,{pointClass:t(s,{applyOptions:function(a,b){var c;a&&a.lat!==void 0&&a.lon!==void 0?(c=s.prototype.applyOptions.call(this,a,b),c=n(c,this.series.chart.fromLatLonToPoint(c))):c=S.prototype.applyOptions.call(this,a,b);return c},ttBelow:!1}),xyFromShape:!0,type:"mapbubble",pointArrayMap:["z"],getMapData:j.map.prototype.getMapData,getBox:j.map.prototype.getBox,setData:j.map.prototype.setData});B.plotOptions.heatmap=p(B.plotOptions.scatter,{animation:!1,borderWidth:0,nullColor:"#F8F8F8",
dataLabels:{formatter:function(){return this.point.value},inside:!0,verticalAlign:"middle",crop:!1,overflow:!1,padding:0},marker:null,pointRange:null,tooltip:{pointFormat:"{point.x}, {point.y}: {point.value}<br/>"},states:{normal:{animation:!0},hover:{halo:!1,brightness:0.2}}});j.heatmap=t(j.scatter,p(R,{type:"heatmap",pointArrayMap:["y","value"],hasPointSpecificOptions:!0,pointClass:t(s,D),supportsDrilldown:!0,getExtremesFromAll:!0,directTouch:!0,init:function(){var a;j.scatter.prototype.init.apply(this,
arguments);a=this.options;a.pointRange=m(a.pointRange,a.colsize||1);this.yAxis.axisPointRange=a.rowsize||1},translate:function(){var a=this.options,b=this.xAxis,c=this.yAxis,d=function(a,b,c){return Math.min(Math.max(b,a),c)};this.generatePoints();l(this.points,function(e){var f=(a.colsize||1)/2,g=(a.rowsize||1)/2,i=d(Math.round(b.len-b.translate(e.x-f,0,1,0,1)),-b.len,2*b.len),f=d(Math.round(b.len-b.translate(e.x+f,0,1,0,1)),-b.len,2*b.len),h=d(Math.round(c.translate(e.y-g,0,1,0,1)),-c.len,2*c.len),
g=d(Math.round(c.translate(e.y+g,0,1,0,1)),-c.len,2*c.len);e.plotX=e.clientX=(i+f)/2;e.plotY=(h+g)/2;e.shapeType="rect";e.shapeArgs={x:Math.min(i,f),y:Math.min(h,g),width:Math.abs(f-i),height:Math.abs(g-h)}});this.translateColors();this.chart.hasRendered&&l(this.points,function(a){a.shapeArgs.fill=a.options.color||a.color})},drawPoints:j.column.prototype.drawPoints,animate:r,getBox:r,drawLegendSymbol:J.drawRectangle,alignDataLabel:j.column.prototype.alignDataLabel,getExtremes:function(){y.prototype.getExtremes.call(this,
this.valueData);this.valueMin=this.dataMin;this.valueMax=this.dataMax;y.prototype.getExtremes.call(this)}}));q.prototype.transformFromLatLon=function(a,b){if(A.proj4===void 0)return E(21),{x:0,y:null};var c=A.proj4(b.crs,[a.lon,a.lat]),d=b.cosAngle||b.rotation&&Math.cos(b.rotation),e=b.sinAngle||b.rotation&&Math.sin(b.rotation),c=b.rotation?[c[0]*d+c[1]*e,-c[0]*e+c[1]*d]:c;return{x:((c[0]-(b.xoffset||0))*(b.scale||1)+(b.xpan||0))*(b.jsonres||1)+(b.jsonmarginX||0),y:(((b.yoffset||0)-c[1])*(b.scale||
1)+(b.ypan||0))*(b.jsonres||1)-(b.jsonmarginY||0)}};q.prototype.transformToLatLon=function(a,b){if(A.proj4===void 0)E(21);else{var c={x:((a.x-(b.jsonmarginX||0))/(b.jsonres||1)-(b.xpan||0))/(b.scale||1)+(b.xoffset||0),y:((-a.y-(b.jsonmarginY||0))/(b.jsonres||1)+(b.ypan||0))/(b.scale||1)+(b.yoffset||0)},d=b.cosAngle||b.rotation&&Math.cos(b.rotation),e=b.sinAngle||b.rotation&&Math.sin(b.rotation),c=A.proj4(b.crs,"WGS84",b.rotation?{x:c.x*d+c.y*-e,y:c.x*e+c.y*d}:c);return{lat:c.y,lon:c.x}}};q.prototype.fromPointToLatLon=
function(a){var b=this.mapTransforms,c;if(b){for(c in b)if(b.hasOwnProperty(c)&&b[c].hitZone&&M({x:a.x,y:-a.y},b[c].hitZone.coordinates[0]))return this.transformToLatLon(a,b[c]);return this.transformToLatLon(a,b["default"])}else E(22)};q.prototype.fromLatLonToPoint=function(a){var b=this.mapTransforms,c,d;if(!b)return E(22),{x:0,y:null};for(c in b)if(b.hasOwnProperty(c)&&b[c].hitZone&&(d=this.transformFromLatLon(a,b[c]),M({x:d.x,y:-d.y},b[c].hitZone.coordinates[0])))return d;return this.transformFromLatLon(a,
b["default"])};h.geojson=function(a,b,c){var d=[],e=[],f=function(a){var b,c=a.length;e.push("M");for(b=0;b<c;b++)b===1&&e.push("L"),e.push(a[b][0],-a[b][1])},b=b||"map";l(a.features,function(a){var c=a.geometry,h=c.type,c=c.coordinates,a=a.properties,j;e=[];b==="map"||b==="mapbubble"?(h==="Polygon"?(l(c,f),e.push("Z")):h==="MultiPolygon"&&(l(c,function(a){l(a,f)}),e.push("Z")),e.length&&(j={path:e})):b==="mapline"?(h==="LineString"?f(c):h==="MultiLineString"&&l(c,f),e.length&&(j={path:e})):b==="mappoint"&&
h==="Point"&&(j={x:c[0],y:-c[1]});j&&d.push(n(j,{name:a.name||a.NAME,properties:a}))});if(c&&a.copyrightShort)c.chart.mapCredits=Q(c.chart.options.credits.mapText,{geojson:a}),c.chart.mapCreditsFull=Q(c.chart.options.credits.mapTextFull,{geojson:a});return d};u(q.prototype,"showCredits",function(a,b){if(this.mapCredits)b.href=null;a.call(this,h.merge(b,{text:b.text+(this.mapCredits||"")}));this.credits&&this.mapCreditsFull&&this.credits.attr({title:this.mapCreditsFull})});n(B.lang,{zoomIn:"Zoom in",
zoomOut:"Zoom out"});B.mapNavigation={buttonOptions:{alignTo:"plotBox",align:"left",verticalAlign:"top",x:0,width:18,height:18,style:{fontSize:"15px",fontWeight:"bold",textAlign:"center"},theme:{"stroke-width":1}},buttons:{zoomIn:{onclick:function(){this.mapZoom(0.5)},text:"+",y:0},zoomOut:{onclick:function(){this.mapZoom(2)},text:"-",y:28}},mouseWheelSensitivity:1.1};h.splitPath=function(a){var b,a=a.replace(/([A-Za-z])/g," $1 "),a=a.replace(/^\s*/,"").replace(/\s*$/,""),a=a.split(/[ ,]+/);for(b=
0;b<a.length;b++)/[a-zA-Z]/.test(a[b])||(a[b]=parseFloat(a[b]));return a};h.maps={};K.prototype.symbols.topbutton=function(a,b,c,d,e){return N(a-1,b-1,c,d,e.r,e.r,0,0)};K.prototype.symbols.bottombutton=function(a,b,c,d,e){return N(a-1,b-1,c,d,0,0,e.r,e.r)};U===O&&l(["topbutton","bottombutton"],function(a){O.prototype.symbols[a]=K.prototype.symbols[a]});h.Map=h.mapChart=function(a,b,c){var d=typeof a==="string"||a.nodeName,e=arguments[d?1:0],f={endOnTick:!1,gridLineWidth:0,lineWidth:0,minPadding:0,
maxPadding:0,startOnTick:!1,title:null,tickPositions:[]},g,i=h.getOptions().credits;g=e.series;e.series=null;e=p({chart:{panning:"xy",type:"map"},credits:{mapText:m(i.mapText,' \u00a9 <a href="{geojson.copyrightUrl}">{geojson.copyrightShort}</a>'),mapTextFull:m(i.mapTextFull,"{geojson.copyright}")},xAxis:f,yAxis:p(f,{reversed:!0})},e,{chart:{inverted:!1,alignTicks:!1}});e.series=g;return d?new q(a,e,c):new q(e,b)}}); | PypiClean |
/DrQueueIPython-0.0.1.tar.gz/DrQueueIPython-0.0.1/bin/control_computer.py | from optparse import OptionParser
import os
import DrQueue
from DrQueue import Job as DrQueueJob
from DrQueue import Client as DrQueueClient
from DrQueue import Computer as DrQueueComputer
from DrQueue import ComputerPool as DrQueueComputerPool
def main():
    """Command-line entry point.

    Parses options and runs the requested action (shutdown, pool
    assignment, info, or status) on the selected DrQueue computers.

    Returns True when an action ran, False on bad usage, None otherwise.
    """
    # parse arguments
    parser = OptionParser()
    parser.usage = "%prog [options] -i id"
    parser.add_option("-i", "--id ",
        dest="id", default=None, help="id of computer")
    parser.add_option("-a", "--all ",
        action="store_true", dest="all", default=False, help="use all computers")
    parser.add_option("-s", "--shutdown",
        action="store_true", dest="shutdown", default=False, help="shutdown computer")
    parser.add_option("-p", "--pools",
        dest="pools", default=None, help="add computer to one or more pools")
    parser.add_option("--info",
        action="store_true", dest="info", default=False, help="show information about computer")
    parser.add_option("-t", "--status",
        action="store_true", dest="status", default=False, help="show status of computer")
    parser.add_option("-v", "--verbose",
        action="store_true", dest="verbose", default=False, help="verbose output")
    (options, args) = parser.parse_args()

    # FIX: without -i or -a, `computers` used to be undefined and every
    # action below crashed with a NameError.  Fail gracefully instead.
    if options.id is None and not options.all:
        parser.print_usage()
        return False

    # initialize DrQueue client
    client = DrQueueClient()
    # identify_computer() results may be served from cache for this long (s)
    cache_time = 60

    # engines to work on; -a overrides an explicit id (original precedence)
    computers = []
    if options.id is not None:
        computers.append(int(options.id))
    if options.all == True:
        computers = client.ip_client.ids

    # run specified action
    if options.shutdown:
        for computer in computers:
            client.engine_stop(computer)
        print("Computer %s has been shut down." % str(computers))
        return True

    if options.pools:
        for computer in computers:
            comp = client.identify_computer(computer, cache_time)
            DrQueueComputer.set_pools(comp['hostname'], options.pools.split(","))
            print("Computer %i has been added to pools %s." % (computer, options.pools.split(",")))
        return True

    if options.info:
        for computer in computers:
            print("Engine " + str(computer) + ":")
            comp = client.identify_computer(computer, cache_time)
            print(" hostname: " + comp['hostname'])
            print(" arch: " + comp['arch'])
            print(" os: " + comp['os'])
            print(" nbits: " + str(comp['nbits']))
            print(" procspeed: " + comp['procspeed'])
            print(" ncpus: " + str(comp['ncpus']))
            print(" ncorescpu: " + str(comp['ncorescpu']))
            print(" memory: " + str(comp['memory']))
            print(" load: " + comp['load'])
            print(" pools: " + str(DrQueueComputer.get_pools(comp['hostname'])) + "\n")
        return True

    if options.status:
        for computer in computers:
            print("Engine " + str(computer) + ":")
            status = client.ip_client.queue_status(computer, verbose=True)
            print(" status:")
            print(" in queue: " + str(status['queue']))
            print(" completed: " + str(status['completed']))
            print(" tasks: " + str(status['tasks']))
        return True
# Allow the module to be executed directly as a command-line tool.
if __name__ == "__main__":
    main()
/BlueWhale3-BlueWhale-0.0.54.tar.gz/BlueWhale3-BlueWhale-0.0.54/orangecontrib/blue_whale/canvasmain.py | from AnyQt.QtWidgets import QAction, QMenu
from AnyQt.QtCore import Qt
from Orange.canvas import config
from orangecanvas.application.canvasmain import CanvasMainWindow
from orangecanvas.registry import get_style_sheet, get_global_registry
from orangecanvas.application.outputview import TextStream
from orangecontrib.blue_whale.i18n_config import *
def __(key):
    """Return the translated string for *key* from the
    ``bluewhale.canvasmain`` i18n catalogue."""
    return i18n.t("bluewhale.canvasmain." + key)
# Module-level session store shared by all windows.  The 'SESSION' entry
# holds the auth token; an empty string / None means "not signed in".
__SESSION = {"SESSION": ""}
def login(way=None):
    """Toggle the global sign-in state.

    Signed in: the click means "sign out", so the stored token is cleared
    and the menu action is relabelled.  Signed out: the interactive login
    flow is started, but only for plain menu clicks (*way* falsy).
    """
    if __SESSION.get('SESSION'):
        # Currently signed in; user click means "log out".
        set_session({'SESSION': None}, messge=__("sign_in"))
    elif not way:
        get_session()
# Shared sign-in/out menu action, created once at import time and added to
# every window's service menu.  Its label is kept in sync with the session
# state by set_session()/get_session().
login_action = QAction(
    __("sign_in"),
    objectName="action-login",
    toolTip=__("login"),
    triggered=login,
)
def get_user_session():
    """Show the modal login dialog and return the resulting session token
    (None / empty when the user did not sign in)."""
    from orangecontrib.blue_whale.widgets.utils.login import MainWindow
    dialog = MainWindow(__SESSION)
    dialog.exec_()
    return __SESSION.get('SESSION')
def set_session(value, messge=__("sign_in")):
    """Merge *value* into the module session store and relabel the shared
    login menu action.

    ``messge`` (sic) is kept as-is for interface compatibility; note its
    default is resolved once, at import time.
    """
    __SESSION.update(value)
    login_action.setText(messge)
def get_session(key='SESSION'):
    """Return the session value for *key*, prompting the user to log in
    when no token is stored yet.  Returns None if login fails."""
    cached = __SESSION.get(key)
    if cached:
        return cached
    if not get_user_session():
        return None
    # Login succeeded: flip the menu action to "sign out".
    login_action.setText(__("sign_out"))
    return __SESSION.get(key)
def get_session_value(key='SESSION'):
    """Read a session value without ever triggering the login flow."""
    return __SESSION.get(key)
def set_service():
    """Open the modal service-configuration dialog."""
    from orangecontrib.blue_whale.widgets.utils.service_window import ServiceWindow
    dialog = ServiceWindow()
    dialog.exec_()
class BWCanvasMainWindow(CanvasMainWindow):
    """Orange canvas main window extended with a Blue Whale "service" menu
    (sign in/out, service settings) and support for opening case files.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        global login_action
        menubar = self.menuBar()
        # Dedicated top-level menu hosting the Blue Whale service actions.
        server_menu = QMenu(
            self.tr(__("service")), menubar, objectName="server-menu"
        )
        self.settings_action = QAction(
            __("service_settings"), self,
            objectName="action-settings",
            toolTip=__("service_settings_tip"),
            triggered=set_service
        )
        # The module-level login action is shared by every window so its
        # label stays in sync with the global session state.
        server_menu.addAction(login_action)
        server_menu.addAction(self.settings_action)
        menubar.addMenu(server_menu)
        self.setMenuBar(menubar)

    def open_case(self, filename):
        """
        Open and load a '*.report' from 'filename'
        """
        widget_registry = get_global_registry()
        # Reuse this window when it is transient, otherwise spawn a new one.
        if self.is_transient():
            window = self
        else:
            window = self.create_new_window()
        # Toggle then clear the modified flag to reset the window state.
        window.setWindowModified(True)
        window.setWindowModified(False)
        window.setStyleSheet(get_style_sheet())
        window.setAttribute(Qt.WA_DeleteOnClose)
        window.setWindowIcon(config.application_icon())
        window.connect_output_stream(TextStream())
        window.set_widget_registry(widget_registry)
        window.open_example_scheme(filename)

    def closeEvent(self, event):
        # No extra teardown beyond the base implementation.
        super().closeEvent(event)
/AdyenAntoni-1.0.0.tar.gz/AdyenAntoni-1.0.0/Adyen/httpclient.py |
from __future__ import absolute_import, division, unicode_literals
try:
import requests
except ImportError:
requests = None
try:
import pycurl
except ImportError:
pycurl = None
from urllib.parse import urlencode
from urllib.request import Request, urlopen
from urllib.error import HTTPError
from io import BytesIO
import json as json_lib
import base64
class HTTPClient(object):
    """HTTP transport for the Adyen SDK.

    Picks a backend at construction time (``requests``, then ``pycurl``,
    then the stdlib urllib) and binds it to ``self.request``.  Every
    backend shares one signature and returns the tuple
    ``(raw_response, raw_request, status_code, response_headers)``.
    """

    def __init__(
        self,
        user_agent_suffix,
        lib_version,
        force_request=None,
        timeout=None,
    ):
        """Create a client.

        Args:
            user_agent_suffix (str): Prefix of the User-Agent header.
            lib_version (str): Library version appended to the suffix.
            force_request (str, optional): 'requests', 'pycurl', or any
                other truthy value for urllib; overrides auto-detection.
            timeout (float, optional): Per-request timeout in seconds.
        """
        self.user_agent = user_agent_suffix + lib_version
        # Prefer requests, then pycurl, then urllib, unless a backend is
        # explicitly forced.
        if not force_request:
            if requests:
                self.request = self._requests_request
            elif pycurl:
                self.request = self._pycurl_request
            else:
                self.request = self._urllib_request
        else:
            if force_request == 'requests':
                self.request = self._requests_request
            elif force_request == 'pycurl':
                self.request = self._pycurl_request
            else:
                self.request = self._urllib_request
        self.timeout = timeout

    def _pycurl_request(
        self,
        method,
        url,
        json=None,
        data=None,
        username="",
        password="",
        xapikey="",
        headers=None
    ):
        """Send a request using pycurl.

        Either *json* or *data* must be provided for POST/PATCH.  Basic
        auth is used when *username* and *password* are given; otherwise
        *xapikey* (if set) is sent as the X-API-KEY header.

        Args:
            method (str): HTTP method (GET/POST/PATCH/DELETE).
            url (str): URL to send the request to.
            json (dict, optional): JSON body for POST/PATCH.
            data (dict, optional): Flat key/value form body for POST/PATCH.
            username (str, optional): Basic-auth username.
            password (str, optional): Basic-auth password.
            xapikey (str, optional): Adyen API key, used when no basic auth.
            headers (dict, optional): Extra request headers.

        Returns:
            str: Raw response received
            str: Raw request placed
            int: HTTP status code, eg 200,404,401
            dict: Key/Value pairs of the headers received.
        """
        if headers is None:
            headers = {}
        response_headers = {}
        stringbuffer = BytesIO()
        curl = pycurl.Curl()
        curl.setopt(curl.URL, url)
        curl.setopt(curl.WRITEDATA, stringbuffer)

        def record_header(header_line):
            # FIX: response_headers used to be returned empty, although the
            # contract promises the received headers.  curl hands raw header
            # lines as bytes (latin-1 per the HTTP RFC).
            line = header_line.decode('iso-8859-1')
            if ':' in line:
                name, _, value = line.partition(':')
                response_headers[name.strip()] = value.strip()

        curl.setopt(curl.HEADERFUNCTION, record_header)
        # Add User-Agent header to request so that the
        # request can be identified as coming from the Adyen Python library.
        headers['User-Agent'] = self.user_agent
        if username and password:
            curl.setopt(curl.USERPWD, '%s:%s' % (username, password))
        elif xapikey:
            headers["X-API-KEY"] = xapikey
        # Convert the header dict to the formatted list pycurl needs.
        header_list = ["%s:%s" % (k, v) for k, v in headers.items()]
        # Ensure proper content-type when posting JSON.
        if json:
            header_list.append("Content-Type:application/json")
        curl.setopt(pycurl.HTTPHEADER, header_list)
        if method == "POST" or method == "PATCH":
            # Keep the regular dict to return later as the "raw request".
            raw_store = json
            raw_request = json_lib.dumps(json) if json else urlencode(data)
            curl.setopt(curl.POSTFIELDS, raw_request)
            # Needed here as POSTFIELDS forces the method to POST.
            curl.setopt(curl.CUSTOMREQUEST, method)
        elif method == "GET" or method == "DELETE":
            curl.setopt(curl.CUSTOMREQUEST, method)
            raw_store = None
        curl.setopt(curl.TIMEOUT, self.timeout)
        curl.perform()
        # Grab the response content.
        result = stringbuffer.getvalue()
        status_code = curl.getinfo(curl.RESPONSE_CODE)
        curl.close()
        # Return the regular dict instead of the encoded body.
        raw_request = raw_store
        return result, raw_request, status_code, response_headers

    def _requests_request(
        self,
        method,
        url,
        json=None,
        data=None,
        username="",
        password="",
        xapikey="",
        headers=None
    ):
        """Send a request using the ``requests`` package.

        Same contract and arguments as :meth:`_pycurl_request`.

        Returns:
            str: Raw response received
            str: Raw request placed
            int: HTTP status code, eg 200,404,401
            dict: Key/Value pairs of the headers received.
        """
        if headers is None:
            headers = {}
        # Basic auth wins over the API key when both are supplied.
        auth = None
        if username and password:
            auth = requests.auth.HTTPBasicAuth(username, password)
        elif xapikey:
            headers['x-api-key'] = xapikey
        # Add User-Agent header to request so that the request
        # can be identified as coming from the Adyen Python library.
        headers['User-Agent'] = self.user_agent
        request = requests.request(
            method=method,
            url=url,
            auth=auth,
            data=data,
            json=json,
            headers=headers,
            timeout=self.timeout
        )
        # Return the plain dict (not the serialized body) as the request.
        message = json
        return request.text, message, request.status_code, request.headers

    def _urllib_request(
        self,
        method,
        url,
        json=None,
        data=None,
        username="",
        password="",
        xapikey="",
        headers=None,
    ):
        """Send a request using the stdlib urllib.

        Same contract and arguments as :meth:`_pycurl_request`.

        Raises:
            ValueError: for POST/PATCH when neither *json* nor *data* is
                provided.

        Returns:
            str: Raw response received
            str: Raw request placed
            int: HTTP status code, eg 200,404,401
            dict: Key/Value pairs of the headers received.
        """
        if headers is None:
            headers = {}
        if method == "POST" or method == "PATCH":
            # FIX: validate before building the body; previously a missing
            # json AND data crashed inside urlencode(None) with a TypeError
            # before the documented ValueError could be raised.
            if not json and not data:
                raise ValueError("Please provide either a json or a data field.")
            # Store the regular dict to return later as the "raw request".
            raw_store = json
            raw_request = json_lib.dumps(json) if json else urlencode(data)
            url_request = Request(url, data=raw_request.encode('utf8'), method=method)
            raw_request = raw_store
            if json:
                url_request.add_header('Content-Type', 'application/json')
        elif method == "GET" or method == "DELETE":
            url_request = Request(url, method=method)
            raw_request = None
        # Add User-Agent header to request so that the
        # request can be identified as coming from the Adyen Python library.
        headers['User-Agent'] = self.user_agent
        # Adding basic auth if username and password provided.
        if username and password:
            basic_authstring = base64.encodebytes(('%s:%s' %
                                                   (username, password))
                                                  .encode()).decode(). \
                replace('\n', '')
            url_request.add_header("Authorization",
                                   "Basic %s" % basic_authstring)
        elif xapikey:
            headers["X-API-KEY"] = xapikey
        # Adding the headers to the request.
        for key, value in headers.items():
            url_request.add_header(key, str(value))
        # urllib raises on all non-2xx responses; translate that back into
        # the common (response, request, status, headers) tuple.
        try:
            response = urlopen(url_request, timeout=self.timeout)
        except HTTPError as e:
            raw_response = e.read()
            return raw_response, raw_request, e.getcode(), e.headers
        else:
            raw_response = response.read()
            response.close()
            # dict(response.info()) is the headers of the response.
            return (raw_response, raw_request,
                    response.getcode(), dict(response.info()))

    def request(
        self,
        method,
        url,
        json="",
        data="",
        username="",
        password="",
        headers=None,
    ):
        """Placeholder; overridden in __init__ with the selected backend.

        All backends share one contract: either *json* or *data* is the
        body for POST/PATCH; basic auth is used when *username*/*password*
        are provided; a *headers* dict, if given, is applied as-is.

        Returns:
            str: Raw request placed
            str: Raw response received
            int: HTTP status code, eg 200,404,401
            dict: Key/Value pairs of the headers received.
        """
        raise NotImplementedError('request of HTTPClient should have been '
                                  'overridden on initialization. '
                                  'Otherwise, can be overridden to '
                                  'supply your own post method')
/D-Analyst-1.0.6.tar.gz/D-Analyst-1.0.6/main/analyst/visuals/plot_visual.py | import numpy as np
from analyst import get_color, get_next_color
from .visual import Visual
__all__ = ['process_coordinates', 'PlotVisual']
def process_coordinates(x=None, y=None, thickness=None):
    """Normalize plot inputs into an (N, 2) float32 position array.

    When only *x* is given it is treated as the y-values and x-coordinates
    are generated uniformly over [0, 1], one row per plot.  Returns
    ``(position, shape)`` where ``shape`` is ``(nplots, nsamples)``.
    *thickness* is accepted for signature compatibility and unused here.
    """
    if y is None and x is not None:
        # Only y-values supplied: promote to 2D and synthesize the x-axis.
        values = x.reshape((1, -1)) if x.ndim == 1 else x
        nplots, nsamples = values.shape
        y = values
        x = np.tile(np.linspace(0., 1., nsamples).reshape((1, -1)), (nplots, 1))

    x = np.array(x, dtype=np.float32)
    y = np.array(y, dtype=np.float32)
    assert x.shape == y.shape
    if x.ndim == 1:
        x = x.reshape((1, -1))
        y = y.reshape((1, -1))

    # Interleave the flattened coordinates into an (N, 2) array.
    position = np.column_stack((x.ravel(), y.ravel())).astype(np.float32)
    return position, x.shape
class PlotVisual(Visual):
    """Visual rendering one or several curves / point sets, with optional
    per-primitive coloring, a colormap texture, and thick-line support."""

    def initialize(self, x=None, y=None, color=None, point_size=1.0, position=None,
                   nprimitives=None, index=None, color_array_index=None, thickness=None,
                   options=None, autocolor=None, autonormalizable=True):
        """Set up vertex data, colors and shader snippets for the plot.

        Either *position* (an (N, 2) array) or *x*/*y* must be given.
        When *thickness* is set, each vertex is expanded into two offset
        vertices forming a TRIANGLE_STRIP of that half-width.
        """
        if position is not None:
            position = np.array(position, dtype=np.float32)
            if thickness:
                shape = (2 * position.shape[0], 1)
            else:
                shape = (1, position.shape[0])
        else:
            position, shape = process_coordinates(x=x, y=y)
            if thickness:
                shape = (shape[0], 2 * shape[1])
        self.size = np.prod(shape)
        if not nprimitives:
            nprimitives = shape[0]
            nsamples = shape[1]
        else:
            nsamples = self.size // nprimitives
        # Thick lines: offset each point by +/- thickness along the local
        # normal, producing an interleaved triangle strip.
        if thickness and position.shape[0] >= 2:
            w = thickness
            n = self.size
            X = position
            Y = np.zeros((n, 2))
            # BUGFIX: n / 2 is a float under true division and is not a
            # valid array shape; use integer division.
            u = np.zeros((n // 2, 2))
            X2 = np.vstack((X, 2*X[-1,:]-X[-2,:]))
            u[:,0] = -np.diff(X2[:,1])
            u[:,1] = np.diff(X2[:,0])
            r = (u[:,0] ** 2 + u[:,1] ** 2) ** .5
            # Guard against zero-length segments (duplicate points).
            rm = r.mean()
            r[r == 0.] = rm
            u[:,0] /= r
            u[:,1] /= r
            Y[::2,:] = X - w * u
            Y[1::2,:] = X + w * u
            position = Y
            x = Y[:,0]
            y = Y[:,1]
            self.primitive_type = 'TRIANGLE_STRIP'
        if nsamples <= 1:
            self.bounds = [0, self.size]
        else:
            self.bounds = np.arange(0, self.size + 1, nsamples)
        if color is None:
            if nprimitives <= 1:
                color = self.default.color
        if autocolor is not None:
            if nprimitives <= 1:
                color = get_next_color(autocolor)
            else:
                # BUGFIX: xrange does not exist on Python 3; range is the
                # equivalent and also works on Python 2.
                color = [get_next_color(i + autocolor) for i in range(nprimitives)]
        color = get_color(color)
        # Normalize list colors: a flat RGB(A) list becomes a tuple, a
        # nested list becomes an array of per-vertex/per-primitive colors.
        if type(color) is list:
            if color and (type(color[0]) != tuple) and (3 <= len(color) <= 4):
                color = tuple(color)
            else:
                color = np.array(color)
        use_color_array = color_array_index is not None
        if isinstance(color, np.ndarray):
            colors_ndim = color.shape[1]
            if color.shape[0] == self.size:
                # One color per vertex: plain color attribute.
                single_color = False
            else:
                # One color per primitive: go through the colormap texture.
                use_color_array = True
                single_color = False
        elif type(color) is tuple:
            single_color = True
            colors_ndim = len(color)
        self.add_attribute("position", ndim=2, data=position,
            autonormalizable=autonormalizable)
        if index is not None:
            index = np.array(index)
            self.add_index("index", data=index)
        if single_color and not use_color_array:
            # Single uniform color for the whole visual.
            self.add_uniform("color", ndim=colors_ndim, data=color)
            if colors_ndim == 3:
                self.add_fragment_main("""out_color = vec4(color, 1.0);""")
            elif colors_ndim == 4:
                self.add_fragment_main("""out_color = color;""")
        elif not use_color_array:
            # Per-vertex colors passed through a varying.
            self.add_attribute("color", ndim=colors_ndim, data=color)
            self.add_varying("varying_color", vartype="float", ndim=colors_ndim)
            self.add_vertex_main("""varying_color = color;""")
            if colors_ndim == 3:
                self.add_fragment_main("""out_color = vec4(varying_color, 1.0);""")
            elif colors_ndim == 4:
                self.add_fragment_main("""out_color = varying_color;""")
        elif use_color_array:
            # Per-primitive colors sampled from a 1D colormap texture.
            if color_array_index is None:
                color_array_index = np.repeat(np.arange(nprimitives), nsamples)
            color_array_index = np.array(color_array_index)
            ncolors = color.shape[0]
            ncomponents = color.shape[1]
            color = color.reshape((1, ncolors, ncomponents))
            # Sample at texel centers: offset by half a texel.
            dx = 1. / ncolors
            offset = dx / 2.
            self.add_texture('colormap', ncomponents=ncomponents, ndim=1, data=color)
            self.add_attribute('index', ndim=1, vartype='int', data=color_array_index)
            self.add_varying('vindex', vartype='int', ndim=1)
            self.add_vertex_main("""vindex = index;""")
            self.add_fragment_main("""
        float coord = %.5f + vindex * %.5f;
        vec4 color = texture1D(colormap, coord);
        out_color = color;
        """ % (offset, dx))
        self.add_uniform("point_size", data=point_size)
        self.add_vertex_main("""gl_PointSize = point_size;""")
/3ETool-0.8.3.tar.gz/3ETool-0.8.3/EEETools/BlockSubClasses/condenser.py | from EEETools.MainModules.support_blocks import Drawer
from EEETools.MainModules.main_module import Block
import xml.etree.ElementTree as ETree
from EEETools import costants
class Condenser(Block):
    """Exergy-analysis block modelling a condenser.

    Depending on ``options.condenser_is_dissipative`` the condenser either
    routes its fluid streams through a support ``Drawer`` block or handles
    its connections directly.
    """

    def __init__(self, inputID, main_class):
        super().__init__(inputID, main_class)
        self.type = "condenser"
        if self.main_class.options.condenser_is_dissipative:
            self.has_support_block = True
            self.support_block.append(Drawer(main_class, self, is_input=True))
        else:
            self.has_support_block = False

    def is_ready_for_calculation(self):
        """Calculation needs at least one input and one output connection."""
        return len(self.input_connections) >= 1 and len(self.output_connections) >= 1

    def prepare_for_calculation(self):
        """Set up auxiliary structures before solving.

        For a dissipative condenser the support drawer is prepared first.
        When losses are free (``loss_cost_is_zero``), an automatically
        generated, non-fluid "Exergy Loss" connection balances the block.
        """
        if self.main_class.options.condenser_is_dissipative:
            self.support_block[0].prepare_for_calculation()
        if self.main_class.options.loss_cost_is_zero:
            new_conn = self.main_class.append_connection(from_block=self)
            new_conn.name = "Condenser Exergy Loss"
            new_conn.automatically_generated_connection = True
            new_conn.exergy_value = self.exergy_balance
            new_conn.is_fluid_stream = False

    def initialize_connection_list(self, input_list):
        """Attach the two connections given by index in *input_list*
        (first entry = input, second = output), routing them through the
        support drawer when the condenser is dissipative."""
        new_input_conn = self.main_class.find_connection_by_index(input_list[0])
        new_output_conn = self.main_class.find_connection_by_index(input_list[1])
        if self.main_class.options.condenser_is_dissipative:
            self.add_connection(new_input_conn, is_input=True, append_to_support_block=0)
            self.add_connection(new_output_conn, is_input=False, append_to_support_block=0)
        else:
            self.add_connection(new_input_conn, is_input=True)
            self.add_connection(new_output_conn, is_input=False)

    def export_xml_connection_list(self) -> ETree.Element:
        """Serialize the non auto-generated fluid connections to XML."""
        xml_connection_list = ETree.Element("Connections")
        fluid_connections = ETree.SubElement(xml_connection_list, "FluidConnections")
        if self.main_class.options.condenser_is_dissipative:
            # Dissipative condensers expose their streams via the drawer.
            input_connections = self.support_block[0].external_input_connections
            output_connections = self.support_block[0].external_output_connections
        else:
            input_connections = self.external_input_connections
            output_connections = self.external_output_connections
        for input_connection in input_connections:
            if not input_connection.automatically_generated_connection:
                input_xml = ETree.SubElement(fluid_connections, "input")
                input_xml.set("index", str(input_connection.index))
        for output_connection in output_connections:
            if not output_connection.automatically_generated_connection:
                output_xml = ETree.SubElement(fluid_connections, "output")
                output_xml.set("index", str(output_connection.index))
        return xml_connection_list

    def append_xml_connection_list(self, input_list: ETree.Element):
        """Restore fluid connections from the XML written by
        export_xml_connection_list()."""
        fluid_connections = input_list.find("FluidConnections")
        if self.main_class.options.condenser_is_dissipative:
            self.__add_connection_by_index(fluid_connections, "input", append_to_support_block=0)
            self.__add_connection_by_index(fluid_connections, "output", append_to_support_block=0)
        else:
            self.__add_connection_by_index(fluid_connections, "input")
            self.__add_connection_by_index(fluid_connections, "output")

    def __add_connection_by_index(self, input_list: ETree.Element, connection_name, append_to_support_block=None):
        """Attach each <input>/<output> element's connection, looked up by
        its "index" attribute; unknown indices are silently skipped."""
        if connection_name == "input":
            is_input = True
        else:
            is_input = False
        for connection in input_list.findall(connection_name):
            new_conn = self.main_class.find_connection_by_index(float(connection.get("index")))
            if new_conn is not None:
                self.add_connection(new_conn, is_input, append_to_support_block=append_to_support_block)

    @classmethod
    def return_EES_needed_index(cls):
        """Indices needed by the EES code generator: name -> [index, repeated]."""
        return_dict = {"flow input": [1, False],
                       "flow output": [2, False]}
        return return_dict

    @classmethod
    def return_EES_base_equations(cls):
        """Base EES equations: mass and pressure continuity between the
        inlet and outlet streams."""
        return_element = dict()
        variables_list = [{"variable": "flow input", "type": costants.ZONE_TYPE_FLOW_RATE},
                          {"variable": "flow output", "type": costants.ZONE_TYPE_FLOW_RATE}]
        return_element.update({"mass_continuity": {"variables": variables_list, "related_option": "none"}})
        variables_list = [{"variable": "flow input", "type": costants.ZONE_TYPE_PRESSURE},
                          {"variable": "flow output", "type": costants.ZONE_TYPE_PRESSURE}]
        return_element.update({"pressure_continuity": {"variables": variables_list, "related_option": "none"}})
        return return_element

    def return_other_zone_connections(self, zone_type, input_connection):
        """Return the connections that share *zone_type* with
        *input_connection*.

        Flow rate, fluid type and pressure are all preserved across the
        condenser, so whenever *input_connection* belongs to this block all
        of its fluid stream connections are returned.
        """
        if zone_type == costants.ZONE_TYPE_FLOW_RATE:
            # In the condenser flow rate is preserved, hence if "input_connection" stream is connected to the condenser
            # block the methods must returns each fluid stream connected to that block
            if self.connection_is_in_connections_list(input_connection):
                return self.get_fluid_stream_connections()
            else:
                return list()
        elif zone_type == costants.ZONE_TYPE_FLUID:
            # In the condenser fluid type is preserved, hence if "input_connection" stream is connected to the condenser
            # block the methods must returns each fluid stream connected to that block
            if self.connection_is_in_connections_list(input_connection):
                return self.get_fluid_stream_connections()
            else:
                return list()
        elif zone_type == costants.ZONE_TYPE_PRESSURE:
            # In the condenser pressure is preserved, hence if "input_connection" stream is connected to the condenser
            # block the methods must returns each fluid stream connected to that block
            if self.connection_is_in_connections_list(input_connection):
                return self.get_fluid_stream_connections()
            else:
                return list()
        else:
            return list()
/Files.com-1.0.1051-py3-none-any.whl/files_sdk/models/form_field_set.py | import builtins
import datetime
from files_sdk.api import Api
from files_sdk.list_obj import ListObj
from files_sdk.exceptions import InvalidParameterError, MissingParameterError, NotImplementedError
class FormFieldSet:
    """Files.com Form Field Set model.

    Instance attributes mirror the API fields listed in
    ``default_attributes``; instance methods call the /form_field_sets
    endpoints for this object's id.
    """

    default_attributes = {
        'id': None, # int64 - Form field set id
        'title': None, # string - Title to be displayed
        'form_layout': None, # array - Layout of the form
        'form_fields': None, # array - Associated form fields
        'skip_name': None, # boolean - Any associated InboxRegistrations or BundleRegistrations can be saved without providing name
        'skip_email': None, # boolean - Any associated InboxRegistrations or BundleRegistrations can be saved without providing email
        'skip_company': None, # boolean - Any associated InboxRegistrations or BundleRegistrations can be saved without providing company
        'user_id': None, # int64 - User ID.  Provide a value of `0` to operate the current session's user.
    }

    def __init__(self, attributes=None, options=None):
        if not isinstance(attributes, dict):
            attributes = {}
        if not isinstance(options, dict):
            options = {}
        self.set_attributes(attributes)
        self.options = options

    def set_attributes(self, attributes):
        """Set every known attribute from *attributes*, falling back to the
        default; keys not in default_attributes are ignored."""
        for (attribute, default_value) in FormFieldSet.default_attributes.items():
            setattr(self, attribute, attributes.get(attribute, default_value))

    def get_attributes(self):
        """Return the known, non-None attributes as a plain dict."""
        return {k: getattr(self, k, None) for k in FormFieldSet.default_attributes if getattr(self, k, None) is not None}

    # Parameters:
    #   title - string - Title to be displayed
    #   skip_email - boolean - Skip validating form email
    #   skip_name - boolean - Skip validating form name
    #   skip_company - boolean - Skip validating company
    #   form_fields - array(object)
    def update(self, params = None):
        """PATCH this form field set; requires a non-empty self.id."""
        if not isinstance(params, dict):
            params = {}
        if hasattr(self, "id") and self.id:
            params['id'] = self.id
        else:
            raise MissingParameterError("Current object doesn't have a id")
        if "id" not in params:
            raise MissingParameterError("Parameter missing: id")
        if "id" in params and not isinstance(params["id"], int):
            raise InvalidParameterError("Bad parameter: id must be an int")
        if "title" in params and not isinstance(params["title"], str):
            raise InvalidParameterError("Bad parameter: title must be an str")
        if "form_fields" in params and not isinstance(params["form_fields"], builtins.list):
            raise InvalidParameterError("Bad parameter: form_fields must be an list")
        response, _options = Api.send_request("PATCH", "/form_field_sets/{id}".format(id=params['id']), params, self.options)
        return response.data

    def delete(self, params = None):
        """DELETE this form field set; requires a non-empty self.id."""
        if not isinstance(params, dict):
            params = {}
        if hasattr(self, "id") and self.id:
            params['id'] = self.id
        else:
            raise MissingParameterError("Current object doesn't have a id")
        if "id" not in params:
            raise MissingParameterError("Parameter missing: id")
        if "id" in params and not isinstance(params["id"], int):
            raise InvalidParameterError("Bad parameter: id must be an int")
        response, _options = Api.send_request("DELETE", "/form_field_sets/{id}".format(id=params['id']), params, self.options)
        return response.data

    def destroy(self, params = None):
        # Alias of delete().  FIX: propagate delete()'s return value
        # instead of silently returning None.
        return self.delete(params)

    def save(self):
        """Persist the object: PATCH when it already has an id, otherwise
        POST a new resource and adopt the attributes returned by the API."""
        if hasattr(self, "id") and self.id:
            self.update(self.get_attributes())
        else:
            new_obj = create(self.get_attributes(), self.options)
            self.set_attributes(new_obj.get_attributes())
# Parameters:
# user_id - int64 - User ID. Provide a value of `0` to operate the current session's user.
# cursor - string - Used for pagination. When a list request has more records available, cursors are provided in the response headers `X-Files-Cursor-Next` and `X-Files-Cursor-Prev`. Send one of those cursor value here to resume an existing list from the next available record. Note: many of our SDKs have iterator methods that will automatically handle cursor-based pagination.
# per_page - int64 - Number of records to show per page. (Max: 10,000, 1,000 or less is recommended).
def list(params = None, options = None):
    """List form field sets (GET /form_field_sets); returns a ListObj.

    NOTE: intentionally shadows the builtin ``list`` to match the SDK's
    public module-level API.
    """
    if not isinstance(params, dict):
        params = {}
    if not isinstance(options, dict):
        options = {}
    if "user_id" in params and not isinstance(params["user_id"], int):
        raise InvalidParameterError("Bad parameter: user_id must be an int")
    if "cursor" in params and not isinstance(params["cursor"], str):
        raise InvalidParameterError("Bad parameter: cursor must be an str")
    if "per_page" in params and not isinstance(params["per_page"], int):
        raise InvalidParameterError("Bad parameter: per_page must be an int")
    return ListObj(FormFieldSet,"GET", "/form_field_sets", params, options)
def all(params = None, options = None):
    """Alias of list().

    FIX: forward list()'s return value; previously the ListObj was
    discarded and this function always returned None.
    """
    return list(params, options)
# Parameters:
# id (required) - int64 - Form Field Set ID.
def find(id, params = None, options = None):
    """GET a single form field set by *id*; returns a FormFieldSet."""
    if not isinstance(params, dict):
        params = {}
    if not isinstance(options, dict):
        options = {}
    params["id"] = id
    if "id" in params and not isinstance(params["id"], int):
        raise InvalidParameterError("Bad parameter: id must be an int")
    if "id" not in params:
        raise MissingParameterError("Parameter missing: id")
    response, options = Api.send_request("GET", "/form_field_sets/{id}".format(id=params['id']), params, options)
    return FormFieldSet(response.data, options)
def get(id, params = None, options = None):
    """Alias of find().

    FIX: return the found FormFieldSet; previously the result was
    discarded and this function always returned None.
    """
    return find(id, params, options)
# Parameters:
# user_id - int64 - User ID. Provide a value of `0` to operate the current session's user.
# title - string - Title to be displayed
# skip_email - boolean - Skip validating form email
# skip_name - boolean - Skip validating form name
# skip_company - boolean - Skip validating company
# form_fields - array(object)
def create(params = None, options = None):
    """POST a new form field set; returns the created FormFieldSet."""
    if not isinstance(params, dict):
        params = {}
    if not isinstance(options, dict):
        options = {}
    if "user_id" in params and not isinstance(params["user_id"], int):
        raise InvalidParameterError("Bad parameter: user_id must be an int")
    if "title" in params and not isinstance(params["title"], str):
        raise InvalidParameterError("Bad parameter: title must be an str")
    if "form_fields" in params and not isinstance(params["form_fields"], builtins.list):
        raise InvalidParameterError("Bad parameter: form_fields must be an list")
    response, options = Api.send_request("POST", "/form_field_sets", params, options)
    return FormFieldSet(response.data, options)
# Parameters:
# title - string - Title to be displayed
# skip_email - boolean - Skip validating form email
# skip_name - boolean - Skip validating form name
# skip_company - boolean - Skip validating company
# form_fields - array(object)
def update(id, params = None, options = None):
    """PATCH the form field set *id*; returns the updated FormFieldSet."""
    if not isinstance(params, dict):
        params = {}
    if not isinstance(options, dict):
        options = {}
    params["id"] = id
    if "id" in params and not isinstance(params["id"], int):
        raise InvalidParameterError("Bad parameter: id must be an int")
    if "title" in params and not isinstance(params["title"], str):
        raise InvalidParameterError("Bad parameter: title must be an str")
    if "form_fields" in params and not isinstance(params["form_fields"], builtins.list):
        raise InvalidParameterError("Bad parameter: form_fields must be an list")
    if "id" not in params:
        raise MissingParameterError("Parameter missing: id")
    response, options = Api.send_request("PATCH", "/form_field_sets/{id}".format(id=params['id']), params, options)
    return FormFieldSet(response.data, options)
def delete(id, params = None, options = None):
    """DELETE the form field set *id*; returns the API response data."""
    if not isinstance(params, dict):
        params = {}
    if not isinstance(options, dict):
        options = {}
    params["id"] = id
    if "id" in params and not isinstance(params["id"], int):
        raise InvalidParameterError("Bad parameter: id must be an int")
    if "id" not in params:
        raise MissingParameterError("Parameter missing: id")
    response, _options = Api.send_request("DELETE", "/form_field_sets/{id}".format(id=params['id']), params, options)
    return response.data
def destroy(id, params = None, options = None):
    """Alias for delete(), kept for API symmetry.

    Fix: the original discarded delete()'s return value, so callers of
    destroy() always got None while callers of delete() got the response
    payload. Propagating the result is backward compatible (callers that
    ignored the None still work) and makes the two entry points consistent.
    """
    return delete(id, params, options)
def new(*args, **kwargs):
    # Convenience constructor: forwards all arguments to FormFieldSet unchanged.
    return FormFieldSet(*args, **kwargs)
/Divisi-0.6.10.tar.gz/Divisi-0.6.10/csc/divisi/dict_mixin.py | class MyDictMixin(object):
'''Emulates a dictionary interface, more efficiently than DictMixin.
Mixin defining all dictionary methods for classes that already have
a minimum dictionary interface including getitem, setitem, delitem,
and __iter__. Without knowledge of the subclass constructor, the mixin
does not define __init__() or copy(). In addition to the four base
methods, progressively more efficiency comes with defining
__contains__() and iteritems().
Based on UserDict in python 2.4.
'''
__slots__ = []
# second level definitions support higher levels
# def has_key(self, key):
# """
# Does this object have a value for the given key?
# """
# try:
# _ = self[key]
# except (KeyError, IndexError):
# return False
# return True
## Note: the above definition of has_key is incorrect for dicts
## with default values.
def __contains__(self, key):
return self.has_key(key)
# third level takes advantage of second level definitions
def iteritems(self):
"""
Iterate over a list of (key, value) tuples.
"""
for k in self:
yield (k, self[k])
def iterkeys(self):
"""
An iterator over the keys of this object.
"""
return self.__iter__()
def keys(self):
"""
List the keys of this object.
"""
return list(self.__iter__())
# fourth level uses definitions from lower levels
def itervalues(self):
"""
An iterator over the values of this object.
"""
for _, v in self.iteritems():
yield v
def values(self):
"""
List the values of this object.
"""
return [v for _, v in self.iteritems()]
def items(self):
"""
Express this object as a list of (key, value) tuples.
"""
return list(self.iteritems())
def clear(self):
"""
Remove all elements from this object.
"""
for key in self.keys():
del self[key]
def setdefault(self, key, default=None):
"""`D.setdefault(k,d)` -> `D.get(k,d)`, and also sets `D[k]=d` if
`k not in D`"""
try:
return self[key]
except KeyError:
self[key] = default
return default
def pop(self, key, *args):
"""
Get a value from this object (disregarding its key) and delete it.
"""
if len(args) > 1:
raise TypeError, "pop expected at most 2 arguments, got "\
+ repr(1 + len(args))
try:
value = self[key]
except KeyError:
if args:
return args[0]
raise
del self[key]
return value
def popitem(self):
"""
Get a (key, value) tuple from this object, and delete that key.
"""
try:
k, v = self.iteritems().next()
except StopIteration:
raise KeyError, 'container is empty'
del self[k]
return (k, v)
def update(self, other=None, **kwargs):
"""
Add all the items from `other` into `self`, overwriting values when
that key already exists.
"""
# Make progressively weaker assumptions about "other"
if other is None:
pass
elif hasattr(other, 'iteritems'): # iteritems saves memory and lookups
for k, v in other.iteritems():
self[k] = v
elif hasattr(other, 'keys'):
for k in other.keys():
self[k] = other[k]
else:
for k, v in other:
self[k] = v
if kwargs:
self.update(kwargs)
def get(self, key, default=None):
"""
Get the value associated with `key`, or `default` if the key does not
exist.
"""
try:
return self[key]
except KeyError:
return default
def __repr__(self):
return repr(dict(self.iteritems()))
def __cmp__(self, other):
if other is None:
return 1
if isinstance(other, MyDictMixin):
other = dict(other.iteritems())
return cmp(dict(self.iteritems()), other)
def __len__(self):
return len(self.keys()) | PypiClean |
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/mdnd/dropMode/OverDropMode.js | define("dojox/mdnd/dropMode/OverDropMode",["dojo/_base/kernel","dojo/_base/declare","dojo/_base/connect","dojo/_base/html","dojo/_base/array","dojox/mdnd/AreaManager"],function(_1){
var _2=_1.declare("dojox.mdnd.dropMode.OverDropMode",null,{_oldXPoint:null,_oldYPoint:null,_oldBehaviour:"up",constructor:function(){
this._dragHandler=[_1.connect(dojox.mdnd.areaManager(),"onDragEnter",function(_3,_4){
var m=dojox.mdnd.areaManager();
if(m._oldIndexArea==-1){
m._oldIndexArea=m._lastValidIndexArea;
}
})];
},addArea:function(_5,_6){
var _7=_5.length,_8=_1.position(_6.node,true);
_6.coords={"x":_8.x,"y":_8.y};
if(_7==0){
_5.push(_6);
}else{
var x=_6.coords.x;
for(var i=0;i<_7;i++){
if(x<_5[i].coords.x){
for(var j=_7-1;j>=i;j--){
_5[j+1]=_5[j];
}
_5[i]=_6;
break;
}
}
if(i==_7){
_5.push(_6);
}
}
return _5;
},updateAreas:function(_9){
var _a=_9.length;
for(var i=0;i<_a;i++){
this._updateArea(_9[i]);
}
},_updateArea:function(_b){
var _c=_1.position(_b.node,true);
_b.coords.x=_c.x;
_b.coords.x2=_c.x+_c.w;
_b.coords.y=_c.y;
},initItems:function(_d){
_1.forEach(_d.items,function(_e){
var _f=_e.item.node;
var _10=_1.position(_f,true);
var y=_10.y+_10.h/2;
_e.y=y;
});
_d.initItems=true;
},refreshItems:function(_11,_12,_13,_14){
if(_12==-1){
return;
}else{
if(_11&&_13&&_13.h){
var _15=_13.h;
if(_11.margin){
_15+=_11.margin.t;
}
var _16=_11.items.length;
for(var i=_12;i<_16;i++){
var _17=_11.items[i];
if(_14){
_17.y+=_15;
}else{
_17.y-=_15;
}
}
}
}
},getDragPoint:function(_18,_19,_1a){
return {"x":_1a.x,"y":_1a.y};
},getTargetArea:function(_1b,_1c,_1d){
var _1e=0;
var x=_1c.x;
var y=_1c.y;
var end=_1b.length;
var _1f=0,_20="right",_21=false;
if(_1d==-1||arguments.length<3){
_21=true;
}else{
if(this._checkInterval(_1b,_1d,x,y)){
_1e=_1d;
}else{
if(this._oldXPoint<x){
_1f=_1d+1;
}else{
_1f=_1d-1;
end=0;
_20="left";
}
_21=true;
}
}
if(_21){
if(_20==="right"){
for(var i=_1f;i<end;i++){
if(this._checkInterval(_1b,i,x,y)){
_1e=i;
break;
}
}
if(i==end){
_1e=-1;
}
}else{
for(var i=_1f;i>=end;i--){
if(this._checkInterval(_1b,i,x,y)){
_1e=i;
break;
}
}
if(i==end-1){
_1e=-1;
}
}
}
this._oldXPoint=x;
return _1e;
},_checkInterval:function(_22,_23,x,y){
var _24=_22[_23];
var _25=_24.node;
var _26=_24.coords;
var _27=_26.x;
var _28=_26.x2;
var _29=_26.y;
var _2a=_29+_25.offsetHeight;
if(_27<=x&&x<=_28&&_29<=y&&y<=_2a){
return true;
}
return false;
},getDropIndex:function(_2b,_2c){
var _2d=_2b.items.length;
var _2e=_2b.coords;
var y=_2c.y;
if(_2d>0){
for(var i=0;i<_2d;i++){
if(y<_2b.items[i].y){
return i;
}else{
if(i==_2d-1){
return -1;
}
}
}
}
return -1;
},destroy:function(){
_1.forEach(this._dragHandler,_1.disconnect);
}});
dojox.mdnd.areaManager()._dropMode=new dojox.mdnd.dropMode.OverDropMode();
return _2;
}); | PypiClean |
/MeleeUploader-1.22.2.tar.gz/MeleeUploader-1.22.2/meleeuploader/youtube.py |
try:
import http.client as httplib
except ImportError:
import httplib
import httplib2
import os
import sys
import errno
from time import sleep
from decimal import Decimal
from . import consts
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from googleapiclient.http import MediaFileUpload
from oauth2client.file import Storage
from oauth2client.tools import run_flow
from oauth2client.client import flow_from_clientsecrets
# Disable httplib2's internal retries; retrying is handled explicitly in
# upload_service() below.
httplib2.RETRIES = 1
# Transient network/protocol errors for which a chunk upload is retried.
RETRIABLE_EXCEPTIONS = (
    httplib2.HttpLib2Error,
    IOError,
    httplib.NotConnected,
    httplib.IncompleteRead,
    httplib.ImproperConnectionState,
    httplib.CannotSendRequest,
    httplib.CannotSendHeader,
    httplib.ResponseNotReady,
    httplib.BadStatusLine,
)
# HTTP status codes treated as retriable server-side failures.
RETRIABLE_STATUS_CODES = [500, 502, 504]
# OAuth scopes requested for the YouTube client (space-separated, as accepted
# by oauth2client's flow_from_clientsecrets).
YOUTUBE_UPLOAD_SCOPE = "https://www.googleapis.com/auth/youtube.upload https://www.googleapis.com/auth/youtube https://www.googleapis.com/auth/youtube.readonly https://www.googleapis.com/auth/youtube.force-ssl"
YOUTUBE_PARTNER_SCOPE = "https://www.googleapis.com/auth/youtubepartner"
SPREADSHEETS_SCOPE = "https://www.googleapis.com/auth/spreadsheets"
# Directory prefixes searched, in order, for a client_secrets file
# (see get_secrets()).
PREFIXES = (
    consts.smash_folder,
    sys.prefix,
    os.path.join(sys.prefix, "local"),
    "/usr",
    os.path.join("/usr", "local"),
)
# Candidate relative paths of the client secrets file under each prefix.
SUFFIXES = (
    "client_secrets.json",
    ".client_secrets.json",
    f"share/{consts.short_name}/client_secrets.json",
)
def upload(yt, body, file, notify=False):
    """Upload *file* to YouTube with metadata *body*, retrying up to 10 times.

    yt      -- an authenticated YouTube API client (see get_youtube_service).
    body    -- request body dict; its keys become the `part` parameter.
    file    -- path of the video file to upload.
    notify  -- whether subscribers are notified of the new video.

    Returns (success_flag, video_id) from the last attempt; video_id is
    None when every attempt failed.
    """
    outcome, video_id = None, None
    for _attempt in range(10):
        # Build a fresh resumable insert request for each attempt; chunks of
        # 100 MiB (104857600 bytes) are streamed by upload_service().
        request = yt.videos().insert(
            part=",".join(body.keys()),
            body=body,
            notifySubscribers=notify,
            media_body=MediaFileUpload(file, chunksize=104857600, resumable=True),
        )
        outcome, video_id = upload_service(request)
        if video_id:
            break
    return outcome, video_id
def upload_service(insert_request):
    """Drive a resumable upload request to completion, chunk by chunk.

    insert_request -- a googleapiclient resumable request whose next_chunk()
                      is called repeatedly until a response arrives.

    Returns (True, video_id) on success, (False, None) on a non-retriable
    failure. Retriable HTTP errors and connection drops keep the loop going.
    """
    response = None
    retry_exceptions = RETRIABLE_EXCEPTIONS
    retry_status_codes = RETRIABLE_STATUS_CODES
    # errno values that indicate a dropped connection worth retrying.
    ACCEPTABLE_ERRNO = (errno.EPIPE, errno.EINVAL, errno.ECONNRESET)
    try:
        ACCEPTABLE_ERRNO += (errno.WSAECONNABORTED,)
    except AttributeError:
        pass  # Not windows
    while True:
        try:
            status, response = insert_request.next_chunk()
            if status is not None:
                percent = Decimal(
                    int(status.resumable_progress) / int(status.total_size)
                )
                print(f"{round(100 * percent, 2)}% uploaded")
        except HttpError as e:
            if e.resp.status in retry_status_codes:
                print(f"A retriable HTTP error {e.resp.status} occurred:\n{e.content}")
            elif b"503" in e.content:
                print("Backend Error: will attempt to retry upload")
                return False, None
            elif b"uploadLimitExceeded" in e.content:
                print("You have exceeded the YouTube Upload Limit")
                print("Waiting 10 minutes before retrying to avoid the limit")
                sleep(600)
            else:
                print(e)
                return False, None
        except retry_exceptions as e:
            print(f"A retriable error occurred: {e}")
        except Exception as e:
            # BUG FIX: the original tested `e in ACCEPTABLE_ERRNO`, comparing
            # the exception OBJECT against errno integers — always False, so
            # connection-drop errors were never recognized as retriable.
            # Compare the exception's errno attribute instead (OSError and
            # its subclasses carry .errno; anything else yields None).
            if getattr(e, "errno", None) in ACCEPTABLE_ERRNO:
                print("Retriable Error occured, retrying now")
            else:
                print(e)
        if response:
            video_id = response.get("id", None)
            if video_id is None:
                # Upload "succeeded" but no id came back — treat as failure.
                print(response)
                print(status)
                return False, None
            print(f"Video link is\nhttps://www.youtube.com/watch?v={video_id}")
            return True, video_id
def test_get_service(scope, oauth_file, secret=None):
    """
    WIP
    Based on the newer google_auth_oauthlib module
    """
    # Use the explicit secret path if given, otherwise search the usual spots.
    secrets_path = secret or get_secrets(PREFIXES, SUFFIXES)
    if not secrets_path:
        return None
    flow = InstalledAppFlow.from_client_secrets_file(secrets_path, scopes=scope)
    storage = Storage(oauth_file)
    creds = storage.get()
    if creds is None or creds.invalid:
        # No cached (or stale) credentials: run the local-server OAuth flow
        # and persist the result for next time.
        creds = flow.run_local_server(
            host="localhost",
            port=8080,
            authorization_prompt_message="Please visit this URL: {url}",
            success_message="The auth flow is complete; you may close this window.",
            open_browser=True,
        )
        storage.put(creds)
    return creds
def get_youtube_service():
    """Build an authenticated YouTube Data API v3 client.

    Locates the client secrets file, runs (or reuses) the oauth2client flow,
    and returns a googleapiclient service object — or None when the secrets
    file or credentials cannot be obtained.
    """
    CLIENT_SECRETS_FILE = get_secrets(PREFIXES, SUFFIXES)
    if not CLIENT_SECRETS_FILE:
        return None
    flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE, scope=YOUTUBE_UPLOAD_SCOPE)
    flow.user_agent = consts.long_name
    storage = Storage(consts.youtube_oauth_file)
    credentials = storage.get()
    if credentials is None or credentials.invalid:
        credentials = run_flow(flow, storage)
    if not credentials:
        return None
    http = httplib2.Http()
    try:
        # https://github.com/googleapis/google-api-python-client/issues/803
        http.redirect_codes = set(http.redirect_codes) - {308}
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt. Older httplib2 without redirect_codes is the
        # expected failure here; any Exception is safely ignorable.
        pass
    return build("youtube", "v3", http=credentials.authorize(http))
def add_to_playlist(pID, vID):
    """Append video *vID* to the end of playlist *pID* via the YouTube API."""
    request_body = {
        "snippet": {
            "playlistId": pID,
            "resourceId": {"kind": "youtube#video", "videoId": vID},
        }
    }
    consts.youtube.playlistItems().insert(part="snippet", body=request_body).execute()
    print("Added to playlist")
def get_secrets(prefixes, relative_paths):
    """
    Taken from https://github.com/tokland/youtube-upload/blob/master/youtube_upload/main.py
    Get the first existing filename of relative_path seeking on prefixes directories.

    Returns the first existing prefix/relative_path combination, or None if
    none exists. Under a PyInstaller bundle, returns the bundled secrets path
    without checking existence.
    """
    paths_attempted = []
    try:
        # PyInstaller bundles expose data files under sys._MEIPASS; outside a
        # bundle this raises AttributeError (or IndexError on empty input) and
        # we fall through to the filesystem search.
        return os.path.join(sys._MEIPASS, relative_paths[-1])
    except Exception:
        pass
    # BUG FIX: the original attached an `else:` clause to this for-loop even
    # though the loop contains no `break`, so the else-body ran after every
    # completed scan and only "worked" because the success path returned
    # early. Plain sequential code states the intent directly.
    for prefix in prefixes:
        for relative_path in relative_paths:
            path = os.path.join(prefix, relative_path)
            if os.path.exists(path):
                print(f"found client_secrets.json at {path}")
                return path
            paths_attempted.append(path)
    print(
        f"Unable to find client_secrets.json. Checked in the following locations: {paths_attempted}"
    )
    return None
/CPAT-3.0.4.tar.gz/CPAT-3.0.4/.eggs/nose-1.3.7-py3.7.egg/nose/plugins/attrib.py | import inspect
import logging
import os
import sys
from inspect import isfunction
from nose.plugins.base import Plugin
from nose.util import tolist
import collections
log = logging.getLogger('nose.plugins.attrib')
compat_24 = sys.version_info >= (2, 4)
def attr(*args, **kwargs):
"""Decorator that adds attributes to classes or functions
for use with the Attribute (-a) plugin.
"""
def wrap_ob(ob):
for name in args:
setattr(ob, name, True)
for name, value in kwargs.items():
setattr(ob, name, value)
return ob
return wrap_ob
def get_method_attr(method, cls, attr_name, default = False):
"""Look up an attribute on a method/ function.
If the attribute isn't found there, looking it up in the
method's class, if any.
"""
Missing = object()
value = getattr(method, attr_name, Missing)
if value is Missing and cls is not None:
value = getattr(cls, attr_name, Missing)
if value is Missing:
return default
return value
class ContextHelper:
"""Object that can act as context dictionary for eval and looks up
names as attributes on a method/ function and its class.
"""
def __init__(self, method, cls):
self.method = method
self.cls = cls
def __getitem__(self, name):
return get_method_attr(self.method, self.cls, name)
class AttributeSelector(Plugin):
"""Selects test cases to be run based on their attributes.
"""
def __init__(self):
Plugin.__init__(self)
self.attribs = []
def options(self, parser, env):
"""Register command line options"""
parser.add_option("-a", "--attr",
dest="attr", action="append",
default=env.get('NOSE_ATTR'),
metavar="ATTR",
help="Run only tests that have attributes "
"specified by ATTR [NOSE_ATTR]")
# disable in < 2.4: eval can't take needed args
if compat_24:
parser.add_option("-A", "--eval-attr",
dest="eval_attr", metavar="EXPR", action="append",
default=env.get('NOSE_EVAL_ATTR'),
help="Run only tests for whose attributes "
"the Python expression EXPR evaluates "
"to True [NOSE_EVAL_ATTR]")
def configure(self, options, config):
"""Configure the plugin and system, based on selected options.
attr and eval_attr may each be lists.
self.attribs will be a list of lists of tuples. In that list, each
list is a group of attributes, all of which must match for the rule to
match.
"""
self.attribs = []
# handle python eval-expression parameter
if compat_24 and options.eval_attr:
eval_attr = tolist(options.eval_attr)
for attr in eval_attr:
# "<python expression>"
# -> eval(expr) in attribute context must be True
def eval_in_context(expr, obj, cls):
return eval(expr, None, ContextHelper(obj, cls))
self.attribs.append([(attr, eval_in_context)])
# attribute requirements are a comma separated list of
# 'key=value' pairs
if options.attr:
std_attr = tolist(options.attr)
for attr in std_attr:
# all attributes within an attribute group must match
attr_group = []
for attrib in attr.strip().split(","):
# don't die on trailing comma
if not attrib:
continue
items = attrib.split("=", 1)
if len(items) > 1:
# "name=value"
# -> 'str(obj.name) == value' must be True
key, value = items
else:
key = items[0]
if key[0] == "!":
# "!name"
# 'bool(obj.name)' must be False
key = key[1:]
value = False
else:
# "name"
# -> 'bool(obj.name)' must be True
value = True
attr_group.append((key, value))
self.attribs.append(attr_group)
if self.attribs:
self.enabled = True
def validateAttrib(self, method, cls = None):
"""Verify whether a method has the required attributes
The method is considered a match if it matches all attributes
for any attribute group.
."""
# TODO: is there a need for case-sensitive value comparison?
any = False
for group in self.attribs:
match = True
for key, value in group:
attr = get_method_attr(method, cls, key)
if isinstance(value, collections.Callable):
if not value(key, method, cls):
match = False
break
elif value is True:
# value must exist and be True
if not bool(attr):
match = False
break
elif value is False:
# value must not exist or be False
if bool(attr):
match = False
break
elif type(attr) in (list, tuple):
# value must be found in the list attribute
if not str(value).lower() in [str(x).lower()
for x in attr]:
match = False
break
else:
# value must match, convert to string and compare
if (value != attr
and str(value).lower() != str(attr).lower()):
match = False
break
any = any or match
if any:
# not True because we don't want to FORCE the selection of the
# item, only say that it is acceptable
return None
return False
def wantFunction(self, function):
"""Accept the function if its attributes match.
"""
return self.validateAttrib(function)
def wantMethod(self, method):
"""Accept the method if its attributes match.
"""
try:
cls = method.__self__.__class__
except AttributeError:
return False
return self.validateAttrib(method, cls) | PypiClean |
/Nuitka_fixed-1.1.2-cp310-cp310-win_amd64.whl/nuitka/build/inline_copy/yaml/yaml/constructor.py | __all__ = [
'BaseConstructor',
'SafeConstructor',
'FullConstructor',
'UnsafeConstructor',
'Constructor',
'ConstructorError'
]
from .error import *
from .nodes import *
import collections.abc, datetime, base64, binascii, re, sys, types
class ConstructorError(MarkedYAMLError):
pass
class BaseConstructor:
yaml_constructors = {}
yaml_multi_constructors = {}
def __init__(self):
self.constructed_objects = {}
self.recursive_objects = {}
self.state_generators = []
self.deep_construct = False
def check_data(self):
# If there are more documents available?
return self.check_node()
def check_state_key(self, key):
"""Block special attributes/methods from being set in a newly created
object, to prevent user-controlled methods from being called during
deserialization"""
if self.get_state_keys_blacklist_regexp().match(key):
raise ConstructorError(None, None,
"blacklisted key '%s' in instance state found" % (key,), None)
def get_data(self):
# Construct and return the next document.
if self.check_node():
return self.construct_document(self.get_node())
def get_single_data(self):
# Ensure that the stream contains a single document and construct it.
node = self.get_single_node()
if node is not None:
return self.construct_document(node)
return None
def construct_document(self, node):
data = self.construct_object(node)
while self.state_generators:
state_generators = self.state_generators
self.state_generators = []
for generator in state_generators:
for dummy in generator:
pass
self.constructed_objects = {}
self.recursive_objects = {}
self.deep_construct = False
return data
def construct_object(self, node, deep=False):
if node in self.constructed_objects:
return self.constructed_objects[node]
if deep:
old_deep = self.deep_construct
self.deep_construct = True
if node in self.recursive_objects:
raise ConstructorError(None, None,
"found unconstructable recursive node", node.start_mark)
self.recursive_objects[node] = None
constructor = None
tag_suffix = None
if node.tag in self.yaml_constructors:
constructor = self.yaml_constructors[node.tag]
else:
for tag_prefix in self.yaml_multi_constructors:
if tag_prefix is not None and node.tag.startswith(tag_prefix):
tag_suffix = node.tag[len(tag_prefix):]
constructor = self.yaml_multi_constructors[tag_prefix]
break
else:
if None in self.yaml_multi_constructors:
tag_suffix = node.tag
constructor = self.yaml_multi_constructors[None]
elif None in self.yaml_constructors:
constructor = self.yaml_constructors[None]
elif isinstance(node, ScalarNode):
constructor = self.__class__.construct_scalar
elif isinstance(node, SequenceNode):
constructor = self.__class__.construct_sequence
elif isinstance(node, MappingNode):
constructor = self.__class__.construct_mapping
if tag_suffix is None:
data = constructor(self, node)
else:
data = constructor(self, tag_suffix, node)
if isinstance(data, types.GeneratorType):
generator = data
data = next(generator)
if self.deep_construct:
for dummy in generator:
pass
else:
self.state_generators.append(generator)
self.constructed_objects[node] = data
del self.recursive_objects[node]
if deep:
self.deep_construct = old_deep
return data
def construct_scalar(self, node):
if not isinstance(node, ScalarNode):
raise ConstructorError(None, None,
"expected a scalar node, but found %s" % node.id,
node.start_mark)
return node.value
def construct_sequence(self, node, deep=False):
if not isinstance(node, SequenceNode):
raise ConstructorError(None, None,
"expected a sequence node, but found %s" % node.id,
node.start_mark)
return [self.construct_object(child, deep=deep)
for child in node.value]
def construct_mapping(self, node, deep=False):
if not isinstance(node, MappingNode):
raise ConstructorError(None, None,
"expected a mapping node, but found %s" % node.id,
node.start_mark)
mapping = {}
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep)
if not isinstance(key, collections.abc.Hashable):
raise ConstructorError("while constructing a mapping", node.start_mark,
"found unhashable key", key_node.start_mark)
value = self.construct_object(value_node, deep=deep)
mapping[key] = value
return mapping
def construct_pairs(self, node, deep=False):
if not isinstance(node, MappingNode):
raise ConstructorError(None, None,
"expected a mapping node, but found %s" % node.id,
node.start_mark)
pairs = []
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep)
value = self.construct_object(value_node, deep=deep)
pairs.append((key, value))
return pairs
@classmethod
def add_constructor(cls, tag, constructor):
if not 'yaml_constructors' in cls.__dict__:
cls.yaml_constructors = cls.yaml_constructors.copy()
cls.yaml_constructors[tag] = constructor
@classmethod
def add_multi_constructor(cls, tag_prefix, multi_constructor):
if not 'yaml_multi_constructors' in cls.__dict__:
cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
cls.yaml_multi_constructors[tag_prefix] = multi_constructor
class SafeConstructor(BaseConstructor):
def construct_scalar(self, node):
if isinstance(node, MappingNode):
for key_node, value_node in node.value:
if key_node.tag == 'tag:yaml.org,2002:value':
return self.construct_scalar(value_node)
return super().construct_scalar(node)
def flatten_mapping(self, node):
merge = []
index = 0
while index < len(node.value):
key_node, value_node = node.value[index]
if key_node.tag == 'tag:yaml.org,2002:merge':
del node.value[index]
if isinstance(value_node, MappingNode):
self.flatten_mapping(value_node)
merge.extend(value_node.value)
elif isinstance(value_node, SequenceNode):
submerge = []
for subnode in value_node.value:
if not isinstance(subnode, MappingNode):
raise ConstructorError("while constructing a mapping",
node.start_mark,
"expected a mapping for merging, but found %s"
% subnode.id, subnode.start_mark)
self.flatten_mapping(subnode)
submerge.append(subnode.value)
submerge.reverse()
for value in submerge:
merge.extend(value)
else:
raise ConstructorError("while constructing a mapping", node.start_mark,
"expected a mapping or list of mappings for merging, but found %s"
% value_node.id, value_node.start_mark)
elif key_node.tag == 'tag:yaml.org,2002:value':
key_node.tag = 'tag:yaml.org,2002:str'
index += 1
else:
index += 1
if merge:
node.value = merge + node.value
def construct_mapping(self, node, deep=False):
if isinstance(node, MappingNode):
self.flatten_mapping(node)
return super().construct_mapping(node, deep=deep)
def construct_yaml_null(self, node):
self.construct_scalar(node)
return None
bool_values = {
'yes': True,
'no': False,
'true': True,
'false': False,
'on': True,
'off': False,
}
def construct_yaml_bool(self, node):
value = self.construct_scalar(node)
return self.bool_values[value.lower()]
def construct_yaml_int(self, node):
value = self.construct_scalar(node)
value = value.replace('_', '')
sign = +1
if value[0] == '-':
sign = -1
if value[0] in '+-':
value = value[1:]
if value == '0':
return 0
elif value.startswith('0b'):
return sign*int(value[2:], 2)
elif value.startswith('0x'):
return sign*int(value[2:], 16)
elif value[0] == '0':
return sign*int(value, 8)
elif ':' in value:
digits = [int(part) for part in value.split(':')]
digits.reverse()
base = 1
value = 0
for digit in digits:
value += digit*base
base *= 60
return sign*value
else:
return sign*int(value)
inf_value = 1e300
while inf_value != inf_value*inf_value:
inf_value *= inf_value
nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99).
def construct_yaml_float(self, node):
value = self.construct_scalar(node)
value = value.replace('_', '').lower()
sign = +1
if value[0] == '-':
sign = -1
if value[0] in '+-':
value = value[1:]
if value == '.inf':
return sign*self.inf_value
elif value == '.nan':
return self.nan_value
elif ':' in value:
digits = [float(part) for part in value.split(':')]
digits.reverse()
base = 1
value = 0.0
for digit in digits:
value += digit*base
base *= 60
return sign*value
else:
return sign*float(value)
def construct_yaml_binary(self, node):
try:
value = self.construct_scalar(node).encode('ascii')
except UnicodeEncodeError as exc:
raise ConstructorError(None, None,
"failed to convert base64 data into ascii: %s" % exc,
node.start_mark)
try:
if hasattr(base64, 'decodebytes'):
return base64.decodebytes(value)
else:
return base64.decodestring(value)
except binascii.Error as exc:
raise ConstructorError(None, None,
"failed to decode base64 data: %s" % exc, node.start_mark)
timestamp_regexp = re.compile(
r'''^(?P<year>[0-9][0-9][0-9][0-9])
-(?P<month>[0-9][0-9]?)
-(?P<day>[0-9][0-9]?)
(?:(?:[Tt]|[ \t]+)
(?P<hour>[0-9][0-9]?)
:(?P<minute>[0-9][0-9])
:(?P<second>[0-9][0-9])
(?:\.(?P<fraction>[0-9]*))?
(?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
(?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
def construct_yaml_timestamp(self, node):
value = self.construct_scalar(node)
match = self.timestamp_regexp.match(node.value)
values = match.groupdict()
year = int(values['year'])
month = int(values['month'])
day = int(values['day'])
if not values['hour']:
return datetime.date(year, month, day)
hour = int(values['hour'])
minute = int(values['minute'])
second = int(values['second'])
fraction = 0
tzinfo = None
if values['fraction']:
fraction = values['fraction'][:6]
while len(fraction) < 6:
fraction += '0'
fraction = int(fraction)
if values['tz_sign']:
tz_hour = int(values['tz_hour'])
tz_minute = int(values['tz_minute'] or 0)
delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
if values['tz_sign'] == '-':
delta = -delta
tzinfo = datetime.timezone(delta)
elif values['tz']:
tzinfo = datetime.timezone.utc
return datetime.datetime(year, month, day, hour, minute, second, fraction,
tzinfo=tzinfo)
def construct_yaml_omap(self, node):
# Note: we do not check for duplicate keys, because it's too
# CPU-expensive.
omap = []
yield omap
if not isinstance(node, SequenceNode):
raise ConstructorError("while constructing an ordered map", node.start_mark,
"expected a sequence, but found %s" % node.id, node.start_mark)
for subnode in node.value:
if not isinstance(subnode, MappingNode):
raise ConstructorError("while constructing an ordered map", node.start_mark,
"expected a mapping of length 1, but found %s" % subnode.id,
subnode.start_mark)
if len(subnode.value) != 1:
raise ConstructorError("while constructing an ordered map", node.start_mark,
"expected a single mapping item, but found %d items" % len(subnode.value),
subnode.start_mark)
key_node, value_node = subnode.value[0]
key = self.construct_object(key_node)
value = self.construct_object(value_node)
omap.append((key, value))
def construct_yaml_pairs(self, node):
# Note: the same code as `construct_yaml_omap`.
pairs = []
yield pairs
if not isinstance(node, SequenceNode):
raise ConstructorError("while constructing pairs", node.start_mark,
"expected a sequence, but found %s" % node.id, node.start_mark)
for subnode in node.value:
if not isinstance(subnode, MappingNode):
raise ConstructorError("while constructing pairs", node.start_mark,
"expected a mapping of length 1, but found %s" % subnode.id,
subnode.start_mark)
if len(subnode.value) != 1:
raise ConstructorError("while constructing pairs", node.start_mark,
"expected a single mapping item, but found %d items" % len(subnode.value),
subnode.start_mark)
key_node, value_node = subnode.value[0]
key = self.construct_object(key_node)
value = self.construct_object(value_node)
pairs.append((key, value))
def construct_yaml_set(self, node):
data = set()
yield data
value = self.construct_mapping(node)
data.update(value)
def construct_yaml_str(self, node):
return self.construct_scalar(node)
def construct_yaml_seq(self, node):
data = []
yield data
data.extend(self.construct_sequence(node))
def construct_yaml_map(self, node):
data = {}
yield data
value = self.construct_mapping(node)
data.update(value)
def construct_yaml_object(self, node, cls):
data = cls.__new__(cls)
yield data
if hasattr(data, '__setstate__'):
state = self.construct_mapping(node, deep=True)
data.__setstate__(state)
else:
state = self.construct_mapping(node)
data.__dict__.update(state)
def construct_undefined(self, node):
raise ConstructorError(None, None,
"could not determine a constructor for the tag %r" % node.tag,
node.start_mark)
SafeConstructor.add_constructor(
'tag:yaml.org,2002:null',
SafeConstructor.construct_yaml_null)
SafeConstructor.add_constructor(
'tag:yaml.org,2002:bool',
SafeConstructor.construct_yaml_bool)
SafeConstructor.add_constructor(
'tag:yaml.org,2002:int',
SafeConstructor.construct_yaml_int)
SafeConstructor.add_constructor(
'tag:yaml.org,2002:float',
SafeConstructor.construct_yaml_float)
SafeConstructor.add_constructor(
'tag:yaml.org,2002:binary',
SafeConstructor.construct_yaml_binary)
SafeConstructor.add_constructor(
'tag:yaml.org,2002:timestamp',
SafeConstructor.construct_yaml_timestamp)
SafeConstructor.add_constructor(
'tag:yaml.org,2002:omap',
SafeConstructor.construct_yaml_omap)
SafeConstructor.add_constructor(
'tag:yaml.org,2002:pairs',
SafeConstructor.construct_yaml_pairs)
SafeConstructor.add_constructor(
'tag:yaml.org,2002:set',
SafeConstructor.construct_yaml_set)
SafeConstructor.add_constructor(
'tag:yaml.org,2002:str',
SafeConstructor.construct_yaml_str)
SafeConstructor.add_constructor(
'tag:yaml.org,2002:seq',
SafeConstructor.construct_yaml_seq)
SafeConstructor.add_constructor(
'tag:yaml.org,2002:map',
SafeConstructor.construct_yaml_map)
SafeConstructor.add_constructor(None,
SafeConstructor.construct_undefined)
class FullConstructor(SafeConstructor):
    """Constructor for the Python-specific ``!!python/*`` YAML tags.

    Extends SafeConstructor with support for Python scalars (str, bytes,
    long, complex, tuple) and for reconstructing Python objects.  Unlike
    UnsafeConstructor, every operation that could execute or import
    arbitrary code is gated behind an explicit ``unsafe=True`` argument:
    modules must already be imported, resolved names must be classes, and
    instance state keys are filtered through a blacklist.
    """
    # 'extend' is blacklisted because it is used by
    # construct_python_object_apply to add `listitems` to a newly generated
    # python instance
    def get_state_keys_blacklist(self):
        """Return regex fragments for state keys that must never be set
        on a deserialized instance ('extend' and all dunder attributes)."""
        return ['^extend$', '^__.*__$']
    def get_state_keys_blacklist_regexp(self):
        """Lazily compile and cache the combined blacklist pattern.

        The fragments from get_state_keys_blacklist() are OR-ed into a
        single alternation so each key needs only one regex match.
        """
        if not hasattr(self, 'state_keys_blacklist_regexp'):
            self.state_keys_blacklist_regexp = re.compile('(' + '|'.join(self.get_state_keys_blacklist()) + ')')
        return self.state_keys_blacklist_regexp
    def construct_python_str(self, node):
        # !!python/str -- plain scalar text.
        return self.construct_scalar(node)
    def construct_python_unicode(self, node):
        # !!python/unicode -- same as str on Python 3; kept for documents
        # produced by Python 2.
        return self.construct_scalar(node)
    def construct_python_bytes(self, node):
        """Decode a base64-encoded scalar (``!!python/bytes``) into bytes.

        Raises ConstructorError if the scalar is not ASCII or is not
        valid base64.
        """
        try:
            value = self.construct_scalar(node).encode('ascii')
        except UnicodeEncodeError as exc:
            raise ConstructorError(None, None,
                    "failed to convert base64 data into ascii: %s" % exc,
                    node.start_mark)
        try:
            # decodestring() is the pre-3.1 spelling of decodebytes();
            # the hasattr check keeps very old interpreters working.
            if hasattr(base64, 'decodebytes'):
                return base64.decodebytes(value)
            else:
                return base64.decodestring(value)
        except binascii.Error as exc:
            raise ConstructorError(None, None,
                    "failed to decode base64 data: %s" % exc, node.start_mark)
    def construct_python_long(self, node):
        # !!python/long -- identical to int on Python 3.
        return self.construct_yaml_int(node)
    def construct_python_complex(self, node):
        # !!python/complex -- parsed with the complex() literal syntax,
        # e.g. "1+2j".
        return complex(self.construct_scalar(node))
    def construct_python_tuple(self, node):
        # !!python/tuple -- a sequence node frozen into a tuple.
        return tuple(self.construct_sequence(node))
    def find_python_module(self, name, mark, unsafe=False):
        """Resolve *name* to a module object.

        Unless ``unsafe`` is true, the module must already be present in
        sys.modules -- no import is performed on the document's behalf.
        Raises ConstructorError on an empty name, a failed unsafe import,
        or a module that is not imported.
        """
        if not name:
            raise ConstructorError("while constructing a Python module", mark,
                    "expected non-empty name appended to the tag", mark)
        if unsafe:
            try:
                __import__(name)
            except ImportError as exc:
                raise ConstructorError("while constructing a Python module", mark,
                        "cannot find module %r (%s)" % (name, exc), mark)
        if name not in sys.modules:
            raise ConstructorError("while constructing a Python module", mark,
                    "module %r is not imported" % name, mark)
        return sys.modules[name]
    def find_python_name(self, name, mark, unsafe=False):
        """Resolve a dotted ``module.attr`` name to a Python object.

        A name without a dot is looked up on the ``builtins`` module.
        As with find_python_module(), the containing module must already
        be imported unless ``unsafe`` is true.
        """
        if not name:
            raise ConstructorError("while constructing a Python object", mark,
                    "expected non-empty name appended to the tag", mark)
        if '.' in name:
            # Split on the LAST dot so nested modules work.
            module_name, object_name = name.rsplit('.', 1)
        else:
            module_name = 'builtins'
            object_name = name
        if unsafe:
            try:
                __import__(module_name)
            except ImportError as exc:
                raise ConstructorError("while constructing a Python object", mark,
                        "cannot find module %r (%s)" % (module_name, exc), mark)
        if module_name not in sys.modules:
            raise ConstructorError("while constructing a Python object", mark,
                    "module %r is not imported" % module_name, mark)
        module = sys.modules[module_name]
        if not hasattr(module, object_name):
            raise ConstructorError("while constructing a Python object", mark,
                    "cannot find %r in the module %r"
                    % (object_name, module.__name__), mark)
        return getattr(module, object_name)
    def construct_python_name(self, suffix, node):
        """``!!python/name:module.name`` -- the object itself, not an
        instance.  The node's scalar value must be empty."""
        value = self.construct_scalar(node)
        if value:
            raise ConstructorError("while constructing a Python name", node.start_mark,
                    "expected the empty value, but found %r" % value, node.start_mark)
        return self.find_python_name(suffix, node.start_mark)
    def construct_python_module(self, suffix, node):
        """``!!python/module:name`` -- an (already imported) module
        object.  The node's scalar value must be empty."""
        value = self.construct_scalar(node)
        if value:
            raise ConstructorError("while constructing a Python module", node.start_mark,
                    "expected the empty value, but found %r" % value, node.start_mark)
        return self.find_python_module(suffix, node.start_mark)
    def make_python_instance(self, suffix, node,
            args=None, kwds=None, newobj=False, unsafe=False):
        """Instantiate the class named by *suffix*.

        ``newobj`` selects ``cls.__new__`` (object is created without
        running __init__, as for !!python/object/new) over a plain call.
        Unless ``unsafe`` is true, the resolved name must be a class --
        arbitrary callables are rejected.
        """
        if not args:
            args = []
        if not kwds:
            kwds = {}
        cls = self.find_python_name(suffix, node.start_mark)
        if not (unsafe or isinstance(cls, type)):
            raise ConstructorError("while constructing a Python instance", node.start_mark,
                    "expected a class, but found %r" % type(cls),
                    node.start_mark)
        if newobj and isinstance(cls, type):
            return cls.__new__(cls, *args, **kwds)
        else:
            return cls(*args, **kwds)
    def set_python_instance_state(self, instance, state, unsafe=False):
        """Apply pickle-style *state* to *instance*.

        Honors __setstate__ if present; otherwise updates __dict__ and
        sets slot attributes (a 2-tuple state is (dict_state, slotstate),
        as produced by object.__reduce_ex__).  Unless ``unsafe`` is true,
        every key is vetted with check_state_key (inherited; enforces the
        blacklist above) before being assigned.
        """
        if hasattr(instance, '__setstate__'):
            instance.__setstate__(state)
        else:
            slotstate = {}
            if isinstance(state, tuple) and len(state) == 2:
                state, slotstate = state
            if hasattr(instance, '__dict__'):
                if not unsafe and state:
                    for key in state.keys():
                        self.check_state_key(key)
                instance.__dict__.update(state)
            elif state:
                # No __dict__ (e.g. __slots__-only class): treat the
                # whole mapping as slot state.
                slotstate.update(state)
            for key, value in slotstate.items():
                if not unsafe:
                    self.check_state_key(key)
                setattr(instance, key, value)
    def construct_python_object(self, suffix, node):
        # Format:
        #   !!python/object:module.name { ... state ... }
        # Implemented as a generator: the bare instance is yielded first
        # so recursive/aliased references to it can be resolved, then its
        # state is filled in.
        instance = self.make_python_instance(suffix, node, newobj=True)
        yield instance
        # Objects with __setstate__ need fully-constructed (deep) state.
        deep = hasattr(instance, '__setstate__')
        state = self.construct_mapping(node, deep=deep)
        self.set_python_instance_state(instance, state)
    def construct_python_object_apply(self, suffix, node, newobj=False):
        # Format:
        #   !!python/object/apply       # (or !!python/object/new)
        #   args: [ ... arguments ... ]
        #   kwds: { ... keywords ... }
        #   state: ... state ...
        #   listitems: [ ... listitems ... ]
        #   dictitems: { ... dictitems ... }
        # or short format:
        #   !!python/object/apply [ ... arguments ... ]
        # The difference between !!python/object/apply and !!python/object/new
        # is how an object is created, check make_python_instance for details.
        if isinstance(node, SequenceNode):
            # Short form: the sequence is just the positional args.
            args = self.construct_sequence(node, deep=True)
            kwds = {}
            state = {}
            listitems = []
            dictitems = {}
        else:
            value = self.construct_mapping(node, deep=True)
            args = value.get('args', [])
            kwds = value.get('kwds', {})
            state = value.get('state', {})
            listitems = value.get('listitems', [])
            dictitems = value.get('dictitems', {})
        instance = self.make_python_instance(suffix, node, args, kwds, newobj)
        if state:
            self.set_python_instance_state(instance, state)
        if listitems:
            # Relies on the instance providing list-like extend();
            # 'extend' is therefore excluded from settable state keys.
            instance.extend(listitems)
        if dictitems:
            for key in dictitems:
                instance[key] = dictitems[key]
        return instance
    def construct_python_object_new(self, suffix, node):
        # !!python/object/new: same as apply, but via cls.__new__.
        return self.construct_python_object_apply(suffix, node, newobj=True)
# Register the Python-specific '!!python/...' tags that FullConstructor can
# resolve without importing arbitrary code.  'python/name:' is a multi
# constructor: the remainder of the tag (the dotted name) is passed in as
# the suffix argument.
FullConstructor.add_constructor(
    'tag:yaml.org,2002:python/none',
    FullConstructor.construct_yaml_null)
FullConstructor.add_constructor(
    'tag:yaml.org,2002:python/bool',
    FullConstructor.construct_yaml_bool)
FullConstructor.add_constructor(
    'tag:yaml.org,2002:python/str',
    FullConstructor.construct_python_str)
FullConstructor.add_constructor(
    'tag:yaml.org,2002:python/unicode',
    FullConstructor.construct_python_unicode)
FullConstructor.add_constructor(
    'tag:yaml.org,2002:python/bytes',
    FullConstructor.construct_python_bytes)
FullConstructor.add_constructor(
    'tag:yaml.org,2002:python/int',
    FullConstructor.construct_yaml_int)
FullConstructor.add_constructor(
    'tag:yaml.org,2002:python/long',
    FullConstructor.construct_python_long)
FullConstructor.add_constructor(
    'tag:yaml.org,2002:python/float',
    FullConstructor.construct_yaml_float)
FullConstructor.add_constructor(
    'tag:yaml.org,2002:python/complex',
    FullConstructor.construct_python_complex)
FullConstructor.add_constructor(
    'tag:yaml.org,2002:python/list',
    FullConstructor.construct_yaml_seq)
FullConstructor.add_constructor(
    'tag:yaml.org,2002:python/tuple',
    FullConstructor.construct_python_tuple)
FullConstructor.add_constructor(
    'tag:yaml.org,2002:python/dict',
    FullConstructor.construct_yaml_map)
FullConstructor.add_multi_constructor(
    'tag:yaml.org,2002:python/name:',
    FullConstructor.construct_python_name)
class UnsafeConstructor(FullConstructor):
    """FullConstructor with every safety check switched off.

    Each override simply forwards to the FullConstructor implementation
    with ``unsafe=True``, so arbitrary modules may be imported, arbitrary
    callables invoked, and any state key assigned.  Only use this on
    fully trusted input.
    """

    def find_python_module(self, name, mark):
        # Allow importing modules named by the document itself.
        return super().find_python_module(name, mark, unsafe=True)

    def find_python_name(self, name, mark):
        # Allow resolving (and importing) any dotted name.
        return super().find_python_name(name, mark, unsafe=True)

    def make_python_instance(self, suffix, node, args=None, kwds=None, newobj=False):
        # Allow calling arbitrary callables, not just classes.
        return super().make_python_instance(
            suffix, node, args, kwds, newobj, unsafe=True)

    def set_python_instance_state(self, instance, state):
        # Skip the state-key blacklist entirely.
        return super().set_python_instance_state(
            instance, state, unsafe=True)
# The object-construction tags are only registered on UnsafeConstructor:
# they can trigger imports and attribute assignment, so FullConstructor
# deliberately leaves them unregistered.
UnsafeConstructor.add_multi_constructor(
    'tag:yaml.org,2002:python/module:',
    UnsafeConstructor.construct_python_module)
UnsafeConstructor.add_multi_constructor(
    'tag:yaml.org,2002:python/object:',
    UnsafeConstructor.construct_python_object)
UnsafeConstructor.add_multi_constructor(
    'tag:yaml.org,2002:python/object/new:',
    UnsafeConstructor.construct_python_object_new)
UnsafeConstructor.add_multi_constructor(
    'tag:yaml.org,2002:python/object/apply:',
    UnsafeConstructor.construct_python_object_apply)
# Constructor is same as UnsafeConstructor. Need to leave this in place in case
# people have extended it directly.
class Constructor(UnsafeConstructor):
    # Backwards-compatibility alias: behaves exactly like
    # UnsafeConstructor and exists only so existing subclasses of
    # `Constructor` keep working.
    pass
/Fabric39-1.15.3.post1.tar.gz/Fabric39-1.15.3.post1/sites/docs/usage/execution.rst | ===============
Execution model
===============
If you've read the :doc:`../tutorial`, you should already be familiar with how
Fabric operates in the base case (a single task on a single host.) However, in
many situations you'll find yourself wanting to execute multiple tasks and/or
on multiple hosts. Perhaps you want to split a big task into smaller reusable
parts, or crawl a collection of servers looking for an old user to remove. Such
a scenario requires specific rules for when and how tasks are executed.
This document explores Fabric's execution model, including the main execution
loop, how to define host lists, how connections are made, and so forth.
.. _execution-strategy:
Execution strategy
==================
Fabric defaults to a single, serial execution method, though there is an
alternative parallel mode available as of Fabric 1.3 (see
:doc:`/usage/parallel`). This default behavior is as follows:
* A list of tasks is created. Currently this list is simply the arguments given
to :doc:`fab <fab>`, preserving the order given.
* For each task, a task-specific host list is generated from various
sources (see :ref:`host-lists` below for details.)
* The task list is walked through in order, and each task is run once per host
in its host list.
* Tasks with no hosts in their host list are considered local-only, and will
always run once and only once.
Thus, given the following fabfile::
from fabric.api import run, env
env.hosts = ['host1', 'host2']
def taskA():
run('ls')
def taskB():
run('whoami')
and the following invocation::
$ fab taskA taskB
you will see that Fabric performs the following:
* ``taskA`` executed on ``host1``
* ``taskA`` executed on ``host2``
* ``taskB`` executed on ``host1``
* ``taskB`` executed on ``host2``
While this approach is simplistic, it allows for a straightforward composition
of task functions, and (unlike tools which push the multi-host functionality
down to the individual function calls) enables shell script-like logic where
you may introspect the output or return code of a given command and decide what
to do next.
Defining tasks
==============
For details on what constitutes a Fabric task and how to organize them, please see :doc:`/usage/tasks`.
Defining host lists
===================
Unless you're using Fabric as a simple build system (which is possible, but not
the primary use-case) having tasks won't do you any good without the ability to
specify remote hosts on which to execute them. There are a number of ways to do
so, with scopes varying from global to per-task, and it's possible to mix and
match as needed.
.. _host-strings:
Hosts
-----
Hosts, in this context, refer to what are also called "host strings": Python
strings specifying a username, hostname and port combination, in the form of
``username@hostname:port``. User and/or port (and the associated ``@`` or
``:``) may be omitted, and will be filled by the executing user's local
username, and/or port 22, respectively. Thus, ``admin@foo.com:222``,
``deploy@website`` and ``nameserver1`` could all be valid host strings.
IPv6 address notation is also supported, for example ``::1``, ``[::1]:1222``,
``user@2001:db8::1`` or ``user@[2001:db8::1]:1222``. Square brackets
are necessary only to separate the address from the port number. If no
port number is used, the brackets are optional. Also if host string is
specified via command-line argument, it may be necessary to escape
brackets in some shells.
.. note::
The user/hostname split occurs at the last ``@`` found, so e.g. email
address usernames are valid and will be parsed correctly.
During execution, Fabric normalizes the host strings given and then stores each
part (username/hostname/port) in the environment dictionary, for both its use
and for tasks to reference if the need arises. See :doc:`env` for details.
.. _execution-roles:
Roles
-----
Host strings map to single hosts, but sometimes it's useful to arrange hosts in
groups. Perhaps you have a number of Web servers behind a load balancer and
want to update all of them, or want to run a task on "all client servers".
Roles provide a way of defining strings which correspond to lists of host
strings, and can then be specified instead of writing out the entire list every
time.
This mapping is defined as a dictionary, ``env.roledefs``, which must be
modified by a fabfile in order to be used. A simple example::
from fabric.api import env
env.roledefs['webservers'] = ['www1', 'www2', 'www3']
Since ``env.roledefs`` is naturally empty by default, you may also opt to
re-assign to it without fear of losing any information (provided you aren't
loading other fabfiles which also modify it, of course)::
from fabric.api import env
env.roledefs = {
'web': ['www1', 'www2', 'www3'],
'dns': ['ns1', 'ns2']
}
Role definitions are not necessarily configuration of hosts only, they can
also hold additional role specific settings of your choice. This is achieved
by defining the roles as dicts and host strings under a ``hosts`` key::
from fabric.api import env
env.roledefs = {
'web': {
'hosts': ['www1', 'www2', 'www3'],
'foo': 'bar'
},
'dns': {
'hosts': ['ns1', 'ns2'],
'foo': 'baz'
}
}
In addition to list/iterable object types, the values in ``env.roledefs``
(or value of ``hosts`` key in dict style definition) may be callables, and will
thus be called when looked up when tasks are run instead of at module load
time. (For example, you could connect to remote servers to obtain role
definitions, and not worry about causing delays at fabfile load time when
calling e.g. ``fab --list``.)
Use of roles is not required in any way -- it's simply a convenience in
situations where you have common groupings of servers.
.. versionchanged:: 0.9.2
Added ability to use callables as ``roledefs`` values.
.. _host-lists:
How host lists are constructed
------------------------------
There are a number of ways to specify host lists, either globally or per-task,
and generally these methods override one another instead of merging together
(though this may change in future releases.) Each such method is typically
split into two parts, one for hosts and one for roles.
Globally, via ``env``
~~~~~~~~~~~~~~~~~~~~~
The most common method of setting hosts or roles is by modifying two key-value
pairs in the environment dictionary, :doc:`env <env>`: ``hosts`` and ``roles``.
The value of these variables is checked at runtime, while constructing each
task's host list.
Thus, they may be set at module level, which will take effect when the fabfile
is imported::
from fabric.api import env, run
env.hosts = ['host1', 'host2']
def mytask():
run('ls /var/www')
Such a fabfile, run simply as ``fab mytask``, will run ``mytask`` on ``host1``
followed by ``host2``.
Since the env vars are checked for *each* task, this means that if you have the
need, you can actually modify ``env`` in one task and it will affect all
following tasks::
from fabric.api import env, run
def set_hosts():
env.hosts = ['host1', 'host2']
def mytask():
run('ls /var/www')
When run as ``fab set_hosts mytask``, ``set_hosts`` is a "local" task -- its
own host list is empty -- but ``mytask`` will again run on the two hosts given.
.. note::
This technique used to be a common way of creating fake "roles", but is
less necessary now that roles are fully implemented. It may still be useful
in some situations, however.
Alongside ``env.hosts`` is ``env.roles`` (not to be confused with
``env.roledefs``!) which, if given, will be taken as a list of role names to
look up in ``env.roledefs``.
Globally, via the command line
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In addition to modifying ``env.hosts``, ``env.roles``, and
``env.exclude_hosts`` at the module level, you may define them by passing
comma-separated string arguments to the command-line switches
:option:`--hosts/-H <-H>` and :option:`--roles/-R <-R>`, e.g.::
$ fab -H host1,host2 mytask
Such an invocation is directly equivalent to ``env.hosts = ['host1', 'host2']``
-- the argument parser knows to look for these arguments and will modify
``env`` at parse time.
.. note::
It's possible, and in fact common, to use these switches to set only a
single host or role. Fabric simply calls ``string.split(',')`` on the given
string, so a string with no commas turns into a single-item list.
It is important to know that these command-line switches are interpreted
**before** your fabfile is loaded: any reassignment to ``env.hosts`` or
``env.roles`` in your fabfile will overwrite them.
If you wish to nondestructively merge the command-line hosts with your
fabfile-defined ones, make sure your fabfile uses ``env.hosts.extend()``
instead::
from fabric.api import env, run
env.hosts.extend(['host3', 'host4'])
def mytask():
run('ls /var/www')
When this fabfile is run as ``fab -H host1,host2 mytask``, ``env.hosts`` will
then contain ``['host1', 'host2', 'host3', 'host4']`` at the time that
``mytask`` is executed.
.. note::
``env.hosts`` is simply a Python list object -- so you may use
``env.hosts.append()`` or any other such method you wish.
.. _hosts-per-task-cli:
Per-task, via the command line
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Globally setting host lists only works if you want all your tasks to run on the
same host list all the time. This isn't always true, so Fabric provides a few
ways to be more granular and specify host lists which apply to a single task
only. The first of these uses task arguments.
As outlined in :doc:`fab`, it's possible to specify per-task arguments via a
special command-line syntax. In addition to naming actual arguments to your
task function, this may be used to set the ``host``, ``hosts``, ``role`` or
``roles`` "arguments", which are interpreted by Fabric when building host lists
(and removed from the arguments passed to the task itself.)
.. note::
Since commas are already used to separate task arguments from one another,
semicolons must be used in the ``hosts`` or ``roles`` arguments to
delineate individual host strings or role names. Furthermore, the argument
must be quoted to prevent your shell from interpreting the semicolons.
Take the below fabfile, which is the same one we've been using, but which
doesn't define any host info at all::
from fabric.api import run
def mytask():
run('ls /var/www')
To specify per-task hosts for ``mytask``, execute it like so::
$ fab mytask:hosts="host1;host2"
This will override any other host list and ensure ``mytask`` always runs on
just those two hosts.
Per-task, via decorators
~~~~~~~~~~~~~~~~~~~~~~~~
If a given task should always run on a predetermined host list, you may wish to
specify this in your fabfile itself. This can be done by decorating a task
function with the `~fabric.decorators.hosts` or `~fabric.decorators.roles`
decorators. These decorators take a variable argument list, like so::
from fabric.api import hosts, run
@hosts('host1', 'host2')
def mytask():
run('ls /var/www')
They will also take a single iterable argument, e.g.::
my_hosts = ('host1', 'host2')
@hosts(my_hosts)
def mytask():
# ...
When used, these decorators override any checks of ``env`` for that particular
task's host list (though ``env`` is not modified in any way -- it is simply
ignored.) Thus, even if the above fabfile had defined ``env.hosts`` or the call
to :doc:`fab <fab>` uses :option:`--hosts/-H <-H>`, ``mytask`` would still run
on a host list of ``['host1', 'host2']``.
However, decorator host lists do **not** override per-task command-line
arguments, as given in the previous section.
Order of precedence
~~~~~~~~~~~~~~~~~~~
We've been pointing out which methods of setting host lists trump the others,
as we've gone along. However, to make things clearer, here's a quick breakdown:
* Per-task, command-line host lists (``fab mytask:host=host1``) override
absolutely everything else.
* Per-task, decorator-specified host lists (``@hosts('host1')``) override the
``env`` variables.
* Globally specified host lists set in the fabfile (``env.hosts = ['host1']``)
*can* override such lists set on the command-line, but only if you're not
careful (or want them to.)
* Globally specified host lists set on the command-line (``--hosts=host1``)
will initialize the ``env`` variables, but that's it.
This logic may change slightly in the future to be more consistent (e.g.
having :option:`--hosts <-H>` somehow take precedence over ``env.hosts`` in the
same way that command-line per-task lists trump in-code ones) but only in a
backwards-incompatible release.
.. _combining-host-lists:
Combining host lists
--------------------
There is no "unionizing" of hosts between the various sources mentioned in
:ref:`host-lists`. If ``env.hosts`` is set to ``['host1', 'host2', 'host3']``,
and a per-function (e.g. via `~fabric.decorators.hosts`) host list is set to
just ``['host2', 'host3']``, that function will **not** execute on ``host1``,
because the per-task decorator host list takes precedence.
However, for each given source, if both roles **and** hosts are specified, they
will be merged together into a single host list. Take, for example, this
fabfile where both of the decorators are used::
from fabric.api import env, hosts, roles, run
env.roledefs = {'role1': ['b', 'c']}
@hosts('a', 'b')
@roles('role1')
def mytask():
run('ls /var/www')
Assuming no command-line hosts or roles are given when ``mytask`` is executed,
this fabfile will call ``mytask`` on a host list of ``['a', 'b', 'c']`` -- the
union of ``role1`` and the contents of the `~fabric.decorators.hosts` call.
.. _deduplication:
Host list deduplication
-----------------------
By default, to support :ref:`combining-host-lists`, Fabric deduplicates the
final host list so any given host string is only present once. However, this
prevents explicit/intentional running of a task multiple times on the same
target host, which is sometimes useful.
To turn off deduplication, set :ref:`env.dedupe_hosts <dedupe_hosts>` to
``False``.
.. _excluding-hosts:
Excluding specific hosts
------------------------
At times, it is useful to exclude one or more specific hosts, e.g. to override
a few bad or otherwise undesirable hosts which are pulled in from a role or an
autogenerated host list.
.. note::
As of Fabric 1.4, you may wish to use :ref:`skip-bad-hosts` instead, which
automatically skips over any unreachable hosts.
Host exclusion may be accomplished globally with :option:`--exclude-hosts/-x
<-x>`::
$ fab -R myrole -x host2,host5 mytask
If ``myrole`` was defined as ``['host1', 'host2', ..., 'host15']``, the above
invocation would run with an effective host list of ``['host1', 'host3',
'host4', 'host6', ..., 'host15']``.
.. note::
Using this option does not modify ``env.hosts`` -- it only causes the
main execution loop to skip the requested hosts.
Exclusions may be specified per-task by using an extra ``exclude_hosts`` kwarg,
which is implemented similarly to the abovementioned ``hosts`` and ``roles``
per-task kwargs, in that it is stripped from the actual task invocation. This
example would have the same result as the global exclude above::
$ fab mytask:roles=myrole,exclude_hosts="host2;host5"
Note that the host list is semicolon-separated, just as with the ``hosts``
per-task argument.
Combining exclusions
~~~~~~~~~~~~~~~~~~~~
Host exclusion lists, like host lists themselves, are not merged together
across the different "levels" they can be declared in. For example, a global
``-x`` option will not affect a per-task host list set with a decorator or
keyword argument, nor will per-task ``exclude_hosts`` keyword arguments affect
a global ``-H`` list.
There is one minor exception to this rule, namely that CLI-level keyword
arguments (``mytask:exclude_hosts=x,y``) **will** be taken into account when
examining host lists set via ``@hosts`` or ``@roles``. Thus a task function
decorated with ``@hosts('host1', 'host2')`` executed as ``fab
taskname:exclude_hosts=host2`` will only run on ``host1``.
As with the host list merging, this functionality is currently limited (partly
to keep the implementation simple) and may be expanded in future releases.
.. _execute:
Intelligently executing tasks with ``execute``
==============================================
.. versionadded:: 1.3
Most of the information here involves "top level" tasks executed via :doc:`fab
<fab>`, such as the first example where we called ``fab taskA taskB``.
However, it's often convenient to wrap up multi-task invocations like this into
their own, "meta" tasks.
Prior to Fabric 1.3, this had to be done by hand, as outlined in
:doc:`/usage/library`. Fabric's design eschews magical behavior, so simply
*calling* a task function does **not** take into account decorators such as
`~fabric.decorators.roles`.
New in Fabric 1.3 is the `~fabric.tasks.execute` helper function, which takes a
task object or name as its first argument. Using it is effectively the same as
calling the given task from the command line: all the rules given above in
:ref:`host-lists` apply. (The ``hosts`` and ``roles`` keyword arguments to
`~fabric.tasks.execute` are analogous to :ref:`CLI per-task arguments
<hosts-per-task-cli>`, including how they override all other host/role-setting
methods.)
As an example, here's a fabfile defining two stand-alone tasks for deploying a
Web application::
    from fabric.api import env, run, roles
env.roledefs = {
'db': ['db1', 'db2'],
'web': ['web1', 'web2', 'web3'],
}
@roles('db')
def migrate():
# Database stuff here.
pass
@roles('web')
def update():
# Code updates here.
pass
In Fabric <=1.2, the only way to ensure that ``migrate`` runs on the DB servers
and that ``update`` runs on the Web servers (short of manual
``env.host_string`` manipulation) was to call both as top level tasks::
$ fab migrate update
Fabric >=1.3 can use `~fabric.tasks.execute` to set up a meta-task. Update the
``import`` line like so::
    from fabric.api import env, run, roles, execute
and append this to the bottom of the file::
def deploy():
execute(migrate)
execute(update)
That's all there is to it; the `~fabric.decorators.roles` decorators will be honored as expected, resulting in the following execution sequence:
* `migrate` on `db1`
* `migrate` on `db2`
* `update` on `web1`
* `update` on `web2`
* `update` on `web3`
.. warning::
This technique works because tasks that themselves have no host list (this
includes the global host list settings) only run one time. If used inside a
"regular" task that is going to run on multiple hosts, calls to
`~fabric.tasks.execute` will also run multiple times, resulting in
multiplicative numbers of subtask calls -- be careful!
If you would like your `execute` calls to only be called once, you
may use the `~fabric.decorators.runs_once` decorator.
.. seealso:: `~fabric.tasks.execute`, `~fabric.decorators.runs_once`
.. _leveraging-execute-return-value:
Leveraging ``execute`` to access multi-host results
---------------------------------------------------
In nontrivial Fabric runs, especially parallel ones, you may want to gather up
a bunch of per-host result values at the end - e.g. to present a summary table,
perform calculations, etc.
It's not possible to do this in Fabric's default "naive" mode (one where you
rely on Fabric looping over host lists on your behalf), but with `.execute`
it's pretty easy. Simply switch from calling the actual work-bearing task, to
calling a "meta" task which takes control of execution with `.execute`::
from fabric.api import task, execute, run, runs_once
@task
def workhorse():
return run("get my infos")
@task
@runs_once
def go():
results = execute(workhorse)
        print(results)
In the above, ``workhorse`` can do any Fabric stuff at all -- it's literally
your old "naive" task -- except that it needs to return something useful.
``go`` is your new entry point (to be invoked as ``fab go``, or whatnot) and
its job is to take the ``results`` dictionary from the `.execute` call and do
whatever you need with it. Check the API docs for details on the structure of
that return value.
.. _dynamic-hosts:
Using ``execute`` with dynamically-set host lists
-------------------------------------------------
A common intermediate-to-advanced use case for Fabric is to parameterize lookup
of one's target host list at runtime (when use of :ref:`execution-roles` does not
suffice). ``execute`` can make this extremely simple, like so::
from fabric.api import run, execute, task
# For example, code talking to an HTTP API, or a database, or ...
from mylib import external_datastore
# This is the actual algorithm involved. It does not care about host
# lists at all.
def do_work():
run("something interesting on a host")
# This is the user-facing task invoked on the command line.
@task
def deploy(lookup_param):
# This is the magic you don't get with @hosts or @roles.
# Even lazy-loading roles require you to declare available roles
# beforehand. Here, the sky is the limit.
host_list = external_datastore.query(lookup_param)
# Put this dynamically generated host list together with the work to be
# done.
execute(do_work, hosts=host_list)
For example, if ``external_datastore`` was a simplistic "look up hosts by tag
in a database" service, and you wanted to run a task on all hosts tagged as
being related to your application stack, you might call the above like this::
$ fab deploy:app
But wait! A data migration has gone awry on the DB servers. Let's fix up our
migration code in our source repo, and deploy just the DB boxes again::
$ fab deploy:db
This use case looks similar to Fabric's roles, but has much more potential, and
is by no means limited to a single argument. Define the task however you wish,
query your external data store in whatever way you need -- it's just Python.
The alternate approach
~~~~~~~~~~~~~~~~~~~~~~
Similar to the above, but using ``fab``'s ability to call multiple tasks in
succession instead of an explicit ``execute`` call, is to mutate
:ref:`env.hosts <hosts>` in a host-list lookup task and then call ``do_work``
in the same session::
    from fabric.api import env, run, task
from mylib import external_datastore
# Marked as a publicly visible task, but otherwise unchanged: still just
# "do the work, let somebody else worry about what hosts to run on".
@task
def do_work():
run("something interesting on a host")
@task
def set_hosts(lookup_param):
# Update env.hosts instead of calling execute()
env.hosts = external_datastore.query(lookup_param)
Then invoke like so::
$ fab set_hosts:app do_work
One benefit of this approach over the previous one is that you can replace
``do_work`` with any other "workhorse" task::
$ fab set_hosts:db snapshot
$ fab set_hosts:cassandra,cluster2 repair_ring
$ fab set_hosts:redis,environ=prod status
.. _failures:
Failure handling
================
Once the task list has been constructed, Fabric will start executing them as
outlined in :ref:`execution-strategy`, until all tasks have been run on the
entirety of their host lists. However, Fabric defaults to a "fail-fast"
behavior pattern: if anything goes wrong, such as a remote program returning a
nonzero return value or your fabfile's Python code encountering an exception,
execution will halt immediately.
This is typically the desired behavior, but there are many exceptions to the
rule, so Fabric provides ``env.warn_only``, a Boolean setting. It defaults to
``False``, meaning an error condition will result in the program aborting
immediately. However, if ``env.warn_only`` is set to ``True`` at the time of
failure -- with, say, the `~fabric.context_managers.settings` context
manager -- Fabric will emit a warning message but continue executing.
To signal a failure error from a Fabric task, use the `~fabric.utils.abort`.
`~fabric.utils.abort` signals an error as if it had been detected by Fabric and
follows the regular execution model for control flow.
.. _connections:
Connections
===========
``fab`` itself doesn't actually make any connections to remote hosts. Instead,
it simply ensures that for each distinct run of a task on one of its hosts, the
env var ``env.host_string`` is set to the right value. Users wanting to
leverage Fabric as a library may do so manually to achieve similar effects
(though as of Fabric 1.3, using `~fabric.tasks.execute` is preferred and more
powerful.)
``env.host_string`` is (as the name implies) the "current" host string, and is
what Fabric uses to determine what connections to make (or re-use) when
network-aware functions are run. Operations like `~fabric.operations.run` or
`~fabric.operations.put` use ``env.host_string`` as a lookup key in a shared
dictionary which maps host strings to SSH connection objects.
.. note::
The connections dictionary (currently located at
``fabric.state.connections``) acts as a cache, opting to return previously
created connections if possible in order to save some overhead, and
creating new ones otherwise.
Lazy connections
----------------
Because connections are driven by the individual operations, Fabric will not
actually make connections until they're necessary. Take for example this task
which does some local housekeeping prior to interacting with the remote
server::
from fabric.api import *
@hosts('host1')
def clean_and_upload():
        local("find assets/ -name '*.DS_Store' -exec rm {} \;")
local('tar czf /tmp/assets.tgz assets/')
put('/tmp/assets.tgz', '/tmp/assets.tgz')
with cd('/var/www/myapp/'):
run('tar xzf /tmp/assets.tgz')
What happens, connection-wise, is as follows:
#. The two `~fabric.operations.local` calls will run without making any network
connections whatsoever;
#. `~fabric.operations.put` asks the connection cache for a connection to
``host1``;
#. The connection cache fails to find an existing connection for that host
string, and so creates a new SSH connection, returning it to
`~fabric.operations.put`;
#. `~fabric.operations.put` uploads the file through that connection;
#. Finally, the `~fabric.operations.run` call asks the cache for a connection
to that same host string, and is given the existing, cached connection for
its own use.
Extrapolating from this, you can also see that tasks which don't use any
network-borne operations will never actually initiate any connections (though
they will still be run once for each host in their host list, if any.)
Closing connections
-------------------
Fabric's connection cache never closes connections itself -- it leaves this up
to whatever is using it. The :doc:`fab <fab>` tool does this bookkeeping for
you: it iterates over all open connections and closes them just before it exits
(regardless of whether the tasks failed or not.)
Library users will need to ensure they explicitly close all open connections
before their program exits. This can be accomplished by calling
`~fabric.network.disconnect_all` at the end of your script.
.. note::
`~fabric.network.disconnect_all` may be moved to a more public location in
the future; we're still working on making the library aspects of Fabric
more solidified and organized.
Multiple connection attempts and skipping bad hosts
---------------------------------------------------
As of Fabric 1.4, multiple attempts may be made to connect to remote servers
before aborting with an error: Fabric will try connecting
:ref:`env.connection_attempts <connection-attempts>` times before giving up,
with a timeout of :ref:`env.timeout <timeout>` seconds each time. (These
currently default to 1 try and 10 seconds, to match previous behavior, but they
may be safely changed to whatever you need.)
Furthermore, even total failure to connect to a server is no longer an absolute
hard stop: set :ref:`env.skip_bad_hosts <skip-bad-hosts>` to ``True`` and in
most situations (typically initial connections) Fabric will simply warn and
continue, instead of aborting.
.. versionadded:: 1.4
.. _password-management:
Password management
===================
Fabric maintains an in-memory password cache of your login and sudo passwords
in certain situations; this helps avoid tedious re-entry when multiple systems
share the same password [#]_, or if a remote system's ``sudo`` configuration
doesn't do its own caching.
Pre-filling the password caches
-------------------------------
The first layer is a simple default or fallback password value,
:ref:`env.password <password>` (which may also be set at the command line via
:option:`--password <-p>` or :option:`--initial-password-prompt <-I>`). This
env var stores a single password which (if non-empty) will be tried in the
event that the host-specific cache (see below) has no entry for the current
:ref:`host string <host_string>`.
:ref:`env.passwords <passwords>` (plural!) serves as a per-user/per-host cache,
storing the most recently entered password for every unique user/host/port
combination (**note** that you must include **all three values** if modifying
the structure by hand - see the above link for details). Due to this cache,
connections to multiple different users and/or hosts in the same session will
only require a single password entry for each. (Previous versions of Fabric
used only the single, default password cache and thus required password
re-entry every time the previously entered password became invalid.)
Auto-filling/updating from user input
-------------------------------------
Depending on your configuration and the number of hosts your session will
connect to, you may find setting either or both of the above env vars to be
useful. However, Fabric will automatically fill them in as necessary without
any additional configuration.
Specifically, each time a password prompt is presented to the user, the value
entered is used to update both the single default password cache, and the cache
value for the current value of ``env.host_string``.
.. _sudo-passwords:
Specifying ``sudo``-only passwords
----------------------------------
In some situations (such as those involving two-factor authentication, or any
other situation where submitting a password at login time is not desired or
correct) you may want to only cache passwords intended for ``sudo``, instead of
reusing the values for both login and ``sudo`` purposes.
To do this, you may set :ref:`env.sudo_password <sudo_password>` or populate
:ref:`env.sudo_passwords <sudo_passwords>`, which mirror ``env.password`` and
``env.passwords`` (described above). These values will **only** be used in
responding to ``sudo`` password prompts, and will never be submitted at
connection time.
There is also an analogue to the ``--password`` command line flag, named
:option:`--sudo-password`, and like :option:`--initial-password-prompt <-I>`,
there exists :option:`--initial-sudo-password-prompt`.
.. note::
When both types of passwords are filled in (e.g. if ``env.password =
"foo"`` and ``env.sudo_password = "bar"``), the ``sudo`` specific passwords
will be used.
.. note::
Due to backwards compatibility concerns, user-entered ``sudo`` passwords
will still be cached into ``env.password``/``env.passwords``;
``env.sudo_password``/``env.sudo_passwords`` are purely for noninteractive
use.
.. [#] We highly recommend the use of SSH `key-based access
<http://en.wikipedia.org/wiki/Public_key>`_ instead of relying on
homogeneous password setups, as it's significantly more secure.
.. _ssh-config:
Leveraging native SSH config files
==================================
Command-line SSH clients (such as the one provided by `OpenSSH
<http://openssh.org>`_) make use of a specific configuration format typically
known as ``ssh_config``, and will read from a file in the platform-specific
location ``$HOME/.ssh/config`` (or an arbitrary path given to
:option:`--ssh-config-path`/:ref:`env.ssh_config_path <ssh-config-path>`.) This
file allows specification of various SSH options such as default or per-host
usernames, hostname aliases, and toggling other settings (such as whether to
use :ref:`agent forwarding <forward-agent>`.)
Fabric's SSH implementation allows loading a subset of these options from one's
actual SSH config file, should it exist. This behavior is not enabled by
default (in order to be backwards compatible) but may be turned on by setting
:ref:`env.use_ssh_config <use-ssh-config>` to ``True`` at the top of your
fabfile.
If enabled, the following SSH config directives will be loaded and honored by Fabric:
* ``User`` and ``Port`` will be used to fill in the appropriate connection
parameters when not otherwise specified, in the following fashion:
* Globally specified ``User``/``Port`` will be used in place of the current
defaults (local username and 22, respectively) if the appropriate env vars
are not set.
* However, if :ref:`env.user <user>`/:ref:`env.port <port>` *are* set, they
override global ``User``/``Port`` values.
* User/port values in the host string itself (e.g. ``hostname:222``) will
override everything, including any ``ssh_config`` values.
* ``HostName`` can be used to replace the given hostname, just like with
regular ``ssh``. So a ``Host foo`` entry specifying ``HostName example.com``
will allow you to give Fabric the hostname ``'foo'`` and have that expanded
into ``'example.com'`` at connection time.
* ``IdentityFile`` will extend (not replace) :ref:`env.key_filename
<key-filename>`.
* ``ForwardAgent`` will augment :ref:`env.forward_agent <forward-agent>` in an
"OR" manner: if either is set to a positive value, agent forwarding will be
enabled.
* ``ProxyCommand`` will trigger use of a proxy command for host connections,
just as with regular ``ssh``.
.. note::
If all you want to do is bounce SSH traffic off a gateway, you may find
:ref:`env.gateway <gateway>` to be a more efficient connection method
(which will also honor more Fabric-level settings) than the typical ``ssh
gatewayhost nc %h %p`` method of using ``ProxyCommand`` as a gateway.
.. note::
If your SSH config file contains ``ProxyCommand`` directives *and* you have
set :ref:`env.gateway <gateway>` to a non-``None`` value, ``env.gateway``
will take precedence and the ``ProxyCommand`` will be ignored.
    If you have a pre-created SSH config file, the rationale for this
    precedence is that it is easier to modify ``env.gateway`` on the fly
    (e.g. via `~fabric.context_managers.settings`) than to work around your
    conf file's contents entirely.
| PypiClean |
/Herring-0.1.49.tar.gz/Herring-0.1.49/herring/argument_helper.py | from collections import deque
__docformat__ = 'restructuredtext en'
__all__ = ('ArgumentHelper',)
class ArgumentHelper(object):
    """Helper for handling command line arguments."""

    @staticmethod
    def argv_to_dict(argv):
        """
        Given a list of keyword arguments, parse into a kwargs dictionary.

        Each argument should either start with '--' indicating a key, or not,
        indicating a value.

        Also supports "--key=value" syntax.

        True will be used for the value of a key that does not have a given
        value.  Multiple values will be joined with a space.

        This method does not attempt to cast any values, they all remain
        strings.

        >>> argv = ['--flag', 'false', '--foo', 'alpha', 'beta', '--bar=delta', '--charlie']
        >>> ArgumentHelper.argv_to_dict(argv)
        {'flag': 'false', 'foo': 'alpha beta', 'bar': 'delta', 'charlie': True}

        :param argv: argument list
        :type argv: list
        :return: parsed keyword arguments
        :rtype: dict
        """
        kwargs = {}
        current_key = None
        args = deque(argv)
        while args:
            arg = args.popleft()
            if arg == '--':
                # Bare '--': flag the pending key (if any) with True.
                # NOTE(review): current_key is intentionally left unchanged
                # here, so values appearing after '--' still merge into the
                # previous key -- confirm this is the intended behavior.
                ArgumentHelper.set_kwargs_flag(kwargs, current_key)
            elif arg.startswith('--'):
                # A new key: first close out the previous key as a True flag
                # if it never received a value.
                ArgumentHelper.set_kwargs_flag(kwargs, current_key)
                current_key = arg[2:]
                if '=' in current_key:
                    # "--key=value" form assigns the value immediately.
                    current_key, value = current_key.split("=", 1)
                    kwargs[current_key] = value
            else:
                # A plain value: append it to the current key's value.
                ArgumentHelper.merge_kwargs(kwargs, current_key, arg)
        # Close out the final key as a True flag if it never received a value.
        ArgumentHelper.set_kwargs_flag(kwargs, current_key)
        return kwargs

    @staticmethod
    def set_kwargs_flag(kwargs, key):
        """
        Set the flag (True) in kwargs if the key has not yet been set.

        :param kwargs: keyword arguments
        :type kwargs: dict
        :param key: key
        :type key: str
        """
        if key is not None:
            if key not in kwargs:
                kwargs[key] = True

    @staticmethod
    def merge_kwargs(kwargs, key, value):
        """
        Set the kwargs key/value pair, joining any pre-existing value with
        a space.

        :param kwargs: keyword arguments
        :type kwargs: dict
        :param key: key
        :type key: str
        :param value: the value to set the kwarg to
        :type value: object
        """
        if key is not None:
            if key in kwargs:
                value = ' '.join([kwargs[key], value])
            kwargs[key] = value
/BIA_OBS-1.0.3.tar.gz/BIA_OBS-1.0.3/BIA/static/dist/node_modules/run-parallel/README.md | # run-parallel [![travis][travis-image]][travis-url] [![npm][npm-image]][npm-url] [![downloads][downloads-image]][downloads-url] [![javascript style guide][standard-image]][standard-url]
[travis-image]: https://img.shields.io/travis/feross/run-parallel/master.svg
[travis-url]: https://travis-ci.org/feross/run-parallel
[npm-image]: https://img.shields.io/npm/v/run-parallel.svg
[npm-url]: https://npmjs.org/package/run-parallel
[downloads-image]: https://img.shields.io/npm/dm/run-parallel.svg
[downloads-url]: https://npmjs.org/package/run-parallel
[standard-image]: https://img.shields.io/badge/code_style-standard-brightgreen.svg
[standard-url]: https://standardjs.com
### Run an array of functions in parallel
[![Sauce Test Status](https://saucelabs.com/browser-matrix/run-parallel.svg)](https://saucelabs.com/u/run-parallel)
### install
```
npm install run-parallel
```
### usage
#### parallel(tasks, [callback])
Run the `tasks` array of functions in parallel, without waiting until the previous
function has completed. If any of the functions pass an error to its callback, the main
`callback` is immediately called with the value of the error. Once the `tasks` have
completed, the results are passed to the final `callback` as an array.
It is also possible to use an object instead of an array. Each property will be run as a
function and the results will be passed to the final `callback` as an object instead of
an array. This can be a more readable way of handling the results.
##### arguments
- `tasks` - An array or object containing functions to run. Each function is passed a
`callback(err, result)` which it must call on completion with an error `err` (which can
be `null`) and an optional `result` value.
- `callback(err, results)` - An optional callback to run once all the functions have
completed. This function gets a results array (or object) containing all the result
arguments passed to the task callbacks.
##### example
```js
var parallel = require('run-parallel')
parallel([
function (callback) {
setTimeout(function () {
callback(null, 'one')
}, 200)
},
function (callback) {
setTimeout(function () {
callback(null, 'two')
}, 100)
}
],
// optional callback
function (err, results) {
// the results array will equal ['one','two'] even though
// the second function had a shorter timeout.
})
```
This module is basically equivalent to
[`async.parallel`](https://github.com/caolan/async#paralleltasks-callback), but it's
handy to just have the one function you need instead of the kitchen sink. Modularity!
Especially handy if you're serving to the browser and need to reduce your javascript
bundle size.
Works great in the browser with [browserify](http://browserify.org/)!
### see also
- [run-auto](https://github.com/feross/run-auto)
- [run-parallel-limit](https://github.com/feross/run-parallel-limit)
- [run-series](https://github.com/feross/run-series)
- [run-waterfall](https://github.com/feross/run-waterfall)
### license
MIT. Copyright (c) [Feross Aboukhadijeh](http://feross.org).
| PypiClean |
/Djaizz-23.6.21.1-py3-none-any.whl/djaizz/model/models/ml/hugging_face/zero_shot_classification.py | from sys import version_info
from typing import Union
from django.utils.functional import classproperty
from gradio.interface import Interface
from gradio.inputs import (Textbox as TextboxInput,
Dataframe as DataframeInput,
Checkbox as CheckboxInput)
from gradio.outputs import Label as LabelOutput
from djaizz.model.apps import DjaizzModelModuleConfig
from djaizz.util import PGSQL_IDENTIFIER_MAX_LEN
from .base import PreTrainedHuggingFaceTransformer
if version_info >= (3, 9):
from collections.abc import Sequence
else:
from typing import Sequence # pylint: disable=ungrouped-imports
__all__: Sequence[str] = ('PreTrainedHuggingFaceZeroShotClassifier',)
ZeroShotClassificationInputType = str
ZeroShotClassificationOutputType = dict[str, float]
class PreTrainedHuggingFaceZeroShotClassifier(
        PreTrainedHuggingFaceTransformer):
    # pylint: disable=abstract-method,too-many-ancestors
    """Djaizz Pre-Trained Hugging Face Zero-Shot Classifier Model class."""

    class Meta(PreTrainedHuggingFaceTransformer.Meta):
        # pylint: disable=too-few-public-methods
        """Django Model Class Metadata."""

        verbose_name: str = 'Pre-Trained Hugging Face Zero-Shot Classifier'
        verbose_name_plural: str = \
            'Pre-Trained Hugging Face Zero-Shot Classifiers'

        # DB table name is "<app label>_<outermost class name>".
        db_table: str = (f'{DjaizzModelModuleConfig.label}_'
                         f"{__qualname__.split(sep='.', maxsplit=1)[0]}")
        # Guard against PostgreSQL's identifier-length limit at import time.
        assert len(db_table) <= PGSQL_IDENTIFIER_MAX_LEN, \
            ValueError(f'*** "{db_table}" DB TABLE NAME TOO LONG ***')

        default_related_name = 'pretrained_hugging_face_zero_shot_classifiers'

    def predict(self,
                text_or_texts:
                    Union[ZeroShotClassificationInputType,
                          Sequence[ZeroShotClassificationInputType]],
                candidate_labels: list[str],
                hypothesis_template: str = 'This example is {}.',
                multi_label: bool = False) \
            -> Union[ZeroShotClassificationOutputType,
                     list[ZeroShotClassificationOutputType]]:
        # pylint: disable=arguments-differ
        """
        Zero-Shot Classification of Text(s).

        Returns a dict mapping each candidate label to its score when a
        single text (str) is given, or a list of such dicts when a sequence
        of texts is given.
        """
        single_text: bool = isinstance(text_or_texts, str)

        # Coerce any non-str, non-list sequence (tuple, generator, ...) to a
        # list before handing it to the underlying pipeline.
        if not (single_text or isinstance(text_or_texts, list)):
            text_or_texts: list[ZeroShotClassificationInputType] = \
                list(text_or_texts)

        # Ensure the underlying pre-trained pipeline object is loaded.
        self.load()

        output = self.native_obj(sequences=text_or_texts,
                                 candidate_labels=candidate_labels,
                                 hypothesis_template=hypothesis_template,
                                 multi_label=multi_label)

        # The pipeline result carries parallel 'labels' and 'scores' lists
        # (one result mapping per input text when several texts are given).
        return (dict(zip(output['labels'], output['scores']))
                if single_text
                else [dict(zip(result['labels'], result['scores']))
                      for result in output])

    @classproperty
    def gradio_ui(cls) -> Interface:   # noqa: N805
        # pylint: disable=no-self-argument
        """Gradio Interface."""
        # NOTE(review): the lambda below declares 5 parameters (incl. `self`)
        # but only 4 Gradio inputs are configured, so `self` would be bound
        # to the first input's value at call time -- confirm this wiring
        # against a live instance.
        return Interface(
            fn=lambda self, text, candidates, hypothesis_tpl, multi_labels:
                cls.predict(self,
                            text_or_texts=text,
                            candidate_labels=[s for s in candidates if s],
                            hypothesis_template=hypothesis_tpl,
                            multi_label=multi_labels),
            # (Callable) - the function to wrap an interface around.

            inputs=[TextboxInput(lines=10,
                                 placeholder='Text to Classify',
                                 default='',
                                 numeric=False,
                                 type='str',
                                 label='Text to Classify'),

                    DataframeInput(headers=None,
                                   row_count=10,
                                   col_count=1,
                                   datatype='str',
                                   col_width=100,
                                   default=None,
                                   type='array',
                                   label='Candidate Labels'),

                    TextboxInput(lines=1,
                                 placeholder='Hypothesis Format',
                                 default='This example is {}.',
                                 numeric=False,
                                 type='str',
                                 label='Hypothesis Format'),

                    CheckboxInput(default=False, label='Multi-Label?')],
            # (Union[str, list[Union[str, InputComponent]]]) -
            # a single Gradio input component,
            # or list of Gradio input components.
            # Components can either be passed as instantiated objects,
            # or referred to by their string shortcuts.
            # The number of input components should match
            # the number of parameters in fn.

            outputs=LabelOutput(num_top_classes=10,
                                type='auto',
                                label='Label Probabilities'),
            # (Union[str, list[Union[str, OutputComponent]]]) -
            # a single Gradio output component,
            # or list of Gradio output components.
            # Components can either be passed as instantiated objects,
            # or referred to by their string shortcuts.
            # The number of output components should match
            # the number of values returned by fn.

            verbose=True,
            # (bool) - whether to print detailed information during launch.

            examples=None,
            # (Union[list[list[Any]], str]) - sample inputs for the function;
            # if provided, appears below the UI components and can be used
            # to populate the interface.
            # Should be nested list, in which the outer list consists of
            # samples and each inner list consists of an input
            # corresponding to each input component.
            # A string path to a directory of examples can also be provided.
            # If there are multiple input components and a directory
            # is provided, a log.csv file must be present in the directory
            # to link corresponding inputs.

            examples_per_page=10,
            # (int) - If examples are provided, how many to display per page.

            live=False,
            # (bool) - should the interface automatically reload on change?

            layout='unaligned',
            # (str) - Layout of input and output panels.
            # - "horizontal" arranges them as two columns of equal height,
            # - "unaligned" arranges them as two columns of unequal height, and
            # - "vertical" arranges them vertically.

            show_input=True,
            show_output=True,

            capture_session=False,
            # (bool) - if True, captures the default graph and session
            # (needed for Tensorflow 1.x)

            interpretation='default',
            # (Union[Callable, str]) - function that provides interpretation
            # explaining prediction output.
            # Pass "default" to use built-in interpreter.

            num_shap=2.0,
            # (float) - a multiplier that determines how many examples
            # are computed for shap-based interpretation.
            # Increasing this value will increase shap runtime,
            # but improve results.

            theme='default',
            # (str) - Theme to use - one of
            # - "default",
            # - "huggingface",
            # - "grass",
            # - "peach".
            # Add "dark" prefix, e.g. "darkpeach" or "darkdefault"
            # for darktheme.

            repeat_outputs_per_model=True,

            title=cls._meta.verbose_name,
            # (str) - a title for the interface;
            # if provided, appears above the input and output components.

            description=('A pre-trained Hugging Face model '
                         'for zero-shot classification'),
            # (str) - a description for the interface;
            # if provided, appears above the input and output components.

            article=None,
            # (str) - an expanded article explaining the interface;
            # if provided, appears below the input and output components.
            # Accepts Markdown and HTML content.

            thumbnail=None,
            # (str) - path to image or src to use as display picture for models
            # listed in gradio.app/hub

            css=None,
            # (str) - custom css or path to custom css file
            # to use with interface.

            server_port=None,
            # (int) - will start gradio app on this port (if available)

            # server_name=networking.LOCALHOST_NAME,
            # (str) - to make app accessible on local network set to "0.0.0.0".

            height=500,
            width=900,

            allow_screenshot=True,
            # (bool) - if False, users will not see a button
            # to take a screenshot of the interface.

            allow_flagging=False,
            # (bool) - if False, users will not see a button
            # to flag an input and output.

            flagging_options=None,
            # (list[str]) - if not None, provides options a user must select
            # when flagging.

            encrypt=False,
            # (bool) - If True, flagged data will be encrypted
            # by key provided by creator at launch

            show_tips=False,
            # (bool) - if True, will occasionally show tips
            # about new Gradio features

            flagging_dir='flagged',
            # (str) - what to name the dir where flagged data is stored.

            analytics_enabled=True,

            enable_queue=False,
            # (bool) - if True, inference requests will be served through
            # a queue instead of with parallel threads.
            # Required for longer inference times (> 1min) to prevent timeout.
        )
/ClueDojo-1.4.3-1.tar.gz/ClueDojo-1.4.3-1/src/cluedojo/static/dijit/form/Textarea.js | if(!dojo._hasResource["dijit.form.Textarea"]){
dojo._hasResource["dijit.form.Textarea"]=true;
dojo.provide("dijit.form.Textarea");
dojo.require("dijit.form.SimpleTextarea");
dojo.declare("dijit.form.Textarea",dijit.form.SimpleTextarea,{cols:"",_previousNewlines:0,_strictMode:(dojo.doc.compatMode!="BackCompat"),_getHeight:function(_1){
var _2=_1.scrollHeight;
if(dojo.isIE){
_2+=_1.offsetHeight-_1.clientHeight-((dojo.isIE<8&&this._strictMode)?dojo._getPadBorderExtents(_1).h:0);
}else{
if(dojo.isMoz){
_2+=_1.offsetHeight-_1.clientHeight;
}else{
if(dojo.isWebKit&&!(dojo.isSafari<4)){
_2+=dojo._getBorderExtents(_1).h;
}else{
_2+=dojo._getPadBorderExtents(_1).h;
}
}
}
return _2;
},_estimateHeight:function(_3){
_3.style.maxHeight="";
_3.style.height="auto";
_3.rows=(_3.value.match(/\n/g)||[]).length+1;
},_needsHelpShrinking:dojo.isMoz||dojo.isWebKit,_onInput:function(){
this.inherited(arguments);
if(this._busyResizing){
return;
}
this._busyResizing=true;
var _4=this.textbox;
if(_4.scrollHeight&&_4.offsetHeight&&_4.clientHeight){
var _5=this._getHeight(_4)+"px";
if(_4.style.height!=_5){
_4.style.maxHeight=_4.style.height=_5;
}
if(this._needsHelpShrinking){
if(this._setTimeoutHandle){
clearTimeout(this._setTimeoutHandle);
}
this._setTimeoutHandle=setTimeout(dojo.hitch(this,"_shrink"),0);
}
}else{
this._estimateHeight(_4);
}
this._busyResizing=false;
},_busyResizing:false,_shrink:function(){
this._setTimeoutHandle=null;
if(this._needsHelpShrinking&&!this._busyResizing){
this._busyResizing=true;
var _6=this.textbox;
var _7=false;
if(_6.value==""){
_6.value=" ";
_7=true;
}
var _8=_6.scrollHeight;
if(!_8){
this._estimateHeight(_6);
}else{
var _9=_6.style.paddingBottom;
var _a=dojo._getPadExtents(_6);
_a=_a.h-_a.t;
_6.style.paddingBottom=_a+1+"px";
var _b=this._getHeight(_6)-1+"px";
if(_6.style.maxHeight!=_b){
_6.style.paddingBottom=_a+_8+"px";
_6.scrollTop=0;
_6.style.maxHeight=this._getHeight(_6)-_8+"px";
}
_6.style.paddingBottom=_9;
}
if(_7){
_6.value="";
}
this._busyResizing=false;
}
},resize:function(){
this._onInput();
},_setValueAttr:function(){
this.inherited(arguments);
this.resize();
},postCreate:function(){
this.inherited(arguments);
dojo.style(this.textbox,{overflowY:"hidden",overflowX:"auto",boxSizing:"border-box",MsBoxSizing:"border-box",WebkitBoxSizing:"border-box",MozBoxSizing:"border-box"});
this.connect(this.textbox,"onscroll",this._onInput);
this.connect(this.textbox,"onresize",this._onInput);
this.connect(this.textbox,"onfocus",this._onInput);
setTimeout(dojo.hitch(this,"resize"),0);
}});
} | PypiClean |
/FlexGet-3.9.6-py3-none-any.whl/flexget/plugins/modify/set_field.py | from loguru import logger
from flexget import plugin
from flexget.entry import register_lazy_lookup
from flexget.event import event
from flexget.utils.template import RenderError
logger = logger.bind(name='set')
# Use a string for this sentinel, so it survives serialization
UNSET = '__unset__'
class ModifySet:
    """Allows adding information to a task entry for use later.

    Example:
      set:
        path: ~/download/path/
    """

    # Accept any non-empty mapping of field name -> value or template string.
    schema = {'type': 'object', "minProperties": 1}

    def on_task_metainfo(self, task, config):
        """Adds the set dict to all entries (iterates ``task.all_entries``)."""
        for entry in task.all_entries:
            self.modify(entry, config)

    def modify(self, entry, config, errors=True):
        """This can be called from a plugin to add set values to an entry.

        :param entry: the entry to modify
        :param config: mapping of field name -> plain value or Jinja template string
        :param errors: when True, template render failures are logged at ERROR
            level by the lazy lookup; otherwise at DEBUG
        """
        for field in config:
            # If this doesn't appear to be a jinja template, just set it right away.
            if not isinstance(config[field], str) or '{' not in config[field]:
                entry[field] = config[field]
            # Store original values before overwriting with a lazy field, so that set directives can reference
            # themselves.
            else:
                orig_value = entry.get(field, UNSET, eval_lazy=False)
                # Remove any existing value so the lazy field below becomes
                # the authoritative source for this key.
                try:
                    del entry[field]
                except KeyError:
                    pass
                entry.add_lazy_fields(
                    self.lazy_set,
                    [field],
                    kwargs={
                        'config': config,
                        'field': field,
                        'orig_field_value': orig_value,
                        'errors': errors,
                    },
                )

    @register_lazy_lookup('set_field')
    def lazy_set(self, entry, config, field, orig_field_value, errors=True):
        """Render the template for ``field`` the first time the field is read.

        Restores the entry's original value first so the template can
        reference the very field it is replacing.  On a render failure the
        original value (if any) is left in place and the error is logged.
        """
        level = 'ERROR' if errors else 'DEBUG'
        if orig_field_value is not UNSET:
            entry[field] = orig_field_value
        try:
            entry[field] = entry.render(config[field], native=True)
        except RenderError as e:
            logger.log(level, 'Could not set {} for {}: {}', field, entry['title'], e)
@event('plugin.register')
def register_plugin():
    """Register the ModifySet plugin with FlexGet as 'set' (plugin API v2)."""
    plugin.register(ModifySet, 'set', api_ver=2)
/GenMotion-0.0.4-py3-none-any.whl/genmotion/render/python/rendermotion.py | import numpy as np
import imageio
import os
import torch
from tqdm import tqdm
from genmotion.render.python.renderer import get_renderer
import genmotion.render.python.utils as geometry
def get_rotation(theta=np.pi/3):
    """Return a 3x3 numpy rotation matrix for *theta* radians about the y-axis."""
    # Axis-angle representation: unit y-axis scaled by the rotation angle.
    y_axis = torch.tensor([0, 1, 0], dtype=torch.float)
    rotation = geometry.axis_angle_to_matrix(theta * y_axis)
    return rotation.numpy()
def render_video(meshes, key, action, renderer, savepath, background, cam=(0.75, 0.75, 0, 0.10), color=(0.11, 0.53, 0.8)):
    """Render a sequence of meshes to an mp4, cropped to the animated region.

    :param meshes: per-frame vertex positions, indexed [frame, vertex, xyz]
    :param key: sequence name (used only in the progress-bar label)
    :param action: action index (used only in the progress-bar label)
    :param renderer: object with a ``render(background, mesh, cam, color=...)`` method
    :param savepath: output .mp4 path
    :param background: background image forwarded to the renderer
    :param cam: camera parameters forwarded to the renderer
    :param color: RGB mesh colour (immutable tuple default instead of a shared
        mutable list default)
    """
    writer = imageio.get_writer(savepath, fps=30)
    # Center the whole animation on the first frame's centroid.
    meshes = meshes - meshes[0].mean(axis=0)
    imgs = []
    for mesh in tqdm(meshes, desc=f"Visualize {key}, action {action}"):
        imgs.append(renderer.render(background, mesh, cam, color=list(color)))
    imgs = np.array(imgs)
    # A pixel is "foreground" if any channel is not near-white; crop every
    # frame to the union bounding box of foreground pixels over the clip.
    masks = ~(imgs / 255. > 0.96).all(-1)
    coords = np.argwhere(masks.sum(axis=0))
    if coords.size:
        y1, x1 = coords.min(axis=0)
        y2, x2 = coords.max(axis=0)
        # +1 because argwhere's max is an inclusive index while slicing is
        # exclusive (the original dropped the last content row/column).
        imgs = imgs[:, y1:y2 + 1, x1:x2 + 1]
    # else: no foreground anywhere -- keep full frames instead of crashing.
    for cimg in imgs:
        writer.append_data(cimg)
    writer.close()
def render(data: np.array, save_folder: str):
    # Visualise a batch of generated mesh sequences, writing one mp4 per
    # (key, action) pair into save_folder via render_video().
    #
    # NOTE(review): `np.array` in the annotation is the array *constructor*,
    # not a type; `np.ndarray` is presumably intended.
    if data.shape[0] == 3:
        # Three leading entries are interpreted as named result groups.
        visualization, generation, reconstruction = data
        data = {"visualization": visualization,
                "generation": generation,
                "reconstruction": reconstruction}
    else:
        # Otherwise each leading entry becomes its own "generation_<i>" group.
        # output = {f"generation_{key}": output[key] for key in range(2)} # len(output))}
        # output = {f"generation_{key}": output[key] for key in range(len(output))}
        data = {f"generation_{key}": data[key] for key in range(len(data))}
    width = 1024
    height = 1024
    background = np.zeros((height, width, 3))
    renderer = get_renderer(width, height)
    # If duration mode, truncate each group back to its intended length.
    # NOTE(review): assumes at least 4 "generation_*" groups exist and that
    # the last axis is the time dimension; the 40/60/80/100 and 100..160
    # frame counts are hard-coded to match a specific generator -- confirm.
    if data["generation_3"].shape[-1] == 100:
        data["generation_0"] = data["generation_0"][:, :, :, :40]
        data["generation_1"] = data["generation_1"][:, :, :, :60]
        data["generation_2"] = data["generation_2"][:, :, :, :80]
        data["generation_3"] = data["generation_3"][:, :, :, :100]
    elif data["generation_3"].shape[-1] == 160:
        print("160 mode")
        data["generation_0"] = data["generation_0"][:, :, :, :100]
        data["generation_1"] = data["generation_1"][:, :, :, :120]
        data["generation_2"] = data["generation_2"][:, :, :, :140]
        data["generation_3"] = data["generation_3"][:, :, :, :160]
    # if str(action) == str(1) and str(key) == "generation_4":
    for key in data:
        vidmeshes = data[key]
        for action in range(len(vidmeshes)):
            # transpose moves the time axis first -- presumably
            # (vertex, xyz, frame) -> (frame, vertex, xyz); confirm upstream.
            meshes = vidmeshes[action].transpose(2, 0, 1)
            path = os.path.join(save_folder, "action{}_{}.mp4".format(action, key))
            render_video(meshes, key, action, renderer, path, background)
/Monzo%20API-0.3.0.tar.gz/Monzo API-0.3.0/monzo/endpoints/attachment.py | from __future__ import annotations
from datetime import datetime
from os.path import getsize, isfile, splitext
from urllib.parse import urlparse
from monzo.authentication import Authentication
from monzo.endpoints.monzo import Monzo
from monzo.exceptions import MonzoGeneralError
from monzo.helpers import create_date
SUPPORTED_ATTACHMENT_EXTENSIONS = {
'jpeg': 'image/jpeg',
'jpg': 'image/jpg',
'png': 'image/png',
}
class Attachment(Monzo):
"""
Class to manage attachments.
Class provides methods to manage attachments.
"""
__slots__ = [
'_attachment_id',
'_user_id',
'_transaction_id',
'_url',
'_file_type',
'_created',
]
def __init__(
self,
auth: Authentication,
attachment_id: str,
user_id: str,
transaction_id: str,
url: str,
file_type: str,
created: datetime
):
"""
Initialize Attachment.
Args:
auth: Monzo authentication object
attachment_id: The unique ID for the attachment
user_id: User ID transaction is associated with
transaction_id: Transaction ID for the transaction attachment is associated with
url: URL of the image attachment
file_type: File type for attachment
created: Datetime object identifying whe the attachment was created
"""
self._attachment_id = attachment_id
self._user_id = user_id
self._transaction_id = transaction_id
self._url = url
self._file_type = file_type
self._created = created
super().__init__(auth=auth)
@property
def attachment_id(self) -> str:
"""
Property to output attachment ID.
Returns:
Attachment ID
"""
return self._attachment_id
@property
def transaction_id(self) -> str:
"""
Property to output transaction ID.
Returns:
Transaction ID
"""
return self._transaction_id
@property
def url(self) -> str:
"""
Property to output attachment URL.
Returns:
Attachment URL
"""
return self._url
@property
def file_type(self) -> str:
"""
Property to output attachment file type.
Returns:
Attachment file type
"""
return self._transaction_id
@property
def created(self) -> datetime:
"""
Property to output attachment creation time.
Returns:
Attachment creation datetime
"""
return self._created
def delete(self) -> None:
"""Delete the attachment."""
data = {
'id': self.attachment_id
}
self._monzo_auth.make_request(
path='/attachment/deregister',
method='POST',
data=data,
)
@classmethod
def create_attachment(
cls,
auth: Authentication,
transaction_id: str,
url: str
) -> Attachment:
"""
Create a new image attachment.
Creates an image attachment, if the URL is a file system URL the file is uploaded, otherwise the URL is used.
Args:
auth: Monzo authentication object
transaction_id: ID of the transaction to associate the attachment with
url: URL of the transaction
Returns:
Created attachment
"""
file_url = urlparse(url)
_, file_extension = splitext(url)
if file_extension not in SUPPORTED_ATTACHMENT_EXTENSIONS:
raise MonzoGeneralError('Unsupported file type')
file_type = SUPPORTED_ATTACHMENT_EXTENSIONS[file_extension]
if file_url.netloc:
file_type = Attachment._upload_file(auth=auth, url=url, file_type=file_type)
data = {
'external_id': transaction_id,
'file_type': file_type,
'file_url': file_url,
}
response = auth.make_request(
path='',
method='POST',
data=data
)
if response['code'] != 200:
raise MonzoGeneralError('Failed to create attachment')
return Attachment(
auth=auth,
attachment_id=response['data']['attachment']['id'],
user_id=response['data']['attachment']['user_id'],
transaction_id=response['data']['attachment']['external_id'],
url=response['data']['attachment']['file_url'],
file_type=response['data']['attachment']['file_type'],
created=create_date(response['data']['attachment']['created']),
)
    @classmethod
    def _upload_file(cls, auth: Authentication, url: str, file_type: str) -> str:
        """
        Create an upload bucket for the attachment and upload the file.

        Args:
            auth: Monzo authentication object
            url: Path of the local file to upload
            file_type: Mime type for the file

        Raises:
            MonzoGeneralError: If the path does not point at an existing file.

        Returns:
            URL of the uploaded file
        """
        if not isfile(url):
            raise MonzoGeneralError('File does not exist')
        content_length = getsize(url)
        data = {
            'file_name': None,  # NOTE(review): placeholder — presumably should be the file's basename; confirm
            'file_type': file_type,
            'content_length': content_length,
        }
        response = auth.make_request(
            path='',  # NOTE(review): endpoint missing — presumably '/attachment/upload'; confirm against Monzo API docs
            method='POST',
            data=data,
        )
        # TODO upload file
        # NOTE(review): the file content is never actually PUT to the returned
        # bucket URL; this helper is incomplete (see TODO above).
        return response['data']['file_url']
# energycap/sdk/models/channel_response_py3.py (extraction artifact cleaned)
from msrest.serialization import Model
class ChannelResponse(Model):
    """ChannelResponse.

    Auto-generated msrest serialization model for a channel.

    :param channel_id: The channel identifier
    :type channel_id: int
    :param interval_minutes: The interval of the channel. The interval is
     measured in minutes
    :type interval_minutes: int
    :param observation_type:
    :type observation_type: ~energycap.sdk.models.ObservationTypeChild
    :param channel_description: Description of the channel
    :type channel_description: str
    :param channel_import_id: The import identifier for the channel.
    :type channel_import_id: str
    :param has_readings: Indicates if the channel has readings
    :type has_readings: bool
    :param first_reading_date: Date of the earliest reading for the channel
    :type first_reading_date: datetime
    :param last_reading_date: Date of the most recent reading for the channel
    :type last_reading_date: datetime
    :param channel_versions: List of channel versions
     Versions include channel settings that may change over time
    :type channel_versions: list[~energycap.sdk.models.ChannelVersionResponse]
    :param is_used_on_distribution: Indicates if the channel is a part of a
     distribution
    :type is_used_on_distribution: bool
    :param meter:
    :type meter: ~energycap.sdk.models.MeterChildIncludeMeterImportIdAndRoute
    """
    # Maps attribute names to their wire (JSON) keys and msrest type strings.
    _attribute_map = {
        'channel_id': {'key': 'channelId', 'type': 'int'},
        'interval_minutes': {'key': 'intervalMinutes', 'type': 'int'},
        'observation_type': {'key': 'observationType', 'type': 'ObservationTypeChild'},
        'channel_description': {'key': 'channelDescription', 'type': 'str'},
        'channel_import_id': {'key': 'channelImportId', 'type': 'str'},
        'has_readings': {'key': 'hasReadings', 'type': 'bool'},
        'first_reading_date': {'key': 'firstReadingDate', 'type': 'iso-8601'},
        'last_reading_date': {'key': 'lastReadingDate', 'type': 'iso-8601'},
        'channel_versions': {'key': 'channelVersions', 'type': '[ChannelVersionResponse]'},
        'is_used_on_distribution': {'key': 'isUsedOnDistribution', 'type': 'bool'},
        'meter': {'key': 'meter', 'type': 'MeterChildIncludeMeterImportIdAndRoute'},
    }
    def __init__(self, *, channel_id: int=None, interval_minutes: int=None, observation_type=None, channel_description: str=None, channel_import_id: str=None, has_readings: bool=None, first_reading_date=None, last_reading_date=None, channel_versions=None, is_used_on_distribution: bool=None, meter=None, **kwargs) -> None:
        """Initialize the model; all fields are optional keyword arguments."""
        super(ChannelResponse, self).__init__(**kwargs)
        self.channel_id = channel_id
        self.interval_minutes = interval_minutes
        self.observation_type = observation_type
        self.channel_description = channel_description
        self.channel_import_id = channel_import_id
        self.has_readings = has_readings
        self.first_reading_date = first_reading_date
        self.last_reading_date = last_reading_date
        self.channel_versions = channel_versions
        self.is_used_on_distribution = is_used_on_distribution
        self.meter = meter
# SCons/Errors.py (extraction artifact cleaned)
__revision__ = "src/engine/SCons/Errors.py bee7caf9defd6e108fc2998a2520ddb36a967691 2019-12-17 02:07:09 bdeegan"
import shutil
import SCons.Util
class BuildError(Exception):
    """ Errors occurring while building.

    BuildError have the following attributes:
    =========================================

    Information about the cause of the build error:
    -----------------------------------------------

    errstr : a description of the error message

    status : the return code of the action that caused the build error.
    Must be set to a non-zero value even if the build error is not due
    to an action returning a non-zero returned code.

    exitstatus : SCons exit status due to this build error.
    Must be nonzero unless due to an explicit Exit()
    call. Not always the same as status, since
    actions return a status code that should be
    respected, but SCons typically exits with 2
    irrespective of the return value of the failed
    action.

    filename : The name of the file or directory that caused the
    build error. Set to None if no files are associated with
    this error. This might be different from the target
    being built. For example, failure to create the
    directory in which the target file will appear. It
    can be None if the error is not due to a particular
    filename.

    exc_info : Info about exception that caused the build
    error. Set to (None, None, None) if this build
    error is not due to an exception.

    Information about the cause of the location of the error:
    ---------------------------------------------------------

    node : the error occurred while building this target node(s)

    executor : the executor that caused the build to fail (might
    be None if the build failures is not due to the
    executor failing)

    action : the action that caused the build to fail (might be
    None if the build failures is not due to the an
    action failure)

    command : the command line for the action that caused the
    build to fail (might be None if the build failures
    is not due to the an action failure)
    """
    def __init__(self,
                 node=None, errstr="Unknown error", status=2, exitstatus=2,
                 filename=None, executor=None, action=None, command=None,
                 exc_info=(None, None, None)):
        """Store the error details; see the class docstring for each field."""
        # py3: errstr should be string and not bytes.
        self.errstr = SCons.Util.to_String(errstr)
        self.status = status
        self.exitstatus = exitstatus
        self.filename = filename
        self.exc_info = exc_info
        self.node = node
        self.executor = executor
        self.action = action
        self.command = command
        # Pass everything to Exception so args carries the full detail
        # (e.g. for repr/pickling of the exception object).
        Exception.__init__(self, node, errstr, status, exitstatus, filename,
                           executor, action, command, exc_info)
    def __str__(self):
        """Render as 'filename: errstr' when a filename is known."""
        if self.filename:
            return self.filename + ': ' + self.errstr
        else:
            return self.errstr
class InternalError(Exception):
    """An error internal to SCons (not attributable to user input)."""
    pass
class UserError(Exception):
    """An error in the user's build description or invocation."""
    pass
class StopError(Exception):
    """An error that stops processing of the current operation."""
    pass
class SConsEnvironmentError(Exception):
    """An environment-related SCons error (distinct from the builtin OSError/EnvironmentError)."""
    pass
class MSVCError(IOError):
    """An IOError raised by Microsoft Visual C++ tool support."""
    pass
class ExplicitExit(Exception):
    """Raised to terminate the build via an explicit Exit() call.

    Carries the node being processed and the requested exit status; the
    status is mirrored into ``exitstatus`` for uniformity with BuildError.
    """
    def __init__(self, node=None, status=None, *args):
        Exception.__init__(self, *args)
        self.node = node
        self.status = status
        self.exitstatus = status
def convert_to_BuildError(status, exc_info=None):
    """
    Convert any return code to a BuildError Exception.

    :Parameters:
      - `status`: can either be a return code or an Exception.

    The buildError.status we set here will normally be
    used as the exit status of the "scons" process.
    """
    if not exc_info and isinstance(status, Exception):
        exc_info = (status.__class__, status, None)
    if isinstance(status, BuildError):
        buildError = status
        buildError.exitstatus = 2  # always exit with 2 on build errors
    elif isinstance(status, ExplicitExit):
        status = status.status
        errstr = 'Explicit exit, status %s' % status
        buildError = BuildError(
            errstr=errstr,
            status=status,      # might be 0, OK here
            exitstatus=status,  # might be 0, OK here
            exc_info=exc_info)
    elif isinstance(status, (StopError, UserError)):
        buildError = BuildError(
            errstr=str(status),
            status=2,
            exitstatus=2,
            exc_info=exc_info)
    # NOTE: this branch must precede the generic OSError/IOError branch
    # below, because shutil.SameFileError subclasses OSError (Python 3.4+).
    elif isinstance(status, shutil.SameFileError):
        # PY3 has an exception for when copying a file to itself.
        # Its object provides info differently than below.
        try:
            filename = status.filename
        except AttributeError:
            filename = None
        buildError = BuildError(
            errstr=status.args[0],
            status=status.errno,
            exitstatus=2,
            filename=filename,
            exc_info=exc_info)
    elif isinstance(status, (SConsEnvironmentError, OSError, IOError)):
        # If an IOError/OSError happens, raise a BuildError.
        # Report the name of the file or directory that caused the
        # error, which might be different from the target being built
        # (for example, failure to create the directory in which the
        # target file will appear).
        filename = getattr(status, 'filename', None)
        strerror = getattr(status, 'strerror', str(status))
        errno = getattr(status, 'errno', 2)
        buildError = BuildError(
            errstr=strerror,
            status=errno,
            exitstatus=2,
            filename=filename,
            exc_info=exc_info)
    elif isinstance(status, Exception):
        # Any other exception: include its class name in the message.
        buildError = BuildError(
            errstr='%s : %s' % (status.__class__.__name__, status),
            status=2,
            exitstatus=2,
            exc_info=exc_info)
    elif SCons.Util.is_String(status):
        buildError = BuildError(
            errstr=status,
            status=2,
            exitstatus=2)
    else:
        # Plain numeric (or other) return code from a failed action;
        # preserve it as the action status but exit with 2.
        buildError = BuildError(
            errstr="Error %s" % status,
            status=status,
            exitstatus=2)
    #import sys
    #sys.stderr.write("convert_to_BuildError: status %s => (errstr %s, status %s)\n"%(status,buildError.errstr, buildError.status))
    return buildError
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: | PypiClean |
# django/db/models/query.py (extraction artifact cleaned)
import copy
import itertools
import sys
import warnings
from django.core import exceptions
from django.db import connections, router, transaction, IntegrityError
from django.db.models.constants import LOOKUP_SEP
from django.db.models.fields import AutoField
from django.db.models.query_utils import (Q, select_related_descend,
deferred_class_factory, InvalidQuery)
from django.db.models.deletion import Collector
from django.db.models import sql
from django.utils.functional import partition
from django.utils import six
# Used to control how many objects are worked with at once in some cases (e.g.
# when deleting objects).
CHUNK_SIZE = 100
ITER_CHUNK_SIZE = CHUNK_SIZE
# The maximum number of items to display in a QuerySet.__repr__
REPR_OUTPUT_SIZE = 20
# Pull into this namespace for backwards compatibility.
EmptyResultSet = sql.EmptyResultSet
class QuerySet(object):
"""
Represents a lazy database lookup for a set of objects.
"""
    def __init__(self, model=None, query=None, using=None):
        """
        Initialise the QuerySet.

        model: the model class this queryset yields (None for EmptyQuerySet).
        query: an existing sql.Query to wrap; a fresh one is built if omitted.
        using: database alias to use (may be None).
        """
        self.model = model
        # EmptyQuerySet instantiates QuerySet with model as None
        self._db = using
        self.query = query or sql.Query(self.model)
        self._result_cache = None  # filled lazily as results are fetched
        self._iter = None  # live iterator while the cache is partially filled
        self._sticky_filter = False
        self._for_write = False  # True when the connection is chosen for writes
        self._prefetch_related_lookups = []
        self._prefetch_done = False
        self._known_related_objects = {}  # {rel_field, {pk: rel_obj}}
########################
# PYTHON MAGIC METHODS #
########################
def __deepcopy__(self, memo):
"""
Deep copy of a QuerySet doesn't populate the cache
"""
obj = self.__class__()
for k,v in self.__dict__.items():
if k in ('_iter','_result_cache'):
obj.__dict__[k] = None
else:
obj.__dict__[k] = copy.deepcopy(v, memo)
return obj
    def __getstate__(self):
        """
        Allows the QuerySet to be pickled.
        """
        # Force the cache to be fully populated.
        len(self)
        obj_dict = self.__dict__.copy()
        obj_dict['_iter'] = None
        # Field objects are re-keyed by *name* here; __setstate__ resolves
        # them back to field objects on unpickling.
        obj_dict['_known_related_objects'] = dict(
            (field.name, val) for field, val in self._known_related_objects.items()
        )
        return obj_dict
def __setstate__(self, obj_dict):
model = obj_dict['model']
if model is None:
# if model is None, then self should be emptyqs and the related
# objects do not matter.
self._known_related_objects = {}
else:
opts = model._meta
self._known_related_objects = dict(
(opts.get_field(field.name if hasattr(field, 'name') else field), val)
for field, val in obj_dict['_known_related_objects'].items()
)
self.__dict__.update(obj_dict)
def __repr__(self):
data = list(self[:REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return repr(data)
    def __len__(self):
        """Return the number of results, fully populating the cache."""
        # Since __len__ is called quite frequently (for example, as part of
        # list(qs), we make some effort here to be as efficient as possible
        # whilst not messing up any existing iterators against the QuerySet.
        if self._result_cache is None:
            if self._iter:
                # A partially-consumed iterator exists: drain it into the cache.
                self._result_cache = list(self._iter)
            else:
                self._result_cache = list(self.iterator())
        elif self._iter:
            # Cache exists but is incomplete; finish consuming the iterator.
            self._result_cache.extend(self._iter)
        if self._prefetch_related_lookups and not self._prefetch_done:
            self._prefetch_related_objects()
        return len(self._result_cache)
    def __iter__(self):
        """Iterate over results, reusing and filling the cache incrementally."""
        if self._prefetch_related_lookups and not self._prefetch_done:
            # We need all the results in order to be able to do the prefetch
            # in one go. To minimize code duplication, we use the __len__
            # code path which also forces this, and also does the prefetch
            len(self)
        if self._result_cache is None:
            self._iter = self.iterator()
            self._result_cache = []
        if self._iter:
            return self._result_iter()
        # Python's list iterator is better than our version when we're just
        # iterating over the cache.
        return iter(self._result_cache)
    def _result_iter(self):
        """Yield cached results, pulling further rows from the DB as needed."""
        pos = 0
        while 1:
            upper = len(self._result_cache)
            while pos < upper:
                yield self._result_cache[pos]
                pos = pos + 1
            if not self._iter:
                # Underlying iterator exhausted: nothing more to yield.
                raise StopIteration
            if len(self._result_cache) <= pos:
                self._fill_cache()
    def __bool__(self):
        """Return True if the queryset has at least one result."""
        if self._prefetch_related_lookups and not self._prefetch_done:
            # We need all the results in order to be able to do the prefetch
            # in one go. To minimize code duplication, we use the __len__
            # code path which also forces this, and also does the prefetch
            len(self)
        if self._result_cache is not None:
            return bool(self._result_cache)
        try:
            # Fetch only the first result instead of evaluating everything.
            next(iter(self))
        except StopIteration:
            return False
        return True
    # Python 2 truth-testing protocol: delegate to the Python 3 __bool__.
    def __nonzero__(self):      # Python 2 compatibility
        return type(self).__bool__(self)
    def __contains__(self, val):
        """Return True if *val* is among the results, fetching rows lazily."""
        # The 'in' operator works without this method, due to __iter__. This
        # implementation exists only to shortcut the creation of Model
        # instances, by bailing out early if we find a matching element.
        pos = 0
        if self._result_cache is not None:
            if val in self._result_cache:
                return True
            elif self._iter is None:
                # iterator is exhausted, so we have our answer
                return False
            # remember not to check these again:
            pos = len(self._result_cache)
        else:
            # We need to start filling the result cache out. The following
            # ensures that self._iter is not None and self._result_cache is not
            # None
            it = iter(self)
        # Carry on, one result at a time.
        while True:
            if len(self._result_cache) <= pos:
                self._fill_cache(num=1)
            if self._iter is None:
                # we ran out of items
                return False
            if self._result_cache[pos] == val:
                return True
            pos += 1
def __getitem__(self, k):
"""
Retrieves an item or slice from the set of results.
"""
if not isinstance(k, (slice,) + six.integer_types):
raise TypeError
assert ((not isinstance(k, slice) and (k >= 0))
or (isinstance(k, slice) and (k.start is None or k.start >= 0)
and (k.stop is None or k.stop >= 0))), \
"Negative indexing is not supported."
if self._result_cache is not None:
if self._iter is not None:
# The result cache has only been partially populated, so we may
# need to fill it out a bit more.
if isinstance(k, slice):
if k.stop is not None:
# Some people insist on passing in strings here.
bound = int(k.stop)
else:
bound = None
else:
bound = k + 1
if len(self._result_cache) < bound:
self._fill_cache(bound - len(self._result_cache))
return self._result_cache[k]
if isinstance(k, slice):
qs = self._clone()
if k.start is not None:
start = int(k.start)
else:
start = None
if k.stop is not None:
stop = int(k.stop)
else:
stop = None
qs.query.set_limits(start, stop)
return k.step and list(qs)[::k.step] or qs
try:
qs = self._clone()
qs.query.set_limits(k, k + 1)
return list(qs)[0]
except self.model.DoesNotExist as e:
raise IndexError(e.args)
def __and__(self, other):
self._merge_sanity_check(other)
if isinstance(other, EmptyQuerySet):
return other._clone()
combined = self._clone()
combined._merge_known_related_objects(other)
combined.query.combine(other.query, sql.AND)
return combined
def __or__(self, other):
self._merge_sanity_check(other)
combined = self._clone()
if isinstance(other, EmptyQuerySet):
return combined
combined._merge_known_related_objects(other)
combined.query.combine(other.query, sql.OR)
return combined
####################################
# METHODS THAT DO DATABASE QUERIES #
####################################
    def iterator(self):
        """
        An iterator over the results from applying this QuerySet to the
        database.

        Handles select_related cache filling, deferred fields (only/defer),
        extra selects, aggregates and known related objects per row.
        """
        fill_cache = False
        if connections[self.db].features.supports_select_related:
            fill_cache = self.query.select_related
        if isinstance(fill_cache, dict):
            # select_related was called with specific field names.
            requested = fill_cache
        else:
            requested = None
        max_depth = self.query.max_depth
        extra_select = list(self.query.extra_select)
        aggregate_select = list(self.query.aggregate_select)
        only_load = self.query.get_loaded_field_names()
        if not fill_cache:
            fields = self.model._meta.fields
        load_fields = []
        # If only/defer clauses have been specified,
        # build the list of fields that are to be loaded.
        if only_load:
            for field, model in self.model._meta.get_fields_with_model():
                if model is None:
                    model = self.model
                try:
                    if field.name in only_load[model]:
                        # Add a field that has been explicitly included
                        load_fields.append(field.name)
                except KeyError:
                    # Model wasn't explicitly listed in the only_load table
                    # Therefore, we need to load all fields from this model
                    load_fields.append(field.name)
        # Row layout: [extra selects][model fields][aggregates].
        index_start = len(extra_select)
        aggregate_start = index_start + len(load_fields or self.model._meta.fields)
        skip = None
        if load_fields and not fill_cache:
            # Some fields have been deferred, so we have to initialise
            # via keyword arguments.
            skip = set()
            init_list = []
            for field in fields:
                if field.name not in load_fields:
                    skip.add(field.attname)
                else:
                    init_list.append(field.attname)
            model_cls = deferred_class_factory(self.model, skip)
        # Cache db and model outside the loop
        db = self.db
        model = self.model
        compiler = self.query.get_compiler(using=db)
        if fill_cache:
            klass_info = get_klass_info(model, max_depth=max_depth,
                                        requested=requested, only_load=only_load)
        for row in compiler.results_iter():
            if fill_cache:
                obj, _ = get_cached_row(row, index_start, db, klass_info,
                                        offset=len(aggregate_select))
            else:
                # Omit aggregates in object creation.
                row_data = row[index_start:aggregate_start]
                if skip:
                    obj = model_cls(**dict(zip(init_list, row_data)))
                else:
                    obj = model(*row_data)
                # Store the source database of the object
                obj._state.db = db
                # This object came from the database; it's not being added.
                obj._state.adding = False
            if extra_select:
                for i, k in enumerate(extra_select):
                    setattr(obj, k, row[i])
            # Add the aggregates to the model
            if aggregate_select:
                for i, aggregate in enumerate(aggregate_select):
                    setattr(obj, aggregate, row[i + aggregate_start])
            # Add the known related objects to the model, if there are any
            if self._known_related_objects:
                for field, rel_objs in self._known_related_objects.items():
                    pk = getattr(obj, field.get_attname())
                    try:
                        rel_obj = rel_objs[pk]
                    except KeyError:
                        pass          # may happen in qs1 | qs2 scenarios
                    else:
                        setattr(obj, field.name, rel_obj)
            yield obj
    def aggregate(self, *args, **kwargs):
        """
        Returns a dictionary containing the calculations (aggregation)
        over the current queryset

        If args is present the expression is passed as a kwarg using
        the Aggregate object's default alias.
        """
        if self.query.distinct_fields:
            raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
        for arg in args:
            # Positional aggregates are keyed by their default alias.
            kwargs[arg.default_alias] = arg
        # Work on a clone so the aggregation does not mutate this queryset.
        query = self.query.clone()
        for (alias, aggregate_expr) in kwargs.items():
            query.add_aggregate(aggregate_expr, self.model, alias,
                is_summary=True)
        return query.get_aggregation(using=self.db)
def count(self):
"""
Performs a SELECT COUNT() and returns the number of records as an
integer.
If the QuerySet is already fully cached this simply returns the length
of the cached results set to avoid multiple SELECT COUNT(*) calls.
"""
if self._result_cache is not None and not self._iter:
return len(self._result_cache)
return self.query.get_count(using=self.db)
    def get(self, *args, **kwargs):
        """
        Performs the query and returns a single object matching the given
        keyword arguments.

        Raises model.DoesNotExist when nothing matches and
        model.MultipleObjectsReturned when more than one row matches.
        """
        clone = self.filter(*args, **kwargs)
        if self.query.can_filter():
            # Drop ordering; it is irrelevant for a single-object fetch.
            clone = clone.order_by()
        num = len(clone)
        if num == 1:
            return clone._result_cache[0]
        if not num:
            raise self.model.DoesNotExist(
                "%s matching query does not exist." %
                self.model._meta.object_name)
        raise self.model.MultipleObjectsReturned(
            "get() returned more than one %s -- it returned %s!" %
            (self.model._meta.object_name, num))
    def create(self, **kwargs):
        """
        Creates a new object with the given kwargs, saving it to the database
        and returning the created object.

        force_insert guarantees an INSERT (never an UPDATE of an existing row).
        """
        obj = self.model(**kwargs)
        self._for_write = True
        obj.save(force_insert=True, using=self.db)
        return obj
    def bulk_create(self, objs, batch_size=None):
        """
        Inserts each of the instances into the database. This does *not* call
        save() on each of the instances, does not send any pre/post save
        signals, and does not set the primary key attribute if it is an
        autoincrement field.

        batch_size: maximum number of objects per INSERT (None = one batch).
        Returns the objs list unchanged.
        """
        # So this case is fun. When you bulk insert you don't get the primary
        # keys back (if it's an autoincrement), so you can't insert into the
        # child tables which references this. There are two workarounds, 1)
        # this could be implemented if you didn't have an autoincrement pk,
        # and 2) you could do it by doing O(n) normal inserts into the parent
        # tables to get the primary keys back, and then doing a single bulk
        # insert into the childmost table. Some databases might allow doing
        # this by using RETURNING clause for the insert query. We're punting
        # on these for now because they are relatively rare cases.
        assert batch_size is None or batch_size > 0
        if self.model._meta.parents:
            raise ValueError("Can't bulk create an inherited model")
        if not objs:
            return objs
        self._for_write = True
        connection = connections[self.db]
        fields = self.model._meta.local_fields
        # Run the whole set of inserts in a single transaction, entering
        # transaction management only if the caller has not already.
        if not transaction.is_managed(using=self.db):
            transaction.enter_transaction_management(using=self.db)
            forced_managed = True
        else:
            forced_managed = False
        try:
            if (connection.features.can_combine_inserts_with_and_without_auto_increment_pk
                and self.model._meta.has_auto_field):
                self._batched_insert(objs, fields, batch_size)
            else:
                # Split objects that already have a pk from those that don't,
                # since the latter must omit the AutoField from the INSERT.
                objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs)
                if objs_with_pk:
                    self._batched_insert(objs_with_pk, fields, batch_size)
                if objs_without_pk:
                    fields= [f for f in fields if not isinstance(f, AutoField)]
                    self._batched_insert(objs_without_pk, fields, batch_size)
            if forced_managed:
                transaction.commit(using=self.db)
            else:
                transaction.commit_unless_managed(using=self.db)
        finally:
            if forced_managed:
                transaction.leave_transaction_management(using=self.db)
        return objs
    def get_or_create(self, **kwargs):
        """
        Looks up an object with the given kwargs, creating one if necessary.
        Returns a tuple of (object, created), where created is a boolean
        specifying whether an object was created.

        'defaults' (a dict) supplies extra fields used only when creating.
        """
        assert kwargs, \
                'get_or_create() must be passed at least one keyword argument'
        defaults = kwargs.pop('defaults', {})
        lookup = kwargs.copy()
        # Normalise attnames (e.g. author_id) to field names for the lookup.
        for f in self.model._meta.fields:
            if f.attname in lookup:
                lookup[f.name] = lookup.pop(f.attname)
        try:
            self._for_write = True
            return self.get(**lookup), False
        except self.model.DoesNotExist:
            try:
                # Double-underscore lookups (e.g. name__iexact) cannot be
                # used as constructor arguments.
                params = dict([(k, v) for k, v in kwargs.items() if '__' not in k])
                params.update(defaults)
                obj = self.model(**params)
                # Savepoint so a failed INSERT does not poison an outer
                # transaction (e.g. when another process created the row).
                sid = transaction.savepoint(using=self.db)
                obj.save(force_insert=True, using=self.db)
                transaction.savepoint_commit(sid, using=self.db)
                return obj, True
            except IntegrityError as e:
                transaction.savepoint_rollback(sid, using=self.db)
                exc_info = sys.exc_info()
                try:
                    # A concurrent create may have won the race: retry the get.
                    return self.get(**lookup), False
                except self.model.DoesNotExist:
                    # Re-raise the IntegrityError with its original traceback.
                    six.reraise(*exc_info)
def latest(self, field_name=None):
"""
Returns the latest object, according to the model's 'get_latest_by'
option or optional given field_name.
"""
latest_by = field_name or self.model._meta.get_latest_by
assert bool(latest_by), "latest() requires either a field_name parameter or 'get_latest_by' in the model"
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken."
obj = self._clone()
obj.query.set_limits(high=1)
obj.query.clear_ordering()
obj.query.add_ordering('-%s' % latest_by)
return obj.get()
def in_bulk(self, id_list):
"""
Returns a dictionary mapping each of the given IDs to the object with
that ID.
"""
assert self.query.can_filter(), \
"Cannot use 'limit' or 'offset' with in_bulk"
if not id_list:
return {}
qs = self.filter(pk__in=id_list).order_by()
return dict([(obj._get_pk_val(), obj) for obj in qs])
    def delete(self):
        """
        Deletes the records in the current QuerySet.

        Related objects are gathered and removed by the deletion Collector.
        """
        assert self.query.can_filter(), \
                "Cannot use 'limit' or 'offset' with delete."
        del_query = self._clone()
        # The delete is actually 2 queries - one to find related objects,
        # and one to delete. Make sure that the discovery of related
        # objects is performed on the same database as the deletion.
        del_query._for_write = True
        # Disable non-supported fields.
        del_query.query.select_for_update = False
        del_query.query.select_related = False
        del_query.query.clear_ordering(force_empty=True)
        collector = Collector(using=del_query.db)
        collector.collect(del_query)
        collector.delete()
        # Clear the result cache, in case this QuerySet gets reused.
        self._result_cache = None
    # Mark as data-modifying so the template engine will not invoke it.
    delete.alters_data = True
    def _raw_delete(self, using):
        """
        Deletes objects found from the given queryset in single direct SQL
        query. No signals are sent, and there is no protection for cascades.
        """
        sql.DeleteQuery(self.model).delete_qs(self, using)
    # Mark as data-modifying so the template engine will not invoke it.
    _raw_delete.alters_data = True
    def update(self, **kwargs):
        """
        Updates all elements in the current QuerySet, setting all the given
        fields to the appropriate values.

        Returns the number of rows matched (backend-dependent semantics).
        """
        assert self.query.can_filter(), \
                "Cannot update a query once a slice has been taken."
        self._for_write = True
        query = self.query.clone(sql.UpdateQuery)
        query.add_update_values(kwargs)
        # Wrap the UPDATE in transaction management unless the caller is
        # already managing one.
        if not transaction.is_managed(using=self.db):
            transaction.enter_transaction_management(using=self.db)
            forced_managed = True
        else:
            forced_managed = False
        try:
            rows = query.get_compiler(self.db).execute_sql(None)
            if forced_managed:
                transaction.commit(using=self.db)
            else:
                transaction.commit_unless_managed(using=self.db)
        finally:
            if forced_managed:
                transaction.leave_transaction_management(using=self.db)
        # Cached results are stale after an update.
        self._result_cache = None
        return rows
    # Mark as data-modifying so the template engine will not invoke it.
    update.alters_data = True
    def _update(self, values):
        """
        A version of update that accepts field objects instead of field names.
        Used primarily for model saving and not intended for use by general
        code (it requires too much poking around at model internals to be
        useful at that level).
        """
        assert self.query.can_filter(), \
                "Cannot update a query once a slice has been taken."
        query = self.query.clone(sql.UpdateQuery)
        query.add_update_fields(values)
        # Cached results are stale after an update.
        self._result_cache = None
        return query.get_compiler(self.db).execute_sql(None)
    # Mark as data-modifying so the template engine will not invoke it.
    _update.alters_data = True
def exists(self):
if self._result_cache is None:
return self.query.has_results(using=self.db)
return bool(self._result_cache)
    def _prefetch_related_objects(self):
        """Run the registered prefetch_related lookups over cached results."""
        # This method can only be called once the result cache has been filled.
        prefetch_related_objects(self._result_cache, self._prefetch_related_lookups)
        self._prefetch_done = True
##################################################
# PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
##################################################
    def values(self, *fields):
        """Return a ValuesQuerySet over the given fields (one dict per row)."""
        return self._clone(klass=ValuesQuerySet, setup=True, _fields=fields)
def values_list(self, *fields, **kwargs):
flat = kwargs.pop('flat', False)
if kwargs:
raise TypeError('Unexpected keyword arguments to values_list: %s'
% (list(kwargs),))
if flat and len(fields) > 1:
raise TypeError("'flat' is not valid when values_list is called with more than one field.")
return self._clone(klass=ValuesListQuerySet, setup=True, flat=flat,
_fields=fields)
    def dates(self, field_name, kind, order='ASC'):
        """
        Returns a list of datetime objects representing all available dates for
        the given field_name, scoped to 'kind'.
        """
        # NOTE(review): these asserts vanish under `python -O`; invalid input
        # would then surface later inside the DateQuerySet setup.
        assert kind in ("month", "year", "day"), \
                "'kind' must be one of 'year', 'month' or 'day'."
        assert order in ('ASC', 'DESC'), \
                "'order' must be either 'ASC' or 'DESC'."
        return self._clone(klass=DateQuerySet, setup=True,
                _field_name=field_name, _kind=kind, _order=order)
    def none(self):
        """
        Returns an empty QuerySet.
        """
        # Cloning with the EmptyQuerySet class yields a queryset that never
        # hits the database (see EmptyQuerySet elsewhere in this module).
        return self._clone(klass=EmptyQuerySet)
##################################################################
# PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
##################################################################
    def all(self):
        """
        Returns a new QuerySet that is a copy of the current one. This allows a
        QuerySet to proxy for a model manager in some cases.
        """
        return self._clone()
    def filter(self, *args, **kwargs):
        """
        Returns a new QuerySet instance with the args ANDed to the existing
        set.
        """
        return self._filter_or_exclude(False, *args, **kwargs)
    def exclude(self, *args, **kwargs):
        """
        Returns a new QuerySet instance with NOT (args) ANDed to the existing
        set.
        """
        return self._filter_or_exclude(True, *args, **kwargs)
def _filter_or_exclude(self, negate, *args, **kwargs):
if args or kwargs:
assert self.query.can_filter(), \
"Cannot filter a query once a slice has been taken."
clone = self._clone()
if negate:
clone.query.add_q(~Q(*args, **kwargs))
else:
clone.query.add_q(Q(*args, **kwargs))
return clone
    def complex_filter(self, filter_obj):
        """
        Returns a new QuerySet instance with filter_obj added to the filters.

        filter_obj can be a Q object (or anything with an add_to_query()
        method) or a dictionary of keyword lookup arguments.

        This exists to support framework features such as 'limit_choices_to',
        and usually it will be more natural to use other methods.
        """
        if isinstance(filter_obj, Q) or hasattr(filter_obj, 'add_to_query'):
            clone = self._clone()
            clone.query.add_q(filter_obj)
            return clone
        else:
            # negate=None is falsy, so this behaves like filter().
            return self._filter_or_exclude(None, **filter_obj)
def select_for_update(self, **kwargs):
"""
Returns a new QuerySet instance that will select objects with a
FOR UPDATE lock.
"""
# Default to false for nowait
nowait = kwargs.pop('nowait', False)
obj = self._clone()
obj.query.select_for_update = True
obj.query.select_for_update_nowait = nowait
return obj
    def select_related(self, *fields, **kwargs):
        """
        Returns a new QuerySet instance that will select related objects.

        If fields are specified, they must be ForeignKey fields and only those
        related objects are included in the selection.
        """
        if 'depth' in kwargs:
            warnings.warn('The "depth" keyword argument has been deprecated.\n'
                    'Use related field names instead.', PendingDeprecationWarning)
        depth = kwargs.pop('depth', 0)
        if kwargs:
            raise TypeError('Unexpected keyword arguments to select_related: %s'
                    % (list(kwargs),))
        obj = self._clone()
        if fields:
            # Explicit field list and the deprecated depth are incompatible.
            if depth:
                raise TypeError('Cannot pass both "depth" and fields to select_related()')
            obj.query.add_select_related(fields)
        else:
            obj.query.select_related = True
        if depth:
            obj.query.max_depth = depth
        return obj
    def prefetch_related(self, *lookups):
        """
        Returns a new QuerySet instance that will prefetch the specified
        Many-To-One and Many-To-Many related objects when the QuerySet is
        evaluated.

        When prefetch_related() is called more than once, the list of lookups to
        prefetch is appended to. If prefetch_related(None) is called, the
        list is cleared.
        """
        clone = self._clone()
        if lookups == (None,):
            clone._prefetch_related_lookups = []
        else:
            clone._prefetch_related_lookups.extend(lookups)
        return clone
    def dup_select_related(self, other):
        """
        Copies the related selection status from the QuerySet 'other' to the
        current QuerySet.
        """
        self.query.select_related = other.query.select_related
    def annotate(self, *args, **kwargs):
        """
        Return a query set in which the returned objects have been annotated
        with data aggregated from related fields.
        """
        for arg in args:
            # Positional aggregates use their default alias; reject clashes
            # with explicitly named ones.
            if arg.default_alias in kwargs:
                raise ValueError("The named annotation '%s' conflicts with the "
                                 "default name for another annotation."
                                 % arg.default_alias)
            kwargs[arg.default_alias] = arg
        names = getattr(self, '_fields', None)
        if names is None:
            names = set(self.model._meta.get_all_field_names())
        for aggregate in kwargs:
            # An annotation must not shadow a real model field.
            if aggregate in names:
                raise ValueError("The annotation '%s' conflicts with a field on "
                    "the model." % aggregate)
        obj = self._clone()
        obj._setup_aggregate_query(list(kwargs))
        # Add the aggregates to the query
        for (alias, aggregate_expr) in kwargs.items():
            obj.query.add_aggregate(aggregate_expr, self.model, alias,
                is_summary=False)
        return obj
def order_by(self, *field_names):
"""
Returns a new QuerySet instance with the ordering changed.
"""
assert self.query.can_filter(), \
"Cannot reorder a query once a slice has been taken."
obj = self._clone()
obj.query.clear_ordering()
obj.query.add_ordering(*field_names)
return obj
def distinct(self, *field_names):
"""
Returns a new QuerySet instance that will select only distinct results.
"""
assert self.query.can_filter(), \
"Cannot create distinct fields once a slice has been taken."
obj = self._clone()
obj.query.add_distinct_fields(*field_names)
return obj
def extra(self, select=None, where=None, params=None, tables=None,
order_by=None, select_params=None):
"""
Adds extra SQL fragments to the query.
"""
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken"
clone = self._clone()
clone.query.add_extra(select, select_params, where, params, tables, order_by)
return clone
def reverse(self):
"""
Reverses the ordering of the QuerySet.
"""
clone = self._clone()
clone.query.standard_ordering = not clone.query.standard_ordering
return clone
def defer(self, *fields):
"""
Defers the loading of data for certain fields until they are accessed.
The set of fields to defer is added to any existing set of deferred
fields. The only exception to this is if None is passed in as the only
parameter, in which case all deferrals are removed (None acts as a
reset option).
"""
clone = self._clone()
if fields == (None,):
clone.query.clear_deferred_loading()
else:
clone.query.add_deferred_loading(fields)
return clone
def only(self, *fields):
"""
Essentially, the opposite of defer. Only the fields passed into this
method and that are not already specified as deferred are loaded
immediately when the queryset is evaluated.
"""
if fields == (None,):
# Can only pass None to defer(), not only(), as the rest option.
# That won't stop people trying to do this, so let's be explicit.
raise TypeError("Cannot pass None as an argument to only().")
clone = self._clone()
clone.query.add_immediate_loading(fields)
return clone
def using(self, alias):
"""
Selects which database this QuerySet should excecute its query against.
"""
clone = self._clone()
clone._db = alias
return clone
###################################
# PUBLIC INTROSPECTION ATTRIBUTES #
###################################
def ordered(self):
"""
Returns True if the QuerySet is ordered -- i.e. has an order_by()
clause or a default ordering on the model.
"""
if self.query.extra_order_by or self.query.order_by:
return True
elif self.query.default_ordering and self.query.model._meta.ordering:
return True
else:
return False
ordered = property(ordered)
@property
def db(self):
"Return the database that will be used if this query is executed now"
if self._for_write:
return self._db or router.db_for_write(self.model)
return self._db or router.db_for_read(self.model)
###################
# PRIVATE METHODS #
###################
def _batched_insert(self, objs, fields, batch_size):
"""
A little helper method for bulk_insert to insert the bulk one batch
at a time. Inserts recursively a batch from the front of the bulk and
then _batched_insert() the remaining objects again.
"""
if not objs:
return
ops = connections[self.db].ops
batch_size = (batch_size or max(ops.bulk_batch_size(fields, objs), 1))
for batch in [objs[i:i+batch_size]
for i in range(0, len(objs), batch_size)]:
self.model._base_manager._insert(batch, fields=fields,
using=self.db)
    def _clone(self, klass=None, setup=False, **kwargs):
        # Returns a copy of this QuerySet, optionally as a different
        # QuerySet subclass (via ``klass``), with an independent query and
        # no shared result cache. ``kwargs`` are stamped directly onto the
        # clone's __dict__; ``setup`` triggers subclass-specific query setup.
        if klass is None:
            klass = self.__class__
        query = self.query.clone()
        if self._sticky_filter:
            # Propagate the one-shot "sticky" flag set by _next_is_sticky().
            query.filter_is_sticky = True
        c = klass(model=self.model, query=query, using=self._db)
        c._for_write = self._for_write
        # Copy the list (not the reference) so later prefetch_related()
        # calls on the clone don't mutate this instance's lookups.
        c._prefetch_related_lookups = self._prefetch_related_lookups[:]
        c._known_related_objects = self._known_related_objects
        c.__dict__.update(kwargs)
        if setup and hasattr(c, '_setup_query'):
            c._setup_query()
        return c
def _fill_cache(self, num=None):
"""
Fills the result cache with 'num' more entries (or until the results
iterator is exhausted).
"""
if self._iter:
try:
for i in range(num or ITER_CHUNK_SIZE):
self._result_cache.append(next(self._iter))
except StopIteration:
self._iter = None
def _next_is_sticky(self):
"""
Indicates that the next filter call and the one following that should
be treated as a single filter. This is only important when it comes to
determining when to reuse tables for many-to-many filters. Required so
that we can filter naturally on the results of related managers.
This doesn't return a clone of the current QuerySet (it returns
"self"). The method is only used internally and should be immediately
followed by a filter() that does create a clone.
"""
self._sticky_filter = True
return self
def _merge_sanity_check(self, other):
"""
Checks that we are merging two comparable QuerySet classes. By default
this does nothing, but see the ValuesQuerySet for an example of where
it's useful.
"""
pass
def _merge_known_related_objects(self, other):
"""
Keep track of all known related objects from either QuerySet instance.
"""
for field, objects in other._known_related_objects.items():
self._known_related_objects.setdefault(field, {}).update(objects)
def _setup_aggregate_query(self, aggregates):
"""
Prepare the query for computing a result that contains aggregate annotations.
"""
opts = self.model._meta
if self.query.group_by is None:
field_names = [f.attname for f in opts.fields]
self.query.add_fields(field_names, False)
self.query.set_group_by()
def _prepare(self):
return self
def _as_sql(self, connection):
"""
Returns the internal query's SQL and parameters (as a tuple).
"""
obj = self.values("pk")
if obj._db is None or connection == connections[obj._db]:
return obj.query.get_compiler(connection=connection).as_nested_sql()
raise ValueError("Can't do subqueries with queries on different DBs.")
# When used as part of a nested query, a queryset will never be an "always
# empty" result.
value_annotation = True
class ValuesQuerySet(QuerySet):
    """
    A QuerySet that yields one dictionary per row (column name -> value)
    instead of model instances; created by QuerySet.values().
    """
    def __init__(self, *args, **kwargs):
        super(ValuesQuerySet, self).__init__(*args, **kwargs)
        # select_related isn't supported in values(). (FIXME -#3358)
        self.query.select_related = False
        # QuerySet.clone() will also set up the _fields attribute with the
        # names of the model fields to select.
    def iterator(self):
        # Purge any extra columns that haven't been explicitly asked for
        extra_names = list(self.query.extra_select)
        field_names = self.field_names
        aggregate_names = list(self.query.aggregate_select)
        # Result columns arrive in a fixed order: extra() selections first,
        # then concrete fields, then aggregates.
        names = extra_names + field_names + aggregate_names
        for row in self.query.get_compiler(self.db).results_iter():
            yield dict(zip(names, row))
    def delete(self):
        # values().delete() doesn't work currently - make sure it raises an
        # user friendly error.
        raise TypeError("Queries with .values() or .values_list() applied "
                        "can't be deleted")
    def _setup_query(self):
        """
        Constructs the field_names list that the values query will be
        retrieving.
        Called by the _clone() method after initializing the rest of the
        instance.
        """
        self.query.clear_deferred_loading()
        self.query.clear_select_fields()
        if self._fields:
            self.extra_names = []
            self.aggregate_names = []
            if not self.query.extra and not self.query.aggregates:
                # Short cut - if there are no extra or aggregates, then
                # the values() clause must be just field names.
                self.field_names = list(self._fields)
            else:
                self.query.default_cols = False
                self.field_names = []
                for f in self._fields:
                    # we inspect the full extra_select list since we might
                    # be adding back an extra select item that we hadn't
                    # had selected previously.
                    if f in self.query.extra:
                        self.extra_names.append(f)
                    elif f in self.query.aggregate_select:
                        self.aggregate_names.append(f)
                    else:
                        self.field_names.append(f)
        else:
            # Default to all fields.
            self.extra_names = None
            self.field_names = [f.attname for f in self.model._meta.fields]
            self.aggregate_names = None
        self.query.select = []
        if self.extra_names is not None:
            self.query.set_extra_mask(self.extra_names)
        self.query.add_fields(self.field_names, True)
        if self.aggregate_names is not None:
            self.query.set_aggregate_mask(self.aggregate_names)
    def _clone(self, klass=None, setup=False, **kwargs):
        """
        Cloning a ValuesQuerySet preserves the current fields.
        """
        c = super(ValuesQuerySet, self)._clone(klass, **kwargs)
        if not hasattr(c, '_fields'):
            # Only clone self._fields if _fields wasn't passed into the cloning
            # call directly.
            c._fields = self._fields[:]
        c.field_names = self.field_names
        c.extra_names = self.extra_names
        c.aggregate_names = self.aggregate_names
        if setup and hasattr(c, '_setup_query'):
            c._setup_query()
        return c
    def _merge_sanity_check(self, other):
        # Merged values() querysets must select the same columns.
        super(ValuesQuerySet, self)._merge_sanity_check(other)
        if (set(self.extra_names) != set(other.extra_names) or
            set(self.field_names) != set(other.field_names) or
            self.aggregate_names != other.aggregate_names):
            raise TypeError("Merging '%s' classes must involve the same values in each case."
                    % self.__class__.__name__)
    def _setup_aggregate_query(self, aggregates):
        """
        Prepare the query for computing a result that contains aggregate annotations.
        """
        self.query.set_group_by()
        if self.aggregate_names is not None:
            self.aggregate_names.extend(aggregates)
            self.query.set_aggregate_mask(self.aggregate_names)
        super(ValuesQuerySet, self)._setup_aggregate_query(aggregates)
    def _as_sql(self, connection):
        """
        For ValueQuerySet (and subclasses like ValuesListQuerySet), they can
        only be used as nested queries if they're already set up to select only
        a single field (in which case, that is the field column that is
        returned). This differs from QuerySet.as_sql(), where the column to
        select is set up by Django.
        """
        if ((self._fields and len(self._fields) > 1) or
                (not self._fields and len(self.model._meta.fields) > 1)):
            raise TypeError('Cannot use a multi-field %s as a filter value.'
                    % self.__class__.__name__)
        obj = self._clone()
        if obj._db is None or connection == connections[obj._db]:
            return obj.query.get_compiler(connection=connection).as_nested_sql()
        raise ValueError("Can't do subqueries with queries on different DBs.")
    def _prepare(self):
        """
        Validates that we aren't trying to do a query like
        value__in=qs.values('value1', 'value2'), which isn't valid.
        """
        if ((self._fields and len(self._fields) > 1) or
                (not self._fields and len(self.model._meta.fields) > 1)):
            raise TypeError('Cannot use a multi-field %s as a filter value.'
                    % self.__class__.__name__)
        return self
class ValuesListQuerySet(ValuesQuerySet):
    """
    A ValuesQuerySet that yields tuples (or bare values when flat=True with
    a single field) instead of dictionaries; created by
    QuerySet.values_list().
    """
    def iterator(self):
        if self.flat and len(self._fields) == 1:
            # flat=True with exactly one field: yield the values themselves.
            for row in self.query.get_compiler(self.db).results_iter():
                yield row[0]
        elif not self.query.extra_select and not self.query.aggregate_select:
            # Plain fields only: the row already matches self._fields order.
            for row in self.query.get_compiler(self.db).results_iter():
                yield tuple(row)
        else:
            # When extra(select=...) or an annotation is involved, the extra
            # cols are always at the start of the row, and we need to reorder
            # the fields to match the order in self._fields.
            extra_names = list(self.query.extra_select)
            field_names = self.field_names
            aggregate_names = list(self.query.aggregate_select)
            names = extra_names + field_names + aggregate_names
            # If a field list has been specified, use it. Otherwise, use the
            # full list of fields, including extras and aggregates.
            if self._fields:
                fields = list(self._fields) + [f for f in aggregate_names if f not in self._fields]
            else:
                fields = names
            for row in self.query.get_compiler(self.db).results_iter():
                data = dict(zip(names, row))
                yield tuple([data[f] for f in fields])
    def _clone(self, *args, **kwargs):
        clone = super(ValuesListQuerySet, self)._clone(*args, **kwargs)
        if not hasattr(clone, "flat"):
            # Only assign flat if the clone didn't already get it from kwargs
            clone.flat = self.flat
        return clone
class DateQuerySet(QuerySet):
    """
    A QuerySet that evaluates a date-valued aggregation over a single field;
    created by QuerySet.dates().
    """
    def iterator(self):
        return self.query.get_compiler(self.db).results_iter()
    def _setup_query(self):
        """
        Sets up any special features of the query attribute.
        Called by the _clone() method after initializing the rest of the
        instance.
        """
        self.query.clear_deferred_loading()
        self.query = self.query.clone(klass=sql.DateQuery, setup=True)
        self.query.select = []
        self.query.add_date_select(self._field_name, self._kind, self._order)
    def _clone(self, klass=None, setup=False, **kwargs):
        # NOTE(review): _order is not copied here even though _setup_query
        # reads it -- presumably it always arrives via **kwargs from the
        # dates() call; confirm against the caller.
        c = super(DateQuerySet, self)._clone(klass, False, **kwargs)
        c._field_name = self._field_name
        c._kind = self._kind
        if setup and hasattr(c, '_setup_query'):
            c._setup_query()
        return c
class EmptyQuerySet(QuerySet):
    """
    A QuerySet that always evaluates to zero results without ever touching
    the database. All refinement methods (filter(), order_by(), ...) simply
    return the EmptyQuerySet unchanged.
    """
    def __init__(self, model=None, query=None, using=None):
        super(EmptyQuerySet, self).__init__(model, query, using)
        # Pre-populate the result cache so evaluation never hits the DB.
        self._result_cache = []
    def __and__(self, other):
        # anything & empty == empty
        return self._clone()
    def __or__(self, other):
        # empty | other == other
        return other._clone()
    def count(self):
        return 0
    def delete(self):
        pass
    def _clone(self, klass=None, setup=False, **kwargs):
        c = super(EmptyQuerySet, self)._clone(klass, setup=setup, **kwargs)
        c._result_cache = []
        return c
    def iterator(self):
        # Return an empty iterator directly. The previous implementation
        # ("yield next(iter([]))") relied on StopIteration escaping a
        # generator body, which PEP 479 (Python 3.7+) turns into a
        # RuntimeError; iter([]) is equivalent for callers and safe.
        return iter([])
    def all(self):
        """
        Always returns EmptyQuerySet.
        """
        return self
    def filter(self, *args, **kwargs):
        """
        Always returns EmptyQuerySet.
        """
        return self
    def exclude(self, *args, **kwargs):
        """
        Always returns EmptyQuerySet.
        """
        return self
    def complex_filter(self, filter_obj):
        """
        Always returns EmptyQuerySet.
        """
        return self
    def select_related(self, *fields, **kwargs):
        """
        Always returns EmptyQuerySet.
        """
        return self
    def annotate(self, *args, **kwargs):
        """
        Always returns EmptyQuerySet.
        """
        return self
    def order_by(self, *field_names):
        """
        Always returns EmptyQuerySet.
        """
        return self
    def distinct(self, *field_names):
        """
        Always returns EmptyQuerySet.
        """
        return self
    def extra(self, select=None, where=None, params=None, tables=None,
              order_by=None, select_params=None):
        """
        Always returns EmptyQuerySet.
        """
        assert self.query.can_filter(), \
            "Cannot change a query once a slice has been taken"
        return self
    def reverse(self):
        """
        Always returns EmptyQuerySet.
        """
        return self
    def defer(self, *fields):
        """
        Always returns EmptyQuerySet.
        """
        return self
    def only(self, *fields):
        """
        Always returns EmptyQuerySet.
        """
        return self
    def update(self, **kwargs):
        """
        Don't update anything.
        """
        return 0
    def aggregate(self, *args, **kwargs):
        """
        Return a dict mapping the aggregate names to None
        """
        for arg in args:
            kwargs[arg.default_alias] = arg
        return dict([(key, None) for key in kwargs])
    def values(self, *fields):
        """
        Always returns EmptyQuerySet.
        """
        return self
    def values_list(self, *fields, **kwargs):
        """
        Always returns EmptyQuerySet.
        """
        return self
    # EmptyQuerySet is always an empty result in where-clauses (and similar
    # situations).
    value_annotation = False
def get_klass_info(klass, max_depth=0, cur_depth=0, requested=None,
                   only_load=None, local_only=False):
    """
    Helper function that recursively returns an information for a klass, to be
    used in get_cached_row. It exists just to compute this information only
    once for an entire queryset. Otherwise it would be computed for each row,
    which leads to poor performance on large querysets.
    Arguments:
     * klass - the class to retrieve (and instantiate)
     * max_depth - the maximum depth to which a select_related()
       relationship should be explored.
     * cur_depth - the current depth in the select_related() tree.
       Used in recursive calls to determine if we should dig deeper.
     * requested - A dictionary describing the select_related() tree
       that is to be retrieved. keys are field names; values are
       dictionaries describing the keys on that related object that
       are themselves to be select_related().
     * only_load - if the query has had only() or defer() applied,
       this is the list of field names that will be returned. If None,
       the full field list for `klass` can be assumed.
     * local_only - Only populate local fields. This is used when
       following reverse select-related relations
    Returns a tuple of (klass, field_names, field_count, related_fields,
    reverse_related_fields, pk_idx) consumed by get_cached_row().
    """
    if max_depth and requested is None and cur_depth > max_depth:
        # We've recursed deeply enough; stop now.
        return None
    if only_load:
        load_fields = only_load.get(klass) or set()
        # When we create the object, we will also be creating populating
        # all the parent classes, so traverse the parent classes looking
        # for fields that must be included on load.
        for parent in klass._meta.get_parent_list():
            fields = only_load.get(parent)
            if fields:
                load_fields.update(fields)
    else:
        load_fields = None
    if load_fields:
        # Handle deferred fields.
        skip = set()
        init_list = []
        # Build the list of fields that *haven't* been requested
        for field, model in klass._meta.get_fields_with_model():
            if field.name not in load_fields:
                skip.add(field.attname)
            elif local_only and model is not None:
                continue
            else:
                init_list.append(field.attname)
        # Retrieve all the requested fields
        field_count = len(init_list)
        if skip:
            klass = deferred_class_factory(klass, skip)
            field_names = init_list
        else:
            field_names = ()
    else:
        # Load all fields on klass
        # We trying to not populate field_names variable for perfomance reason.
        # If field_names variable is set, it is used to instantiate desired fields,
        # by passing **dict(zip(field_names, fields)) as kwargs to Model.__init__ method.
        # But kwargs version of Model.__init__ is slower, so we should avoid using
        # it when it is not really neccesary.
        if local_only and len(klass._meta.local_fields) != len(klass._meta.fields):
            field_count = len(klass._meta.local_fields)
            field_names = [f.attname for f in klass._meta.local_fields]
        else:
            field_count = len(klass._meta.fields)
            field_names = ()
    restricted = requested is not None
    related_fields = []
    for f in klass._meta.fields:
        if select_related_descend(f, restricted, requested, load_fields):
            # "next_requested" rather than "next": don't shadow the builtin.
            if restricted:
                next_requested = requested[f.name]
            else:
                next_requested = None
            klass_info = get_klass_info(f.rel.to, max_depth=max_depth, cur_depth=cur_depth+1,
                                        requested=next_requested, only_load=only_load)
            related_fields.append((f, klass_info))
    reverse_related_fields = []
    if restricted:
        # NOTE(review): this branch assumes only_load is not None whenever a
        # reverse-related descent is requested -- confirm with callers.
        for o in klass._meta.get_all_related_objects():
            if o.field.unique and select_related_descend(o.field, restricted, requested,
                                                         only_load.get(o.model), reverse=True):
                next_requested = requested[o.field.related_query_name()]
                klass_info = get_klass_info(o.model, max_depth=max_depth, cur_depth=cur_depth+1,
                                            requested=next_requested, only_load=only_load, local_only=True)
                reverse_related_fields.append((o.field, klass_info))
    if field_names:
        pk_idx = field_names.index(klass._meta.pk.attname)
    else:
        pk_idx = klass._meta.pk_index()
    return klass, field_names, field_count, related_fields, reverse_related_fields, pk_idx
def get_cached_row(row, index_start, using, klass_info, offset=0):
    """
    Helper function that recursively returns an object with the specified
    related attributes already populated.
    This method may be called recursively to populate deep select_related()
    clauses.
    Arguments:
     * row - the row of data returned by the database cursor
     * index_start - the index of the row at which data for this
       object is known to start
     * offset - the number of additional fields that are known to
       exist in row for `klass`. This usually means the number of
       annotated results on `klass`.
     * using - the database alias on which the query is being executed.
     * klass_info - result of the get_klass_info function
    Returns a (model instance or None, next column index) tuple, or None
    when klass_info is None.
    """
    if klass_info is None:
        return None
    klass, field_names, field_count, related_fields, reverse_related_fields, pk_idx = klass_info
    fields = row[index_start : index_start + field_count]
    # If the pk column is None (or the Oracle equivalent ''), then the related
    # object must be non-existent - set the relation to None.
    # ("is None" instead of "== None": identity is the correct test and is
    # immune to custom __eq__ implementations on column values.)
    if fields[pk_idx] is None or fields[pk_idx] == '':
        obj = None
    elif field_names:
        # Deferred loading: instantiate via kwargs for the loaded subset.
        obj = klass(**dict(zip(field_names, fields)))
    else:
        obj = klass(*fields)
    # If an object was retrieved, set the database state.
    if obj:
        obj._state.db = using
        obj._state.adding = False
    # Instantiate related fields
    index_end = index_start + field_count + offset
    # Iterate over each related object, populating any
    # select_related() fields
    for f, klass_info in related_fields:
        # Recursively retrieve the data for the related object
        cached_row = get_cached_row(row, index_end, using, klass_info)
        # If the recursive descent found an object, populate the
        # descriptor caches relevant to the object
        if cached_row:
            rel_obj, index_end = cached_row
            if obj is not None:
                # If the base object exists, populate the
                # descriptor cache
                setattr(obj, f.get_cache_name(), rel_obj)
            if f.unique and rel_obj is not None:
                # If the field is unique, populate the
                # reverse descriptor cache on the related object
                setattr(rel_obj, f.related.get_cache_name(), obj)
    # Now do the same, but for reverse related objects.
    # Only handle the restricted case - i.e., don't do a depth
    # descent into reverse relations unless explicitly requested
    for f, klass_info in reverse_related_fields:
        # Recursively retrieve the data for the related object
        cached_row = get_cached_row(row, index_end, using, klass_info)
        # If the recursive descent found an object, populate the
        # descriptor caches relevant to the object
        if cached_row:
            rel_obj, index_end = cached_row
            if obj is not None:
                # If the field is unique, populate the
                # reverse descriptor cache
                setattr(obj, f.related.get_cache_name(), rel_obj)
            if rel_obj is not None:
                # If the related object exists, populate
                # the descriptor cache.
                setattr(rel_obj, f.get_cache_name(), obj)
                # Now populate all the non-local field values on the related
                # object. If this object has deferred fields, we need to use
                # the opts from the original model to get non-local fields
                # correctly.
                opts = rel_obj._meta
                if getattr(rel_obj, '_deferred'):
                    opts = opts.proxy_for_model._meta
                for rel_field, rel_model in opts.get_fields_with_model():
                    if rel_model is not None:
                        setattr(rel_obj, rel_field.attname, getattr(obj, rel_field.attname))
                        # populate the field cache for any related object
                        # that has already been retrieved
                        if rel_field.rel:
                            try:
                                cached_obj = getattr(obj, rel_field.get_cache_name())
                                setattr(rel_obj, rel_field.get_cache_name(), cached_obj)
                            except AttributeError:
                                # Related object hasn't been cached yet
                                pass
    return obj, index_end
class RawQuerySet(object):
    """
    Provides an iterator which converts the results of raw SQL queries into
    annotated model instances.
    """
    def __init__(self, raw_query, model=None, query=None, params=None,
            translations=None, using=None):
        self.raw_query = raw_query
        self.model = model
        self._db = using
        self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params)
        self.params = params or ()
        # Maps query column names to model attribute names.
        self.translations = translations or {}
    def __iter__(self):
        # Mapping of attrnames to row column positions. Used for constructing
        # the model using kwargs, needed when not all model's fields are present
        # in the query.
        model_init_field_names = {}
        # A list of tuples of (column name, column position). Used for
        # annotation fields.
        annotation_fields = []
        # Cache some things for performance reasons outside the loop.
        db = self.db
        compiler = connections[db].ops.compiler('SQLCompiler')(
            self.query, connections[db], db
        )
        need_resolv_columns = hasattr(compiler, 'resolve_columns')
        query = iter(self.query)
        # Find out which columns are model's fields, and which ones should be
        # annotated to the model.
        for pos, column in enumerate(self.columns):
            if column in self.model_fields:
                model_init_field_names[self.model_fields[column].attname] = pos
            else:
                annotation_fields.append((column, pos))
        # Find out which model's fields are not present in the query.
        skip = set()
        for field in self.model._meta.fields:
            if field.attname not in model_init_field_names:
                skip.add(field.attname)
        if skip:
            if self.model._meta.pk.attname in skip:
                raise InvalidQuery('Raw query must include the primary key')
            # Some fields are missing: build a deferred class so the model
            # can still be instantiated, loading the rest lazily.
            model_cls = deferred_class_factory(self.model, skip)
        else:
            model_cls = self.model
            # All model's fields are present in the query. So, it is possible
            # to use *args based model instantation. For each field of the model,
            # record the query column position matching that field.
            model_init_field_pos = []
            for field in self.model._meta.fields:
                model_init_field_pos.append(model_init_field_names[field.attname])
        if need_resolv_columns:
            fields = [self.model_fields.get(c, None) for c in self.columns]
        # Begin looping through the query values.
        for values in query:
            if need_resolv_columns:
                values = compiler.resolve_columns(values, fields)
            # Associate fields to values
            if skip:
                # kwargs-based instantiation (slower, but handles the
                # deferred subset of fields).
                model_init_kwargs = {}
                for attname, pos in six.iteritems(model_init_field_names):
                    model_init_kwargs[attname] = values[pos]
                instance = model_cls(**model_init_kwargs)
            else:
                # Positional instantiation: faster when every field is present.
                model_init_args = [values[pos] for pos in model_init_field_pos]
                instance = model_cls(*model_init_args)
            if annotation_fields:
                for column, pos in annotation_fields:
                    setattr(instance, column, values[pos])
            instance._state.db = db
            instance._state.adding = False
            yield instance
    def __repr__(self):
        return "<RawQuerySet: %r>" % (self.raw_query % tuple(self.params))
    def __getitem__(self, k):
        # Raw queries have no lazy slicing: evaluate fully, then index.
        return list(self)[k]
    @property
    def db(self):
        "Return the database that will be used if this query is executed now"
        return self._db or router.db_for_read(self.model)
    def using(self, alias):
        """
        Selects which database this Raw QuerySet should excecute it's query against.
        """
        return RawQuerySet(self.raw_query, model=self.model,
                query=self.query.clone(using=alias),
                params=self.params, translations=self.translations,
                using=alias)
    @property
    def columns(self):
        """
        A list of model field names in the order they'll appear in the
        query results.
        """
        if not hasattr(self, '_columns'):
            self._columns = self.query.get_columns()
            # Adjust any column names which don't match field names
            for (query_name, model_name) in self.translations.items():
                try:
                    index = self._columns.index(query_name)
                    self._columns[index] = model_name
                except ValueError:
                    # Ignore translations for non-existant column names
                    pass
        return self._columns
    @property
    def model_fields(self):
        """
        A dict mapping column names to model field names.
        """
        if not hasattr(self, '_model_fields'):
            converter = connections[self.db].introspection.table_name_converter
            self._model_fields = {}
            for field in self.model._meta.fields:
                name, column = field.get_attname_column()
                self._model_fields[converter(column)] = field
        return self._model_fields
def insert_query(model, objs, fields, return_id=False, raw=False, using=None):
    """
    Inserts a new record for the given model. This provides an interface to
    the InsertQuery class and is how Model.save() is implemented. It is not
    part of the public API.
    """
    insert = sql.InsertQuery(model)
    insert.insert_values(fields, objs, raw=raw)
    compiler = insert.get_compiler(using=using)
    return compiler.execute_sql(return_id)
def prefetch_related_objects(result_cache, related_lookups):
    """
    Helper function for prefetch_related functionality
    Populates prefetched objects caches for a list of results
    from a QuerySet
    """
    if len(result_cache) == 0:
        return # nothing to do
    model = result_cache[0].__class__
    # We need to be able to dynamically add to the list of prefetch_related
    # lookups that we look up (see below).  So we need some book keeping to
    # ensure we don't do duplicate work.
    done_lookups = set() # list of lookups like foo__bar__baz
    done_queries = {}    # dictionary of things like 'foo__bar': [results]
    auto_lookups = [] # we add to this as we go through.
    followed_descriptors = set() # recursion protection
    # chain() lets lookups appended to auto_lookups during iteration still
    # be visited by this same loop.
    all_lookups = itertools.chain(related_lookups, auto_lookups)
    for lookup in all_lookups:
        if lookup in done_lookups:
            # We've done exactly this already, skip the whole thing
            continue
        done_lookups.add(lookup)
        # Top level, the list of objects to decorate is the result cache
        # from the primary QuerySet. It won't be for deeper levels.
        obj_list = result_cache
        attrs = lookup.split(LOOKUP_SEP)
        for level, attr in enumerate(attrs):
            # Prepare main instances
            if len(obj_list) == 0:
                break
            current_lookup = LOOKUP_SEP.join(attrs[0:level+1])
            if current_lookup in done_queries:
                # Skip any prefetching, and any object preparation
                obj_list = done_queries[current_lookup]
                continue
            # Prepare objects:
            good_objects = True
            for obj in obj_list:
                # Since prefetching can re-use instances, it is possible to have
                # the same instance multiple times in obj_list, so obj might
                # already be prepared.
                if not hasattr(obj, '_prefetched_objects_cache'):
                    try:
                        obj._prefetched_objects_cache = {}
                    except AttributeError:
                        # Must be in a QuerySet subclass that is not returning
                        # Model instances, either in Django or 3rd
                        # party. prefetch_related() doesn't make sense, so quit
                        # now.
                        good_objects = False
                        break
            if not good_objects:
                break
            # Descend down tree
            # We assume that objects retrieved are homogenous (which is the premise
            # of prefetch_related), so what applies to first object applies to all.
            first_obj = obj_list[0]
            prefetcher, descriptor, attr_found, is_fetched = get_prefetcher(first_obj, attr)
            if not attr_found:
                raise AttributeError("Cannot find '%s' on %s object, '%s' is an invalid "
                                     "parameter to prefetch_related()" %
                                     (attr, first_obj.__class__.__name__, lookup))
            if level == len(attrs) - 1 and prefetcher is None:
                # Last one, this *must* resolve to something that supports
                # prefetching, otherwise there is no point adding it and the
                # developer asking for it has made a mistake.
                raise ValueError("'%s' does not resolve to a item that supports "
                                 "prefetching - this is an invalid parameter to "
                                 "prefetch_related()." % lookup)
            if prefetcher is not None and not is_fetched:
                obj_list, additional_prl = prefetch_one_level(obj_list, prefetcher, attr)
                # We need to ensure we don't keep adding lookups from the
                # same relationships to stop infinite recursion. So, if we
                # are already on an automatically added lookup, don't add
                # the new lookups from relationships we've seen already.
                if not (lookup in auto_lookups and
                        descriptor in followed_descriptors):
                    for f in additional_prl:
                        new_prl = LOOKUP_SEP.join([current_lookup, f])
                        auto_lookups.append(new_prl)
                    done_queries[current_lookup] = obj_list
                followed_descriptors.add(descriptor)
            else:
                # Either a singly related object that has already been fetched
                # (e.g. via select_related), or hopefully some other property
                # that doesn't support prefetching but needs to be traversed.
                # We replace the current list of parent objects with the list
                # of related objects, filtering out empty or missing values so
                # that we can continue with nullable or reverse relations.
                new_obj_list = []
                for obj in obj_list:
                    try:
                        new_obj = getattr(obj, attr)
                    except exceptions.ObjectDoesNotExist:
                        continue
                    if new_obj is None:
                        continue
                    new_obj_list.append(new_obj)
                obj_list = new_obj_list
def get_prefetcher(instance, attr):
    """
    For the attribute 'attr' on the given instance, finds
    an object that has a get_prefetch_query_set().
    Returns a 4 tuple containing:
    (the object with get_prefetch_query_set (or None),
     the descriptor object representing this relationship (or None),
     a boolean that is False if the attribute was not found at all,
     a boolean that is True if the attribute has already been fetched)
    """
    prefetcher = None
    is_fetched = False
    # Look the attribute up on the class first: for singly related objects,
    # reading it off the instance would fire the descriptor and run a query.
    descriptor = getattr(instance.__class__, attr, None)
    if descriptor is None:
        # Nothing on the class; fall back to the instance itself.
        try:
            getattr(instance, attr)
        except AttributeError:
            return None, None, False, False
        return None, None, True, False
    # The attribute exists at class level, so it was found.
    if descriptor and hasattr(descriptor, 'get_prefetch_query_set'):
        # Singly related object; the descriptor itself can prefetch.
        prefetcher = descriptor
        if descriptor.is_cached(instance):
            is_fetched = True
    elif descriptor:
        # The descriptor can't prefetch, but the instance-level value
        # (e.g. a related manager) might be able to.
        value = getattr(instance, attr)
        if hasattr(value, 'get_prefetch_query_set'):
            prefetcher = value
    return prefetcher, descriptor, True, is_fetched
def prefetch_one_level(instances, prefetcher, attname):
    """
    Helper function for prefetch_related_objects

    Runs prefetches on all instances using the prefetcher object,
    assigning results to relevant caches in instance.

    The prefetched objects are returned, along with any additional
    prefetches that must be done due to prefetch_related lookups
    found from default managers.
    """
    # get_prefetch_query_set() must return a tuple of:
    #   (queryset of related model instances,
    #    callable mapping a related object to its join value,
    #    callable mapping a passed-in instance to its join value,
    #    True when the relation is singly-valued,
    #    cache name to assign results to).
    # Join values must be hashable: they are used as dict keys below.
    rel_qs, rel_obj_attr, instance_attr, single, cache_name = \
        prefetcher.get_prefetch_query_set(instances)
    # The default manager may itself have attached prefetch_related lookups
    # to the queryset we just got back.  Evaluating the query must not run
    # them, so strip them off (touching the private attribute directly for
    # performance; the manager gave us a fresh instance) and hand them back
    # to the caller to merge into the current work.
    additional_prl = getattr(rel_qs, '_prefetch_related_lookups', [])
    if additional_prl:
        rel_qs._prefetch_related_lookups = []

    all_related_objects = list(rel_qs)

    # Bucket the fetched objects by their join value.
    rel_obj_cache = {}
    for rel_obj in all_related_objects:
        rel_obj_cache.setdefault(rel_obj_attr(rel_obj), []).append(rel_obj)

    for obj in instances:
        vals = rel_obj_cache.get(instance_attr(obj), [])
        if single:
            # Singly-valued relation: store the object (or None) directly.
            setattr(obj, cache_name, vals[0] if vals else None)
        else:
            # Multi-valued: the attribute is a manager whose .all() returns a
            # QuerySet.  Seed its result cache and mark prefetching done so
            # it is not triggered again for this queryset.
            qs = getattr(obj, attname).all()
            qs._result_cache = vals
            qs._prefetch_done = True
            obj._prefetched_objects_cache[cache_name] = qs
    return all_related_objects, additional_prl
/BasiliskJS-0.8.tar.gz/BasiliskJS-0.8/README.rst |
BasiliskJS - Scriptable Headless WebKit
=========================
`BasiliskJS <https://pypi.python.org/pypi/BasiliskJS>`_ Представляет собой WebKit для python, основан на `PhantomJS <http://phantomjs.org>`_ .
Возможность
============
- **Быстрое тестирование**. Возможность быстрого тестирования без браузера!
- **Автоматизация dom**. Простой интерфейс.
- **Работа с js**. Есть возможность выполнять JavaScript, парсинг динамических страниц.
- **Захват экрана**. Возможность сделать снимок страницы любого размера.
Пример работы
-------------
Простой get запрос на https://github.com/lich666dead/BasiliskJS.
.. code-block:: python
>>> from basilisk import PhantomJS
>>> PhantomJS().get("https://github.com/lich666dead/BasiliskJS")
{'status': 'success', 'urls': ['https://github.com/lich666dead/BasiliskJS']}
Простой post запрос на https://github.com/lich666dead/BasiliskJS.
.. code-block:: python
>>> from basilisk import PhantomJS
>>> PhantomJS().post("https://github.com/lich666dead/BasiliskJS", {'post_data': 'post_data'})
{'status': 'success', 'urls': ['https://github.com/lich666dead/BasiliskJS']}
Запрос с выполнением js.
.. code-block:: python
from basilisk import PhantomJS
js = '''
var temp = {};
for (var i = 0; i != document.getElementsByClassName('nav-item-name').length; i++) {
temp[i] = document.getElementsByClassName('nav-item-name')[i].innerText;
}
return temp;
'''
bs = PhantomJS()
bs.evaluate(js)
print(bs.get("http://phantomjs.org/documentation/"))
result = {
'status': 'success',
'js': {
'0': 'Download', '1': 'Build',
'2': 'Releases', '3': 'Release Names',
'4': 'REPL', '5': 'Quick Start',
'6': 'Headless Testing', '7': 'Screen Capture',
'8': 'Network Monitoring', '9': 'Page Automation',
'10': 'Inter Process Communication', '11': 'Command Line Interface',
'12': 'Troubleshooting', '13': 'FAQ',
'14': 'Examples', '15': 'Best Practices',
'16': 'Tips and Tricks', '17': 'Supported Web Standards',
'18': 'Buzz', '19': "Who's using PhantomJS?",
'20': 'Related Projects', '21': 'Contributing',
'22': 'Source Code', '23': 'Test Suite',
'24': 'Release Preparation', '25': 'Crash Reporting',
'26': 'Bug Reporting'
},
'urls': ['http://phantomjs.org/documentation/']
}
Метод include_js позволяет импортировать любую js библиотеку.
.. code-block:: python
from basilisk import PhantomJS
js = '''
var $loginForm = $('form#login');
$loginForm.find('input[name="username"]').value('phantomjs');
$loginForm.find('input[name="password"]').value('c45p3r');'''
bs = PhantomJS()
bs.include_js("https://ajax.googleapis.com/ajax/libs/jquery/1.8.2/jquery.min.js")
bs.evaluate(js)
bs.get("http://phantomjs.org/documentation/")
Показать html контент:
.. code-block:: python
>>> from basilisk import PhantomJS
>>> PhantomJS(content=True).get('http://phantomjs.org/')
Событие закрытия браузера зависит от параметра (conversion) — это количество переходов по ссылкам.
Теперь можно переходить по ссылкам; этим параметром нужно пользоваться осторожно,
иначе можно вызвать зацикливание.
Пример работы с параметром:
.. code-block:: python
from basilisk import PhantomJS
js = '''
document.getElementById('projectUrl1').value = 'phantomjs.org';
document.getElementById('button1').click();'''
bs = PhantomJS(conversion=2)
bs.evaluate(js)
print(bs.get("https://altrumseo.ru/"))
result = {'status': 'success', 'js': None, 'urls': ['https://altrumseo.ru/', 'https://altrumseo.ru/analitics/']}
Как видно, у нас в массиве 2 url: закрытие браузера срабатывает на
событие, зависящее от параметра (conversion).
Например, если параметр conversion=3, то выполнение просто не зациклится!
Параметры инициализатора:
-------------
- **url**. - url для get запроса.
- **content**. - Показать content, по умолчанию( False ).
- **image_size**. - Размер изображения по умолчанию( {'width': 1920, 'height': 1080} ).
- **add_cookie**. - Дает возможность изменить cookie.
- **screenshot**. - Сделать скриншот, по умолчанию( False ).
- **image_name**. - Путь, название выходного изображения.
- **get_cookies**. - Получить cookies, по умолчанию( False ).
- **user_agent**. - Изменить user-agent.
- **load_images**. - Загрузка изображений на странице, по умолчанию( False ).
- **command**. - Параметр отвечает за путь к браузеру phantomjs.
- **conversion**. - Количество переходов на странице.
Развитие
-------------
На данный момент я на стадии Pre-Alpha. Вы можете увидеть сообщения об ошибках и т.д.
| PypiClean |
/OASYS1-SRW-1.1.106.tar.gz/OASYS1-SRW-1.1.106/orangecontrib/srw/widgets/native/ow_srw_me_degcoh_plotter.py | __author__ = 'labx'
from numpy import nan
from PyQt5.QtGui import QPalette, QColor, QFont
from PyQt5.QtWidgets import QMessageBox
from orangewidget import gui
from orangewidget.settings import Setting
from oasys.widgets import gui as oasysgui
from orangecontrib.srw.util.srw_util import SRWPlot
from orangecontrib.srw.widgets.gui.ow_srw_wavefront_viewer import SRWWavefrontViewer
from orangecontrib.srw.widgets.native.util import native_util
class OWSRWDegCohPlotter(SRWWavefrontViewer):
    """OASYS widget that loads SRW multi-electron output files and plots the
    degree of coherence for a horizontal and a vertical cut."""

    maintainer = "Luca Rebuffi"
    maintainer_email = "lrebuffi(@at@)anl.gov"
    category = "Native"
    keywords = ["data", "file", "load", "read"]

    name = "Degree of Coherence Plot"
    description = "SRW Native: Degree of Coherence Plot"
    icon = "icons/degcoh.png"
    priority = 4

    want_main_area = 1

    TABS_AREA_HEIGHT = 618

    # Persisted widget settings.
    calculation = Setting(0)  # 0: mutual-intensity files, 1: degree-of-coherence files
    horizontal_cut_file_name = Setting("<file_me_degcoh>.1")
    vertical_cut_file_name = Setting("<file_me_degcoh>.2")
    mode = Setting(0)  # 0: Numpy/Scipy computation, 1: original Igor macro port

    is_final_screen = True
    view_type = 1

    # Cache of the last plotted tickets, reused by replot().
    last_tickets = None

    def __init__(self):
        super().__init__(show_automatic_box=False, show_view_box=False)

        self.do_average = True

        self.general_options_box.setVisible(False)

        button_box = oasysgui.widgetBox(self.controlArea, "", addSpace=False, orientation="horizontal")

        button = gui.button(button_box, self, "Load SRW Files", callback=self.plot_degcoh)
        font = QFont(button.font())
        font.setBold(True)
        button.setFont(font)
        palette = QPalette(button.palette())  # make a copy of the palette
        palette.setColor(QPalette.ButtonText, QColor('Dark Blue'))
        button.setPalette(palette)  # assign new palette
        button.setFixedHeight(45)

        gui.separator(self.controlArea)

        self.controlArea.setFixedWidth(self.CONTROL_AREA_WIDTH)

        self.tabs_setting = oasysgui.tabWidget(self.controlArea)
        self.tabs_setting.setFixedHeight(self.TABS_AREA_HEIGHT)
        self.tabs_setting.setFixedWidth(self.CONTROL_AREA_WIDTH - 5)

        self.tab_bas = oasysgui.createTabPage(self.tabs_setting, "ME Degree of Coherence Setting")

        view_box_1 = oasysgui.widgetBox(self.tab_bas, "Calculation Setting", addSpace=False, orientation="vertical")

        gui.comboBox(view_box_1, self, "calculation", label="M.E. Output File", items=["Mutual Intensity", "Degree of Coherence"], orientation="horizontal", callback=self.set_calculation)

        self.box_1 = oasysgui.widgetBox(view_box_1, "", addSpace=False, orientation="vertical")
        self.box_2 = oasysgui.widgetBox(view_box_1, "", addSpace=False, orientation="vertical")

        gui.label(self.box_1, self, "Mutual Intensity Files:")

        file_box = oasysgui.widgetBox(self.box_1, "", addSpace=False, orientation="horizontal")
        # NOTE(review): these two line-edit attributes are re-assigned below
        # for box_2, so the file dialogs only refresh the box_2 widgets —
        # confirm whether the box_1 widgets should keep separate references.
        self.le_horizontal_cut_file_name = oasysgui.lineEdit(file_box, self, "horizontal_cut_file_name", "Horizontal Cut ", labelWidth=105, valueType=str, orientation="horizontal")
        gui.button(file_box, self, "...", callback=self.selectHorizontalCutFile)

        file_box = oasysgui.widgetBox(self.box_1, "", addSpace=False, orientation="horizontal")
        self.le_vertical_cut_file_name = oasysgui.lineEdit(file_box, self, "vertical_cut_file_name", "Vertical Cut ", labelWidth=105, valueType=str, orientation="horizontal")
        gui.button(file_box, self, "...", callback=self.selectVerticalCutFile)

        gui.separator(self.box_1)

        gui.comboBox(self.box_1, self, "mode", label="Calculation type:", items=["by using Numpy/Scipy (Faster)", "As Original Igor Macro (Slower)"], orientation="horizontal")

        gui.label(self.box_2, self, "Degree of Coherence Files:")

        file_box = oasysgui.widgetBox(self.box_2, "", addSpace=False, orientation="horizontal")
        self.le_horizontal_cut_file_name = oasysgui.lineEdit(file_box, self, "horizontal_cut_file_name", "Horizontal Cut ", labelWidth=105, valueType=str, orientation="horizontal")
        gui.button(file_box, self, "...", callback=self.selectHorizontalCutFile)

        file_box = oasysgui.widgetBox(self.box_2, "", addSpace=False, orientation="horizontal")
        self.le_vertical_cut_file_name = oasysgui.lineEdit(file_box, self, "vertical_cut_file_name", "Vertical Cut ", labelWidth=105, valueType=str, orientation="horizontal")
        gui.button(file_box, self, "...", callback=self.selectVerticalCutFile)

        self.set_calculation()

        view_box_1 = oasysgui.widgetBox(self.tab_bas, "Plot Setting", addSpace=False, orientation="vertical")

        view_box_2 = oasysgui.widgetBox(view_box_1, "", addSpace=False, orientation="horizontal")

        self.range_combo = gui.comboBox(view_box_2, self, "use_range", label="Plotting Range",
                                        labelWidth=120,
                                        items=["No", "Yes"],
                                        callback=self.set_PlottingRange, sendSelectedValue=False, orientation="horizontal")

        self.refresh_button = gui.button(view_box_2, self, "Refresh", callback=self.replot)

        self.plot_range_box_1 = oasysgui.widgetBox(view_box_1, "", addSpace=False, orientation="vertical", height=50)
        self.plot_range_box_2 = oasysgui.widgetBox(view_box_1, "", addSpace=False, orientation="vertical", height=50)

        view_box_2 = oasysgui.widgetBox(self.plot_range_box_1, "", addSpace=False, orientation="horizontal")

        oasysgui.lineEdit(view_box_2, self, "range_x_min", "Plotting Range X min [\u03bcm]", labelWidth=150, valueType=float, orientation="horizontal")
        oasysgui.lineEdit(view_box_2, self, "range_x_max", "max [\u03bcm]", labelWidth=60, valueType=float, orientation="horizontal")

        view_box_3 = oasysgui.widgetBox(self.plot_range_box_1, "", addSpace=False, orientation="horizontal")

        oasysgui.lineEdit(view_box_3, self, "range_y_min", "Plotting Range Y min [\u03bcm]", labelWidth=150, valueType=float, orientation="horizontal")
        oasysgui.lineEdit(view_box_3, self, "range_y_max", "max [\u03bcm]", labelWidth=60, valueType=float, orientation="horizontal")

        self.set_PlottingRange()

    def set_calculation(self):
        """Show the file box matching the selected M.E. output type."""
        self.box_1.setVisible(self.calculation == 0)
        self.box_2.setVisible(self.calculation == 1)

    def selectHorizontalCutFile(self):
        """Open a file dialog for the horizontal-cut file."""
        self.le_horizontal_cut_file_name.setText(oasysgui.selectFileFromDialog(self, self.horizontal_cut_file_name, "Mutual Intensity Horizontal Cut File", file_extension_filter="*.1"))

    def selectVerticalCutFile(self):
        """Open a file dialog for the vertical-cut file."""
        # Bug fix: the dialog title previously said "Horizontal Cut".
        self.le_vertical_cut_file_name.setText(oasysgui.selectFileFromDialog(self, self.vertical_cut_file_name, "Mutual Intensity Vertical Cut File", file_extension_filter="*.2"))

    def plot_degcoh(self):
        """Load the selected files, compute (or directly load) the degree of
        coherence for both cuts, and plot them.  Errors are shown in a
        message box instead of crashing the widget."""
        try:
            self.progressBarInit()

            tickets = []

            if self.calculation == 0:
                # Mutual-intensity input: the degree of coherence still has to
                # be computed, either with the fast Numpy/Scipy path or the
                # original Igor macro port.
                mode = "Igor" if self.mode == 1 else "Scipy"

                sum_x, difference_x, degree_of_coherence_x = native_util.calculate_degree_of_coherence_vs_sum_and_difference_from_file(self.horizontal_cut_file_name, mode=mode)
                self.progressBarSet(40)
                sum_y, difference_y, degree_of_coherence_y = native_util.calculate_degree_of_coherence_vs_sum_and_difference_from_file(self.vertical_cut_file_name, mode=mode)
            else:
                # Degree-of-coherence input: load the precomputed values.
                sum_x, difference_x, degree_of_coherence_x = native_util.load_mutual_intensity_file(self.horizontal_cut_file_name)
                self.progressBarSet(40)
                sum_y, difference_y, degree_of_coherence_y = native_util.load_mutual_intensity_file(self.vertical_cut_file_name)

            # Coordinates are scaled by 1000 to match the µm axis labels
            # (source files assumed to be in mm — TODO confirm).
            tickets.append(SRWPlot.get_ticket_2D(sum_x * 1000, difference_x * 1000, degree_of_coherence_x))
            tickets.append(SRWPlot.get_ticket_2D(sum_y * 1000, difference_y * 1000, degree_of_coherence_y))

            self.plot_results(tickets, progressBarValue=80)

            self.last_tickets = tickets

            self.progressBarFinished()
        except Exception as e:
            QMessageBox.critical(self, "Error", str(e), QMessageBox.Ok)

    def replot(self):
        """Re-plot the cached tickets if available, otherwise reload files."""
        if self.last_tickets is None:
            self.plot_degcoh()
        else:
            self.progressBarInit()
            self.progressBarSet(50)
            self.plot_results(self.last_tickets, progressBarValue=50)
            self.progressBarFinished()

    def getVariablesToPlot(self):
        return [[1, 2], [1, 2]]

    def getWeightedPlots(self):
        return [False, False]

    def getWeightTickets(self):
        return [nan, nan]

    def getTitles(self, with_um=False):
        # Both branches of the old code returned the same list; collapsed.
        return ["Degree Of Coherence (H)", "Degree Of Coherence (V)"]

    def getXTitles(self):
        return ["(X\u2081 + X\u2082)/2 [\u03bcm]", "(Y\u2081 + Y\u2082)/2 [\u03bcm]"]

    def getYTitles(self):
        return ["(X\u2081 - X\u2082)/2 [\u03bcm]", "(Y\u2081 - Y\u2082)/2 [\u03bcm]"]

    def getXUM(self):
        return ["X [\u03bcm]", "X [\u03bcm]"]

    def getYUM(self):
        return ["Y [\u03bcm]", "Y [\u03bcm]"]
/CUriTools-0.7.1.tar.gz/CUriTools-0.7.1/curitools/settings.py | import os
import time
import codecs
import re
import getpass
class MissingFileSettings(Exception):
    """Raised when no .uri.settings file can be located or read."""
    pass
class MissingValueRequired(Exception):
    """Raised when a required setting (user/password) is absent from the file."""
    pass
class Settings(object):
    """Locates, creates and parses the ``.uri.settings`` credentials file.

    The file is plain text with ``key: value`` lines; ``user`` and
    ``password`` are required, ``language`` is optional.
    """

    def __init__(self, file_path=None):
        # An explicit path wins; otherwise search the usual locations (and
        # offer to create the file interactively when nothing is found).
        self.file_path = file_path if file_path is not None else self.find_settings_file()
        # Maps each known setting name to whether it must be present.
        self.settings_values = {"user": "required", "password": "required", "language": "notrequired"}

    def find_settings_file(self):
        """Return the path of an existing settings file, or offer to create one.

        Search order: home dir, this module's dir, its parent, the current
        working dir, its parent.  Returns None when nothing exists and the
        user declines to create the file.
        """
        module_dir = os.path.dirname(os.path.realpath(__file__))
        candidate_dirs = [
            os.path.expanduser("~"),
            module_dir,
            os.path.join(module_dir, ".."),
            os.getcwd(),
            os.path.join(os.getcwd(), ".."),
        ]
        for directory in candidate_dirs:
            file_settings = os.path.join(directory, ".uri.settings")
            if os.path.isfile(file_settings):
                return file_settings
        print("Nao foi encontrado um arquivo de configuracoes")
        if self.create_file_settings():
            return os.path.join(os.path.expanduser("~"), ".uri.settings")
        return None

    def create_file_settings(self):
        """Interactively create ``~/.uri.settings``; return True on success."""
        var = input("Voce deseja criar o arquivo de configuracao:[S/N] ")
        # Bug fix: the old check was `"S" in var`, which rejected a lowercase
        # "s" and accepted any answer merely containing an uppercase S.
        if var.strip().upper().startswith("S"):
            user = input("Digite o seu email: ")
            password = getpass.getpass("Digite a sua senha: ")
            if user and password:
                file_settings = os.path.join(os.path.expanduser("~"), ".uri.settings")
                with open(file_settings, "w") as handle:
                    handle.write("user: " + user + "\n")
                    handle.write("password: " + password)
                return True
        # Bug fix: previously, answering "S" with empty credentials silently
        # returned None; now every failure path reports and returns False.
        print("O arquivo de configuracao e necessario")
        return False

    def get_setting(self, setting, line):
        """Return the value of ``setting`` found in text ``line`` (None if absent)."""
        # re.escape guards against regex metacharacters in the setting name.
        m = re.search(re.escape(setting) + ": (.*)", line)
        return m.group(1) if m is not None else None

    def read_settings(self):
        """Return the raw settings-file text.

        :raises MissingFileSettings: when no file path is known or it is gone.
        """
        if self.file_path is None or not os.path.isfile(self.file_path):
            raise MissingFileSettings("O arquivo de configuracao nao foi encontrado")
        with open(self.file_path, "r") as handle:
            return handle.read()

    def extract_settings(self):
        """Parse every known setting out of the file into instance attributes.

        :raises MissingValueRequired: when a required setting is absent.
        """
        text = self.read_settings()
        for setting, requirement in self.settings_values.items():
            found = self.get_setting(setting, text)
            if requirement == "required" and found is None:
                raise MissingValueRequired("Setting %s not found on file %s" % (setting, self.file_path))
            # Exposes e.g. self.user / self.password / self.language.
            setattr(self, setting, found)

    def get_settings(self):
        """Return ``(user, password)``, parsing the file on first use.

        Bug fix: the old implementation recursed after extract_settings(),
        which could loop forever when a required value was present but empty.
        """
        if not (getattr(self, "user", None) and getattr(self, "password", None)):
            self.extract_settings()
        return self.user, self.password

    def get_language(self):
        """Return the configured language, defaulting to "c++"."""
        if hasattr(self, "language"):
            return self.language
        return "c++"
/0x-web3-5.0.0a5.tar.gz/0x-web3-5.0.0a5/ens/utils.py | import copy
import datetime
import functools
from eth_utils import (
is_same_address,
remove_0x_prefix,
to_normalized_address,
)
import idna
from ens.constants import (
ACCEPTABLE_STALE_HOURS,
AUCTION_START_GAS_CONSTANT,
AUCTION_START_GAS_MARGINAL,
EMPTY_SHA3_BYTES,
MIN_ETH_LABEL_LENGTH,
REVERSE_REGISTRAR_DOMAIN,
)
from ens.exceptions import (
InvalidLabel,
InvalidName,
)
# Sentinel: lets init_web3 below distinguish "no providers argument supplied"
# from an explicit None.
default = object()
def Web3():
    """Lazily import and return the web3 ``Web3`` class (avoids import cycles)."""
    from web3 import Web3 as web3_class
    return web3_class
def dict_copy(func):
    """Decorator: deep-copy keyword args so *func* cannot mutate the caller's dicts."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return func(*args, **copy.deepcopy(kwargs))
    return wrapper
def ensure_hex(data):
    """Return *data* unchanged when it is already a str; otherwise hex-encode it."""
    if isinstance(data, str):
        return data
    return Web3().toHex(data)
def init_web3(providers=default):
    """Build a Web3 instance (ENS disabled to avoid recursion) and customize it."""
    from web3 import Web3
    w3 = Web3(ens=None) if providers is default else Web3(providers, ens=None)
    return customize_web3(w3)
def customize_web3(w3):
    """Swap the default name-resolution middleware for a staleness check."""
    from web3.middleware import make_stalecheck_middleware
    stalecheck = make_stalecheck_middleware(ACCEPTABLE_STALE_HOURS * 3600)
    w3.middleware_onion.remove('name_to_address')
    w3.middleware_onion.add(stalecheck, name='stalecheck')
    return w3
def normalize_name(name):
    """
    Clean the fully qualified name, as defined in ENS `EIP-137
    <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-137.md#name-syntax>`_

    This does *not* enforce whether ``name`` is a label or fully qualified domain.

    :param str name: the dot-separated ENS name
    :raises InvalidName: if ``name`` has invalid syntax
    """
    if not name:
        return name
    if isinstance(name, (bytes, bytearray)):
        name = name.decode('utf-8')
    try:
        return idna.decode(name, uts46=True, std3_rules=True)
    except idna.IDNAError as exc:
        raise InvalidName("%s is an invalid name, because %s" % (name, exc)) from exc
def is_valid_name(name):
    """
    Validate whether the fully qualified name is valid, as defined in ENS `EIP-137
    <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-137.md#name-syntax>`_

    :param str name: the dot-separated ENS name
    :returns: True if ``name`` is set, and :meth:`~ens.main.ENS.nameprep` will not raise InvalidName
    """
    if not name:
        return False
    try:
        normalize_name(name)
    except InvalidName:
        return False
    return True
def name_to_label(name, registrar):
    """Strip the trailing *registrar* domain from *name*, returning the bare label."""
    name = normalize_name(name)
    if '.' not in name:
        return name
    label, *parents = name.split('.')
    registrar_pieces = registrar.split('.')
    if len(parents) != len(registrar_pieces):
        raise ValueError(
            "You must specify a label, like 'tickets' "
            "or a fully-qualified name, like 'tickets.%s'" % registrar
        )
    if parents != registrar_pieces:
        raise ValueError("This interface only manages names under .%s " % registrar)
    return label
def dot_eth_label(name):
    """
    Convert from a name, like 'ethfinex.eth', to a label, like 'ethfinex'

    If name is already a label, this should be a noop, except for converting to a string
    and validating the name syntax.
    """
    label = name_to_label(name, registrar='eth')
    if len(label) >= MIN_ETH_LABEL_LENGTH:
        return label
    raise InvalidLabel('name %r is too short' % label)
def to_utc_datetime(timestamp):
    """Convert a unix *timestamp* to an aware UTC datetime; falsy input maps to None."""
    if not timestamp:
        return None
    return datetime.datetime.fromtimestamp(timestamp, datetime.timezone.utc)
def sha3_text(val):
    """Keccak-hash *val*, UTF-8 encoding it first when given a str."""
    payload = val.encode('utf-8') if isinstance(val, str) else val
    return Web3().keccak(payload)
def label_to_hash(label):
    """Keccak-hash a single, dot-free ENS label after normalization."""
    normalized = normalize_name(label)
    if '.' in normalized:
        raise ValueError("Cannot generate hash for label %r with a '.'" % normalized)
    return Web3().keccak(text=normalized)
def normal_name_to_hash(name):
    """Compute the EIP-137 namehash of an already-normalized *name*."""
    node = EMPTY_SHA3_BYTES
    if not name:
        return node
    # Fold in labels from the rightmost (TLD) to the leftmost.
    for label in reversed(name.split(".")):
        labelhash = label_to_hash(label)
        assert isinstance(labelhash, bytes)
        assert isinstance(node, bytes)
        node = Web3().keccak(node + labelhash)
    return node
def raw_name_to_hash(name):
    """
    Generate the namehash. This is also known as the ``node`` in ENS contracts.

    In normal operation, generating the namehash is handled
    behind the scenes. For advanced usage, it is a helpful utility.

    This normalizes the name with `nameprep
    <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-137.md#name-syntax>`_
    before hashing.

    :param str name: ENS name to hash
    :return: the namehash
    :rtype: bytes
    :raises InvalidName: if ``name`` has invalid syntax
    """
    return normal_name_to_hash(normalize_name(name))
def address_in(address, addresses):
    """True when *address* matches any entry of *addresses* per eth_utils.is_same_address."""
    for candidate in addresses:
        if is_same_address(address, candidate):
            return True
    return False
def address_to_reverse_domain(address):
    """Build the reverse-registrar domain for *address* (lowercased, no 0x prefix)."""
    unprefixed = remove_0x_prefix(to_normalized_address(address))
    return '%s.%s' % (unprefixed, REVERSE_REGISTRAR_DOMAIN)
def estimate_auction_start_gas(labels):
    """Linear gas estimate: constant overhead plus a marginal cost per label."""
    return AUCTION_START_GAS_CONSTANT + len(labels) * AUCTION_START_GAS_MARGINAL
def assert_signer_in_modifier_kwargs(modifier_kwargs):
    """Extract the 'from' address out of the single transact/call modifier dict."""
    ERR_MSG = "You must specify the sending account"
    assert len(modifier_kwargs) == 1, ERR_MSG
    # Exactly one (modifier-type, options-dict) pair is present at this point.
    (_modifier_type, modifier_dict), = dict(modifier_kwargs).items()
    if 'from' not in modifier_dict:
        raise TypeError(ERR_MSG)
    return modifier_dict['from']
def is_none_or_zero_address(addr):
    """True for falsy addresses or the all-zero '0x000...0' address."""
    if not addr:
        return True
    return addr == '0x' + '00' * 20
/GQCMS-0.0.4-py3-none-any.whl/build/lib/build/lib/gqcms/General.py | from abc import abstractmethod
from collections import deque
import numpy as np
import scipy.sparse.linalg as sparse_linalg
import warnings
class IterativeAlgorithm:
    """Configurable fixed-point loop.

    Init steps run once; the iterative steps then repeat until
    ``env.bconverged`` becomes truthy or ``env.MAXITER`` is reached.  Each
    step is either a zero-argument callable or a callable taking
    ``(env, env.system)``; the environment object carries all shared state.
    """

    def __init__(self, env, init_steps: list = None, steps: list = None):
        """
        :param env: environment object holding shared state (MAXITER,
            bconverged, iteration, ...)
        :param init_steps: steps run once before the loop
        :param steps: steps run every iteration
        """
        self._env = env
        # Bug fix: the old signature used mutable defaults (= []), so every
        # instance constructed without explicit lists shared — and mutated —
        # the same two lists.
        self._init_steps = [] if init_steps is None else init_steps
        self._steps = [] if steps is None else steps

    def print(self):
        """Pretty-print the configured init and iterative steps."""
        print(" Initialization steps:")
        self._print_steps(self._init_steps)
        print(" Iterative steps:")
        self._print_steps(self._steps)

    @staticmethod
    def _print_steps(steps):
        # functools.partial objects have no __name__; fall back to the name
        # of the wrapped function.
        for i, step in enumerate(steps):
            try:
                print(f"\t{i+1}. {step.__name__}")
            except AttributeError:
                print(f"\t{i+1}. {step.func.__name__}")

    def add_init_step(self, step):
        """Append *step* to the init sequence."""
        self._init_steps.append(step)

    def add_step(self, step):
        """Append *step* to the end of the iterative sequence."""
        self._steps.append(step)

    def insert_init_step(self, step, position):
        """Insert an init step at *position*."""
        self._init_steps.insert(position, step)

    def insert_step(self, position, step):
        """Insert an iterative step at *position*."""
        self._steps.insert(position, step)

    def add_steps(self, steps: list):
        """Append multiple iterative steps at once."""
        self._steps.extend(steps)

    def remove_init_step(self, position):
        """Remove the init step at *position*."""
        del self._init_steps[position]

    def remove_step(self, position):
        """Remove the iterative step at *position*."""
        del self._steps[position]

    def _call_step(self, step):
        # Steps come in two flavors: niladic, or taking (env, system).
        # NOTE(review): a TypeError raised *inside* a niladic step also
        # triggers the (env, system) fallback — confirm this is acceptable.
        try:
            step()
        except TypeError:
            step(self._env, self._env.system)

    def advance(self):
        """Run one iteration cycle (all iterative steps, in order)."""
        for step in self._steps:
            self._call_step(step)

    def _run_init(self):
        """Run the init steps once and reset the iteration counter."""
        for step in self._init_steps:
            self._call_step(step)
        self._env.iteration = 0

    def _run(self):
        """Iterate until env.bconverged; warn when MAXITER is exhausted."""
        for _ in range(1, self._env.MAXITER):
            self._env.iteration += 1
            self.advance()
            if self._env.bconverged:
                break
        else:
            warnings.warn("Max number of iteration reached.")

    def solve(self):
        """Run initialization followed by the iterative loop; return the env."""
        self._run_init()
        self._run()
        return self._env
class DIIS:
    """Direct Inversion in the Iterative Subspace (Pulay mixing).

    Keeps a history of trial matrices and their residuals, and replaces the
    current matrix in the environment by the linear combination that
    minimizes the residual norm (solving the Pulay equations).
    Subclasses must implement :meth:`compute_residual` and
    :meth:`check_convergence`.
    """

    def __init__(self, env, P_string: str, max_size: int = None):
        """
        Initialize a DIIS object

        :param env: Environment object to store and retrieve data
        :param P_string: name of the environment attribute holding the matrix
            that DIIS is performed on
        :param max_size: maximum number of P matrices that are remembered
            (default is None, infinite size)
        """
        self._env = env
        self._P_string = P_string
        self._P_queue = deque(maxlen=max_size)
        self._r_queue = deque(maxlen=max_size)
        # Cache of residual-overlap elements keyed by history index pair, so
        # B-matrix entries are not recomputed every iteration.
        # NOTE(review): with a finite max_size the deques drop old entries and
        # these index-keyed cache values go stale — confirm intended usage.
        self._B_dict = {}

    def _add_P(self):
        """Snapshot the current matrix from the environment, plus its residual."""
        self._P_queue.append(getattr(self._env, self._P_string))
        self._add_r()

    def _add_r(self):
        """Compute the current residual and append it to the history."""
        self._r_queue.append(self.compute_residual())

    def compute_rmsd(self):
        """Squared Frobenius norm of the most recent residual."""
        return np.einsum("ij,ij->", self._r_queue[-1], self._r_queue[-1])

    @abstractmethod
    def compute_residual(self):
        """Return the residual matrix for the current environment state."""

    @abstractmethod
    def check_convergence(self):
        """Return True when the iteration has converged."""

    def diis_tensor(self):
        """Solve the Pulay equations and write the extrapolated matrix back to
        the environment (no-op on the very first iteration)."""
        if self._env.iteration < 1:
            return
        # Build the overlap matrix B of the residual history; it is symmetric,
        # so compute the upper triangle once and mirror it.
        n = len(self._r_queue)
        B = np.zeros((n, n))
        for i in range(n):
            for j in range(i, n):
                if (i, j) in self._B_dict:
                    overlap = self._B_dict[(i, j)]
                else:
                    overlap = np.einsum("ij,ij->", self._r_queue[i], self._r_queue[j])
                    self._B_dict[(i, j)] = overlap
                    self._B_dict[(j, i)] = overlap
                B[i][j] = overlap
                B[j][i] = overlap

        # Augment with the Lagrange-multiplier row/column that enforces the
        # constraint sum(c) = 1, then solve the Pulay system B_lagrange c = rhs.
        B_lagrange = np.zeros((n + 1, n + 1))
        B_lagrange[:n, :n] = B
        B_lagrange[n, :n] = -1
        B_lagrange[:n, n] = -1
        rhs_pulay = np.zeros(n + 1)
        rhs_pulay[n] = -1
        C_pulay = sparse_linalg.lsmr(B_lagrange, rhs_pulay)[0]

        # Extrapolated matrix: coefficient-weighted sum of the P history.
        extrapolated = sum(c_p * P_i for c_p, P_i in zip(C_pulay, self._P_queue))
        setattr(self._env, self._P_string, extrapolated)

    def diis_step(self):
        """One DIIS cycle: record P and its residual, test convergence, and
        extrapolate a new P."""
        self._add_P()
        setattr(self._env, f"rmsd_{self._P_string}", self.compute_rmsd())
        if self.check_convergence():
            # Bug fix: the old code evaluated ``self._env.bconverged`` without
            # assigning it, so convergence was never actually flagged.
            self._env.bconverged = True
        self.diis_tensor()
/Marl-Factory-Grid-0.1.2.tar.gz/Marl-Factory-Grid-0.1.2/marl_factory_grid/modules/destinations/entitites.py | from collections import defaultdict
from marl_factory_grid.environment.entity.agent import Agent
from marl_factory_grid.environment.entity.entity import Entity
from marl_factory_grid.environment import constants as c
from marl_factory_grid.environment.entity.mixin import BoundEntityMixin
from marl_factory_grid.utils.render import RenderEntity
from marl_factory_grid.modules.destinations import constants as d
class Destination(Entity):
    """A map tile agents must reach (and optionally dwell on) to fulfil it."""

    var_can_move = False
    var_can_collide = False
    var_has_position = True
    var_is_blocking_pos = False
    var_is_blocking_light = False

    @property
    def any_agent_has_dwelled(self):
        """True once at least one agent has started dwelling here."""
        return len(self._per_agent_times) > 0

    @property
    def currently_dwelling_names(self):
        """Names of the agents that currently have dwell progress here."""
        return list(self._per_agent_times)

    @property
    def encoding(self):
        return d.DEST_SYMBOL

    def __init__(self, *args, dwell_time: int = 0, **kwargs):
        super().__init__(*args, **kwargs)
        self.dwell_time = dwell_time
        # Remaining dwell ticks per agent name; new agents start at dwell_time.
        self._per_agent_times = defaultdict(lambda: dwell_time)

    def do_wait_action(self, agent: Agent):
        """Count one dwell tick for *agent*; always a valid action."""
        self._per_agent_times[agent.name] -= 1
        return c.VALID

    def leave(self, agent: Agent):
        """Forget *agent*'s dwell progress when it leaves the destination."""
        del self._per_agent_times[agent.name]

    @property
    def is_considered_reached(self):
        """Reached when an agent stands here with no dwell time required, or
        when any agent has dwelled for the full dwell_time."""
        agent_present = any(
            c.AGENT.lower() in guest.name.lower()
            for guest in self.tile.guests_that_can_collide
        )
        if agent_present and not self.dwell_time:
            return True
        return any(remaining == 0 for remaining in self._per_agent_times.values())

    def agent_is_dwelling(self, agent: Agent):
        """True once *agent* has spent at least one tick dwelling here."""
        return self._per_agent_times[agent.name] < self.dwell_time

    def summarize_state(self) -> dict:
        state_summary = super().summarize_state()
        state_summary.update(
            per_agent_times=[
                dict(belongs_to=name, time=remaining)
                for name, remaining in self._per_agent_times.items()
            ],
            dwell_time=self.dwell_time,
        )
        return state_summary

    def render(self):
        return RenderEntity(d.DESTINATION, self.pos)
class BoundDestination(BoundEntityMixin, Destination):
    """A Destination that only the specific entity it is bound to can fulfil."""

    @property
    def encoding(self):
        return d.DEST_SYMBOL

    def __init__(self, entity, *args, **kwargs):
        # Bind first so bound-entity state is available during base-class setup.
        self.bind_to(entity)
        super().__init__(*args, **kwargs)

    @property
    def is_considered_reached(self):
        """Reached when the bound entity stands here with no dwell time
        required, or once it has dwelled for the full dwell_time."""
        agent_at_position = any(self.bound_entity == x for x in self.tile.guests_that_can_collide)
        # Bug fix: the dwell counter stored per agent is a single int; the old
        # code iterated over it (``any(x == 0 for x in self._per_agent_times[...])``),
        # which raises TypeError as soon as that branch is evaluated.  Compare
        # the counter to 0 instead, mirroring Destination.is_considered_reached.
        return (agent_at_position and not self.dwell_time) \
            or self._per_agent_times[self.bound_entity.name] == 0
/BEAT_TEST-0.13.1.tar.gz/BEAT_TEST-0.13.1/econml/dynamic/dml/_dml.py |
import abc
import numpy as np
from warnings import warn
from sklearn.base import clone
from sklearn.model_selection import GroupKFold
from scipy.stats import norm
from sklearn.linear_model import (ElasticNetCV, LassoCV, LogisticRegressionCV)
from ...sklearn_extensions.linear_model import (StatsModelsLinearRegression, WeightedLassoCVWrapper)
from ...sklearn_extensions.model_selection import WeightedStratifiedKFold
from ...dml.dml import _FirstStageWrapper, _FinalWrapper
from ..._cate_estimator import TreatmentExpansionMixin, LinearModelFinalCateEstimatorMixin
from ..._ortho_learner import _OrthoLearner
from ...utilities import (_deprecate_positional, add_intercept,
broadcast_unit_treatments, check_high_dimensional,
cross_product, deprecated, fit_with_groups,
hstack, inverse_onehot, ndim, reshape,
reshape_treatmentwise_effects, shape, transpose,
get_feature_names_or_default, check_input_arrays,
filter_none_kwargs)
def _get_groups_period_filter(groups, n_periods):
group_counts = {}
group_period_filter = {i: [] for i in range(n_periods)}
for i, g in enumerate(groups):
if g not in group_counts:
group_counts[g] = 0
group_period_filter[group_counts[g]].append(i)
group_counts[g] += 1
return group_period_filter
class _DynamicModelNuisance:
"""
Nuisance model fits the model_y and model_t at fit time and at predict time
calculates the residual Y and residual T based on the fitted models and returns
the residuals as two nuisance parameters.
"""
    def __init__(self, model_y, model_t, n_periods):
        # Templates for the outcome (Y) and treatment (T) nuisance models;
        # they are cloned per period (and per period pair) in fit().
        self._model_y = model_y
        self._model_t = model_t
        # Number of time periods per treatment group in the panel.
        self.n_periods = n_periods
def fit(self, Y, T, X=None, W=None, sample_weight=None, groups=None):
"""Fit a series of nuisance models for each period or period pairs."""
assert Y.shape[0] % self.n_periods == 0, \
"Length of training data should be an integer multiple of time periods."
period_filters = _get_groups_period_filter(groups, self.n_periods)
self._model_y_trained = {}
self._model_t_trained = {j: {} for j in np.arange(self.n_periods)}
for t in np.arange(self.n_periods):
self._model_y_trained[t] = clone(self._model_y, safe=False).fit(
self._index_or_None(X, period_filters[t]),
self._index_or_None(
W, period_filters[t]),
Y[period_filters[self.n_periods - 1]])
for j in np.arange(t, self.n_periods):
self._model_t_trained[j][t] = clone(self._model_t, safe=False).fit(
self._index_or_None(X, period_filters[t]),
self._index_or_None(W, period_filters[t]),
T[period_filters[j]])
return self
def predict(self, Y, T, X=None, W=None, sample_weight=None, groups=None):
"""Calculate nuisances for each period or period pairs.
Returns
-------
Y_res : (n, d_y) matrix or vector of length n
Y residuals for each period in panel format.
This shape is required for _OrthoLearner's crossfitting.
T_res : (n, d_t, n_periods) matrix
T residuals for pairs of periods (t, j), where the data is in panel format for t
and in index form for j. For example, the residuals for (t, j) can be retrieved via
T_res[np.arange(n) % n_periods == t, ..., j]. For t < j, the entries of this
matrix are np.nan.
This shape is required for _OrthoLearner's crossfitting.
"""
assert Y.shape[0] % self.n_periods == 0, \
"Length of training data should be an integer multiple of time periods."
period_filters = _get_groups_period_filter(groups, self.n_periods)
Y_res = np.full(Y.shape, np.nan)
T_res = np.full(T.shape + (self.n_periods, ), np.nan)
shape_formatter = self._get_shape_formatter(X, W)
for t in np.arange(self.n_periods):
Y_slice = Y[period_filters[self.n_periods - 1]]
Y_pred = self._model_y_trained[t].predict(
self._index_or_None(X, period_filters[t]),
self._index_or_None(W, period_filters[t]))
Y_res[period_filters[t]] = Y_slice\
- shape_formatter(Y_slice, Y_pred)
for j in np.arange(t, self.n_periods):
T_slice = T[period_filters[j]]
T_pred = self._model_t_trained[j][t].predict(
self._index_or_None(X, period_filters[t]),
self._index_or_None(W, period_filters[t]))
T_res[period_filters[j], ..., t] = T_slice\
- shape_formatter(T_slice, T_pred)
return Y_res, T_res
def score(self, Y, T, X=None, W=None, sample_weight=None, groups=None):
assert Y.shape[0] % self.n_periods == 0, \
"Length of training data should be an integer multiple of time periods."
period_filters = _get_groups_period_filter(groups, self.n_periods)
if hasattr(self._model_y, 'score'):
Y_score = np.full((self.n_periods, ), np.nan)
for t in np.arange(self.n_periods):
Y_score[t] = self._model_y_trained[t].score(
self._index_or_None(X, period_filters[t]),
self._index_or_None(W, period_filters[t]),
Y[period_filters[self.n_periods - 1]])
else:
Y_score = None
if hasattr(self._model_t, 'score'):
T_score = np.full((self.n_periods, self.n_periods), np.nan)
for t in np.arange(self.n_periods):
for j in np.arange(t, self.n_periods):
T_score[j][t] = self._model_t_trained[j][t].score(
self._index_or_None(X, period_filters[t]),
self._index_or_None(W, period_filters[t]),
T[period_filters[j]])
else:
T_score = None
return Y_score, T_score
def _get_shape_formatter(self, X, W):
if (X is None) and (W is None):
return lambda x, x_pred: np.tile(x_pred.reshape(1, -1), (x.shape[0], 1)).reshape(x.shape)
return lambda x, x_pred: x_pred.reshape(x.shape)
def _index_or_None(self, X, filter_idx):
return None if X is None else X[filter_idx]
class _DynamicModelFinal:
    """
    Final model at fit time, fits a residual on residual regression with a heterogeneous coefficient
    that depends on X, i.e.

    .. math ::
        Y - E[Y | X, W] = \\theta(X) \\cdot (T - E[T | X, W]) + \\epsilon

    and at predict time returns :math:`\\theta(X)`. The score method returns the MSE of this final
    residual on residual regression.
    Assumes model final is parametric with no intercept.
    """
    # TODO: update docs

    def __init__(self, model_final, n_periods):
        self._model_final = model_final
        self.n_periods = n_periods
        # One clone of the final model per period, fit backwards in time.
        self._model_final_trained = {k: clone(self._model_final, safe=False) for k in np.arange(n_periods)}

    def fit(self, Y, T, X=None, W=None, Z=None, nuisances=None, sample_weight=None, sample_var=None, groups=None):
        # NOTE: sample weight, sample var are not passed in
        period_filters = _get_groups_period_filter(groups, self.n_periods)
        Y_res, T_res = nuisances
        self._d_y = Y.shape[1:]
        # Fit period models backwards (t = n_periods-1 ... 0): each earlier
        # period's target is the outcome residual minus the contribution
        # already explained by the later-period models.
        for t in np.arange(self.n_periods - 1, -1, -1):
            Y_adj = Y_res[period_filters[t]].copy()
            if t < self.n_periods - 1:
                Y_adj -= np.sum(
                    [self._model_final_trained[j].predict_with_res(
                        X[period_filters[0]] if X is not None else None,
                        T_res[period_filters[j], ..., t]
                    ) for j in np.arange(t + 1, self.n_periods)], axis=0)
            # Only first-period X is used for heterogeneity.
            self._model_final_trained[t].fit(
                X[period_filters[0]] if X is not None else None, T[period_filters[t]],
                T_res[period_filters[t], ..., t], Y_adj)
        return self

    def predict(self, X=None):
        """
        Return shape: m x dy x (p*dt)
        """
        d_t_tuple = self._model_final_trained[0]._d_t
        d_t = d_t_tuple[0] if d_t_tuple else 1
        x_dy_shape = (X.shape[0] if X is not None else 1, ) + \
            self._model_final_trained[0]._d_y
        preds = np.zeros(
            x_dy_shape +
            (self.n_periods * d_t, )
        )
        # Concatenate each period's effect estimates along the last axis.
        for t in range(self.n_periods):
            preds[..., t * d_t: (t + 1) * d_t] = \
                self._model_final_trained[t].predict(X).reshape(
                    x_dy_shape + (d_t, )
                )
        return preds

    def score(self, Y, T, X=None, W=None, Z=None, nuisances=None, sample_weight=None, sample_var=None, groups=None):
        """Return the per-period MSE of the residual-on-residual regression."""
        assert Y.shape[0] % self.n_periods == 0, \
            "Length of training data should be an integer multiple of time periods."
        Y_res, T_res = nuisances
        scores = np.full((self.n_periods, ), np.nan)
        period_filters = _get_groups_period_filter(groups, self.n_periods)
        for t in np.arange(self.n_periods - 1, -1, -1):
            # Same backward adjustment of the outcome residual as in `fit`.
            Y_adj = Y_res[period_filters[t]].copy()
            if t < self.n_periods - 1:
                Y_adj -= np.sum(
                    [self._model_final_trained[j].predict_with_res(
                        X[period_filters[0]] if X is not None else None,
                        T_res[period_filters[j], ..., t]
                    ) for j in np.arange(t + 1, self.n_periods)], axis=0)
            Y_adj_pred = self._model_final_trained[t].predict_with_res(
                X[period_filters[0]] if X is not None else None,
                T_res[period_filters[t], ..., t])
            if sample_weight is not None:
                scores[t] = np.mean(np.average((Y_adj - Y_adj_pred)**2, weights=sample_weight, axis=0))
            else:
                scores[t] = np.mean((Y_adj - Y_adj_pred) ** 2)
        return scores
class _LinearDynamicModelFinal(_DynamicModelFinal):
    """Wrapper for the DynamicModelFinal with StatsModelsLinearRegression final model.

    The final model is a linear model with (d_t*n_periods) coefficients.
    This model is defined after the coefficients and covariance are calculated.
    """

    def __init__(self, model_final, n_periods):
        super().__init__(model_final, n_periods)
        # Composite linear model over all periods; populated in `fit`.
        self.model_final_ = StatsModelsLinearRegression(fit_intercept=False)

    def fit(self, Y, T, X=None, W=None, Z=None, nuisances=None, sample_weight=None, sample_var=None, groups=None):
        super().fit(Y, T, X=X, W=W, Z=Z, nuisances=nuisances,
                    sample_weight=sample_weight, sample_var=sample_var, groups=groups)
        # Compose final model
        cov = self._get_cov(nuisances, X, groups)
        coef = self._get_coef_()
        self.model_final_._n_out = self._d_y[0] if self._d_y else 0
        # Y.shape[0] / n_periods is the number of panel groups.
        self.model_final_._param_var = cov / (Y.shape[0] / self.n_periods)
        self.model_final_._param = coef.T if self.model_final_._n_out else coef

    def _get_coef_(self):
        # Stack per-period coefficient vectors into one flat
        # (n_periods * d_t)-long parameter vector (per outcome dimension
        # when the outcome is multi-dimensional).
        period_coefs = np.array([self._model_final_trained[t]._model.coef_ for t in range(self.n_periods)])
        if self._d_y:
            return np.array([
                np.array([period_coefs[k, i, :] for k in range(self.n_periods)]).flatten()
                for i in range(self._d_y[0])
            ])
        return period_coefs.flatten()

    def _get_cov(self, nuisances, X, groups):
        # One covariance matrix per outcome dimension; -1 flags a scalar outcome.
        if self._d_y:
            return np.array(
                [self._fit_single_output_cov((nuisances[0][:, i], nuisances[1]), X, i, groups)
                 for i in range(self._d_y[0])]
            )
        return self._fit_single_output_cov(nuisances, X, -1, groups)

    def _fit_single_output_cov(self, nuisances, X, y_index, groups):
        """ Calculates the covariance (n_periods*n_treatments)
        x (n_periods*n_treatments) matrix for a single outcome.
        """
        Y_res, T_res = nuisances
        # Calculate auxiliary quantities
        period_filters = _get_groups_period_filter(groups, self.n_periods)
        # X ⨂ T_res
        XT_res = np.array([
            [
                self._model_final_trained[0]._combine(
                    X[period_filters[0]] if X is not None else None,
                    T_res[period_filters[t], ..., j],
                    fitting=False
                )
                for j in range(self.n_periods)
            ]
            for t in range(self.n_periods)
        ])
        d_xt = XT_res.shape[-1]
        # sum(model_final.predict(X, T_res))
        Y_diff = np.array([
            np.sum([
                self._model_final_trained[j].predict_with_res(
                    X[period_filters[0]] if X is not None else None,
                    T_res[period_filters[j], ..., t]
                ) for j in np.arange(t, self.n_periods)],
                axis=0
            )
            for t in np.arange(self.n_periods)
        ])
        # Sandwich-form covariance J^{-1} Sigma J^{-T}, assembled block by
        # block over period pairs (t, j); each block is d_xt x d_xt.
        J = np.zeros((self.n_periods * d_xt,
                      self.n_periods * d_xt))
        Sigma = np.zeros((self.n_periods * d_xt,
                          self.n_periods * d_xt))
        for t in np.arange(self.n_periods):
            # Final-stage residual epsilon for period t, shaped for broadcasting.
            res_epsilon_t = (Y_res[period_filters[t]] -
                             (Y_diff[t][:, y_index] if y_index >= 0 else Y_diff[t])
                             ).reshape(-1, 1, 1)
            resT_t = XT_res[t][t]
            for j in np.arange(self.n_periods):
                # Calculating the (t, j) block entry (of size n_treatments x n_treatments) of matrix Sigma
                res_epsilon_j = (Y_res[period_filters[j]] -
                                 (Y_diff[j][:, y_index] if y_index >= 0 else Y_diff[j])
                                 ).reshape(-1, 1, 1)
                resT_j = XT_res[j][j]
                cov_resT_tj = resT_t.reshape(-1, d_xt, 1) @ resT_j.reshape(-1, 1, d_xt)
                sigma_tj = np.mean((res_epsilon_t * res_epsilon_j) * cov_resT_tj, axis=0)
                Sigma[t * d_xt:(t + 1) * d_xt,
                      j * d_xt:(j + 1) * d_xt] = sigma_tj
                if j >= t:
                    # Calculating the (t, j) block entry (of size n_treatments x n_treatments) of matrix J
                    m_tj = np.mean(
                        XT_res[j][t].reshape(-1, d_xt, 1) @ resT_t.reshape(-1, 1, d_xt),
                        axis=0)
                    J[t * d_xt:(t + 1) * d_xt,
                      j * d_xt:(j + 1) * d_xt] = m_tj
        return np.linalg.inv(J) @ Sigma @ np.linalg.inv(J).T
class _DynamicFinalWrapper(_FinalWrapper):
    """Final-model wrapper that can predict directly from treatment residuals."""

    def predict_with_res(self, X, T_res):
        # Build the composite (featurized X, T_res) design and predict on it.
        features = self._combine(X, T_res, fitting=False)
        raw = self._model.predict(features)
        # The intercept (when fitted) is not part of the effect itself.
        if self._intercept is not None:
            raw = raw - self._intercept
        return reshape(raw, (raw.shape[0],) + self._d_y)
class DynamicDML(LinearModelFinalCateEstimatorMixin, _OrthoLearner):
    """CATE estimator for dynamic treatment effect estimation.

    This estimator is an extension of the Double ML approach for treatments assigned sequentially
    over time periods.

    The estimator is a special case of an :class:`_OrthoLearner` estimator, so it follows the two
    stage process, where a set of nuisance functions are estimated in the first stage in a crossfitting
    manner and a final stage estimates the CATE model. See the documentation of
    :class:`._OrthoLearner` for a description of this two stage process.

    Parameters
    ----------
    model_y: estimator or 'auto', optional (default is 'auto')
        The estimator for fitting the response to the features. Must implement
        `fit` and `predict` methods.
        If 'auto' :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` will be chosen.

    model_t: estimator or 'auto', optional (default is 'auto')
        The estimator for fitting the treatment to the features.
        If estimator, it must implement `fit` and `predict` methods;
        If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` will be applied for discrete treatment,
        and :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV`
        will be applied for continuous treatment.

    featurizer : :term:`transformer`, optional, default None
        Must support fit_transform and transform. Used to create composite features in the final CATE regression.
        It is ignored if X is None. The final CATE will be trained on the outcome of featurizer.fit_transform(X).
        If featurizer=None, then CATE is trained on X.

    fit_cate_intercept : bool, optional, default True
        Whether the linear CATE model should have a constant term.

    linear_first_stages: bool
        Whether the first stage models are linear (in which case we will expand the features passed to
        `model_y` accordingly)

    discrete_treatment: bool, optional (default is ``False``)
        Whether the treatment values should be treated as categorical, rather than continuous, quantities

    categories: 'auto' or list, default 'auto'
        The categories to use when encoding discrete treatments (or 'auto' to use the unique sorted values).
        The first category will be treated as the control treatment.

    cv: int, cross-validation generator or an iterable, optional (Default=2)
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - :term:`CV splitter`
        - An iterable yielding (train, test) splits as arrays of indices.
          Iterables should make sure a group belongs to a single split.

        For integer/None inputs, :class:`~sklearn.model_selection.GroupKFold` is used

        Unless an iterable is used, we call `split(X, T, groups)` to generate the splits.

    mc_iters: int, optional (default=None)
        The number of times to rerun the first stage models to reduce the variance of the nuisances.

    mc_agg: {'mean', 'median'}, optional (default='mean')
        How to aggregate the nuisance value for each sample across the `mc_iters` monte carlo iterations of
        cross-fitting.

    random_state: int, :class:`~numpy.random.mtrand.RandomState` instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If :class:`~numpy.random.mtrand.RandomState` instance, random_state is the random number generator;
        If None, the random number generator is the :class:`~numpy.random.mtrand.RandomState` instance used
        by :mod:`np.random<numpy.random>`.

    Examples
    --------
    A simple example with default models:

    .. testcode::
        :hide:

        import numpy as np
        np.set_printoptions(suppress=True)

    .. testcode::

        from econml.dynamic.dml import DynamicDML

        np.random.seed(123)

        n_panels = 100  # number of panels
        n_periods = 3  # number of time periods per panel
        n = n_panels * n_periods
        groups = np.repeat(a=np.arange(n_panels), repeats=n_periods, axis=0)
        X = np.random.normal(size=(n, 1))
        T = np.random.normal(size=(n, 2))
        y = np.random.normal(size=(n, ))
        est = DynamicDML()
        est.fit(y, T, X=X, W=None, groups=groups, inference="auto")

    >>> est.const_marginal_effect(X[:2])
    array([[-0.336..., -0.048..., -0.061...,  0.042..., -0.204...,
         0.00667271],
       [-0.101...,  0.433...,  0.054..., -0.217..., -0.101...,
        -0.159...]])
    >>> est.effect(X[:2], T0=0, T1=1)
    array([-0.601..., -0.091...])
    >>> est.effect(X[:2], T0=np.zeros((2, n_periods*T.shape[1])), T1=np.ones((2, n_periods*T.shape[1])))
    array([-0.601..., -0.091...])
    >>> est.coef_
    array([[ 0.112...],
       [ 0.231...],
       [ 0.055...],
       [-0.125...],
       [ 0.049...],
       [-0.079...]])
    >>> est.coef__interval()
    (array([[-0.063...],
       [-0.009...],
       [-0.114...],
       [-0.413...],
       [-0.117...],
       [-0.262...]]), array([[0.289...],
       [0.471...],
       [0.225...],
       [0.163...],
       [0.216...],
       [0.103...]]))
    """

    def __init__(self, *,
                 model_y='auto', model_t='auto',
                 featurizer=None,
                 fit_cate_intercept=True,
                 linear_first_stages=False,
                 discrete_treatment=False,
                 categories='auto',
                 cv=2,
                 mc_iters=None,
                 mc_agg='mean',
                 random_state=None):
        self.fit_cate_intercept = fit_cate_intercept
        self.linear_first_stages = linear_first_stages
        self.featurizer = clone(featurizer, safe=False)
        self.model_y = clone(model_y, safe=False)
        self.model_t = clone(model_t, safe=False)
        super().__init__(discrete_treatment=discrete_treatment,
                         discrete_instrument=False,
                         categories=categories,
                         cv=GroupKFold(cv) if isinstance(cv, int) else cv,
                         mc_iters=mc_iters,
                         mc_agg=mc_agg,
                         random_state=random_state)

    def _gen_featurizer(self):
        # Fresh copy so repeated fits don't share fitted featurizer state.
        return clone(self.featurizer, safe=False)

    def _gen_model_y(self):
        """Instantiate the first-stage outcome model (wrapped for the nuisance API)."""
        if self.model_y == 'auto':
            model_y = WeightedLassoCVWrapper(random_state=self.random_state)
        else:
            model_y = clone(self.model_y, safe=False)
        return _FirstStageWrapper(model_y, True, self._gen_featurizer(),
                                  self.linear_first_stages, self.discrete_treatment)

    def _gen_model_t(self):
        """Instantiate the first-stage treatment model (wrapped for the nuisance API)."""
        if self.model_t == 'auto':
            if self.discrete_treatment:
                model_t = LogisticRegressionCV(cv=WeightedStratifiedKFold(random_state=self.random_state),
                                               random_state=self.random_state)
            else:
                model_t = WeightedLassoCVWrapper(random_state=self.random_state)
        else:
            model_t = clone(self.model_t, safe=False)
        return _FirstStageWrapper(model_t, False, self._gen_featurizer(),
                                  self.linear_first_stages, self.discrete_treatment)

    def _gen_model_final(self):
        # The final stage is always an intercept-free linear model.
        return StatsModelsLinearRegression(fit_intercept=False)

    def _gen_ortho_learner_model_nuisance(self, n_periods):
        return _DynamicModelNuisance(
            model_t=self._gen_model_t(),
            model_y=self._gen_model_y(),
            n_periods=n_periods)

    def _gen_ortho_learner_model_final(self, n_periods):
        wrapped_final_model = _DynamicFinalWrapper(
            StatsModelsLinearRegression(fit_intercept=False),
            fit_cate_intercept=self.fit_cate_intercept,
            featurizer=self.featurizer,
            use_weight_trick=False)
        return _LinearDynamicModelFinal(wrapped_final_model, n_periods=n_periods)

    def _prefit(self, Y, T, *args, groups=None, only_final=False, **kwargs):
        """Infer the (common) number of periods from `groups` and build the stage models."""
        u_periods = np.unique(np.unique(groups, return_counts=True)[1])
        if len(u_periods) > 1:
            raise AttributeError(
                "Imbalanced panel. Method currently expects only panels with equal number of periods. Pad your data")
        self._n_periods = u_periods[0]
        # generate an instance of the final model
        self._ortho_learner_model_final = self._gen_ortho_learner_model_final(self._n_periods)
        if not only_final:
            # generate an instance of the nuisance model
            self._ortho_learner_model_nuisance = self._gen_ortho_learner_model_nuisance(self._n_periods)
        TreatmentExpansionMixin._prefit(self, Y, T, *args, **kwargs)

    def _postfit(self, Y, T, *args, **kwargs):
        super()._postfit(Y, T, *args, **kwargs)
        # Set _d_t to effective number of treatments
        self._d_t = (self._n_periods * self._d_t[0], ) if self._d_t else (self._n_periods, )

    def _strata(self, Y, T, X=None, W=None, Z=None,
                sample_weight=None, sample_var=None, groups=None,
                cache_values=False, only_final=False, check_input=True):
        # Required for bootstrap inference
        return groups

    def fit(self, Y, T, *, X=None, W=None, sample_weight=None, sample_var=None, groups,
            cache_values=False, inference='auto'):
        """Estimate the counterfactual model from data, i.e. estimates function :math:`\\theta(\\cdot)`.

        The input data must contain groups with the same size corresponding to the number
        of time periods the treatments were assigned over.

        The data should be preferably in panel format, with groups clustered together.
        If group members do not appear together, the following is assumed:

        * the first instance of a group in the dataset is assumed to correspond to the first period of that group
        * the second instance of a group in the dataset is assumed to correspond to the
          second period of that group

        ...etc.

        Only the value of the features X at the first period of each unit are used for
        heterogeneity. The value of X in subseuqnet periods is used as a time-varying control
        but not for heterogeneity.

        Parameters
        ----------
        Y: (n, d_y) matrix or vector of length n
            Outcomes for each sample (required: n = n_groups * n_periods)
        T: (n, d_t) matrix or vector of length n
            Treatments for each sample (required: n = n_groups * n_periods)
        X: optional(n, d_x) matrix or None (Default=None)
            Features for each sample (Required: n = n_groups * n_periods). Only first
            period features from each unit are used for heterogeneity, the rest are
            used as time-varying controls together with W
        W: optional(n, d_w) matrix or None (Default=None)
            Controls for each sample (Required: n = n_groups * n_periods)
        sample_weight: optional(n,) vector or None (Default=None)
            Weights for each samples
        sample_var: optional(n,) vector or None (Default=None)
            Sample variance for each sample
        groups: (n,) vector, required
            All rows corresponding to the same group will be kept together during splitting.
            If groups is not None, the `cv` argument passed to this class's initializer
            must support a 'groups' argument to its split method.
        cache_values: bool, default False
            Whether to cache inputs and first stage results, which will allow refitting a different final model
        inference: string,:class:`.Inference` instance, or None
            Method for performing inference. This estimator supports 'bootstrap'
            (or an instance of :class:`.BootstrapInference`) and 'auto'
            (or an instance of :class:`.LinearModelFinalInference`).

        Returns
        -------
        self: DynamicDML instance
        """
        if sample_weight is not None or sample_var is not None:
            warn("This CATE estimator does not yet support sample weights and sample variance. "
                 "These inputs will be ignored during fitting.",
                 UserWarning)
        return super().fit(Y, T, X=X, W=W,
                           sample_weight=None, sample_var=None, groups=groups,
                           cache_values=cache_values,
                           inference=inference)

    def score(self, Y, T, X=None, W=None, sample_weight=None, *, groups):
        """
        Score the fitted CATE model on a new data set. Generates nuisance parameters
        for the new data set based on the fitted residual nuisance models created at fit time.
        It uses the mean prediction of the models fitted by the different crossfit folds.
        Then calculates the MSE of the final residual Y on residual T regression.

        If model_final does not have a score method, then it raises an :exc:`.AttributeError`

        Parameters
        ----------
        Y: (n, d_y) matrix or vector of length n
            Outcomes for each sample (required: n = n_groups * n_periods)
        T: (n, d_t) matrix or vector of length n
            Treatments for each sample (required: n = n_groups * n_periods)
        X: optional(n, d_x) matrix or None (Default=None)
            Features for each sample (Required: n = n_groups * n_periods)
        W: optional(n, d_w) matrix or None (Default=None)
            Controls for each sample (Required: n = n_groups * n_periods)
        groups: (n,) vector, required
            All rows corresponding to the same group will be kept together during splitting.

        Returns
        -------
        score: float
            The MSE of the final CATE model on the new data.
        """
        if not hasattr(self._ortho_learner_model_final, 'score'):
            raise AttributeError("Final model does not have a score method!")
        Y, T, X, W, groups = check_input_arrays(Y, T, X, W, groups)
        self._check_fitted_dims(X)
        X, T = super()._expand_treatments(X, T)
        n_iters = len(self._models_nuisance)
        n_splits = len(self._models_nuisance[0])
        # for each mc iteration
        for i, models_nuisances in enumerate(self._models_nuisance):
            # for each model under cross fit setting
            for j, mdl in enumerate(models_nuisances):
                nuisance_temp = mdl.predict(Y, T, **filter_none_kwargs(X=X, W=W, groups=groups))
                if not isinstance(nuisance_temp, tuple):
                    nuisance_temp = (nuisance_temp,)
                if i == 0 and j == 0:
                    nuisances = [np.zeros((n_iters * n_splits,) + nuis.shape) for nuis in nuisance_temp]
                for it, nuis in enumerate(nuisance_temp):
                    # BUG FIX: the flat index into the (n_iters, n_splits) grid
                    # is i * n_splits + j; the previous i * n_iters + j
                    # misplaced/overwrote entries whenever n_iters != n_splits.
                    nuisances[it][i * n_splits + j] = nuis
        for it in range(len(nuisances)):
            nuisances[it] = np.mean(nuisances[it], axis=0)
        return self._ortho_learner_model_final.score(Y, T, nuisances=nuisances,
                                                     **filter_none_kwargs(X=X, W=W,
                                                                          sample_weight=sample_weight, groups=groups))

    def cate_treatment_names(self, treatment_names=None):
        """
        Get treatment names for each time period.

        If the treatment is discrete, it will return expanded treatment names.

        Parameters
        ----------
        treatment_names: list of strings of length T.shape[1] or None
            The names of the treatments. If None and the T passed to fit was a dataframe,
            it defaults to the column names from the dataframe.

        Returns
        -------
        out_treatment_names: list of strings
            Returns (possibly expanded) treatment names.
        """
        slice_treatment_names = super().cate_treatment_names(treatment_names)
        treatment_names_out = []
        for k in range(self._n_periods):
            treatment_names_out += [f"({t})$_{k}$" for t in slice_treatment_names]
        return treatment_names_out

    def cate_feature_names(self, feature_names=None):
        """
        Get the output feature names.

        Parameters
        ----------
        feature_names: list of strings of length X.shape[1] or None
            The names of the input features. If None and X is a dataframe, it defaults to the column names
            from the dataframe.

        Returns
        -------
        out_feature_names: list of strings or None
            The names of the output features :math:`\\phi(X)`, i.e. the features with respect to which the
            final constant marginal CATE model is linear. It is the names of the features that are associated
            with each entry of the :meth:`coef_` parameter. Not available when the featurizer is not None and
            does not have a method: `get_feature_names(feature_names)`. Otherwise None is returned.
        """
        if self._d_x is None:
            # Handles the corner case when X=None but featurizer might be not None
            return None
        if feature_names is None:
            feature_names = self._input_names["feature_names"]
        if self.original_featurizer is None:
            return feature_names
        return get_feature_names_or_default(self.original_featurizer, feature_names)

    def _expand_treatments(self, X, *Ts):
        # Expand treatments for each time period
        outTs = []
        base_expand_treatments = super()._expand_treatments
        for T in Ts:
            if ndim(T) == 0:
                # Scalar treatment: expand once, then replicate across periods.
                one_T = base_expand_treatments(X, T)[1]
                one_T = one_T.reshape(-1, 1) if ndim(one_T) == 1 else one_T
                T = np.tile(one_T, (1, self._n_periods, ))
            else:
                assert (T.shape[1] == self._n_periods if self.transformer else T.shape[1] == self._d_t[0]), \
                    f"Expected a list of time period * d_t, instead got a treatment array of shape {T.shape}."
                if self.transformer:
                    T = np.hstack([
                        base_expand_treatments(
                            X, T[:, [t]])[1] for t in range(self._n_periods)
                    ])
            outTs.append(T)
        return (X,) + tuple(outTs)

    @property
    def bias_part_of_coef(self):
        return self.ortho_learner_model_final_._model_final._fit_cate_intercept

    @property
    def fit_cate_intercept_(self):
        return self.ortho_learner_model_final_._model_final._fit_cate_intercept

    @property
    def original_featurizer(self):
        # NOTE: important to use the _ortho_learner_model_final_ attribute instead of the
        # attribute so that the trained featurizer will be passed through
        return self.ortho_learner_model_final_._model_final_trained[0]._original_featurizer

    @property
    def featurizer_(self):
        # NOTE This is used by the inference methods and has to be the overall featurizer. intended
        # for internal use by the library
        return self.ortho_learner_model_final_._model_final_trained[0]._featurizer

    @property
    def model_final_(self):
        # NOTE This is used by the inference methods and is more for internal use to the library
        # We need to use the _ortho_learner's copy to retain the information from fitting
        return self.ortho_learner_model_final_.model_final_

    @property
    def model_final(self):
        return self._gen_model_final()

    @model_final.setter
    def model_final(self, model):
        if model is not None:
            raise ValueError("Parameter `model_final` cannot be altered for this estimator!")

    @property
    def models_y(self):
        """Nested list (per mc iteration, per fold) of the fitted outcome models."""
        return [[mdl._model_y for mdl in mdls] for mdls in super().models_nuisance_]

    @property
    def models_t(self):
        """Nested list (per mc iteration, per fold) of the fitted treatment models."""
        return [[mdl._model_t for mdl in mdls] for mdls in super().models_nuisance_]

    @property
    def nuisance_scores_y(self):
        return self.nuisance_scores_[0]

    @property
    def nuisance_scores_t(self):
        return self.nuisance_scores_[1]

    @property
    def residuals_(self):
        """
        A tuple (y_res, T_res, X, W), of the residuals from the first stage estimation
        along with the associated X and W. Samples are not guaranteed to be in the same
        order as the input order.
        """
        if not hasattr(self, '_cached_values'):
            raise AttributeError("Estimator is not fitted yet!")
        if self._cached_values is None:
            raise AttributeError("`fit` was called with `cache_values=False`. "
                                 "Set to `True` to enable residual storage.")
        Y_res, T_res = self._cached_values.nuisances
        # (A stray "| PypiClean" dataset-residue token was removed from the
        # return expression below; it would have raised NameError at runtime.)
        return Y_res, T_res, self._cached_values.X, self._cached_values.W
/Flask-TinyMCE-1.0.0.tar.gz/Flask-TinyMCE-1.0.0/flask_tinymce/static/plugins/insertdatetime/plugin.min.js | !function(){"use strict";function l(e){return e.getParam("insertdatetime_timeformat",e.translate("%H:%M:%S"))}function s(e){return e.getParam("insertdatetime_formats",["%H:%M:%S","%Y-%m-%d","%I:%M:%S %p","%D"])}function r(e,t){if((e=""+e).length<t)for(var n=0;n<t-e.length;n++)e="0"+e;return e}function d(e,t,n){return void 0===n&&(n=new Date),(t=(t=(t=(t=(t=(t=(t=(t=(t=(t=(t=(t=(t=(t=(t=t.replace("%D","%m/%d/%Y")).replace("%r","%I:%M:%S %p")).replace("%Y",""+n.getFullYear())).replace("%y",""+n.getYear())).replace("%m",r(n.getMonth()+1,2))).replace("%d",r(n.getDate(),2))).replace("%H",""+r(n.getHours(),2))).replace("%M",""+r(n.getMinutes(),2))).replace("%S",""+r(n.getSeconds(),2))).replace("%I",""+((n.getHours()+11)%12+1))).replace("%p",n.getHours()<12?"AM":"PM")).replace("%B",""+e.translate(u[n.getMonth()]))).replace("%b",""+e.translate(o[n.getMonth()]))).replace("%A",""+e.translate(i[n.getDay()]))).replace("%a",""+e.translate(a[n.getDay()]))).replace("%%","%")}function p(e,t){var n,r,a,i,o,u;e.getParam("insertdatetime_element",!1)?(n=d(e,t),r=/%[HMSIp]/.test(t)?d(e,"%Y-%m-%dT%H:%M"):d(e,"%Y-%m-%d"),(a=e.dom.getParent(e.selection.getStart(),"time"))?(o=a,u=(i=e).dom.create("time",{datetime:r},n),o.parentNode.insertBefore(u,o),i.dom.remove(o),i.selection.select(u,!0),i.selection.collapse(!1)):e.insertContent('<time datetime="'+r+'">'+n+"</time>")):e.insertContent(d(e,t))}var e=tinymce.util.Tools.resolve("tinymce.PluginManager"),a="Sun Mon Tue Wed Thu Fri Sat Sun".split(" "),i="Sunday Monday Tuesday Wednesday Thursday Friday Saturday Sunday".split(" "),o="Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec".split(" "),u="January February March April May June July August September October November December".split(" "),g=tinymce.util.Tools.resolve("tinymce.util.Tools");e.add("insertdatetime",function(e){var n,r,t,a,i,o,u,c;function m(e){return 
r.execCommand("mceInsertDate",!1,e)}(n=e).addCommand("mceInsertDate",function(e,t){p(n,null!=t?t:n.getParam("insertdatetime_dateformat",n.translate("%Y-%m-%d")))}),n.addCommand("mceInsertTime",function(e,t){p(n,null!=t?t:l(n))}),u=s(r=e),t=0<(o=s(i=r)).length?o[0]:l(i),a=t,c={get:function(){return a},set:function(e){a=e}},r.ui.registry.addSplitButton("insertdatetime",{icon:"insert-time",tooltip:"Insert date/time",select:function(e){return e===c.get()},fetch:function(e){e(g.map(u,function(e){return{type:"choiceitem",text:d(r,e),value:e}}))},onAction:function(e){m(c.get())},onItemAction:function(e,t){c.set(t),m(t)}}),r.ui.registry.addNestedMenuItem("insertdatetime",{icon:"insert-time",text:"Date/time",getSubmenuItems:function(){return g.map(u,function(e){return{type:"menuitem",text:d(r,e),onAction:(t=e,function(){c.set(t),m(t)})};var t})}})})}(); | PypiClean |
/functions/structural_holes/HAM.py | __all__ = [
"get_structural_holes_HAM"
]
import sys
import numpy as np
import json, os
import scipy.sparse as sps
import scipy.linalg as spl
from sklearn import metrics
from scipy.cluster.vq import kmeans, vq, kmeans2
from collections import Counter
eps=2.220446049250313e-16  # float64 machine epsilon (== np.finfo(np.float64).eps)
import scipy.stats as stat
def sym(w):
    """Orthogonalize *w* via the symmetric normalization F = w (wT w)^(-1/2).

    Parameters
    ----------
    w : ndarray
        A random matrix with linearly independent columns.

    Returns
    -------
    F : ndarray
        Matrix of the same shape as *w* with orthonormal columns.
    """
    gram = w.T.dot(w)
    inv_sqrt_gram = spl.inv(spl.sqrtm(gram))
    return w.dot(inv_sqrt_gram)
def avg_entropy(predicted_labels, actual_labels):
    """Size-weighted entropy of the actual labels within each predicted cluster.

    For every predicted community, the distribution of actual labels among its
    members is computed; the natural-log entropy of that distribution is then
    averaged across predicted communities, weighted by community size.

    Parameters
    ----------
    predicted_labels : ndarray
        Predicted community label per node.
    actual_labels : ndarray
        Ground-truth community label per node.

    Returns
    -------
    float
        The average entropy (0 means predicted clusters are pure).
    """
    actual_members = {lab: np.nonzero(actual_labels == lab)[0]
                      for lab in np.unique(actual_labels)}
    predicted_members = {lab: np.nonzero(predicted_labels == lab)[0]
                         for lab in np.unique(predicted_labels)}
    total = len(predicted_labels)
    weighted_sum = 0
    for members in predicted_members.values():
        cluster_size = float(len(members))
        member_set = set(members.tolist())
        # Fraction of this predicted cluster falling in each actual community.
        mix = [len(member_set.intersection(set(other.tolist()))) / cluster_size
               for other in actual_members.values()]
        weighted_sum += stat.entropy(mix) * (cluster_size / float(total))
    return weighted_sum
def load_adj_matrix(G):
    '''
    Transfer the graph into a symmetric sparse adjacency matrix.

    Parameters
    ----------
    G : graph
        An undirected graph whose edges are (u, v) pairs.
        NOTE: node ids are assumed to be 1-based; they are shifted down to
        0-based row/column indices — confirm against the caller.

    Returns
    -------
    A : scipy.sparse.csr_matrix
        The unweighted, symmetric adjacency matrix.
    '''
    # flatten the edge list into [u1, v1, u2, v2, ...] with 0-based ids
    endpoints = []
    for edge in G.edges:
        endpoints.append(edge[0] - 1)
        endpoints.append(edge[1] - 1)
    adj_tuples = np.array(endpoints).reshape(-1, 2)
    # every edge gets unit weight (removed an unused np.unique() node count
    # and the commented-out debug prints of the original)
    vals = np.array([1] * len(G.edges))
    # the matrix is square and must accommodate the largest node id seen
    max_id = max(max(adj_tuples[:, 0]), max(adj_tuples[:, 1])) + 1
    A = sps.csr_matrix((vals, (adj_tuples[:, 0], adj_tuples[:, 1])), shape=(max_id, max_id))
    # symmetrize: the edge list stores each undirected edge only once
    A = A + A.T
    return sps.csr_matrix(A)
def majority_voting(votes):
    '''
    Pick the winning label from a collection of votes.

    A label of 0 means "unlabeled" and is only returned when no positive
    label was cast; with no votes at all, 0 is returned.

    Parameters
    ----------
    votes : an iterable (e.g. ndarray) of vote labels.

    Returns
    -------
    The most common positive label, or 0 if there is none.
    '''
    top_two = Counter(votes).most_common(2)
    if not top_two:
        return 0
    winner = top_two[0][0]
    if winner > 0:
        return winner
    if len(top_two) > 1:
        # the most common vote was 0 ("unlabeled"); fall back to the runner-up
        return top_two[1][0]
    return 0
def label_by_neighbors(AdjMat, labels):
    '''
    Classify still-unlabeled nodes (label 0) by majority voting over their
    neighbors, sweeping repeatedly until every node carries a label.

    Parameters
    ----------
    AdjMat : square adjacency matrix (scipy sparse).
    labels : 1-D ndarray of community labels; 0 marks an unlabeled node.

    Returns
    -------
    labels : the same ndarray, updated in place, with nodes labeled.
        NOTE(review): if no positively-labeled node is reachable the loop
        may never terminate — confirm callers always pass a partial labeling.
    '''
    assert (AdjMat.shape[0] == len(labels)), "dimensions are not equal"
    unlabeled_idx = (labels == 0)
    num_unlabeled = sum(unlabeled_idx)
    # removed the unused `count` variable and commented-out debug prints
    while num_unlabeled > 0:
        idxs = np.array(np.nonzero(unlabeled_idx)[0])
        # votes are collected into a scratch array so that all updates of one
        # sweep are based on the labels of the previous sweep
        next_labels = np.zeros(len(labels))
        for idx in idxs:
            neighbors = np.nonzero(AdjMat[idx, :] > 0)[1]
            if len(neighbors) == 0:
                # isolated node: vote over the global label distribution
                next_labels[idx] = majority_voting(labels)
            else:
                neighbor_labels = labels[neighbors]
                next_labels[idx] = majority_voting(neighbor_labels)
        labels[idxs] = next_labels[idxs]
        unlabeled_idx = (labels == 0)
        num_unlabeled = sum(unlabeled_idx)
    return labels
def get_structural_holes_HAM(G, k, c,ground_truth_labels):
    '''
    using HAM to jointly detect SHS and communities.
    https://dl.acm.org/doi/10.1145/2939672.2939807

    The algorithm alternately refreshes a community indicator matrix F and a
    diagonal reweighting matrix Q; nodes whose rows of F have the smallest
    norm are reported as structural hole spanners (SHS).

    Parameters
    ----------
    G : graph
        An undirected graph.
    k : int
        top - k structural hole spanners
    c : the number of communities
    ground_truth_labels : ground truth labels of nodes.
        NOTE(review): the ``.T[0]`` calls below assume this is a list of
        one-element lists (e.g. [[0], [1], ...]) — TODO confirm.

    Returns
    -------
    a Ndarray of top k nodes as structural hole spanners, and a Ndarray of labeled communities of the nodes.
    '''
    A_mat = load_adj_matrix(G)
    A = A_mat # adjacency matrix
    n = A.shape[0] # the number of nodes
    epsilon = 1e-4 # smoothing value: epsilon
    max_iter = 50 # maximum iteration value
    seeeed = 5433
    # fixed seed so the random initialization of F is reproducible
    np.random.seed(seeeed)
    # print(n,c)
    topk = k
    # invD = sps.diags(np.array(A.sum(axis=0))[0, :] ** (-1.0), 0) # Inv of degree matrix D^-1
    # eps is added so zero-degree rows do not divide by zero
    invD = sps.diags((np.array(A.sum(axis=0))[0, :]+eps) ** (-1.0), 0) # Inv of degree matrix D^-1
    L = (sps.identity(n) - invD.dot(A)).tocsr() # Laplacian matrix L = I - D^-1 * A
    F = sym(np.random.random((n, c))) # Initialize a random orthogonal matrix F
    # Algorithm 1: alternate between the reweighting matrix Q and refreshing F
    for step in range(max_iter):
        Q = sps.identity(n).tocsr()
        P = L.dot(F)
        for i in range(n):
            # each diagonal entry reweights a node by the inverse norm of its
            # row of L.F (epsilon-smoothed)
            Q[i, i] = 0.5 / (spl.norm(P[i, :]) + epsilon)
        R = L.T.dot(Q).dot(L)
        W, V = np.linalg.eigh(R.todense())
        Wsort = np.argsort(W) # sort from smallest to largest
        F = V[:, Wsort[0:c]] # select the smallest eigenvectors
    # find SH spanner: the smaller the row norm of F, the stronger the spanner
    SH = np.zeros((n,))
    for i in range(n):
        SH[i] = np.linalg.norm(F[i, :])
    SHrank = np.argsort(SH) # index of SH
    # print(SHrank[0:topk]+1) # the index starts from 1.
    # METRICS BEGIN (evaluation against the ground truth; results are printed only)
    to_keep_index = np.sort(SHrank[topk:])
    A_temp = A[to_keep_index, :]
    A_temp = A_temp[:, to_keep_index]
    HAM_labels_keep = np.asarray(ground_truth_labels)[to_keep_index]
    allLabels = np.asarray(ground_truth_labels)
    cluster_matrix = F
    # k-means over the embedding rows of the non-spanner nodes
    labelbook, distortion = kmeans(cluster_matrix[to_keep_index, :], c)
    HAM_labels, dist = vq(cluster_matrix[to_keep_index, :], labelbook)
    print("AMI")
    print('HAM: ' + str(metrics.adjusted_mutual_info_score(HAM_labels, HAM_labels_keep.T[0])))
    # classifify SHS using majority voting
    predLabels = np.zeros(len(ground_truth_labels))
    # labels are shifted by +1 so that 0 can mark the still-unlabeled spanners
    predLabels[to_keep_index] = HAM_labels + 1
    # print(predLabels)
    HAM_predLabels = label_by_neighbors(A, predLabels)
    # print(HAM_predLabels)
    print('HAM_all: ' + str(metrics.adjusted_mutual_info_score(HAM_predLabels, allLabels.T[0])))
    print("NMI")
    print('HAM: ' + str(metrics.normalized_mutual_info_score(HAM_labels, HAM_labels_keep.T[0])))
    print('HAM_all: ' + str(metrics.normalized_mutual_info_score(HAM_predLabels, allLabels.T[0])))
    print("Entropy")
    print('HAM: ' + str(avg_entropy(HAM_labels, HAM_labels_keep.T[0])))
    print('HAM_all: ' + str(avg_entropy(HAM_predLabels, allLabels.T[0])))
    # METRICS END
    return SHrank[0:topk]+1, HAM_predLabels
if __name__ == "__main__":
    # Smoke test: run HAM on a hard-coded 34-node, two-community graph
    # (appears to be the Zachary karate-club graph — confirm) and print the
    # detected spanners and the resulting community labels.
    sys.path.append('../../../')
    import ONAP as og
    g = og.classes.Graph()
    # edges1 = [(1, 2), (2, 3), (1, 3), (3, 4), (4, 5), (4, 6), (5, 6)]
    # edges2 = [(3, 7), (4, 7), (10, 7), (11, 7)]
    # edges3 = [(8, 9), (8, 10), (9, 10), (10, 11), (11, 12), (11, 13), (12, 13)]
    # g.add_edges(edges1)
    # g.add_edges(edges2)
    # g.add_edges(edges3)
    # k = 5
    # c = 5
    # ground_truth_labels = [[0], [0], [0], [1], [1], [1], [2], [3], [3], [3], [4], [4], [4]]
    # NOTE: node ids below are 1-based; load_adj_matrix() shifts them to 0-based.
    edges0 = [(1, 32), (1, 2), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (1, 8), (1, 9), (1, 11), (1, 12), (1, 13), (1, 14), (1, 18), (1, 20), (1, 22), (2, 3), (2, 4), (2, 8), (2, 14), (2, 18), (2, 20), (2, 22), (2, 31), (3, 4), (3, 33), (3, 8), (3, 9), (3, 10), (3, 14), (3, 28), (3, 29), (4, 8), (4, 13), (4, 14), (5, 11), (5, 7), (6, 7), (6, 11), (6, 17), (7, 17), (9, 31), (9, 34), (9, 33), (10, 34), (14, 34), (15, 33), (15, 34), (16, 33), (16, 34), (19, 33), (19, 34), (20, 34), (21, 33), (21, 34), (23, 33), (23, 34), (24, 33), (24, 26), (24, 28), (24, 34), (24, 30), (25, 32), (25, 26), (25, 28), (26, 32), (27, 34), (27, 30), (28, 34), (29, 32), (29, 34), (30, 33), (30, 34), (31, 34), (31, 33), (32, 33), (32, 34), (33, 34), ]
    # edges0 = [(0, 31), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (0, 7), (0, 8), (0, 10), (0, 11), (0, 12), (0, 13), (0, 17), (0, 19), (0, 21), (1, 2), (1, 3), (1, 7), (1, 13), (1, 17), (1, 19), (1, 21), (1, 30), (2, 3), (2, 32), (2, 7), (2, 8), (2, 9), (2, 13), (2, 27), (2, 28), (3, 7), (3, 12), (3, 13), (4, 10), (4, 6), (5, 6), (5, 10), (5, 16), (6, 16), (8, 30), (8, 33), (8, 32), (9, 33), (13, 33), (14, 32), (14, 33), (15, 32), (15, 33), (18, 32), (18, 33), (19, 33), (20, 32), (20, 33), (22, 32), (22, 33), (23, 32), (23, 25), (23, 27), (23, 33), (23, 29), (24, 31), (24, 25), (24, 27), (25, 31), (26, 33), (26, 29), (27, 33), (28, 31), (28, 33), (29, 32), (29, 33), (30, 33), (30, 32), (31, 32), (31, 33), (32, 33),]
    g.add_edges(edges0)
    # one single-element list per node, as expected by get_structural_holes_HAM
    ground_truth_labels =[[0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1]]
    # print(ground_truth_labels)
    k = 3 # top-k spanners
    c = len(np.unique(ground_truth_labels))
    # ground_truth_labels=[[1], [1], [1], [1], [1], [1], [1], [1], [1], [2], [2], [2], [2], [2], [2], [2], [2], [2], [2], [2], [2], [3], [3], [3], [3], [3], [3], [3], [3], [3], [3], [3], [3], [3], [3], [4], [4], [4], [4], [4], [4], [4], [4], [5], [5], [5], [5], [5], [5], [5], [5], [5], [5], [5], [5], [5], [5], [5], [5], [5], [5], [5], [5], [5], [5], [5], [5], [5], [5], [5], [5], [5], [5], [5], [5], [5], [5], [5], [5], [5], [5], [5], [5], [5], [5], [5], ]
    # edges=[(13, 18),(17, 11),(18, 17),(12, 18),(17, 14),(12, 21),(16, 12),(11, 19),(12, 20),(13, 17),(16, 20),(13, 16),(16, 22),(13, 14),(21, 13),(19, 17),(22, 13),(20, 17),(22, 14),(17, 21),(12, 22),(14, 21),(14, 11),(21, 20),(15, 16),(17, 12),(19, 22),(11, 18),(12, 15),(16, 18),(17, 16),(19, 12),(14, 19),(31, 26),(36, 30),(31, 29),(29, 34),(28, 33),(26, 36),(33, 32),(33, 25),(25, 34),(29, 24),(27, 32),(33, 30),(30, 34),(28, 34),(42, 41),(41, 44),(40, 44),(38, 43),(39, 40),(43, 42),(41, 39),(40, 42),(40, 38),(37, 41),(37, 44),(51, 61),(64, 46),(61, 84),(46, 72),(74, 50),(50, 59),(54, 84),(58, 66),(58, 68),(86, 64),(49, 81),(54, 68),(65, 71),(77, 45),(62, 55),(72, 82),(58, 53),(71, 67),(54, 71),(56, 54),(69, 55),(54, 79),(74, 61),(73, 81),(79, 47),(62, 66),(66, 48),(59, 82),(83, 71),(67, 85),(65, 72),(54, 55),(45, 67),(74, 72),(50, 58),(46, 76),(68, 57),(51, 59),(74, 62),(57, 82),(49, 86),(63, 57),(61, 45),(49, 61),(54, 86),(61, 73),(83, 58),(62, 83),(53, 50),(85, 69),(49, 59),(74, 69),(55, 60),(51, 65),(82, 67),(70, 50),(61, 47),(59, 57),]
    # g.add_edges(edges)
    # k = 5 # top-k spanners
    # c = len(np.unique(ground_truth_labels))
    # need the ground_truth_labels.
    k_top, communities = get_structural_holes_HAM(g, k, c, ground_truth_labels)
    print(k_top)
    print(communities)
/MazgaDB-1.1.2.tar.gz/MazgaDB-1.1.2/mazga_db/__init__.py | import sqlite3
from dataclasses import make_dataclass
from prettytable import from_db_cursor
def __save__(db, class_data, name_table, key):
    """Write every attribute of *class_data* back into *name_table*.

    Each column of the table is updated on the row(s) whose *key* column
    equals the corresponding attribute of *class_data*.
    """
    key_value = getattr(class_data, key)
    for column_name, _column_type in db.accept_columns(name_table):
        db.update_line(name_table=name_table, key1=key, value1=key_value,
                       key2=column_name, value2=getattr(class_data, column_name))
class MazgaDB:
    """Convenience wrapper around an sqlite3 connection.

    WARNING: every helper interpolates identifiers and values straight into
    the SQL text (sqlite's parameter binding cannot quote identifiers), so
    all arguments must come from trusted input only.
    """

    def __init__(self, db: str, classes: dict = None) -> None:
        """Open (or create) the database file *db*.

        :param db: path of the sqlite file (or ":memory:").
        :param classes: optional {table_name: class} mapping used by
            ``select_class``.
        """
        self.conn = sqlite3.connect(db)
        self.cur = self.conn.cursor()
        self.db = db
        # BUGFIX: the default used to be a mutable ``dict()`` evaluated once
        # at definition time and therefore shared by every instance.
        self.data_class = classes if classes is not None else {}

    def accept_columns(self, name_table: str) -> list:
        """Return ``[[column_name, 'int' | 'str'], ...]`` for *name_table*."""
        self.cur.execute(f'PRAGMA table_info({name_table})')
        columns = []
        for column in self.cur.fetchall():
            # PRAGMA row layout: (cid, name, type, notnull, dflt_value, pk)
            if column[2] == 'INT':
                type_ = 'int'
            else:
                type_ = 'str'
            columns.append([column[1], type_])
        return columns

    def create_table(self, name_table: str, param: dict) -> None:
        """Create *name_table* if it does not exist.

        Example of the generated SQL::

            CREATE TABLE IF NOT EXISTS users(
                userid INT PRIMARY KEY,
                fname TEXT,
                lname TEXT,
                gender TEXT);

        :param name_table: table name.
        :param param: mapping {column_name: column_type_sql}.
        """
        self.execute(
            f"""CREATE TABLE IF NOT EXISTS {name_table}({','.join([t + ' ' + param[t] for t in param])})"""
        )

    def append_line(self, name_table: str, values: list) -> None:
        """Insert one row; *values* must match the column order of the table."""
        self.execute(
            f"""INSERT INTO {name_table} VALUES({','.join(['"' + str(t) + '"' for t in values])});"""
        )

    def update_line(self,
                    name_table: str, key1: str, value1: str, key2: str, value2: str) -> None:
        """Set ``key2 = value2`` on every row where ``key1 = value1``."""
        self.execute(
            f"UPDATE {name_table} SET {key2} = '{value2}' WHERE {key1} = '{value1}'"
        )

    def delete_line(self, name_table: str, key: str, value: str) -> None:
        """Delete every row where ``key = value``."""
        self.execute(f"""DELETE FROM {name_table} WHERE {key} = '{value}'""")

    def append_column(self, name_table: str, name_column: str, type_column: str) -> None:
        """Add a new column to an existing table."""
        self.execute(
            f"ALTER TABLE {name_table} ADD COLUMN {name_column} '{type_column}'"
        )

    def delete_column(self, name_table: str, column: str) -> None:
        """Drop *column* from *name_table* by rebuilding the table.

        (Old sqlite versions have no ``ALTER TABLE ... DROP COLUMN``, so the
        table is copied through a temporary one without the column.)

        :param name_table: table name.
        :param column: name of the column to remove.
        """
        # BUGFIX: accept_columns() returns [name, type] pairs, so the old
        # ``columns.remove(column)`` always raised ValueError (a bare string
        # is never an element of a list of pairs) and the join below would
        # have produced garbage; keep only the surviving column *names*.
        columns = [col[0] for col in self.accept_columns(name_table) if col[0] != column]
        self.execute(f"CREATE TABLE config AS SELECT {','.join(columns)} FROM {name_table};")
        self.execute(f"DROP TABLE {name_table};")
        self.execute(f"ALTER TABLE config RENAME TO {name_table};")

    def is_there(self, name_table: str, key: str, value: str) -> bool:
        """Return True when at least one row matches ``key = value``."""
        self.cur.execute(f"SELECT * FROM {name_table} WHERE {key} = '{value}'")
        return len(self.cur.fetchall()) > 0

    def read_table(self, name_table: str, type: str = 's', params: list = None) -> str:
        """Render the table via prettytable.

        :param type: 's' for a plain string table, 'm' for a Markdown table.
        :param params: optional list of column names to project.
        """
        try:
            self.cur.execute(f"SELECT {','.join(params) if params else '*'} FROM {name_table}")
            mytable = from_db_cursor(self.cur)
            if type == 's':
                return str(mytable)
            elif type == 'm':
                # turn the first '+...+' border row into Markdown '|---|' syntax
                counts = len(self.accept_columns(name_table)) + 1
                table = str(mytable).replace('+', '-', counts).replace('+', '|', counts).replace('+', '-')
                return table
            else:
                raise ValueError("unknown type. There are only two types 's'(string table) and 'm'(Markdown table)")
        except sqlite3.Error as error:
            # NOTE(review): errors are returned, not raised — callers must check
            return error

    def saw_tables(self) -> str:
        """Return all table names stored in the database file."""
        return self.execute("SELECT name FROM sqlite_master WHERE type='table';")

    def select_class(self, name_table: str, key: str, value, class_data=None) -> object:
        """Fetch the first row matching ``key = value`` wrapped into a class.

        Uses *class_data* when given, then the mapping passed to the
        constructor, and finally a generated dataclass whose instances can
        be *called* to write their attributes back to the database.
        """
        data = self.execute(f"SELECT * FROM {name_table} WHERE {key} = '{value}'")[0]
        if class_data:
            return class_data(*data)
        elif name_table in self.data_class:
            return self.data_class[name_table](*data)
        else:
            class_ = make_dataclass(cls_name=name_table.title(), fields=self.accept_columns(name_table), namespace={'__call__': lambda self1: __save__(self, self1, name_table, key)})
            return class_(*data)

    def select(self, name_table: str, key: str, value, param: str = None):
        """Plain sqlite3 SELECT returning the matching rows.

        :param param: optional iterable of column names to project.
        """
        return self.execute(f"SELECT {','.join(param) if param else '*'} FROM {name_table} WHERE {key} = '{value}'")

    def execute(self, sql_request: str) -> list:
        """Run raw SQL, commit, and return all fetched rows."""
        self.cur.execute(sql_request)
        self.conn.commit()
        return self.cur.fetchall()
/Beaver-36.3.1-py3-none-any.whl/beaver/transports/sqs_transport.py | import boto.sqs
import uuid
from boto.sqs.message import Message, RawMessage
from beaver.transports.base_transport import BaseTransport
from beaver.transports.exception import TransportException
from sys import getsizeof
class SqsTransport(BaseTransport):
    """Beaver transport that ships formatted log lines to AWS SQS queues.

    Two modes exist: the default mode wraps each line in a boto ``Message``
    and sends batches of up to 10 via the SQS batch API; with the
    ``sqs_bulk_lines`` option, lines are concatenated into a single JSON
    array string sent as one ``RawMessage`` per request.
    """

    def __init__(self, beaver_config, logger=None):
        super(SqsTransport, self).__init__(beaver_config, logger=logger)

        self._access_key = beaver_config.get('sqs_aws_access_key')
        self._secret_key = beaver_config.get('sqs_aws_secret_key')
        self._profile = beaver_config.get('sqs_aws_profile_name')
        self._region = beaver_config.get('sqs_aws_region')
        self._queue_owner_acct_id = beaver_config.get('sqs_aws_queue_owner_acct_id')
        self._queue = beaver_config.get('sqs_aws_queue').split(',')
        self._bulk_lines = beaver_config.get('sqs_bulk_lines')

        try:
            # Credential resolution order: named profile, implicit env/instance
            # credentials (both keys unset), then an explicit key pair.
            if self._profile:
                self._connection = boto.sqs.connect_to_region(self._region,
                                                              profile_name=self._profile)
            elif self._access_key is None and self._secret_key is None:
                self._connection = boto.sqs.connect_to_region(self._region)
            else:
                self._connection = boto.sqs.connect_to_region(self._region,
                                                              aws_access_key_id=self._access_key,
                                                              aws_secret_access_key=self._secret_key)

            if self._connection is None:
                self._logger.warn('Unable to connect to AWS - check your AWS credentials')
                raise TransportException('Unable to connect to AWS - check your AWS credentials')

            self._queues = {}
            for queue in self._queue:
                self._logger.debug('Attempting to load SQS queue: {0}'.format(queue))
                if self._queue_owner_acct_id is None:
                    self._queues[queue] = self._connection.get_queue(queue)
                else:
                    self._queues[queue] = self._connection.get_queue(queue,
                                                                     owner_acct_id=self._queue_owner_acct_id)
                if self._queues[queue] is None:
                    raise TransportException('Unable to access queue with name {0}'.format(queue))
                self._logger.debug('Successfully loaded SQS queue: {0}'.format(queue))
        except Exception as e:
            # BUGFIX: modernized from the Python-2-only "except Exception, e"
            # syntax; str(e) replaces the long-deprecated e.message.
            raise TransportException(str(e))

    def callback(self, filename, lines, **kwargs):
        """Format *lines* read from *filename* and flush them to all queues.

        Accumulates messages until either the ~256KiB payload limit or (in
        non-bulk mode) the 10-message batch limit is reached, then flushes.
        """
        timestamp = self.get_timestamp(**kwargs)
        if kwargs.get('timestamp', False):
            del kwargs['timestamp']

        if self._bulk_lines:
            message_batch = ''
            message_count = 0
        else:
            message_batch = []
        message_batch_size = 0
        message_batch_size_max = 250000  # max payload is 256KiB; leave some headroom

        for line in lines:
            if self._bulk_lines:
                m = self.format(filename, line, timestamp, **kwargs)
                message_size = getsizeof(m)
            else:
                m = Message()
                m.set_body(self.format(filename, line, timestamp, **kwargs))
                # NOTE(review): assumes boto Message supports len() for its
                # payload size — confirm against the boto version in use.
                message_size = len(m)

            if message_size > message_batch_size_max:
                self._logger.debug('Dropping the message as it is too large to send ({0} bytes)'.format(message_size))
                continue

            # Check the new total size before adding a new message and don't
            # try to send an empty batch.
            if self._bulk_lines and (len(message_batch) > 0) and ((message_batch_size + message_size) >= message_batch_size_max):
                self._logger.debug('Flushing {0} messages to SQS queue {1} bytes'.format(message_count, message_batch_size))
                self._send_message(message_batch)
                message_batch = ''
                message_count = 0
                message_batch_size = 0
            # SQS batch mode can hold at most 10 messages and 256KiB (above).
            # BUGFIX: guard with "not self._bulk_lines" — previously a bulk
            # payload whose *string length* happened to be exactly 10 was
            # mis-sent through the batch API.
            elif (not self._bulk_lines) and (len(message_batch) > 0) and (((message_batch_size + message_size) >= message_batch_size_max) or (len(message_batch) == 10)):
                self._logger.debug('Flushing {0} messages to SQS queue {1} bytes'.format(len(message_batch), message_batch_size))
                self._send_message_batch(message_batch)
                message_batch = []
                message_batch_size = 0

            message_batch_size = message_batch_size + message_size

            if self._bulk_lines:
                message_batch += '{0},'.format(m)
                message_count += 1
            else:
                message_batch.append((uuid.uuid4(), self.format(filename, line, timestamp, **kwargs), 0))

        # flush whatever is left over after the loop
        if len(message_batch) > 0:
            if self._bulk_lines:
                self._logger.debug('Flushing the last {0} messages to SQS queue {1} bytes'.format(message_count, message_batch_size))
                self._send_message(message_batch)
            else:
                self._logger.debug('Flushing the last {0} messages to SQS queue {1} bytes'.format(len(message_batch), message_batch_size))
                self._send_message_batch(message_batch)
        return True

    def _send_message(self, msg):
        """Send one bulk (JSON-array) payload to every configured queue."""
        # BUGFIX: build the '[...]' wrapper once, outside the queue loop —
        # the original re-wrapped msg per queue, producing '[[...]]' for the
        # second and subsequent queues.
        body = '[{0}]'.format(msg.rstrip(','))
        for queue in self._queues:
            try:
                m = RawMessage()
                m.set_body(body)
                result = self._queues[queue].write(m)
                if not result:
                    # BUGFIX: the original referenced the undefined attribute
                    # self._queue_name here (NameError on the failure path).
                    self._logger.error('Error occurred sending message to SQS queue {0}. result: {1}'.format(
                        queue, result))
                    raise TransportException('Error occurred sending message to queue {0}'.format(queue))
            except Exception as e:
                self._logger.exception('Exception occurred sending message to SQS queue')
                raise TransportException(str(e))

    def _send_message_batch(self, message_batch):
        """Send a list of (id, body, delay) tuples via the SQS batch API."""
        for queue in self._queues:
            try:
                self._logger.debug('Attempting to push batch message to SQS queue: {0}'.format(queue))
                result = self._queues[queue].write_batch(message_batch)
                if not result:
                    self._logger.error('Error occurred sending messages to SQS queue {0}. result: {1}'.format(
                        queue, result))
                    raise TransportException('Error occurred sending message to queue {0}'.format(queue))
                self._logger.debug('Successfully pushed batch message to SQS queue: {0}'.format(queue))
            except Exception as e:
                self._logger.exception('Exception occurred sending batch to SQS queue')
                raise TransportException(str(e))

    def interrupt(self):
        """No special interrupt handling is required for SQS."""
        return True

    def unhandled(self):
        """No special unhandled-signal handling is required for SQS."""
        return True
/BetterPyXZH-1.1.0.20201231.1.tar.gz/BetterPyXZH-1.1.0.20201231.1/README.md | # BetterPy
Use Something to Make Python Better Together!<br><br>
### V1.1.0.20201231:
V1.1.0.20201231:更改_PLUS()为_COMPUTE(),修复了一点BUG.<br>
betterpyInfo()打印程序信息<br>
_BK(<提示信息>)设置程序中断<br>
_DEBUG(<一个或多个变量>)输出变量信息并中断<br>
_QUIT(<提示信息>)中断并退出<br>
_COMPUTE(<变量a>,<运算符>,<变量b>,<可选:输出普通计算结果>)高精度加减法.当运算符非"+"或"-"时,会引发ValueError<br>
_RUN_CLOCK(<代码>)执行代码并计算运行所耗时间<br>
_CASE(<变量>,<目标字典>)当变量等于字典中的某个键时,执行该键对应的值.例:_CASE(1,{1:'print("1")',2:'print("2")'})的输出为:1.当变量不在目标字典的范围内时,会引发KeyError<br>
_HELP(<函数名>)输出该函数对应的帮助.当函数名不在模块的范围内时,会引发KeyError<br>
##### Github代码位置:master分支
Happy World!(划掉) Happy New Year! | PypiClean |
/Gauss_dist-0.1.tar.gz/Gauss_dist-0.1/Gauss_dist/Binomialdistribution.py | import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Binomial(Distribution):
    """ Binomial distribution class for calculating and
    visualizing a Binomial distribution.

    Attributes:
        mean (float) representing the mean value of the distribution
        stdev (float) representing the standard deviation of the distribution
        data_list (list of floats) a list of floats to be extracted from the data file
        p (float) representing the probability of an event occurring
        n (int) number of trials
    """

    def __init__(self, prob=.5, size=20):
        self.n = size
        self.p = prob
        # the base class stores the mean/stdev derived from the initial p and n
        Distribution.__init__(self, self.calculate_mean(), self.calculate_stdev())

    def calculate_mean(self):
        """Calculate the mean (n * p) from p and n and cache it on the instance.

        Returns:
            float: mean of the distribution
        """
        self.mean = self.p * self.n
        return self.mean

    def calculate_stdev(self):
        """Calculate the standard deviation sqrt(n * p * (1 - p)) and cache it.

        Returns:
            float: standard deviation of the distribution
        """
        self.stdev = math.sqrt(self.n * self.p * (1 - self.p))
        return self.stdev

    def replace_stats_with_data(self):
        """Recompute p and n from the data set attached to the instance.

        ``self.data`` is expected to be a list of 0/1 outcomes loaded by the
        base class; n becomes the number of trials and p the success ratio.
        NOTE(review): raises ZeroDivisionError when the data set is empty.

        Returns:
            float: the p value
            float: the n value
        """
        self.n = len(self.data)
        self.p = 1.0 * sum(self.data) / len(self.data)
        self.mean = self.calculate_mean()
        self.stdev = self.calculate_stdev()
        return self.p, self.n

    def plot_bar(self):
        """Plot a bar chart of the expected outcome counts with matplotlib."""
        plt.bar(x=['0', '1'], height=[(1 - self.p) * self.n, self.p * self.n])
        plt.title('Bar Chart of Data')
        plt.xlabel('outcome')
        plt.ylabel('count')

    def pdf(self, k):
        """Probability mass function of the binomial distribution.

        Args:
            k (int): number of successes for which to evaluate the pmf
                (the original docstring incorrectly documented a float ``x``)

        Returns:
            float: probability of exactly k successes in n trials
        """
        # binomial coefficient C(n, k) times the success/failure probabilities
        a = math.factorial(self.n) / (math.factorial(k) * (math.factorial(self.n - k)))
        b = (self.p ** k) * (1 - self.p) ** (self.n - k)
        return a * b

    def plot_bar_pdf(self):
        """Plot the pmf across all possible outcomes 0..n.

        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        x = []
        y = []
        # calculate the x values to visualize
        for i in range(self.n + 1):
            x.append(i)
            y.append(self.pdf(i))
        # make the plots
        plt.bar(x, y)
        plt.title('Distribution of Outcomes')
        plt.ylabel('Probability')
        plt.xlabel('Outcome')
        plt.show()
        return x, y

    def __add__(self, other):
        """Add together two Binomial distributions with equal p.

        Args:
            other (Binomial): Binomial instance

        Returns:
            Binomial: distribution with n = n1 + n2 and the shared p

        Raises:
            AssertionError: if the two p values differ (kept as
                AssertionError so existing callers' except clauses still work)
        """
        # BUGFIX(cleanup): the original wrapped this assert in a
        # try/except AssertionError block that only re-raised — a no-op.
        assert self.p == other.p, 'p values are not equal'

        result = Binomial()
        result.n = self.n + other.n
        result.p = self.p
        result.calculate_mean()
        result.calculate_stdev()
        return result

    def __repr__(self):
        """Return a human-readable summary of the distribution.

        Returns:
            string: characteristics of the Binomial
        """
        return "mean {}, standard deviation {}, p {}, n {}".\
            format(self.mean, self.stdev, self.p, self.n)
/JoUtil-1.3.3-py3-none-any.whl/JoTools/txkjRes/AllRes.py |
import os
import cv2
import copy
import time
import random
from flask import jsonify
import numpy as np
from abc import ABC
from PIL import Image
from .resBase import ResBase
from .deteObj import DeteObj, PointObj
from .deteAngleObj import DeteAngleObj
from ..txkjRes.resTools import ResTools
from ..utils.JsonUtil import JsonUtil
from ..txkjRes.deteXml import parse_xml, save_to_xml, save_to_xml_wh_format
from ..utils.FileOperationUtil import FileOperationUtil
from ..utils.DecoratorUtil import DecoratorUtil
from labelme import utils
class AllRes(ResBase):
    """Container for the labelme-style annotation result of one image.

    Holds a list of shape objects ("alarms": points, rectangles, circles,
    polygons, lines, line strips) parsed from / serialized to the labelme
    JSON format.
    """

    def __init__(self, json_path=None, assign_img_path=None, json_dict=None):
        # shape objects parsed from the json file / dict
        self._alarms = []
        self.flags = {}
        self.version = "4.4.0"
        super().__init__(assign_img_path, json_dict, json_path=json_path)

    def __add__(self, other):
        """Union: return a deep copy of self extended with the shapes of
        *other* that are not already present."""
        # BUGFIX: the original tested "isinstance(other, DeteRes)", but
        # DeteRes is never imported in this module (NameError at runtime).
        if not isinstance(other, AllRes):
            raise TypeError("should be AllRes")
        res = self.deep_copy()
        for each_point_obj in other:
            if each_point_obj not in self:
                res.add_obj_2(each_point_obj)
        return res

    def __sub__(self, other):
        """Remove every shape of *other* from self (in place) and return self."""
        # BUGFIX: the original called the non-existent self.del_dete_obj().
        for each_dete_obj in other:
            self.del_point_obj(each_dete_obj)
        return self

    def __contains__(self, item):
        # membership is defined by the shape objects' __eq__
        for each_point_obj in self._alarms:
            if item == each_point_obj:
                return True
        return False

    def __len__(self):
        return len(self._alarms)

    def __getitem__(self, index):
        return self._alarms[index]

    def __setattr__(self, key, value):
        object.__setattr__(self, key, value)
        # Re-parse lazily whenever one of the source attributes is (re)assigned.
        # NOTE(review): assumes ResBase sets self.parse_auto before any of
        # these keys are first assigned — confirm against the base class.
        if key == 'img_path' and isinstance(value, str) and self.parse_auto:
            self._parse_img_info()
        elif key == 'json_path' and isinstance(value, str) and self.parse_auto:
            self._parse_json_file()
        elif key == 'json_dict' and isinstance(value, dict) and self.parse_auto:
            self._parse_json_str()

    @property
    def alarms(self):
        return self._alarms

    def reset_alarms(self, assign_alarms=None):
        """Reset the alarm list (empty by default, or to *assign_alarms*)."""
        if assign_alarms is None:
            self._alarms = []
        else:
            self._alarms = assign_alarms

    # ------------------------------------------------------------------------------------------------------------------

    def _parse_json_file(self):
        """Parse self.json_path (labelme JSON) into shape objects."""
        if self.json_path:
            a = JsonUtil.load_data_from_json_file(self.json_path, encoding='GBK')
        else:
            raise ValueError("* self.json_path is none")
        # parse attr
        self.version = a["version"] if "version" in a else ""
        self.width = a["imageWidth"] if "imageWidth" in a else ""
        self.height = a["imageHeight"] if "imageWidth" in a else ""
        self.file_name = a["imagePath"] if "imagePath" in a else ""
        self.image_data_bs64 = a["imageData"]
        obj_index = -1
        # NOTE(review): RectangleObj, CricleObj, PolygonObj, LineObj and
        # LineStripObj are not imported in this module — every non-point
        # branch below raises NameError as written. Their import source
        # could not be determined from this file; confirm and import them.
        for each_shape in a["shapes"]:
            each_shape_type = each_shape["shape_type"]
            #
            obj_index += 1
            each_label = each_shape["label"]
            # point
            if each_shape_type == 'point':
                each_x, each_y = each_shape["points"][0]
                new_point = PointObj(each_x, each_y, each_label, assign_id=obj_index)
                self._alarms.append(new_point)
            # rectangle
            elif each_shape_type == 'rectangle':
                (x1, y1), (x2, y2) = each_shape["points"][0], each_shape["points"][1]
                new_rectangle = RectangleObj(x1, y1, x2, y2, tag=each_label, assign_id=obj_index)
                self._alarms.append(new_rectangle)
            # circle
            elif each_shape_type == 'circle':
                (center_x, center_y), (point_x, point_y) = each_shape["points"][0], each_shape["points"][1]
                new_cricle = CricleObj(center_x, center_y, point_x, point_y, tag=each_label, assign_id=obj_index)
                self._alarms.append(new_cricle)
            # polygon
            elif each_shape_type == 'polygon':
                new_polygon = PolygonObj(tag=each_label, assign_id=obj_index)
                for each_point in each_shape["points"]:
                    new_polygon.add_point(each_point[0], each_point[1], tag="poly_point")
                self._alarms.append(new_polygon)
            # line
            elif each_shape_type == 'line':
                (start_x, start_y), (end_x, end_y) = each_shape["points"][0], each_shape["points"][1]
                new_line = LineObj(start_x, start_y, end_x, end_y, tag=each_label, assign_id=obj_index)
                self._alarms.append(new_line)
            # line strip
            elif each_shape_type == 'line_strip':
                new_line_strip = LineStripObj(tag=each_label, assign_id=obj_index)
                for each_point in each_shape["points"]:
                    new_line_strip.add_point(each_point[0], each_point[1], tag="line_strip_point")
                self._alarms.append(new_line_strip)

    def _parse_json_str(self):
        """Parse self.json_dict (labelme dict) — point shapes only."""
        a = self.json_dict
        # parse attr
        self.version = a["version"] if "version" in a else ""
        self.width = a["imageWidth"] if "imageWidth" in a else ""
        self.height = a["imageHeight"] if "imageWidth" in a else ""
        self.file_name = a["imagePath"] if "imagePath" in a else ""
        self.image_data_bs64 = a["imageData"]
        point_index = 0
        for each_shape in a["shapes"]:
            each_shape_type = each_shape["shape_type"]
            if each_shape_type == 'point':
                point_index += 1
                each_label = each_shape["label"]
                each_points_x, each_points_y = each_shape["points"][0]
                new_point = PointObj(each_points_x, each_points_y, each_label, assign_id=point_index)
                self.alarms.append(new_point)

    def save_to_json_file(self, save_json_path, include_img_data=False):
        """Serialize to a labelme JSON file.

        TODO: for labelme to reopen the file, imageData must be embedded;
        files saved without it can be repaired later by re-adding imageData.
        """
        json_info = {"version": "", "imageWidth": "", "imageHeight": "", "imagePath": "", "imageData": "", "shapes": [], "flasg": {}}
        if self.version:
            json_info["version"] = self.version
        if self.width:
            json_info["imageWidth"] = str(self.width)
        if self.height:
            json_info["imageHeight"] = str(self.height)
        if self.file_name:
            json_info["imagePath"] = self.file_name
        if self.flags:
            json_info["flags"] = self.flags
        #
        for each_shape in self._alarms:
            each_shape_info = {
                "label": each_shape.tag,
                "points": [[each_shape.x, each_shape.y]],
                "group_id": each_shape.group_id,
                "shape_type": each_shape.shape_type}
            json_info["shapes"].append(each_shape_info)
        # save img data
        if self.img_path and include_img_data:
            img = cv2.imdecode(np.fromfile(self.img_path, dtype=np.uint8), 1)
            image_data_bs64 = utils.img_arr_to_b64(img).decode('utf-8')
            json_info["imageData"] = image_data_bs64
        # save
        JsonUtil.save_data_to_json_file(json_info, save_json_path, encoding="GBK")

    def save_to_json_str(self):
        """Serialize this result to a labelme-style dict (no image data)."""
        json_info = {"version": "", "imageWidth": "", "imageHeight": "", "imagePath": "", "imageData": "", "shapes": [],
                     "flasg": {}}
        # BUGFIX: the original read self.image_width / self.image_height /
        # self.img_name, none of which exist on this class.
        if self.version:
            json_info["version"] = self.version
        if self.width:
            json_info["imageWidth"] = self.width
        if self.height:
            json_info["imageHeight"] = self.height
        if self.file_name:
            json_info["imagePath"] = self.file_name
        if self.flags:
            json_info["flags"] = self.flags
        #
        shapes = []
        for each_shape in self._alarms:
            each_shape_info = {
                "label": each_shape.tag,
                "points": [[each_shape.x, each_shape.y]],
                "group_id": each_shape.group_id,
                "shape_type": each_shape.shape_type}
            shapes.append(each_shape_info)
        # BUGFIX: the original assigned into an undefined local ``json_dict``.
        # TODO(review): confirm whether 'shapes' is meant to stay a serialized
        # JSON string inside the returned dict.
        json_info['shapes'] = JsonUtil.save_data_to_json_str(shapes)
        return json_info

    def draw_res(self, save_path, radius=3):
        # TODO: also draw the non-point shapes (circles, rectangles, polygons)
        img = cv2.imdecode(np.fromfile(self.img_path, dtype=np.uint8), 1)
        for each_point_obj in self:
            img = cv2.circle(img, (int(each_point_obj.x), int(each_point_obj.y)), radius, [255, 255, 0], thickness=2)
        cv2.imencode('.jpg', img)[1].tofile(save_path)

    def get_fzc_format(self):
        """Return rows in the "fzc" output format:
        [tag, id, x, y, str(conf), des] per alarm."""
        res_list = []
        for each_obj in self._alarms:
            res_list.append([each_obj.tag, each_obj.id, each_obj.x, each_obj.y, str(each_obj.conf), each_obj.des])
        return res_list

    def print_as_fzc_format(self):
        """Print every alarm in the fzc format."""
        for each in self.get_fzc_format():
            print(each)

    def add_obj(self, x, y, tag, conf=-1, assign_id=-1, describe='', area=-1):
        point_res_tmp = PointObj(x=x, y=y, tag=tag, conf=conf, assign_id=assign_id, describe=describe, area=area)
        self.alarms.append(point_res_tmp)

    def add_obj_2(self, point_obj):
        self.alarms.append(point_obj)

    def deep_copy(self, copy_img=False):
        """Return a copy; with copy_img=False the image itself is shared
        (copying it would be slow), everything else is deep-copied."""
        if copy_img:
            return copy.deepcopy(self)
        else:
            a = AllRes()
            a.parse_auto = False
            a.height = self.height
            a.width = self.width
            a.json_path = self.json_path
            a.img_path = self.img_path
            a.file_name = self.file_name
            a.folder = self.folder
            # the image is intentionally NOT deep-copied
            a.img = self.img
            a.json_dict = copy.deepcopy(self.json_dict)
            a.reset_alarms(copy.deepcopy(self.alarms))
            a.redis_conn_info = self.redis_conn_info
            a.img_redis_key = self.img_redis_key
            a.parse_auto = True
            return a

    def del_point_obj(self, assign_dete_obj, del_all=False):
        """Remove shapes equal to *assign_dete_obj*.

        :param del_all: when False (default) stop after the first match,
            otherwise remove every matching shape.
        """
        # BUGFIX: ``del_all`` used to be an undefined free name (NameError on
        # the first match); it is now a keyword parameter defaulting to False.
        for each_point_obj in copy.deepcopy(self._alarms):
            if each_point_obj == assign_dete_obj:
                # list.remove uses ==, so removing the copied element works
                self._alarms.remove(each_point_obj)
                if not del_all:
                    return

    def filter_by_tags(self, tags, update=True):
        """Keep only alarms whose tag is in *tags*; optionally update self."""
        if tags:
            res = self.deep_copy()
            res.reset_alarms()
            for each_point_obj in self:
                if each_point_obj.tag in tags:
                    res.add_obj_2(each_point_obj)
            if update:
                self.reset_alarms(res.alarms)
            return res
/EOxServer-1.2.12-py3-none-any.whl/eoxserver/services/subset.py |
import logging
import operator
from django.contrib.gis.geos import Polygon, LineString
from eoxserver.core.config import get_eoxserver_config
from eoxserver.core.decoders import config, enum
from eoxserver.contrib.osr import SpatialReference
from eoxserver.resources.coverages import crss
from eoxserver.services.exceptions import (
InvalidAxisLabelException, InvalidSubsettingException,
InvalidSubsettingCrsException
)
__all__ = ["Subsets", "Trim", "Slice"]
logger = logging.getLogger(__name__)
class Subsets(list):
""" Convenience class to handle a variety of spatial and/or temporal
subsets.
:param iterable: an iterable of objects inheriting from :class:`Trim`
or :class:`Slice`
:param crs: the CRS definition
:param allowed_types: the allowed subset types. defaults to both
:class:`Trim` and :class:`Slice`
"""
    def __init__(self, iterable, crs=None, allowed_types=None):
        """ Constructor. Sets the initial subsets.

        :param iterable: the initial subset objects; each one is validated
            on insertion via :meth:`append`
        :param crs: optional CRS definition for the subsets
        :param allowed_types: allowed subset classes; defaults to
            (:class:`Trim`, :class:`Slice`)
        """
        self.allowed_types = allowed_types if allowed_types is not None else (
            Trim, Slice
        )
        # Do a manual insertion here to assure integrity
        for subset in iterable:
            self.append(subset)
        self._crs = crs
# List API
def extend(self, iterable):
""" See :meth:`list.extend` """
for subset in iterable:
self._check_subset(subset)
super(Subsets, self).append(subset)
def append(self, subset):
""" See :meth:`list.append` """
self._check_subset(subset)
super(Subsets, self).append(subset)
def insert(self, i, subset):
""" See :meth:`list.insert` """
self._check_subset(subset)
super(Subsets, self).insert(i, subset)
# Subset related stuff
@property
def has_x(self):
""" Check if a subset along the X-axis is given. """
return any(map(lambda s: s.is_x, self))
@property
def has_y(self):
""" Check if a subset along the Y-axis is given. """
return any(map(lambda s: s.is_y, self))
@property
def has_t(self):
""" Check if a subset along the temporal axis is given. """
return any(map(lambda s: s.is_temporal, self))
@property
def crs(self):
""" Return the subset CRS definiton. """
return self._crs
@crs.setter
def crs(self, value):
""" Set the subset CRS definiton. """
self._crs = value
@property
def srid(self):
""" Tries to find the correct integer SRID for the crs.
"""
crs = self.crs
if crs is not None:
srid = crss.parseEPSGCode(crs,
(crss.fromURL, crss.fromURN, crss.fromShortCode)
)
if srid is None and not crss.is_image_crs(crs):
raise InvalidSubsettingCrsException(
"Could not parse EPSG code from URI '%s'" % crs
)
return srid
return None
def get_filters(self, containment="overlaps"):
""" Filter a :class:`Django QuerySet <django.db.models.query.QuerySet>`
of objects inheriting from :class:`EOObject
<eoxserver.resources.coverages.models.EOObject>`.
:param queryset: the ``QuerySet`` to filter
:param containment: either "overlaps" or "contains"
:returns: a ``dict`` with the filters
"""
filters = {}
if not len(self):
return filters
bbox = [None, None, None, None]
srid = self.srid
if srid is None:
srid = 4326
max_extent = crss.crs_bounds(srid)
tolerance = crss.crs_tolerance(srid)
# check if time intervals are configured as "open" or "closed"
config = get_eoxserver_config()
reader = SubsetConfigReader(config)
if reader.time_interval_interpretation == "closed":
gt_op = "__gte"
lt_op = "__lte"
else:
gt_op = "__gt"
lt_op = "__lt"
for subset in self:
if isinstance(subset, Slice):
is_slice = True
value = subset.value
elif isinstance(subset, Trim):
is_slice = False
low = subset.low
high = subset.high
# we need the value in case low == high
value = low
if subset.is_temporal:
if is_slice or (high == low and containment == "overlaps"):
filters['begin_time__lte'] = value
filters['end_time__gte'] = value
elif high == low:
filters['begin_time__gte'] = value
filters['end_time__lte'] = value
else:
# check if the temporal bounds must be strictly contained
if containment == "contains":
if high is not None:
filters['end_time' + lt_op] = high
if low is not None:
filters['begin_time' + gt_op] = low
# or just overlapping
else:
if high is not None:
filters['begin_time' + lt_op] = high
if low is not None:
filters['end_time' + gt_op] = low
else:
if is_slice:
if subset.is_x:
line = LineString(
(value, max_extent[1]),
(value, max_extent[3])
)
else:
line = LineString(
(max_extent[0], value),
(max_extent[2], value)
)
line.srid = srid
if srid != 4326:
line.transform(4326)
filters['footprint__intersects'] = line
else:
if subset.is_x:
bbox[0] = subset.low
bbox[2] = subset.high
else:
bbox[1] = subset.low
bbox[3] = subset.high
if bbox != [None, None, None, None]:
bbox = list(map(
lambda v: v[0] if v[0] is not None else v[1],
zip(bbox, max_extent)
))
bbox[0] -= tolerance
bbox[1] -= tolerance
bbox[2] += tolerance
bbox[3] += tolerance
logger.debug(
"Applying BBox %s with containment '%s'." % (bbox, containment)
)
poly = Polygon.from_bbox(bbox)
poly.srid = srid
if srid != 4326:
poly.transform(4326)
if containment == "overlaps":
filters['footprint__intersects'] = poly
elif containment == "contains":
filters['footprint__within'] = poly
return filters
def filter(self, queryset, containment="overlaps"):
""" Filter a :class:`Django QuerySet <django.db.models.query.QuerySet>`
of objects inheriting from :class:`EOObject
<eoxserver.resources.coverages.models.EOObject>`.
:param queryset: the ``QuerySet`` to filter
:param containment: either "overlaps" or "contains"
:returns: a ``QuerySet`` with additional filters applied
"""
if not len(self):
return queryset
filters = self.get_filters(containment)
return queryset.filter(**filters)
def matches(self, eo_object, containment="overlaps"):
""" Check if the given :class:`EOObject
<eoxserver.resources.coverages.models.EOObject>` matches the given
subsets.
:param eo_object: the ``EOObject`` to match
:param containment: either "overlaps" or "contains"
:returns: a boolean value indicating if the object is contained in the
given subsets
"""
if not len(self):
return True
bbox = [None, None, None, None]
srid = self.srid
if srid is None:
srid = 4326
max_extent = crss.crs_bounds(srid)
tolerance = crss.crs_tolerance(srid)
# check if time intervals are configured as "open" or "closed"
config = get_eoxserver_config()
reader = SubsetConfigReader(config)
# note that the operator is inverted from filter() above as the
# filters use an inclusive search whereas here it's exclusive
if reader.time_interval_interpretation == "closed":
gt_op = operator.gt
lt_op = operator.lt
else:
gt_op = operator.ge
lt_op = operator.le
footprint = eo_object.footprint
begin_time = eo_object.begin_time
end_time = eo_object.end_time
for subset in self:
if isinstance(subset, Slice):
is_slice = True
value = subset.value
elif isinstance(subset, Trim):
is_slice = False
low = subset.low
high = subset.high
# we need the value in case low == high
value = low
if subset.is_temporal:
if is_slice or (low == high and containment == "overlaps"):
if begin_time > value or end_time < value:
return False
elif low == high:
if begin_time < value or end_time > value:
return False
else:
# check if the temporal bounds must be strictly contained
if containment == "contains":
if high is not None:
if gt_op(end_time, high):
return False
if low is not None:
if lt_op(begin_time, low):
return False
# or just overlapping
else:
if high is not None:
if gt_op(begin_time, high):
return False
if low is not None:
if lt_op(end_time, low):
return False
else:
if is_slice:
if subset.is_x:
line = LineString(
(value, max_extent[1]),
(value, max_extent[3])
)
else:
line = LineString(
(max_extent[0], value),
(max_extent[2], value)
)
line.srid = srid
if srid != 4326:
line.transform(4326)
if not line.intersects(footprint):
return False
else:
if subset.is_x:
bbox[0] = subset.low
bbox[2] = subset.high
else:
bbox[1] = subset.low
bbox[3] = subset.high
if bbox != [None, None, None, None]:
bbox = map(
lambda v: v[0] if v[0] is not None else v[1],
zip(bbox, max_extent)
)
bbox[0] -= tolerance
bbox[1] -= tolerance
bbox[2] += tolerance
bbox[3] += tolerance
logger.debug(
"Applying BBox %s with containment '%s'." % (bbox, containment)
)
poly = Polygon.from_bbox(bbox)
poly.srid = srid
if srid != 4326:
poly.transform(4326)
if containment == "overlaps":
if not footprint.intersects(poly):
return False
elif containment == "contains":
if not footprint.within(poly):
return False
return True
def _check_subset(self, subset):
if not isinstance(subset, Subset):
raise ValueError("Supplied argument is not a subset.")
if not isinstance(subset, self.allowed_types):
raise InvalidSubsettingException(
"Supplied subset is not allowed."
)
if self.has_x and subset.is_x:
raise InvalidSubsettingException(
"Multiple subsets for X-axis given."
)
if self.has_y and subset.is_y:
raise InvalidSubsettingException(
"Multiple subsets for Y-axis given."
)
if self.has_t and subset.is_temporal:
raise InvalidSubsettingException(
"Multiple subsets for time-axis given."
)
@property
def xy_bbox(self):
""" Returns the minimum bounding box for all X and Y subsets.
:returns: a list of four elements [minx, miny, maxx, maxy], which might
be ``None``
"""
bbox = [None, None, None, None]
for subset in self:
if subset.is_x:
if isinstance(subset, Trim):
bbox[0] = subset.low
bbox[2] = subset.high
else:
bbox[0] = bbox[2] = subset.value
elif subset.is_y:
if isinstance(subset, Trim):
bbox[1] = subset.low
bbox[3] = subset.high
else:
bbox[1] = bbox[3] = subset.value
return bbox
def bounding_polygon(self, coverage):
""" Returns a minimum bounding :class:`django.contrib.gis.geos.Polygon`
for the given :class:`Coverage
<eoxserver.render.coverages.objects.Coverage>`
:param coverage: the coverage to calculate the bounding polygon for
:returns: the calculated ``Polygon``
"""
srid = SpatialReference(coverage.grid.coordinate_reference_system).srid
extent = coverage.extent
size_x, size_y = coverage.size
footprint = coverage.footprint
subset_srid = self.srid
if subset_srid is None:
bbox = list(extent)
else:
bbox = list(footprint.extent)
for subset in self:
if not isinstance(subset, Trim) or subset.is_temporal:
continue
if subset_srid is None:
# transform coordinates from imageCRS to coverages CRS
if subset.is_x:
if subset.low is not None:
l = max(float(subset.low) / float(size_x), 0.0)
bbox[0] = extent[0] + l * (extent[2] - extent[0])
if subset.high is not None:
l = max(float(subset.high) / float(size_x), 0.0)
bbox[2] = extent[0] + l * (extent[2] - extent[0])
elif subset.is_y:
if subset.low is not None:
l = max(float(subset.low) / float(size_y), 0.0)
bbox[1] = extent[3] - l * (extent[3] - extent[1])
if subset.high is not None:
l = max(float(subset.high) / float(size_y), 0.0)
bbox[3] = extent[3] - l * (extent[3] - extent[1])
else:
if subset.is_x:
if subset.low is not None:
bbox[0] = max(subset.low, bbox[0])
if subset.high is not None:
bbox[2] = min(subset.high, bbox[2])
if subset.is_y:
if subset.low is not None:
bbox[1] = max(subset.low, bbox[1])
if subset.high is not None:
bbox[3] = min(subset.high, bbox[3])
if subset_srid is None:
poly = Polygon.from_bbox(bbox)
poly.srid = srid
else:
poly = Polygon.from_bbox(bbox)
poly.srid = subset_srid
return poly
class Subset(object):
    """ Abstract base for all subset kinds. Normalizes and validates the
        axis label and exposes which dimension the subset addresses.
    """

    def __init__(self, axis):
        label = axis.lower()
        if label not in all_axes:
            raise InvalidAxisLabelException(label)
        self.axis = label

    @property
    def is_temporal(self):
        """ ``True`` when this subset addresses the time axis. """
        return self.axis in temporal_axes

    @property
    def is_x(self):
        """ ``True`` when this subset addresses the X axis. """
        return self.axis in x_axes

    @property
    def is_y(self):
        """ ``True`` when this subset addresses the Y axis. """
        return self.axis in y_axes
class Slice(Subset):
    """ A slice subset: cuts the given ``axis`` at a single ``value``,
        reducing the dimension of the subsetted object by one.

        :param axis: the axis name
        :param value: the slice point
    """

    def __init__(self, axis, value):
        super(Slice, self).__init__(axis)
        self.value = value

    def __repr__(self):
        fields = (self.axis, self.value)
        return "Slice: %s[%s]" % fields
class Trim(Subset):
    """ A trim subset: restricts the domain of the specified ``axis`` to
        the interval [``low``, ``high``].

        :param axis: the axis name
        :param low: lower end of the ``Trim``; ``None`` means unbounded below
        :param high: upper end of the ``Trim``; ``None`` means unbounded above
    """

    def __init__(self, axis, low=None, high=None):
        super(Trim, self).__init__(axis)
        both_bounded = low is not None and high is not None
        if both_bounded and low > high:
            raise InvalidSubsettingException(
                "Invalid bounds: lower bound greater than upper bound."
            )
        self.low = low
        self.high = high

    def __repr__(self):
        fields = (self.axis, self.low, self.high)
        return "Trim: %s[%s:%s]" % fields
# Recognised axis-label aliases, grouped per dimension. Matching is done on
# lowercased labels (Subset.__init__ lowers the axis name first).
temporal_axes = ("t", "time", "phenomenontime")
x_axes = ("x", "lon", "long")
y_axes = ("y", "lat")
z_axes = ("z", "height")
# Every label a Subset may legally carry.
all_axes = temporal_axes + x_axes + y_axes + z_axes
def is_temporal(axis):
    """ Returns whether or not an axis is a temporal one. """
    label = axis.lower()
    return label in temporal_axes
class SubsetConfigReader(config.Reader):
    # Decoder for the [services.owscommon] section of the EOxServer
    # configuration.
    section = "services.owscommon"
    # "closed" (the default) treats time-interval bounds as inclusive,
    # "open" as exclusive; consumed by Subsets.get_filters()/matches().
    # NOTE(review): the second argument to enum() presumably controls
    # case-sensitivity of the value match -- confirm against the decoder.
    time_interval_interpretation = config.Option(
        default="closed", type=enum(("closed", "open"), False)
    )
/Ancestration-0.1.0.tar.gz/Ancestration-0.1.0/README.txt | Ancestration – Family Inheritance for Python
============================================
This project implements the so-called *family inheritance* for Python 2 and 3.
It is based on the doctoral thesis of Patrick Lay "Entwurf eines Objektmodells
für semistrukturierte Daten im Kontext von XML Content Management Systemen"
(Rheinische Friedrich-Wilhelms Universität Bonn, 2006) and is developed as
part of the diploma thesis of Michael Pohl "Architektur und Implementierung
des Objektmodells für ein Web Application Framework" (Rheinische
Friedrich-Wilhelms Universität Bonn, 2013-2014).
| PypiClean |
/Hikka_Pyro_New-2.0.103-py3-none-any.whl/hikkapyro/errors/exceptions/bad_request_400.py |
from ..rpc_error import RPCError
class BadRequest(RPCError):
    """Bad Request"""
    # Telegram RPC error code shared by every error class in this module.
    CODE = 400
    """``int``: RPC Error Code"""
    # __doc__ is already bound in the class namespace while the body
    # executes, so NAME becomes the docstring text "Bad Request".
    NAME = __doc__
class AboutTooLong(BadRequest):
"""The provided about/bio text is too long"""
ID = "ABOUT_TOO_LONG"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class AccessTokenExpired(BadRequest):
"""The bot token has expired"""
ID = "ACCESS_TOKEN_EXPIRED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class AccessTokenInvalid(BadRequest):
"""The bot access token is invalid"""
ID = "ACCESS_TOKEN_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class AdminsTooMuch(BadRequest):
"""The chat has too many administrators"""
ID = "ADMINS_TOO_MUCH"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class AdminRankEmojiNotAllowed(BadRequest):
"""Emoji are not allowed in custom administrator titles"""
ID = "ADMIN_RANK_EMOJI_NOT_ALLOWED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class AdminRankInvalid(BadRequest):
"""The custom administrator title is invalid or too long"""
ID = "ADMIN_RANK_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class AlbumPhotosTooMany(BadRequest):
"""Too many photos were included in the album"""
ID = "ALBUM_PHOTOS_TOO_MANY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ApiIdInvalid(BadRequest):
"""The api_id/api_hash combination is invalid"""
ID = "API_ID_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ApiIdPublishedFlood(BadRequest):
"""You are using an API key that is limited on the server side because it was published somewhere"""
ID = "API_ID_PUBLISHED_FLOOD"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ArticleTitleEmpty(BadRequest):
"""The article title is empty"""
ID = "ARTICLE_TITLE_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class AudioTitleEmpty(BadRequest):
"""The title attribute of the audio is empty"""
ID = "AUDIO_TITLE_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class AuthBytesInvalid(BadRequest):
"""The authorization bytes are invalid"""
ID = "AUTH_BYTES_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class AuthTokenAlreadyAccepted(BadRequest):
"""The authorization token was already used"""
ID = "AUTH_TOKEN_ALREADY_ACCEPTED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class AuthTokenExpired(BadRequest):
"""The provided authorization token has expired and the updated QR-code must be re-scanned"""
ID = "AUTH_TOKEN_EXPIRED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class AuthTokenInvalid(BadRequest):
"""An invalid authorization token was provided"""
ID = "AUTH_TOKEN_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class AutoarchiveNotAvailable(BadRequest):
"""This feature is not yet enabled for your account due to it not receiving too many private messages from strangers"""
ID = "AUTOARCHIVE_NOT_AVAILABLE"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class BankCardNumberInvalid(BadRequest):
"""The credit card number is invalid"""
ID = "BANK_CARD_NUMBER_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class BannedRightsInvalid(BadRequest):
"""You provided a set of restrictions that is invalid"""
ID = "BANNED_RIGHTS_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class BasePortLocInvalid(BadRequest):
"""The base port location is invalid"""
ID = "BASE_PORT_LOC_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class BotsTooMuch(BadRequest):
"""The chat has too many bots"""
ID = "BOTS_TOO_MUCH"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class BotChannelsNa(BadRequest):
"""Bots can't edit admin privileges"""
ID = "BOT_CHANNELS_NA"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class BotCommandDescriptionInvalid(BadRequest):
"""The command description was empty, too long or had invalid characters"""
ID = "BOT_COMMAND_DESCRIPTION_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class BotDomainInvalid(BadRequest):
"""The domain used for the auth button does not match the one configured in @BotFather"""
ID = "BOT_DOMAIN_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class BotGamesDisabled(BadRequest):
"""Bot games cannot be used in this type of chat"""
ID = "BOT_GAMES_DISABLED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class BotGroupsBlocked(BadRequest):
"""This bot can't be added to groups"""
ID = "BOT_GROUPS_BLOCKED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class BotInlineDisabled(BadRequest):
"""The inline feature of the bot is disabled"""
ID = "BOT_INLINE_DISABLED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class BotInvalid(BadRequest):
"""This is not a valid bot"""
ID = "BOT_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class BotMethodInvalid(BadRequest):
"""The method can't be used by bots"""
ID = "BOT_METHOD_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class BotMissing(BadRequest):
"""This method can only be run by a bot"""
ID = "BOT_MISSING"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class BotOnesideNotAvail(BadRequest):
"""Bots can't pin messages for one side only in private chats"""
ID = "BOT_ONESIDE_NOT_AVAIL"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class BotPaymentsDisabled(BadRequest):
"""This method can only be run by a bot"""
ID = "BOT_PAYMENTS_DISABLED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class BotPollsDisabled(BadRequest):
"""Sending polls by bots has been disabled"""
ID = "BOT_POLLS_DISABLED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class BotResponseTimeout(BadRequest):
"""The bot did not answer to the callback query in time"""
ID = "BOT_RESPONSE_TIMEOUT"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class BotScoreNotModified(BadRequest):
"""The bot score was not modified"""
ID = "BOT_SCORE_NOT_MODIFIED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class BroadcastIdInvalid(BadRequest):
"""The channel is invalid"""
ID = "BROADCAST_ID_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class BroadcastPublicVotersForbidden(BadRequest):
"""Polls with public voters cannot be sent in channels"""
ID = "BROADCAST_PUBLIC_VOTERS_FORBIDDEN"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class BroadcastRequired(BadRequest):
"""The request can only be used with a channel"""
ID = "BROADCAST_REQUIRED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ButtonDataInvalid(BadRequest):
"""The button callback data is invalid or too large"""
ID = "BUTTON_DATA_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ButtonTypeInvalid(BadRequest):
"""The type of one of the buttons you provided is invalid"""
ID = "BUTTON_TYPE_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ButtonUrlInvalid(BadRequest):
"""The button url is invalid"""
ID = "BUTTON_URL_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ButtonUserPrivacyRestricted(BadRequest):
"""The privacy settings of the user specified in a keyboard button do not allow creating such button"""
ID = "BUTTON_USER_PRIVACY_RESTRICTED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class CallAlreadyAccepted(BadRequest):
"""The call is already accepted"""
ID = "CALL_ALREADY_ACCEPTED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class CallAlreadyDeclined(BadRequest):
"""The call is already declined"""
ID = "CALL_ALREADY_DECLINED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class CallPeerInvalid(BadRequest):
"""The provided call peer object is invalid"""
ID = "CALL_PEER_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class CallProtocolFlagsInvalid(BadRequest):
"""Call protocol flags invalid"""
ID = "CALL_PROTOCOL_FLAGS_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class CdnMethodInvalid(BadRequest):
"""The method can't be used on CDN DCs"""
ID = "CDN_METHOD_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ChannelsAdminPublicTooMuch(BadRequest):
"""You are an administrator of too many public channels"""
ID = "CHANNELS_ADMIN_PUBLIC_TOO_MUCH"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ChannelsTooMuch(BadRequest):
"""You have joined too many channels or supergroups, leave some and try again"""
ID = "CHANNELS_TOO_MUCH"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ChannelAddInvalid(BadRequest):
"""Internal error."""
ID = "CHANNEL_ADD_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ChannelBanned(BadRequest):
"""The channel is banned"""
ID = "CHANNEL_BANNED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ChannelInvalid(BadRequest):
"""The channel parameter is invalid"""
ID = "CHANNEL_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ChannelPrivate(BadRequest):
"""The channel/supergroup is not accessible"""
ID = "CHANNEL_PRIVATE"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ChannelTooLarge(BadRequest):
"""The channel is too large"""
ID = "CHANNEL_TOO_LARGE"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ChatAboutNotModified(BadRequest):
"""The chat about text was not modified because you tried to edit it using the same content"""
ID = "CHAT_ABOUT_NOT_MODIFIED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ChatAboutTooLong(BadRequest):
"""The chat about text is too long"""
ID = "CHAT_ABOUT_TOO_LONG"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ChatAdminRequired(BadRequest):
"""The method requires chat admin privileges"""
ID = "CHAT_ADMIN_REQUIRED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ChatForwardsRestricted(BadRequest):
"""The chat restricts forwarding content"""
ID = "CHAT_FORWARDS_RESTRICTED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ChatIdEmpty(BadRequest):
"""The provided chat id is empty"""
ID = "CHAT_ID_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ChatIdInvalid(BadRequest):
"""The chat id being used is invalid or not known yet. Make sure you see the chat before interacting with it"""
ID = "CHAT_ID_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ChatInvalid(BadRequest):
"""The chat is invalid"""
ID = "CHAT_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ChatInvitePermanent(BadRequest):
"""The chat invite link is primary"""
ID = "CHAT_INVITE_PERMANENT"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ChatLinkExists(BadRequest):
"""The action failed because the supergroup is linked to a channel"""
ID = "CHAT_LINK_EXISTS"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ChatNotModified(BadRequest):
"""The chat settings (title, permissions, photo, etc..) were not modified because you tried to edit them using the same content"""
ID = "CHAT_NOT_MODIFIED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ChatRestricted(BadRequest):
"""The chat is restricted and cannot be used"""
ID = "CHAT_RESTRICTED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ChatSendInlineForbidden(BadRequest):
"""You cannot use inline bots to send messages in this chat"""
ID = "CHAT_SEND_INLINE_FORBIDDEN"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ChatTitleEmpty(BadRequest):
"""The chat title is empty"""
ID = "CHAT_TITLE_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ChatTooBig(BadRequest):
"""The chat is too big for this action"""
ID = "CHAT_TOO_BIG"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class CodeEmpty(BadRequest):
"""The provided code is empty"""
ID = "CODE_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class CodeHashInvalid(BadRequest):
"""The provided code hash invalid"""
ID = "CODE_HASH_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class CodeInvalid(BadRequest):
"""The provided code is invalid (i.e. from email)"""
ID = "CODE_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ConnectionApiIdInvalid(BadRequest):
"""The provided API id is invalid"""
ID = "CONNECTION_API_ID_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ConnectionAppVersionEmpty(BadRequest):
"""App version is empty"""
ID = "CONNECTION_APP_VERSION_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ConnectionDeviceModelEmpty(BadRequest):
"""The device model is empty"""
ID = "CONNECTION_DEVICE_MODEL_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ConnectionLangPackInvalid(BadRequest):
"""The specified language pack is not valid"""
ID = "CONNECTION_LANG_PACK_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ConnectionLayerInvalid(BadRequest):
"""The connection layer is invalid. Missing InvokeWithLayer-InitConnection call"""
ID = "CONNECTION_LAYER_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ConnectionNotInited(BadRequest):
"""The connection was not initialized"""
ID = "CONNECTION_NOT_INITED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ConnectionSystemEmpty(BadRequest):
"""The connection to the system is empty"""
ID = "CONNECTION_SYSTEM_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ConnectionSystemLangCodeEmpty(BadRequest):
"""The system language code is empty"""
ID = "CONNECTION_SYSTEM_LANG_CODE_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ContactAddMissing(BadRequest):
"""Contact to add is missing"""
ID = "CONTACT_ADD_MISSING"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ContactIdInvalid(BadRequest):
"""The provided contact id is invalid"""
ID = "CONTACT_ID_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ContactNameEmpty(BadRequest):
"""The provided contact name is empty"""
ID = "CONTACT_NAME_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ContactReqMissing(BadRequest):
"""Missing contact request"""
ID = "CONTACT_REQ_MISSING"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class DataInvalid(BadRequest):
"""The encrypted data is invalid"""
ID = "DATA_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class DataJsonInvalid(BadRequest):
"""The provided JSON data is invalid"""
ID = "DATA_JSON_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class DataTooLong(BadRequest):
"""Data too long"""
ID = "DATA_TOO_LONG"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class DateEmpty(BadRequest):
"""The date argument is empty"""
ID = "DATE_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class DcIdInvalid(BadRequest):
"""The dc_id parameter is invalid"""
ID = "DC_ID_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class DhGAInvalid(BadRequest):
"""The g_a parameter invalid"""
ID = "DH_G_A_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class DocumentInvalid(BadRequest):
"""The document is invalid"""
ID = "DOCUMENT_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class EmailHashExpired(BadRequest):
"""The email hash expired and cannot be used to verify it"""
ID = "EMAIL_HASH_EXPIRED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class EmailInvalid(BadRequest):
"""The email provided is invalid"""
ID = "EMAIL_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class EmailUnconfirmed(BadRequest):
"""Email unconfirmed"""
ID = "EMAIL_UNCONFIRMED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
# NOTE(review): this definition shadows the EmailUnconfirmed class (ID
# "EMAIL_UNCONFIRMED") declared immediately above -- after import, only this
# one (ID "EMAIL_UNCONFIRMED_X") is reachable under the name
# EmailUnconfirmed. Renaming would break callers that catch it by name, so
# the generated duplicate is flagged rather than changed.
class EmailUnconfirmed(BadRequest):
    """The provided email isn't confirmed, {value} is the length of the verification code that was just sent to the email"""
    ID = "EMAIL_UNCONFIRMED_X"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class EmailVerifyExpired(BadRequest):
"""The verification email has expired"""
ID = "EMAIL_VERIFY_EXPIRED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class EmoticonEmpty(BadRequest):
"""The emoticon parameter is empty"""
ID = "EMOTICON_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class EmoticonInvalid(BadRequest):
"""The emoticon parameter is invalid"""
ID = "EMOTICON_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class EmoticonStickerpackMissing(BadRequest):
"""The emoticon sticker pack you are trying to obtain is missing"""
ID = "EMOTICON_STICKERPACK_MISSING"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class EncryptedMessageInvalid(BadRequest):
"""The special binding message (bind_auth_key_inner) contains invalid data"""
ID = "ENCRYPTED_MESSAGE_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class EncryptionAlreadyAccepted(BadRequest):
"""The secret chat is already accepted"""
ID = "ENCRYPTION_ALREADY_ACCEPTED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class EncryptionAlreadyDeclined(BadRequest):
"""The secret chat is already declined"""
ID = "ENCRYPTION_ALREADY_DECLINED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class EncryptionDeclined(BadRequest):
"""The secret chat was declined"""
ID = "ENCRYPTION_DECLINED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class EncryptionIdInvalid(BadRequest):
"""The provided secret chat id is invalid"""
ID = "ENCRYPTION_ID_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class EntitiesTooLong(BadRequest):
"""The entity provided contains data that is too long, or you passed too many entities to this message"""
ID = "ENTITIES_TOO_LONG"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class EntityBoundsInvalid(BadRequest):
"""The message entity bounds are invalid"""
ID = "ENTITY_BOUNDS_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class EntityMentionUserInvalid(BadRequest):
"""The mentioned entity is not an user"""
ID = "ENTITY_MENTION_USER_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ErrorTextEmpty(BadRequest):
"""The provided error message is empty"""
ID = "ERROR_TEXT_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ExpireDateInvalid(BadRequest):
"""The expiration date is invalid"""
ID = "EXPIRE_DATE_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ExportCardInvalid(BadRequest):
"""The provided card is invalid"""
ID = "EXPORT_CARD_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ExternalUrlInvalid(BadRequest):
"""The external media URL is invalid"""
ID = "EXTERNAL_URL_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class FieldNameEmpty(BadRequest):
"""The field with the name FIELD_NAME is missing"""
ID = "FIELD_NAME_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class FieldNameInvalid(BadRequest):
"""The field with the name FIELD_NAME is invalid"""
ID = "FIELD_NAME_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class FileIdInvalid(BadRequest):
"""The file id is invalid"""
ID = "FILE_ID_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class FileMigrate(BadRequest):
"""The file is in Data Center No. {value}"""
ID = "FILE_MIGRATE_X"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class FilePartsInvalid(BadRequest):
"""Invalid number of parts."""
ID = "FILE_PARTS_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class FilePartEmpty(BadRequest):
"""The file part sent is empty"""
ID = "FILE_PART_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class FilePartInvalid(BadRequest):
"""The file part number is invalid."""
ID = "FILE_PART_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class FilePartLengthInvalid(BadRequest):
"""The length of a file part is invalid"""
ID = "FILE_PART_LENGTH_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class FilePartSizeChanged(BadRequest):
"""The part size is different from the size of one of the previous parts in the same file"""
ID = "FILE_PART_SIZE_CHANGED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class FilePartSizeInvalid(BadRequest):
"""The file part size is invalid"""
ID = "FILE_PART_SIZE_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class FilePartTooBig(BadRequest):
"""The size limit for the content of the file part has been exceeded"""
ID = "FILE_PART_TOO_BIG"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class FilePartMissing(BadRequest):
"""Part {value} of the file is missing from storage"""
ID = "FILE_PART_X_MISSING"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class FileReferenceEmpty(BadRequest):
"""The file id contains an empty file reference, you must obtain a valid one by fetching the message from the origin context"""
ID = "FILE_REFERENCE_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class FileReferenceExpired(BadRequest):
"""The file id contains an expired file reference, you must obtain a valid one by fetching the message from the origin context"""
ID = "FILE_REFERENCE_EXPIRED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class FileReferenceInvalid(BadRequest):
"""The file id contains an invalid file reference, you must obtain a valid one by fetching the message from the origin context"""
ID = "FILE_REFERENCE_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class FilterIdInvalid(BadRequest):
"""The specified filter ID is invalid"""
ID = "FILTER_ID_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class FirstnameInvalid(BadRequest):
"""The first name is invalid"""
ID = "FIRSTNAME_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class FolderIdEmpty(BadRequest):
"""The folder you tried to delete was already empty"""
ID = "FOLDER_ID_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class FolderIdInvalid(BadRequest):
"""The folder id is invalid"""
ID = "FOLDER_ID_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class FreshChangeAdminsForbidden(BadRequest):
"""You can't change administrator settings in this chat because your session was logged-in recently"""
ID = "FRESH_CHANGE_ADMINS_FORBIDDEN"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class FromMessageBotDisabled(BadRequest):
"""Bots can't use fromMessage min constructors"""
ID = "FROM_MESSAGE_BOT_DISABLED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class FromPeerInvalid(BadRequest):
"""The from peer value is invalid"""
ID = "FROM_PEER_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class GameBotInvalid(BadRequest):
"""You cannot send that game with the current bot"""
ID = "GAME_BOT_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class GeoPointInvalid(BadRequest):
"""Invalid geo point provided"""
ID = "GEO_POINT_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class GifContentTypeInvalid(BadRequest):
"""GIF content-type invalid"""
ID = "GIF_CONTENT_TYPE_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class GifIdInvalid(BadRequest):
"""The provided gif/animation id is invalid"""
ID = "GIF_ID_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class GraphInvalidReload(BadRequest):
"""Invalid graph token provided, please reload the stats and provide the updated token"""
ID = "GRAPH_INVALID_RELOAD"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class GraphOutdatedReload(BadRequest):
"""The graph data is outdated"""
ID = "GRAPH_OUTDATED_RELOAD"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class GroupcallSsrcDuplicateMuch(BadRequest):
"""Too many group call synchronization source duplicates"""
ID = "GROUPCALL_SSRC_DUPLICATE_MUCH"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class GroupedMediaInvalid(BadRequest):
"""The album contains invalid media"""
ID = "GROUPED_MEDIA_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class GroupCallInvalid(BadRequest):
"""The group call is invalid"""
ID = "GROUP_CALL_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class HashInvalid(BadRequest):
"""The provided hash is invalid"""
ID = "HASH_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ImageProcessFailed(BadRequest):
"""The server failed to process your image"""
ID = "IMAGE_PROCESS_FAILED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ImportFileInvalid(BadRequest):
"""The imported file is invalid"""
ID = "IMPORT_FILE_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ImportFormatUnrecognized(BadRequest):
"""The imported format is unrecognized"""
ID = "IMPORT_FORMAT_UNRECOGNIZED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ImportIdInvalid(BadRequest):
"""The import id is invalid"""
ID = "IMPORT_ID_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class InlineResultExpired(BadRequest):
"""The inline bot query expired"""
ID = "INLINE_RESULT_EXPIRED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class InputConstructorInvalid(BadRequest):
"""The provided constructor is invalid"""
ID = "INPUT_CONSTRUCTOR_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class InputFetchError(BadRequest):
"""An error occurred while deserializing TL parameters"""
ID = "INPUT_FETCH_ERROR"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class InputFetchFail(BadRequest):
"""Failed deserializing TL payload"""
ID = "INPUT_FETCH_FAIL"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class InputFilterInvalid(BadRequest):
"""The filter is invalid for this query"""
ID = "INPUT_FILTER_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class InputLayerInvalid(BadRequest):
"""The provided layer is invalid"""
ID = "INPUT_LAYER_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class InputMethodInvalid(BadRequest):
"""The method invoked is invalid in the current schema"""
ID = "INPUT_METHOD_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class InputRequestTooLong(BadRequest):
"""The input request is too long"""
ID = "INPUT_REQUEST_TOO_LONG"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class InputUserDeactivated(BadRequest):
"""The target user has been deleted/deactivated"""
ID = "INPUT_USER_DEACTIVATED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class InviteHashEmpty(BadRequest):
"""The invite hash is empty"""
ID = "INVITE_HASH_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class InviteHashExpired(BadRequest):
"""The chat invite link is no longer valid"""
ID = "INVITE_HASH_EXPIRED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class InviteHashInvalid(BadRequest):
"""The invite link hash is invalid"""
ID = "INVITE_HASH_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class InviteRequestSent(BadRequest):
"""The request to join this chat or channel has been successfully sent"""
ID = "INVITE_REQUEST_SENT"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class InviteRevokedMissing(BadRequest):
"""The action required a chat invite link to be revoked first"""
ID = "INVITE_REVOKED_MISSING"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class LangPackInvalid(BadRequest):
"""The provided language pack is invalid"""
ID = "LANG_PACK_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class LastnameInvalid(BadRequest):
"""The last name is invalid"""
ID = "LASTNAME_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class LimitInvalid(BadRequest):
"""The limit parameter is invalid"""
ID = "LIMIT_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class LinkNotModified(BadRequest):
"""The chat link was not modified because you tried to link to the same target"""
ID = "LINK_NOT_MODIFIED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class LocationInvalid(BadRequest):
"""The file location is invalid"""
ID = "LOCATION_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class MaxIdInvalid(BadRequest):
"""The max_id parameter is invalid"""
ID = "MAX_ID_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class MaxQtsInvalid(BadRequest):
"""The provided QTS is invalid"""
ID = "MAX_QTS_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class Md5ChecksumInvalid(BadRequest):
"""The file's checksum did not match the md5_checksum parameter"""
ID = "MD5_CHECKSUM_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class MediaCaptionTooLong(BadRequest):
"""The media caption is too long"""
ID = "MEDIA_CAPTION_TOO_LONG"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class MediaEmpty(BadRequest):
"""The media you tried to send is invalid"""
ID = "MEDIA_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class MediaInvalid(BadRequest):
"""The media is invalid"""
ID = "MEDIA_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class MediaNewInvalid(BadRequest):
"""The new media to edit the message with is invalid"""
ID = "MEDIA_NEW_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class MediaPrevInvalid(BadRequest):
"""The previous media cannot be edited with anything else"""
ID = "MEDIA_PREV_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class MegagroupIdInvalid(BadRequest):
"""The supergroup is invalid"""
ID = "MEGAGROUP_ID_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class MegagroupPrehistoryHidden(BadRequest):
"""The action failed because the supergroup has the pre-history hidden"""
ID = "MEGAGROUP_PREHISTORY_HIDDEN"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class MegagroupRequired(BadRequest):
"""The request can only be used with a supergroup"""
ID = "MEGAGROUP_REQUIRED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class MessageEditTimeExpired(BadRequest):
"""You can no longer edit this message because too much time has passed"""
ID = "MESSAGE_EDIT_TIME_EXPIRED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class MessageEmpty(BadRequest):
"""The message sent is empty or contains invalid characters"""
ID = "MESSAGE_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class MessageIdsEmpty(BadRequest):
"""The requested message doesn't exist or you provided no message id"""
ID = "MESSAGE_IDS_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class MessageIdInvalid(BadRequest):
"""The message id is invalid"""
ID = "MESSAGE_ID_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class MessageNotModified(BadRequest):
"""The message was not modified because you tried to edit it using the same content"""
ID = "MESSAGE_NOT_MODIFIED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class MessagePollClosed(BadRequest):
"""You can't interact with a closed poll"""
ID = "MESSAGE_POLL_CLOSED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class MessageTooLong(BadRequest):
"""The message text is too long"""
ID = "MESSAGE_TOO_LONG"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class MethodInvalid(BadRequest):
"""The API method is invalid and cannot be used"""
ID = "METHOD_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class MsgIdInvalid(BadRequest):
"""The message ID used in the peer was invalid"""
ID = "MSG_ID_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class MsgWaitFailed(BadRequest):
"""A waiting call returned an error"""
ID = "MSG_WAIT_FAILED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class MultiMediaTooLong(BadRequest):
"""The album/media group contains too many items"""
ID = "MULTI_MEDIA_TOO_LONG"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class NewSaltInvalid(BadRequest):
"""The new salt is invalid"""
ID = "NEW_SALT_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class NewSettingsInvalid(BadRequest):
"""The new settings are invalid"""
ID = "NEW_SETTINGS_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class NextOffsetInvalid(BadRequest):
"""The next offset value is invalid"""
ID = "NEXT_OFFSET_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class OffsetInvalid(BadRequest):
"""The offset parameter is invalid"""
ID = "OFFSET_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class OffsetPeerIdInvalid(BadRequest):
"""The provided offset peer is invalid"""
ID = "OFFSET_PEER_ID_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class OptionsTooMuch(BadRequest):
"""The poll options are too many"""
ID = "OPTIONS_TOO_MUCH"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class OptionInvalid(BadRequest):
"""The option specified is invalid and does not exist in the target poll"""
ID = "OPTION_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PackShortNameInvalid(BadRequest):
"""Invalid sticker pack name. It must begin with a letter, can't contain consecutive underscores and must end in '_by_<bot username>'."""
ID = "PACK_SHORT_NAME_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PackShortNameOccupied(BadRequest):
"""A sticker pack with this name already exists"""
ID = "PACK_SHORT_NAME_OCCUPIED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PackTitleInvalid(BadRequest):
"""The sticker pack title is invalid"""
ID = "PACK_TITLE_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ParticipantsTooFew(BadRequest):
"""The chat doesn't have enough participants"""
ID = "PARTICIPANTS_TOO_FEW"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ParticipantVersionOutdated(BadRequest):
"""The other participant is using an outdated Telegram app version"""
ID = "PARTICIPANT_VERSION_OUTDATED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PasswordEmpty(BadRequest):
"""The password provided is empty"""
ID = "PASSWORD_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PasswordHashInvalid(BadRequest):
"""The two-step verification password is invalid"""
ID = "PASSWORD_HASH_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PasswordMissing(BadRequest):
"""The account is missing the two-step verification password"""
ID = "PASSWORD_MISSING"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PasswordRecoveryNa(BadRequest):
"""The password recovery e-mail is not available"""
ID = "PASSWORD_RECOVERY_NA"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PasswordRequired(BadRequest):
"""The two-step verification password is required for this method"""
ID = "PASSWORD_REQUIRED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PasswordTooFresh(BadRequest):
    # Parametrized RPC error: the trailing "_X" in the ID marks a numeric
    # value from the server, substituted into ``{value}`` (wait time, seconds).
    """The two-step verification password was added recently and you are required to wait {value} seconds"""
    ID = "PASSWORD_TOO_FRESH_X"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__  # docstring is the runtime message template — keep in sync
class PaymentProviderInvalid(BadRequest):
"""The payment provider was not recognised or its token was invalid"""
ID = "PAYMENT_PROVIDER_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PeerFlood(BadRequest):
"""The method can't be used because your account is currently limited"""
ID = "PEER_FLOOD"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PeerIdInvalid(BadRequest):
"""The peer id being used is invalid or not known yet. Make sure you meet the peer before interacting with it"""
ID = "PEER_ID_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PeerIdNotSupported(BadRequest):
"""The provided peer id is not supported"""
ID = "PEER_ID_NOT_SUPPORTED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PersistentTimestampEmpty(BadRequest):
"""The pts argument is empty"""
ID = "PERSISTENT_TIMESTAMP_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PersistentTimestampInvalid(BadRequest):
"""The persistent timestamp is invalid"""
ID = "PERSISTENT_TIMESTAMP_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PhoneCodeEmpty(BadRequest):
"""The phone code is missing"""
ID = "PHONE_CODE_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PhoneCodeExpired(BadRequest):
"""The confirmation code has expired"""
ID = "PHONE_CODE_EXPIRED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PhoneCodeHashEmpty(BadRequest):
"""The phone code hash is missing"""
ID = "PHONE_CODE_HASH_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PhoneCodeInvalid(BadRequest):
"""The confirmation code is invalid"""
ID = "PHONE_CODE_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PhoneNumberAppSignupForbidden(BadRequest):
"""You can't sign up using this app"""
ID = "PHONE_NUMBER_APP_SIGNUP_FORBIDDEN"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PhoneNumberBanned(BadRequest):
"""The phone number is banned from Telegram and cannot be used"""
ID = "PHONE_NUMBER_BANNED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PhoneNumberFlood(BadRequest):
"""This number has tried to login too many times"""
ID = "PHONE_NUMBER_FLOOD"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PhoneNumberInvalid(BadRequest):
"""The phone number is invalid"""
ID = "PHONE_NUMBER_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PhoneNumberOccupied(BadRequest):
"""The phone number is already in use"""
ID = "PHONE_NUMBER_OCCUPIED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PhoneNumberUnoccupied(BadRequest):
"""The phone number is not yet being used"""
ID = "PHONE_NUMBER_UNOCCUPIED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PhonePasswordProtected(BadRequest):
"""The phone is password protected"""
ID = "PHONE_PASSWORD_PROTECTED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PhotoContentTypeInvalid(BadRequest):
"""The photo content type is invalid"""
ID = "PHOTO_CONTENT_TYPE_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PhotoContentUrlEmpty(BadRequest):
"""The photo content URL is empty"""
ID = "PHOTO_CONTENT_URL_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PhotoCropFileMissing(BadRequest):
"""Photo crop file missing"""
ID = "PHOTO_CROP_FILE_MISSING"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PhotoCropSizeSmall(BadRequest):
"""The photo is too small"""
ID = "PHOTO_CROP_SIZE_SMALL"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PhotoExtInvalid(BadRequest):
"""The photo extension is invalid"""
ID = "PHOTO_EXT_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PhotoFileMissing(BadRequest):
"""Profile photo file missing"""
ID = "PHOTO_FILE_MISSING"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PhotoIdInvalid(BadRequest):
"""The photo id is invalid"""
ID = "PHOTO_ID_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PhotoInvalid(BadRequest):
"""The photo is invalid"""
ID = "PHOTO_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PhotoInvalidDimensions(BadRequest):
"""The photo dimensions are invalid"""
ID = "PHOTO_INVALID_DIMENSIONS"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PhotoSaveFileInvalid(BadRequest):
"""The photo you tried to send cannot be saved by Telegram"""
ID = "PHOTO_SAVE_FILE_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PhotoThumbUrlEmpty(BadRequest):
"""The photo thumb URL is empty"""
ID = "PHOTO_THUMB_URL_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PhotoThumbUrlInvalid(BadRequest):
"""The photo thumb URL is invalid"""
ID = "PHOTO_THUMB_URL_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PinnedDialogsTooMuch(BadRequest):
"""Too many pinned dialogs"""
ID = "PINNED_DIALOGS_TOO_MUCH"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PinRestricted(BadRequest):
"""You can't pin messages in private chats with other people"""
ID = "PIN_RESTRICTED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PollAnswersInvalid(BadRequest):
"""The poll answers are invalid"""
ID = "POLL_ANSWERS_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PollOptionDuplicate(BadRequest):
"""A duplicate option was sent in the same poll"""
ID = "POLL_OPTION_DUPLICATE"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PollOptionInvalid(BadRequest):
"""A poll option used invalid data (the data may be too long)"""
ID = "POLL_OPTION_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PollQuestionInvalid(BadRequest):
"""The poll question is invalid"""
ID = "POLL_QUESTION_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PollUnsupported(BadRequest):
"""This layer does not support polls in the invoked method"""
ID = "POLL_UNSUPPORTED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PollVoteRequired(BadRequest):
"""Cast a vote in the poll before calling this method"""
ID = "POLL_VOTE_REQUIRED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PrivacyKeyInvalid(BadRequest):
"""The privacy key is invalid"""
ID = "PRIVACY_KEY_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PrivacyTooLong(BadRequest):
"""Your privacy exception list has exceeded the maximum capacity"""
ID = "PRIVACY_TOO_LONG"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PrivacyValueInvalid(BadRequest):
"""The privacy value is invalid"""
ID = "PRIVACY_VALUE_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class QueryIdEmpty(BadRequest):
"""The query ID is empty"""
ID = "QUERY_ID_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class QueryIdInvalid(BadRequest):
"""The callback query id is invalid"""
ID = "QUERY_ID_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class QueryTooShort(BadRequest):
"""The query is too short"""
ID = "QUERY_TOO_SHORT"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class QuizCorrectAnswersEmpty(BadRequest):
"""The correct answers of the quiz are empty"""
ID = "QUIZ_CORRECT_ANSWERS_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class QuizCorrectAnswersTooMuch(BadRequest):
"""The quiz contains too many correct answers"""
ID = "QUIZ_CORRECT_ANSWERS_TOO_MUCH"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class QuizCorrectAnswerInvalid(BadRequest):
"""The correct answers of the quiz are invalid"""
ID = "QUIZ_CORRECT_ANSWER_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class QuizMultipleInvalid(BadRequest):
"""A quiz can't have multiple answers"""
ID = "QUIZ_MULTIPLE_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class RandomIdEmpty(BadRequest):
"""The random ID is empty"""
ID = "RANDOM_ID_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class RandomIdInvalid(BadRequest):
"""The provided random ID is invalid"""
ID = "RANDOM_ID_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class RandomLengthInvalid(BadRequest):
"""The random length is invalid"""
ID = "RANDOM_LENGTH_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class RangesInvalid(BadRequest):
"""Invalid range provided"""
ID = "RANGES_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ReactionEmpty(BadRequest):
"""The reaction provided is empty"""
ID = "REACTION_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ReactionInvalid(BadRequest):
"""Invalid reaction provided (only valid emoji are allowed)"""
ID = "REACTION_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ReflectorNotAvailable(BadRequest):
"""The call reflector is not available"""
ID = "REFLECTOR_NOT_AVAILABLE"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ReplyMarkupBuyEmpty(BadRequest):
"""Reply markup for buy button empty"""
ID = "REPLY_MARKUP_BUY_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ReplyMarkupGameEmpty(BadRequest):
"""The provided reply markup for the game is empty"""
ID = "REPLY_MARKUP_GAME_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ReplyMarkupInvalid(BadRequest):
"""The provided reply markup is invalid"""
ID = "REPLY_MARKUP_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ReplyMarkupTooLong(BadRequest):
"""The reply markup is too long"""
ID = "REPLY_MARKUP_TOO_LONG"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ResultsTooMuch(BadRequest):
"""The result contains too many items"""
ID = "RESULTS_TOO_MUCH"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ResultIdDuplicate(BadRequest):
"""The result contains items with duplicated identifiers"""
ID = "RESULT_ID_DUPLICATE"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ResultIdEmpty(BadRequest):
"""Result ID empty"""
ID = "RESULT_ID_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ResultIdInvalid(BadRequest):
"""The given result cannot be used to send the selection to the bot"""
ID = "RESULT_ID_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ResultTypeInvalid(BadRequest):
"""The result type is invalid"""
ID = "RESULT_TYPE_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class RevoteNotAllowed(BadRequest):
"""You cannot change your vote"""
ID = "REVOTE_NOT_ALLOWED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class RsaDecryptFailed(BadRequest):
"""Internal RSA decryption failed"""
ID = "RSA_DECRYPT_FAILED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ScheduleBotNotAllowed(BadRequest):
"""Bots are not allowed to schedule messages"""
ID = "SCHEDULE_BOT_NOT_ALLOWED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ScheduleDateInvalid(BadRequest):
"""Invalid schedule date provided"""
ID = "SCHEDULE_DATE_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ScheduleDateTooLate(BadRequest):
"""The date you tried to schedule is too far in the future (more than one year)"""
ID = "SCHEDULE_DATE_TOO_LATE"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ScheduleStatusPrivate(BadRequest):
"""You cannot schedule a message until the person comes online if their privacy does not show this information"""
ID = "SCHEDULE_STATUS_PRIVATE"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ScheduleTooMuch(BadRequest):
"""You tried to schedule too many messages in this chat"""
ID = "SCHEDULE_TOO_MUCH"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class SearchQueryEmpty(BadRequest):
"""The search query is empty"""
ID = "SEARCH_QUERY_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class SecondsInvalid(BadRequest):
"""The seconds interval is invalid"""
ID = "SECONDS_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class SendMessageMediaInvalid(BadRequest):
"""The message media is invalid"""
ID = "SEND_MESSAGE_MEDIA_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class SendMessageTypeInvalid(BadRequest):
"""The message type is invalid"""
ID = "SEND_MESSAGE_TYPE_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class SessionTooFresh(BadRequest):
    # Parametrized RPC error: the trailing "_X" in the ID marks a numeric
    # value from the server (the required wait time in seconds).
    #
    # Fix: the ID is parametrized but the message template was missing the
    # ``{value}`` placeholder, unlike the sibling PASSWORD_TOO_FRESH_X stub,
    # so the server-supplied wait time was silently dropped on formatting.
    """You can't do this action because the current session was logged-in recently and you are required to wait {value} seconds"""
    ID = "SESSION_TOO_FRESH_X"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__  # docstring is the runtime message template — keep in sync
class SettingsInvalid(BadRequest):
"""Invalid settings were provided"""
ID = "SETTINGS_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class Sha256HashInvalid(BadRequest):
"""The provided SHA256 hash is invalid"""
ID = "SHA256_HASH_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ShortnameOccupyFailed(BadRequest):
"""An error occurred when trying to register the short-name used for the sticker pack. Try a different name"""
ID = "SHORTNAME_OCCUPY_FAILED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class SlowmodeMultiMsgsDisabled(BadRequest):
"""Slowmode is enabled, you cannot forward multiple messages to this group"""
ID = "SLOWMODE_MULTI_MSGS_DISABLED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class SmsCodeCreateFailed(BadRequest):
"""An error occurred while creating the SMS code"""
ID = "SMS_CODE_CREATE_FAILED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class SrpIdInvalid(BadRequest):
"""Invalid SRP ID provided"""
ID = "SRP_ID_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class SrpPasswordChanged(BadRequest):
"""The password has changed"""
ID = "SRP_PASSWORD_CHANGED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class StartParamEmpty(BadRequest):
"""The start parameter is empty"""
ID = "START_PARAM_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class StartParamInvalid(BadRequest):
"""The start parameter is invalid"""
ID = "START_PARAM_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class StartParamTooLong(BadRequest):
"""The start parameter is too long"""
ID = "START_PARAM_TOO_LONG"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class StickersetInvalid(BadRequest):
"""The requested sticker set is invalid"""
ID = "STICKERSET_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class StickersetNotModified(BadRequest):
"""The sticker set is not modified"""
ID = "STICKERSET_NOT_MODIFIED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class StickersEmpty(BadRequest):
"""The sticker provided is empty"""
ID = "STICKERS_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class StickersTooMuch(BadRequest):
"""Too many stickers in the set"""
ID = "STICKERS_TOO_MUCH"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class StickerDocumentInvalid(BadRequest):
"""The sticker document is invalid"""
ID = "STICKER_DOCUMENT_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class StickerEmojiInvalid(BadRequest):
"""The sticker emoji is invalid"""
ID = "STICKER_EMOJI_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class StickerFileInvalid(BadRequest):
"""The sticker file is invalid"""
ID = "STICKER_FILE_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class StickerIdInvalid(BadRequest):
"""The provided sticker id is invalid"""
ID = "STICKER_ID_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class StickerInvalid(BadRequest):
"""The provided sticker is invalid"""
ID = "STICKER_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class StickerPngDimensions(BadRequest):
"""The sticker png dimensions are invalid"""
ID = "STICKER_PNG_DIMENSIONS"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class StickerPngNopng(BadRequest):
"""Stickers must be png files but the provided image was not a png"""
ID = "STICKER_PNG_NOPNG"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class StickerTgsNotgs(BadRequest):
"""A tgs sticker file was expected, but something else was provided"""
ID = "STICKER_TGS_NOTGS"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class StickerThumbPngNopng(BadRequest):
"""A png sticker thumbnail file was expected, but something else was provided"""
ID = "STICKER_THUMB_PNG_NOPNG"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class StickerVideoNowebm(BadRequest):
"""A webm video file was expected, but something else was provided"""
ID = "STICKER_VIDEO_NOWEBM"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class TakeoutInvalid(BadRequest):
"""The takeout id is invalid"""
ID = "TAKEOUT_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class TakeoutRequired(BadRequest):
"""The method must be invoked inside a takeout session"""
ID = "TAKEOUT_REQUIRED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class TempAuthKeyEmpty(BadRequest):
"""The temporary auth key provided is empty"""
ID = "TEMP_AUTH_KEY_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ThemeFileInvalid(BadRequest):
"""Invalid theme file provided"""
ID = "THEME_FILE_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ThemeFormatInvalid(BadRequest):
"""Invalid theme format provided"""
ID = "THEME_FORMAT_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ThemeInvalid(BadRequest):
"""Invalid theme provided"""
ID = "THEME_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ThemeMimeInvalid(BadRequest):
"""You cannot create this theme because the mime-type is invalid"""
ID = "THEME_MIME_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class TmpPasswordDisabled(BadRequest):
"""The temporary password is disabled"""
ID = "TMP_PASSWORD_DISABLED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class TmpPasswordInvalid(BadRequest):
"""The temporary password is invalid"""
ID = "TMP_PASSWORD_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class TokenInvalid(BadRequest):
"""The provided token is invalid"""
ID = "TOKEN_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class TtlDaysInvalid(BadRequest):
"""The provided TTL days is invalid"""
ID = "TTL_DAYS_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class TtlMediaInvalid(BadRequest):
"""The media does not support self-destruction"""
ID = "TTL_MEDIA_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class TypesEmpty(BadRequest):
"""The types parameter is empty"""
ID = "TYPES_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class TypeConstructorInvalid(BadRequest):
"""The type constructor is invalid"""
ID = "TYPE_CONSTRUCTOR_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class UntilDateInvalid(BadRequest):
"""That date parameter is invalid"""
ID = "UNTIL_DATE_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class UrlInvalid(BadRequest):
"""The URL provided is invalid"""
ID = "URL_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class UsageLimitInvalid(BadRequest):
"""The usage limit is invalid"""
ID = "USAGE_LIMIT_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class UsernameInvalid(BadRequest):
"""The username is invalid"""
ID = "USERNAME_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class UsernameNotModified(BadRequest):
"""The username was not modified because you tried to edit it using the same one"""
ID = "USERNAME_NOT_MODIFIED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class UsernameNotOccupied(BadRequest):
"""The username is not occupied by anyone"""
ID = "USERNAME_NOT_OCCUPIED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class UsernameOccupied(BadRequest):
"""The username is already in use by someone else"""
ID = "USERNAME_OCCUPIED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class UserpicUploadRequired(BadRequest):
"""You are required to upload a profile picture for this action"""
ID = "USERPIC_UPLOAD_REQUIRED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class UsersTooFew(BadRequest):
"""Not enough users (to create a chat, for example)"""
ID = "USERS_TOO_FEW"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class UsersTooMuch(BadRequest):
"""The maximum number of users has been exceeded (to create a chat, for example)"""
ID = "USERS_TOO_MUCH"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class UserAdminInvalid(BadRequest):
"""The action requires admin privileges. Probably you tried to edit admin privileges on someone you don't have rights to"""
ID = "USER_ADMIN_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class UserAlreadyParticipant(BadRequest):
"""The user is already a participant of this chat"""
ID = "USER_ALREADY_PARTICIPANT"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class UserBannedInChannel(BadRequest):
"""You are limited from sending messages in supergroups/channels, check @SpamBot for details"""
ID = "USER_BANNED_IN_CHANNEL"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class UserBlocked(BadRequest):
"""The user is blocked"""
ID = "USER_BLOCKED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class UserBot(BadRequest):
"""Bots in channels can only be administrators, not members."""
ID = "USER_BOT"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class UserBotInvalid(BadRequest):
"""This method can only be used by a bot"""
ID = "USER_BOT_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class UserBotRequired(BadRequest):
"""The method can be used by bots only"""
ID = "USER_BOT_REQUIRED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class UserChannelsTooMuch(BadRequest):
"""The user is already in too many channels or supergroups"""
ID = "USER_CHANNELS_TOO_MUCH"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class UserCreator(BadRequest):
"""You can't leave this channel because you're its creator"""
ID = "USER_CREATOR"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class UserIdInvalid(BadRequest):
"""The user id being used is invalid or not known yet. Make sure you meet the user before interacting with it"""
ID = "USER_ID_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class UserInvalid(BadRequest):
"""The provided user is invalid"""
ID = "USER_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class UserIsBlocked(BadRequest):
"""The user blocked you"""
ID = "USER_IS_BLOCKED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class UserIsBot(BadRequest):
"""A bot cannot send messages to other bots or to itself"""
ID = "USER_IS_BOT"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class UserKicked(BadRequest):
"""This user was kicked from this chat"""
ID = "USER_KICKED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class UserNotMutualContact(BadRequest):
"""The user is not a mutual contact"""
ID = "USER_NOT_MUTUAL_CONTACT"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class UserNotParticipant(BadRequest):
"""The user is not a member of this chat"""
ID = "USER_NOT_PARTICIPANT"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class VideoContentTypeInvalid(BadRequest):
"""The video content type is invalid (i.e.: not streamable)"""
ID = "VIDEO_CONTENT_TYPE_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class VideoFileInvalid(BadRequest):
"""The video file is invalid"""
ID = "VIDEO_FILE_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class VoiceMessagesForbidden(BadRequest):
"""Voice messages are restricted"""
ID = "VOICE_MESSAGES_FORBIDDEN"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class VolumeLocNotFound(BadRequest):
"""The volume location can't be found"""
ID = "VOLUME_LOC_NOT_FOUND"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class WallpaperFileInvalid(BadRequest):
"""The provided file cannot be used as a wallpaper"""
ID = "WALLPAPER_FILE_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class WallpaperInvalid(BadRequest):
"""The input wallpaper was not valid"""
ID = "WALLPAPER_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class WallpaperMimeInvalid(BadRequest):
"""The wallpaper mime type is invalid"""
ID = "WALLPAPER_MIME_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class WcConvertUrlInvalid(BadRequest):
"""WC convert URL invalid"""
ID = "WC_CONVERT_URL_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class WebdocumentInvalid(BadRequest):
"""The web document is invalid"""
ID = "WEBDOCUMENT_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class WebdocumentMimeInvalid(BadRequest):
"""The web document mime type is invalid"""
ID = "WEBDOCUMENT_MIME_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class WebdocumentSizeTooBig(BadRequest):
"""The web document is too big"""
ID = "WEBDOCUMENT_SIZE_TOO_BIG"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class WebdocumentUrlEmpty(BadRequest):
"""The web document URL is empty"""
ID = "WEBDOCUMENT_URL_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class WebdocumentUrlInvalid(BadRequest):
"""The web document URL is invalid"""
ID = "WEBDOCUMENT_URL_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class WebpageCurlFailed(BadRequest):
"""Telegram server could not fetch the provided URL"""
ID = "WEBPAGE_CURL_FAILED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class WebpageMediaEmpty(BadRequest):
"""The URL doesn't contain any valid media"""
ID = "WEBPAGE_MEDIA_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class YouBlockedUser(BadRequest):
"""You blocked this user"""
ID = "YOU_BLOCKED_USER"
"""``str``: RPC Error ID"""
MESSAGE = __doc__ | PypiClean |
/CLCR-1.0.0-py3-none-any.whl/CLCR_benchmarks/old_benchmark_one.py |
"""First benchmark for the CLCR program (determine average cutoff distance)"""
__author__ = "6947325: Johannes Zieres"
__credits__ = ""
__email__ = "johannes.zieres@gmail.com"
import datetime
import os
import random
import glob
import time
import matplotlib.pyplot as plt
def exclude_proteins_with_j(input_file_path):
    """
    Create a new protein amino acid fasta file without proteins containing "J"
    (Leucine or Isoleucine), because EXONERATE can't handle that character at
    database creation.

    :param input_file_path: file path of the original unmodified protein file
    :return: None, the new file is created at the cwd of the program (same
             basename as the input file)
    """
    kept_lines = []         # all lines of the proteins that are kept
    current_protein = []    # lines of the protein currently being read
    keep_current = False    # False until the first header / when a "J" was seen
    with open(input_file_path) as input_file:
        for line in input_file:
            if line[0] != ">":
                # a single "J" anywhere in the sequence discards the whole protein
                if "J" in line:
                    keep_current = False
                if keep_current:
                    current_protein.append(line)
            else:
                # flush the previous protein if it was J-free
                if keep_current:
                    kept_lines += current_protein
                current_protein = [line]
                keep_current = True
    # flush the last remaining protein if possible
    if keep_current:
        kept_lines += current_protein

    # Write the filtered file into the current working directory.
    # NOTE(review): if the input file already lives in the cwd it is
    # overwritten -- TODO confirm this is intended
    new_file_path = os.getcwd() + "/" + input_file_path.split("/")[-1]
    print("new file at: ", new_file_path)
    with open(new_file_path, "w") as new_file:
        new_file.writelines(kept_lines)
    return None
def create_database(protein_file_path="/share/project/johannes/bachelor/exonerate_vs_diamond_benchmark/NCBI_files"
                                      "/GCF_000001735.4_TAIR10.1_protein.faa",
                    database_path="/share/project/johannes/bachelor/exonerate_vs_diamond"
                                  "_benchmark/diamond_database/diamond_db"):
    """
    Executes the command for creating the diamond database.

    The previously hard-coded paths are now parameters with the old values as
    defaults, so existing no-argument calls behave exactly as before.

    :param protein_file_path: path of the protein fasta file used as database input
    :param database_path: output path/prefix of the created diamond database
    :return: None
    """
    diamond_command = "diamond makedb --in " + protein_file_path + " -d " + database_path
    # os.system blocks until the database creation is finished
    os.system(diamond_command)
    return None
def create_queries(cds_seq_file_path, query_quantity, max_cds_len):
    """
    Parse the CDS regions from a fasta file, shuffle them, and hand back at
    most ``query_quantity`` of them.

    :param cds_seq_file_path: file path to the cds fasta file
    :param query_quantity: how many of the parsed regions should be returned
                           (capped at the number of available regions)
    :param max_cds_len: every CDS region longer than this is excluded, used to
                        keep the runtime of the complete_prot_* functions down
    :return: list of randomly chosen regions, each a sublist of the header
             string and a list of single bases (joinable via "".join)
    """
    parsed_regions = []
    header = ""
    bases = ["#####"]  # sentinel region, removed again after parsing
    cds_handle = open(cds_seq_file_path)
    print("--> read in cds file")
    for file_line in cds_handle:
        if file_line[0] == "#":  # skip comment lines
            continue
        if file_line[0] == ">":
            # a new header closes the previous region (length-filtered)
            if len(bases) <= max_cds_len:
                parsed_regions.append([header, bases])
            bases = []
            header = file_line.strip()
        else:
            bases.extend(file_line.strip())  # one list entry per base
    # close the final region if it passes the length filter
    if len(bases) <= max_cds_len:
        parsed_regions.append([header, bases])
    cds_handle.close()
    parsed_regions.pop(0)  # drop the sentinel region

    print("--> create cds queries")
    random.seed()             # seed the RNG with the system time
    random.shuffle(parsed_regions)
    # cap the requested amount at what is actually available
    chosen = min(query_quantity, len(parsed_regions))
    return parsed_regions[:chosen]
def database_comp_exonerate_del(cds_list, protein_file_path):
    """
    (create frameshift with deletion version)
    Determine the first nucleotide position in a query at which an inserted
    frameshift is reported by Exonerate instead of the alignment simply being
    cut off there. For every CDS/protein pair a single-nucleotide deletion is
    moved from position 0 towards position 39; the first position at which
    Exonerate emits a frameshift ("F" in the vulgar output) is recorded
    (39 acts as an upper threshold when nothing is detected).

    :param cds_list: list containing the unmodified cds regions, which are
                     used as templates for the modified queries
    :param protein_file_path: path to the protein file containing the amino
                              acid sequences
    :return: None, but an output file containing the results named
             exonerate_output.txt is created
    """
    # --- Read in the protein sequences ---------------------------------
    protein_sequences = []
    protein_header = ""
    current_protein_seq = ["#####"]  # sentinel entry, removed after parsing
    print("--> read in protein file")
    with open(protein_file_path) as protein_file:
        for line in protein_file:
            if line[0] != "#":  # exclude comment lines
                if line[0] != ">":
                    for amin_acid in line.strip():  # fill up the current sequence
                        current_protein_seq.append(amin_acid)
                else:
                    protein_sequences.append([protein_header, current_protein_seq])
                    # reset for the next protein
                    current_protein_seq = []
                    protein_header = line.strip()
    # append the last remaining protein
    protein_sequences.append([protein_header, current_protein_seq])
    # remove the initialising sentinel entry
    protein_sequences.pop(0)

    # --- Search the fitting protein for each CDS ------------------------
    combined_cds_protein_list = []  # [[cds, protein], ...]
    print("--> search matching CDS-protein pairs")
    for cds_region in cds_list:
        cds_id = cds_region[0].split("protein_id=")[1].split("]")[0]
        for protein in protein_sequences:
            protein_id = protein[0].split()[0][1:]
            if cds_id == protein_id:
                combined_cds_protein_list.append([cds_region, protein])
                break

    # --- Exonerate runs --------------------------------------------------
    list_length = len(combined_cds_protein_list)
    process_count = 0
    frameshift_detection_threshold_list = []  # first detection position per query
    print("--> start with exonerate runs")
    for region_pair in combined_cds_protein_list:
        # coarse progress messages
        if process_count == int(list_length * 0.1):
            print("--> 10% finished")
        elif process_count == int(list_length * 0.25):
            print("--> 25% finished")
        elif process_count == int(list_length * 0.5):
            print("--> 50% finished")
        elif process_count == int(list_length * 0.75):
            print("--> 75% finished")
        for position in range(40):  # 40 as biggest position, to prevent unnecessary runs
            # build the query: delete the nucleotide at the current position
            mod_current_cds = ("".join(region_pair[0][1][:position])) + ("".join(region_pair[0][1][position + 1:]))
            # "w" mode overwrites the files of the previous run; the context
            # managers fix the original leak of one file handle per iteration
            with open("cds_query.fasta", "w") as new_cds_query:
                new_cds_query.write(region_pair[0][0] + "\n")   # CDS header
                new_cds_query.write(mod_current_cds + "\n")     # CDS nucleotide sequence
            with open("prot_sbjct.fasta", "w") as new_prot_sbjct:
                new_prot_sbjct.write(region_pair[1][0] + "\n")  # protein header
                new_prot_sbjct.write("".join(region_pair[1][1]) + "\n")
            # run exonerate; os.system blocks until the command is finished
            os.system("exonerate -m protein2dna --showvulgar true -Q protein -T dna --showalignment false --verbose 0 "
                      "-q prot_sbjct.fasta -t cds_query.fasta >exonerate_temp_output.txt")
            frameshift_detected = False
            # the vulgar output marks a frameshift with "F"
            # NOTE(review): any "F" in the line counts, which could also match
            # an "F" inside a sequence id -- TODO confirm ids are F-free
            with open("exonerate_temp_output.txt") as new_exonerate_output:
                for line in new_exonerate_output:
                    if "F" in line:
                        frameshift_detected = True
                        break
            # stop shifting the deletion once a frameshift was detected
            if frameshift_detected:
                break
        # append the first position with a detected frameshift; if nothing was
        # detected the last loop position (39) acts as an upper threshold
        frameshift_detection_threshold_list.append(position)
        process_count += 1  # increase the progress count

    # --- Save the results and report the mean ----------------------------
    result_sum = 0
    with open("exonerate_output.txt", "w") as output_file:
        for x in frameshift_detection_threshold_list:
            result_sum += x
            output_file.write(str(x) + "\n")
    print("--> The mean threshold for exonerate frameshift detection is: ",
          result_sum/len(frameshift_detection_threshold_list))
    return None
def database_comp_exonerate_ins(cds_list, protein_file_path):
    """
    (create frameshift with insertion version)
    Determine the first nucleotide position in a query at which an inserted
    frameshift is reported by Exonerate instead of the alignment simply being
    cut off there. For every CDS/protein pair a single random-base insertion
    is moved from position 0 towards position 39; the first position at which
    Exonerate emits a frameshift ("F" in the vulgar output) is recorded
    (39 acts as an upper threshold when nothing is detected).

    :param cds_list: list containing the unmodified cds regions, which are
                     used as templates for the modified queries
    :param protein_file_path: path to the protein file containing the amino
                              acid sequences
    :return: None, but an output file containing the results named
             exonerate_output.txt is created
    """
    # --- Read in the protein sequences ---------------------------------
    protein_sequences = []
    protein_header = ""
    current_protein_seq = ["#####"]  # sentinel entry, removed after parsing
    print("--> read in protein file")
    with open(protein_file_path) as protein_file:
        for line in protein_file:
            if line[0] != "#":  # exclude comment lines
                if line[0] != ">":
                    for amin_acid in line.strip():  # fill up the current sequence
                        current_protein_seq.append(amin_acid)
                else:
                    protein_sequences.append([protein_header, current_protein_seq])
                    # reset for the next protein
                    current_protein_seq = []
                    protein_header = line.strip()
    # append the last remaining protein
    protein_sequences.append([protein_header, current_protein_seq])
    # remove the initialising sentinel entry
    protein_sequences.pop(0)

    # --- Search the fitting protein for each CDS ------------------------
    combined_cds_protein_list = []  # [[cds, protein], ...]
    print("--> search matching CDS-protein pairs")
    for cds_region in cds_list:
        cds_id = cds_region[0].split("protein_id=")[1].split("]")[0]
        for protein in protein_sequences:
            protein_id = protein[0].split()[0][1:]
            if cds_id == protein_id:
                combined_cds_protein_list.append([cds_region, protein])
                break

    # --- Exonerate runs --------------------------------------------------
    list_length = len(combined_cds_protein_list)
    process_count = 0
    frameshift_detection_threshold_list = []  # first detection position per query
    base_list = ["A", "T", "G", "C"]
    print("--> start with exonerate runs")
    for region_pair in combined_cds_protein_list:
        # coarse progress messages
        if process_count == int(list_length * 0.1):
            print("--> 10% finished")
        elif process_count == int(list_length * 0.25):
            print("--> 25% finished")
        elif process_count == int(list_length * 0.5):
            print("--> 50% finished")
        elif process_count == int(list_length * 0.75):
            print("--> 75% finished")
        for position in range(40):  # 40 as biggest position, to prevent unnecessary runs
            insertion_base = random.choice(base_list)  # choose a random base
            # build the query: insert the random base at the current position
            mod_current_cds = ("".join(region_pair[0][1][:position])) + insertion_base +\
                              ("".join(region_pair[0][1][position:]))
            # "w" mode overwrites the files of the previous run; the context
            # managers fix the original leak of one file handle per iteration
            with open("cds_query.fasta", "w") as new_cds_query:
                new_cds_query.write(region_pair[0][0] + "\n")   # CDS header
                new_cds_query.write(mod_current_cds + "\n")     # CDS nucleotide sequence
            with open("prot_sbjct.fasta", "w") as new_prot_sbjct:
                new_prot_sbjct.write(region_pair[1][0] + "\n")  # protein header
                new_prot_sbjct.write("".join(region_pair[1][1]) + "\n")
            # run exonerate; os.system blocks until the command is finished
            os.system("exonerate -m protein2dna --showvulgar true -Q protein -T dna --showalignment false --verbose 0 "
                      "-q prot_sbjct.fasta -t cds_query.fasta >exonerate_temp_output.txt")
            frameshift_detected = False
            # the vulgar output marks a frameshift with "F"
            # NOTE(review): any "F" in the line counts, which could also match
            # an "F" inside a sequence id -- TODO confirm ids are F-free
            with open("exonerate_temp_output.txt") as new_exonerate_output:
                for line in new_exonerate_output:
                    if "F" in line:
                        frameshift_detected = True
                        break
            # stop shifting the insertion once a frameshift was detected
            if frameshift_detected:
                break
        # append the first position with a detected frameshift; if nothing was
        # detected the last loop position (39) acts as an upper threshold
        frameshift_detection_threshold_list.append(position)
        process_count += 1  # increase the progress count

    # --- Save the results and report the mean ----------------------------
    result_sum = 0
    with open("exonerate_output.txt", "w") as output_file:
        for x in frameshift_detection_threshold_list:
            result_sum += x
            output_file.write(str(x) + "\n")
    print("--> The mean threshold for exonerate frameshift detection is: ",
          result_sum/len(frameshift_detection_threshold_list))
    return None
def database_comp_diamond_del(query_list, protein_database):
    """
    (create frameshift with deletion version)
    Receives a query list containing all CDS regions used for the benchmark
    and the path to the diamond database for the diamond blastx runs. The
    frameshifts are inserted as deletions like in the database_comp_exonerate
    function: for each query a deletion is moved from position 0 towards
    position 39, and the first position at which diamond reports a frameshift
    ("/" or "\\" in an alignment row) is recorded (39 acts as an upper
    threshold when nothing is detected).

    :param query_list: list containing all used CDS regions for the benchmark
    :param protein_database: path to the protein database for the diamond
                             blastx runs
    :return: None, but an output file containing the results named
             diamond_output.txt is created
    """
    list_length = len(query_list)
    process_count = 0
    frameshift_detection_threshold_list = []  # first detection position per query
    print("--> start with diamond runs")
    for cds_region in query_list:
        # coarse progress messages
        if process_count == int(list_length * 0.1):
            print("--> 10% finished")
        elif process_count == int(list_length * 0.25):
            print("--> 25% finished")
        elif process_count == int(list_length * 0.5):
            print("--> 50% finished")
        elif process_count == int(list_length * 0.75):
            print("--> 75% finished")
        for position in range(40):  # 40 as biggest position, to prevent unnecessary runs
            # build the query: delete the nucleotide at the current position
            mod_current_cds = ("".join(cds_region[1][:position])) + ("".join(cds_region[1][position + 1:]))
            # "w" mode overwrites the file of the previous run
            with open("cds_query.fasta", "w") as new_cds_query:
                new_cds_query.write(cds_region[0] + "\n")    # CDS header
                new_cds_query.write(mod_current_cds + "\n")  # CDS nucleotide sequence
            # run diamond; os.system blocks until the command is finished
            os.system("diamond blastx -d " + protein_database + " -q cds_query.fasta -o diamond_temp_output.txt -k 1 "
                                                                "--quiet -F 15 -f 0 ")
            frameshift_detected = False
            # scan the pairwise output; alignment rows start with "Query " and
            # mark frameshifts with "/" or "\" (line checks hoisted out of the
            # former per-character loop; the context manager fixes the original
            # leak of one file handle per iteration)
            with open("diamond_temp_output.txt") as new_diamond_output:
                for line in new_diamond_output:
                    if line[0:6] == "Query " and ("/" in line or "\\" in line):
                        frameshift_detected = True
                        break
            # stop shifting the deletion once a frameshift was detected
            if frameshift_detected:
                break
        # append the first position with a detected frameshift; if nothing was
        # detected the last loop position (39) acts as an upper threshold
        frameshift_detection_threshold_list.append(position)
        process_count += 1  # increase the progress count

    # save the results and report the mean
    result_sum = 0
    with open("diamond_output.txt", "w") as output_file:
        for x in frameshift_detection_threshold_list:
            result_sum += x
            output_file.write(str(x) + "\n")
    print("--> The mean threshold for diamond frameshift detection is: ",
          result_sum / len(frameshift_detection_threshold_list))
    return None
def database_comp_diamond_ins(query_list, protein_database):
    """
    (create frameshift with insertion version)
    Receives a query list containing all CDS regions used for the benchmark
    and the path to the diamond database for the diamond blastx runs. The
    frameshifts are inserted as single random-base insertions: for each query
    an insertion is moved from position 0 towards position 39, and the first
    position at which diamond reports a frameshift ("/" or "\\" in an
    alignment row) is recorded (39 acts as an upper threshold when nothing is
    detected).

    :param query_list: list containing all used CDS regions for the benchmark
    :param protein_database: path to the protein database for the diamond
                             blastx runs
    :return: None, but an output file containing the results named
             diamond_output.txt is created
    """
    list_length = len(query_list)
    process_count = 0
    frameshift_detection_threshold_list = []  # first detection position per query
    base_list = ["A", "T", "G", "C"]
    print("--> start with diamond runs")
    for cds_region in query_list:
        # coarse progress messages
        if process_count == int(list_length * 0.1):
            print("--> 10% finished")
        elif process_count == int(list_length * 0.25):
            print("--> 25% finished")
        elif process_count == int(list_length * 0.5):
            print("--> 50% finished")
        elif process_count == int(list_length * 0.75):
            print("--> 75% finished")
        for position in range(40):  # 40 as biggest position, to prevent unnecessary runs
            insertion_base = random.choice(base_list)  # choose a random base
            # build the query: insert the random base at the current position
            mod_current_cds = ("".join(cds_region[1][:position])) + insertion_base + \
                              ("".join(cds_region[1][position:]))
            # "w" mode overwrites the file of the previous run
            with open("cds_query.fasta", "w") as new_cds_query:
                new_cds_query.write(cds_region[0] + "\n")    # CDS header
                new_cds_query.write(mod_current_cds + "\n")  # CDS nucleotide sequence
            # run diamond; os.system blocks until the command is finished
            os.system("diamond blastx -d " + protein_database + " -q cds_query.fasta -o diamond_temp_output.txt -k 1 "
                                                                "--quiet -F 15 -f 0 ")
            frameshift_detected = False
            # scan the pairwise output; alignment rows start with "Query " and
            # mark frameshifts with "/" or "\" (line checks hoisted out of the
            # former per-character loop; the context manager fixes the original
            # leak of one file handle per iteration)
            with open("diamond_temp_output.txt") as new_diamond_output:
                for line in new_diamond_output:
                    if line[0:6] == "Query " and ("/" in line or "\\" in line):
                        frameshift_detected = True
                        break
            # stop shifting the insertion once a frameshift was detected
            if frameshift_detected:
                break
        # append the first position with a detected frameshift; if nothing was
        # detected the last loop position (39) acts as an upper threshold
        frameshift_detection_threshold_list.append(position)
        process_count += 1  # increase the progress count

    # save the results and report the mean
    result_sum = 0
    with open("diamond_output.txt", "w") as output_file:
        for x in frameshift_detection_threshold_list:
            result_sum += x
            output_file.write(str(x) + "\n")
    print("--> The mean threshold for diamond frameshift detection is: ",
          result_sum / len(frameshift_detection_threshold_list))
    return None
def create_output_plots(input_file_path, picture_name, plot_title='Diamond alignment cutoff distribution'):
    """
    Creates the bar graph to visualise the benchmark results. All positions
    are incremented by one to convert the python list positions to the real
    sequence positions.

    :param input_file_path: path to the output file of the database_comp_* functions.
    :param picture_name: name for the output picture
    :param plot_title: chart title; defaults to the previously hard-coded
                       Diamond title so existing calls are unaffected
    :return: a png file with the bar graph
    """
    # read in all alignment cutoff positions of the input file
    cutoff_list = []
    with open(input_file_path) as input_file:
        for line in input_file:
            cutoff = int(line.strip())
            if cutoff != 0:  # to exclude the very few false detections
                cutoff_list.append(cutoff)
    # count the amount of cutoffs per position (dict lookup replaces the
    # former quadratic list scan)
    position_counts = {}
    for cutoff in cutoff_list:
        position_counts[cutoff] = position_counts.get(cutoff, 0) + 1
    # sort ascending by cutoff position, keeping the original [pos, count] shape
    count_cutoff_list = [[pos, position_counts[pos]] for pos in sorted(position_counts)]
    print(count_cutoff_list)
    # x-coordinates of left sides of bars
    cuttoff_positions = [x for x in range(1, len(count_cutoff_list) + 1)]
    # heights of bars
    cuttoff_counts = [y[1] for y in count_cutoff_list]
    # labels for bars; +1 corrects the python list positions to the real positions
    tick_label = [str(z[0] + 1) for z in count_cutoff_list]
    # plotting a bar chart
    plt.bar(cuttoff_positions, cuttoff_counts, tick_label=tick_label,
            width=0.8, color=['green'])
    # naming the axes
    plt.xlabel('cutoff positions')
    plt.ylabel('cutoffs per position')
    # plot title
    plt.title(plot_title)
    # save the plot
    plt.savefig(picture_name + '.png')
    # close the figure so repeated calls do not draw into the same axes
    plt.close()
    return None
def complete_prot_exonerate_del(cds_list, protein_file_path):
    """
    (create frameshift with deletion version)
    Determine the frameshift detection rate of Exonerate over complete CDS
    regions. A single-nucleotide deletion is moved through every position of
    each CDS, skipping the first and last 50 nucleotides to exclude the
    alignment cutoff problem at the sequence ends; the fraction of positions
    at which Exonerate reports a frameshift is recorded per CDS.

    :param cds_list: list containing the unmodified cds regions, which are
                     used as templates for the modified queries
    :param protein_file_path: path to the protein file containing the amino
                              acid sequences
    :return: None, but an output file containing the results named
             exonerate_output.txt is created
    """
    # --- Read in the protein sequences ---------------------------------
    protein_sequences = []
    protein_header = ""
    current_protein_seq = ["#####"]  # sentinel entry, removed after parsing
    print("--> read in protein file")
    with open(protein_file_path) as protein_file:
        for line in protein_file:
            if line[0] != "#":  # exclude comment lines
                if line[0] != ">":
                    for amin_acid in line.strip():  # fill up the current sequence
                        current_protein_seq.append(amin_acid)
                else:
                    protein_sequences.append([protein_header, current_protein_seq])
                    # reset for the next protein
                    current_protein_seq = []
                    protein_header = line.strip()
    # append the last remaining protein
    protein_sequences.append([protein_header, current_protein_seq])
    # remove the initialising sentinel entry
    protein_sequences.pop(0)

    # --- Search the fitting protein for each CDS ------------------------
    combined_cds_protein_list = []  # [[cds, protein], ...]
    print("--> search matching CDS-protein pairs")
    for cds_region in cds_list:
        cds_id = cds_region[0].split("protein_id=")[1].split("]")[0]
        for protein in protein_sequences:
            protein_id = protein[0].split()[0][1:]
            if cds_id == protein_id:
                combined_cds_protein_list.append([cds_region, protein])
                break

    # --- Exonerate runs --------------------------------------------------
    list_length = len(combined_cds_protein_list)
    process_count = 0
    frameshift_detectionrate_list = []  # frameshift detection rate per CDS
    print("--> start with exonerate runs")
    for region_pair in combined_cds_protein_list:
        # coarse progress messages
        if process_count == int(list_length * 0.1):
            print("--> 10% finished")
        elif process_count == int(list_length * 0.25):
            print("--> 25% finished")
        elif process_count == int(list_length * 0.5):
            print("--> 50% finished")
        elif process_count == int(list_length * 0.75):
            print("--> 75% finished")
        # counts how many of the inserted frameshifts are detected
        frameshift_detection_count = 0
        # skip the first and last 50 nucleotide positions to exclude the
        # alignment cutoff problem
        for position in range(50, (len(region_pair[0][1]) - 50)):
            # build the query: delete the nucleotide at the current position
            mod_current_cds = ("".join(region_pair[0][1][:position])) + ("".join(region_pair[0][1][position + 1:]))
            # "w" mode overwrites the files of the previous run; the context
            # managers fix the original leak of one file handle per iteration
            with open("cds_query.fasta", "w") as new_cds_query:
                new_cds_query.write(region_pair[0][0] + "\n")   # CDS header
                new_cds_query.write(mod_current_cds + "\n")     # CDS nucleotide sequence
            with open("prot_sbjct.fasta", "w") as new_prot_sbjct:
                new_prot_sbjct.write(region_pair[1][0] + "\n")  # protein header
                new_prot_sbjct.write("".join(region_pair[1][1]) + "\n")
            # run exonerate; os.system blocks until the command is finished
            os.system("exonerate -m protein2dna --showvulgar true -Q protein -T dna --showalignment false --verbose 0 "
                      "-q prot_sbjct.fasta -t cds_query.fasta >exonerate_temp_output.txt")
            # only the first output line (the vulgar line) is inspected, as in
            # the original implementation; "F" marks a detected frameshift
            with open("exonerate_temp_output.txt") as new_exonerate_output:
                first_line = new_exonerate_output.readline()
            if "F" in first_line:
                frameshift_detection_count += 1
        # fraction of tested positions at which the frameshift was detected
        detection_rate = frameshift_detection_count / (len(region_pair[0][1]) - 100)
        frameshift_detectionrate_list.append(detection_rate)
        process_count += 1  # increase the progress count

    # --- Save the results and report the mean ----------------------------
    mean_detection_percentage = 0
    with open("exonerate_output.txt", "w") as output_file:
        for x in frameshift_detectionrate_list:
            mean_detection_percentage += x
            output_file.write(str(x) + "\n")
    print("--> The average mean detection rate is: ",
          mean_detection_percentage / len(frameshift_detectionrate_list))
    print("--> The average mean detection percentage is: ",
          (mean_detection_percentage / len(frameshift_detectionrate_list)) * 100, "%")
    return None
def complete_prot_exonerate_ins(cds_list, protein_file_path):
    """
    (create frameshift with insertion version)
    Determine the frameshift detection rate of Exonerate on CDS regions.

    Every CDS is paired with its matching protein; then, for each position
    (skipping the first and last 50 nucleotides to exclude the alignment
    cutoff problem) a single-base insertion frameshift is introduced and
    Exonerate is run. The fraction of detected frameshifts per CDS is
    recorded.

    :param cds_list: list containing the unmodified CDS regions, which are
                     later used as modified queries
    :param protein_file_path: path to the protein file containing the amino
                              acid sequences
    :return: None; an output file named ``exonerate_output.txt`` containing
             the per-CDS detection rates is created
    """
    # --- read in the protein sequences ---
    protein_sequences = []
    protein_header = ""
    current_protein_seq = ["#####"]  # sentinel record, removed again below
    print("--> read in protein file")
    with open(protein_file_path) as protein_file:
        for line in protein_file:
            if line[0] == "#":  # exclude comment lines
                continue
            if line[0] != ">":
                # sequence line: collect the single amino acids
                for amino_acid in line.strip():
                    current_protein_seq.append(amino_acid)
            else:
                # header line: store the previous record, start a new one
                protein_sequences.append([protein_header, current_protein_seq])
                current_protein_seq = []
                protein_header = line.strip()
    # appending the last remaining region if possible
    protein_sequences.append([protein_header, current_protein_seq])
    # remove the first initialising (sentinel) region
    protein_sequences.pop(0)

    # --- pair every CDS with its matching protein via the protein id ---
    combined_cds_protein_list = []  # entries: [cds_record, protein_record]
    print("--> search matching CDS-protein pairs")
    for cds_region in cds_list:
        cds_id = cds_region[0].split("protein_id=")[1].split("]")[0]
        for protein in protein_sequences:
            protein_id = protein[0].split()[0][1:]
            if cds_id == protein_id:
                combined_cds_protein_list.append([cds_region, protein])
                break

    # --- run Exonerate for every insertion position of every pair ---
    list_length = len(combined_cds_protein_list)
    process_count = 0
    frameshift_detectionrate_list = []  # detection rate per CDS
    base_list = ["A", "T", "G", "C"]
    print("--> start with exonerate runs")
    for region_pair in combined_cds_protein_list:
        # coarse progress messages
        if process_count == int(list_length * 0.1):
            print("--> 10% finished")
        elif process_count == int(list_length * 0.25):
            print("--> 25% finished")
        elif process_count == int(list_length * 0.5):
            print("--> 50% finished")
        elif process_count == int(list_length * 0.75):
            print("--> 75% finished")
        # counts how many of the inserted frameshifts are detected
        frameshift_detection_count = 0
        # the protein subject only depends on the pair, so write it once per
        # pair instead of once per position (same file content either way)
        mod_current_prot = "".join(region_pair[1][1])
        with open("prot_sbjct.fasta", "w") as new_prot_sbjct:
            new_prot_sbjct.write(region_pair[1][0] + "\n")
            new_prot_sbjct.write(mod_current_prot + "\n")
        # excludes the first and last 50 nucleotide positions (cutoff problem)
        for position in range(50, (len(region_pair[0][1]) - 50)):
            insertion_base = random.choice(base_list)  # choose a random base
            # insert the frameshift at the current position as an insertion
            mod_current_cds = ("".join(region_pair[0][1][:position])) + insertion_base + \
                              ("".join(region_pair[0][1][position:]))
            # (re)write the CDS query file
            with open("cds_query.fasta", "w") as new_cds_query:
                new_cds_query.write(region_pair[0][0] + "\n")
                new_cds_query.write(mod_current_cds + "\n")
            # run exonerate; os.system blocks until the command has finished
            os.system("exonerate -m protein2dna --showvulgar true -Q protein -T dna --showalignment false --verbose 0 "
                      "-q prot_sbjct.fasta -t cds_query.fasta >exonerate_temp_output.txt")
            # the output normally consists of a single vulgar-format line; an
            # "F" in that first line marks a detected frameshift (the original
            # char loop only ever inspected the first line as well)
            # BUGFIX: the output file handle was never closed before
            with open("exonerate_temp_output.txt") as new_exonerate_output:
                first_line = new_exonerate_output.readline()
            if "F" in first_line:
                frameshift_detection_count += 1
        # BUGFIX: guard against division by zero for sequences of length <= 100
        tested_positions = len(region_pair[0][1]) - 100
        if tested_positions > 0:
            frameshift_detectionrate_list.append(frameshift_detection_count / tested_positions)
        process_count += 1  # increase the progress count

    # --- save the per-CDS rates and report the mean ---
    with open("exonerate_output.txt", "w") as output_file:
        for rate in frameshift_detectionrate_list:
            output_file.write(str(rate) + "\n")
    # BUGFIX: guard against an empty result list (division by zero)
    if frameshift_detectionrate_list:
        mean_detection_percentage = sum(frameshift_detectionrate_list)
        print("--> The average mean detection rate is: ",
              mean_detection_percentage / len(frameshift_detectionrate_list))
        print("--> The average mean detection percentage is: ",
              (mean_detection_percentage / len(frameshift_detectionrate_list)) * 100, "%")
    return None
def complete_prot_diamond_del(query_list, protein_database):
    """
    (create frameshift with deletion version)
    Determine the frameshift detection rate of Diamond (blastx) on CDS
    regions. For every position of each CDS (skipping the first and last 50
    nucleotides to exclude the alignment cutoff problem) a single-base
    deletion frameshift is introduced and Diamond is run; the fraction of
    detected frameshifts per CDS is recorded.

    :param query_list: list containing all used CDS regions for the benchmark
    :param protein_database: path to the protein database for the diamond blastx runs
    :return: None; an output file named ``diamond_output.txt`` containing the
             per-CDS detection rates is created
    """
    list_length = len(query_list)
    process_count = 0
    frameshift_detectionrate_list = []  # detection rate per CDS
    print("--> start with diamond runs")
    for cds_region in query_list:
        # coarse progress messages
        if process_count == int(list_length * 0.1):
            print("--> 10% finished")
        elif process_count == int(list_length * 0.25):
            print("--> 25% finished")
        elif process_count == int(list_length * 0.5):
            print("--> 50% finished")
        elif process_count == int(list_length * 0.75):
            print("--> 75% finished")
        # counts how many of the inserted frameshifts are detected
        frameshift_detection_count = 0
        # skip the first and last 50 nucleotide positions (cutoff problem)
        for position in range(50, (len(cds_region[1]) - 50)):
            # introduce the frameshift at the current position as a deletion
            mod_current_cds = ("".join(cds_region[1][:position])) + ("".join(cds_region[1][position + 1:]))
            # (re)write the CDS query file
            with open("cds_query.fasta", "w") as new_cds_query:
                new_cds_query.write(cds_region[0] + "\n")
                new_cds_query.write(mod_current_cds + "\n")
            # run diamond; os.system blocks until the command has finished
            os.system("diamond blastx -d " + protein_database + " -q cds_query.fasta -o diamond_temp_output.txt -k 1 "
                      "--quiet -F 15 -f 0 ")
            # scan the alignment output: a "/" or "\" in a "Query " line
            # marks a frameshift in the alignment
            # BUGFIX: the output file handle was never closed before
            with open("diamond_temp_output.txt") as new_diamond_output:
                for line in new_diamond_output:
                    if line.startswith("Query ") and ("/" in line or "\\" in line):
                        frameshift_detection_count += 1
                        break
        # BUGFIX: guard against division by zero for sequences of length <= 100
        tested_positions = len(cds_region[1]) - 100
        if tested_positions > 0:
            frameshift_detectionrate_list.append(frameshift_detection_count / tested_positions)
        process_count += 1  # increase the progress count

    # save the per-CDS rates and report the mean
    with open("diamond_output.txt", "w") as output_file:
        for rate in frameshift_detectionrate_list:
            output_file.write(str(rate) + "\n")
    # BUGFIX: guard against an empty result list (division by zero)
    if frameshift_detectionrate_list:
        mean_detection_percentage = sum(frameshift_detectionrate_list)
        print("--> The average mean detection rate is: ",
              mean_detection_percentage / len(frameshift_detectionrate_list))
        print("--> The average mean detection percentage is: ",
              (mean_detection_percentage / len(frameshift_detectionrate_list)) * 100, "%")
    return None
def complete_prot_diamond_ins(query_list, protein_database):
    """
    (create frameshift with insertion version)
    Determine the frameshift detection rate of Diamond (blastx) on CDS
    regions. For every position of each CDS (skipping the first and last 50
    nucleotides to exclude the alignment cutoff problem) a single random-base
    insertion frameshift is introduced and Diamond is run; the fraction of
    detected frameshifts per CDS is recorded.

    :param query_list: list containing all used CDS regions for the benchmark
    :param protein_database: path to the protein database for the diamond blastx runs
    :return: None; an output file named ``diamond_output.txt`` containing the
             per-CDS detection rates is created
    """
    list_length = len(query_list)
    process_count = 0
    frameshift_detectionrate_list = []  # detection rate per CDS
    base_list = ["A", "T", "G", "C"]
    print("--> start with diamond runs")
    for cds_region in query_list:
        # coarse progress messages
        if process_count == int(list_length * 0.1):
            print("--> 10% finished")
        elif process_count == int(list_length * 0.25):
            print("--> 25% finished")
        elif process_count == int(list_length * 0.5):
            print("--> 50% finished")
        elif process_count == int(list_length * 0.75):
            print("--> 75% finished")
        # counts how many of the inserted frameshifts are detected
        frameshift_detection_count = 0
        # skip the first and last 50 nucleotide positions (cutoff problem)
        for position in range(50, (len(cds_region[1]) - 50)):
            insertion_base = random.choice(base_list)  # choose a random base
            # introduce the frameshift at the current position as an insertion
            mod_current_cds = ("".join(cds_region[1][:position])) + insertion_base + \
                              ("".join(cds_region[1][position:]))
            # (re)write the CDS query file
            with open("cds_query.fasta", "w") as new_cds_query:
                new_cds_query.write(cds_region[0] + "\n")
                new_cds_query.write(mod_current_cds + "\n")
            # run diamond; os.system blocks until the command has finished
            os.system("diamond blastx -d " + protein_database + " -q cds_query.fasta -o diamond_temp_output.txt -k 1 "
                      "--quiet -F 15 -f 0 ")
            # scan the alignment output: a "/" or "\" in a "Query " line
            # marks a frameshift in the alignment
            # BUGFIX: the output file handle was never closed before
            with open("diamond_temp_output.txt") as new_diamond_output:
                for line in new_diamond_output:
                    if line.startswith("Query ") and ("/" in line or "\\" in line):
                        frameshift_detection_count += 1
                        break
        # BUGFIX: guard against division by zero for sequences of length <= 100
        tested_positions = len(cds_region[1]) - 100
        if tested_positions > 0:
            frameshift_detectionrate_list.append(frameshift_detection_count / tested_positions)
        process_count += 1  # increase the progress count

    # save the per-CDS rates and report the mean
    with open("diamond_output.txt", "w") as output_file:
        for rate in frameshift_detectionrate_list:
            output_file.write(str(rate) + "\n")
    # BUGFIX: guard against an empty result list (division by zero)
    if frameshift_detectionrate_list:
        mean_detection_percentage = sum(frameshift_detectionrate_list)
        print("--> The average mean detection rate is: ",
              mean_detection_percentage / len(frameshift_detectionrate_list))
        print("--> The average mean detection percentage is: ",
              (mean_detection_percentage / len(frameshift_detectionrate_list)) * 100, "%")
    return None
def main():
    """Entry point of the benchmark script.

    Earlier benchmark invocations (Exonerate / Diamond runs and plotting)
    were left behind as dead bare string literals; they are preserved below
    as real comments for reference. Currently the function only announces
    itself.
    """
    print("benchOne MAIN FUNCTION called")
    # --- previous Exonerate benchmark run (kept for reference) ---
    # start_time = time.time()
    # cds_file_path = "/share/project/johannes/bachelor/exonerate_vs_diamond_benchmark/NCBI_files/" \
    #                 "GCF_000001735.4_TAIR10.1_cds_from_genomic.fna"
    # queries = create_queries(cds_file_path, 100, 800)
    # protein_file_path = "/share/project/johannes/bachelor/exonerate_vs_diamond_benchmark/NCBI_files/modified_file/" \
    #                     "GCF_000001735.4_TAIR10.1_protein_mod.faa"
    # # database_comp_exonerate_ins(queries, protein_file_path)
    # complete_prot_exonerate_ins(queries, protein_file_path)
    # stop_time = time.time()
    # print("runtime: ", stop_time - start_time, " seconds")
    # --- previous Diamond benchmark run (kept for reference) ---
    # start_time = time.time()
    # # prot_database = "/share/project/johannes/bachelor/exonerate_vs_diamond_benchmark/diamond_database/diamond_db.dmnd"
    # prot_database = "/home/johannes/Desktop/diamond_database/diamond_db.dmnd"
    # cds_file_path = "/share/project/johannes/bachelor/exonerate_vs_diamond_benchmark/NCBI_files/" \
    #                 "GCF_000001735.4_TAIR10.1_cds_from_genomic.fna"
    # queries = create_queries(cds_file_path, 50, 800)
    # # database_comp_diamond_ins(queries, prot_database)
    # complete_prot_diamond_ins(queries, prot_database)
    # stop_time = time.time()
    # print("runtime: ", stop_time - start_time, " seconds")
    # --- previous plotting calls (kept for reference) ---
    # input_file_1 = "/share/project/johannes/bachelor/exonerate_vs_diamond_benchmark/exonerate_del_run/exonerate_output.txt"
    # create_output_plots(input_file_1, "counts_exonerate_del")
    # input_file_2 = "/share/project/johannes/bachelor/exonerate_vs_diamond_benchmark/diamond_ins_run/diamond_output.txt"
    # create_output_plots(input_file_2, "counts_diamond_ins")
# Run the benchmark when this module is executed as a script.
if __name__ == '__main__':
    main()
/Lagranto-0.3.1.tar.gz/Lagranto-0.3.1/docs/lagranto.rst | .. _lagranto-package:
Basic examples
--------------
As a first step, let's simply read the trajectories::
>>> from lagranto import Tra
>>> filename = 'lsl_20110123_10'
>>> trajs = Tra()
>>> trajs.load_ascii(filename)
or to read a netcdf file::
>>> filename = 'lsl_20110123_10.4'
>>> trajs.load_netcdf(filename)
The properties of the trajectories can be displayed as follows::
>>> print(trajs)
24 trajectories with 41 time steps.
Available fields: time/lon/lat/p/Q/RH/TH/BLH
total duration: -14400.0 minutes
>>> print(trajs.variables())
['time', 'lon', 'lat', 'p', 'Q', 'RH', 'TH', 'BLH']
>>> print(trajs['Q'].shape)
(24, 41)
DocStrings
----------
Tra
~~~
.. autoclass:: lagranto.Tra
:members:
LagrantoRun
~~~~~~~~~~~
.. autoclass:: lagranto.LagrantoRun
:members:
| PypiClean |
/GearMess_server-0.1.1-py3-none-any.whl/server_src/server.py | from socket import socket, AF_INET, SOCK_STREAM, timeout
from os import urandom
from queue import Queue
from threading import Thread
import sys
from server_src.handlers import StorageHandler
from server_src.models import session
from server_src.JIM.JIMs import Jim, MessageConverter
from server_src.JIM.jim_config import *
from server_src.crypto.crypto import *
class Server:
    """ Simple message server. Creates a listening socket, accepts client
    connections and forwards messages: chat messages go to every online
    user, addressed messages to a single recipient.

    Takes ADDRESS and PORT to listen from on initialisation as required.
    Takes time_out=0.2, buf_size=2048, max_clients=15, code_format='UTF-8' as not required"""

    def __init__(self, address, port_, time_out=0.2, buf_size=2048, max_clients=15, code_format='UTF-8'):
        """ :param address: address to listen from, :type: string, example '192.168.1.1'
            :param port_: port to listen from, :type: int, example 7777
        """
        self.addr = address, port_
        self.soc = None  # listening socket, created lazily in listen()
        # TODO: add the possibility to switch the server off through the GUI
        self._is_alive = False
        self.chats_messages = Queue()      # broadcast (chat) messages
        self.addressed_messages = Queue()  # direct user-to-user messages
        self.connected_clients = []        # sockets of all connected clients
        self.authorised_clients = []       # sockets that passed authorisation
        self.online_users = {}             # user name -> socket
        self.converter = MessageConverter()
        self.handler = ServerHandler(self.connected_clients, self.authorised_clients, self.online_users,
                                     self.addressed_messages, self.chats_messages)
        self._timeout = time_out
        self._buf_size = buf_size
        self._clients_count = max_clients
        self._code_format = code_format
        # worker threads for message delivery, started in run()
        self.addressed_messages_thread = Thread(target=self.send_addressed_messages)
        self.chat_messages_thread = Thread(target=self.send_chat_messages)

    def listen(self):
        """ To make socket using initialised parameters"""
        self.soc = socket(AF_INET, SOCK_STREAM)
        self.soc.bind(self.addr)
        # short timeout so accept()/recv() loops do not block forever
        self.soc.settimeout(self._timeout)
        self.soc.listen(self._clients_count)

    def send_chat_messages(self):
        """ Method to use in thread to broadcast chat-messages to every
        online user. """
        while self._is_alive:
            try:
                # Queue.get() blocks until a message is available
                message = self.chats_messages.get()
            except:
                # NOTE(review): bare except silently swallows all errors here
                pass
            else:
                bmessage = self.converter(message)
                for user in self.online_users.values():
                    user.send(bmessage)

    def send_addressed_messages(self):
        """ To send addressed messages, taking a message from the queue and
        sending it if the recipient is online; if not - puts it back to the
        queue."""
        while self._is_alive:
            try:
                message = self.addressed_messages.get()
            except:
                pass
            else:
                recipient = message[TO]
                conn = self.online_users.get(recipient)
                if conn:
                    message_ = self.converter(message)
                    conn.send(message_)
                else:
                    # TODO: persist undelivered messages in the database with a
                    # "not delivered" flag and check for them on connect -
                    # otherwise they are lost if the server goes down
                    self.addressed_messages.put(message)

    def _accept(self):
        """ Accept a new client connection and hand it to its own thread. """
        try:
            conn, addr = self.soc.accept()
        except OSError:
            # accept() raises regularly because of the socket timeout
            pass
        else:
            self.connected_clients.append(conn)  # NOTE(review): is this still needed?
            Thread(target=self.handle_conn, args=(conn,)).start()

    def handle_conn(self, conn):
        """
        Using in thread to receive every request from certain user and initiate handling of each.
        :param conn: - socket of connected user :type: socket.
        """
        while conn in self.connected_clients:
            try:
                message = conn.recv(self._buf_size)
            except timeout:
                pass
            else:
                if message:
                    self.handler.handle(message, conn)

    def run(self):
        """ To start server working: open the socket, start the delivery
        threads and accept connections forever. """
        self._is_alive = True
        self.listen()
        self.addressed_messages_thread.start()
        self.chat_messages_thread.start()
        # NOTE(review): this loop ignores _is_alive, so the server cannot be
        # stopped gracefully - confirm whether that is intended
        while True:
            self._accept()
class ServerHandler:
    """Dispatches incoming client requests (JIM protocol) to storage and
    messaging actions and sends the response back to the client.

    Shares the client lists, online-user map and message queues with the
    owning Server instance.
    """

    def __init__(self, connected_clients, authorised_clients, online_users, addressed_messages, chat_messages):
        self.connected_clients = connected_clients
        self.authorised_clients = authorised_clients
        self.online_users = online_users
        self.chats_messages = chat_messages
        self.addressed_messages = addressed_messages
        self.storage_handler = StorageHandler(session)
        self.converter = MessageConverter()
        self.responder = Jim()

    def authorise(self, user, conn):
        """Challenge-response authorisation: send a random word, expect it
        combined with the stored password, verify and answer accordingly."""
        word = b64encode(urandom(32)).decode('utf-8')
        request = self.responder.create(response=OK, alert=word)
        request = self.converter(request)
        conn.send(request)
        cl_answ = conn.recv(1024)
        cl_answ = self.converter(cl_answ)
        answ = cl_answ[ANSWER]
        key = self.storage_handler.get_password(user)
        if key and check_word(key, answ, word):
            resp_ = self.responder.create(response=OK, alert='authorised')
            self.authorised_clients.append(conn)
        else:
            resp_ = self.responder.create(response=WRONG_LOGIN_INFO, alert=WRONG_LOGIN_OR_PASSWORD)
        return resp_

    def registration(self, user, password, conn):
        """Register a new user and mark the connection as authorised."""
        resp, alert = self.storage_handler.registration(user, password)
        resp_ = self.responder.create(response=resp, alert=alert)
        self.authorised_clients.append(conn)
        return resp_

    def check_authorisation(self, conn):
        """Return True if conn is authorised; otherwise notify the client,
        close the connection and return False."""
        if conn in self.authorised_clients:
            return True
        else:
            resp = self.responder.create(response=NOT_AUTHORISED)
            resp = self.converter(resp)
            conn.send(resp)
            conn.close()
            return False

    def get_contacts(self, user):
        """Build a CONTACT_LIST response for the given user."""
        quantity = self.storage_handler.count_contacts(user)
        contact_list = self.storage_handler.get_contacts(user)
        resp_ = self.responder.create(action=CONTACT_LIST, quantity=quantity, contact_list=contact_list)
        return resp_

    def add_contact(self, user, contact):
        """Add contact to user's contact list and return the response."""
        resp, alert = self.storage_handler.add_contact(user, contact)
        resp_ = self.responder.create(response=resp, alert=alert)
        return resp_

    def del_contact(self, user, contact):
        """Remove contact from user's contact list and return the response."""
        resp, alert = self.storage_handler.del_contact(user, contact)
        resp_ = self.responder.create(response=resp, alert=alert)
        return resp_

    def presence(self, user, conn):
        """Record the user's presence and register the connection as online."""
        self.storage_handler.presence(user, conn.getsockname()[0])
        self.online_users[user] = conn

    def unknown_action(self):
        """Response for requests with an unsupported action."""
        resp = self.responder.create(response=WRONG_REQUEST, alert=UNKNOWN_ACTION)
        return resp

    def income_message(self, message):
        """Route a message: '#'-prefixed recipients are chats (broadcast),
        everything else is a direct message."""
        if message[TO].startswith('#'):
            self.chats_messages.put(message)
        else:
            self.addressed_messages.put(message)

    def quit(self, user):
        """Remove the user's connection from all registries and close it."""
        conn_ = self.online_users.pop(user)
        self.connected_clients.remove(conn_)
        self.authorised_clients.remove(conn_)
        conn_.close()

    def handle(self, message, conn):
        """Decode an incoming request, dispatch it by its action and send
        back the response, if any."""
        message = self.converter(message)
        action = message.get(ACTION)
        resp = None
        if action != AUTHORISE and action != REGISTER:
            # BUGFIX: the result of check_authorisation() was ignored before,
            # so unauthorised requests were still processed and a response
            # was attempted on the already closed socket.
            if not self.check_authorisation(conn):
                return
        if action == MSG:
            self.income_message(message)
        elif action == PRESENCE:
            self.presence(message[USER], conn)
        elif action == GET_CONTACTS:
            resp = self.get_contacts(message[USER])
        elif action == ADD_CONTACT:
            resp = self.add_contact(message[USER], message[CONTACT])
        elif action == DEL_CONTACT:
            resp = self.del_contact(message[USER], message[CONTACT])
        elif action == AUTHORISE:
            resp = self.authorise(message[USER], conn)
        elif action == REGISTER:
            resp = self.registration(message[USER], message[PASSWORD], conn)
        elif action == QUIT:
            self.quit(message[USER])
        else:
            resp = self.unknown_action()
        if resp:
            resp_ = self.converter(resp)
            conn.send(resp_)
def start_server():
    """Parse the listen address and port from ``sys.argv`` and run the server.

    ``sys.argv[1]`` is the address (default: all interfaces) and
    ``sys.argv[2]`` the port (default: 7777). Exits with a non-zero status
    when the port is not an integer.
    """
    try:
        addr = sys.argv[1]
    except IndexError:
        addr = ''
    try:
        port = int(sys.argv[2])
    except IndexError:
        port = 7777
    except ValueError:
        print('Порт должен быть целым числом')
        # BUGFIX: exit with a non-zero status on invalid input (was 0)
        sys.exit(1)
    server = Server(addr, port)
    server.run()
# Run a server on all interfaces, port 7777, when executed as a script.
if __name__ == '__main__':
    server = Server('', 7777)
    server.run()
/HiCExplorer-2.2.1.1-py3-none-any.whl/hicexplorer/hicAdjustMatrix.py | from __future__ import division
import warnings
warnings.simplefilter(action="ignore", category=RuntimeWarning)
warnings.simplefilter(action="ignore", category=PendingDeprecationWarning)
import argparse
from hicmatrix import HiCMatrix as hm
from hicexplorer._version import __version__
from hicmatrix.HiCMatrix import check_cooler
import numpy as np
import logging
log = logging.getLogger(__name__)
def parse_arguments(args=None):
    """Construct and return the command line argument parser for
    hicAdjustMatrix."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        add_help=False,
        description="""
""")

    group_required = parser.add_argument_group('Required arguments')

    group_required.add_argument(
        '--matrix', '-m',
        required=True,
        help='The matrix to adjust. '
             'HiCExplorer supports the following file formats: h5 (native HiCExplorer format) '
             'and cool.')

    group_required.add_argument(
        '--outFileName', '-o',
        required=True,
        help='File name to save the adjusted matrix.')

    group_optional = parser.add_argument_group('Optional arguments')

    group_optional.add_argument(
        '--chromosomes', '-c',
        nargs='+',
        help='List of chromosomes to keep / remove')

    group_optional.add_argument(
        '--action',
        default='keep',
        choices=['keep', 'remove', 'mask'],
        help='Keep, remove or mask the list of specified chromosomes / regions ')

    group_optional.add_argument(
        '--regions', '-r',
        help='BED file which stores a list of regions to keep / remove')

    group_optional.add_argument(
        '--help', '-h', action='help', help='show this help message and exit')

    group_optional.add_argument(
        '--version', action='version',
        version='%(prog)s {}'.format(__version__))

    return parser
def main(args=None):
    """Adjust a Hi-C matrix by keeping / removing / masking chromosomes or
    regions, then save the result.

    :param args: optional list of command line arguments (mainly for
                 testing); defaults to ``sys.argv``.
    """
    args = parse_arguments().parse_args(args)
    if args.chromosomes is not None and args.regions is not None:
        log.error('Please specify either --chromosomes or --regions.')
        exit(1)
    hic_ma = None
    if args.chromosomes:
        # cooler files can load a single chromosome directly, avoiding a
        # full-matrix read when only one chromosome is kept
        if check_cooler(args.matrix) and len(args.chromosomes) == 1 and args.action == 'keep':
            hic_ma = hm.hiCMatrix(args.matrix, pChrnameList=args.chromosomes)
        else:
            hic_ma = hm.hiCMatrix(args.matrix)

            if args.action == 'keep':
                hic_ma.reorderChromosomes(args.chromosomes)
            elif args.action == 'remove':
                chromosomes = list(hic_ma.chrBinBoundaries)
                for chromosome in args.chromosomes:
                    if chromosome in chromosomes:
                        chromosomes.remove(chromosome)
                hic_ma.reorderChromosomes(chromosomes)
            elif args.action == 'mask':
                hic_ma.maskChromosomes(args.chromosomes)
    elif args.regions:
        hic_ma = hm.hiCMatrix(args.matrix)
        genomic_regions = []
        with open(args.regions, 'r') as file:
            for line in file:
                line = line.strip()
                if not line:
                    # BUGFIX: the previous check tested the raw line (which
                    # always contains at least the newline), so empty lines
                    # were never skipped here; they are now skipped explicitly
                    continue
                fields = line.split('\t')
                # only well-formed 3-column BED lines are used
                if len(fields) == 3:
                    # NOTE(review): start stays a string while end is an int,
                    # mirroring the original behaviour; confirm that
                    # getRegionBinRange accepts both
                    chrom, start, end = fields[0], fields[1], int(fields[2]) - 1
                    genomic_regions.append((chrom, start, end))

        # translate the genomic regions into matrix bin indices
        matrix_indices_regions = []
        for region in genomic_regions:
            _regionBinRange = hic_ma.getRegionBinRange(region[0], region[1], region[2])
            if _regionBinRange is not None:
                start, end = _regionBinRange
                matrix_indices_regions.extend(list(range(start, end)))

        if args.action == 'keep':
            hic_ma.reorderBins(matrix_indices_regions)
        elif args.action == 'mask':
            hic_ma.maskBins(matrix_indices_regions)
        elif args.action == 'remove':
            # keep every bin that is NOT part of the listed regions
            full_matrix_range = np.array(range(0, max(hic_ma.matrix.shape[0], hic_ma.matrix.shape[1])))
            matrix_indices_regions = np.array(matrix_indices_regions)
            full_matrix_range[matrix_indices_regions] = -1
            mask = full_matrix_range != -1
            full_matrix_range = full_matrix_range[mask]
            hic_ma.reorderBins(full_matrix_range)
    else:
        log.info('No data to adjust given. Please specify either --chromosomes or --region parameter.')

    if hic_ma is not None:
        hic_ma.save(args.outFileName)
/AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/models/_tensorflow/private_layers.py | from typing import Union
from ai4water.backend import tf
# Short module-level aliases for the commonly used parts of the Keras API.
layers = tf.keras.layers
Dense = tf.keras.layers.Dense
Layer = tf.keras.layers.Layer
activations = tf.keras.activations
K = tf.keras.backend
constraints = tf.keras.constraints
initializers = tf.keras.initializers
regularizers = tf.keras.regularizers
from tensorflow.python.ops import array_ops
from .attention_layers import ChannelAttention, SpatialAttention, regularized_padded_conv
def _get_tensor_shape(t):
return t.shape
class ConditionalRNN(tf.keras.layers.Layer):
    """Conditional RNN: conditions a time series on extra (e.g. categorical)
    inputs by projecting them onto the cell's hidden size and using the
    result as the initial state of the wrapped RNN."""

    # Arguments to the RNN like return_sequences, return_state...
    def __init__(self, units,
                 activation='tanh',
                 recurrent_activation='sigmoid',
                 use_bias=True,
                 dropout=0.0,
                 recurrent_dropout=0.0,
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 cell=tf.keras.layers.LSTMCell, *args,
                 **kwargs):
        """
        :param units: int, The number of units in the RNN Cell
        :param cell: string, cell class or object (pre-instantiated). In the case of string, 'GRU',
            'LSTM' and 'RNN' are supported.
        :param args: Any parameters of the tf.keras.layers.RNN class, such as return_sequences,
            return_state, stateful, unroll...
        """
        super().__init__()
        self.units = units
        self.final_states = None
        self.init_state = None
        if isinstance(cell, str):
            if cell.upper() == 'GRU':
                cell = tf.keras.layers.GRUCell
            elif cell.upper() == 'LSTM':
                cell = tf.keras.layers.LSTMCell
            elif cell.upper() == 'RNN':
                cell = tf.keras.layers.SimpleRNNCell
            else:
                raise Exception('Only GRU, LSTM and RNN are supported as cells.')
        # BUGFIX: the kernel regularizer used to be passed as
        # ``kernel_initializer``, silently replacing the cell's default
        # weight initializer instead of regularizing the kernel.
        self._cell = cell if hasattr(cell, 'units') else cell(units=units,
                                                              activation=activation,
                                                              dropout=dropout,
                                                              recurrent_dropout=recurrent_dropout,
                                                              recurrent_activation=recurrent_activation,
                                                              kernel_regularizer=kernel_regularizer,
                                                              recurrent_regularizer=recurrent_regularizer,
                                                              use_bias=use_bias
                                                              )
        self.rnn = tf.keras.layers.RNN(cell=self._cell, *args, **kwargs)

        # single condition: one projection onto the hidden size
        self.cond_to_init_state_dense_1 = tf.keras.layers.Dense(units=self.units)

        # multiple conditions: one projection per condition plus a 1-unit
        # Dense layer that learns how to combine them
        max_num_conditions = 10
        self.multi_cond_to_init_state_dense = []

        for _ in range(max_num_conditions):
            self.multi_cond_to_init_state_dense.append(tf.keras.layers.Dense(units=self.units))

        self.multi_cond_p = tf.keras.layers.Dense(1, activation=None, use_bias=True)

    def _standardize_condition(self, initial_cond):
        """Bring a condition tensor to shape [num_states, batch, hidden]
        matching the wrapped cell's state structure."""
        initial_cond_shape = initial_cond.shape
        if len(initial_cond_shape) == 2:
            initial_cond = tf.expand_dims(initial_cond, axis=0)
        first_cond_dim = initial_cond.shape[0]
        if isinstance(self._cell, tf.keras.layers.LSTMCell):
            # LSTM expects two states (h, c) -> duplicate a single condition
            if first_cond_dim == 1:
                initial_cond = tf.tile(initial_cond, [2, 1, 1])
            elif first_cond_dim != 2:
                raise Exception('Initial cond should have shape: [2, batch_size, hidden_size] '
                                'or [batch_size, hidden_size]. Shapes do not match.', initial_cond_shape)
        elif isinstance(self._cell, tf.keras.layers.GRUCell) or isinstance(self._cell, tf.keras.layers.SimpleRNNCell):
            if first_cond_dim != 1:
                raise Exception('Initial cond should have shape: [1, batch_size, hidden_size] '
                                'or [batch_size, hidden_size]. Shapes do not match.', initial_cond_shape)
        else:
            raise Exception('Only GRU, LSTM and RNN are supported as cells.')
        return initial_cond

    def __call__(self, inputs, *args, **kwargs):
        """
        :param inputs: List of n elements:
                    - [0] 3-D Tensor with shape [batch_size, time_steps, input_dim]. The inputs.
                    - [1:] list of tensors with shape [batch_size, cond_dim]. The conditions.
                In the case of a list, the tensors can have a different cond_dim.
        :return: outputs, states or outputs (if return_state=False)
        """
        assert (isinstance(inputs, list) or isinstance(inputs, tuple)) and len(inputs) >= 2, f"{type(inputs)}"
        x = inputs[0]
        cond = inputs[1:]
        if len(cond) > 1:  # multiple conditions: project each, then combine
            init_state_list = []
            for ii, c in enumerate(cond):
                init_state_list.append(self.multi_cond_to_init_state_dense[ii](self._standardize_condition(c)))
            multi_cond_state = self.multi_cond_p(tf.stack(init_state_list, axis=-1))
            multi_cond_state = tf.squeeze(multi_cond_state, axis=-1)
            self.init_state = tf.unstack(multi_cond_state, axis=0)
        else:
            cond = self._standardize_condition(cond[0])
            if cond is not None:
                self.init_state = self.cond_to_init_state_dense_1(cond)
                self.init_state = tf.unstack(self.init_state, axis=0)
        out = self.rnn(x, initial_state=self.init_state, *args, **kwargs)
        if self.rnn.return_state:
            outputs, h, c = out
            final_states = tf.stack([h, c])
            return outputs, final_states
        else:
            return out
class BasicBlock(layers.Layer):
    """
    ResNet-style basic block with CBAM (channel + spatial) attention.

    The official implementation is at https://github.com/Jongchan/attention-module/blob/master/MODELS/cbam.py
    The implementation of [1] does not have two conv and bn pairs. They just applied channel attention followed by
    spatial attention on inputs.

    [1] https://github.com/kobiso/CBAM-tensorflow/blob/master/attention_module.py#L39
    """
    expansion = 1

    def __init__(self, conv_dim, out_channels=32, stride=1, **kwargs):
        super(BasicBlock, self).__init__(**kwargs)
        # 1. The BasicBlock contains two convolutions; this is the first conv layer.
        self.conv1 = regularized_padded_conv(conv_dim, out_channels, kernel_size=3, strides=stride)
        self.bn1 = layers.BatchNormalization()

        # 2. The second conv layer. Only the first conv may downsample via its
        #    stride; this one keeps the size unchanged, so its stride is fixed to 1.
        self.conv2 = regularized_padded_conv(conv_dim, out_channels, kernel_size=3, strides=1)
        self.bn2 = layers.BatchNormalization()

        # ############################## attention mechanism ###############################
        self.ca = ChannelAttention(conv_dim=conv_dim, in_planes=out_channels)
        self.sa = SpatialAttention(conv_dim=conv_dim)

        # # 3. Check whether stride equals 1; if it is 1 there is no downsampling.
        # if stride != 1 or in_channels != self.expansion * out_channels:
        #     self.shortcut = Sequential([regularized_padded_conv(self.expansion * out_channels,
        #                                                         kernel_size=1, strides=stride),
        #                                 layers.BatchNormalization()])
        # else:
        #     self.shortcut = lambda x, _: x

    def call(self, inputs, training=False):
        """Apply conv-bn-relu, conv-bn, then channel and spatial attention."""
        out = self.conv1(inputs)
        out = self.bn1(out, training=training)
        out = tf.nn.relu(out)

        out = self.conv2(out)
        out = self.bn2(out, training=training)

        # ############################## attention mechanism ###############################
        out = self.ca(out) * out
        out = self.sa(out) * out

        # out = out + self.shortcut(inputs, training)
        # out = tf.nn.relu(out)
        return out
class scaled_dot_product_attention(layers.Layer):
    """Scaled dot-product attention implemented as a layer: computes
    softmax(q @ k^T / sqrt(d_k)) @ v and also returns the attention
    weights."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def __call__(self, q, k, v, mask):
        """Calculate the attention weights.
        q, k, v must have matching leading dimensions.
        k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.
        The mask has different shapes depending on its type(padding or look ahead)
        but it must be broadcastable for addition.

        Args:
          q: query shape == (..., seq_len_q, depth)
          k: key shape == (..., seq_len_k, depth)
          v: value shape == (..., seq_len_v, depth_v)
          mask: Float tensor with shape broadcastable
                to (..., seq_len_q, seq_len_k). Defaults to None.

        Returns:
          output, attention_weights
        """

        matmul_qk = tf.matmul(q, k, transpose_b=True)  # (..., seq_len_q, seq_len_k)

        # scale matmul_qk by sqrt of the key depth
        dk = tf.cast(tf.shape(k)[-1], tf.float32)
        scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)

        # add the mask to the scaled tensor: masked positions get a large
        # negative logit and thus ~zero weight after the softmax
        if mask is not None:
            scaled_attention_logits += (mask * -1e9)

        # softmax is normalized on the last axis (seq_len_k) so that the scores
        # add up to 1.
        attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1, name='scaled_dot_prod_attn_weights')  # (..., seq_len_q, seq_len_k)

        output = tf.matmul(attention_weights, v, name='scaled_dot_prod_attn_outs')  # (..., seq_len_q, depth_v)

        return output, attention_weights
# Module-level counters used to generate unique names for the Dense layers
# created inside MultiHeadAttention / EncoderLayer instances.
MHW_COUNTER = 0
ENC_COUNTER = 0
class MultiHeadAttention(tf.keras.layers.Layer):
    """Multi-head attention: project q/k/v, attend per head, then re-combine."""

    def __init__(self, d_model, num_heads, **kwargs):
        super(MultiHeadAttention, self).__init__(**kwargs)
        self.num_heads = num_heads
        self.d_model = d_model

        # the model dimension must split evenly across the heads
        assert d_model % self.num_heads == 0

        # bump the module-level counter so the projection layers of each
        # instance receive unique names
        global MHW_COUNTER
        MHW_COUNTER += 1

        self.depth = d_model // self.num_heads

        self.wq = tf.keras.layers.Dense(d_model, name=f"wq_{MHW_COUNTER}")
        self.wk = tf.keras.layers.Dense(d_model, name=f"wk_{MHW_COUNTER}")
        self.wv = tf.keras.layers.Dense(d_model, name=f"wv_{MHW_COUNTER}")

        self.dense = tf.keras.layers.Dense(d_model)

    def split_heads(self, x, batch_size):
        """Reshape (batch, seq, d_model) -> (batch, num_heads, seq, depth)."""
        x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
        return tf.transpose(x, perm=[0, 2, 1, 3])

    def __call__(self, v, k, q, mask):
        batch_size = tf.shape(q)[0]

        # linear projections up to d_model: (batch, seq_len, d_model)
        q = self.wq(q)
        k = self.wk(k)
        v = self.wv(v)

        # split into heads: (batch, num_heads, seq_len, depth)
        q = self.split_heads(q, batch_size)
        k = self.split_heads(k, batch_size)
        v = self.split_heads(v, batch_size)

        # attended.shape == (batch, num_heads, seq_len_q, depth)
        # weights.shape  == (batch, num_heads, seq_len_q, seq_len_k)
        attended, weights = scaled_dot_product_attention(
        )(q, k, v, mask)

        # merge the heads back together: (batch, seq_len_q, d_model)
        attended = tf.transpose(attended, perm=[0, 2, 1, 3])
        merged = tf.reshape(attended, (batch_size, -1, self.d_model))

        return self.dense(merged), weights
def point_wise_feed_forward_network(d_model, dff):
    """Two-layer position-wise MLP: d_model -> dff (swish) -> d_model."""
    hidden = tf.keras.layers.Dense(dff, activation='swish', name='swished_dense')   # (batch, seq_len, dff)
    projection = tf.keras.layers.Dense(d_model, name='ffn_output')                  # (batch, seq_len, d_model)
    return tf.keras.Sequential([hidden, projection])
class EncoderLayer(tf.keras.layers.Layer):
    """One Transformer encoder layer: self-attention + feed-forward,
    each wrapped with dropout and a residual LayerNormalization."""

    def __init__(self, d_model, num_heads, dff, rate=0.1, **kwargs):
        super(EncoderLayer, self).__init__(**kwargs)

        # unique suffix for this instance's dense-layer names
        global MHW_COUNTER
        MHW_COUNTER += 1

        self.mha = MultiHeadAttention(d_model, num_heads)
        # self.ffn = point_wise_feed_forward_network(d_model, dff)
        self.swished_dense = layers.Dense(dff, activation='swish', name=f'swished_dense_{MHW_COUNTER}')
        self.ffn_output = layers.Dense(d_model, name=f'ffn_output_{MHW_COUNTER}')

        self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)

        self.dropout1 = tf.keras.layers.Dropout(rate)
        self.dropout2 = tf.keras.layers.Dropout(rate)

    def __call__(self, x, training=True, mask=None):
        # self-attention sub-layer with residual connection
        attn_out, attn_weights = self.mha(x, x, x, mask)  # (batch, input_seq_len, d_model)
        attn_out = self.dropout1(attn_out, training=training)
        normed = self.layernorm1(x + attn_out)            # (batch, input_seq_len, d_model)

        # position-wise feed-forward sub-layer with residual connection
        ff = self.ffn_output(self.swished_dense(normed))
        ff = self.dropout2(ff, training=training)
        return self.layernorm2(normed + ff), attn_weights  # (batch, input_seq_len, d_model)
class TransformerBlocks(tf.keras.layers.Layer):
    """
    This layer stacks Transformers on top of each other.
    Example
    -------
    >>> import numpy as np
    >>> from tensorflow.keras.models import Model
    >>> from tensorflow.keras.layers import Input, Dense
    >>> from ai4water.models._tensorflow import TransformerBlocks
    >>> inp = Input(shape=(10, 32))
    >>> out, _ = TransformerBlocks(4, 4, 32)(inp)
    >>> out = Dense(1)(out)
    >>> model = Model(inputs=inp, outputs=out)
    >>> model.compile(optimizer="Adam", loss="mse")
    >>> x = np.random.random((100, 10, 32))
    >>> y = np.random.random(100)
    >>> h = model.fit(x,y)
    """
    def __init__(
            self,
            num_blocks:int,
            num_heads:int,
            embed_dim:int,
            name:str = "TransformerBlocks",
            **kwargs
    ):
        """
        Parameters
        -----------
        num_blocks : int
        num_heads : int
        embed_dim : int
        **kwargs :
            additional keyword arguments for :class:`ai4water.models.tensorflow.Transformer`
        """
        super(TransformerBlocks, self).__init__(name=name)
        self.num_blocks = num_blocks
        self.num_heads = num_heads
        self.embed_dim = embed_dim
        # build `num_blocks` independent Transformer blocks; extra kwargs are
        # forwarded to each Transformer, not to the base Layer
        self.blocks = []
        for n in range(num_blocks):
            self.blocks.append(Transformer(num_heads, embed_dim, **kwargs))

    def get_config(self)->dict:
        # NOTE(review): kwargs forwarded to the Transformer blocks are not
        # serialized here, so they are lost on a get_config round-trip.
        config = {
            "num_blocks": self.num_blocks,
            "num_heads": self.num_heads,
            "embed_dim": self.embed_dim
        }
        return config

    def __call__(self, inputs, *args, **kwargs):
        """Feeds `inputs` through each block in turn; returns (outputs, importances)."""
        attn_weights_list = []
        for transformer in self.blocks:
            # output of one block becomes input of the next
            inputs, attn_weights = transformer(inputs)
            # attention paid to position 0, summed over remaining axes
            attn_weights_list.append(tf.reduce_sum(attn_weights[:, :, 0, :]))
        # scalar importance averaged over blocks and heads
        importances = tf.reduce_sum(tf.stack(attn_weights_list), axis=0) / (
            self.num_blocks * self.num_heads)
        return inputs, importances
class Transformer(tf.keras.layers.Layer):
    """
    A basic transformer block consisting of
    LayerNormalization -> Add -> MultiheadAttention -> MLP ->
    Example
    -------
    >>> import numpy as np
    >>> from tensorflow.keras.models import Model
    >>> from tensorflow.keras.layers import Input, Dense
    >>> from ai4water.models._tensorflow import Transformer
    >>> inp = Input(shape=(10, 32))
    >>> out, _ = Transformer(4, 32)(inp)
    >>> out = Dense(1)(out)
    >>> model = Model(inputs=inp, outputs=out)
    >>> model.compile(optimizer="Adam", loss="mse")
    >>> x = np.random.random((100, 10, 32))
    >>> y = np.random.random(100)
    >>> h = model.fit(x,y)
    """
    def __init__(
            self,
            num_heads:int = 4,
            embed_dim:int=32,
            dropout=0.1,
            post_norm:bool = True,
            prenorm_mlp:bool = False,
            num_dense_lyrs:int = 1,
            seed:int = 313,
            *args,
            **kwargs
    ):
        """
        Parameters
        -----------
        num_heads : int
            number of attention heads
        embed_dim : int
            embedding dimension. This value is also used for units/neurons in MLP block
        dropout : float
            dropout rate in MLP block
        post_norm : bool (default=True)
            whether to apply LayerNormalization on the outputs or not.
        prenorm_mlp : bool
            whether to apply LayerNormalization on inputs of MLP or not
        num_dense_lyrs : int
            number of Dense layers in MLP block.
        seed : int
            seed for the Dropout layer (reproducibility)
        """
        super(Transformer, self).__init__(*args, **kwargs)
        self.num_heads = num_heads
        self.embed_dim = embed_dim
        self.dropout = dropout
        self.post_norm = post_norm
        self.prenorm_mlp = prenorm_mlp
        self.seed = seed

        # the MLP supports at most two Dense layers
        assert num_dense_lyrs <= 2
        self.num_dense_lyrs = num_dense_lyrs

        self.att = tf.keras.layers.MultiHeadAttention(
            num_heads=num_heads, key_dim=embed_dim,
            dropout=dropout
        )
        self.skip1 = tf.keras.layers.Add()
        self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.ffn = self._make_mlp()
        self.skip2 = tf.keras.layers.Add()
        self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)

    def _make_mlp(self):
        """Builds the (optionally pre-normalized) feed-forward block."""
        lyrs = []
        if self.prenorm_mlp:
            lyrs += [tf.keras.layers.LayerNormalization(epsilon=1e-6)]
        lyrs += [
            Dense(self.embed_dim, activation=tf.keras.activations.gelu),
            tf.keras.layers.Dropout(self.dropout, seed=self.seed),
        ]
        if self.num_dense_lyrs>1:
            lyrs += [tf.keras.layers.Dense(self.embed_dim)]
        return tf.keras.Sequential(lyrs)

    def get_config(self)->dict:
        # BUGFIX: this key was previously "pre_norm_mlp", which __init__ does
        # not accept, so rebuilding the layer via from_config()/cloning would
        # raise a TypeError. It must match the __init__ parameter name.
        config = {
            "num_heads": self.num_heads,
            "embed_dim": self.embed_dim,
            "dropout": self.dropout,
            "post_norm": self.post_norm,
            "prenorm_mlp": self.prenorm_mlp,
            "seed": self.seed,
            "num_dense_lyrs": self.num_dense_lyrs
        }
        return config

    def __call__(self, inputs, *args, **kwargs):
        # pre-normalize before the attention sub-layer
        inputs = self.layernorm1(inputs)
        attention_output, att_weights = self.att(
            inputs, inputs, return_attention_scores=True
        )
        # residual connection around attention
        attention_output = self.skip1([inputs, attention_output])
        feedforward_output = self.ffn(attention_output)
        # residual connection around the MLP
        outputs = self.skip2([feedforward_output, attention_output])

        if self.post_norm:
            return self.layernorm2(outputs), att_weights
        return outputs, att_weights
class NumericalEmbeddings(layers.Layer):
    """Per-feature linear embedding of scalar numerical features.

    Each of the ``num_features`` scalars gets its own weight vector and bias;
    the result (batch, num_features, emb_dim) is passed through ReLU.
    """

    def __init__(
            self,
            num_features,
            emb_dim,
            *args,
            **kwargs
    ):
        self.num_features = num_features
        self.emb_dim = emb_dim
        super(NumericalEmbeddings, self).__init__(*args, **kwargs)

    def build(self, input_shape):
        init = tf.random_normal_initializer()
        # (features, n_bins, emb_dim)
        self.linear_w = tf.Variable(
            initial_value=init(
                shape=(self.num_features, 1, self.emb_dim), dtype='float32'
            ), trainable=True, name="NumEmbeddingWeights")
        # (features, n_bins)
        self.linear_b = tf.Variable(
            init(
                shape=(self.num_features, 1), dtype='float32'
            ), trainable=True, name="NumEmbeddingBias")
        return

    def get_config(self)->dict:
        return {
            "num_features": self.num_features,
            "emb_dim": self.emb_dim
        }

    def call(self, X, *args, **kwargs):
        # (batch, features) x (features, 1, emb_dim) -> (batch, features, emb_dim)
        projected = tf.einsum('f n e, b f -> bfe', self.linear_w, X)
        # bias broadcasts from (features, 1) over batch and emb_dim
        return tf.nn.relu(projected + self.linear_b)
class CatEmbeddings(layers.Layer):
    """
    The layer to encode categorical features.
    Parameters
    -----------
    vocabulary : dict
    embed_dim : int
        dimension of embedding for each categorical feature
    lookup_kws : dict
        keyword arguments that will go to StringLookup layer
    """
    def __init__(
            self,
            vocabulary:dict,
            embed_dim:int = 32,
            lookup_kws:dict = None,
            *args,
            **kwargs
    ):
        super(CatEmbeddings, self).__init__(*args, **kwargs)
        self.vocabulary = vocabulary
        self.embed_dim = embed_dim
        self.lookup_kws = lookup_kws
        # per-feature StringLookup layers (raw string -> integer index)
        self.lookups = {}
        # per-feature Embedding layers (integer index -> embed_dim vector)
        self.embedding_lyrs = {}
        # keeps features in the same order as in `vocabulary`
        self.feature_names = []

        # defaults for StringLookup; user-supplied lookup_kws override these
        _lookup_kws = dict(mask_token=None,
                           num_oov_indices=0,
                           output_mode="int")
        if lookup_kws is not None:
            _lookup_kws.update(lookup_kws)

        for feature_name, vocab in vocabulary.items():
            lookup = layers.StringLookup(
                vocabulary=vocab,
                **_lookup_kws
            )
            self.lookups[feature_name] = lookup

            embedding = layers.Embedding(
                input_dim=len(vocab), output_dim=embed_dim
            )
            self.embedding_lyrs[feature_name] = embedding

            self.feature_names.append(feature_name)

    def get_config(self)->dict:
        config = {
            "lookup_kws": self.lookup_kws,
            "embed_dim": self.embed_dim,
            "vocabulary": self.vocabulary
        }
        return config

    def call(self, inputs, *args, **kwargs):
        """
        The tensors in `inputs` list must be in same
        order as in the `vocabulary` dictionary.
        Parameters
        -------------
        inputs : list
            a list of tensors of shape (None,)
        Returns
        -------
        a tensor of shape (None, num_cat_features, embed_dim)
        """
        encoded_features = []
        for idx, feat_name in enumerate(self.feature_names):
            # column `idx` holds this feature's raw values
            feat_input = inputs[:, idx]
            lookup = self.lookups[feat_name]
            encoded_feature = lookup(feat_input)  # string -> integer index
            embedding = self.embedding_lyrs[feat_name]
            encoded_categorical_feature = embedding(encoded_feature)  # -> (None, embed_dim)
            encoded_features.append(encoded_categorical_feature)

        # stack along a new axis -> (None, num_cat_features, embed_dim)
        cat_embeddings = tf.stack(encoded_features, axis=1)
        return cat_embeddings
class TabTransformer(layers.Layer):
    """
    tensorflow/keras layer which implements logic of TabTransformer model.
    The TabTransformer layer converts categorical features into contextual embeddings
    by passing them into Transformer block. The output of Transformer block is
    concatenated with numerical features and passed through an MLP to
    get the final model output.
    It is available only in tensorflow >= 2.6
    """
    def __init__(
            self,
            num_numeric_features: int,
            cat_vocabulary: dict,
            hidden_units=32,
            lookup_kws:dict=None,
            num_heads: int = 4,
            depth: int = 4,
            dropout: float = 0.1,
            num_dense_lyrs: int = 2,
            prenorm_mlp: bool = True,
            post_norm: bool = True,
            final_mlp_units = 16,
            final_mpl_activation:str = "selu",
            seed: int = 313,
            *args, **kwargs
    ):
        """
        Parameters
        ----------
        num_numeric_features : int
            number of numeric features to be used as input.
        cat_vocabulary : dict
            a dictionary whose keys are names of categorical features and values
            are lists which consist of unique values of categorical features.
            You can use the function :py:meth:`ai4water.models.utils.gen_cat_vocab`
            to create this for your own data. The length of dictionary should be
            equal to number of categorical features. If it is None, then this
            layer expects only numeri features
        hidden_units : int, optional (default=32)
            number of hidden units
        num_heads : int, optional (default=4)
            number of attention heads
        depth : int (default=4)
            number of transformer blocks to be stacked on top of each other
        dropout : int, optional (default=0.1)
            droput rate in transformer
        post_norm : bool (default=True)
        prenorm_mlp : bool (default=True)
        num_dense_lyrs : int (default=2)
            number of dense layers in MLP block inside the Transformer
        final_mlp_units : int (default=16)
            number of units/neurons in final MLP layer i.e. the MLP layer
            after Transformer block
        """
        super(TabTransformer, self).__init__(*args, **kwargs)
        self.cat_vocabulary = cat_vocabulary
        self.num_numeric_inputs = num_numeric_features
        self.hidden_units = hidden_units
        self.lookup_kws = lookup_kws
        self.num_heads = num_heads
        self.depth = depth
        self.dropout = dropout
        self.final_mlp_units = final_mlp_units
        self.final_mpl_activation = final_mpl_activation
        self.seed = seed

        # categorical features -> (batch, num_cat_features, hidden_units)
        self.cat_embs = CatEmbeddings(
            vocabulary=cat_vocabulary,
            embed_dim=hidden_units,
            lookup_kws=lookup_kws
        )
        # layer normalization of numerical features
        self.lyr_norm = layers.LayerNormalization(epsilon=1e-6)

        # Transformer stack producing contextual categorical embeddings
        self.transformers = TransformerBlocks(
            embed_dim=hidden_units,
            num_heads=num_heads,
            num_blocks=depth,
            num_dense_lyrs=num_dense_lyrs,
            post_norm=post_norm,
            prenorm_mlp=prenorm_mlp,
            dropout=dropout,
            seed=seed
        )
        self.flatten = layers.Flatten()
        self.concat = layers.Concatenate()
        self.mlp = self.create_mlp(
            activation=self.final_mpl_activation,
            normalization_layer=layers.BatchNormalization(),
            name="MLP",
        )

    # Implement an MLP block
    def create_mlp(
            self,
            activation,
            normalization_layer,
            name=None
    ):
        """Builds the final MLP: [normalization -> Dense -> Dropout] per hidden size."""
        if isinstance(self.final_mlp_units, int):
            hidden_units = [self.final_mlp_units]
        else:
            assert isinstance(self.final_mlp_units, list)
            hidden_units = self.final_mlp_units

        mlp_layers = []
        for units in hidden_units:
            # NOTE(review): the same `normalization_layer` instance is appended
            # for every entry of `hidden_units`; when `final_mlp_units` is a
            # list, the normalization weights are shared across depths —
            # confirm this is intended. (The trailing comma below makes a
            # harmless 1-tuple expression; it does not change behavior.)
            mlp_layers.append(normalization_layer),
            mlp_layers.append(layers.Dense(units, activation=activation))
            mlp_layers.append(layers.Dropout(self.dropout, seed=self.seed))
        return tf.keras.Sequential(mlp_layers, name=name)

    def __call__(self, inputs:list, *args, **kwargs):
        """
        inputs :
            list of 2. The first tensor is numerical inputs and second
            tensor is categorical inputs
        """
        num_inputs = inputs[0]
        cat_inputs = inputs[1]

        # contextual embeddings of categorical features from the Transformer stack
        cat_embs = self.cat_embs(cat_inputs)
        transformer_outputs, imp = self.transformers(cat_embs)
        flat_transformer_outputs = self.flatten(transformer_outputs)

        # numerical features are only layer-normalized, not embedded
        num_embs = self.lyr_norm(num_inputs)

        x = self.concat([num_embs, flat_transformer_outputs])
        return self.mlp(x), imp
class FTTransformer(layers.Layer):
    """
    tensorflow/keras layer which implements logic of FTTransformer model.
    In FTTransformer, both categorical and numerical features are passed
    through transformer block and then passed through MLP layer to get
    the final model prediction.
    """
    def __init__(
            self,
            num_numeric_features: int,
            cat_vocabulary: Union[dict, None] = None,
            hidden_units=32,
            num_heads: int = 4,
            depth: int = 4,
            dropout: float = 0.1,
            lookup_kws:dict = None,
            num_dense_lyrs: int = 2,
            post_norm: bool = True,
            final_mlp_units: int = 16,
            with_cls_token:bool = False,
            seed: int = 313,
            *args,
            **kwargs
    ):
        """
        Parameters
        ----------
        num_numeric_features : int
            number of numeric features to be used as input.
        cat_vocabulary : dict/None
            a dictionary whose keys are names of categorical features and values
            are lists which consist of unique values of categorical features.
            You can use the function :py:meth:`ai4water.models.utils.gen_cat_vocab`
            to create this for your own data. The length of dictionary should be
            equal to number of categorical features. If it is None, then this
            layer expects only numeri features
        hidden_units : int, optional (default=32)
            number of hidden units
        num_heads : int, optional (default=4)
            number of attention heads
        depth : int (default=4)
            number of transformer blocks to be stacked on top of each other
        dropout : float, optional (default=0.1)
            droput rate in transformer
        lookup_kws : dict
            keyword arguments for lookup layer
        post_norm : bool (default=True)
        num_dense_lyrs : int (default=2)
            number of dense layers in MLP block inside the Transformer
        final_mlp_units : int (default=16)
            number of units/neurons in final MLP layer i.e. the MLP layer
            after Transformer block
        with_cls_token : bool (default=False)
            whether to use cls token or not
        seed : int
            seed for reproducibility
        """
        super(FTTransformer, self).__init__(*args, **kwargs)
        self.cat_vocabulary = cat_vocabulary
        self.num_numeric_inputs = num_numeric_features
        self.hidden_units = hidden_units
        self.num_heads = num_heads
        self.depth = depth
        self.dropout = dropout
        self.final_mlp_units = final_mlp_units
        self.with_cls_token = with_cls_token
        self.seed = seed

        # categorical embeddings are only built when a vocabulary is given
        if cat_vocabulary is not None:
            self.cat_embs = CatEmbeddings(
                vocabulary=cat_vocabulary,
                embed_dim=hidden_units,
                lookup_kws=lookup_kws
            )

        # unlike TabTransformer, numerical features are embedded as well
        self.num_embs = NumericalEmbeddings(
            num_features=num_numeric_features,
            emb_dim=hidden_units
        )

        if cat_vocabulary is not None:
            # joins numerical and categorical embeddings along the feature axis
            self.concat = layers.Concatenate(axis=1)

        self.transformers = TransformerBlocks(
            embed_dim=hidden_units,
            num_heads=num_heads,
            num_blocks=depth,
            num_dense_lyrs=num_dense_lyrs,
            post_norm=post_norm,
            dropout=dropout,
            seed=seed
        )

        # selects the representation of the first feature/token
        self.lmbda = tf.keras.layers.Lambda(lambda x: x[:, 0, :])
        self.lyr_norm = layers.LayerNormalization(epsilon=1e-6)
        self.mlp = layers.Dense(final_mlp_units)

    def build(self, input_shape):
        if self.with_cls_token:
            # CLS token
            # NOTE(review): `cls_weights` is created here but never consumed in
            # __call__ (the cls-token concatenation there is commented out) —
            # confirm whether cls-token support is meant to be finished.
            w_init = tf.random_normal_initializer()
            self.cls_weights = tf.Variable(
                initial_value=w_init(shape=(1, self.hidden_units), dtype="float32"),
                trainable=True,
            )
        return

    def __call__(self, inputs:list, *args, **kwargs):
        """
        inputs :
            If categorical variables are considered then inputs is a list of 2.
            The first tensor is numerical inputs and second tensor is categorical inputs.
            If categorical variables are not considered then inputs is just a single
            tensor!
        """
        if self.cat_vocabulary is None:
            if isinstance(inputs, list):
                assert len(inputs) == 1
                num_inputs = inputs[0]
            else:
                num_inputs = inputs
        else:
            assert len(inputs) == 2
            num_inputs = inputs[0]
            cat_inputs = inputs[1]

        # cls_tokens = tf.repeat(self.cls_weights, repeats=tf.shape(inputs[self.numerical[0]])[0], axis=0)
        # cls_tokens = tf.expand_dims(cls_tokens, axis=1)

        # numerical embeddings: (batch, num_numeric_features, hidden_units)
        num_embs = self.num_embs(num_inputs)

        if self.cat_vocabulary is None:
            embs = num_embs
        else:
            cat_embs = self.cat_embs(cat_inputs)
            # (batch, num_numeric + num_cat features, hidden_units)
            embs = self.concat([num_embs, cat_embs])

        x, imp = self.transformers(embs)
        # keep only the first feature's contextual representation
        x = self.lmbda(x)
        x = self.lyr_norm(x)

        return self.mlp(x), imp
class Conditionalize(tf.keras.layers.Layer):
    """Mimics the behaviour of cond_rnn of Philipperemy but puts the logic
    of condition in a separate layer so that it becomes easier to use it.
    Example
    --------
    >>> from ai4water.models._tensorflow import Conditionalize
    >>> from tensorflow.keras.layers import Input, LSTM
    >>> i = Input(shape=(10, 3))
    >>> raw_conditions = Input(shape=(14,))
    >>> processed_conds = Conditionalize(32)([raw_conditions, raw_conditions, raw_conditions])
    >>> rnn = LSTM(32)(i, initial_state=[processed_conds, processed_conds])
    This layer can also be used in ai4water model when defining the model
    using declarative model definition style
    >>> from ai4water import Model
    >>> import numpy as np
    >>> model = Model(model={"layers": {
    ...     "Input": {"shape": (10, 3)},
    ...     "Input_cat": {"shape": (10,)},
    ...     "Conditionalize": {"config": {"units": 32, "name": "h_state"},
    ...                        "inputs": "Input_cat"},
    ...     "LSTM": {"config": {"units": 32},
    ...              "inputs": "Input",
    ...              'call_args': {'initial_state': ['h_state', 'h_state']}},
    ...     "Dense": {"units": 1}}},
    ...     ts_args={"lookback": 10}, verbosity=0, epochs=1)
    ... # define the input and call the .fit method
    >>> x1 = np.random.random((100, 10, 3))
    >>> x2 = np.random.random((100, 10))
    >>> y = np.random.random(100)
    >>> h = model.fit(x=[x1, x2], y=y)
    """
    def __init__(self, units,
                 max_num_cond=10,
                 use_bias:bool = True,
                 **kwargs):
        self.units = units
        super().__init__(**kwargs)

        # single cond: one dense layer projects the condition to `units`
        self.cond_to_init_state_dense_1 = tf.keras.layers.Dense(units=self.units,
                                                                use_bias=use_bias,
                                                                name="conditional_dense")
        # multi cond: one dense layer per condition, up to `max_num_cond`
        self.multi_cond_to_init_state_dense = []

        for i in range(max_num_cond):
            self.multi_cond_to_init_state_dense.append(tf.keras.layers.Dense(
                units=self.units,
                use_bias=use_bias,
                name=f"conditional_dense{i}"))

        # mixes the stacked per-condition states down to a single state
        self.multi_cond_p = tf.keras.layers.Dense(1, activation=None, use_bias=True, name="conditional_dense_out")

    @staticmethod
    def _standardize_condition(initial_cond):
        # conditions must be rank-2: (batch, features)
        assert len(initial_cond.shape) == 2, initial_cond.shape

        return initial_cond

    def __call__(self, inputs, *args, **kwargs):
        """Returns a (batch, units) state tensor derived from the condition(s)."""
        if args or kwargs:
            raise ValueError(f"Unrecognized input arguments\n args: {args} \nkwargs: {kwargs}")

        # a single tensor is treated as a one-element list of conditions
        if inputs.__class__.__name__ in ("Tensor", "KerasTensor"):
            inputs = [inputs]

        assert isinstance(inputs, (list, tuple)) and len(inputs) >= 1, f"{type(inputs)}"

        cond = inputs
        if len(cond) > 1:  # multiple conditions.
            init_state_list = []
            for idx, c in enumerate(cond):
                # each condition gets its own projection layer
                init_state_list.append(self.multi_cond_to_init_state_dense[idx](self._standardize_condition(c)))
            multi_cond_state = tf.stack(init_state_list, axis=-1)  # -> (?, units, num_conds)
            multi_cond_state = self.multi_cond_p(multi_cond_state)  # -> (?, units, 1)
            cond_state = tf.squeeze(multi_cond_state, axis=-1)  # -> (?, units)
        else:
            cond = self._standardize_condition(cond[0])
            cond_state = self.cond_to_init_state_dense_1(cond)  # -> (?, units)

        return cond_state
class _NormalizedGate(Layer):
    """Dense gate whose reshaped output is L2-normalized along the last axis.

    The input is projected to ``out_shape[0] * out_shape[1]`` units, reshaped
    to (-1, *out_shape), passed through the chosen normalizer (relu or
    sigmoid) and finally normalized over the last dimension.
    """

    _Normalizers = {
        'relu': tf.nn.relu,
        'sigmoid': tf.nn.sigmoid
    }

    def __init__(self, in_features, out_shape, normalizer="relu"):
        super(_NormalizedGate, self).__init__()
        self.in_features = in_features
        self.out_shape = out_shape
        self.normalizer = self._Normalizers[normalizer]

        self.fc = Dense(out_shape[0]*out_shape[1],
                        use_bias=True,
                        kernel_initializer="Orthogonal",
                        bias_initializer="zeros")

    def call(self, inputs):
        projected = self.fc(inputs)
        gated = self.normalizer(tf.reshape(projected, (-1, *self.out_shape)))
        normalized, _ = tf.linalg.normalize(gated, axis=-1)
        return normalized
class _MCLSTMCell(Layer):
    """
    Recurrent cell of the Mass-Conserving LSTM, unrolled over time with a
    Python loop.
    Examples
    --------
    m_inp = tf.range(50, dtype=tf.float32)
    m_inp = tf.reshape(m_inp, (5, 10, 1))
    aux_inp = tf.range(150, dtype=tf.float32)
    aux_inp = tf.reshape(aux_inp, (5, 10, 3))
    cell = _MCLSTMCell(1, 3, 8)
    m_out_, ct_ = cell(m_inp, aux_inp)
    """
    def __init__(
            self,
            mass_input_size,
            aux_input_size,
            units,
            time_major:bool = False,
    ):
        super(_MCLSTMCell, self).__init__()
        self.units = units
        self.time_major = time_major

        # gates see [mass input, aux input, normalized cell state] concatenated
        gate_inputs = aux_input_size + self.units + mass_input_size

        self.output_gate = Dense(self.units,
                                 activation="sigmoid",
                                 kernel_initializer="Orthogonal",
                                 bias_initializer="zeros",
                                 name="sigmoid_gate")
        # distributes incoming mass over the units (rows sum to 1 via sigmoid+normalize)
        self.input_gate = _NormalizedGate(gate_inputs,
                                          (mass_input_size, self.units),
                                          "sigmoid")
        # redistributes mass between units (relu + normalize keeps it conserved)
        self.redistribution = _NormalizedGate(gate_inputs,
                                              (self.units, self.units),
                                              "relu")

    def call(self, x_m, x_a, ct=None):
        """Unrolls the cell; returns time-major (m_out, c) stacks."""
        if not self.time_major:
            # (batch_size, lookback, input_features) -> (lookback, batch_size, input_features)
            x_m = tf.transpose(x_m, [1, 0, 2])
            x_a = tf.transpose(x_a, [1, 0, 2])

        # NOTE(review): relies on static shapes; with a dynamic batch size
        # (None) tf.zeros below would fail — confirm callers use fixed batches
        lookback_steps, batch_size, _ = x_m.shape

        if ct is None:
            ct = tf.zeros((batch_size, self.units))

        m_out, c = [], []
        for time_step in range(lookback_steps):
            mt_out, ct = self._step(x_m[time_step], x_a[time_step], ct)

            m_out.append(mt_out)
            c.append(ct)

        m_out, c = tf.stack(m_out), tf.stack(c)  # (lookback, batch_size, units)

        return m_out, c

    def _step(self, xt_m, xt_a, c):
        """Make a single time step in the MCLSTM; returns (output, new cell state)."""
        # cell state is normalized before entering the gates
        features = tf.concat([xt_m, xt_a, c / (tf.norm(c) + 1e-5)], axis=-1)  # (examples, ?)

        # compute gate activations
        i = self.input_gate(features)  # (examples, 1, units)
        r = self.redistribution(features)  # (examples, units, units)
        o = self.output_gate(features)  # (examples, units)

        # mass entering each unit, and mass moved between units
        m_in = tf.squeeze(tf.matmul(tf.expand_dims(xt_m, axis=-2), i), axis=-2)
        m_sys = tf.squeeze(tf.matmul(tf.expand_dims(c, axis=-2), r), axis=-2)
        m_new = m_in + m_sys

        # output gate splits the mass: o*m_new leaves, (1-o)*m_new stays in the cell
        return tf.multiply(o, m_new), tf.multiply(tf.subtract(1.0, o), m_new)
class MCLSTM(Layer):
    """Mass-Conserving LSTM model from Hoedt et al. [1]_.
    This implementation follows of NeuralHydrology's implementation of MCLSTM
    with some changes:
    1) reduced sum is not performed for over the units
    2) time_major argument is added
    3) no implementation of Embedding
    Examples
    --------
    >>> from ai4water.models._tensorflow import MCLSTM
    >>> import tensorflow as tf
    >>> inputs = tf.range(150, dtype=tf.float32)
    >>> inputs = tf.reshape(inputs, (10, 5, 3))
    >>> mc = MCLSTM(1, 2, 8, 1)
    >>> h = mc(inputs)  # (batch, units)
    ...
    >>> mc = MCLSTM(1, 2, 8, 1, return_sequences=True)
    >>> h = mc(inputs)  # (batch, lookback, units)
    ...
    >>> mc = MCLSTM(1, 2, 8, 1, return_state=True)
    >>> _h, _o, _c = mc(inputs)  # (batch, lookback, units)
    ...
    >>> mc = MCLSTM(1, 2, 8, 1, return_state=True, return_sequences=True)
    >>> _h, _o, _c = mc(inputs)  # (batch, lookback, units)
    ...
    ... # with time_major as True
    >>> inputs = tf.range(150, dtype=tf.float32)
    >>> inputs = tf.reshape(inputs, (5, 10, 3))
    >>> mc = MCLSTM(1, 2, 8, 1, time_major=True)
    >>> _h = mc(inputs)  # (batch, units)
    ...
    >>> mc = MCLSTM(1, 2, 8, 1, time_major=True, return_sequences=True)
    >>> _h = mc(inputs)  # (lookback, batch, units)
    ...
    >>> mc = MCLSTM(1, 2, 8, 1, time_major=True, return_state=True)
    >>> _h, _o, _c = mc(inputs)  # (batch, units), ..., (lookback, batch, units)
    ...
    ... # end to end keras Model
    >>> from tensorflow.keras.layers import Dense, Input
    >>> from tensorflow.keras.models import Model
    >>> import numpy as np
    ...
    >>> inp = Input(batch_shape=(32, 10, 3))
    >>> lstm = MCLSTM(1, 2, 8)(inp)
    >>> out = Dense(1)(lstm)
    ...
    >>> model = Model(inputs=inp, outputs=out)
    >>> model.compile(loss='mse')
    ...
    >>> x = np.random.random((320, 10, 3))
    >>> y = np.random.random((320, 1))
    >>> y = model.fit(x=x, y=y)
    References
    ----------
    .. [1] https://arxiv.org/abs/2101.05186
    """
    def __init__(
            self,
            num_mass_inputs,
            dynamic_inputs,
            units,
            num_targets=1,
            time_major:bool = False,
            return_sequences:bool = False,
            return_state:bool = False,
            name="MCLSTM",
            **kwargs
    ):
        """
        Parameters
        ----------
        num_targets : int
            number of inputs for which mass balance is to be reserved.
        dynamic_inputs :
            number of inpts other than mass_targets
        units :
            hidden size, determines the size of weight matrix
        time_major : bool, optional (default=False)
            if True, the data is expected to be of shape (lookback, batch_size, input_features)
            otherwise, data is expected of shape (batch_size, lookback, input_features)
        """
        super(MCLSTM, self).__init__(name=name, **kwargs)

        # current implementation only supports a single mass input/target
        assert num_mass_inputs ==1
        assert units>1
        assert num_targets==1

        self.n_mass_inputs = num_mass_inputs
        self.units = units
        self.n_aux_inputs = dynamic_inputs
        self.time_major = time_major
        self.return_sequences = return_sequences
        self.return_state = return_state

        self.mclstm = _MCLSTMCell(
            self.n_mass_inputs,
            self.n_aux_inputs,
            self.units,
            self.time_major,
        )

    def call(self, inputs):
        # the first n_mass_inputs features are the mass inputs, the rest auxiliary
        x_m = inputs[:, :, :self.n_mass_inputs]  # (batch, lookback, 1)
        x_a = inputs[:, :, self.n_mass_inputs:]  # (batch, lookback, dynamic_inputs)

        output, c = self.mclstm(x_m, x_a)  # (lookback, batch, units)

        # unlike NeuralHydrology, we don't preform reduced sum over units
        # to keep with the convention in keras/lstm
        #output = tf.math.reduce_sum(output[:, :, 1:], axis=-1, keepdims=True)

        if self.time_major:
            h, m_out, c = output, output, c
            if not self.return_sequences:
                # only keep the last time step
                h = h[-1]
        else:
            # cell returned time-major stacks; transpose back to batch-major
            h = tf.transpose(output, [1, 0, 2])  # -> (batch_size, lookback, 1)
            #m_out = tf.transpose(output, [1, 0, 2])  # -> (batch_size, lookback, 1)
            c = tf.transpose(c, [1, 0, 2])  # -> (batch_size, lookback, units)
            if not self.return_sequences:
                h = h[:, -1]

        if self.return_state:
            # mirrors keras LSTM's (output, h_state, c_state) convention;
            # here output and hidden state are the same tensor
            return h, h, c

        return h
class EALSTM(Layer):
"""Entity Aware LSTM as proposed by Kratzert et al., 2019 [1]_
The difference here is that a Dense layer is not applied on cell state as done in
original implementation in NeuralHydrology [2]_. This is left to user's discretion.
Examples
--------
>>> from ai4water.models._tensorflow import EALSTM
>>> import tensorflow as tf
>>> batch_size, lookback, num_dyn_inputs, num_static_inputs, units = 10, 5, 3, 2, 8
>>> inputs = tf.range(batch_size*lookback*num_dyn_inputs, dtype=tf.float32)
>>> inputs = tf.reshape(inputs, (batch_size, lookback, num_dyn_inputs))
>>> stat_inputs = tf.range(batch_size*num_static_inputs, dtype=tf.float32)
>>> stat_inputs = tf.reshape(stat_inputs, (batch_size, num_static_inputs))
>>> lstm = EALSTM(units, num_static_inputs)
>>> h_n = lstm(inputs, stat_inputs) # -> (batch_size, units)
...
... # with return sequences
>>> lstm = EALSTM(units, num_static_inputs, return_sequences=True)
>>> h_n = lstm(inputs, stat_inputs) # -> (batch, lookback, units)
...
... # with return sequences and return_state
>>> lstm = EALSTM(units, num_static_inputs, return_sequences=True, return_state=True)
>>> h_n, [c_n, y_hat] = lstm(inputs, stat_inputs) # -> (batch, lookback, units), [(), ()]
...
... # end to end Keras model
>>> from tensorflow.keras.models import Model
>>> from tensorflow.keras.layers import Input, Dense
>>> import numpy as np
>>> inp_dyn = Input(batch_shape=(batch_size, lookback, num_dyn_inputs))
>>> inp_static = Input(batch_shape=(batch_size, num_static_inputs))
>>> lstm = EALSTM(units, num_static_inputs)(inp_dyn, inp_static)
>>> out = Dense(1)(lstm)
>>> model = Model(inputs=[inp_dyn, inp_static], outputs=out)
>>> model.compile(loss='mse')
>>> print(model.summary())
... # generate hypothetical data and train it
>>> dyn_x = np.random.random((100, lookback, num_dyn_inputs))
>>> static_x = np.random.random((100, num_static_inputs))
>>> y = np.random.random((100, 1))
>>> h = model.fit(x=[dyn_x, static_x], y=y, batch_size=batch_size)
References
----------
.. [1] https://doi.org/10.5194/hess-23-5089-2019
.. [2] https://github.com/neuralhydrology/neuralhydrology
"""
def __init__(
        self,
        units:int,
        num_static_inputs:int,
        use_bias:bool=True,
        activation = "tanh",
        recurrent_activation="sigmoid",
        static_activation="sigmoid",
        kernel_initializer='glorot_uniform',
        recurrent_initializer='orthogonal',
        bias_initializer='zeros',
        static_initializer = "glorot_uniform",
        kernel_constraint=None,
        recurrent_constraint=None,
        bias_constraint=None,
        static_constraint=None,
        kernel_regularizer=None,
        recurrent_regularizer=None,
        bias_regularizer=None,
        static_regularizer=None,
        return_state=False,
        return_sequences=False,
        time_major=False,
        **kwargs
):
    """
    Parameters
    ----------
    units : int
        number of units
    num_static_inputs : int
        number of static features
    activation :
        activation for the cell state / output (default tanh)
    recurrent_activation :
        activation for the forget and output gates (default sigmoid)
    static_activation :
        activation function for static input gate
    static_regularizer :
    static_constraint :
    static_initializer :
    return_state : bool
        whether to also return [hidden states, cell states]
    return_sequences : bool
        whether to return the full output sequence or only the last step
    time_major : bool
        if True, inputs are (lookback, batch, features) instead of batch-major
    """
    super(EALSTM, self).__init__(**kwargs)
    self.units = units
    self.num_static_inputs = num_static_inputs

    self.activation = activations.get(activation)
    self.rec_activation = activations.get(recurrent_activation)
    self.static_activation = static_activation
    self.use_bias = use_bias

    self.kernel_initializer = initializers.get(kernel_initializer)
    self.recurrent_initializer = initializers.get(recurrent_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.static_initializer = initializers.get(static_initializer)

    self.kernel_constraint = constraints.get(kernel_constraint)
    self.recurrent_constraint = constraints.get(recurrent_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    # NOTE(review): unlike the other constraints/regularizers, the static
    # ones are stored raw (no constraints.get/regularizers.get) — confirm
    # whether string identifiers should be resolved here as well
    self.static_constraint = static_constraint

    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.static_regularizer = static_regularizer

    self.return_state = return_state
    self.return_sequences = return_sequences
    self.time_major=time_major

    # the entity-aware input gate: computed once per sequence from the
    # static features (see `call`), not from the dynamic inputs
    self.input_gate = Dense(units,
                            use_bias=self.use_bias,
                            kernel_initializer=self.static_initializer,
                            bias_initializer=self.bias_initializer,
                            activation=self.static_activation,
                            kernel_constraint=self.static_constraint,
                            bias_constraint=self.bias_constraint,
                            kernel_regularizer=self.static_regularizer,
                            bias_regularizer=self.bias_regularizer,
                            name="input_gate")
def call(self, inputs, static_inputs, initial_state=None, **kwargs):
    """
    Run the EA-LSTM over a whole sequence.

    inputs :
        dynamic features; (batch, lookback, features) when
        ``self.time_major`` is False, else (lookback, batch, features)
        (the transpose below normalizes to time-major).
    static_inputs :
        of shape (batch, num_static_inputs)
    initial_state :
        optional [h, c] pair; when omitted, zero states are used.

    Returns the last hidden state (or the full output sequence when
    ``return_sequences`` is set), optionally followed by [h, c] state
    sequences when ``return_state`` is set.
    """
    if not self.time_major:
        # normalize to (lookback, batch, features) for the python loop below
        inputs = tf.transpose(inputs, [1, 0, 2])
    lookback, batch_size, _ = inputs.shape
    if initial_state is None:
        # NOTE(review): batch_size may be None in graph mode — confirm
        initial_state = tf.zeros((batch_size, self.units))  # todo
        state = [initial_state, initial_state]
    else:
        state = initial_state
    # calculate input gate only once because inputs are static
    inp_g = self.input_gate(static_inputs)  # (batch, num_static_inputs) -> (batch, units)
    outputs, states = [], []
    for time_step in range(lookback):
        _out, state = self.cell(inputs[time_step], inp_g, state)
        outputs.append(_out)
        states.append(state)
    # stack per-step results into (lookback, batch, units) tensors
    outputs = tf.stack(outputs)
    h_s = tf.stack([states[i][0] for i in range(lookback)])
    c_s = tf.stack([states[i][1] for i in range(lookback)])
    if not self.time_major:
        # transpose back to batch-major for the caller
        outputs = tf.transpose(outputs, [1, 0, 2])
        h_s = tf.transpose(h_s, [1, 0, 2])
        c_s = tf.transpose(c_s, [1, 0, 2])
        states = [h_s, c_s]
        last_output = outputs[:, -1]
    else:
        states = [h_s, c_s]
        last_output = outputs[-1]
    h = last_output
    if self.return_sequences:
        h = outputs
    if self.return_state:
        return h, states
    return h
def cell(self, inputs, i, states):
    """One EA-LSTM step.

    `i` is the precomputed input gate (from static features), `states`
    is the [h, c] pair from the previous step. Returns (h, [h, c]).
    """
    prev_h, prev_c = states[0], states[1]
    n = self.units
    # Project the dynamic inputs through the forget/candidate/output kernels.
    kernels = array_ops.split(self.kernel, num_or_size_splits=3, axis=1)
    x_f, x_c, x_o = [K.dot(inputs, k) for k in kernels]
    if self.use_bias:
        biases = array_ops.split(self.bias, num_or_size_splits=3, axis=0)
        x_f, x_c, x_o = [K.bias_add(x, b)
                         for x, b in zip((x_f, x_c, x_o), biases)]
    # forget gate
    f = self.rec_activation(x_f + K.dot(prev_h, self.rec_kernel[:, :n]))
    # cell state (input gate `i` comes from the static branch)
    c = f * prev_c + i * self.activation(
        x_c + K.dot(prev_h, self.rec_kernel[:, n:n * 2]))
    # output gate
    o = self.rec_activation(x_o + K.dot(prev_h, self.rec_kernel[:, n * 2:]))
    h = o * self.activation(c)
    return h, [h, c]
def build(self, input_shape):
    """Create the layer weights.

    kernel, recurrent kernel and bias are sized for 3 gates instead of
    the 4 gates of a vanilla LSTM — the input gate is produced from the
    static features by a separate Dense layer.
    """
    feature_dim = input_shape[-1]
    gate_width = self.units * 3
    self.bias = self.add_weight(
        name='bias',
        shape=(gate_width,),
        initializer=self.bias_initializer,
        regularizer=self.bias_regularizer,
        constraint=self.bias_constraint,
    )
    self.kernel = self.add_weight(
        name='kernel',
        shape=(feature_dim, gate_width),
        initializer=self.kernel_initializer,
        regularizer=self.kernel_regularizer,
        constraint=self.kernel_constraint,
    )
    self.rec_kernel = self.add_weight(
        name='recurrent_kernel',
        shape=(self.units, gate_width),
        initializer=self.recurrent_initializer,
        regularizer=self.recurrent_regularizer,
        constraint=self.recurrent_constraint,
    )
    self.built = True
class PrivateLayers(object):
    """Namespace exposing the custom layers defined in this module."""

    class layers:
        # Registry of custom layer classes (names defined elsewhere in
        # this module). The stray "| PypiClean |" garbage that trailed
        # the last assignment was removed — it was not valid Python.
        BasicBlock = BasicBlock
        CONDRNN = ConditionalRNN
        Conditionalize = Conditionalize
        MCLSTM = MCLSTM
        EALSTM = EALSTM
        CatEmbeddings = CatEmbeddings
        TransformerBlocks = TransformerBlocks
        NumericalEmbeddings = NumericalEmbeddings
        TabTransformer = TabTransformer
        FTTransformer = FTTransformer
from __future__ import print_function
__revision__ = "src/engine/SCons/SConf.py bee7caf9defd6e108fc2998a2520ddb36a967691 2019-12-17 02:07:09 bdeegan"
import SCons.compat
import io
import os
import re
import sys
import traceback
import SCons.Action
import SCons.Builder
import SCons.Errors
import SCons.Job
import SCons.Node.FS
import SCons.Taskmaster
import SCons.Util
import SCons.Warnings
import SCons.Conftest
from SCons.Debug import Trace
# Turn off the Conftest error logging
SCons.Conftest.LogInputFiles = 0
SCons.Conftest.LogErrorMessages = 0
# Build-type bookkeeping: set via SetBuildType() from SCons.Script so
# SConf() can become a no-op during 'clean' and 'help' runs.
build_type = None
build_types = ['clean', 'help']
def SetBuildType(type):
    """Record the current build type (e.g. 'clean' or 'help').

    Called from SCons.Script; see SConf() which consults build_type.
    """
    global build_type
    build_type = type
# to be set, if we are in dry-run mode
dryrun = 0

# Cache modes for configure-test results (see SetCacheMode below):
AUTO = 0   # use SCons dependency scanning for up-to-date checks
FORCE = 1  # force all tests to be rebuilt
CACHE = 2  # force all tests to be taken from cache (raise an error, if necessary)
cache_mode = AUTO
def SetCacheMode(mode):
    """Set the Configure cache mode. mode must be one of "auto", "force",
    or "cache"."""
    global cache_mode
    if mode not in ("auto", "force", "cache"):
        raise ValueError("SCons.SConf.SetCacheMode: Unknown mode " + mode)
    # map the user-visible mode string onto the module-level constant
    cache_mode = {"auto": AUTO, "force": FORCE, "cache": CACHE}[mode]
progress_display = SCons.Util.display # will be overwritten by SCons.Script

def SetProgressDisplay(display):
    """Set the progress display to use (called from SCons.Script)"""
    global progress_display
    progress_display = display
SConfFS = None           # the filesystem object used by the Configure subsystem

_ac_build_counter = 0    # incremented, whenever TryBuild is called
_ac_config_logs = {}     # all config.log files created in this build
_ac_config_hs = {}       # all config.h files created in this build
sconf_global = None      # current sconf object (only one may be active)
def _createConfigH(target, source, env):
    """Builder action: write the config header *target*, wrapping the
    accumulated text from *source* in an include guard derived from the
    target file name."""
    # Mangle the file name into a legal C preprocessor identifier.
    defname = re.sub('[^A-Za-z0-9_]', '_', str(target[0]).upper())
    # 'with' guarantees the handle is closed even if a write fails
    # (the original left the file open on error).
    with open(str(target[0]), "w") as t:
        t.write("""#ifndef %(DEFNAME)s_SEEN
#define %(DEFNAME)s_SEEN

""" % {'DEFNAME' : defname})
        t.write(source[0].get_contents().decode())
        t.write("""
#endif /* %(DEFNAME)s_SEEN */
""" % {'DEFNAME' : defname})
def _stringConfigH(target, source, env):
    """Progress message shown when the config.h builder runs."""
    return "scons: Configure: creating %s" % str(target[0])
def NeedConfigHBuilder():
    """Return True when at least one config.h file was requested in this
    build (i.e. the config-h builder must be installed)."""
    # bool() on the dict replaces the verbose len(...) == 0 if/else chain.
    return bool(_ac_config_hs)
def CreateConfigHBuilder(env):
    """Called if necessary just before the building targets phase begins."""
    builder = SCons.Builder.Builder(
        action=SCons.Action.Action(_createConfigH, _stringConfigH))
    env.Append(BUILDERS={'SConfigHBuilder': builder})
    # snapshot the registry before scheduling the builders
    for header, text in list(_ac_config_hs.items()):
        env.SConfigHBuilder(header, env.Value(text))
class SConfWarning(SCons.Warnings.Warning):
    # Warning category for non-fatal Configure problems.
    pass
SCons.Warnings.enableWarningClass(SConfWarning)
# some error definitions
class SConfError(SCons.Errors.UserError):
    """Base class for all errors raised by the Configure subsystem."""
    def __init__(self,msg):
        SCons.Errors.UserError.__init__(self,msg)
class ConfigureDryRunError(SConfError):
    """Raised when a file or directory needs to be updated during a Configure
    process, but the user requested a dry-run"""
    def __init__(self, target):
        # A File target means a test would have to be re-run; anything
        # else means the configure directory itself is missing.
        if isinstance(target, SCons.Node.FS.File):
            msg = 'Cannot update configure test "%s" within a dry-run.' % str(target)
        else:
            msg = 'Cannot create configure directory "%s" within a dry-run.' % str(target)
        SConfError.__init__(self, msg)
class ConfigureCacheError(SConfError):
    """Raised when the user explicitly requested cached results
    (cache_mode == CACHE) but a test has never been built."""
    def __init__(self, target):
        msg = '"%s" is not yet built and cache is forced.' % str(target)
        SConfError.__init__(self, msg)
# define actions for building text files
def _createSource(target, source, env):
    """Builder action: dump the test program text (a Value node in
    *source*) into the conftest source file *target*."""
    # 'with' closes the file even when the write raises
    # (the original left the handle open on error).
    with open(str(target[0]), "w") as fd:
        fd.write(source[0].get_contents().decode())
def _stringSource(target, source, env):
    """Log message for the source-file builder: the target name followed
    by the program text, each line prefixed with ' |'."""
    body = source[0].get_contents().decode().replace('\n', "\n |")
    return "%s <-\n |%s" % (str(target[0]), body)
class SConfBuildInfo(SCons.Node.FS.FileBuildInfo):
    """
    Special build info for targets of configure tests. Additional members
    are result (did the builder succeed last time?) and string, which
    contains messages of the original build phase.
    """
    __slots__ = ('result', 'string')

    def __init__(self):
        self.result = None # -> 0/None -> no error, != 0 error
        self.string = None # the stdout / stderr output when building the target

    def set_build_result(self, result, string):
        # Record the outcome of a test build so later runs can replay it.
        self.result = result
        self.string = string
class Streamer(object):
    """
    Tee-like sniffer for a writable file-like object: every write is
    captured in an in-memory buffer and, when a wrapped stream was given,
    forwarded to it as well.
    """
    def __init__(self, orig):
        self.orig = orig          # wrapped stream (may be None)
        self.s = io.StringIO()    # capture buffer

    def write(self, str):
        if self.orig:
            self.orig.write(str)
        try:
            self.s.write(str)
        except TypeError:
            # Python 2 StringIO rejects byte strings here
            # ("unicode argument expected"); decode and retry.
            self.s.write(str.decode())

    def writelines(self, lines):
        for line in lines:
            self.write(line + '\n')

    def getvalue(self):
        """
        Return everything written to orig since the Streamer was created.
        """
        return self.s.getvalue()

    def flush(self):
        if self.orig:
            self.orig.flush()
        self.s.flush()
class SConfBuildTask(SCons.Taskmaster.AlwaysTask):
    """
    This is almost the same as SCons.Script.BuildTask. Handles SConfErrors
    correctly and knows about the current cache_mode.
    """
    def display(self, message):
        # Route progress messages into the Configure log, if one is open.
        if sconf_global.logstream:
            sconf_global.logstream.write("scons: Configure: " + message + "\n")

    def display_cached_string(self, bi):
        """
        Logs the original builder messages, given the SConfBuildInfo instance
        bi.
        """
        if not isinstance(bi, SConfBuildInfo):
            SCons.Warnings.warn(SConfWarning,
                "The stored build information has an unexpected class: %s" % bi.__class__)
        else:
            self.display("The original builder output was:\n" +
                         (" |" + str(bi.string)).replace("\n", "\n |"))

    def failed(self):
        # check, if the reason was a ConfigureDryRunError or a
        # ConfigureCacheError and if yes, reraise the exception
        exc_type = self.exc_info()[0]
        if issubclass(exc_type, SConfError):
            # TODO pylint E0704: bare raise not inside except
            raise
        elif issubclass(exc_type, SCons.Errors.BuildError):
            # we ignore Build Errors (occurs, when a test doesn't pass)
            # Clear the exception to prevent the contained traceback
            # to build a reference cycle.
            self.exc_clear()
        else:
            self.display('Caught exception while building "%s":\n' %
                         self.targets[0])
            sys.excepthook(*self.exc_info())
        return SCons.Taskmaster.Task.failed(self)

    def collect_node_states(self):
        # returns (is_up_to_date, cached_error, cachable)
        # where is_up_to_date is 1, if the node(s) are up_to_date
        #       cached_error  is 1, if the node(s) are up_to_date, but the
        #                     build will fail
        #       cachable      is 0, if some nodes are not in our cache
        T = 0  # set to 1 to enable Trace() debugging output
        changed = False
        cached_error = False
        cachable = True
        for t in self.targets:
            if T: Trace('%s' % (t))
            bi = t.get_stored_info().binfo
            if isinstance(bi, SConfBuildInfo):
                if T: Trace(': SConfBuildInfo')
                if cache_mode == CACHE:
                    t.set_state(SCons.Node.up_to_date)
                    if T: Trace(': set_state(up_to-date)')
                else:
                    if T: Trace(': get_state() %s' % t.get_state())
                    if T: Trace(': changed() %s' % t.changed())
                    if (t.get_state() != SCons.Node.up_to_date and t.changed()):
                        changed = True
                    if T: Trace(': changed %s' % changed)
                    cached_error = cached_error or bi.result
            else:
                if T: Trace(': else')
                # the node hasn't been built in a SConf context or doesn't
                # exist
                cachable = False
                changed = ( t.get_state() != SCons.Node.up_to_date )
                if T: Trace(': changed %s' % changed)
            if T: Trace('\n')
        return (not changed, cached_error, cachable)

    def execute(self):
        # Build one configure-test target, honoring the global cache_mode
        # and recording the result in the .sconsign database.
        if not self.targets[0].has_builder():
            return

        sconf = sconf_global

        is_up_to_date, cached_error, cachable = self.collect_node_states()

        if cache_mode == CACHE and not cachable:
            raise ConfigureCacheError(self.targets[0])
        elif cache_mode == FORCE:
            is_up_to_date = 0

        if cached_error and is_up_to_date:
            self.display("Building \"%s\" failed in a previous run and all "
                         "its sources are up to date." % str(self.targets[0]))
            binfo = self.targets[0].get_stored_info().binfo
            self.display_cached_string(binfo)
            raise SCons.Errors.BuildError # will be 'caught' in self.failed
        elif is_up_to_date:
            self.display("\"%s\" is up to date." % str(self.targets[0]))
            binfo = self.targets[0].get_stored_info().binfo
            self.display_cached_string(binfo)
        elif dryrun:
            raise ConfigureDryRunError(self.targets[0])
        else:
            # note stdout and stderr are the same here
            s = sys.stdout = sys.stderr = Streamer(sys.stdout)
            try:
                env = self.targets[0].get_build_env()
                env['PSTDOUT'] = env['PSTDERR'] = s
                try:
                    sconf.cached = 0
                    self.targets[0].build()
                finally:
                    # always restore the log stream, even on failure
                    sys.stdout = sys.stderr = env['PSTDOUT'] = \
                                 env['PSTDERR'] = sconf.logstream
            except KeyboardInterrupt:
                raise
            except SystemExit:
                exc_value = sys.exc_info()[1]
                raise SCons.Errors.ExplicitExit(self.targets[0],exc_value.code)
            except Exception as e:
                # record the FAILED build (result=1) so the next run can
                # replay the error from cache
                for t in self.targets:
                    binfo = SConfBuildInfo()
                    binfo.merge(t.get_binfo())
                    binfo.set_build_result(1, s.getvalue())
                    sconsign_entry = SCons.SConsign.SConsignEntry()
                    sconsign_entry.binfo = binfo
                    #sconsign_entry.ninfo = self.get_ninfo()
                    # We'd like to do this as follows:
                    #    t.store_info(binfo)
                    # However, we need to store it as an SConfBuildInfo
                    # object, and store_info() will turn it into a
                    # regular FileNodeInfo if the target is itself a
                    # regular File.
                    sconsign = t.dir.sconsign()
                    sconsign.set_entry(t.name, sconsign_entry)
                    sconsign.merge()
                raise e
            else:
                # record the successful build (result=0) the same way
                for t in self.targets:
                    binfo = SConfBuildInfo()
                    binfo.merge(t.get_binfo())
                    binfo.set_build_result(0, s.getvalue())
                    sconsign_entry = SCons.SConsign.SConsignEntry()
                    sconsign_entry.binfo = binfo
                    #sconsign_entry.ninfo = self.get_ninfo()
                    # We'd like to do this as follows:
                    #    t.store_info(binfo)
                    # However, we need to store it as an SConfBuildInfo
                    # object, and store_info() will turn it into a
                    # regular FileNodeInfo if the target is itself a
                    # regular File.
                    sconsign = t.dir.sconsign()
                    sconsign.set_entry(t.name, sconsign_entry)
                    sconsign.merge()
class SConfBase(object):
    """This is simply a class to represent a configure context. After
    creating a SConf object, you can call any tests. After finished with your
    tests, be sure to call the Finish() method, which returns the modified
    environment.
    Some words about caching: In most cases, it is not necessary to cache
    Test results explicitly. Instead, we use the scons dependency checking
    mechanism. For example, if one wants to compile a test program
    (SConf.TryLink), the compiler is only called, if the program dependencies
    have changed. However, if the program could not be compiled in a former
    SConf run, we need to explicitly cache this error.
    """

    def __init__(self, env, custom_tests = {}, conf_dir='$CONFIGUREDIR',
                 log_file='$CONFIGURELOG', config_h = None, _depth = 0):
        """Constructor. Pass additional tests in the custom_tests-dictionary,
        e.g. custom_tests={'CheckPrivate':MyPrivateTest}, where MyPrivateTest
        defines a custom test.
        Note also the conf_dir and log_file arguments (you may want to
        build tests in the VariantDir, not in the SourceDir)
        """
        global SConfFS

        # Now create isolated override so setting source_decider doesn't affect parent Environment
        if cache_mode == FORCE:
            self.original_env = env
            self.env = env.Clone()

            # Set up the Decider() to force rebuilds by saying
            # that every source has changed. Note that we still
            # call the environment's underlying source decider so
            # that the correct .sconsign info will get calculated
            # and keep the build state consistent.
            def force_build(dependency, target, prev_ni,
                            repo_node=None,
                            env_decider=env.decide_source):
                try:
                    env_decider(dependency, target, prev_ni, repo_node)
                except Exception as e:
                    raise e
                return True

            if self.env.decide_source.__code__ is not force_build.__code__:
                self.env.Decider(force_build)

        else:
            self.env = env

        # print("Override env:%s"%env)

        if not SConfFS:
            SConfFS = SCons.Node.FS.default_fs or \
                      SCons.Node.FS.FS(env.fs.pathTop)
        if sconf_global is not None:
            # only one Configure context may be active at a time
            raise SCons.Errors.UserError
        if log_file is not None:
            log_file = SConfFS.File(env.subst(log_file))
        self.logfile = log_file
        self.logstream = None
        self.lastTarget = None
        self.depth = _depth
        self.cached = 0 # will be set, if all test results are cached

        # add default tests
        default_tests = {
            'CheckCC'            : CheckCC,
            'CheckCXX'           : CheckCXX,
            'CheckSHCC'          : CheckSHCC,
            'CheckSHCXX'         : CheckSHCXX,
            'CheckFunc'          : CheckFunc,
            'CheckType'          : CheckType,
            'CheckTypeSize'      : CheckTypeSize,
            'CheckDeclaration'   : CheckDeclaration,
            'CheckHeader'        : CheckHeader,
            'CheckCHeader'       : CheckCHeader,
            'CheckCXXHeader'     : CheckCXXHeader,
            'CheckLib'           : CheckLib,
            'CheckLibWithHeader' : CheckLibWithHeader,
            'CheckProg'          : CheckProg,
        }
        self.AddTests(default_tests)
        self.AddTests(custom_tests)
        self.confdir = SConfFS.Dir(env.subst(conf_dir))
        if config_h is not None:
            config_h = SConfFS.File(config_h)
        self.config_h = config_h
        self._startup()

    def Finish(self):
        """Call this method after finished with your tests:
                env = sconf.Finish()
        """
        self._shutdown()
        return self.env

    def Define(self, name, value = None, comment = None):
        """
        Define a pre processor symbol name, with the optional given value in the
        current config header.

        If value is None (default), then #define name is written. If value is not
        none, then #define name value is written.

        comment is a string which will be put as a C comment in the header, to explain the meaning of the value
        (appropriate C comments will be added automatically).
        """
        lines = []
        if comment:
            comment_str = "/* %s */" % comment
            lines.append(comment_str)

        if value is not None:
            define_str = "#define %s %s" % (name, value)
        else:
            define_str = "#define %s" % name
        lines.append(define_str)
        lines.append('')

        self.config_h_text = self.config_h_text + '\n'.join(lines)

    def BuildNodes(self, nodes):
        """
        Tries to build the given nodes immediately. Returns 1 on success,
        0 on error.
        """
        if self.logstream is not None:
            # override stdout / stderr to write in log file
            oldStdout = sys.stdout
            sys.stdout = self.logstream
            oldStderr = sys.stderr
            sys.stderr = self.logstream

        # the engine assumes the current path is the SConstruct directory ...
        old_fs_dir = SConfFS.getcwd()
        old_os_dir = os.getcwd()
        SConfFS.chdir(SConfFS.Top, change_os_dir=1)

        # Because we take responsibility here for writing out our
        # own .sconsign info (see SConfBuildTask.execute(), above),
        # we override the store_info() method with a null place-holder
        # so we really control how it gets written.
        for n in nodes:
            n.store_info = 0
            if not hasattr(n, 'attributes'):
                n.attributes = SCons.Node.Node.Attrs()
            n.attributes.keep_targetinfo = 1

            if True:
                # Some checkers have intermediate files (for example anything that compiles a c file into a program to run
                # Those files need to be set to not release their target info, otherwise taskmaster will throw a
                # Nonetype not callable
                for c in n.children(scan=False):
                    # Keep debug code here.
                    # print("Checking [%s] for builders and then setting keep_targetinfo"%c)
                    if c.has_builder():
                        n.store_info = 0
                        if not hasattr(c, 'attributes'):
                            c.attributes = SCons.Node.Node.Attrs()
                        c.attributes.keep_targetinfo = 1
                # pass

        ret = 1

        try:
            # ToDo: use user options for calc
            save_max_drift = SConfFS.get_max_drift()
            SConfFS.set_max_drift(0)
            tm = SCons.Taskmaster.Taskmaster(nodes, SConfBuildTask)
            # we don't want to build tests in parallel
            jobs = SCons.Job.Jobs(1, tm )
            jobs.run()
            for n in nodes:
                state = n.get_state()
                if (state != SCons.Node.executed and
                    state != SCons.Node.up_to_date):
                    # the node could not be built. we return 0 in this case
                    ret = 0
        finally:
            # restore the original working directories and drift setting
            SConfFS.set_max_drift(save_max_drift)
            os.chdir(old_os_dir)
            SConfFS.chdir(old_fs_dir, change_os_dir=0)
            if self.logstream is not None:
                # restore stdout / stderr
                sys.stdout = oldStdout
                sys.stderr = oldStderr
        return ret

    def pspawn_wrapper(self, sh, escape, cmd, args, env):
        """Wrapper function for handling piped spawns.

        This looks to the calling interface (in Action.py) like a "normal"
        spawn, but associates the call with the PSPAWN variable from
        the construction environment and with the streams to which we
        want the output logged. This gets slid into the construction
        environment as the SPAWN variable so Action.py doesn't have to
        know or care whether it's spawning a piped command or not.
        """
        return self.pspawn(sh, escape, cmd, args, env, self.logstream, self.logstream)

    def TryBuild(self, builder, text = None, extension = ""):
        """Low level TryBuild implementation. Normally you don't need to
        call that - you can use TryCompile / TryLink / TryRun instead
        """
        global _ac_build_counter

        # Make sure we have a PSPAWN value, and save the current
        # SPAWN value.
        try:
            self.pspawn = self.env['PSPAWN']
        except KeyError:
            raise SCons.Errors.UserError('Missing PSPAWN construction variable.')
        try:
            save_spawn = self.env['SPAWN']
        except KeyError:
            raise SCons.Errors.UserError('Missing SPAWN construction variable.')

        nodesToBeBuilt = []

        # each test gets a unique conftest_N file name
        f = "conftest_" + str(_ac_build_counter)
        pref = self.env.subst( builder.builder.prefix )
        suff = self.env.subst( builder.builder.suffix )
        target = self.confdir.File(pref + f + suff)

        try:
            # Slide our wrapper into the construction environment as
            # the SPAWN function.
            self.env['SPAWN'] = self.pspawn_wrapper
            sourcetext = self.env.Value(text)

            if text is not None:
                textFile = self.confdir.File(f + extension)
                textFileNode = self.env.SConfSourceBuilder(target=textFile,
                                                           source=sourcetext)
                nodesToBeBuilt.extend(textFileNode)
                source = textFileNode
            else:
                source = None

            nodes = builder(target = target, source = source)
            if not SCons.Util.is_List(nodes):
                nodes = [nodes]
            nodesToBeBuilt.extend(nodes)
            result = self.BuildNodes(nodesToBeBuilt)

        finally:
            self.env['SPAWN'] = save_spawn

        _ac_build_counter = _ac_build_counter + 1
        if result:
            self.lastTarget = nodes[0]
        else:
            self.lastTarget = None

        return result

    def TryAction(self, action, text = None, extension = ""):
        """Tries to execute the given action with optional source file
        contents <text> and optional source file extension <extension>,
        Returns the status (0 : failed, 1 : ok) and the contents of the
        output file.
        """
        builder = SCons.Builder.Builder(action=action)
        self.env.Append( BUILDERS = {'SConfActionBuilder' : builder} )
        ok = self.TryBuild(self.env.SConfActionBuilder, text, extension)
        # the builder is only needed for this one try; remove it again
        del self.env['BUILDERS']['SConfActionBuilder']
        if ok:
            outputStr = self.lastTarget.get_text_contents()
            return (1, outputStr)
        return (0, "")

    def TryCompile( self, text, extension):
        """Compiles the program given in text to an env.Object, using extension
        as file extension (e.g. '.c'). Returns 1, if compilation was
        successful, 0 otherwise. The target is saved in self.lastTarget (for
        further processing).
        """
        return self.TryBuild(self.env.Object, text, extension)

    def TryLink( self, text, extension ):
        """Compiles the program given in text to an executable env.Program,
        using extension as file extension (e.g. '.c'). Returns 1, if
        compilation was successful, 0 otherwise. The target is saved in
        self.lastTarget (for further processing).
        """
        return self.TryBuild(self.env.Program, text, extension )

    def TryRun(self, text, extension ):
        """Compiles and runs the program given in text, using extension
        as file extension (e.g. '.c'). Returns (1, outputStr) on success,
        (0, '') otherwise. The target (a file containing the program's stdout)
        is saved in self.lastTarget (for further processing).
        """
        ok = self.TryLink(text, extension)
        if( ok ):
            prog = self.lastTarget
            pname = prog.get_internal_path()
            # run the compiled program and capture its stdout in a .out file
            output = self.confdir.File(os.path.basename(pname)+'.out')
            node = self.env.Command(output, prog, [ [ pname, ">", "${TARGET}"] ])
            ok = self.BuildNodes(node)
            if ok:
                outputStr = SCons.Util.to_str(output.get_contents())
                return( 1, outputStr)
        return (0, "")

    class TestWrapper(object):
        """A wrapper around Tests (to ensure sanity)"""
        def __init__(self, test, sconf):
            self.test = test
            self.sconf = sconf
        def __call__(self, *args, **kw):
            if not self.sconf.active:
                raise SCons.Errors.UserError
            context = CheckContext(self.sconf)
            ret = self.test(context, *args, **kw)
            if self.sconf.config_h is not None:
                self.sconf.config_h_text = self.sconf.config_h_text + context.config_h
            # only displayed if the test itself never called Result()
            context.Result("error: no result")
            return ret

    def AddTest(self, test_name, test_instance):
        """Adds test_class to this SConf instance. It can be called with
        self.test_name(...)"""
        setattr(self, test_name, SConfBase.TestWrapper(test_instance, self))

    def AddTests(self, tests):
        """Adds all the tests given in the tests dictionary to this SConf
        instance
        """
        for name in list(tests.keys()):
            self.AddTest(name, tests[name])

    def _createDir( self, node ):
        # Create the configure directory (honoring dry-run mode).
        dirName = str(node)
        if dryrun:
            if not os.path.isdir( dirName ):
                raise ConfigureDryRunError(dirName)
        else:
            if not os.path.isdir( dirName ):
                os.makedirs( dirName )

    def _startup(self):
        """Private method. Set up logstream, and set the environment
        variables necessary for a piped build
        """
        global _ac_config_logs
        global sconf_global
        global SConfFS

        self.lastEnvFs = self.env.fs
        self.env.fs = SConfFS
        self._createDir(self.confdir)
        self.confdir.up().add_ignore( [self.confdir] )

        if self.logfile is not None and not dryrun:
            # truncate logfile, if SConf.Configure is called for the first time
            # in a build
            if self.logfile in _ac_config_logs:
                log_mode = "a"
            else:
                _ac_config_logs[self.logfile] = None
                log_mode = "w"
            fp = open(str(self.logfile), log_mode)
            self.logstream = SCons.Util.Unbuffered(fp)
            # logfile may stay in a build directory, so we tell
            # the build system not to override it with a eventually
            # existing file with the same name in the source directory
            self.logfile.dir.add_ignore( [self.logfile] )

            # log the caller's file/line so the config.log is traceable
            tb = traceback.extract_stack()[-3-self.depth]
            old_fs_dir = SConfFS.getcwd()
            SConfFS.chdir(SConfFS.Top, change_os_dir=0)
            self.logstream.write('file %s,line %d:\n\tConfigure(confdir = %s)\n' %
                                 (tb[0], tb[1], str(self.confdir)) )
            SConfFS.chdir(old_fs_dir)
        else:
            self.logstream = None

        # we use a special builder to create source files from TEXT
        action = SCons.Action.Action(_createSource,
                                     _stringSource)
        sconfSrcBld = SCons.Builder.Builder(action=action)
        self.env.Append( BUILDERS={'SConfSourceBuilder':sconfSrcBld} )
        self.config_h_text = _ac_config_hs.get(self.config_h, "")
        self.active = 1
        # only one SConf instance should be active at a time ...
        sconf_global = self

    def _shutdown(self):
        """Private method. Reset to non-piped spawn"""
        global sconf_global, _ac_config_hs

        if not self.active:
            raise SCons.Errors.UserError("Finish may be called only once!")
        if self.logstream is not None and not dryrun:
            self.logstream.write("\n")
            self.logstream.close()
            self.logstream = None

        # Now reset the decider if we changed it due to --config=force
        # We saved original Environment passed in and cloned it to isolate
        # it from being changed.
        if cache_mode == FORCE:
            self.env.Decider(self.original_env.decide_source)

        # remove the SConfSourceBuilder from the environment
        blds = self.env['BUILDERS']
        del blds['SConfSourceBuilder']
        self.env.Replace( BUILDERS=blds )

        self.active = 0
        sconf_global = None
        if self.config_h is not None:
            _ac_config_hs[self.config_h] = self.config_h_text
        self.env.fs = self.lastEnvFs
class CheckContext(object):
    """Provides a context for configure tests. Defines how a test writes to the
    screen and log file.

    A typical test is just a callable with an instance of CheckContext as
    first argument:

        def CheckCustom(context, ...):
            context.Message('Checking my weird test ... ')
            ret = myWeirdTestFunction(...)
            context.Result(ret)

    Often, myWeirdTestFunction will be one of
    context.TryCompile/context.TryLink/context.TryRun. The results of
    those are cached, for they are only rebuild, if the dependencies have
    changed.
    """

    def __init__(self, sconf):
        """Constructor. Pass the corresponding SConf instance."""
        self.sconf = sconf
        self.did_show_result = 0

        # for Conftest.py:
        self.vardict = {}
        self.havedict = {}
        self.headerfilename = None
        self.config_h = "" # config_h text will be stored here
        # we don't regenerate the config.h file after each test. That means,
        # that tests won't be able to include the config.h file, and so
        # they can't do an #ifdef HAVE_XXX_H. This shouldn't be a major
        # issue, though. If it turns out, that we need to include config.h
        # in tests, we must ensure, that the dependencies are worked out
        # correctly. Note that we can't use Conftest.py's support for config.h,
        # cause we will need to specify a builder for the config.h file ...

    def Message(self, text):
        """Inform about what we are doing right now, e.g.
        'Checking for SOMETHING ... '
        """
        self.Display(text)
        self.sconf.cached = 1
        self.did_show_result = 0

    def Result(self, res):
        """Inform about the result of the test. If res is not a string, displays
        'yes' or 'no' depending on whether res is evaluated as true or false.
        The result is only displayed when self.did_show_result is not set.
        """
        if isinstance(res, str):
            text = res
        elif res:
            text = "yes"
        else:
            text = "no"

        if self.did_show_result == 0:
            # Didn't show result yet, do it now.
            self.Display(text + "\n")
            self.did_show_result = 1

    # thin delegations to the owning SConfBase instance
    def TryBuild(self, *args, **kw):
        return self.sconf.TryBuild(*args, **kw)

    def TryAction(self, *args, **kw):
        return self.sconf.TryAction(*args, **kw)

    def TryCompile(self, *args, **kw):
        return self.sconf.TryCompile(*args, **kw)

    def TryLink(self, *args, **kw):
        return self.sconf.TryLink(*args, **kw)

    def TryRun(self, *args, **kw):
        return self.sconf.TryRun(*args, **kw)

    def __getattr__( self, attr ):
        # 'env' and 'lastTarget' are forwarded to the owning SConfBase.
        if( attr == 'env' ):
            return self.sconf.env
        elif( attr == 'lastTarget' ):
            return self.sconf.lastTarget
        else:
            raise AttributeError("CheckContext instance has no attribute '%s'" % attr)

    #### Stuff used by Conftest.py (look there for explanations).
    # NOTE: these return Conftest-style error flags, i.e. a TRUE value
    # means the build FAILED (hence the 'not' on the Try* results).

    def BuildProg(self, text, ext):
        self.sconf.cached = 1
        # TODO: should use self.vardict for $CC, $CPPFLAGS, etc.
        return not self.TryBuild(self.env.Program, text, ext)

    def CompileProg(self, text, ext):
        self.sconf.cached = 1
        # TODO: should use self.vardict for $CC, $CPPFLAGS, etc.
        return not self.TryBuild(self.env.Object, text, ext)

    def CompileSharedObject(self, text, ext):
        self.sconf.cached = 1
        # TODO: should use self.vardict for $SHCC, $CPPFLAGS, etc.
        return not self.TryBuild(self.env.SharedObject, text, ext)

    def RunProg(self, text, ext):
        self.sconf.cached = 1
        # TODO: should use self.vardict for $CC, $CPPFLAGS, etc.
        st, out = self.TryRun(text, ext)
        return not st, out

    def AppendLIBS(self, lib_name_list):
        oldLIBS = self.env.get( 'LIBS', [] )
        self.env.Append(LIBS = lib_name_list)
        return oldLIBS

    def PrependLIBS(self, lib_name_list):
        oldLIBS = self.env.get( 'LIBS', [] )
        self.env.Prepend(LIBS = lib_name_list)
        return oldLIBS

    def SetLIBS(self, val):
        oldLIBS = self.env.get( 'LIBS', [] )
        self.env.Replace(LIBS = val)
        return oldLIBS

    def Display(self, msg):
        if self.sconf.cached:
            # We assume that Display is called twice for each test here
            # once for the Checking for ... message and once for the result.
            # The self.sconf.cached flag can only be set between those calls
            msg = "(cached) " + msg
            self.sconf.cached = 0
        progress_display(msg, append_newline=0)
        self.Log("scons: Configure: " + msg + "\n")

    def Log(self, msg):
        if self.sconf.logstream is not None:
            self.sconf.logstream.write(msg)

    #### End of stuff used by Conftest.py.
def SConf(*args, **kw):
    """Factory for configure contexts.

    Returns a real SConfBase, or a Null object when the current build
    type (e.g. 'clean' or 'help') was explicitly disabled via a keyword
    argument like clean=False / help=False.
    """
    if not kw.get(build_type, True):
        return SCons.Util.Null()
    kw['_depth'] = kw.get('_depth', 0) + 1
    # strip the build-type keywords before handing off to SConfBase
    for bt in build_types:
        kw.pop(bt, None)
    return SConfBase(*args, **kw)
def CheckFunc(context, function_name, header=None, language=None):
    """Configure check for *function_name*; true when the check passed
    (the Conftest error flag is negated)."""
    err = SCons.Conftest.CheckFunc(context, function_name,
                                   header=header, language=language)
    context.did_show_result = 1
    return not err
def CheckType(context, type_name, includes="", language=None):
    """Configure check for a type definition; true when the check passed."""
    err = SCons.Conftest.CheckType(context, type_name,
                                   header=includes, language=language)
    context.did_show_result = 1
    return not err
def CheckTypeSize(context, type_name, includes="", language=None, expect=None):
    """Configure check for the size of *type_name*.

    Unlike the other Check* wrappers, the raw Conftest return value is
    passed through un-negated.
    """
    size = SCons.Conftest.CheckTypeSize(context, type_name,
                                        header=includes, language=language,
                                        expect=expect)
    context.did_show_result = 1
    return size
def CheckDeclaration(context, declaration, includes="", language=None):
    """Configure check for a declaration; true when the check passed."""
    err = SCons.Conftest.CheckDeclaration(context, declaration,
                                          includes=includes,
                                          language=language)
    context.did_show_result = 1
    return not err
def createIncludesFromHeaders(headers, leaveLast, include_quotes = '""'):
    # used by CheckHeader and CheckLibWithHeader to produce C - #include
    # statements from the specified header (list)
    if not SCons.Util.is_List(headers):
        headers = [headers]
    if leaveLast:
        # the last header is returned separately instead of included
        lastHeader = headers[-1]
        headers = headers[:-1]
    else:
        lastHeader = None
    includes = ["#include %s%s%s\n"
                % (include_quotes[0], h, include_quotes[1])
                for h in headers]
    return ''.join(includes), lastHeader
def CheckHeader(context, header, include_quotes = '<>', language = None):
    """
    A test for a C or C++ header file. True when the header compiles.
    """
    # everything but the last header becomes a #include prologue
    prog_prefix, hdr_to_check = createIncludesFromHeaders(header, 1,
                                                          include_quotes)
    err = SCons.Conftest.CheckHeader(context, hdr_to_check, prog_prefix,
                                     language=language,
                                     include_quotes=include_quotes)
    context.did_show_result = 1
    return not err
def CheckCC(context):
    """Configure check that the C compiler works; true on success."""
    failed = SCons.Conftest.CheckCC(context)
    context.did_show_result = 1
    return not failed
def CheckCXX(context):
    """Configure check that the C++ compiler works; true on success."""
    failed = SCons.Conftest.CheckCXX(context)
    context.did_show_result = 1
    return not failed
def CheckSHCC(context):
    """Verify that the shared-object C compiler works.  True on success."""
    failed = SCons.Conftest.CheckSHCC(context)
    context.did_show_result = 1
    return not failed
def CheckSHCXX(context):
    """Verify that the shared-object C++ compiler works.  True on success."""
    failed = SCons.Conftest.CheckSHCXX(context)
    context.did_show_result = 1
    return not failed
# Bram: Make this function obsolete? CheckHeader() is more generic.
def CheckCHeader(context, header, include_quotes = '""'):
    """A test for a C header file.  Thin wrapper over CheckHeader()."""
    return CheckHeader(context, header,
                       include_quotes = include_quotes, language = "C")
# Bram: Make this function obsolete? CheckHeader() is more generic.
def CheckCXXHeader(context, header, include_quotes = '""'):
    """A test for a C++ header file.  Thin wrapper over CheckHeader()."""
    return CheckHeader(context, header,
                       include_quotes = include_quotes, language = "C++")
def CheckLib(context, library = None, symbol = "main",
             header = None, language = None, autoadd = 1):
    """
    A test for a library. See also CheckLibWithHeader.
    Note that library may also be None to test whether the given symbol
    compiles without flags.
    """
    # Normalise `library`: a falsy value means "no library", a bare value
    # gets wrapped into a list.
    if not library:
        library = [None]
    elif not SCons.Util.is_List(library):
        library = [library]
    # ToDo: accept path for the library
    failed = SCons.Conftest.CheckLib(context, library, symbol, header = header,
                                     language = language, autoadd = autoadd)
    context.did_show_result = 1
    return not failed
# XXX
# Bram: Can only include one header and can't use #ifdef HAVE_HEADER_H.
def CheckLibWithHeader(context, libs, header, language,
                       call = None, autoadd = 1):
    # ToDo: accept path for library. Support system header files.
    """
    Another (more sophisticated) test for a library.
    Checks, if library and header is available for language (may be 'C'
    or 'CXX'). Call maybe be a valid expression _with_ a trailing ';'.
    As in CheckLib, we support library=None, to test if the call compiles
    without extra link flags.
    """
    prog_prefix, dummy = createIncludesFromHeaders(header, 0)
    # An explicitly empty list means "no library"; a bare value is wrapped.
    if libs == []:
        libs = [None]
    if not SCons.Util.is_List(libs):
        libs = [libs]
    failed = SCons.Conftest.CheckLib(context, libs, None, prog_prefix,
                                     call = call, language = language,
                                     autoadd = autoadd)
    context.did_show_result = 1
    return not failed
def CheckProg(context, prog_name):
    """Simple check if a program exists in the path. Returns the path
    for the application, or None if not found.
    """
    found_path = SCons.Conftest.CheckProg(context, prog_name)
    context.did_show_result = 1
    return found_path
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
$(function() {
// global elements used by the functions.
// the 'sidebarbutton' element is defined as global after its
// creation, in the add_sidebar_button function
// Cache the jQuery wrappers for the page regions we manipulate.
var bodywrapper = $('.bodywrapper');
var sidebar = $('.sphinxsidebar');
var sidebarwrapper = $('.sphinxsidebarwrapper');

// for some reason, the document has no sidebar; do not run into errors
if (!sidebar.length) return;

// original margin-left of the bodywrapper and width of the sidebar
// with the sidebar expanded
var bw_margin_expanded = bodywrapper.css('margin-left');
var ssb_width_expanded = sidebar.width();

// margin-left of the bodywrapper and width of the sidebar
// with the sidebar collapsed (hard-coded; must agree with the theme CSS)
var bw_margin_collapsed = '.8em';
var ssb_width_collapsed = '.8em';

// colors used by the current theme, sampled from existing elements
var dark_color = $('.related').css('background-color');
var light_color = $('.document').css('background-color');
// True when the sidebar content wrapper is hidden (i.e. collapsed).
function sidebar_is_collapsed() {
  return sidebarwrapper.is(':not(:visible)');
}
// Flip the sidebar between its collapsed and expanded states.
function toggle_sidebar() {
  sidebar_is_collapsed() ? expand_sidebar() : collapse_sidebar();
}
// Hide the sidebar content, shrink the sidebar column, widen the body,
// flip the button glyph/tooltip, and persist the state in a cookie.
function collapse_sidebar() {
  sidebarwrapper.hide();
  sidebar.css('width', ssb_width_collapsed);
  bodywrapper.css('margin-left', bw_margin_collapsed);
  sidebarbutton.css({
    'margin-left': '0',
    'height': bodywrapper.height()
  });
  sidebarbutton.find('span').text('»');
  sidebarbutton.attr('title', _('Expand sidebar'));
  document.cookie = 'sidebar=collapsed';
}
// Restore the expanded layout (inverse of collapse_sidebar) and persist it.
function expand_sidebar() {
  bodywrapper.css('margin-left', bw_margin_expanded);
  sidebar.css('width', ssb_width_expanded);
  sidebarwrapper.show();
  sidebarbutton.css({
    'margin-left': ssb_width_expanded-12,
    'height': bodywrapper.height()
  });
  sidebarbutton.find('span').text('«');
  sidebarbutton.attr('title', _('Collapse sidebar'));
  document.cookie = 'sidebar=expanded';
}
// Create the «/» toggle button inside the sidebar and wire up its
// click/hover behaviour.  Also (re)samples light_color from the button so
// the hover effect matches the theme.
function add_sidebar_button() {
  sidebarwrapper.css({
    'float': 'left',
    'margin-right': '0',
    'width': ssb_width_expanded - 28
  });
  // create the button
  sidebar.append(
    '<div id="sidebarbutton"><span>«</span></div>'
  );
  // NOTE(review): this local shadows the closure-level `sidebarbutton`
  // declared after this function is called; both resolve to the same DOM node.
  var sidebarbutton = $('#sidebarbutton');
  light_color = sidebarbutton.css('background-color');
  // find the height of the viewport to center the '<<' in the page
  var viewport_height;
  if (window.innerHeight)
    viewport_height = window.innerHeight;
  else
    viewport_height = $(window).height();
  sidebarbutton.find('span').css({
    'display': 'block',
    'margin-top': (viewport_height - sidebar.position().top - 20) / 2
  });
  sidebarbutton.click(toggle_sidebar);
  sidebarbutton.attr('title', _('Collapse sidebar'));
  sidebarbutton.css({
    'color': '#FFFFFF',
    'border-left': '1px solid ' + dark_color,
    'font-size': '1.2em',
    'cursor': 'pointer',
    'height': bodywrapper.height(),
    'padding-top': '1px',
    'margin-left': ssb_width_expanded - 12
  });
  // Highlight on hover using the theme colors sampled above.
  sidebarbutton.hover(
    function () {
      $(this).css('background-color', dark_color);
    },
    function () {
      $(this).css('background-color', light_color);
    }
  );
}
// Restore the collapsed/expanded state saved in the 'sidebar' cookie,
// if any, by invoking the matching transition exactly once.
function set_position_from_cookie() {
  if (!document.cookie)
    return;
  var items = document.cookie.split(';');
  for(var k=0; k<items.length; k++) {
    var key_val = items[k].split('=');
    // NOTE(review): the un-global regex strips only the FIRST space —
    // assumes cookie fragments carry at most one leading space.
    var key = key_val[0].replace(/ /, ""); // strip leading spaces
    if (key == 'sidebar') {
      var value = key_val[1];
      if ((value == 'collapsed') && (!sidebar_is_collapsed()))
        collapse_sidebar();
      else if ((value == 'expanded') && (sidebar_is_collapsed()))
        expand_sidebar();
    }
  }
}
// Build the toggle button, then apply any state saved in the cookie.
add_sidebar_button();
var sidebarbutton = $('#sidebarbutton');
set_position_from_cookie();
});
import collections.abc
import contextlib
import copy
import itertools
import random
import string
import threading
from functools import total_ordering, wraps
from typing import TYPE_CHECKING, Iterable, List, Optional, Union
from loguru import logger
from sqlalchemy import Column, Integer, String, Unicode
from flexget import config_schema, db_schema
from flexget.db_schema import VersionedBaseMeta
from flexget.entry import Entry, EntryState, EntryUnicodeError
from flexget.event import event, fire_event
from flexget.manager import Session
from flexget.plugin import (
DependencyError,
PluginError,
PluginWarning,
get_plugins,
phase_methods,
plugin_schemas,
task_phases,
)
from flexget.plugin import plugins as all_plugins
from flexget.terminal import capture_console
from flexget.utils import requests
from flexget.utils.database import with_session
from flexget.utils.simple_persistence import SimpleTaskPersistence
from flexget.utils.sqlalchemy_utils import ContextSession
from flexget.utils.template import FlexGetTemplate, render_from_task
from flexget.utils.tools import MergeException, get_config_hash, merge_dict_from_to
# Module-level logger bound with this module's name for filtering.
logger = logger.bind(name='task')
if TYPE_CHECKING:
    # Give type checkers a concrete base; at runtime the versioned
    # declarative base is generated dynamically below.
    Base = VersionedBaseMeta
else:
    Base = db_schema.versioned_base('feed', 0)
class TaskConfigHash(Base):
    """Stores the config hash for tasks so that we can tell if the config has changed since last run."""

    __tablename__ = 'feed_config_hash'

    # Surrogate primary key.
    id = Column(Integer, primary_key=True)
    # Task name; stored in a column called 'name' for historical reasons.
    task = Column('name', Unicode, index=True, nullable=False)
    # Hash of the task's configuration as of the last run.
    hash = Column('hash', String)

    def __repr__(self) -> str:
        return f'<TaskConfigHash(task={self.task},hash={self.hash})>'
@with_session
def config_changed(task: Optional[str] = None, session: ContextSession = None) -> None:
    """
    Force the config_modified flag to come out true on the next run of `task`
    by deleting its stored config hash. Used when the db changes and all
    entries need to be reprocessed.

    .. WARNING: DO NOT (FURTHER) USE FROM PLUGINS

    :param task: Name of the task. If `None`, will be set for all tasks.
    :param session: sqlalchemy Session instance
    """
    logger.debug('Marking config for {} as changed.', (task or 'all tasks'))
    hashes = session.query(TaskConfigHash)
    if task:
        hashes = hashes.filter(TaskConfigHash.task == task)
    hashes.delete()
def use_task_logging(func):
    """Decorator: run the wrapped Task method inside a task-scoped logger
    context, optionally capturing console output to ``self.output``."""

    @wraps(func)
    def wrapper(self, *args, **kw):
        with contextlib.ExitStack() as stack:
            # Tag every log message of this run with task name/id/session.
            stack.enter_context(
                logger.contextualize(
                    task=self.name, task_id=self.id, session_id=self.session_id
                )
            )
            # Capture console output if configured to do so
            if self.output:
                stack.enter_context(capture_console(self.output))
            return func(self, *args, **kw)

    return wrapper
class EntryIterator:
    """Lazy view over the entries of a task that are in the given state(s).

    Emulates the old task.accepted/rejected/failed/entries properties.
    """

    def __init__(self, entries: List[Entry], states: Union[EntryState, Iterable[EntryState]]):
        self.all_entries = entries
        if isinstance(states, EntryState):
            states = [states]
        self.filter = lambda e: e._state in states

    def __iter__(self) -> Iterable[Entry]:
        return filter(self.filter, self.all_entries)

    def __bool__(self):
        # Truthy when any *truthy* entry matches (mirrors any(...) semantics).
        return any(self)

    def __len__(self):
        count = 0
        for _ in self:
            count += 1
        return count

    def __add__(self, other):
        return itertools.chain(self, other)

    def __radd__(self, other):
        return itertools.chain(other, self)

    def __getitem__(self, item) -> Union[Entry, Iterable[Entry]]:
        if isinstance(item, slice):
            return list(itertools.islice(self, item.start, item.stop))
        if not isinstance(item, int):
            raise ValueError('Index must be integer.')
        for position, candidate in enumerate(self):
            if position == item:
                return candidate
        raise IndexError(f'{item} is out of bounds')

    def reverse(self):
        # NOTE(review): historically this sorts the backing list descending
        # rather than reversing its order; preserved as-is.
        self.all_entries.sort(reverse=True)

    def sort(self, *args, **kwargs):
        self.all_entries.sort(*args, **kwargs)
class EntryContainer(list):
    """A list of entries plus state-filtered views over it
    (entries/accepted/rejected/failed/undecided)."""

    def __init__(self, iterable: Optional[list] = None):
        super().__init__(iterable or [])
        # Views share `self` as backing storage, so they always stay current.
        self._entries = EntryIterator(self, [EntryState.UNDECIDED, EntryState.ACCEPTED])
        self._accepted = EntryIterator(self, EntryState.ACCEPTED)  # may still be rejected later
        self._rejected = EntryIterator(self, EntryState.REJECTED)  # final; cannot be accepted
        self._failed = EntryIterator(self, EntryState.FAILED)
        self._undecided = EntryIterator(self, EntryState.UNDECIDED)  # default state

    # Read-only view properties
    entries: EntryIterator = property(lambda self: self._entries)
    accepted: EntryIterator = property(lambda self: self._accepted)
    rejected: EntryIterator = property(lambda self: self._rejected)
    failed: EntryIterator = property(lambda self: self._failed)
    undecided: EntryIterator = property(lambda self: self._undecided)

    def __repr__(self) -> str:
        return '<EntryContainer(%s)>' % list.__repr__(self)
class TaskAbort(Exception):
    """Raised to halt a task's execution.

    ``silent`` suppresses the warning that is normally logged on abort.
    """

    def __init__(self, reason: str, silent: bool = False) -> None:
        self.reason, self.silent = reason, silent

    def __repr__(self):
        return 'TaskAbort(reason={}, silent={})'.format(self.reason, self.silent)
@total_ordering
class Task:
    """
    Represents one task in the configuration.

    **Fires events:**

    * task.execute.before_plugin

      Before a plugin is about to be executed. Note that since this will also include all
      builtin plugins the amount of calls can be quite high

      ``parameters: task, keyword``

    * task.execute.after_plugin

      After a plugin has been executed.

      ``parameters: task, keyword``

    * task.execute.started

      Before a task starts execution

    * task.execute.completed

      After task execution has been completed

      ``parameters: task``
    """

    # Used to determine task order, when priority is the same
    _counter = itertools.count()

    # Default and absolute maximum for the number of allowed re-runs
    RERUN_DEFAULT = 5
    RERUN_MAX = 100

    def __init__(
        self,
        manager,
        name,
        config=None,
        options=None,
        output=None,
        session_id=None,
        priority=None,
        suppress_warnings=None,
    ):
        """
        :param Manager manager: Manager instance.
        :param string name: Name of the task.
        :param dict config: Task configuration.
        :param options: dict or argparse namespace with options for this task
        :param output: A filelike that all console output will be sent to for this task.
        :param session_id: Session id that will be attached to all log messages for filtering
        :param priority: If multiple tasks are waiting to run, the task with the lowest priority will be run first.
            The default is 0, if the cron option is set though, the default is lowered to 10.
        :param suppress_warnings: Allows suppressing log warning about missing plugin in key phases
        """
        self.name = str(name)
        # Random id used to correlate log messages from a single task run
        self.id = ''.join(random.choice(string.digits) for _ in range(6))
        self.manager = manager
        if config is None:
            config = manager.config['tasks'].get(name, {})
        self.config = copy.deepcopy(config)
        self.prepared_config = None
        if options is None:
            options = copy.copy(self.manager.options.execute)
        elif isinstance(options, dict):
            options_namespace = copy.copy(self.manager.options.execute)
            options_namespace.__dict__.update(options)
            options = options_namespace
        # If execution hasn't specifically set the `allow_manual` flag, set it to False by default
        if not hasattr(options, 'allow_manual'):
            options.allow_manual = False
        self.options = options
        self.output = output
        self.session_id = session_id
        self.suppress_warnings = suppress_warnings or []
        # BUG FIX: an unconditional `self.priority = priority` used to follow
        # this conditional, clobbering the cron-aware default with None
        # whenever no explicit priority was passed. Only assign the explicit
        # value when one was actually given.
        if priority is None:
            self.priority = 10 if self.options.cron else 0
        else:
            self.priority = priority
        self._count = next(self._counter)
        self.finished_event = threading.Event()

        # simple persistence
        self.simple_persistence = SimpleTaskPersistence(self)

        # rerun related flags and values
        self._rerun_count = 0
        self._max_reruns = Task.RERUN_DEFAULT
        self._reruns_locked = False

        self.config_modified = None

        # Tasks whose name starts with '_' are disabled by convention
        self.enabled = not self.name.startswith('_')

        # These are just to query what happened in task. Call task.abort to set.
        self.aborted = False
        self.abort_reason = None
        self.silent_abort = False

        # Per-plugin database session; set only while a plugin is running
        self.session = None

        self.requests = requests.Session()

        # List of all entries in the task
        self._all_entries = EntryContainer()
        self._rerun = False

        self.disabled_phases = []
        self.disabled_plugins = []

        # current state
        self.current_phase = None
        self.current_plugin = None
        self.traceback: Optional[str] = None

    @property
    def max_reruns(self):
        """How many times the task can be re-run before stopping."""
        return self._max_reruns

    @max_reruns.setter
    def max_reruns(self, value):
        """Set new maximum value for reruns unless property has been locked."""
        if not self._reruns_locked:
            self._max_reruns = value
        else:
            logger.debug('max_reruns is locked, {} tried to modify it', self.current_plugin)

    def lock_reruns(self):
        """Prevent modification of the max_reruns property."""
        logger.debug('Enabling rerun lock')
        self._reruns_locked = True

    def unlock_reruns(self):
        """Allow modification of the max_reruns property."""
        logger.debug('Releasing rerun lock')
        self._reruns_locked = False

    @property
    def reruns_locked(self):
        """True while a plugin holds the rerun lock."""
        return self._reruns_locked

    @property
    def is_rerun(self):
        """True when the current execution is a re-run rather than the first pass."""
        return bool(self._rerun_count)

    @property
    def rerun_count(self):
        """Number of re-runs performed so far during this execution."""
        return self._rerun_count

    @property
    def undecided(self):
        """
        .. deprecated:: Use API v3

        .. note:: We did not migrate to v3

                  If I remember correctly the idea was to make v3 signature
                  on_task_xxx(task, config, entries)

                  Param entries would be EntryContainer, which has convenience
                  iterator methods:

                  - entries.accepted
                  - entries.failed
                  - etc, which you see here
        """
        return self.all_entries.undecided

    @property
    def failed(self):
        """
        .. deprecated:: Use API v3
        """
        return self.all_entries.failed

    @property
    def rejected(self):
        """
        .. deprecated:: Use API v3
        """
        return self.all_entries.rejected

    @property
    def accepted(self):
        """
        .. deprecated:: Use API v3
        """
        return self.all_entries.accepted

    @property
    def entries(self):
        """
        .. deprecated:: Use API v3
        """
        return self.all_entries.entries

    @property
    def all_entries(self):
        """
        .. deprecated:: Use API v3
        """
        return self._all_entries

    # Ordering is by (priority, creation order); total_ordering fills the rest.
    # NOTE(review): __eq__ without __hash__ leaves Task unhashable — confirm
    # nothing relies on hashing tasks before changing.
    def __lt__(self, other):
        return (self.priority, self._count) < (other.priority, other._count)

    def __eq__(self, other):
        return (self.priority, self._count) == (other.priority, other._count)

    def __str__(self):
        return f'<Task(name={self.name},aborted={self.aborted})>'

    def disable_phase(self, phase):
        """Disable ``phase`` from execution.

        :param string phase: Name of ``phase``
        :raises ValueError: *phase* could not be found.
        """
        if phase not in task_phases:
            raise ValueError('%s is not a valid phase' % phase)
        if phase not in self.disabled_phases:
            logger.debug('Disabling {} phase', phase)
            self.disabled_phases.append(phase)

    def disable_plugin(self, plugin):
        """Disable ``plugin`` from execution.

        :param string plugin: Name of ``plugin``
        :raises ValueError: *plugin* could not be found.
        """
        if plugin not in all_plugins:
            raise ValueError(f'`{plugin}` is not a valid plugin.')
        self.disabled_plugins.append(plugin)

    def abort(self, reason='Unknown', silent=False, traceback: Optional[str] = None):
        """Abort this task execution, no more plugins will be executed except the abort handling ones."""
        self.aborted = True
        self.abort_reason = reason
        self.silent_abort = silent
        self.traceback = traceback
        if not self.silent_abort:
            logger.warning('Aborting task (plugin: {})', self.current_plugin)
        else:
            logger.debug('Aborting task (plugin: {})', self.current_plugin)
        raise TaskAbort(reason, silent=silent)

    def find_entry(self, category='entries', **values):
        """
        Find and return :class:`~flexget.entry.Entry` with given attributes from task or None

        :param string category: entries, accepted, rejected or failed. Defaults to entries.
        :param values: Key values of entries to be searched
        :return: Entry or None
        """
        cat = getattr(self, category)
        if not isinstance(cat, EntryIterator):
            raise TypeError('category must be a EntryIterator')
        for entry in cat:
            # Entry matches only if every given key exists and is equal
            for k, v in values.items():
                if not (k in entry and entry[k] == v):
                    break
            else:
                return entry
        return None

    def plugins(self, phase=None):
        """Get currently enabled plugins.

        :param string phase:
          Optional, limits to plugins currently configured on given phase, sorted in phase order.
        :return:
          An iterator over configured :class:`flexget.plugin.PluginInfo` instances enabled on this task.
        """
        if phase:
            plugins = sorted(
                get_plugins(phase=phase), key=lambda p: p.phase_handlers[phase], reverse=True
            )
        else:
            plugins = iter(all_plugins.values())
        return (p for p in plugins if p.name in self.config or p.builtin)

    def __run_task_phase(self, phase):
        """Executes task phase, ie. call all enabled plugins on the task.

        Fires events:

        * task.execute.before_plugin
        * task.execute.after_plugin

        :param string phase: Name of the phase
        """
        if phase not in phase_methods:
            raise Exception('%s is not a valid task phase' % phase)
        # warn if no inputs, filters or outputs in the task
        if phase in ['input', 'filter', 'output']:
            if not self.manager.unit_test:
                # Check that there is at least one manually configured plugin for these phases
                for p in self.plugins(phase):
                    if not p.builtin:
                        break
                else:
                    if phase not in self.suppress_warnings:
                        if phase == 'filter':
                            logger.warning(
                                'Task does not have any filter plugins to accept entries. '
                                'You need at least one to accept the entries you want.'
                            )
                        else:
                            logger.warning(
                                'Task doesn\'t have any {} plugins, you should add (at least) one!',
                                phase,
                            )

        for plugin in self.plugins(phase):
            # Abort this phase if one of the plugins disables it
            if phase in self.disabled_phases:
                return
            if plugin.name in self.disabled_plugins:
                continue
            # store execute info, except during entry events
            self.current_phase = phase
            self.current_plugin = plugin.name

            if plugin.api_ver == 1:
                # backwards compatibility
                # pass method only task (old behaviour)
                args = (self,)
            else:
                # pass method task, copy of config (so plugin cannot modify it)
                args = (self, copy.copy(self.config.get(plugin.name)))

            # Hack to make task.session only active for a single plugin
            with Session() as session:
                self.session = session
                try:
                    fire_event('task.execute.before_plugin', self, plugin.name)
                    response = self.__run_plugin(plugin, phase, args)
                    if phase == 'input' and response:
                        # add entries returned by input to self.all_entries
                        for e in response:
                            e.task = self
                            self.all_entries.append(e)
                finally:
                    fire_event('task.execute.after_plugin', self, plugin.name)
                self.session = None
        # check config hash for changes at the end of 'prepare' phase
        if phase == 'prepare':
            self.check_config_hash()

    def __run_plugin(self, plugin, phase, args=None, kwargs=None):
        """
        Execute given plugins phase method, with supplied args and kwargs.
        If plugin throws unexpected exceptions :meth:`abort` will be called.

        :param PluginInfo plugin: Plugin to be executed
        :param string phase: Name of the phase to be executed
        :param args: Passed to the plugin
        :param kwargs: Passed to the plugin
        """
        keyword = plugin.name
        method = plugin.phase_handlers[phase]
        if args is None:
            args = []
        if kwargs is None:
            kwargs = {}

        # call the plugin
        try:
            result = method(*args, **kwargs)
            # We exhaust any iterator inputs here to make sure we catch exceptions properly.
            if isinstance(result, collections.abc.Iterable):
                result = list(result)
            return result
        except TaskAbort:
            raise
        except PluginWarning as warn:
            # check if this warning should be logged only once (may keep repeating)
            if warn.kwargs.get('log_once', False):
                from flexget.utils.log import log_once

                log_once(warn.value, warn.logger)
            else:
                warn.logger.warning(warn)
        except EntryUnicodeError as eue:
            msg = 'Plugin {} tried to create non-unicode compatible entry (key: {}, value: {!r})'.format(
                keyword,
                eue.key,
                eue.value,
            )
            logger.critical(msg)
            self.abort(msg)
        except PluginError as err:
            err.logger.critical(err.value)
            self.abort(err.value)
        except DependencyError as e:
            msg = f'Plugin `{keyword}` cannot be used because dependency `{e.missing}` is missing.'
            logger.critical(e.message)
            self.abort(msg)
        except Warning as e:
            # If warnings have been elevated to errors
            msg = f'Warning during plugin {keyword}: {e}'
            logger.exception(msg)
            self.abort(msg)
        except Exception as e:
            msg = f'BUG: Unhandled error in plugin {keyword}: {e}'
            logger.opt(exception=True).critical(msg)
            traceback = self.manager.crash_report()
            self.abort(msg, traceback=traceback)

    def rerun(self, plugin=None, reason=None):
        """
        Immediately re-run the task after execute has completed,
        task can be re-run up to :attr:`.max_reruns` times.

        :param str plugin: Plugin name
        :param str reason: Why the rerun is done
        """
        msg = 'Plugin {} has requested task to be ran again after execution has completed.'.format(
            self.current_plugin if plugin is None else plugin
        )
        if reason:
            msg += f' Reason: {reason}'
        # Only print the first request for a rerun to the info log
        if self._rerun:
            logger.debug(msg)
        else:
            logger.info(msg)
        self._rerun = True

    def config_changed(self):
        """
        Sets config_modified flag to True for the remainder of this run.
        Used when the db changes, and all entries need to be reprocessed.
        """
        self.config_modified = True

    def merge_config(self, new_config):
        """Merge ``new_config`` into this task's config, aborting on conflict."""
        try:
            merge_dict_from_to(new_config, self.config)
        except MergeException as e:
            raise PluginError(f'Failed to merge configs for task {self.name}: {e}')

    def check_config_hash(self):
        """
        Checks the task's config hash and updates the hash if necessary.
        """
        # Save current config hash and set config_modified flag
        config_hash = get_config_hash(self.config)
        if self.is_rerun:
            # Restore the config to state right after start phase
            if self.prepared_config:
                self.config = copy.deepcopy(self.prepared_config)
            else:
                logger.error('BUG: No prepared_config on rerun, please report.')
        with Session() as session:
            last_hash = (
                session.query(TaskConfigHash).filter(TaskConfigHash.task == self.name).first()
            )
            if not last_hash:
                session.add(TaskConfigHash(task=self.name, hash=config_hash))
                self.config_changed()
            elif last_hash.hash != config_hash:
                last_hash.hash = config_hash
                self.config_changed()

    def _execute(self):
        """Executes the task without rerunning."""
        if not self.enabled:
            logger.debug('Not running disabled task {}', self.name)
            return

        logger.debug('executing {}', self.name)

        # Handle keyword args
        if self.options.learn:
            logger.info('Disabling download and output phases because of --learn')
            self.disable_phase('download')
            self.disable_phase('output')
        if self.options.disable_phases:
            list(map(self.disable_phase, self.options.disable_phases))
        if self.options.inject:
            # If entries are passed for this execution (eg. rerun), disable the input phase
            self.disable_phase('input')
            self.all_entries.extend(copy.deepcopy(self.options.inject))

        # run phases
        try:
            for phase in task_phases:
                if phase in self.disabled_phases:
                    # log keywords not executed
                    if phase not in self.suppress_warnings:
                        for plugin in self.plugins(phase):
                            if plugin.name in self.config:
                                logger.info(
                                    'Plugin {} is not executed in {} phase because the phase is disabled '
                                    '(e.g. --test, --inject)',
                                    plugin.name,
                                    phase,
                                )
                    continue
                if phase in ('start', 'prepare') and self.is_rerun:
                    logger.debug('skipping phase {} during rerun', phase)
                    continue
                if phase == 'exit':
                    # Make sure we run the entry complete hook before exit phase. These hooks may call for a rerun,
                    # which would mean we should skip the exit phase during this run.
                    for entry in self.all_entries:
                        entry.complete()
                    if self._rerun and self._rerun_count < self.max_reruns:
                        logger.debug('not running task_exit yet because task will rerun')
                        continue
                # run all plugins with this phase
                self.__run_task_phase(phase)
                if phase == 'start':
                    # Store a copy of the config state after start phase to restore for reruns
                    self.prepared_config = copy.deepcopy(self.config)
        except TaskAbort:
            try:
                self.__run_task_phase('abort')
            except TaskAbort as e:
                logger.exception('abort handlers aborted: {}', e)
            raise

    @use_task_logging
    def execute(self):
        """
        Executes the the task.

        If :attr:`.enabled` is False task is not executed. Certain :attr:`.options`
        affect how execution is handled.

        - :attr:`.options.disable_phases` is a list of phases that are not enabled
          for this execution.
        - :attr:`.options.inject` is a list of :class:`Entry` instances used instead
          of running input phase.
        """
        self.finished_event.clear()
        try:
            if self.options.cron:
                self.manager.db_cleanup()
            fire_event('task.execute.started', self)
            while True:
                self._execute()
                # rerun task
                if (
                    self._rerun
                    and self._rerun_count < self.max_reruns
                    and self._rerun_count < Task.RERUN_MAX
                ):
                    logger.info('Rerunning the task in case better resolution can be achieved.')
                    self._rerun_count += 1
                    # Discard all entries and start over on rerun
                    self._all_entries = EntryContainer()
                    self._rerun = False
                    continue
                elif self._rerun:
                    logger.info(
                        'Task has been re-run {} times already, stopping for now',
                        self._rerun_count,
                    )
                break
            fire_event('task.execute.completed', self)
        finally:
            self.finished_event.set()
            self.requests.close()

    @staticmethod
    def validate_config(config):
        """Validate a task config dict against all task-interface plugin schemas."""
        schema = plugin_schemas(interface='task')
        # Don't validate commented out plugins
        schema['patternProperties'] = {'^_': {}}
        return config_schema.process_config(config, schema)

    def __copy__(self):
        new = type(self)(self.manager, self.name, self.config, self.options)
        # Update all the variables of new instance to match our own
        new.__dict__.update(self.__dict__)
        # Some mutable objects need to be copies
        new.options = copy.copy(self.options)
        new.config = copy.deepcopy(self.config)
        return new

    copy = __copy__

    def render(self, template):
        """
        Renders a template string based on fields in the entry.

        :param template: A template string or FlexGetTemplate that uses jinja2 or python string replacement format.
        :return: The result of the rendering.
        :rtype: string
        :raises RenderError: If there is a problem.
        """
        if not isinstance(template, (str, FlexGetTemplate)):
            raise ValueError(
                'Trying to render non string template or unrecognized template format, got %s'
                % repr(template)
            )
        logger.trace('rendering: {}', template)
        return render_from_task(template, self)
@event('config.register')
def register_config_key():
    """Register the root 'tasks' config key; each task validates against the
    combined schemas of all task-interface plugins."""
    config_schema.register_config_key(
        'tasks',
        {
            'type': 'object',
            'additionalProperties': plugin_schemas(interface='task'),
        },
        required=True,
    )
from __future__ import with_statement
import hashlib
import os
import stat
import tempfile
from fnmatch import filter as fnfilter
from fabric.state import output, connections, env
from fabric.utils import warn
class SFTP(object):
"""
SFTP helper class, which is also a facade for paramiko.SFTPClient.
"""
    def __init__(self, host_string):
        # Open an SFTP session on the already-established SSH connection
        # cached under this host string.
        self.ftp = connections[host_string].open_sftp()
    # Recall that __getattr__ is the "fallback" attribute getter, and is thus
    # pretty safe to use for facade-like behavior as we're doing here.
    def __getattr__(self, attr):
        # Delegate anything not defined here to paramiko's SFTPClient.
        return getattr(self.ftp, attr)
def isdir(self, path):
try:
return stat.S_ISDIR(self.ftp.lstat(path).st_mode)
except IOError:
return False
def islink(self, path):
try:
return stat.S_ISLNK(self.ftp.lstat(path).st_mode)
except IOError:
return False
def exists(self, path):
try:
self.ftp.lstat(path).st_mode
except IOError:
return False
return True
    def glob(self, path):
        """Expand a remote glob *path* into matching remote paths.

        Returns ``[path]`` unchanged when nothing matches the pattern.
        """
        from fabric.state import win32
        dirpart, pattern = os.path.split(path)
        rlist = self.ftp.listdir(dirpart)
        # Match against non-hidden names only.
        names = fnfilter([f for f in rlist if not f[0] == '.'], pattern)
        ret = [path]
        if len(names):
            s = '/'
            ret = [dirpart.rstrip(s) + s + name.lstrip(s) for name in names]
            if not win32:
                # On POSIX clients, rebuild via os.path.join instead.
                ret = [os.path.join(dirpart, name) for name in names]
        return ret
    def walk(self, top, topdown=True, onerror=None, followlinks=False):
        """Remote analogue of ``os.walk``: yield ``(dirpath, dirs, files)``
        tuples for *top* and its subdirectories over SFTP."""
        # NOTE(review): isdir/islink are imported here but the self.* remote
        # versions are what is actually used below — confirm and drop.
        from os.path import join, isdir, islink

        # We may not have read permission for top, in which case we can't get a
        # list of the files the directory contains. os.path.walk always
        # suppressed the exception then, rather than blow up for a minor reason
        # when (say) a thousand readable directories are still left to visit.
        # That logic is copied here.
        try:
            # Note that listdir and error are globals in this module due to
            # earlier import-*.
            names = self.ftp.listdir(top)
        except Exception, err:
            if onerror is not None:
                onerror(err)
            return

        # Partition entries into directories and non-directories.
        dirs, nondirs = [], []
        for name in names:
            if self.isdir(join(top, name)):
                dirs.append(name)
            else:
                nondirs.append(name)

        if topdown:
            yield top, dirs, nondirs

        # Recurse into subdirectories (skipping links unless followlinks).
        for name in dirs:
            path = join(top, name)
            if followlinks or not self.islink(path):
                for x in self.walk(path, topdown, onerror, followlinks):
                    yield x
        if not topdown:
            yield top, dirs, nondirs
def mkdir(self, path, use_sudo):
from fabric.api import sudo, hide
if use_sudo:
with hide('everything'):
sudo('mkdir %s' % path)
else:
self.ftp.mkdir(path)
    def get(self, remote_path, local_path, local_is_path, rremote=None):
        """Download one remote file.

        :param remote_path: full remote path to download.
        :param local_path: local path string (may contain ``%(host)s`` etc.
            interpolation keys) or a file-like object when ``local_is_path``
            is False.
        :param local_is_path: whether ``local_path`` is a filesystem path.
        :param rremote: remote path relative to the originally requested
            root, used for interpolation; defaults to ``remote_path``.
        :returns: the final local path, or the file contents when a
            file-like object was given.
        """
        # rremote => relative remote path, so get(/var/log) would result in
        # this function being called with
        # remote_path=/var/log/apache2/access.log and
        # rremote=apache2/access.log
        rremote = rremote if rremote is not None else remote_path
        # Handle format string interpolation (e.g. %(dirname)s)
        path_vars = {
            'host': env.host_string.replace(':', '-'),
            'basename': os.path.basename(rremote),
            'dirname': os.path.dirname(rremote),
            'path': rremote
        }
        if local_is_path:
            # Interpolate, then abspath (to make sure any /// are compressed)
            local_path = os.path.abspath(local_path % path_vars)
            # Ensure we give Paramiko a file by prepending and/or creating
            # local directories as appropriate.
            dirpath, filepath = os.path.split(local_path)
            if dirpath and not os.path.exists(dirpath):
                os.makedirs(dirpath)
            if os.path.isdir(local_path):
                local_path = os.path.join(local_path, path_vars['basename'])
        if output.running:
            print("[%s] download: %s <- %s" % (
                env.host_string,
                local_path if local_is_path else "<file obj>",
                remote_path
            ))
        # Warn about overwrites, but keep going
        if local_is_path and os.path.exists(local_path):
            msg = "Local file %s already exists and is being overwritten."
            warn(msg % local_path)
        # Have to bounce off FS if doing file-like objects
        fd, real_local_path = None, local_path
        if not local_is_path:
            fd, real_local_path = tempfile.mkstemp()
        self.ftp.get(remote_path, real_local_path)
        # Return file contents (if it needs stuffing into a file-like obj)
        # or the final local file path (otherwise)
        result = None
        if not local_is_path:
            file_obj = os.fdopen(fd)
            result = file_obj.read()
            # Clean up temporary file
            file_obj.close()
            os.remove(real_local_path)
        else:
            result = real_local_path
        return result
    def get_dir(self, remote_path, local_path):
        """Recursively download ``remote_path`` into ``local_path``.

        Walks the remote tree with ``self.walk`` and calls ``self.get`` for
        every file found, mirroring the directory structure locally (unless
        ``local_path`` contains ``%(path)s``/``%(dirname)s`` placeholders,
        which are passed through to ``get`` for interpolation). Returns the
        list of results from the individual ``get`` calls.
        """
        # Decide what needs to be stripped from remote paths so they're all
        # relative to the given remote_path
        if os.path.basename(remote_path):
            strip = os.path.dirname(remote_path)
        else:
            strip = os.path.dirname(os.path.dirname(remote_path))
        # Store all paths gotten so we can return them when done
        result = []
        # Use our facsimile of os.walk to find all files within remote_path
        for context, dirs, files in self.walk(remote_path):
            # Normalize current directory to be relative
            # E.g. remote_path of /var/log and current dir of /var/log/apache2
            # would be turned into just 'apache2'
            lcontext = rcontext = context.replace(strip, '', 1).lstrip('/')
            # Prepend local path to that to arrive at the local mirrored
            # version of this directory. So if local_path was 'mylogs', we'd
            # end up with 'mylogs/apache2'
            lcontext = os.path.join(local_path, lcontext)
            # Download any files in current directory
            for f in files:
                # Construct full and relative remote paths to this file
                rpath = os.path.join(context, f)
                rremote = os.path.join(rcontext, f)
                # If local_path isn't using a format string that expands to
                # include its remote path, we need to add it here.
                if "%(path)s" not in local_path \
                    and "%(dirname)s" not in local_path:
                    lpath = os.path.join(lcontext, f)
                # Otherwise, just passthrough local_path to self.get()
                else:
                    lpath = local_path
                # Now we can make a call to self.get() with specific file paths
                # on both ends.
                result.append(self.get(rpath, lpath, True, rremote))
        return result
    def put(self, local_path, remote_path, use_sudo, mirror_local_mode, mode,
            local_is_path):
        """Upload a single file via SFTP.

        ``local_path`` is either a filesystem path (``local_is_path`` true)
        or a file-like object. When ``use_sudo`` is true the file is first
        uploaded to a unique temporary name in the remote CWD and then moved
        into place with ``sudo(mv)``. File modes are mirrored from the local
        file (``mirror_local_mode``) or forced to ``mode`` when requested.
        Returns the final remote path.

        NOTE(review): the 0-prefixed octal literals (``07777``) and passing a
        str to ``hashlib``'s ``update()`` are Python 2 only syntax/behavior.
        """
        from fabric.api import sudo, hide
        pre = self.ftp.getcwd()
        pre = pre if pre else ''
        # Uploading into an existing remote directory: append the local
        # basename to form the real target path.
        if local_is_path and self.isdir(remote_path):
            basename = os.path.basename(local_path)
            remote_path = os.path.join(remote_path, basename)
        if output.running:
            print("[%s] put: %s -> %s" % (
                env.host_string,
                local_path if local_is_path else '<file obj>',
                os.path.join(pre, remote_path)
            ))
        # When using sudo, "bounce" the file through a guaranteed-unique file
        # path in the default remote CWD (which, typically, the login user will
        # have write permissions on) in order to sudo(mv) it later.
        if use_sudo:
            target_path = remote_path
            hasher = hashlib.sha1()
            hasher.update(env.host_string)
            hasher.update(target_path)
            remote_path = hasher.hexdigest()
        # Have to bounce off FS if doing file-like objects
        fd, real_local_path = None, local_path
        if not local_is_path:
            fd, real_local_path = tempfile.mkstemp()
            # Preserve and restore the caller's read position in the file obj.
            old_pointer = local_path.tell()
            local_path.seek(0)
            file_obj = os.fdopen(fd, 'wb')
            file_obj.write(local_path.read())
            file_obj.close()
            local_path.seek(old_pointer)
        rattrs = self.ftp.put(real_local_path, remote_path)
        # Clean up
        if not local_is_path:
            os.remove(real_local_path)
        # Handle modes if necessary
        if local_is_path and (mirror_local_mode or mode is not None):
            lmode = os.stat(local_path).st_mode if mirror_local_mode else mode
            # Compare only the permission bits (rwx + setuid/setgid/sticky).
            lmode = lmode & 07777
            rmode = rattrs.st_mode & 07777
            if lmode != rmode:
                if use_sudo:
                    with hide('everything'):
                        sudo('chmod %o \"%s\"' % (lmode, remote_path))
                else:
                    self.ftp.chmod(remote_path, lmode)
        if use_sudo:
            with hide('everything'):
                sudo("mv \"%s\" \"%s\"" % (remote_path, target_path))
            # Revert to original remote_path for return value's sake
            remote_path = target_path
        return remote_path
    def put_dir(self, local_path, remote_path, use_sudo, mirror_local_mode,
                mode):
        """Recursively upload the local directory ``local_path`` to
        ``remote_path``, creating remote directories as needed.

        Returns the list of remote paths of every uploaded file.
        """
        # Decide what to strip from local paths so they become relative to
        # local_path (mirrors the strip logic in get_dir()).
        if os.path.basename(local_path):
            strip = os.path.dirname(local_path)
        else:
            strip = os.path.dirname(os.path.dirname(local_path))
        remote_paths = []
        for context, dirs, files in os.walk(local_path):
            rcontext = context.replace(strip, '', 1)
            rcontext = rcontext.lstrip('/')
            rcontext = os.path.join(remote_path, rcontext)
            if not self.exists(rcontext):
                self.mkdir(rcontext, use_sudo)
            for d in dirs:
                n = os.path.join(rcontext,d)
                if not self.exists(n):
                    self.mkdir(n, use_sudo)
            for f in files:
                # NOTE(review): this rebinds the ``local_path`` parameter on
                # every file; os.walk() has already captured the original
                # value, so behavior is unaffected, but it is easy to misread.
                local_path = os.path.join(context,f)
                n = os.path.join(rcontext,f)
                p = self.put(local_path, n, use_sudo, mirror_local_mode, mode,
                             True)
                remote_paths.append(p)
        return remote_paths
/DendroPy-4.6.1.tar.gz/DendroPy-4.6.1/src/dendropy/utility/textprocessing.py |
##############################################################################
## DendroPy Phylogenetic Computing Library.
##
## Copyright 2010-2015 Jeet Sukumaran and Mark T. Holder.
## All rights reserved.
##
## See "LICENSE.rst" for terms and conditions of usage.
##
## If you use this work or any portion thereof in published work,
## please cite it as:
##
## Sukumaran, J. and M. T. Holder. 2010. DendroPy: a Python library
## for phylogenetic computing. Bioinformatics 26: 1569-1571.
##
##############################################################################
"""
Various text-manipulating and formatting utilities.
"""
import re
import sys
import time
import itertools
import locale
import codecs
###############################################################################
## Cross-version compatibility
try:
from StringIO import StringIO # Python 2 legacy support: StringIO in this module is the one needed (not io)
except ImportError:
from io import StringIO # Python 3
###############################################################################
## Unicode/String Conversions
# Determine the default text encoding used by bytes_to_text() below.
# locale.getencoding() only exists on Python 3.11+; older interpreters fall
# back to the (deprecated) locale.getdefaultlocale().
try:
    ENCODING = locale.getencoding()
except Exception:  # AttributeError before Python 3.11
    try:
        ENCODING = locale.getdefaultlocale()[1]
    except ValueError:
        ENCODING = None # let default value be assigned below
if ENCODING is None:
    # getdefaultlocale() may also return (None, None); default to UTF-8.
    ENCODING = 'UTF-8'
def bytes_to_text(s):
    """
    Converts a byte string (as read from, e.g., standard input)
    to a text string.

    In Python 3, this is from type ``bytes`` to ``str``.
    In Python 2, this is, confusingly, from type ``str`` to ``unicode``.
    """
    text = codecs.decode(s, ENCODING)
    if sys.hexversion >= 0x03000000:
        return text
    # Python 2: re-encode to a UTF-8 byte string (historical behavior).
    return codecs.encode(text, "utf-8")
def parse_curie_standard_qualified_name(prefixed_name, sep=":"):
    """Split a CURIE-style qualified name into its prefix and local parts.

    Splits ``prefixed_name`` on the *first* occurrence of ``sep`` and returns
    the two parts as a list.

    Raises ``ValueError`` if ``sep`` does not occur in ``prefixed_name``.
    """
    if sep not in prefixed_name:
        raise ValueError("'{}' is not a valid CURIE-standard qualified name".format(prefixed_name))
    # Bug fix: split on the caller-supplied separator, not a hard-coded ":".
    return prefixed_name.split(sep, 1)
## From:
# The Peyotl module of the Open Tree of Life Project
# Mark T. Holder
# https://github.com/mtholder/peyotl
# https://github.com/mtholder/peyotl/blob/c3a544211edc669e664bae28095d52cecfa004f3/peyotl/utility/str_util.py#L5-L25
if sys.version_info.major == 2:
    import __builtin__ as builtins # extra verbosity to mollify linter
    def is_str_type(x):
        """Return True if ``x`` is a string (``basestring`` on Python 2)."""
        return isinstance(x, builtins.basestring)
else:
    def is_str_type(x):
        """Return True if ``x`` is a string (``str`` on Python 3)."""
        return isinstance(x, str)
###############################################################################
##
def camel_case(s):
    """Convert an underscore-delimited name to camelCase.

    The first component keeps its original casing; every following
    component is title-cased and concatenated onto it.
    """
    parts = s.split('_')
    head = parts[0]
    tail = [part.title() for part in parts[1:]]
    return head + "".join(tail)
def snake_case(name):
    """Convert a CamelCase identifier to snake_case.

    Two regex passes: the first inserts an underscore before each
    capitalized word, the second splits a capital that directly follows a
    lowercase letter or digit; the result is then lower-cased.
    """
    word_boundaries = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
    fully_split = re.sub('([a-z0-9])([A-Z])', r'\1_\2', word_boundaries)
    return fully_split.lower()
###############################################################################
##
def unique_taxon_label_map(taxa, taxon_label_map=None, max_label_len=0, logger=None):
    """
    Given a list of taxa, returns a dictionary with the Taxon objects as
    keys and string labels as values, where the labels are guaranteed to
    be unique. If ``taxon_label_map`` is pre-populated (as <Taxon> : 'label'),
    then those labels will be used as the basis for the label composition
    (and that mapping is updated in place), otherwise the original taxon
    object label will be used. ``max_label_len`` can be used to restrict
    the maximum length of the labels.
    """
    if taxon_label_map is None:
        taxon_label_map = {t: t.label for t in taxa}
    seen = []
    for taxon in taxon_label_map:
        label = taxon_label_map[taxon]
        if label in seen:
            # Append an increasing numeric suffix (starting at 2) until the
            # label no longer clashes with one already assigned.
            suffix_num = 1
            candidate = label
            while candidate in seen:
                suffix_num += 1
                suffix = str(suffix_num)
                if max_label_len > 0:
                    keep = max_label_len - len(suffix)
                    if keep < 1:
                        raise ValueError("Unable to make labels unique with maximum label length of %d" % max_label_len)
                    candidate = label[:keep] + suffix
                else:
                    candidate = label + suffix
            label = candidate
        seen.append(label)
        taxon_label_map[taxon] = label
    return taxon_label_map
###############################################################################
##
def format_dict_table(*args, **kwargs):
    """
    Returns a (single) string representation of a tuple of dictionaries in a
    table format. This method can read the column names directly off the
    dictionary keys, but if a tuple of these keys is provided in the
    'column_names' variable, then the order of column_names will follow the
    order of the fields/keys in that variable.
    """
    rendered_rows = format_dict_table_rows(*args, **kwargs)
    return "\n".join(rendered_rows) if rendered_rows else ""
def format_dict_table_rows(rows, column_names=None, max_column_width=None, border_style=2):
    """
    Returns a string representation of a tuple of dictionaries in a
    table format. This method can read the column names directly off the
    dictionary keys, but if a tuple of these keys is provided in the
    'column_names' variable, then the order of column_names will follow
    the order of the fields/keys in that variable.

    ``max_column_width`` truncates both headers and cell values;
    ``border_style`` selects the rule characters (0 = none, 1 = horizontal
    only, 2 = inner rules, >=3 adds outer edges).

    NOTE(review): on success this returns a *list* of rendered lines, while
    the fall-through branches return the empty *string* — both are falsy,
    which is what format_dict_table() relies on.
    """
    if column_names or len(rows) > 0:
        lengths = {}
        rules = {}
        # Column order comes from the caller, else from the first row's keys.
        if column_names:
            column_list = column_names
        else:
            try:
                column_list = rows[0].keys()
            # NOTE(review): bare except — presumably guards against empty or
            # non-dict rows; consider narrowing to (IndexError, AttributeError).
            except:
                column_list = None
        if column_list:
            # characters that make up the table rules
            border_style = int(border_style)
            #border_style = 0
            if border_style == 0:
                vertical_rule = ' '
                horizontal_rule = ''
                rule_junction = ''
            elif border_style == 1:
                vertical_rule = ' '
                horizontal_rule = '-'
                rule_junction = '-'
            else:
                vertical_rule = ' | '
                horizontal_rule = '-'
                rule_junction = '-+-'
            if border_style >= 3:
                left_table_edge_rule = '| '
                right_table_edge_rule = ' |'
                left_table_edge_rule_junction = '+-'
                right_table_edge_rule_junction = '-+'
            else:
                left_table_edge_rule = ''
                right_table_edge_rule = ''
                left_table_edge_rule_junction = ''
                right_table_edge_rule_junction = ''
            # Truncate headers and cell values to the requested width.
            if max_column_width:
                column_list = [c[:max_column_width] for c in column_list]
                trunc_rows = []
                for row in rows:
                    new_row = {}
                    for k in row.keys():
                        new_row[k[:max_column_width]] = str(row[k])[:max_column_width]
                    trunc_rows.append(new_row)
                rows = trunc_rows
            # Each column is as wide as its widest value (or its header).
            for col in column_list:
                rls = [len(str(row[col])) for row in rows]
                lengths[col] = max(rls+[len(col)])
                rules[col] = horizontal_rule*lengths[col]
            # Build left-aligned %-format templates for data rows and rules.
            template_elements = ["%%(%s)-%ss" % (col, lengths[col]) for col in column_list]
            row_template = vertical_rule.join(template_elements)
            border_template = rule_junction.join(template_elements)
            full_line = left_table_edge_rule_junction + (border_template % rules) + right_table_edge_rule_junction
            display = []
            if border_style > 0:
                display.append(full_line)
            # Header row: each column formatted with its own name as value.
            display.append(left_table_edge_rule + (row_template % dict(zip(column_list, column_list))) + right_table_edge_rule)
            if border_style > 0:
                display.append(full_line)
            for row in rows:
                display.append(left_table_edge_rule + (row_template % row) + right_table_edge_rule)
            if border_style > 0:
                display.append(full_line)
            return display
        else:
            return ''
    else:
        return ''
/MJOLNIR-1.3.1.tar.gz/MJOLNIR-1.3.1/docs/Contribution.rst | After the initial start-up phase, you are more than welcome to contribute to the software. This is best done as follows:
* First create an issue on the GitHub page describing the scope of the contribution
* Title: *Contribution: Title of contribution*.
* Short description of features.
* List of package dependencies.
* After discussion of the feature's scope, you are welcome to start programming
* Suggested changes are submitted through a pull request
* Each contribution needs to include:
* source code in a suitable sub-folder (see Software structure)
* Documentation of code located in the docs-folder having identical structure to the modules added
* Suitable tests for the new functionality added to the .travis.yml-file
* Needed packages added to the requirements.txt file
Contribution Example:
_____________________
Title: Extension of fitting routine
Description: An extension of the fitting module is needed to allow
users to incorporate Bayesian update of parameters fitted in 3D
with given priors. This is to be done by adding a subroutine to
the fit object.
Initial thoughts: It is believed that the best approach is to build upon
the XXX package, in which this feature has already been implemented.
| PypiClean |
/Fabric-with-working-dependencies-1.0.1.tar.gz/Fabric-with-working-dependencies-1.0.1/docs/usage/fabfiles.rst | ============================
Fabfile construction and use
============================
This document contains miscellaneous sections about fabfiles, both how to best
write them, and how to use them once written.
.. _fabfile-discovery:
Fabfile discovery
=================
Fabric is capable of loading Python modules (e.g. ``fabfile.py``) or packages
(e.g. a ``fabfile/`` directory containing an ``__init__.py``). By default, it
looks for something named either ``fabfile`` or ``fabfile.py``.
The fabfile discovery algorithm searches in the invoking user's current working
directory or any parent directories. Thus, it is oriented around "project" use,
where one keeps e.g. a ``fabfile.py`` at the root of a source code tree. Such a
fabfile will then be discovered no matter where in the tree the user invokes
``fab``.
The specific name to be searched for may be overridden on the command-line with
the :option:`-f` option, or by adding a :ref:`fabricrc <fabricrc>` line which
sets the value of ``fabfile``. For example, if you wanted to name your fabfile
``fab_tasks.py``, you could create such a file and then call ``fab -f
fab_tasks.py <task name>``, or add ``fabfile = fab_tasks.py`` to
``~/.fabricrc``.
If the given fabfile name contains path elements other than a filename (e.g.
``../fabfile.py`` or ``/dir1/dir2/custom_fabfile``) it will be treated as a
file path and directly checked for existence without any sort of searching.
When in this mode, tilde-expansion will be applied, so one may refer to e.g.
``~/personal_fabfile.py``.
.. note::
Fabric does a normal ``import`` (actually an ``__import__``) of your
fabfile in order to access its contents -- it does not do any ``eval``-ing
or similar. In order for this to work, Fabric temporarily adds the found
fabfile's containing folder to the Python load path (and removes it
immediately afterwards.)
.. versionchanged:: 0.9.2
The ability to load package fabfiles.
.. _importing-the-api:
Importing Fabric
================
Because Fabric is just Python, you *can* import its components any way you
want. However, for the purposes of encapsulation and convenience (and to make
life easier for Fabric's packaging script) Fabric's public API is maintained in
the ``fabric.api`` module.
All of Fabric's :doc:`../api/core/operations`,
:doc:`../api/core/context_managers`, :doc:`../api/core/decorators` and
:doc:`../api/core/utils` are included in this module as a single, flat
namespace. This enables a very simple and consistent interface to Fabric within
your fabfiles::
from fabric.api import *
# call run(), sudo(), etc etc
This is not technically best practice (for `a
number of reasons`_) and if you're only using a couple of
Fab API calls, it *is* probably a good idea to explicitly ``from fabric.api
import env, run`` or similar. However, in most nontrivial fabfiles, you'll be
using all or most of the API, and the star import::
from fabric.api import *
will be a lot easier to write and read than::
from fabric.api import abort, cd, env, get, hide, hosts, local, prompt, \
put, require, roles, run, runs_once, settings, show, sudo, warn
so in this case we feel pragmatism overrides best practices.
.. _a number of reasons: http://python.net/~goodger/projects/pycon/2007/idiomatic/handout.html#importing
Defining tasks and importing callables
======================================
For important information on what exactly Fabric will consider as a task when
it loads your fabfile, as well as notes on how best to import other code,
please see :ref:`tasks-and-imports` in the :doc:`execution` documentation.
| PypiClean |
/MnemoPwd-1.2.1-py3-none-any.whl/mnemopwd/server/clients/protocol/StateSCC.py |
# Copyright (c) 2015-2016, Thierry Lemeunier <thierry at lemeunier dot net>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
State SCC : Challenge Controller
"""
import hashlib
import base64
from ....pyelliptic import hmac_sha512
from ....pyelliptic import hmac_sha256
class StateSCC:
    """Challenge controller: verifies the client's challenge answer and
    provides helper computations shared by the server-side protocol states
    (client id and client database filename)."""
    def control_challenge(self, client, data, var):
        """Verify the challenge answer sent by the client.

        Decrypts the first 169 bytes of ``data`` with the server's ephemeral
        key and compares the result with HMAC-SHA256 of the session id plus
        ``var``, keyed by the shared secret ``client.ms``. On mismatch an
        error message is written to the client; any exception is forwarded
        to the client's exception handler via the event loop.

        Returns True when the challenge is accepted, False otherwise.
        """
        try:
            echallenge = data[:169] # Encrypted challenge
            challenge = client.ephecc.decrypt(echallenge) # Decrypting
            # Compute challenge
            challenge_bis = hmac_sha256(client.ms, client.session + var)
            if challenge != challenge_bis:
                # Send challenge rejected
                msg = b'ERROR;application protocol error'
                client.loop.call_soon_threadsafe(client.transport.write, msg)
                raise Exception(var.decode() + " challenge rejected")
        except Exception as exc:
            # Schedule a callback to client exception handler
            client.loop.call_soon_threadsafe(client.exception_handler, exc)
            return False
        else:
            return True
    def compute_client_id(self, ms, login):
        """Return the client id: SHA-256 of HMAC-SHA512(ms, ms + login)."""
        ho = hashlib.sha256()
        ho.update(hmac_sha512(ms, ms + login))
        return ho.digest()
    def compute_client_filename(self, id, ms, login):
        """Return the client database filename as a string.

        Concatenates 52 Base32 characters of SHA-256(HMAC-SHA512(ms, login))
        with 52 Base32 characters of ``id``, yielding a filesystem-safe name.
        """
        # Compute login hash
        ho = hashlib.sha256()
        ho.update(hmac_sha512(ms, login))
        hlogin = ho.digest()
        # Filename construction
        filename = (base64.b32encode(hlogin))[:52] + (base64.b32encode(id))[:52]
        return filename.decode() # Return client database filename (a string)
/AudioAugmentation-0.10.0.tar.gz/AudioAugmentation-0.10.0/README.txt | # AudioAugmentation
Librería de Python para aumentar audios aplicando transformaciones sobre ellos.
De esta manera, a partir de un audio de entrada se generan 9 salidas transformadas.
## Initialization
```bash
pip install AudioAugmentation
```
A `AudioAugmentation` object should be created and use its attributes.
```python
from AudioAugmentation import Audio_K
aumentedAudio = Audio_K(audio_file=file_path, save=save, graph=grafica)
todos_audios = aumentedAudio.aumentar()
```
### Audio_K methods
* `aumentar()`: returns all transformed versions of an audio; you can save the new audio files or only display them.
| PypiClean |
/Flask-Azure-Storage-0.2.1.tar.gz/Flask-Azure-Storage-0.2.1/flask_azure_storage.py | from azure.storage import CloudStorageAccount
import six
from collections import defaultdict
import logging
import os
import re
from flask import current_app
from flask import url_for as flask_url_for
logger = logging.getLogger('Flask_Azure_Storage')
try:
from flask import _app_ctx_stack as stack
except ImportError:
from flask import _request_ctx_stack as stack
def url_for(endpoint, **values):
    """Drop-in replacement for Flask's ``url_for`` that points static
    endpoints at the configured Azure Storage container.

    Non-static endpoints — and everything while the app is in TESTING
    mode — are delegated to Flask's own ``url_for``.
    """
    app = current_app
    if app.config.get('TESTING', False):
        return flask_url_for(endpoint, **values)
    if 'AZURE_STORAGE_CONTAINER_NAME' not in app.config:
        raise ValueError("AZURE_STORAGE_CONTAINER_NAME not found in app configuration.")
    is_static = endpoint == 'static' or endpoint.endswith('.static')
    if not is_static:
        return flask_url_for(endpoint, **values)
    # Default scheme follows AZURE_STORAGE_USE_HTTPS; a per-URL override
    # may still be given via the _scheme value.
    default_scheme = 'https' if app.config.get("AZURE_STORAGE_USE_HTTPS", True) else 'http'
    scheme = values.pop('_scheme', default_scheme)
    # These routing hints have no meaning for container-hosted static URLs.
    values.pop('_external', False)
    values.pop('_anchor', None)
    values.pop('_method', None)
    url_format = '%(container_domain)s/%(container_name)s/%(virtual_folder)s'
    bucket_path = url_format % {
        'container_domain': app.config['AZURE_STORAGE_DOMAIN'],
        'container_name': app.config['AZURE_STORAGE_CONTAINER_NAME'],
        'virtual_folder': app.config['AZURE_STORAGE_VIRTUAL_FOLDER_NAME']
    }
    urls = app.url_map.bind(bucket_path, url_scheme=scheme)
    return urls.build(endpoint, values=values, force_external=True)
def _path_to_relative_url(path):
return os.path.splitdrive(path)[1].replace('\\', '/')
def _static_folder_path(static_url, static_folder, static_asset, app):
# first get the asset path relative to the static folder.
# static_asset is not simply a filename because it could be
# sub-directory then file etc.
if not static_asset.startswith(static_folder):
raise ValueError("%s static asset must be under %s static folder" %
(static_asset, static_folder))
rel_asset = static_asset[len(static_folder):]
# Now bolt the static url path and the relative asset location together
path = '%s/%s' % (static_url.rstrip('/'), rel_asset.lstrip('/'))
# Skip static folder name
# return path.split(static_folder.split('/')[-1])[1]
return path
def _write_files(blob_service, app, static_url_loc, static_folder, files,
                 container_name):
    """Upload the given static ``files`` to an Azure blob container.

    Each file's blob key is its path relative to ``static_folder`` joined
    onto ``static_url_loc`` (plus the optional
    ``AZURE_STORAGE_VIRTUAL_FOLDER_NAME`` prefix). A SHA-1 digest is stored
    in the blob's metadata and used to skip re-uploading unchanged files.
    """
    import mimetypes
    import hashlib
    from azure.storage.blob import ContentSettings
    static_folder_rel = _path_to_relative_url(static_folder)
    for file_path in files:
        asset_loc = _path_to_relative_url(file_path)
        full_key_name = _static_folder_path(static_url_loc, static_folder_rel,
                                            asset_loc, app)
        key_name = full_key_name.lstrip("/")
        virtual_folder_name = app.config.get('AZURE_STORAGE_VIRTUAL_FOLDER_NAME')
        if virtual_folder_name:
            key_name = virtual_folder_name.rstrip('/').lstrip('/') + '/' + key_name
        # Hash the file in 64 KiB chunks so large assets stay out of RAM.
        hasher = hashlib.sha1()
        with open(file_path, 'rb') as fh:
            buf = fh.read(65536)
            while buf:
                hasher.update(buf)
                buf = fh.read(65536)
        file_hash = hasher.hexdigest()
        # Skip the upload when the remote blob already carries the same hash.
        # Any failure here (e.g. the blob does not exist yet) means we upload.
        try:
            if blob_service._get_blob(container_name, key_name).\
                    metadata.get('hash') == file_hash:
                continue
        except Exception:
            pass
        blob_service.create_blob_from_path(
            container_name,
            key_name,
            file_path,
            content_settings=ContentSettings(
                content_type=mimetypes.MimeTypes().guess_type(key_name)[0]),
            metadata={'hash': file_hash})
def _bp_static_url(blueprint):
    """Return the URL prefix under which a blueprint's static files live
    (``url_prefix`` + ``static_url_path``, either of which may be None)."""
    prefix = blueprint.url_prefix or ''
    static_path = blueprint.static_url_path or ''
    return six.u('%s%s' % (prefix, static_path))
def _gather_files(app, hidden):
    """Collect static files from the app and all blueprint static folders.

    Returns a dict mapping ``(static_folder, static_url_path)`` tuples to
    lists of file paths found under that folder. Hidden files (dotfiles)
    are skipped unless ``hidden`` is true.
    """
    dirs = [(six.u(app.static_folder), app.static_url_path)]
    if hasattr(app, 'blueprints'):
        blueprints = app.blueprints.values()
        # Comprehension instead of an assigned lambda (PEP 8 E731).
        dirs.extend([(bp.static_folder, _bp_static_url(bp))
                     for bp in blueprints if bp.static_folder])
    valid_files = defaultdict(list)
    for static_folder, static_url_loc in dirs:
        if not os.path.isdir(static_folder):
            logger.warning("WARNING - [%s does not exist]" % static_folder)
        else:
            logger.debug("Checking static folder: %s" % static_folder)
            for root, _, files in os.walk(static_folder):
                files = [os.path.join(root, x)
                         for x in files if (hidden or x[0] != '.')]
                if files:
                    valid_files[(static_folder, static_url_loc)].extend(files)
    return valid_files
def create_all(app, account_name=None, account_key=None, container_name=None,
               include_hidden=False):
    """Upload all of the app's (and its blueprints') static files to Azure.

    :param app: the Flask application whose static assets are gathered.
    :param account_name: Azure account name; falls back to
        ``AZURE_STORAGE_ACCOUNT_NAME`` in the app config.
    :param account_key: Azure account key; falls back to
        ``AZURE_STORAGE_ACCOUNT_KEY`` in the app config.
    :param container_name: target container; falls back to
        ``AZURE_STORAGE_CONTAINER_NAME``. Required (directly or via config).
    :param include_hidden: also upload dotfiles when True.
    :raises ValueError: when no container name can be determined.
    """
    account_name = account_name or app.config.get('AZURE_STORAGE_ACCOUNT_NAME')
    account_key = account_key or app.config.get('AZURE_STORAGE_ACCOUNT_KEY')
    container_name = container_name or app.config.get('AZURE_STORAGE_CONTAINER_NAME')
    if not container_name:
        raise ValueError("No container name provided.")
    # build list of static files
    all_files = _gather_files(app, include_hidden)
    logger.debug("All valid files: %s" % all_files)
    # connect to azure
    azure = CloudStorageAccount(
        account_name=account_name,
        account_key=account_key
    )
    # create blob service
    blob_service = azure.create_block_blob_service()
    # get_or_create container
    if not blob_service.exists(container_name):
        blob_service.create_container(container_name)
    # Optional global key prefix, normalized to have no surrounding slashes.
    prefix = app.config.get('AZURE_STORAGE_PREFIX', '').lstrip('/').rstrip('/')
    for (static_folder, static_url), names in six.iteritems(all_files):
        static_upload_url = '%s/%s' % (prefix.rstrip('/'), static_url.lstrip('/'))
        _write_files(blob_service, app, static_upload_url, static_folder,
                     names, container_name)
class FlaskAzureStorage(object):
    """Flask extension exposing lazily-created Azure Storage service clients.

    Each service client (blob/queue/table/file) is created on first access
    and cached on the current application/request context; ``teardown``
    clears the cached clients when the context is popped.
    """
    def __init__(self, app=None):
        # Standard Flask extension pattern: either init directly with an app
        # or defer to init_app() (application factory pattern).
        self.app = app
        if app is not None:
            self.init_app(app)
    def init_app(self, app, **kwargs):
        """Set config defaults, register teardown, and — outside debug
        mode — install the Azure-aware ``url_for`` into the Jinja globals."""
        app.config.setdefault('AZURE_STORAGE_ACCOUNT_NAME', None)
        app.config.setdefault('AZURE_STORAGE_ACCOUNT_KEY', None)
        app.config.setdefault('AZURE_STORAGE_CONTAINER_NAME', '')
        app.config.setdefault('AZURE_STORAGE_DOMAIN', '')
        app.config.setdefault('AZURE_STORAGE_VIRTUAL_FOLDER_NAME', '')
        # Use the newstyle teardown_appcontext if it's available,
        # otherwise fall back to the request context
        if hasattr(app, 'teardown_appcontext'):
            app.teardown_appcontext(self.teardown)
        else:
            app.teardown_request(self.teardown)
        # Serving static files from Azure is disabled while debugging.
        if app.debug:
            app.config['AZURE_STORAGE_ACTIVE'] = False
        else:
            app.config['AZURE_STORAGE_ACTIVE'] = True
        if app.config['AZURE_STORAGE_ACTIVE']:
            app.jinja_env.globals['url_for'] = url_for
    def teardown(self, exception):
        """Drop every cached Azure service client from the current context."""
        ctx = stack.top
        if hasattr(ctx, 'azure_storage_account'):
            ctx.azure_storage_account = None
        if hasattr(ctx, 'azure_block_blob_service'):
            ctx.azure_block_blob_service = None
        if hasattr(ctx, 'azure_page_blob_service'):
            ctx.azure_page_blob_service = None
        if hasattr(ctx, 'azure_append_blob_service'):
            ctx.azure_append_blob_service = None
        if hasattr(ctx, 'azure_queue_service'):
            ctx.azure_queue_service = None
        if hasattr(ctx, 'azure_table_service'):
            ctx.azure_table_service = None
        if hasattr(ctx, 'azure_file_service'):
            ctx.azure_file_service = None
    @property
    def account(self):
        """``CloudStorageAccount`` built from app config, cached per context."""
        ctx = stack.top
        if ctx is not None:
            if not hasattr(ctx, 'azure_storage_account'):
                ctx.azure_storage_account = CloudStorageAccount(
                    account_name=ctx.app.config.get('AZURE_STORAGE_ACCOUNT_NAME'),
                    account_key=ctx.app.config.get('AZURE_STORAGE_ACCOUNT_KEY')
                )
            return ctx.azure_storage_account
    @property
    def block_blob_service(self):
        """Block blob service client, created once per context."""
        ctx = stack.top
        if ctx is not None:
            if not hasattr(ctx, 'azure_block_blob_service'):
                ctx.azure_block_blob_service = self.account.create_block_blob_service()
            return ctx.azure_block_blob_service
    @property
    def page_blob_service(self):
        """Page blob service client, created once per context."""
        ctx = stack.top
        if ctx is not None:
            if not hasattr(ctx, 'azure_page_blob_service'):
                ctx.azure_page_blob_service = self.account.create_page_blob_service()
            return ctx.azure_page_blob_service
    @property
    def append_blob_service(self):
        """Append blob service client, created once per context."""
        ctx = stack.top
        if ctx is not None:
            if not hasattr(ctx, 'azure_append_blob_service'):
                ctx.azure_append_blob_service = self.account.create_append_blob_service()
            return ctx.azure_append_blob_service
    @property
    def queue_service(self):
        """Queue service client, created once per context."""
        ctx = stack.top
        if ctx is not None:
            if not hasattr(ctx, 'azure_queue_service'):
                ctx.azure_queue_service = self.account.create_queue_service()
            return ctx.azure_queue_service
    @property
    def table_service(self):
        """Table service client, created once per context."""
        ctx = stack.top
        if ctx is not None:
            if not hasattr(ctx, 'azure_table_service'):
                ctx.azure_table_service = self.account.create_table_service()
            return ctx.azure_table_service
    @property
    def file_service(self):
        """File service client, created once per context."""
        ctx = stack.top
        if ctx is not None:
            if not hasattr(ctx, 'azure_file_service'):
                ctx.azure_file_service = self.account.create_file_service()
            return ctx.azure_file_service
/CNVpytor-1.3.1.tar.gz/CNVpytor-1.3.1/cnvpytor/__main__.py | from __future__ import print_function
from .root import *
from .viewer import *
from .version import __version__
from .fasta import *
from .export import *
from .trio import *
import sys
import os
import logging
import argparse
import matplotlib.pyplot as plt
def main():
    """CNVpytor main command-line program.

    Parses all command-line flags, configures logging, then dispatches the
    requested actions.  Actions that operate on pytor files run only when
    ``-root`` is given; several independent actions may be combined in one
    invocation and run in a fixed order (import -> histograms -> calling).

    FIX: ``-version`` is now handled immediately after argument parsing.
    Previously it was checked only after ``Genome.check_resources()``, so
    ``cnvpytor -version`` printed a resource error instead of the version
    when the resource files had not been downloaded yet.
    """
    parser = argparse.ArgumentParser(
        description="Lite version of the CNVnator written in Python.\nA tool for CNV discovery from depth of read mapping.")
    parser.add_argument('-version', '--version', action='store_true', help='show version number and exit')
    parser.add_argument('-root', '--root', type=str, nargs="+",
                        help="CNVnator hd5 file: data storage for all calculations", default=None)
    parser.add_argument('-download', '--download_resources', action='store_true', help='download resource files')
    parser.add_argument('-chrom', '--chrom', type=str, nargs="+", help="list of chromosomes to apply calculation",
                        default=[])
    parser.add_argument('-v', '--verbose', type=str,
                        choices=["none", "debug", "info", "warning", "error", "d", "e", "i", "w"],
                        help="verbose level: debug, info (default), warning, error", default="info")
    parser.add_argument('-log', '--log_file', type=str, help='log file')
    parser.add_argument('-j', '--max_cores', type=int,
                        help="maximal number of cores to use in calculation", default=8)
    parser.add_argument('-rd', '--rd', nargs="+", type=str, help="read bam/sam/cram and store read depth information")
    parser.add_argument('-T', '--reference_filename', type=str, help="reference fasta for CRAM")
    parser.add_argument('-gc', '--gc', type=str, help="read fasta file and store GC/AT content")
    parser.add_argument('-cgc', '--copy_gc', type=str, help="copy GC/AT content from another cnvnator file")
    parser.add_argument('-his', '--his', type=binsize_type, nargs="+",
                        help="create histograms for specified bin size (multiple bin sizes separate by space)")
    parser.add_argument('-snp2his', '--his_from_snp', type=binsize_type, nargs="+",
                        help="create histograms for specified bin size (multiple bin sizes separate by space)")
    parser.add_argument('-stat', '--stat', type=binsize_type, nargs="+",
                        help="calculate statistics for specified bin size (multiple bin sizes separate by space)")
    parser.add_argument('-partition', '--partition', type=binsize_type, nargs="+",
                        help="calculate segmentation for specified bin size (multiple bin sizes separate by space)")
    parser.add_argument('-call', '--call', type=str, nargs="+",
                        help="CNV caller: [baf] bin_size [bin_size2 ...] (multiple bin sizes separate by space)")
    parser.add_argument('-vcf', '-snp', '--vcf', nargs="+", type=str, help="read SNP data from vcf files")
    parser.add_argument('-somatic_snv', '--somatic_snv', nargs="+", type=str, help="read SNP data from vcf files")
    parser.add_argument('-minc', '--min_count', type=int,
                        help="minimal count of haterozygous SNPs", default=None)
    parser.add_argument('-vcf2rd', '--rd_from_vcf', type=str, help="read SNP data from vcf files")
    parser.add_argument('-noAD', '--no_snp_counts', action='store_true',
                        help="read positions of variants, not counts (AD tag)")
    parser.add_argument('-nofilter', '--no_filter', action='store_true',
                        help="read all variants (not only PASS)")
    parser.add_argument('-ad', '--ad_tag', type=str, help="counts tag (default: AD)", default="AD")
    parser.add_argument('-gt', '--gt_tag', type=str, help="genotype tag (default: GT)", default="GT")
    parser.add_argument('-dp', '--dp_tag', type=str, help="read depth tag (default: DP)", default="DP")
    parser.add_argument('-callset', '--callset', type=str, help="name for somatic VCF signal", default=None)
    parser.add_argument('-maxcn', '--max_copy_number', type=int, help="maximal copy number", default=10)
    parser.add_argument('-mindbaf', '--baf_threshold', type=float, help="threshold for change in BAF level",
                        default=0.0)
    parser.add_argument('-bafres', '--baf_resolution', type=int, help="Resolution for unphased BAF likelihood",
                        default=200)
    parser.add_argument('-nolh', '--no_save_likelihood', action='store_true',
                        help="do not save likelihood histograms (reduce size of pytor file)")
    parser.add_argument('-oth', '--overlap_threshold', type=float, help="likelihood overlap threshold",
                        default=None)
    parser.add_argument('-mincf', '--min_cell_fraction', type=float, help="minimal cell fraction", default=0.0)
    parser.add_argument('-pileup', '--pileup_bam', nargs="+", type=str, help="calculate SNP counts from bam files")
    parser.add_argument('-snp2rd', '--rd_from_snp', action='store_true', help="calculate RD from SNP counts")
    parser.add_argument('-sbin', '--s_bin_size', type=binsize_type, help="Super bin size (use with -snp2rd)",
                        default=10000)
    parser.add_argument('-mask', '--mask', type=str, help="read fasta mask file and flag SNPs in P region")
    parser.add_argument('-mask_snps', '--mask_snps', action='store_true', help="flag SNPs in P region")
    parser.add_argument('-trio_phase', '--trio_phase', action='store_true', help="Phase trio")
    parser.add_argument('-parents', '--phase_parents', action='store_true', help="Phase parents")
    parser.add_argument('-mask_snvs', '--mask_snvs', type=str, help="flag SNVs in P region")
    parser.add_argument('-idvar', '--idvar', type=str, help="read vcf file and flag SNPs that exist in database file")
    parser.add_argument('-random_phase', '--random_phase', action='store_true', help="randomly phase SNPs")
    parser.add_argument('-baf', '--baf', type=binsize_type, nargs="+",
                        help="create BAF histograms for specified bin size (multiple bin sizes separate by space)")
    parser.add_argument('-nomask', '--no_mask', action='store_true', help="do not use P mask in BAF histograms")
    parser.add_argument('-useid', '--use_id', action='store_true', help="use id flag filtering in SNP histograms")
    parser.add_argument('-usehom', '--use_hom', action='store_true', help="use hom")
    parser.add_argument('-usephase', '--use_phase', action='store_true',
                        help="use information about phase while processing SNP data")
    parser.add_argument('-reducenoise', '--reduce_noise', action='store_true',
                        help="reduce noise in processing SNP data")
    parser.add_argument('-blw', '--baf_likelihood_width', type=float,
                        help="likelihood width used in processing SNP data (default=0.8)", default=0.8)
    parser.add_argument('-altc', '--alt_corr', action='store_true',
                        help="Remove alt/ref bias")
    parser.add_argument('-plot', '--plot', type=str, nargs="+", help="plotting")
    parser.add_argument('-view', '--view', type=binsize_type,
                        help="Enters interactive ploting mode")
    parser.add_argument('-agg', '--force_agg', action='store_true', help="Force Agg matplotlib backend")
    parser.add_argument('-panels', '--panels', type=str, nargs="+", default=["rd"], choices=["rd", "baf", "likelihood"],
                        help="plot panels (with -plot regions)")
    parser.add_argument('-style', '--plot_style', type=str,
                        help="available plot styles: " + ", ".join(plt.style.available), choices=plt.style.available)
    parser.add_argument('-o', '--plot_output_file', type=str, help="output filename prefix and extension", default="")
    parser.add_argument('-anim', '--animation', type=str, help="animation folder/prefix", default="")
    parser.add_argument('-make_gc_file', '--make_gc_genome_file', action='store_true',
                        help="used with -gc will create genome gc file")
    parser.add_argument('-make_mask_file', '--make_mask_genome_file', action='store_true',
                        help="used with -mask will create genome mask file")
    parser.add_argument('-rd_use_mask', '--use_mask_with_rd', action='store_true', help="used P mask in RD histograms")
    parser.add_argument('-nogc', '--no_gc_corr', action='store_true', help="do not use GC correction in RD histograms")
    parser.add_argument('-rg', '--reference_genome', type=str, help="Manually set reference genome", default=None)
    parser.add_argument('-sample', '--vcf_sample', type=str, help="Sample name in vcf file", default="")
    parser.add_argument('-conf', '--reference_genomes_conf', type=str, help="Configuration with reference genomes",
                        default=None)
    parser.add_argument('-ls', '--ls', action='store_true', help='list pytor file(s) content')
    parser.add_argument('-gc_info', '--gc_info', action='store_true', help='list pytor file(s) gc content stat')
    parser.add_argument('-rg_info', '--rg_info', action='store_true', help='list loaded reference gnomes')
    parser.add_argument('-info', '--info', type=binsize_type, nargs="*", help='print statistics for pythor file(s)')
    parser.add_argument('-qc', '--qc', type=binsize_type, nargs="*", help='print quality control statistics')
    parser.add_argument('-rdqc', '--rd_qc', type=binsize_type, nargs="*",
                        help='print quality control statistics without SNP data')
    parser.add_argument('-comp', '--compare', type=str, nargs="*", help='compere two regions: -comp reg1 reg2 [n_bins]')
    parser.add_argument('-genotype', '--genotype', type=str, nargs="*")
    parser.add_argument('-a', '--all', action='store_true', help='Genotype with all columns')
    parser.add_argument('-meta', '--metadata', action='store_true', help='list Metadata')
    parser.add_argument('-fasta2rg', '--reference_genome_template', type=str,
                        help="create template for reference genome using chromosome lengths from fasta file")
    parser.add_argument('-export', '--export', type=str, nargs="*", help='Export to jbrowse and cnvnator')
    args = parser.parse_args(sys.argv[1:])

    # --- logging setup -------------------------------------------------
    log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    if args.verbose in {"debug", "d"}:
        level = logging.DEBUG
    elif args.verbose in {"info", "i"}:
        level = logging.INFO
    elif args.verbose in {"warning", "w"}:
        level = logging.WARNING
    elif args.verbose in {"error", "e"}:
        level = logging.ERROR
    else:
        level = logging.CRITICAL
    if args.log_file:
        # File gets full DEBUG output; console verbosity follows -v.
        logging.basicConfig(filename=args.log_file, level=logging.DEBUG, format=log_format)
        logger = logging.getLogger('cnvpytor')
        ch = logging.StreamHandler()
        formatter = logging.Formatter(log_format)
        ch.setFormatter(formatter)
        ch.setLevel(level)
        logger.addHandler(ch)
    else:
        logging.basicConfig(level=level, format=log_format)
        logger = logging.getLogger('cnvpytor')
    logger.debug("Start logging...")

    # --- global actions that need no pytor file ------------------------
    # FIX: report the version before any resource check so that
    # `cnvpytor -version` always works.
    if args.version:
        print('CNVpytor {}'.format(__version__))
        return 0

    if args.reference_genome_template is not None:
        Fasta(args.reference_genome_template).print_reference_genome_template()

    if args.download_resources:
        Genome.download_resources()
        return 0

    if not Genome.check_resources():
        logger.error("Some reference genome resource files are missing. "
                     "Run 'cnvpytor -download' as same user who has installed cnvpytor.")
        return 0

    if args.reference_genomes_conf:
        Genome.load_reference_genomes(args.reference_genomes_conf)
    elif os.path.exists(os.path.expanduser('~/.cnvpytor/reference_genomes_conf.py')):
        Genome.load_reference_genomes(os.path.expanduser('~/.cnvpytor/reference_genomes_conf.py'))

    if args.rg_info:
        Genome.print_reference_genomes()

    if args.root is not None:
        # --- inspection / viewing actions ------------------------------
        if args.ls:
            show = Show(args.root)
            show.ls()

        if args.gc_info:
            show = Show(args.root)
            show.gc_info()

        if args.export:
            if len(args.export) > 0:
                dir_name_list = args.export[1:]
                dir_name = ''
                if len(dir_name_list) > 0:
                    dir_name = dir_name_list[0]
                export_program = args.export[0].lower()
                if export_program in ['jbrowse', 'cnvnator']:
                    if export_program == 'jbrowse':
                        export_j = ExportJBrowse(args.root, dir_name)
                        export_j.create_reference_json()
                        export_j.rd_signal()
                        export_j.snp_signal()
                        export_j.create_tracklist_json()
                    elif export_program == 'cnvnator':
                        logger.info("Under Development")
                else:
                    logger.error("Incorrect export program name")

        if args.metadata:
            show = Show(args.root)
            show.meta()

        if args.info is not None:
            show = Show(args.root)
            show.info(args.info)

        if args.genotype is not None:
            params = {"output_filename": args.plot_output_file,
                      "chrom": args.chrom,
                      "panels": args.panels,
                      "snp_use_mask": not args.no_mask,
                      "snp_use_id": args.use_id,
                      "rd_use_mask": args.use_mask_with_rd
                      }
            view = Viewer(args.root, params, force_agg=args.force_agg)
            view.genotype_prompt(list(map(binsize_type, args.genotype)), all=args.all)

        if args.qc is not None:
            params = {"bin_size": binsize_type(args.qc[-1]),
                      "chrom": args.chrom,
                      "snp_use_mask": not args.no_mask,
                      "snp_use_id": args.use_id,
                      "rd_use_mask": args.use_mask_with_rd,
                      "rd_use_gc_corr": not args.no_gc_corr
                      }
            view = Viewer(args.root, params, force_agg=args.force_agg)
            view.qc()

        if args.rd_qc is not None:
            params = {"bin_size": binsize_type(args.rd_qc[-1]),
                      "chrom": args.chrom,
                      "snp_use_mask": not args.no_mask,
                      "snp_use_id": args.use_id,
                      "rd_use_mask": args.use_mask_with_rd,
                      "rd_use_gc_corr": not args.no_gc_corr
                      }
            view = Viewer(args.root, params, force_agg=args.force_agg)
            view.qc(snp_qc=False)

        if args.compare is not None:
            params = {"bin_size": binsize_type(args.compare[-1]),
                      "rd_use_gc_corr": not args.no_gc_corr,
                      "rd_use_mask": args.use_mask_with_rd
                      }
            view = Viewer(args.root, params, force_agg=args.force_agg)
            if len(args.compare) == 3:
                view.compare(args.compare[0], args.compare[1])
            elif len(args.compare) == 4:
                view.compare(args.compare[0], args.compare[1], int(args.compare[2]))

        # --- data import actions ---------------------------------------
        if args.rd:
            app = Root(args.root[0], create=True, max_cores=args.max_cores)
            app.rd(args.rd, chroms=args.chrom, reference_filename=args.reference_filename)

        if args.reference_genome:
            app = Root(args.root[0], max_cores=args.max_cores)
            app.set_reference_genome(args.reference_genome)

        if args.plot:
            params = {"output_filename": args.plot_output_file,
                      "chrom": args.chrom,
                      "panels": args.panels,
                      "snp_use_mask": not args.no_mask,
                      "snp_use_id": args.use_id,
                      "rd_use_mask": args.use_mask_with_rd,
                      "rd_use_gc_corr": not args.no_gc_corr
                      }
            if args.plot_style:
                params["style"] = args.plot_style
            view = Viewer(args.root, params)
            view.plot_command(args.plot)

        if args.view:
            params = {"bin_size": args.view,
                      "output_filename": args.plot_output_file,
                      "chrom": args.chrom,
                      "panels": args.panels,
                      "snp_use_mask": not args.no_mask,
                      "snp_use_id": args.use_id,
                      "rd_use_mask": args.use_mask_with_rd,
                      "rd_use_gc_corr": not args.no_gc_corr
                      }
            if args.plot_style:
                params["style"] = args.plot_style
            view = Viewer(args.root, params, force_agg=args.force_agg)
            view.prompt()

        if args.gc:
            app = Root(args.root[0], create=True, max_cores=args.max_cores)
            app.gc(args.gc, chroms=args.chrom, make_gc_genome_file=args.make_gc_genome_file)

        if args.copy_gc:
            app = Root(args.root[0], create=True, max_cores=args.max_cores)
            app.copy_gc(args.copy_gc, chroms=args.chrom)

        if args.vcf:
            app = Root(args.root[0], create=True, max_cores=args.max_cores)
            app.vcf(args.vcf, chroms=args.chrom, sample=args.vcf_sample, no_counts=args.no_snp_counts,
                    ad_tag=args.ad_tag, gt_tag=args.gt_tag, filter=not args.no_filter)

        if args.idvar:
            app = Root(args.root[0], create=True, max_cores=args.max_cores)
            app.variant_id(args.idvar, chroms=args.chrom)

        if args.somatic_snv:
            app = Root(args.root[0], create=True, max_cores=args.max_cores)
            callset = "default" if args.callset is None else args.callset
            app.vcf(args.somatic_snv, chroms=args.chrom, sample=args.vcf_sample, no_counts=args.no_snp_counts,
                    ad_tag=args.ad_tag, gt_tag=args.gt_tag, filter=not args.no_filter, callset=callset)

        if args.rd_from_vcf:
            app = Root(args.root[0], create=True, max_cores=args.max_cores)
            app.rd_from_vcf(args.rd_from_vcf, chroms=args.chrom, sample=args.vcf_sample, ad_tag=args.ad_tag,
                            dp_tag=args.dp_tag)

        if args.pileup_bam:
            app = Root(args.root[0], max_cores=args.max_cores)
            app.pileup(args.pileup_bam, chroms=args.chrom, reference_filename=args.reference_filename)

        if args.rd_from_snp:
            app = Root(args.root[0], max_cores=args.max_cores)
            app.rd_from_snp(chroms=args.chrom, use_mask=not args.no_mask, use_id=args.use_id,
                            s_bin_size=args.s_bin_size)

        if args.mask:
            app = Root(args.root[0], create=True, max_cores=args.max_cores)
            app.mask(args.mask, chroms=args.chrom, make_mask_genome_file=args.make_mask_genome_file)

        if args.mask_snps:
            app = Root(args.root[0], max_cores=args.max_cores)
            app.mask_snps()

        if args.mask_snvs:
            app = Root(args.root[0], max_cores=args.max_cores)
            app.mask_snps(callset=args.mask_snvs)

        if args.random_phase:
            app = Root(args.root[0], max_cores=args.max_cores)
            app.random_phase()

        if args.trio_phase:
            app = Trio(args.root)
            app.trio_phase(parents=args.phase_parents)

        # --- histogram / statistics actions ----------------------------
        if args.stat:
            app = Root(args.root[0], max_cores=args.max_cores)
            app.rd_stat(chroms=args.chrom)

        if args.his:
            app = Root(args.root[0], max_cores=args.max_cores)
            app.calculate_histograms(args.his, chroms=args.chrom)

        if args.his_from_snp:
            app = Root(args.root[0], max_cores=args.max_cores)
            app.calculate_histograms_from_snp_counts(args.his_from_snp, chroms=args.chrom, use_mask=not args.no_mask,
                                                     use_id=args.use_id, callset=args.callset,
                                                     min_count=args.min_count)
        if args.baf:
            app = Root(args.root[0], max_cores=args.max_cores)
            app.calculate_baf(args.baf, chroms=args.chrom, use_mask=not args.no_mask, use_id=args.use_id,
                              use_phase=args.use_phase, res=args.baf_resolution, reduce_noise=args.reduce_noise, blw=args.baf_likelihood_width,
                              use_hom=args.use_hom, alt_ref_correct=args.alt_corr, save_likelihood=not args.no_save_likelihood)

        if args.partition:
            app = Root(args.root[0], max_cores=args.max_cores)
            app.partition(args.partition, chroms=args.chrom, use_gc_corr=not args.no_gc_corr,
                          use_mask=args.use_mask_with_rd)

        # --- CNV calling -----------------------------------------------
        if args.call:
            app = Root(args.root[0], max_cores=args.max_cores)
            if args.call[0] == "baf":
                # Optional second token selects event type; default is both.
                if args.call[1] in ["mosaic", "germline"]:
                    event_type = args.call[1]
                    bins = list(map(binsize_type, args.call[2:]))
                else:
                    event_type = "both"
                    bins = list(map(binsize_type, args.call[1:]))
                if args.use_phase:
                    app.call_baf_phased(bins, chroms=args.chrom, event_type=event_type, print_calls=True,
                                        use_gc_corr=not args.no_gc_corr,
                                        rd_use_mask=args.use_mask_with_rd, snp_use_mask=not args.no_mask, snp_use_id=args.use_id,
                                        mcount=args.min_count, max_copy_number=args.max_copy_number,
                                        min_cell_fraction=args.min_cell_fraction, baf_threshold=args.baf_threshold,
                                        omin=args.overlap_threshold, use_hom=args.use_hom, anim=args.animation)
                else:
                    app.call_baf(bins, chroms=args.chrom, event_type=event_type, print_calls=True,
                                 use_gc_corr=not args.no_gc_corr,
                                 rd_use_mask=args.use_mask_with_rd, snp_use_mask=not args.no_mask, snp_use_id=args.use_id,
                                 mcount=args.min_count, max_copy_number=args.max_copy_number,
                                 min_cell_fraction=args.min_cell_fraction, baf_threshold=args.baf_threshold,
                                 omin=args.overlap_threshold, use_hom=args.use_hom, anim=args.animation)
                # app.call_baf_old([binsize_type(x) for x in args.call[1:]], chroms=args.chrom, use_id=args.use_id,
                #             use_mask=not args.no_mask, mcount=args.min_count, anim=args.animation)
            elif args.call[0] == "mosaic":
                app.call_mosaic(list(map(binsize_type, args.call[1:])), chroms=args.chrom,
                                use_gc_corr=not args.no_gc_corr,
                                use_mask=args.use_mask_with_rd, anim=args.animation)
            elif args.call[0] == "subclones":
                bins = list(map(binsize_type, args.call[1:]))
                app.call_subclones(bins, chroms=args.chrom, cnv_calls="calls combined", print_calls=True,
                                   use_gc_corr=not args.no_gc_corr, rd_use_mask=args.use_mask_with_rd,
                                   snp_use_mask=not args.no_mask, snp_use_id=args.use_id,
                                   max_copy_number=args.max_copy_number,
                                   min_cell_fraction=args.min_cell_fraction, baf_threshold=args.baf_threshold)
            elif args.call[0] == "combined":
                if args.call[1] in ["mosaic", "germline"]:
                    event_type = args.call[1]
                    bins = list(map(binsize_type, args.call[2:]))
                else:
                    event_type = "both"
                    bins = list(map(binsize_type, args.call[1:]))
                if args.use_phase:
                    app.call_2d_phased(bins, chroms=args.chrom, event_type=event_type, print_calls=True,
                                       use_gc_corr=not args.no_gc_corr,
                                       rd_use_mask=args.use_mask_with_rd, snp_use_mask=not args.no_mask, snp_use_id=args.use_id,
                                       mcount=args.min_count, max_copy_number=args.max_copy_number,
                                       min_cell_fraction=args.min_cell_fraction, baf_threshold=args.baf_threshold,
                                       omin=args.overlap_threshold, use_hom=args.use_hom, anim=args.animation)
                else:
                    app.call_2d(bins, chroms=args.chrom, event_type=event_type, print_calls=True,
                                use_gc_corr=not args.no_gc_corr,
                                rd_use_mask=args.use_mask_with_rd, snp_use_mask=not args.no_mask, snp_use_id=args.use_id,
                                mcount=args.min_count, max_copy_number=args.max_copy_number,
                                min_cell_fraction=args.min_cell_fraction, baf_threshold=args.baf_threshold,
                                omin=args.overlap_threshold, use_hom=args.use_hom, anim=args.animation)
            else:
                # Plain RD caller: every token is a bin size.
                app.call(list(map(binsize_type, args.call)), chroms=args.chrom, print_calls=True,
                         use_gc_corr=not args.no_gc_corr, use_mask=args.use_mask_with_rd)
if __name__ == '__main__':
main() | PypiClean |
/CLI_processor-1.0.0-py3-none-any.whl/mypackage/__init__.py | import sys
import pandas as pd
from data_description import Description
from imputation import Imputation
from categorical import Categorial
from feature_scaling import Feature
from download import Download
class Preprocessing:
    """Interactive CLI driver for basic dataset preprocessing.

    Usage: the script must be invoked with exactly one argument, the path of
    the CSV file to preprocess.  FIX: the original validated ``sys.argv[1]``
    in ``__init__`` but then ignored it and always read the hard-coded file
    ``'train.csv'`` in :meth:`remove_column`; the validated path is now used.
    """

    def __init__(self):
        # Validate CLI usage: exactly one argument that names a .csv file.
        if len(sys.argv) != 2 or not sys.argv[1].endswith('.csv'):
            print("No of arguments are not meet its requirments.")
            self.filename = None
            return
        # Remember the CSV path supplied on the command line.
        self.filename = sys.argv[1]
        print("Welcome to machine learning cli tool")

    def remove_column(self):
        """Load the CSV and interactively drop the user-chosen target column."""
        if getattr(self, 'filename', None) is None:
            print("No valid .csv file was supplied on the command line.")
            return
        self.df = pd.read_csv(self.filename)
        print(self.df.head())
        print("Here is the list of all the columns")
        for col_name in self.df.columns:
            print(col_name, end=' ')
        print()
        while True:
            print("Enter the target column : ")
            col = input()
            print(col)
            print("Are you Sure(y/n)")
            res = input()
            if res == 'y':
                if col in self.df.columns:
                    self.df.drop(col, axis=1, inplace=True)
                    print(self.df.head())
                    print(self.df.columns)
                    print("Done....")
                    break
                else:
                    print("Entered value in not present in columns list , Re-enter the column")
                    continue

    def tasks(self):
        """Menu loop dispatching to the preprocessing sub-modules.

        Runs until the user enters -1.
        """
        while True:
            print("Tasks....")
            print("1. Data Description")
            print("2. Handling Null Values")
            print("3. Encoding Categorical Data")
            print("4. Feature Scaling of the Dataset")
            print("5. Download the modified dataset")
            print()
            print()
            print("What do you want to do?(Press -1 to exit) : ")
            value = int(input())
            print(value)
            if value == 1:
                obj1 = Description(self.df)
                obj1.tasks()
            elif value == 2:
                obj2 = Imputation(self.df)
                obj2.tasks()
            elif value == 3:
                obj3 = Categorial(self.df)
                obj3.tasks()
            elif value == 4:
                obj4 = Feature(self.df)
                obj4.tasks()
            elif value == 5:
                obj5 = Download(self.df)
                obj5.download()
            elif value == -1:
                break
obj = Preprocessing()
obj.remove_column()
obj.tasks() | PypiClean |
/Jalapeno-Lite-0.1.3.tar.gz/Jalapeno-Lite-0.1.3/Jalapeno_data/Sites/first/Pages/blog/getstart.md | title: 使用Jalapeno快速搭建博客
date: 2017-01-19
tag: Flask
[TOC]
<!--Sidebar-->
上次我们讲了如何使用Flask系列来搭建静态博客,但是实际上功能仍然比较单一。为了省去大家重复造轮子的辛苦,老钱同志在今年年初发布了Jalapeno。由于偷懒原因(逃),官方文档一直未能发布。这次我们讲如何使用Jalapeno快速搭建自己的博客网站。

注:Jalapeno当前支持Mac/Linux, Windows目前尚未测试。
<!--More-->
##安装
在使用Jalapeno之前,我们需要先将所需的软件下载下来。如果已经安装或者电脑上带有,则跳过此环节
- Python3
- pip3(Python3的软件包管理器)
- Jalapeno(我们的博客生成器)
- git(将网页文件部署至Github/Coding)
###Python3/pip3 系列安装
我们在[Python](https://www.python.org/downloads/)的官方网站可以下载对应的版本,最新版本为3.6.0
###终端安装方法
我们以Fedora(Linux)为例
sudo dnf install python3 python3-pip
Ubuntu:
sudo apt-get install python3 python3-pip
Mac:
先安装homebrew
/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
使用brew(包管理器,类似于dnf/apt)
brew install python3
让我们来测试一下
在终端输入'python3':
localhost:pages Jakob$ python3
成功进入到Python3的交互界面,注意版本号,当前版本是3.5.1,对我们来说3.4以上的版本最好
Python 3.5.1 (default, Jan 22 2016, 08:52:08)
[GCC 4.2.1 Compatible Apple LLVM 7.0.2 (clang-700.1.81)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
>>>
再来看我们的pip3,在终端输入'pip3 -V'查看版本:
localhost:pages Jakob$ pip3 -V
得到(pip3版本号8以上即可)
pip 8.0.2 from /usr/local/lib/python3.5/site-packages (python 3.5)
大功告成!来看我们如何安装Jalapeno
###Jalapeno安装
Jalapeno的安装过程十分简单,你只需要在终端执行以下代码即可。如果你遇到无法解决的问题,在这里[提交问题](),我会尽快作出回应。
$sudo pip3 install Jalapeno
Collecting Jalapeno
Downloading Jalapeno-0.0.8.tar.gz (4.5MB)
100% |████████████████████████████████| 4.5MB 91kB/s
Installing collected packages: Jalapeno
Running setup.py install for Jalapeno ... done
Successfully installed Jalapeno-0.0.8
$
我们测试一下,直接在终端输入'Jalop',会得到
ERROR: Not enough or too many parameters. use "Jalop help" or "Jalop -h" for help
上面的报错是Jalop执行的结果,我们可以看到Jalop是已经被安装到电脑上的。
##初始化Jalapeno目录
首先我们在终端执行
$Jalop -s
或
$Jalop shortcuts
该指令可以在你的用户主目录(下载/文档/图片/...的上一级目录)下生成一个'Jalapeno'文件夹,但是你的文件夹上面会有一把锁子的图标,意味着你的文件夹需要访问权限,需要输入密码才能修改,你可能会说这多不方便啊。于是我们有了下面的操作,把这把锁子去掉
$Jalop -u
或
$Jalop unlock
好了,来看我们的目录文件夹吧。

##Jalapeno目录介绍
我们来看一下新的Jalapeno文件夹目录构成
Jalapeno
├── Profile
| └── profile.yaml
|
├── build
├── source
| ├── pages
| └── image
Jalapeno由三个文件夹构成,其中
- Profile存放配置文件及个人信息,目前版本所有信息存放于profile.yaml
- source 存放文档及资源文件
- pages存放文档,你将来写的博客文章存放在这里
- image存放图片,将来所有的图片都将存放在这里,建议大家在image中创建于文章名对应的文件夹并将图片放入其中,比如目前我们有一篇文章名字为test.md,我们就在image下创建test文件夹,并将test.md中使用的图片放在该目录下
##配置Jalapeno
在第一次运行前我们需要对Jalapeno进行简单的配置,例如你的个人信息什么的,在我们生成的Jalapeno文件夹的Profile子文件夹中有一个'profile.yaml'文件,用编辑器打开它,找到并修改以下内容
Name: 我的博客
Motto: 你总有一个坚持下去的理由
Github: https://github.com/ChenghaoQ
copyright: 版权归ChenghaoQ所有
除此之外,本主题含有的页面头像对应Jalapeno/source/image/theme/Selfie.jpg,如果需要更改使用其他文件替换即可
效果展示

##开始一篇博客
现在你准备好了吗?
首先我们要在'pages'文件夹下创建一个空白文档'test.md',这里test只是一个名字,你可以给你的文章起任何名字,它将会影响到你未来网页的链接地址:
yourwebsite.com/article/test
接着我们要编辑文档的开头,注意冒号后面要空格
title: 这里写文章标题
date: 这里写发表日期,格式为 YYYY-MM-DD
tag: 这里是你的文章分类/标签名称
接着我们编写正文,正文要与之前的开头用一个空行隔开
hello world!balabalabala....
balabalabala....
balabalabala....
在之前我们提到过图片都放在image文件夹下的文章同名子文件夹下,现在假设我们的testpic.jpg在image/test文件夹下,路径为
Jalapeno/source/image/test/testimg.jpg
我们配合Markdown引用图片的语法:
\!\[\]\(\图片地址)
而我们的图片地址表示方法为
\{\{image.子文件夹名.图片名}},
所以最后引用的方法为
hello world!balabalabala....
\!\[]\(\{\{image.test.testpic}})
balabalabala....
balabalabala....
如果你想在文章列表中显示摘要,我们使用<!\--More-->来进行分隔。<!\--More-->之前内容会被放到你的文章列表的摘要中
hello world!balabalabala....
<!\--More-->
balabalabala....
balabalabala....
如果你想在你的文章中启用索引/目录,我们使用\[TOC\]作为标示,将\[TOC\]放入你希望的位置,Jalapeno会在该位置生成目录。前提是你有使用'\#'号来注明各个子标题
\[TOC\]
hello world!balabalabala....
<!\--More-->
##第一个标题
balabalabala....
##第二个标题
balabalabala....
如果你想将目录放入侧边栏而不是正文,我们使用<!\--Sidebar-->进行标记,<!\--Sidebar-->上面的内容会被放入侧边栏目录中,注意,与\[TOC\]用空行隔开
\[TOC\]
<!\--Sidebar-->
hello world!balabalabala....
<!\--More-->
##第一个标题
balabalabala....
##第二个标题
balabalabala....
想要了解更多Markdown语法,参见[Markdown 语法说明](https://github.com/riku/Markdown-Syntax-CN/blob/master/syntax.md)
到这里,我们的博客就写完啦,在发布前我们需要对其测试
##本地测试
$Jalop -r
$Jalop run
终端显示
localhost:~ Jakob$ Jalop run
* Running on http://127.0.0.1:9999/ (Press CTRL+C to quit)
* Restarting with stat
* Debugger is active!
* Debugger pin code: 111-037-567
...
这时打开浏览器,进入127.0.0.1:9999,就可以看到我们的网站啦
预览效果





##网页生成
当我们在测试服务器上确认网页运行正常后,我们将要生成网页
执行
$Jalop -f
或
$Jalop freeze
生成后就可以看到我们生成的网页啦。

##部署
接着我们将生成的网页部署在
- 自己的服务器上
或托管在免费的
- [Github pages](https://pages.github.com)
- [Coding pages](https://coding.net/help/doc/pages/)
上.详细方法点击链接参见说明文档。过程很简单,就是将生成的文件上传至指定位置即可。
| PypiClean |
/Office365_REST_with_timeout-0.1.1-py3-none-any.whl/office365/directory/applications/application.py | from office365.directory.directory_object_collection import DirectoryObjectCollection
from office365.directory.directory_object import DirectoryObject
from office365.directory.extensions.extension_property import ExtensionProperty
from office365.directory.key_credential import KeyCredential
from office365.directory.password_credential import PasswordCredential
from office365.entity_collection import EntityCollection
from office365.runtime.client_result import ClientResult
from office365.runtime.client_value_collection import ClientValueCollection
from office365.runtime.queries.service_operation_query import ServiceOperationQuery
from office365.runtime.resource_path import ResourcePath
class Application(DirectoryObject):
"""
Represents an application. Any application that outsources authentication to Azure Active Directory (Azure AD)
must be registered in a directory. Application registration involves telling Azure AD about your application,
including the URL where it's located, the URL to send replies after authentication,
the URI to identify your application, and more. For more information, see Basics of Registering
an Application in Azure AD
"""
def add_password(self, display_name):
"""Adds a strong password to an application.
:param str display_name: App display name
"""
params = PasswordCredential(displayName=display_name)
result = ClientResult(self.context, params)
qry = ServiceOperationQuery(self, "addPassword", None, params, None, result)
self.context.add_query(qry)
return result
def remove_password(self, key_id):
"""Remove a password from an application."""
qry = ServiceOperationQuery(self, "removePassword", None, {"keyId": key_id})
self.context.add_query(qry)
return self
def delete_object(self, permanent_delete=False):
"""
:param permanent_delete: Permanently deletes the application from directory
:type permanent_delete: bool
"""
super(Application, self).delete_object()
if permanent_delete:
deleted_item = self.context.directory.deleted_applications[self.id]
deleted_item.delete_object()
return self
def set_verified_publisher(self, verified_publisher_id):
"""Set the verifiedPublisher on an application.
For more information, including prerequisites to setting a verified publisher, see Publisher verification.
:param str verified_publisher_id: The Microsoft Partner Network ID (MPNID) of the verified publisher
to be set on the application, from the publisher's Partner Center account.
"""
qry = ServiceOperationQuery(self, "setVerifiedPublisher", None, {"verifiedPublisherId": verified_publisher_id})
self.context.add_query(qry)
return self
def unset_verified_publisher(self):
"""Unset the verifiedPublisher previously set on an application, removing all verified publisher properties.
For more information, see Publisher verification.
"""
qry = ServiceOperationQuery(self, "unsetVerifiedPublisher")
self.context.add_query(qry)
return self
def add_key(self, key_credential, password_credential, proof):
"""
Add a key credential to an application. This method, along with removeKey can be used by an application
to automate rolling its expiring keys.
:param KeyCredential key_credential: The new application key credential to add.
The type, usage and key are required properties for this usage. Supported key types are:
AsymmetricX509Cert: The usage must be Verify.
X509CertAndPassword: The usage must be Sign
:param PasswordCredential password_credential: Only secretText is required to be set which should contain the password
for the key. This property is required only for keys of type X509CertAndPassword. Set it to null otherwise.
:param str proof: A self-signed JWT token used as a proof of possession of the existing keys
"""
payload = {
"keyCredential": key_credential,
"passwordCredential": password_credential,
"proof": proof,
}
return_type = ClientResult(self.context, KeyCredential())
qry = ServiceOperationQuery(self, "addKey", None, payload, None, return_type)
self.context.add_query(qry)
return return_type
def remove_key(self, keyId, proof):
"""
Remove a key credential from an application.
This method along with addKey can be used by an application to automate rolling its expiring keys.
:param str keyId: The unique identifier for the password.
:param str proof: A self-signed JWT token used as a proof of possession of the existing keys.
This JWT token must be signed using the private key of one of the application's existing
valid certificates. The token should contain the following claims:
aud - Audience needs to be 00000002-0000-0000-c000-000000000000.
iss - Issuer needs to be the id of the application that is making the call.
nbf - Not before time.
exp - Expiration time should be "nbf" + 10 mins.
"""
qry = ServiceOperationQuery(self, "removeKey", None, {"keyId": keyId, "proof": proof})
self.context.add_query(qry)
return self
    @property
    def key_credentials(self):
        """The collection of key credentials associated with the application. Not nullable.

        :rtype: ClientValueCollection
        """
        return self.properties.get('keyCredentials', ClientValueCollection(KeyCredential))
    @property
    def display_name(self):
        """
        The display name for the application.
        Supports $filter (eq, ne, NOT, ge, le, in, startsWith), $search, and $orderBy.

        Returns None when the property has not been loaded from the server.

        :rtype: str or None
        """
        return self.properties.get('displayName', None)
    @property
    def identifier_uris(self):
        """
        The URIs that identify the application within its Azure AD tenant, or within a verified custom domain
        if the application is multi-tenant. For more information see Application Objects and Service Principal Objects.
        The any operator is required for filter expressions on multi-valued properties.

        :rtype: ClientValueCollection
        """
        return self.properties.get('identifierUris', ClientValueCollection(str))
    @property
    def signin_audience(self):
        """
        Specifies the Microsoft accounts that are supported for the current application.
        Supported values are: AzureADMyOrg, AzureADMultipleOrgs, AzureADandPersonalMicrosoftAccount,
        PersonalMicrosoftAccount

        Returns None when the property has not been loaded from the server.

        :rtype: str or None
        """
        return self.properties.get('signInAudience', None)
@property
def owners(self):
"""Directory objects that are owners of the application. Read-only.
:rtype: DirectoryObjectCollection
"""
return self.get_property('owners',
DirectoryObjectCollection(self.context, ResourcePath("owners", self.resource_path)))
@property
def extension_properties(self):
"""List extension properties on an application object.
:rtype: EntityCollection
"""
return self.get_property('extensionProperties',
EntityCollection(self.context, ExtensionProperty,
ResourcePath("extensionProperties", self.resource_path))) | PypiClean |
/observations-0.1.4.tar.gz/observations-0.1.4/observations/r/insurance.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def insurance(path):
"""Numbers of Car Insurance claims
The data given in data frame `Insurance` consist of the numbers of
policyholders of an insurance company who were exposed to risk, and the
numbers of car insurance claims made by those policyholders in the third
quarter of 1973.
This data frame contains the following columns:
`District`
factor: district of residence of policyholder (1 to 4): 4 is major
cities.
`Group`
an ordered factor: group of car with levels <1 litre, 1–1.5 litre,
1.5–2 litre, >2 litre.
`Age`
an ordered factor: the age of the insured in 4 groups labelled <25,
25–29, 30–35, >35.
`Holders`
numbers of policyholders.
`Claims`
numbers of claims
L. A. Baxter, S. M. Coutts and G. A. F. Ross (1980) Applications of
linear models in motor insurance. *Proceedings of the 21st International
Congress of Actuaries, Zurich* pp. 11–29.
M. Aitkin, D. Anderson, B. Francis and J. Hinde (1989) *Statistical
Modelling in GLIM.* Oxford University Press.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `insurance.csv`.
Returns:
Tuple of np.ndarray `x_train` with 64 rows and 5 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'insurance.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/MASS/Insurance.csv'
maybe_download_and_extract(path, url,
save_file_name='insurance.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata | PypiClean |
/Kyak2-Void-0.2.0.tar.gz/Kyak2-Void-0.2.0/README.md | Needed apps to make this app work. <br>
1- Tkinter <br>
2- Python3 <br>
3- Xscreensaver <br>
4- Xterm is needed for installer/uninstaller <br>
<br>
<br>
*This version is for only Void(Runit) Linux supported <br> <br>
* elogind needs to be installed, it may be already installed. <br>
*İf u install with PİP, app is installed at ~/.local/lib/python3.9/ <br>
*Hidden files can be revealed by CTRL+H, files start with . are hidden.
* # pip show Kyak2-Void to show all info.
*İnstaller is included it also uninstalls. | PypiClean |
/Mopidy-MusicBox-Webclient-3.1.0.tar.gz/Mopidy-MusicBox-Webclient-3.1.0/mopidy_musicbox_webclient/static/js/functionsvars.js | var mopidy
var syncedProgressTimer
// values for controls
var play = false
var random
var repeat
var consume
var single
var mute
var volumeChanging
var volumeSliding = false
var positionChanging
var initgui = true
var popupData = {} // TODO: Refactor into one shared cache
var songlength = 0
var artistsHtml = ''
var artistsText = ''
var albumHtml = ''
var albumText = ''
var songname = ''
var songdata = {'track': {}, 'tlid': -1}
var pageScrollPos = {}
var STREAMS_PLAYLIST_NAME = '[Radio Streams]'
var STREAMS_PLAYLIST_SCHEME = 'm3u'
var uriSchemes = {}
// array of cached playlists (not only user-playlists, also search, artist, album-playlists)
var playlists = {} // TODO: Refactor into one shared cache
var currentplaylist
var customTracklists = [] // TODO: Refactor into one shared cache
var browseStack = []
var ua = navigator.userAgent || navigator.vendor || window.opera
var isMobileSafari = /Mac/.test(ua) && /Mobile/.test(ua)
var isMobile = isMobileAll()
// constants
PROGRAM_NAME = $(document.body).data('program-name')
HOSTNAME = $(document.body).data('hostname')
ARTIST_TABLE = '#artiststable'
ALBUM_TABLE = '#albumstable'
BROWSE_TABLE = '#browsetable'
PLAYLIST_TABLE = '#playlisttracks'
CURRENT_PLAYLIST_TABLE = '#currenttable'
SEARCH_ALL_TABLE = '#allresulttable'
SEARCH_ALBUM_TABLE = '#albumresulttable'
SEARCH_ARTIST_TABLE = '#artistresulttable'
SEARCH_TRACK_TABLE = '#trackresulttable'
URI_SCHEME = 'mbw'
PLAY_NOW = 0
PLAY_NEXT = 1
ADD_THIS_BOTTOM = 2
ADD_ALL_BOTTOM = 3
PLAY_ALL = 4
DYNAMIC = 5
INSERT_AT_INDEX = 6
// the first part of Mopidy extensions which serve radio streams
var radioExtensionsList = ['somafm', 'tunein', 'dirble', 'audioaddict']
var uriClassList = [
['spotify', 'fa-spotify'],
['spotifytunigo', 'fa-spotify'],
['spotifyweb', 'fa-spotify'],
['local', 'fa-file-sound-o'],
['file', 'fa-file-sound-o'],
['m3u', 'fa-file-sound-o'],
['podcast', 'fa-rss-square'],
['podcast+file', 'fa-rss-square'],
['podcast+itunes', 'fa-apple'],
['dirble', 'fa-microphone'],
['tunein', 'fa-headphones'],
['soundcloud', 'fa-soundcloud'],
['sc', 'fa-soundcloud'],
['gmusic', 'fa-google'],
['internetarchive', 'fa-university'],
['somafm', 'fa-flask'],
['youtube', 'fa-youtube'],
['yt', 'fa-youtube'],
['audioaddict', 'fa-bullhorn'],
['subsonic', 'fa-folder-open']
]
// TODO: It should be possible to retrieve a user-friendly name for a given Mopidy scheme dynamically by
// calling mopidy.library.browse() on the root dir:
// 1. each backend contained in the result will have a 'name' attribute that can be shown as-is in the UI.
// 2. the URI prefix of the backend result should === mopidy.getUriSchemes(), which can be used for the mapping.
// 3. only backends that cannot be 'browsed' (e.g. youtube) should have a static mapping defined here.
var uriHumanList = [
['spotify', 'Spotify'],
['spotifytunigo', 'Spotify browse'],
['spotifyweb', 'Spotify browse'],
['local', 'Local media'],
['m3u', 'Local playlists'],
['podcast', 'Podcasts'],
['podcast+itunes', 'iTunes Store: Podcasts'],
['dirble', 'Dirble'],
['tunein', 'TuneIn'],
['soundcloud', 'SoundCloud'],
['gmusic', 'Google Music'],
['internetarchive', 'Internet Archive'],
['somafm', 'Soma FM'],
['youtube', 'YouTube'],
['audioaddict', 'AudioAddict'],
['subsonic', 'Subsonic']
]
// List of Mopidy URI schemes that should not be searched directly.
// Also blacklists 'yt' in favour of using the other 'youtube' supported scheme.
var searchBlacklist = [
'file',
'http',
'https',
'mms',
'rtmp',
'rtmps',
'rtsp',
'yt'
]
// List of known audio file extensions
// TODO: consider querying GStreamer for supported audio formats - see:https://discuss.mopidy.com/t/supported-codecs-file-formats/473
var audioExt = [
'aa', 'aax', // Audible.com
'aac', // Advanced Audio Coding format
'aiff', // Apple
'au', // Sun Microsystems
'flac', // Free Lossless Audio Codec
'gsm',
'iklax',
'ivs',
'm4a',
'm4b',
'm4p',
'mp3',
'mpc', // Musepack
'ogg', 'oga', 'mogg', // Ogg-Vorbis
'opus', // Internet Engineering Task Force (IETF)
'ra', 'rm', // RealAudio
'raw',
'tta', // True Audio
'vox',
'wav',
'wma', // Microsoft
'wv',
'webm' // HTML5 video
]
function scrollToTop () {
$('body,html').animate({
scrollTop: 0
}, 250)
}
function scrollToTracklist () {
var divtop = $('#playlisttracksdiv').offset().top - 120
$('body,html').animate({
scrollTop: divtop
}, 250)
}
function isMobileAll () {
// Checks for known mobile and tablet devices - see http://stackoverflow.com/questions/11381673/detecting-a-mobile-browser
var regexpMobile = /(android|bb\d+|meego).+mobile|avantgo|bada\/|blackberry|blazer|compal|elaine|fennec|hiptop|iemobile|ip(hone|od)|iris|kindle|lge |maemo|midp|mmp|mobile.+firefox|netfront|opera m(ob|in)i|palm( os)?|phone|p(ixi|re)\/|plucker|pocket|psp|series(4|6)0|symbian|treo|up\.(browser|link)|vodafone|wap|windows ce|xda|xiino|android|ipad|playbook|silk/i
var regexpTablet = /1207|6310|6590|3gso|4thp|50[1-6]i|770s|802s|a wa|abac|ac(er|oo|s\-)|ai(ko|rn)|al(av|ca|co)|amoi|an(ex|ny|yw)|aptu|ar(ch|go)|as(te|us)|attw|au(di|\-m|r |s )|avan|be(ck|ll|nq)|bi(lb|rd)|bl(ac|az)|br(e|v)w|bumb|bw\-(n|u)|c55\/|capi|ccwa|cdm\-|cell|chtm|cldc|cmd\-|co(mp|nd)|craw|da(it|ll|ng)|dbte|dc\-s|devi|dica|dmob|do(c|p)o|ds(12|\-d)|el(49|ai)|em(l2|ul)|er(ic|k0)|esl8|ez([4-7]0|os|wa|ze)|fetc|fly(\-|_)|g1 u|g560|gene|gf\-5|g\-mo|go(\.w|od)|gr(ad|un)|haie|hcit|hd\-(m|p|t)|hei\-|hi(pt|ta)|hp( i|ip)|hs\-c|ht(c(\-| |_|a|g|p|s|t)|tp)|hu(aw|tc)|i\-(20|go|ma)|i230|iac( |\-|\/)|ibro|idea|ig01|ikom|im1k|inno|ipaq|iris|ja(t|v)a|jbro|jemu|jigs|kddi|keji|kgt( |\/)|klon|kpt |kwc\-|kyo(c|k)|le(no|xi)|lg( g|\/(k|l|u)|50|54|\-[a-w])|libw|lynx|m1\-w|m3ga|m50\/|ma(te|ui|xo)|mc(01|21|ca)|m\-cr|me(rc|ri)|mi(o8|oa|ts)|mmef|mo(01|02|bi|de|do|t(\-| |o|v)|zz)|mt(50|p1|v )|mwbp|mywa|n10[0-2]|n20[2-3]|n30(0|2)|n50(0|2|5)|n7(0(0|1)|10)|ne((c|m)\-|on|tf|wf|wg|wt)|nok(6|i)|nzph|o2im|op(ti|wv)|oran|owg1|p800|pan(a|d|t)|pdxg|pg(13|\-([1-8]|c))|phil|pire|pl(ay|uc)|pn\-2|po(ck|rt|se)|prox|psio|pt\-g|qa\-a|qc(07|12|21|32|60|\-[2-7]|i\-)|qtek|r380|r600|raks|rim9|ro(ve|zo)|s55\/|sa(ge|ma|mm|ms|ny|va)|sc(01|h\-|oo|p\-)|sdk\/|se(c(\-|0|1)|47|mc|nd|ri)|sgh\-|shar|sie(\-|m)|sk\-0|sl(45|id)|sm(al|ar|b3|it|t5)|so(ft|ny)|sp(01|h\-|v\-|v )|sy(01|mb)|t2(18|50)|t6(00|10|18)|ta(gt|lk)|tcl\-|tdg\-|tel(i|m)|tim\-|t\-mo|to(pl|sh)|ts(70|m\-|m3|m5)|tx\-9|up(\.b|g1|si)|utst|v400|v750|veri|vi(rg|te)|vk(40|5[0-3]|\-v)|vm40|voda|vulc|vx(52|53|60|61|70|80|81|83|85|98)|w3c(\-| )|webc|whit|wi(g |nc|nw)|wmlb|wonu|x700|yas\-|your|zeto|zte\-/i
var uaString = ua.substr(0, 4)
return isMobileSafari || regexpMobile.test(uaString) || regexpTablet.test(uaString)
}
// A hack to find the name of the first artist of a playlist. this is not yet returned by mopidy
// does not work wel with multiple artists of course
function getArtist (pl) {
for (var i = 0; i < pl.length; i++) {
for (var j = 0; j < pl[i].artists.length; j++) {
if (pl[i].artists[j].name !== '') {
return pl[i].artists[j].name
}
}
}
}
// A hack to find the first album of a playlist. this is not yet returned by mopidy
function getAlbum (pl) {
for (var i = 0; i < pl.length; i++) {
if (pl[i].album.name !== '') {
return pl[i].album.name
}
}
}
function artistsToString (artists, max) {
var result = ''
max = max || 3
if (artists && artists.length > 0) {
for (var i = 0; i < artists.length && i < max; i++) {
if (artists[i].name) {
if (i > 0) {
result += ', '
}
result += artists[i].name
}
}
}
return result
}
/** ******************************************************
* break up results and put them in album tables
*********************************************************/
function albumTracksToTable (pl, target, uri) {
var track, previousTrack, nextTrack
var html = ''
$(target).empty()
$(target).attr('data', uri)
for (var i = 0; i < pl.length; i++) {
previousTrack = track || undefined
nextTrack = i < pl.length - 1 ? pl[i + 1] : undefined
track = pl[i]
popupData[track.uri] = track
html += renderSongLi(previousTrack, track, nextTrack, uri, '', target, i, pl.length)
}
$(target).append(html)
updatePlayIcons(songdata.track.uri, songdata.tlid, controls.getIconForAction())
}
function renderSongLi (previousTrack, track, nextTrack, uri, tlid, target, currentIndex, listLength) {
var name
var tlidParameter = ''
var onClick = ''
var html = ''
track.name = validateTrackName(track, currentIndex)
// Streams
if (track.length === -1) {
html += '<li class="albumli"><a href="#"><h1><i class="' + getMediaClass(track) + '"></i> ' + track.name + ' [Stream]</h1></a></li>'
return html
}
if (target === CURRENT_PLAYLIST_TABLE && typeof tlid === 'number' && tlid >= 0) { // Current queue: Show popup menu icon. onClick plays track.
tlidParameter = '\',\'' + tlid
onClick = 'return controls.playQueueTrack(' + tlid + ');'
} else { // All other tracklist: Show default action icon. onClick performs default action
onClick = 'return controls.playTracks(\'\', mopidy, \'' + track.uri + '\', \'' + uri + '\');'
}
html += '<li class="song albumli" id="' + getjQueryID(target, track.uri) + '" tlid="' + tlid + '">'
if (isPlayable(track)) {
// Show popup icon for audio files or 'tracks' of other scheme types
html += '<a href="#" class="moreBtn" onclick="return popupTracks(event, \'' + uri + '\',\'' + track.uri + tlidParameter + '\');">' +
'<i class="fa fa-play-circle-o"></i></a>'
}
html += '<a href="#" onclick="' + onClick + '"><h1><i class="' + getMediaClass(track) + '"></i> ' + track.name + '</h1>'
if (listLength === 1 || (!hasSameAlbum(previousTrack, track) && !hasSameAlbum(track, nextTrack))) {
html += renderSongLiAlbumInfo(track)
}
html += '</a></li>'
return html
}
/* Tracklist renderer for track artist and album name. */
function renderSongLiAlbumInfo (track, target) {
var html = renderSongLiTrackArtists(track)
if (track.album && track.album.name) {
html += ' - <em>' + track.album.name + '</em></p>'
}
if (typeof target !== 'undefined' && target.length > 0) {
target = getjQueryID(target, track.uri, true)
$(target).children('a').eq(1).append(html)
}
return html
}
/* Tracklist renderer for track artist information. */
function renderSongLiTrackArtists (track) {
var html = ''
if (track.artists) {
for (var i = 0; i < track.artists.length; i++) {
html += track.artists[i].name
html += (i === track.artists.length - 1) ? '' : ' / '
// Stop after 3
if (i > 2) {
html += '...'
break
}
}
}
return html
}
/* Tracklist renderer to insert dividers between albums. */
function renderSongLiDivider (previousTrack, track, nextTrack, target) {
var html = ''
var imageID
// Render differently if part of an album.
if (!hasSameAlbum(previousTrack, track) && hasSameAlbum(track, nextTrack)) {
// Large divider with album cover.
showAlbum = ''
if (typeof track.album.uri !== 'undefined') {
showAlbum = 'onclick="return library.showAlbum(\'' + track.album.uri + '\', mopidy);"'
}
html +=
'<li class="albumdivider"><a href="#" ' + showAlbum + '>' +
'<img id="' + getjQueryID(target + '-cover', track.uri) + '" class="artistcover" width="30" height="30"/>' +
'<h1>' + track.album.name + '</h1><p>' +
renderSongLiTrackArtists(track) + '</p></a></li>'
// The element ID to populate with an album cover.
imageID = getjQueryID(target + '-cover', track.uri, true)
} else if (previousTrack && !hasSameAlbum(previousTrack, track)) {
// Small divider
html += '<li class="smalldivider"> </li>'
}
if (html.length > 0 && typeof target !== 'undefined' && target.length > 0) {
target = getjQueryID(target, track.uri, true)
$(target).before(html)
}
return [html, imageID]
}
function renderSongLiBackButton (results, target, onClick, optional) {
if (onClick && onClick.length > 0) {
if (!results || results.length === 0) {
$(target).empty()
$(target).append(
'<li class="song albumli"><a href="#" onclick="' + onClick + '"><h1><i></i>No tracks found...</h1></a></li>'
)
}
var opt = ''
if (optional) {
opt = ' backnav-optional'
}
$(target).prepend(
'<li class="backnav' + opt + '"><a href="#" onclick="' + onClick + '"><h1><i class="fa fa-arrow-circle-left"></i> Back</h1></a></li>'
)
}
}
function hasSameAlbum (track1, track2) {
// 'true' if album for each track exists and has the same name
var name1 = track1 ? (track1.album ? track1.album.name : undefined) : undefined
var name2 = track2 ? (track2.album ? track2.album.name : undefined) : undefined
return name1 && name2 && (name1 === name2)
}
function validateTrackName (track, trackNumber) {
// Create name if there is none
var name = ''
if (!track.name || track.name === '') {
name = track.uri.split('/')
name = decodeURI(name[name.length - 1]) || 'Track ' + String(trackNumber)
} else {
name = track.name
}
return name
}
function resultsToTables (results, target, uri, onClickBack, backIsOptional) {
$(target).empty()
renderSongLiBackButton(results, target, onClickBack, backIsOptional)
if (!results || results.length === 0) {
return
}
$(target).attr('data', uri)
var track, previousTrack, nextTrack, tlid
var html = ''
var requiredImages = {}
// Break into albums and put in tables
for (i = 0; i < results.length; i++) {
previousTrack = track || undefined
nextTrack = i < results.length - 1 ? results[i + 1] : undefined
track = results[i]
if (track) {
if ('tlid' in track) {
// Get track information from TlTrack instance
tlid = track.tlid
track = track.track
nextTrack = nextTrack ? nextTrack.track : undefined
}
popupData[track.uri] = track
var divider = renderSongLiDivider(previousTrack, track, nextTrack, target)
html += divider[0] + renderSongLi(previousTrack, track, nextTrack, uri, tlid, target, i, results.length)
requiredImages[track.uri] = divider[1]
}
}
$(target).append(html)
updatePlayIcons(songdata.track.uri, songdata.tlid, controls.getIconForAction())
images.setImages(requiredImages, mopidy, 'small')
}
function getPlaylistTracks (uri) {
if (playlists[uri] && playlists[uri].tracks) {
return Mopidy.when(playlists[uri].tracks)
} else {
showLoading(true)
return mopidy.playlists.lookup({'uri': uri}).then(function (playlist) {
return processPlaylistItems({'uri': uri, 'playlist': playlist})
}, console.error)
}
}
function getUris (tracks) {
var results = []
for (var i = 0; i < tracks.length; i++) {
results.push(tracks[i].uri)
}
return results
}
function getTracksFromUri (uri, full_track_data) {
var returnTracksOrUris = function (tracks) {
return full_track_data ? tracks : getUris(tracks)
}
if (customTracklists[uri]) {
return returnTracksOrUris(customTracklists[uri])
} else if (playlists[uri] && playlists[uri].tracks) {
return returnTracksOrUris(playlists[uri].tracks)
}
return []
}
// convert time to human readable format
function timeFromSeconds (length) {
var d = Number(length)
var h = Math.floor(d / 3600)
var m = Math.floor(d % 3600 / 60)
var s = Math.floor(d % 3600 % 60)
return ((h > 0 ? h + ':' : '') + (m > 0 ? (h > 0 && m < 10 ? '0' : '') + m + ':' : '0:') + (s < 10 ? '0' : '') + s)
}
/** ***** Toast ***/
function toast (message, delay, textOnly) {
textOnl = textOnly || false
message = message || 'Loading...'
delay = delay || 1000
$.mobile.loading('show', {
text: message,
textVisible: true,
theme: 'a',
textonly: textOnl
})
if (delay > 0) {
setTimeout(function () {
$.mobile.loading('hide')
}, delay)
}
}
/** ****************
* Modal dialogs *
******************/
function showLoading (on) {
if (on) {
$('body').css('cursor', 'progress')
$.mobile.loading('show', {
text: 'Loading data from ' + PROGRAM_NAME + ' on ' + HOSTNAME + '. Please wait...',
textVisible: true,
theme: 'a'
})
} else {
$('body').css('cursor', 'default')
$.mobile.loading('hide')
}
}
function showOffline (on) {
if (on) {
$.mobile.loading('show', {
text: 'Trying to reach ' + PROGRAM_NAME + ' on ' + HOSTNAME + '. Please wait...',
textVisible: true,
theme: 'a'
})
} else {
$.mobile.loading('hide')
}
}
// from http://dzone.com/snippets/validate-url-regexp
function validUri (uri) {
var regexp = /^(http|https|mms|rtmp|rtmps|rtsp):\/\/(\w+:{0,1}\w*@)?(\S+)(:[0-9]+)?(\/|\/([\w#!:.?+=&%@!\-\/]))?/
return regexp.test(uri)
}
function validServiceUri (str) {
return validUri(str) || isServiceUri(str)
}
function getScheme (uri) {
return uri.split(':')[0].toLowerCase()
}
function isPlayable (track) {
if (typeof track.type === 'undefined' || track.type === 'track') {
if (track.uri && getScheme(track.uri) === 'file') {
var ext = track.uri.split('.').pop().toLowerCase()
if ($.inArray(ext, audioExt) === -1) {
// Files must have the correct extension
return false
}
}
return true
}
return false
}
function isStreamUri (uri) {
return validUri(uri) || radioExtensionsList.indexOf(getScheme(uri)) >= 0
}
function getMediaClass (track) {
var defaultIcon = 'fa-file-sound-o'
var type = track.type
if (typeof type === 'undefined' || type === 'track') {
if (!isPlayable(track)) {
return 'fa fa-file-o' // Unplayable file
} else if (isStreamUri(track.uri)) {
return 'fa fa-rss' // Stream
}
} else if (type === 'directory') {
return 'fa fa-folder-o'
} else if (type === 'album') {
// return 'fa fa-bullseye' // Album
defaultIcon = 'fa-folder-o'
} else if (type === 'artist') {
// return 'fa fa-user-circle-o' // Artist
defaultIcon = 'fa-folder-o'
} else if (type === 'playlist') {
// return 'fa fa-star' // Playlist
}
if (track.uri) {
var scheme = getScheme(track.uri)
for (var i = 0; i < uriClassList.length; i++) {
if (scheme === uriClassList[i][0]) {
return 'fa ' + uriClassList[i][1]
}
}
return 'fa ' + defaultIcon
}
return ''
}
function getMediaHuman (uri) {
var scheme = getScheme(uri)
for (var i = 0; i < uriHumanList.length; i++) {
if (scheme.toLowerCase() === uriHumanList[i][0].toLowerCase()) {
return uriHumanList[i][1]
}
}
return ''
}
function isServiceUri (uri) {
var scheme = getScheme(uri)
var i = 0
for (i = 0; i < uriClassList.length; i++) {
if (scheme === uriClassList[i][0]) {
return true
}
}
for (i = 0; i < radioExtensionsList.length; i++) {
if (scheme === radioExtensionsList[i]) {
return true
}
}
return false
}
function isFavouritesPlaylist (playlist) {
return (playlist.name === STREAMS_PLAYLIST_NAME &&
getScheme(playlist.uri) === STREAMS_PLAYLIST_SCHEME)
}
function isSpotifyStarredPlaylist (playlist) {
var starredRegex = /spotify:user:.*:starred/g
return (starredRegex.test(playlist.uri) && playlist.name === 'Starred')
}
// Returns a string where {x} in template is replaced by tokens[x].
function stringFromTemplate (template, tokens) {
return template.replace(/{[^}]+}/g, function (match) {
return tokens[match.slice(1, -1)]
})
}
/**
* Converts a URI to a jQuery-safe identifier. jQuery identifiers need to be
* unique per page and cannot contain special characters.
*
* @param {string} identifier - Identifier string to prefix to the URI. Can
* be used to ensure that the generated ID will be unique for the page that
* it will be included on. Also accepts jQuery identifiers starting with '#'.
*
* @param {string} uri - URI to encode, usually the URI of a Mopidy track.
*
* @param {boolean} includePrefix - Will prefix the generated identifier
* with the '#' character if set to 'true', ready to be passed to $() or
* jQuery().
*
* @return {string} - a string in the format '[#]identifier-encodedURI' that
* is safe to use as a jQuery identifier.
*/
function getjQueryID (identifier, uri, includePrefix) {
if (identifier.charAt(0) === '#' && !includePrefix) {
identifier = identifier.substr(1)
} else if (identifier.charAt(0) !== '#' && includePrefix) {
identifier = '#' + identifier
}
return identifier + '-' + fixedEncodeURIComponent(uri).replace(/([;&,\.\+\*\~':"\!\^#$%@\[\]\(\)=>\|])/g, '') // eslint-disable-line no-useless-escape
}
// Strict URI encoding as per https://developer.mozilla.org/en/docs/Web/JavaScript/Reference/Global_Objects/encodeURIComponent
function fixedEncodeURIComponent (str) {
return encodeURIComponent(str).replace(/[!'()*]/g, function (c) {
return '%' + c.charCodeAt(0).toString(16)
})
} | PypiClean |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.