id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7 values |
|---|---|---|
/APScheduler-4.0.0a1.tar.gz/APScheduler-4.0.0a1/src/apscheduler/triggers/combining.py | from __future__ import annotations
from abc import abstractmethod
from datetime import datetime, timedelta
from typing import Any
import attrs
from .._exceptions import MaxIterationsReached
from .._validators import as_timedelta, require_state_version
from ..abc import Trigger
from ..marshalling import marshal_object, unmarshal_object
@attrs.define
class BaseCombiningTrigger(Trigger):
    """Base class for triggers that combine the fire times of several child triggers."""

    # Child triggers whose fire times are combined by subclasses.
    triggers: list[Trigger]
    # Cached next fire time of each child trigger (parallel to ``triggers``).
    # Excluded from __init__ and equality comparison; starts empty and is
    # filled lazily by the subclass's next() implementation.
    _next_fire_times: list[datetime | None] = attrs.field(
        init=False, eq=False, factory=list
    )

    def __getstate__(self) -> dict[str, Any]:
        """Serialize this trigger (state format version 1) via the marshalling helpers."""
        return {
            "version": 1,
            "triggers": [marshal_object(trigger) for trigger in self.triggers],
            "next_fire_times": self._next_fire_times,
        }

    @abstractmethod
    def __setstate__(self, state: dict[str, Any]) -> None:
        # Abstract so that subclasses must implement their own version check;
        # they are expected to call this base implementation via super().
        self.triggers = [
            unmarshal_object(*trigger_state) for trigger_state in state["triggers"]
        ]
        self._next_fire_times = state["next_fire_times"]
@attrs.define
class AndTrigger(BaseCombiningTrigger):
    """
    Fires on times produced by the enclosed triggers whenever the fire times are within
    the given threshold.

    If the produced fire times are not within the given threshold of each other, the
    trigger(s) that produced the earliest fire time will be asked for their next fire
    time and the iteration is restarted. If instead all the triggers agree on a fire
    time, all the triggers are asked for their next fire times and the earliest of the
    previously produced fire times will be returned.

    This trigger will be finished when any of the enclosed trigger has finished.

    :param triggers: triggers to combine
    :param threshold: maximum time difference between the next fire times of the
        triggers in order for the earliest of them to be returned from :meth:`next` (in
        seconds, or as timedelta)
    :param max_iterations: maximum number of iterations of fire time calculations before
        giving up (``None`` means no limit)
    """

    threshold: timedelta = attrs.field(converter=as_timedelta, default=1)
    max_iterations: int | None = 10000

    def next(self) -> datetime | None:
        """Return the next agreed-upon fire time, or ``None`` when finished.

        :raises MaxIterationsReached: if no agreement was found within
            ``max_iterations`` rounds
        """
        from itertools import count

        if not self._next_fire_times:
            # Fill out the fire times on the first run
            self._next_fire_times = [t.next() for t in self.triggers]

        # ``max_iterations`` is declared as ``int | None``; treat None as
        # "no limit" instead of letting range() raise TypeError.
        iterations = count() if self.max_iterations is None else range(self.max_iterations)
        for _ in iterations:
            # Find the earliest and latest fire times
            earliest_fire_time: datetime | None = None
            latest_fire_time: datetime | None = None
            for fire_time in self._next_fire_times:
                # If any of the fire times is None, this trigger is finished
                if fire_time is None:
                    return None

                if earliest_fire_time is None or earliest_fire_time > fire_time:
                    earliest_fire_time = fire_time

                if latest_fire_time is None or latest_fire_time < fire_time:
                    latest_fire_time = fire_time

            # Replace all the fire times that were within the threshold
            for i, _trigger in enumerate(self.triggers):
                if self._next_fire_times[i] - earliest_fire_time <= self.threshold:
                    self._next_fire_times[i] = self.triggers[i].next()

            # If all the fire times were within the threshold, return the earliest one.
            # NOTE(review): in that case the loop above has already advanced every
            # agreeing trigger once, and this wholesale refresh advances them again --
            # confirm the double advancement is intended before changing it.
            if latest_fire_time - earliest_fire_time <= self.threshold:
                self._next_fire_times = [t.next() for t in self.triggers]
                return earliest_fire_time
        else:
            raise MaxIterationsReached

    def __getstate__(self) -> dict[str, Any]:
        """Serialize, adding the threshold (as seconds) and the iteration limit."""
        state = super().__getstate__()
        state["threshold"] = self.threshold.total_seconds()
        state["max_iterations"] = self.max_iterations
        return state

    def __setstate__(self, state: dict[str, Any]) -> None:
        """Restore from a version 1 state dict produced by :meth:`__getstate__`."""
        require_state_version(self, state, 1)
        super().__setstate__(state)
        self.threshold = timedelta(seconds=state["threshold"])
        self.max_iterations = state["max_iterations"]

    def __repr__(self) -> str:
        return (
            f"{self.__class__.__name__}({self.triggers}, "
            f"threshold={self.threshold.total_seconds()}, "
            f"max_iterations={self.max_iterations})"
        )
@attrs.define
class OrTrigger(BaseCombiningTrigger):
    """
    Fires on every fire time of every trigger in chronological order.
    If two or more triggers produce the same fire time, it will only be used once.
    This trigger will be finished when none of the enclosed triggers can produce any new
    fire times.
    :param triggers: triggers to combine
    """

    def next(self) -> datetime | None:
        """Return the earliest of the child triggers' next fire times, or ``None``."""
        # Fill out the fire times on the first run
        if not self._next_fire_times:
            self._next_fire_times = [t.next() for t in self.triggers]
        # Find out the earliest of the fire times; finished triggers (None) are
        # skipped, and min() yields None once every trigger has finished
        earliest_time: datetime | None = min(
            (fire_time for fire_time in self._next_fire_times if fire_time is not None),
            default=None,
        )
        if earliest_time is not None:
            # Generate new fire times for the trigger(s) that generated the earliest
            # fire time (deduplicates identical fire times in the process)
            for i, fire_time in enumerate(self._next_fire_times):
                if fire_time == earliest_time:
                    self._next_fire_times[i] = self.triggers[i].next()
        return earliest_time

    def __setstate__(self, state: dict[str, Any]) -> None:
        # No extra state beyond the base class; only the version needs checking.
        require_state_version(self, state, 1)
        super().__setstate__(state)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.triggers})" | PypiClean |
/Interplanetary_Invaders-0.7-py3-none-any.whl/interplanetary_invaders/scripts/pause_menu.py | import pygame
import sys
from interplanetary_invaders.scripts import joystick
pygame.init()
# Human-readable name of the host operating system, shown by the pause menu
# in its "Exit to <platform>" entry.  Falls back to the generic "System".
if sys.platform.startswith("win"):
    platform = "Windows"
elif sys.platform == "linux":
    platform = "Linux"
elif sys.platform == "darwin":
    platform = "Mac"
else:
    platform = "System"
def pause_menu(display, images, data, index, exit_lock = False):
    """Run the modal pause menu loop on top of the current frame.

    Blocks until the player resumes, exits to the main menu, or quits the game.
    Returns True if the caller should return to the main menu, False otherwise.
    ``exit_lock`` disables both exit options (e.g. when exiting is not allowed).
    """
    # Imported locally to avoid circular imports between scripts modules.
    from interplanetary_invaders.scripts.menu import Menu
    from interplanetary_invaders.scripts.saves import save_data
    from interplanetary_invaders.scripts.retro_text import retro_text
    joystick.Reset()
    # Keep a snapshot of the paused frame to redraw behind the menu box.
    background = display.copy()
    done = False
    sel = 0
    items = ["Resume", "Options", "Exit to Main Menu", f"Exit to {platform}"]
    old_items = items[:]
    stuff_rect = pygame.Rect(0, 0, 300, 400)
    stuff_rect.center = display.get_rect().center
    toMainMenu = False
    confirm = False
    while not done:
        for event in pygame.event.get():
            joystick.Update(event)
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()
            if event.type == pygame.KEYDOWN or joystick.WasEvent():
                # Joystick events have no "key" attribute; add a dummy one so
                # the keyboard comparisons below don't raise AttributeError.
                if not hasattr(event, "key"):
                    event.key = None
                if event.key == pygame.K_ESCAPE or joystick.BackEvent():
                    done = True
                if event.key in (pygame.K_w, pygame.K_UP) or joystick.JustWentUp():
                    sel -= 1
                if event.key in (pygame.K_s, pygame.K_DOWN) or joystick.JustWentDown():
                    sel += 1
                # Clamp the selection to the current item list.
                if sel < 0:
                    sel = 0
                if sel >= len(items):
                    sel = len(items) - 1
                if event.key == pygame.K_RETURN or joystick.JustPressedA():
                    i = items[sel]
                    if confirm:
                        # Confirmation sub-menu: "Save" / "Don't Save" both exit;
                        # only "Save" writes the save file first.
                        if i == "Save":
                            save_data(index, data)
                        if "Save" in i:  # matches both "Save" and "Don't Save"
                            if not toMainMenu:
                                pygame.quit()
                                sys.exit()
                            done = True
                            return toMainMenu
                    if i == "Cancel":
                        confirm = False
                        toMainMenu = False
                        items = old_items
                    if i == "Resume":
                        done = True
                    if i == "Options":
                        m = Menu(display, images, True)
                        m.main()
                    if i == f"Exit to {platform}" and not exit_lock:
                        items = ["Cancel", "Save", "Don't Save"]
                        sel = 0
                        confirm = True
                    if i == "Exit to Main Menu" and not exit_lock:
                        toMainMenu = True
                        items = ["Cancel", "Save", "Don't Save"]
                        sel = 0
                        confirm = True
        # Redraw: paused frame, menu box, then one highlighted entry per item.
        display.blit(background, (0, 0))
        pygame.draw.rect(display, (0, 0, 0), stuff_rect)
        pygame.draw.rect(display, (255, 255, 0), stuff_rect, 1)
        for e, i in enumerate(items):
            color = (255, 255, 255)
            if e == sel:
                color = (255, 255, 175)
            display.blit(images["bullet"], (stuff_rect.left + 5, stuff_rect.top + 50 + e * 30))
            retro_text((stuff_rect.left + 10, stuff_rect.top + 50 + e * 30), display, 15, " " + i, color = color)
        pygame.display.update()
    return toMainMenu | PypiClean |
/LFPykernels-0.2.0.tar.gz/LFPykernels-0.2.0/examples/README.md | # Examples
In order to use these codes, please use the Docker container file (``../Dockerfile``), which can be built according to the README (https://github.com/LFPy/LFPykernels#docker).
This cross-platform solution provides an environment including all dependencies for the main example notebook ``LIF_net_forward_model_predictions.ipynb``
## Usage
Assuming the Docker container has been built according to this project's README, the example notebook(s) may be executed by issuing in the terminal:
cd <path-to-LFPykernels>
docker run --mount type=bind,source="$(pwd)",target=/opt/data -it -p 5000:5000 lfpykernels
root@b6ff6a542d8a:/# cd /opt/data/examples
root@b6ff6a542d8a:/# jupyter-notebook --ip 0.0.0.0 --port=5000 --no-browser --allow-root
Then connect to the server with URL similar to http://127.0.0.1:5000/?token=6c26f9a5a9c18f52c31a572ba3bda255f278a40a91297a55 using a browser on the host computer. There provided example notebook(s) may be run and edited in an interactive manner.
## File list
- `BallAndSticks_E.hoc`
Excitatory cell morphology file
- `BallAndSticks_I.hoc`
Inhibitory cell morphology file
- `BallAndSticksTemplate.hoc`
Cell template specification file needed by `LFPy.NetworkCell` loading the different morphologies
- `FIR_filter.nestml`
NESTML (https://github.com/nest/nestml) file specifying a Finite Impulse Response (FIR) filter node in NEST
- `example_network_methods.py`
Various methods required by network simulations
- `example_network_parameters.py`
Parameters for recurrent multicompartment network model set up using LFPy. Here used to derive parameters for kernel predictions
- `plotting.py`
Some shared plotting methods.
- `mod/*.mod`
NMODL language files describing ion-channel and synapse dynamics
- `LIF_net_forward_model_predictions.ipynb`
Jupyter notebook implementing a spiking point-neuron network model with forward-model based predictions using the `LFPykernels` python package
- `README_example.ipynb`
Reference implementation of example code in main README file.
| PypiClean |
/HPI-0.3.20230327.tar.gz/HPI-0.3.20230327/my/core/sqlite.py | from .common import assert_subpackage; assert_subpackage(__name__)
from contextlib import contextmanager
from pathlib import Path
import shutil
import sqlite3
from tempfile import TemporaryDirectory
from typing import Tuple, Any, Iterator, Callable, Optional, Union
from .common import PathIsh, assert_never
from .compat import Literal
def sqlite_connect_immutable(db: PathIsh) -> sqlite3.Connection:
    """Open *db* read-only through an SQLite URI with the ``immutable=1`` flag set."""
    uri = 'file:{}?immutable=1'.format(db)
    return sqlite3.connect(uri, uri=True)
def test_sqlite_connect_immutable(tmp_path: Path) -> None:
    """Check that an immutable connection really rejects writes to the database."""
    db = str(tmp_path / 'db.sqlite')
    with sqlite3.connect(db) as conn:
        conn.execute('CREATE TABLE testtable (col)')
    import pytest  # type: ignore
    # A write through the immutable connection must fail as read-only.
    with pytest.raises(sqlite3.OperationalError, match='readonly database'):
        with sqlite_connect_immutable(db) as conn:
            conn.execute('DROP TABLE testtable')
    # succeeds without immutable
    with sqlite3.connect(db) as conn:
        conn.execute('DROP TABLE testtable')
SqliteRowFactory = Callable[[sqlite3.Cursor, sqlite3.Row], Any]
def dict_factory(cursor, row):
    """sqlite3 row factory mapping each row to a ``{column_name: value}`` dict."""
    fields = [column[0] for column in cursor.description]
    # dict(zip(...)) is the idiomatic equivalent of the explicit dict comprehension
    return dict(zip(fields, row))
Factory = Union[SqliteRowFactory, Literal['row', 'dict']]
@contextmanager
def sqlite_connection(db: PathIsh, *, immutable: bool=False, row_factory: Optional[Factory]=None) -> Iterator[sqlite3.Connection]:
    """Context manager yielding an sqlite3 connection to *db*.

    :param immutable: open the database read-only via the ``immutable=1`` URI flag
    :param row_factory: a callable row factory, or the shorthand strings
        ``'row'`` (sqlite3.Row) / ``'dict'`` (dict_factory)
    """
    dbp = f'file:{db}'
    # https://www.sqlite.org/draft/uri.html#uriimmutable
    if immutable:
        # assert results in nicer error than sqlite3.OperationalError
        assert Path(db).exists(), db
        dbp = f'{dbp}?immutable=1'
    row_factory_: Any = None
    if row_factory is not None:
        if callable(row_factory):
            row_factory_ = row_factory
        elif row_factory == 'row':
            row_factory_ = sqlite3.Row
        elif row_factory == 'dict':
            row_factory_ = dict_factory
        else:
            # NOTE(review): assert_never is called without the offending value
            # here -- confirm the compat shim accepts zero arguments.
            assert_never()
    conn = sqlite3.connect(dbp, uri=True)
    try:
        conn.row_factory = row_factory_
        # "with conn" wraps the caller's work in a transaction (commit/rollback).
        with conn:
            yield conn
    finally:
        # Connection context manager isn't actually closing the connection, only keeps transaction
        conn.close()
# TODO come up with a better name?
# NOTE: this is tested by tests/sqlite.py::test_sqlite_read_with_wal
def sqlite_copy_and_open(db: PathIsh) -> sqlite3.Connection:
    """
    'Snapshots' database and opens by making a deep copy of it including journal/WAL files
    """
    dp = Path(db)
    # TODO make atomic/check mtimes or something
    dest = sqlite3.connect(':memory:')
    with TemporaryDirectory() as td:
        tdir = Path(td)
        # shm should be recreated from scratch -- safer not to copy perhaps
        # Copy the db itself plus its sidecar files (journal/WAL), except -shm.
        tocopy = [dp] + [p for p in dp.parent.glob(dp.name + '-*') if not p.name.endswith('-shm')]
        for p in tocopy:
            shutil.copy(p, tdir / p.name)
        # Open the copy and back it up into the in-memory connection, so the
        # returned connection is fully detached from the original files.
        with sqlite3.connect(str(tdir / dp.name)) as conn:
            from .compat import sqlite_backup
            sqlite_backup(source=conn, dest=dest)
        conn.close()
    return dest
# NOTE hmm, so this kinda works
# V = TypeVar('V', bound=Tuple[Any, ...])
# def select(cols: V, rest: str, *, db: sqlite3.Connection) -> Iterator[V]:
# but sadly when we pass columns (Tuple[str, ...]), it seems to bind this type to V?
# and then the return type ends up as Iterator[Tuple[str, ...]], which isn't desirable :(
# a bit annoying to have this copy-pasting, but hopefully not a big issue
from typing import overload
# Overloads give the caller a tuple whose arity matches the number of selected
# columns (up to 8); the runtime implementation is untyped below.
@overload
def select(cols: Tuple[str ], rest: str, *, db: sqlite3.Connection) -> \
        Iterator[Tuple[Any ]]: ...
@overload
def select(cols: Tuple[str, str ], rest: str, *, db: sqlite3.Connection) -> \
        Iterator[Tuple[Any, Any ]]: ...
@overload
def select(cols: Tuple[str, str, str ], rest: str, *, db: sqlite3.Connection) -> \
        Iterator[Tuple[Any, Any, Any ]]: ...
@overload
def select(cols: Tuple[str, str, str, str ], rest: str, *, db: sqlite3.Connection) -> \
        Iterator[Tuple[Any, Any, Any, Any ]]: ...
@overload
def select(cols: Tuple[str, str, str, str, str ], rest: str, *, db: sqlite3.Connection) -> \
        Iterator[Tuple[Any, Any, Any, Any, Any ]]: ...
@overload
def select(cols: Tuple[str, str, str, str, str, str ], rest: str, *, db: sqlite3.Connection) -> \
        Iterator[Tuple[Any, Any, Any, Any, Any, Any ]]: ...
@overload
def select(cols: Tuple[str, str, str, str, str, str, str ], rest: str, *, db: sqlite3.Connection) -> \
        Iterator[Tuple[Any, Any, Any, Any, Any, Any, Any ]]: ...
@overload
def select(cols: Tuple[str, str, str, str, str, str, str, str], rest: str, *, db: sqlite3.Connection) -> \
        Iterator[Tuple[Any, Any, Any, Any, Any, Any, Any, Any]]: ...
def select(cols, rest, *, db):
    """Run ``SELECT <cols> <rest>`` on *db* and iterate over the result rows."""
    # db arg is last cause that results in nicer code formatting..
    return db.execute('SELECT ' + ','.join(cols) + ' ' + rest) | PypiClean |
/mrv-1.0.2-stable.zip/mrv-1.0.2-stable/mrv/maya/ui/browse/interface.py | """module with interfaces to define contracts"""
__docformat__ = "restructuredtext"
from mrv.interface import Interface
__all__ = ('iFinderProvider', 'iOptions', 'iFinderFilter')
class iFinderProvider(Interface):
    """Interface defining the capabilities of a provider to be usable by a Finder
    control. Every finder as a root, which is used as basis for listing urls.
    Besides its function to provide sub-items for given urls, it is also used
    to store recently selected items on a given level of a url. This memory
    allows the finder to restore common portions of URLs accordingly.
    The base implementation provided here already implements the memorization
    feature."""
    __slots__ = '_mem_items'

    #{ Configuration
    # if True, items of urls will be memorized, if False, this information
    # will be discarded
    memorize_urlItems = True
    #} END configuration

    def __init__(self, root):
        # _root: base url all queries are relative to
        # _mem_items: maps url_index -> last chosen url_item at that level
        self._root = root
        self._mem_items = dict()

    #{ Interface
    def urlItems(self, url):
        """
        :return: list of string-like items which can be found at the given url.
        If this url is combined with one of the returned items separated by a slash,
        a valid url is formed, i.e. url/item
        :param url: A given slash-separated url like base/subitem or '', which
        requests items at the root of all urls"""
        raise NotImplementedError("To be implemented by subclass")

    def formatItem(self, url_base, url_index, url_item):
        """Given the url_item, as well as additional information such as its base
        and its index inside of the url, this method encodes the item for presentation
        in the user interface.
        :param url_base: relative url at which the url_item resides. Is "" if url_index
        is 0
        :param url_index: index representing the position of the url_item within the
        url
        :param url_item: item which is to be formatted.
        :return: string representing the formatted url."""
        # Default implementation: present the item unchanged.
        return url_item

    def storeUrlItem(self, url_index, url_item):
        """Stores and associates a given url_index with a url_item. Makes the stored
        item queryable by the ``storedUrlItemByIndex`` method
        :param url_index: index from 0 to n, where 0 corresponds to the first item
        in the url
        :param url_item: the string item to store at the given index"""
        if not self.memorize_urlItems:
            return
        # END ignore store call
        self._mem_items[url_index] = url_item

    def storedUrlItemByIndex(self, url_index):
        """:return: string item previously stored at the given index, or None
        if there is no information available"""
        return self._mem_items.get(url_index, None)

    def root(self):
        """:return: string representing the file root"""
        return self._root

    #} END interface
class iFinderFilter(Interface):
    """Filter interface suitable to perform item filter operations for Finder controls"""

    #{ Interface
    def filtered(self, finder, element_index, base_url, items):
        """:return: list of items which may be shown in the element at element_index
        :param finder: finder instance issuing the call
        :param element_index: index of the element which is to be filled with items
        :param base_url: url at which the given items exist
        :param items: list of relative item ids which are to be shown in the finder element"""
        # Default implementation: pass everything through unfiltered.
        return items

    #} END interface
class iOptions(Interface):
    """Interface for all custom options layouts to be used with the FinderLayout.
    They take a weak-reference to their parent FinderLayout allowing them to
    set themselves up if necessary.
    The options they represent must be read by a custom implementation of the
    FinderLayout"""

    #{ Interface
    # Marker interface: no methods required yet.
    #} END interface | PypiClean |
/JyPlotter-0.9.4.tar.gz/JyPlotter-0.9.4/PyPlotter/gtkGfx.py | import math
import gtk, pango
from gtk import gdk
try:
import Gfx
except ImportError:
from . import Gfx
# Pull the compatibility shims into this module's namespace.  The original
# fallback imported a misspelled module name ("Compatiblity") and then
# referenced "Compatibility" on the next line, which could never resolve;
# import the module under one consistent name and splat its namespace.
try:
    import Compatibility
except ImportError:
    from . import Compatibility
globals().update(Compatibility.__dict__)
# Name reported for this graphics backend.
driverName = "gtkGfx"

########################################################################
#
# class Driver
#
########################################################################

# 8x8 1-bit stipple bitmaps used as fill patterns (see Driver.setFillPattern).
stipple_Solid = gdk.bitmap_create_from_data(None,
    "\xff\xff\xff\xff\xff\xff\xff\xff", 8, 8)
stipple_PatternA = gdk.bitmap_create_from_data(None,
    "\xcc\x99\x33\x66\xcc\x99\x33\x66", 8, 8)
stipple_PatternB = gdk.bitmap_create_from_data(None,
    "\xcc\x66\x33\x99\xcc\x66\x33\x99", 8, 8)
stipple_PatternC = gdk.bitmap_create_from_data(None,
    "\xc3\x66\x3c\x99\xc3\x66\x3c\x99", 8, 8)

# Pre-parsed colors used by Driver.writeStr for rotated-text rendering.
white = gdk.color_parse("white")
black = gdk.color_parse("black")
class PangoContextWrapper(pango.Context):
    """Subclass of pango.Context whose __init__ does nothing, bypassing the
    base class initializer.
    NOTE(review): only referenced from commented-out code in
    Driver.changeDrawable -- confirm whether it is still needed."""
    def __init__(self):
        pass
class Driver(Gfx.Driver):
    """A simple graphics layer on top of gdk.
    See Gfx.py
    """
    def __init__(self, gtk_widget, pango_layout):
        """Initialize canvas on a gdk drawable."""
        Gfx.Driver.__init__(self)
        self.pango_layout = pango_layout
        self.pango_context = self.pango_layout.get_context()
        self.pango_font = self.pango_context.get_font_description()
        self.gtk_widget = gtk_widget
        self.changeDrawable(gtk_widget.window)

    def changeDrawable(self, drawable, pango_layout=None):
        """Change the drawable"""
##        self.pango_font_desc = pango.FontDescription()
##        self.pango_context = PangoContextWrapper()
##        self.pango_context_set_font_description(self.pango_font_desc)
        if pango_layout != None: self.pango_layout = pango_layout
        self.drawable = drawable
        # A fresh graphics context is needed per drawable; without a drawable
        # there is nothing to draw on and gc stays None.
        if self.drawable:
            self.gc = gdk.GC(self.drawable)
            self.resizedGfx()
        else: self.gc = None
        self.gc_thickness = 1
        self.gc_line_style = gdk.LINE_SOLID
        self.gc_cap_style = gdk.CAP_ROUND
        self.gc_join_style = gdk.JOIN_MITER
        if self.gc:
            self.w, self.h = self.drawable.get_size()
            self.reset()
        else: self.w, self.h = 0, 0

    def resizedGfx(self):
        # Re-read the drawable's size after a resize.
        self.w, self.h = self.drawable.get_size()

    def getSize(self):
        return self.w, self.h

    def getResolution(self):
        # Fixed nominal resolution (dpi-like value expected by Gfx).
        return 100

    def __gdkColor(self, rgbTuple):
        # Convert 0.0..1.0 RGB floats to a 16-bit-per-channel gdk.Color.
        return gdk.Color(int(round(rgbTuple[0]*65535)),
            int(round(rgbTuple[1]*65535)),
            int(round(rgbTuple[2]*65535)))

    def setColor(self, rgbTuple):
        self.gc.set_rgb_fg_color(self.__gdkColor(rgbTuple))
        # self.gc.set_rgb_bg_color(self.__gdkColor(rgbTuple))
        self.color = rgbTuple

    def setLineWidth(self, width):
        """Map the Gfx line-width constant to a pixel thickness (1..3)."""
        self.lineWidth = width
        if width == Gfx.THIN: self.gc_thickness = 1
        elif width == Gfx.MEDIUM: self.gc_thickness = 2
        elif width == Gfx.THICK: self.gc_thickness = 3
        else: raise ValueError("'thickness' must be 'thin', 'medium' or 'thick' !")
        self.gc.set_line_attributes(self.gc_thickness,
            self.gc_line_style,
            self.gc_cap_style,
            self.gc_join_style)

    def setLinePattern(self, pattern):
        """Set continuous, dashed or dotted line drawing."""
        self.linePattern = pattern
        if pattern == Gfx.CONTINUOUS:
            self.gc_line_style = gdk.LINE_SOLID
        elif pattern == Gfx.DASHED:
            self.gc_line_style = gdk.LINE_ON_OFF_DASH
            self.gc.set_dashes(0, (5, 5))
        elif pattern == Gfx.DOTTED:
            self.gc_line_style = gdk.LINE_ON_OFF_DASH
            self.gc.set_dashes(0, (1, 4))
        else: raise ValueError("'pattern' must be 'continuous', " + \
            "'dashed' or 'dotted' !")
        self.gc.set_line_attributes(self.gc_thickness,
            self.gc_line_style,
            self.gc_cap_style,
            self.gc_join_style)

    def setFillPattern(self, pattern):
        """Select one of the module-level 8x8 stipple bitmaps as fill pattern."""
        self.fillPattern = pattern
        if pattern == Gfx.SOLID:
            fp = gdk.SOLID
            pat = stipple_Solid
        elif pattern == Gfx.PATTERN_A:
            fp = gdk.STIPPLED
            pat = stipple_PatternA
        elif pattern == Gfx.PATTERN_B:
            fp = gdk.STIPPLED
            pat = stipple_PatternB
        elif pattern == Gfx.PATTERN_C:
            fp = gdk.STIPPLED
            pat = stipple_PatternC
        else: raise ValueError("'pattern' must be 'solid' or 'patternA', " + \
            "'patternB', 'patternC' !")
        self.gc.set_fill(fp)
        self.gc.set_stipple(pat)

    def setFont(self, ftype, size, weight):
        """Configure the pango font from Gfx font type/size/weight constants."""
        self.fontType = ftype
        self.fontSize = size
        self.fontWeight = weight
        if ftype == Gfx.SANS: ff = "sans"
        elif ftype == Gfx.SERIF: ff = "serif"
        elif ftype == Gfx.FIXED: ff = "monospace"
        else: raise ValueError("'type' must be 'sans', 'serif' or 'fixed' !")
        if size == Gfx.SMALL: fs = 5
        elif size == Gfx.NORMAL: fs = 10
        elif size == Gfx.LARGE: fs = 20
        else: raise ValueError("'size' must be 'small', 'normal' or 'large' !")
        fst = pango.STYLE_NORMAL
        fw = pango.WEIGHT_NORMAL
        # Note: "i" takes precedence; a weight string of "bi" yields italic only.
        if "i" in weight: fst = pango.STYLE_ITALIC
        elif "b" in weight: fw = pango.WEIGHT_BOLD
        self.pango_font.set_family(ff)
        self.pango_font.set_size(fs*pango.SCALE)
        self.pango_font.set_style(fst)
        self.pango_font.set_weight(fw)
        self.pango_layout.set_font_description(self.pango_font)

    def getTextSize(self, text):
        """Return the (width, height) in pixels of *text* in the current font."""
        self.pango_layout.set_text(text)
        return self.pango_layout.get_pixel_size()

##    def selectFontSize(self, text, w,h):
##        for fs in range(3,0,-1):
##            self.setFont(self, self.fontType, fs, self.fontWeight)
##            sw,sh = self.getTextSize(text)
##            if sw <= w and sh <= h: break
##        else:
##            return 0
##        return 1

    def drawPoint(self, x, y):
        # The y axis is flipped: Gfx uses bottom-left origin, gdk top-left.
        self.drawable.draw_point(self.gc, x, self.h-y-1)

    def __checkInLine(self):
        # Temporarily force a solid fill so dashed/dotted lines render
        # correctly while a stippled fill pattern is active.
        if self.linePattern != Gfx.CONTINUOUS and \
           self.fillPattern != Gfx.SOLID:
            self.gc.set_fill(gdk.SOLID)

    def __checkOutLine(self):
        # Restore the stippled fill after line drawing (see __checkInLine).
        if self.linePattern != Gfx.CONTINUOUS and \
           self.fillPattern != Gfx.SOLID:
            self.gc.set_fill(gdk.STIPPLED)

    def drawLine(self, x1, y1, x2, y2):
        self.__checkInLine()
        self.drawable.draw_line(self.gc, x1, self.h-y1-1, x2, self.h-y2-1)
        self.__checkOutLine()

    def drawRect(self, x, y, w, h):
        self.__checkInLine()
        self.drawable.draw_rectangle(self.gc,False,x,self.h-y-h,w-1,h-1)
        self.__checkOutLine()

    def drawPoly(self, array):
        if array:
            # Flip every point's y coordinate before handing it to gdk.
            transformed = [(x, self.h-y-1) for x,y in array]
            self.__checkInLine()
            self.drawable.draw_lines(self.gc, transformed)
            self.__checkOutLine()

    def fillRect(self, x, y, w, h):
        self.drawable.draw_rectangle(self.gc,True,x,self.h-y-h,w,h)

    def fillPoly(self, array):
        transformed = [(x, self.h-y-1) for x,y in array]
        self.drawable.draw_polygon(self.gc, True, transformed)

    def writeStr(self, x, y, str, rotationAngle=0.0):
        """Draw *str* at (x, y); non-zero rotationAngle (degrees) renders the
        text into an offscreen pixmap and copies it point by point, rotated."""
        self.pango_layout.set_text(str)
        w, h = self.pango_layout.get_pixel_size()
        if rotationAngle == 0.0:
            self.drawable.draw_layout(self.gc, x, self.h-y-h,
                self.pango_layout)
        else:
            a = rotationAngle / 180.0 * math.pi
            # Offset of the rotated layout's anchor relative to the unrotated one.
            da = math.atan2(h,0)-a
            dw = int(h*math.cos(da)+0.5)
            dh = int(h*math.sin(da)+0.5)-h
            # Render white-on-black into a scratch pixmap, then copy every
            # lit pixel to its rotated position on the target drawable.
            pixmap = gdk.Pixmap(self.drawable, w, h)
            gc = gdk.GC(pixmap)
            gc.set_rgb_fg_color(black)
            gc.set_fill(gdk.SOLID)
            pixmap.draw_rectangle(gc, True, 0, 0, w, h)
            gc.set_rgb_fg_color(white)
            pixmap.draw_layout(gc, 0, 0, self.pango_layout)
            image = pixmap.get_image(0, 0, w, h)
            for dy in range(h):
                for dx in range(w):
                    if (image.get_pixel(dx, dy) & 0x808080)!= 0:
                        r = math.sqrt(dx**2+dy**2)
                        da = math.atan2(dy,dx) - a
                        xx = int(r * math.cos(da)+0.5)
                        yy = int(r * math.sin(da)+0.5)
                        self.drawable.draw_point(self.gc, x+xx-dw,
                            self.h-y-h+yy-dh)
########################################################################
#
# class Window
#
########################################################################
class Window(Driver, Gfx.Window):
    """Top-level gtk window hosting a DrawingArea backed by an offscreen pixmap."""

    def __init__(self, size=(640,480), title="gtkGraph"):
        self.win = gtk.Window()
        self.win.set_default_size(*size)
        self.win.set_size_request(*size)
        self.win.set_resizable(False)
        self.win.set_title(title)
        self.canvas = gtk.DrawingArea()
        Driver.__init__(self, self.canvas,
            self.canvas.create_pango_layout(""))
        self.win.add(self.canvas)
        self.canvas.connect("configure-event", self.onConfigure)
        self.canvas.connect("expose-event", self.onExpose)
        self.win.show_all()
        self.win.connect("destroy", lambda w: gtk.main_quit())
        self.clear()

    def refresh(self):
        """Refresh the display."""
        gc = self.canvas.get_style().fg_gc[gtk.STATE_NORMAL]
        w, h = self.pixmap.get_size()
        self.canvas.window.draw_drawable(gc, self.pixmap, 0,0,0,0,w,h)

    def quit(self):
        # Close the window and stop the gtk main loop.
        self.win.destroy()
        gtk.main_quit()

    def waitUntilClosed(self):
        # Block in the gtk main loop until the window is destroyed.
        gtk.main()

    def onConfigure(self, widget, event):
        # (Re)create the offscreen pixmap at the new size and draw into it.
        w, h = widget.window.get_size()
        self.pixmap = gdk.Pixmap(widget.window, w, h)
        self.changeDrawable(self.pixmap)
        self.clear()
        # NOTE(review): hard-coded 620x380 grey rectangle looks like a debug
        # leftover for the default 640x480 size -- confirm it is intended.
        self.setColor((0.8,0.8,0.8))
        self.fillRect(10, 10, 620, 380)
        return True

    def onExpose(self, widget, event):
        # Copy the damaged region from the offscreen pixmap to the screen.
        x, y, w, h = event.area
        gc = widget.get_style().fg_gc[gtk.STATE_NORMAL]
        widget.window.draw_drawable(gc, self.pixmap, x, y, x, y, w, h)
        return False
return False
########################################################################
#
# Test
#
########################################################################
# Run the backend's self test when executed as a script.
if __name__ == "__main__":
    import systemTest
    systemTest.Test_gtkGfx() | PypiClean |
/datascrubber-0.0.5.tar.gz/datascrubber-0.0.5/README.md | # dstrial Module Documentation
## Overview
This documentation explains the functions available in the ***`Datacleaning`*** module, which is designed to assist in data cleaning and analytics tasks
## Creating the an instance
Through this process, we are calling our class which will help us access the various functions to be used.
```python
from Datascrubber import Datacleaning
data_cleaner = Datacleaning()
```
### OR
```python
from Datascrubber.datacleaning import Datacleaning
data_cleaner = Datacleaning()
```
So for all the remaining part of our code, we shall be using the data_cleaner.
## Table of Contents
- read_data
- columns
- head
- summary
- missing_values
- col_missing_value
- remove_empty_columns
- data_types
- cat_cols
- cont_cols
- distributions
- data_types
- col_dist
- cat_dist
- col_cat_dist
- remove_missingvalues
- drop
- outliers
- outliers_single
- remove_outliers
- corr_matrix
- cont_corr
- cont_to_cont
- cat_to_cat
- countplot
- contingency_table
- Chi_square
- combined_boplot
- singleAnova
- cont_to_cat
- getdata
- data_cleaning
<a name="#read_data"></a>
## read_data
### Function Name: read_data
This function is used to read data from a file. It supports reading data from a CSV file, Excel file, and a JSON file. The function automatically detects the file type and reads the data accordingly.
#### Parameters
- `file_path`: The path to the file containing the data. This must be a string input.
#### Return Value
- Returns a dataframe containing the data from the file.
#### Usage Example
```python
data_cleaner.read_data("file_path") # replace file_path with the path of the file.
```
<a name="#columns"></a>
## columns
### Function Name: columns
This function returns the columns of the loaded dataset.
#### Parameters
None
#### Return Value
- Returns a list of column names in the dataset.
#### Usage Example
```python
data_cleaner.columns()
```
##### OR
```python
columns_list = data_cleaner.columns()
print("Columns:", columns_list)
```
<a name="#head"></a>
## head
### Function Name: head
This function is used to display the first few rows of the data. It is useful to get a quick overview of the data.
#### Parameters
- `number`: The number of rows to display.
#### Return Value
- Returns a dataframe containing the first few rows of the data.
#### Usage Example
```python
data_cleaner.head(number=5) # replace number with the number of rows to display.
```
- It is also valid not to include the `number` keyword and instead just substitute an integer or float.
<a name="#summary"></a>
## summary
### Function Name: summary
This function is used to generate summary statistics of the data. It provides valuable information about the distribution, central tendency, and spread of the data. It calculates statistics for each numeric column in the data.
The statistics provided by the summary function include:
- Count: The number of non-null values in the column.
- Mean: The arithmetic mean (average) of the values.
- Standard Deviation: A measure of the spread or dispersion of the values.
- Minimum: The minimum value in the column.
- 25th Percentile (Q1): The value below which 25% of the data falls.
- 50th Percentile (Median or Q2): The middle value of the data.
- 75th Percentile (Q3): The value below which 75% of the data falls.
- Maximum: The maximum value in the column.
Then to a `categorical column`, The summary function generates statistics such as:
- Count: The number of non-null values in the column.
- Unique: The number of unique categories or levels in the column.
- Top: The most frequent category in the column.
- Freq: The frequency of the top category.
#### Parameters
None
#### Return Value
- Returns a dataframe containing the summary statistics of the data.
#### Usage Example
```python
data_cleaner.summary()
```
<a name="#missing_values"></a>
## missing_values
### Function Name: missing_values
This function is used to check for missing values in the data.
#### Parameters
None
#### Return Value
- Returns a dataframe containing the number of missing values in each column.
#### Usage Example
```python
data_cleaner.missing_values()
```
<a name="#col_missing_value"></a>
## col_missing_value
### Function Name: col_missing_value
This function is used to check for missing values in a specific column.
#### Parameters
- `col_name`: The name of the column to check for missing values. The column name must be entered as as a string.
#### Return Value
- Returns the number of missing values in the specified column.
#### Usage Example
```python
data_cleaner.col_missing_value("col_name") #replace col_name with the column name.
```
<a name="#remove_empty_columns"></a>
## remove_empty_columns
### Function Name: remove_empty_columns
This function is used to remove columns that have no values. It is useful to remove columns that have no values as they do not provide any useful information.
#### Parameters
None
#### Return Value
None
#### Usage Example
```python
data_cleaner.remove_empty_columns()
```
`Note:` This function is also automatically called by the `remove_missingvalues` function hence for predictive situations, one can put the column back after cleaning.
<a name="#data_types"></a>
## data_types
### Function Name: data_types
This function is used to check the data types of the columns and the creates subsets of the data based on the data types. It creates a subset of the data containing only the categorical columns and another subset containing only the numeric columns.
#### Parameters
None
#### Return Value
None
`Note:` This function may not necessarily be used directly, as it is called in the background by other functions.
<a name="#cat_cols"></a>
## cat_cols
### Function Name: cat_cols
This function is used to get the categorical columns in the data.
#### Parameters
None
#### Return Value
- Returns a dataframe of the categorical columns in the data.
#### Usage Example
```python
data_cleaner.cat_cols()
```
<a name="#cont_cols"></a>
## cont_cols
### Function Name: cont_cols
This function is used to get the numeric columns in the data.
#### Parameters
None
#### Return Value
- Returns a dataframe of the numeric columns in the data.
#### Usage Example
```python
data_cleaner.cont_cols()
```
<a name="#distributions"></a>
## distributions
### Function Name: distributions
This function is used to plot the distribution of the numeric columns in the data. It plots a histogram for each numeric column in the data.
It is useful to get an idea of the distribution of the data. It can be used to identify outliers and skewness in the data.
#### Parameters
None
#### Return Value
None
#### Usage Example
```python
data_cleaner.distributions()
```
<a name="#col_dist"></a>
## col_dist
### Function Name: col_dist
This function is used to plot the distribution of a specific numeric column in the data.
#### Parameters
- `col`: The name of the column to plot the distribution for.
#### Return Value
None
#### Usage Example
```python
data_cleaner.col_dist("col") #replace col with the column name.
```
<a name="#cat_dist"></a>
## cat_dist
### Function Name: cat_dist
This function is used to plot the distribution of all categorical columns in the data.
#### Parameters
None
#### Return Value
None
#### Usage Example
```python
data_cleaner.cat_dist()
```
<a name="#col_cat_dist"></a>
## col_cat_dist
### Function Name: col_cat_dist
This function is used to plot the distribution of a specific categorical column in the data.
#### Parameters
- `col`: The name of the column to plot the distribution for.
#### Return Value
None
#### Usage Example
```python
data_cleaner.col_cat_dist("col") #replace col with the column name.
```
<a name="#remove_missingvalues"></a>
## remove_missingvalues
### Function Name: remove_missingvalues
This function is used to deal with rows that have missing values (NA).
The function first removes all the duplicates that are within the data and also automatically removes all the empty columns.
The missing values are then replaced with the mode of the column (`the most occurring value`) for categorical columns.
For numeric columns, the missing values are replaced with either the mean or median of the column depending on the skewness of the data.
#### Parameters
None
#### Return Value
None
#### Usage Example
```python
data_cleaner.remove_missingvalues()
```
## drop
### Function Name: drop
This function is used to drop columns from the data.
#### Parameters
- `column`: This is a two way parameter. It can either be a string or a list of strings. If it is a string, it is the name of the column to drop. If it is a list of strings, it is a list of columns to drop.
#### Return Value
None
#### Usage Example
```python
data_cleaner.drop("column") #replace column with the column name.
```
##### OR
```python
data_cleaner.drop(["column_1","column_2"]) #replace column_1 and column_2 with the column names.
```
<a name="#outliers"></a>
## outliers
### Function Name: outliers
This function is used to plot the outliers in the data. It plots a boxplot for each numeric column in the data.
It is useful to get an idea of the outliers in the data. It can be used to identify outliers in the data.
#### Parameters
None
#### Return Value
None
#### Usage Example
```python
data_cleaner.outliers()
```
<a name="#outliers_single"></a>
## outliers_single
### Function Name: outliers_single
This function is used to plot the outliers in a specific numeric column in the data.
#### Parameters
- `column`: The name of the numeric column to plot the outliers for.
#### Return Value
None
#### Usage Example
```python
data_cleaner.outliers_single("column") #replace column with the column name.
```
<a name="#remove_outliers"></a>
## remove_outliers
### Function Name: remove_outliers
This function is used to remove outliers from the data. It removes outliers from all the numeric columns in the data.
The concept of outliers is based on the interquartile range (IQR). The IQR is the difference between the 75th percentile (Q3) and the 25th percentile (Q1). The IQR is used to identify outliers by defining limits on the sample values that are a factor k of the IQR below the 25th percentile or above the 75th percentile. The common value for the factor k is the value 1.5. This is the default value used by the function.
#### Parameters
None
#### Return Value
None
#### Usage Example
```python
data_cleaner.remove_outliers()
```
`Note :` Depending on the data one is dealing with, the outliers may not be removed completely. Hence one can use alternative methods to remove outliers for example using the `imputation with nearest logical values`, `Transformation`, `Segmentation` and others.
<a name="#corr_matrix"></a>
## corr_matrix
### Function Name: corr_matrix
This function is used to plot the correlation matrix of the data. It plots a heatmap of the correlation matrix of the data.
It is useful to get an idea of the correlation between the numeric columns in the data. It can be used to identify highly correlated columns in the data.
#### Parameters
None
#### Return Value
None
#### Usage Example
```python
data_cleaner.corr_matrix()
```
<a name="#cont_corr"></a>
## cont_corr
### Function Name: cont_corr
This function is used to plot a pairplot of the numeric columns in the data.
#### Parameters
None
#### Return Value
None
#### Usage Example
```python
data_cleaner.cont_corr()
```
<a name="#cont_to_cont"></a>
## cont_to_cont
### Function Name: cont_to_cont
The function is used to show significant relationship or difference between two numeric columns in the data.
This is achieved through plotting a scatter plot of two numeric columns in the data.
The function also goes on to indicate the correlation value between the two columns.
#### Parameters
- `col1`: This is a two way parameter. It can either be a string or a list of strings. If it is a string, it is the name of the first column to plot. If it is a list of strings, it is a list of columns to plot.
- `col2`: This is a two way parameter. It can either be a string or a list of strings. If it is a string, it is the name of the second column to plot. If it is a list of strings, it is a list of columns to plot.
#### Return Value
None
#### Usage Example
```python
data_cleaner.cont_to_cont("col1","col2") #replace col1 and col2 with the column names.
```
##### OR
```python
data_cleaner.cont_to_cont("col1",["col2","col3"]) #replace col1, col2 and col3 with the column names.
```
##### OR
```python
data_cleaner.cont_to_cont(["col1","col2"],"col3") #replace col1, col2 and col3 with the column names.
```
##### OR
```python
data_cleaner.cont_to_cont(["col1","col2"],["col3","col4"]) #replace col1, col2, col3 and col4 with the column names.
```
## cat_to_cat
### Function Name: cat_to_cat
The function is used to show significant relationship or difference between two categorical columns in the data.
The function hence displays a contingency table of the two categorical columns in the data. and also plots a comparative bar graph of the two columns.
#### Parameters
- `col1`: This is a two way parameter. It can either be a string or a list of strings. If it is a string, it is the name of the first column to plot. If it is a list of strings, it is a list of columns to plot.
- `col2`: This is a two way parameter. It can either be a string or a list of strings. If it is a string, it is the name of the second column to plot. If it is a list of strings, it is a list of columns to plot.
#### Return Value
None
#### Usage Example
```python
data_cleaner.cat_to_cat("col1","col2") #replace col1 and col2 with the column names.
```
##### OR
```python
data_cleaner.cat_to_cat("col1",["col2","col3"]) #replace col1, col2 and col3 with the column names.
```
##### OR
```python
data_cleaner.cat_to_cat(["col1","col2"],"col3") #replace col1, col2 and col3 with the column names.
```
##### OR
```python
data_cleaner.cat_to_cat(["col1","col2"],["col3","col4"]) #replace col1, col2, col3 and col4 with the column names.
```
## countplot
### Function Name: countplot
The function is used to plot a countplot of a two categorical columns in the data.
This is a way of showing the distribution of the two categorical columns in the data.
#### Parameters
- `col1`: This is a string. It is the name of the first column to plot.
- `col2`: This is a string. It is the name of the second column to plot.
#### Return Value
None
#### Usage Example
```python
data_cleaner.countplot("col1","col2") #replace col1 and col2 with the column names.
```
## contingency_table
### Function Name: contingency_table
The function is used to show significant relationship or difference between two categorical columns in the data.
The function hence displays a contingency table of the two categorical columns in the data.
#### Parameters
- `col1`: This is a string. It is the name of the first column to plot.
- `col2`: This is a string. It is the name of the second column to plot.
#### Return Value
None
#### Usage Example
```python
data_cleaner.contingency_table("col1","col2") #replace col1 and col2 with the column names.
```
## Chi_square
### Function Name: Chi_square
The function tests for a statistically significant relationship between nominal and ordinal variables. In other words, it tells us whether two variables are independent of one another.
#### Parameters
- `col1`: This is a string. It is the name of the first column categorical column.
- `col2`: This is a string. It is the name of the second column categorical column.
#### Return Value
- Chi_square value
- The p-value
- The degrees of freedom
- A string indicating whether the two columns are independent or not.
#### Usage Example
```python
data_cleaner.Chi_square("col1","col2") #replace col1 and col2 with the column names.
```
## combined_boxplot
### Function Name: combined_boxplot
The function is used to plot a set of side by side box plots, one for each of the categories.
#### Parameters
- `col1`: This is a string. It is the name of the first column, categorical column.
- `col2`: This is a string. It is the name of the second column, continuous column.
#### Return Value
None
#### Usage Example
```python
data_cleaner.combined_boxplot("col1", "col2") #replace col1 and col2 with the column names.
```
## singleAnova
### Function Name: singleAnova
The function is used to test for a statistically significant difference between the means of two or more groups.
#### Parameters
- `col1`: This is a string. It is the name of the first column continuous column.
- `col2`: This is a string. It is the name of the second column categorical column.
#### Return Value
- A string indicating whether the two columns are independent or not.
#### Usage Example
```python
data_cleaner.singleAnova("col1", "col2") #replace col1 and col2 with the column names.
```
## cont_to_cat
### Function Name: cont_to_cat
The function is used to show significant relationship or difference between a continuous and a categorical column in the data.
The function hence displays a side by side boxplot of the continuous column and a categorical column in the data.
#### Parameters
- `col1`: This is a two way parameter. It can either be a string or a list of strings. If it is a string, it is the name of the first column, a continuous column. If it is a list of strings, it is a list of columns to plot. On the other hand, it can be a string or a list of strings of categorical columns.
- `col2`: This is a two way parameter. It can either be a string or a list of strings. If it is a string, it is the name of the second column, a categorical column. If it is a list of strings, it is a list of columns to plot. On the other hand, it can be a string or a list of strings of continuous columns.
#### Return Value
- A string indicating whether the two columns are independent or not.
#### Usage Example
```python
data_cleaner.cont_to_cat("col1","col2") #replace col1 and col2 with the column names.
```
##### OR
```python
data_cleaner.cont_to_cat("col1",["col2","col3"]) #replace col1, col2 and col3 with the column names.
```
##### OR
```python
data_cleaner.cont_to_cat(["col1","col2"],"col3") #replace col1, col2 and col3 with the column names.
```
##### OR
```python
data_cleaner.cont_to_cat(["col1","col2"],["col3","col4"]) #replace col1, col2, col3 and col4 with the column names.
```
## getdata
### Function Name: getdata
The function returns the data that has been cleaned and preprocessed.
#### Parameters
None
#### Return Value
- Returns a dataframe containing the cleaned data.
#### Usage Example
```python
data = data_cleaner.getdata()
data.head()
```
`Note :` This method can be used to access the data at any step after achieving any required process.
## data_cleaning
### Function Name: data_cleaning
The function is used to clean the data. It performs the following operations:
- Removes empty columns.
- Removes duplicate rows.
- Deals with missing values appropriately.
- Removes outliers.
#### Parameters
None
#### Return Value
- A dataframe containing the cleaned data.
#### Usage Example
```python
data = data_cleaner.data_cleaning()
data.head()
```
| PypiClean |
/Hikka_Pyro-2.0.66-py3-none-any.whl/pyrogram/types/messages_and_media/video.py |
from datetime import datetime
from typing import List
import pyrogram
from pyrogram import raw, utils
from pyrogram import types
from pyrogram.file_id import FileId, FileType, FileUniqueId, FileUniqueType
from ..object import Object
class Video(Object):
    """A video file.

    Parameters:
        file_id (``str``):
            Identifier for this file, which can be used to download or reuse the file.

        file_unique_id (``str``):
            Unique identifier for this file, which is supposed to be the same over time and for different accounts.
            Can't be used to download or reuse the file.

        width (``int``):
            Video width as defined by sender.

        height (``int``):
            Video height as defined by sender.

        duration (``int``):
            Duration of the video in seconds as defined by sender.

        file_name (``str``, *optional*):
            Video file name.

        mime_type (``str``, *optional*):
            Mime type of a file as defined by sender.

        file_size (``int``, *optional*):
            File size.

        supports_streaming (``bool``, *optional*):
            True, if the video was uploaded with streaming support.

        ttl_seconds (``int``, *optional*):
            Time-to-live seconds, for secret photos.

        date (:py:obj:`~datetime.datetime`, *optional*):
            Date the video was sent.

        thumbs (List of :obj:`~pyrogram.types.Thumbnail`, *optional*):
            Video thumbnails.
    """

    def __init__(
        self,
        *,
        client: "pyrogram.Client" = None,
        file_id: str,
        file_unique_id: str,
        width: int,
        height: int,
        duration: int,
        file_name: str = None,
        mime_type: str = None,
        file_size: int = None,
        supports_streaming: bool = None,
        ttl_seconds: int = None,
        date: datetime = None,
        thumbs: List["types.Thumbnail"] = None
    ):
        super().__init__(client)

        # file identifiers
        self.file_id = file_id
        self.file_unique_id = file_unique_id
        # geometry and playback length
        self.width = width
        self.height = height
        self.duration = duration
        # file metadata
        self.file_name = file_name
        self.mime_type = mime_type
        self.file_size = file_size
        self.supports_streaming = supports_streaming
        self.ttl_seconds = ttl_seconds
        self.date = date
        self.thumbs = thumbs

    @staticmethod
    def _parse(
        client,
        video: "raw.types.Document",
        video_attributes: "raw.types.DocumentAttributeVideo",
        file_name: str,
        ttl_seconds: int = None
    ) -> "Video":
        # Encode the raw Telegram document identifiers into the portable
        # string representations exposed to library users
        file_id = FileId(
            file_type=FileType.VIDEO,
            dc_id=video.dc_id,
            media_id=video.id,
            access_hash=video.access_hash,
            file_reference=video.file_reference
        ).encode()
        file_unique_id = FileUniqueId(
            file_unique_type=FileUniqueType.DOCUMENT,
            media_id=video.id
        ).encode()

        return Video(
            file_id=file_id,
            file_unique_id=file_unique_id,
            width=video_attributes.w,
            height=video_attributes.h,
            duration=video_attributes.duration,
            file_name=file_name,
            mime_type=video.mime_type,
            supports_streaming=video_attributes.supports_streaming,
            file_size=video.size,
            date=utils.timestamp_to_datetime(video.date),
            ttl_seconds=ttl_seconds,
            thumbs=types.Thumbnail._parse(client, video),
            client=client
        )
/Geode_GEM-0.12.0-py3-none-any.whl/geode_gem/widgets/button.py |
# Geode
from geode_gem.widgets.common import GeodeGtkCommon
from geode_gem.widgets.menu import GeodeGtkMenu
# GObject
from gi.repository import Gtk, Pango
# ------------------------------------------------------------------------------
# Class
# ------------------------------------------------------------------------------
class CommonButton(GeodeGtkCommon):

    def __init__(self, subclass, label, *args, **kwargs):
        """ Constructor

        Parameters
        ----------
        subclass : Gtk.Button
            Subclass widget type
        label : str
            String use as button label
        """
        GeodeGtkCommon.__init__(self, subclass, **kwargs)

        # Optional icon name; when given, the label becomes a tooltip
        self.icon_name = kwargs.get("icon_name", None)
        # Inner image widget, only created when an icon name is given
        self.image = None

        if self.icon_name is None:
            # Text-only button
            self.set_label(label)
        else:
            # Icon button: the label is shown as a tooltip instead
            self.set_tooltip_text(label)
            self.image = Gtk.Image.new_from_icon_name(
                self.icon_name, kwargs.get("icon_size", Gtk.IconSize.BUTTON))
            setattr(self.image, "identifier", f"{self.identifier}_image")
            # Register and pack the image into the button
            self.append_widget(self.image)
            self.add(self.image)
class GeodeGtkButton(CommonButton, Gtk.Button):
    def __init__(self, *args, **kwargs):
        """ Plain push button wrapper around Gtk.Button

        See geode_gem.ui.widgets.button.CommonButton
        """
        CommonButton.__init__(self, Gtk.Button, *args, **kwargs)
class GeodeGtkFileChooserButton(GeodeGtkCommon, Gtk.FileChooserButton):
    def __init__(self, *args, **kwargs):
        """ File chooser button wrapper around Gtk.FileChooserButton

        See geode_gem.ui.widgets.button.CommonButton
        """
        GeodeGtkCommon.__init__(self, Gtk.FileChooserButton, **kwargs)

        # Any Gtk.FileChooserAction passed positionally configures the
        # chooser mode (the last one found wins)
        for argument in args:
            if isinstance(argument, Gtk.FileChooserAction):
                self.set_action(argument)
class GeodeGtkFontButton(GeodeGtkCommon, Gtk.FontButton):
    def __init__(self, *args, **kwargs):
        """ Font chooser button wrapper around Gtk.FontButton

        See geode_gem.ui.widgets.button.CommonButton
        """
        GeodeGtkCommon.__init__(self, Gtk.FontButton, **kwargs)

        # HACK: Set an ellipsize mode for the label inside FontButton
        if kwargs.get("use_ellipsize", False):
            for child in self.get_child():
                # use isinstance instead of an exact type comparison so
                # Gtk.Label subclasses are handled as well
                if isinstance(child, Gtk.Label):
                    child.set_ellipsize(Pango.EllipsizeMode.END)
class GeodeGtkLinkButton(CommonButton, Gtk.LinkButton):
    def __init__(self, *args, **kwargs):
        """ Hyperlink button wrapper around Gtk.LinkButton

        See geode_gem.ui.widgets.button.CommonButton
        """
        CommonButton.__init__(self, Gtk.LinkButton, *args, **kwargs)
class GeodeGtkMenuButton(CommonButton, Gtk.MenuButton):

    __setters__ = {
        "set_use_popover": False,
    }

    def __init__(self, label, *args, **kwargs):
        """ Menu button wrapper around Gtk.MenuButton

        See geode_gem.ui.widgets.button.CommonButton
        """
        CommonButton.__init__(self, Gtk.MenuButton, label, *args, **kwargs)

        # Optional submenu built from the positional arguments
        self.submenu = None

        if args:
            # Build, register and attach the popup menu in one pass
            self.submenu = GeodeGtkMenu(*args)
            self.append_widget(self.submenu)
            self.set_popup(self.submenu)
            self.submenu.show_all()
class GeodeGtkSpinButton(GeodeGtkCommon, Gtk.SpinButton):
    def __init__(self, *args, **kwargs):
        """ Numeric spin button wrapper around Gtk.SpinButton

        See geode_gem.ui.widgets.button.CommonButton
        """
        GeodeGtkCommon.__init__(self, Gtk.SpinButton, **kwargs)
class GeodeGtkToggleButton(CommonButton, Gtk.ToggleButton):
    def __init__(self, *args, **kwargs):
        """ Two-state toggle button wrapper around Gtk.ToggleButton

        See geode_gem.ui.widgets.button.CommonButton
        """
        CommonButton.__init__(self, Gtk.ToggleButton, *args, **kwargs)
/CAMSA-1.3.tar.gz/CAMSA-1.3/camsa/libs/js/dataTables.material.min.js | (function(c){"function"===typeof define&&define.amd?define(["jquery","datatables.net"],function(a){return c(a,window,document)}):"object"===typeof exports?module.exports=function(a,d){a||(a=window);if(!d||!d.fn.dataTable)d=require("datatables.net")(a,d).$;return c(d,a,a.document)}:c(jQuery,window,document)})(function(c,a,d){var g=c.fn.dataTable;c.extend(!0,g.defaults,{dom:"<'mdl-grid'<'mdl-cell mdl-cell--6-col'l><'mdl-cell mdl-cell--6-col'f>><'mdl-grid dt-table'<'mdl-cell mdl-cell--12-col'tr>><'mdl-grid'<'mdl-cell mdl-cell--4-col'i><'mdl-cell mdl-cell--8-col'p>>",
renderer:"material"});c.extend(g.ext.classes,{sWrapper:"dataTables_wrapper form-inline dt-material",sFilterInput:"form-control input-sm",sLengthSelect:"form-control input-sm",sProcessing:"dataTables_processing panel panel-default"});g.ext.renderer.pageButton.material=function(a,h,r,s,i,n){var o=new g.Api(a),l=a.oLanguage.oPaginate,t=a.oLanguage.oAria.paginate||{},f,e,p=0,q=function(d,g){var m,h,j,b,k=function(a){a.preventDefault();!c(a.currentTarget).hasClass("disabled")&&o.page()!=a.data.action&&
o.page(a.data.action).draw("page")};m=0;for(h=g.length;m<h;m++)if(b=g[m],c.isArray(b))q(d,b);else{f="";j=!1;switch(b){case "ellipsis":f="…";e="disabled";break;case "first":f=l.sFirst;e=b+(0<i?"":" disabled");break;case "previous":f=l.sPrevious;e=b+(0<i?"":" disabled");break;case "next":f=l.sNext;e=b+(i<n-1?"":" disabled");break;case "last":f=l.sLast;e=b+(i<n-1?"":" disabled");break;default:f=b+1,e="",j=i===b}j&&(e+=" mdl-button--raised mdl-button--colored");f&&(j=c("<button>",{"class":"mdl-button "+
e,id:0===r&&"string"===typeof b?a.sTableId+"_"+b:null,"aria-controls":a.sTableId,"aria-label":t[b],"data-dt-idx":p,tabindex:a.iTabIndex,disabled:-1!==e.indexOf("disabled")}).html(f).appendTo(d),a.oApi._fnBindAction(j,{action:b},k),p++)}},k;try{k=c(h).find(d.activeElement).data("dt-idx")}catch(u){}q(c(h).empty().html('<div class="pagination"/>').children(),s);k&&c(h).find("[data-dt-idx="+k+"]").focus()};return g}); | PypiClean |
/FragPELE-2.1.1.tar.gz/FragPELE-2.1.1/frag_pele/Templates/constants.py | import sys
import os
import socket
DIR = os.path.dirname(__file__)
##############################################
# PUBLIC CONSTANTS (to change by the user)
# Preparation inputs to grow
machine = socket.getfqdn()
if "bsc.mn" in machine:
# PELE parameters
PATH_TO_PELE = "/gpfs/projects/bsc72/PELE++/mniv/rev12536/bin/Pele_mpi"
PATH_TO_PELE_DATA = "/gpfs/projects/bsc72/PELE++/data/rev12360/Data"
PATH_TO_PELE_DOCUMENTS = "/gpfs/projects/bsc72/PELE++/Documents/rev12360"
PATH_TO_LICENSE = "/gpfs/projects/bsc72/PELE++/license"
# PlopRotTemp parameters
SCHRODINGER_PY_PATH = "/gpfs/projects/bsc72/SCHRODINGER_ACADEMIC/utilities/python"
else:
# PELE parameters
PATH_TO_PELE = "$PELE_BIN"
PATH_TO_PELE_DATA = os.path.join("$PELE", "Data")
PATH_TO_PELE_DOCUMENTS = os.path.join("$PELE", "Documents")
PATH_TO_LICENSE = "$LICENSE"
SCHRODINGER_PY_PATH = os.path.join("$SCHRODINGER", "utilities/python")
CONTROL_TEMPLATE = os.path.join(DIR, "Templates/control_template.conf")
RESULTS_FOLDER = "growing_output"
GROWING_STEPS = 10
SELECTION_CRITERIA = "Binding Energy"
SERIE_FILE = False
REPORT_NAME = "report"
TRAJECTORY_NAME = "trajectory"
CPUS = 48
PELE_EQ_STEPS = 20
RESTART = False
STEPS = 6
TEMPERATURE = 1000
MAX_OVERLAP = 0.70
MIN_OVERLAP = 0.50
TEMPERATURE = 1000
# Clustering parameters
DISTANCE_COUNTER = 4
CONTACT_THRESHOLD = 0.3
EPSILON = 0.5
##############################################
# PRIVATE CONSTANTS (not to change)
PRE_WORKING_DIR = "pregrow"
TEMPLATES_PATH = "DataLocal/Templates/OPLS2005/HeteroAtoms/"
ROTAMERS_PATH = "DataLocal/LigandRotamerLibs/"
PDBS_OUTPUT_FOLDER = "PDBs_growing"
OUTPUT_FOLDER = "growing_results/"
TEMPLATES_FOLDER = "growing_templates"
CONFIG_PATH = "log_configure.ini"
PLOP_PATH = "PlopRotTemp_S_2017/ligand_prep.py"
ROTRES = 30
# Clustering constants
CONDITION = "min" # min or max
METRICS_WEIGHTS = "linear"
NUM_CLUSTERS = 5
# Messages constants
TEMPLATE_MESSAGE = "We are going to transform the template _{}_ into _{}_ in _{}_ steps! Starting..."
LINES_MESSAGE = "\n•*´¨`*•.¸¸.•*´¨`*•.¸¸.•*´¨`*•.¸¸.•*´¨`*•.¸¸.••*´¨`*•.¸¸.•*´¨`*•.¸¸.•*´¨`*•.¸¸.•*´¨`*•.¸¸.•\n"
SELECTED_MESSAGE = "\n============ Files selected ============\nControl file: {}\nPDB file: {}\nResults folder name: {}\nStep: {}\n"
FINISH_SIM_MESSAGE = "SIMULATION {} COMPLETED!!! "
############################################## | PypiClean |
/NearPy-0.2.2.tar.gz/NearPy-0.2.2/nearpy/storage/storage.py |
# Copyright (c) 2013 Ole Krause-Sparmann
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
class Storage(object):
    """ Interface for storage adapters.

    Concrete backends (e.g. in-memory, redis) must implement every method;
    the base class only raises NotImplementedError.
    """

    def store_vector(self, hash_name, bucket_key, v, data):
        """
        Stores vector and JSON-serializable data in bucket with specified key.

        Args:
            hash_name: name of the hash the bucket belongs to
            bucket_key: key identifying the bucket within that hash
            v: the vector to store
            data: JSON-serializable payload stored alongside the vector
        """
        raise NotImplementedError

    def get_bucket(self, hash_name, bucket_key):
        """
        Returns bucket content as list of tuples (vector, data).

        Args:
            hash_name: name of the hash the bucket belongs to
            bucket_key: key identifying the bucket within that hash
        """
        raise NotImplementedError

    def clean_buckets(self, hash_name):
        """
        Removes all buckets of the given hash and their content.
        """
        raise NotImplementedError

    def clean_all_buckets(self):
        """
        Removes all buckets and their content.
        """
        raise NotImplementedError

    def store_hash_configuration(self, lshash):
        """
        Stores hash configuration

        Args:
            lshash: the hash object whose configuration should be persisted
        """
        raise NotImplementedError

    def load_hash_configuration(self, hash_name):
        """
        Loads and returns hash configuration
        """
        raise NotImplementedError
/ConferenceCorpus-0.1.1.tar.gz/ConferenceCorpus-0.1.1/corpus/datasources/dblpxml.py | from pathlib import Path
from io import BytesIO
import urllib.request
from gzip import GzipFile
from lxml import etree
from collections import Counter
from xml.dom import minidom
from lodstorage.sql import SQLDB
from lodstorage.schema import Schema
from corpus.utils.progress import Progress
import os
import re
import time
class DblpXml(object):
'''
handler for https://dblp.uni-trier.de/xml/ dumps
see https://github.com/IsaacChanghau/DBLPParser/blob/master/src/dblp_parser.py
'''
    def __init__(self,xmlname:str="dblp.xml",dtd_validation:bool=False,xmlpath:str=None,gzurl:str="https://dblp.uni-trier.de/xml/dblp.xml.gz",debug=False,verbose=True):
        '''
        Constructor

        Args:
            xmlname (str): name of the xml file
            dtd_validation (bool): True if dtd validation should be activated when parsing
            xmlpath(str): download path; defaults to ~/.dblp when None
            gzurl(str): url of the gzipped original file
            debug(bool): if True show debugging information
            verbose(bool): if True show logging information
        '''
        self.debug=debug
        self.verbose=verbose
        if xmlpath is None:
            # default to a hidden cache directory in the user's home
            home = str(Path.home())
            xmlpath=f"{home}/.dblp"
        self.gzurl=gzurl
        self.xmlname=xmlname
        self.xmlpath=xmlpath
        self.dtd_validation=dtd_validation
        # derive xmlfile/dtdfile from xmlpath and xmlname
        self.reinit()
def reinit(self):
'''
reinitialize my file names
'''
self.xmlfile="%s/%s" % (self.xmlpath,self.xmlname)
self.dtdfile="%s/%s" % (self.xmlpath,self.xmlname.replace(".xml",".dtd"))
def getSize(self)->int:
'''
get the size of my xmlFile
Returns:
int: the size
'''
stats=os.stat(self.xmlfile)
size=stats.st_size
return size
    def getExpectedTotal(self)->int:
        '''
        get the expected Total of records

        Returns:
            int: estimated record count, assuming ~380 bytes per record on average
        '''
        return self.getSize()//380
    def warnFullSize(self):
        '''
        warn if we are using the full dataset
        '''
        # ~millions of records - processing the full dump is expensive
        print(f"Warning - using full {self.xmlfile} dataset ~{self.getExpectedTotal()/1000000:3.1f}m records!")
def isDownloaded(self,minsize:int=3000000000)->bool:
'''
check that the dblp file is downloaded
Returns:
bool: True if the dblpfile is fully downloaded and is bigger than the given minimum size
'''
result=os.path.isfile(self.xmlfile)
if result:
result=self.getSize()>=minsize
return result
    def prettyXml(self,tree,indent=' '):
        '''
        get a pretty XML representation of the given etree

        Args:
            tree: the lxml ElementTree to format
            indent(str): the string used per indentation level

        Returns:
            str: the pretty-printed XML
        '''
        # serialize with lxml, then re-parse with minidom to get indentation
        xmlstr = minidom.parseString(etree.tostring(tree.getroot())).toprettyxml(indent=indent)
        return xmlstr
    def createSample(self,keyEntities=None,keyPrefix="conf/",entityLimit=1000,entities=None,progress:int=500000):
        '''
        create a sample with the given entityLimit

        Args:
            keyEntities(list): entity tags that are only added when their key
                matches keyPrefix; defaults to proceedings/inproceedings
            keyPrefix(str): the keyPrefix to filter for
            entityLimit(int): maximum number of records to collect per entity tag
            entities(list): entity tags that are always sampled; defaults to
                article/book/incollection/www
            progress(int): progress indicator step width

        Returns:
            etree.ElementTree: a new tree rooted at <dblp> with the sampled records
        '''
        if entities is None:
            entities=['article','book','incollection','www']
        if keyEntities is None:
            keyEntities=['proceedings','inproceedings']
        allEntities=[]
        allEntities.extend(entities)
        allEntities.extend(keyEntities)
        root = etree.Element('dblp')
        counter=Counter()  # records collected so far per entity tag
        level=0  # element nesting depth; records live at level 2 (children of <dblp>)
        showProgress=Progress(progress)
        for event, element in self.iterParser():
            showProgress.next()
            if event == 'start':
                level += 1
                if level==2:
                    # always sample plain entities; key entities only when
                    # their key starts with the wanted prefix (e.g. "conf/")
                    doadd=element.tag in entities
                    if element.tag in keyEntities:
                        if 'key' in element.attrib:
                            key=element.attrib['key']
                            if key.startswith(keyPrefix):
                                doadd=True
                    if (doadd and counter[element.tag]<entityLimit):
                        # deep-copy the element via serialization so it survives
                        # the clear_element calls below
                        node=etree.fromstring(etree.tostring(element))
                        root.append(node)
                        counter[element.tag]+=1
                    else:
                        # check whether every wanted entity reached its limit -
                        # then we can stop parsing early
                        keys=counter.keys()
                        done=True
                        for entity in allEntities:
                            if not entity in keys:
                                done=False
                            else:
                                done=done and counter[entity]>=entityLimit
                        if done:
                            break
                        pass
            elif event == 'end':
                level -=1
                # free memory for elements we are finished with
                self.clear_element(element)
        sampleTree=etree.ElementTree(root)
        return sampleTree
    def getXmlFile(self,reload=False):
        '''
        get the dblp xml file - will download the file if it doesn't exist

        Args:
            reload(bool): if True force download

        Returns:
            str: the xmlfile
        '''
        if not os.path.isfile(self.xmlfile) or reload:
            os.makedirs(self.xmlpath,exist_ok=True)
            if self.verbose:
                print(f"downloading {self.xmlfile} from {self.gzurl}")
            # NOTE(review): the whole gzipped dump is read into memory before
            # decompression - assumes enough RAM for the multi-GB dump; confirm
            urlreq = urllib.request.urlopen(self.gzurl)
            z = GzipFile(fileobj=BytesIO(urlreq.read()), mode='rb')
            with open(self.xmlfile, 'wb') as outfile:
                outfile.write(z.read())
        # fetch the matching DTD so parsing with dtd_validation works
        if not os.path.isfile(self.dtdfile) or reload:
            dtdurl=self.gzurl.replace(".xml.gz",".dtd")
            urllib.request.urlretrieve (dtdurl, self.dtdfile)
        return self.xmlfile
def iterParser(self):
"""
Create a dblp data iterator of (event, element) pairs for processing
Returns:
etree.iterparse result
"""
if not os.path.isfile(self.xmlfile):
raise ("dblp xml file %s not downloaded yet - please call getXmlFile first")
# with dtd validation
if self.debug:
print(f"starting parser for {self.xmlfile}" )
# https://lxml.de/api/lxml.etree.iterparse-class.html
self.parser=etree.iterparse(source=self.xmlfile, events=('end', 'start' ), dtd_validation=self.dtd_validation, load_dtd=True, huge_tree=True)
return self.parser
    def clear_element(self,element):
        """
        Free up memory for temporary element tree after processing the element

        Args:
            element(node): the etree element to clear together with its
                already-processed preceding siblings
        """
        element.clear()
        # drop preceding siblings so the in-memory tree stays small while
        # iterparse streams through the multi-GB dump
        while element.getprevious() is not None:
            del element.getparent()[0]
def checkRow(self,kind:str,index,row:dict):
'''
check the row content
Args:
kind(str): e.g. proceedings/article
index(int): the index of the row
row(dict): the row to process
'''
if kind=='proceedings':
if 'title' in row:
title=row['title']
if not title:
print(f'empty title for {index}{row}')
else:
print(f'missing title for {index}{row}')
def postProcess(self,_kind:str,_index,row:dict):
'''
postProcess the given row
Args:
_kind(str): e.g. proceedings/article
_index(int): the index of the row
row(dict): the row to process
'''
if 'key' in row:
key=row['key']
if key.startswith("conf/"):
conf=re.sub(r"conf/(.*)/.*",r"\1",key)
row['conf']=conf
pass
    def getXmlSqlDB(self,reload=False,showProgress=False):
        '''
        get the SqlDB derived from the XML download

        Args:
            reload(bool): if True force re-download of the XML dump
            showProgress(bool): if True show progress information

        Returns:
            SQLDB: the SQLite database created from the XML dump
        '''
        # ensure the XML dump is available, then convert it to SQLite
        self.getXmlFile(reload=reload)
        return self.getSqlDB(postProcess=self.postProcess,showProgress=showProgress)
def getSqlDB(self,limit=1000000000,sample=None,createSample=10000000,debug=False,recreate=False,postProcess=None,check_same_thread=False,showProgress:bool=False):
    '''
    get the SQL database or create it from the XML content

    Args:
        limit(int): maximum number of records
        sample(int): number of rows per table to show in debug mode (defaults to 5)
        createSample(int): number of records to inspect when deriving the table schema
        debug(bool): if True print schema and row details
        recreate(bool): if True remove an existing database file and rebuild it
        postProcess(callable): optional callback(kind, index, row) applied to
            every row before storing
        check_same_thread(bool): sqlite check_same_thread flag passed to SQLDB
        showProgress(bool): if True print progress and timing information
    '''
    dbname=f"{self.xmlpath}/dblp.sqlite"
    # estimate size
    if showProgress:
        expectedTotal=self.getExpectedTotal()
        # print roughly 86 progress markers over the whole parse
        progress=expectedTotal//86
    else:
        expectedTotal=None
        progress=None
    if sample is None:
        sample=5
    if (os.path.isfile(dbname)) and not recreate:
        # reuse the existing database as-is
        sqlDB=SQLDB(dbname=dbname,debug=debug,errorDebug=True,check_same_thread=check_same_thread)
    else:
        if (os.path.isfile(dbname)) and recreate:
            os.remove(dbname)
        sqlDB=SQLDB(dbname=dbname,debug=debug,errorDebug=True,check_same_thread=check_same_thread)
        starttime=time.time()
        # parse the XML dump into a dict of list-of-dicts, one entry per record kind
        dictOfLod=self.asDictOfLod(limit,progressSteps=progress,expectedTotal=expectedTotal)
        elapsed=time.time()-starttime
        executeMany=True
        if showProgress:
            print(f"parsing done after {elapsed:5.1f} s ... storing ...")
            starttime=time.time()
        fixNone=True
        # apply the optional per-row post processing before anything is stored
        for i, (kind, lod) in enumerate(dictOfLod.items()):
            if postProcess is not None:
                for j,row in enumerate(lod):
                    postProcess(kind,j,row)
        rows=0
        for i, (kind, lod) in enumerate(dictOfLod.items()):
            rows+=len(lod)
            if debug:
                print ("#%4d %5d: %s" % (i+1,len(lod),kind))
            # derive the table schema from up to createSample records, then bulk-store
            entityInfo=sqlDB.createTable(lod,kind,'key',sampleRecordCount=createSample,failIfTooFew=False)
            sqlDB.store(lod,entityInfo,executeMany=executeMany,fixNone=fixNone)
            # in debug mode show the first `sample` rows of each table
            for j,row in enumerate(lod):
                if debug:
                    print (" %4d: %s" % (j,row))
                if j>sample:
                    break
        elapsed=time.time()-starttime
        if showProgress:
            print (f"stored {rows} rows in {elapsed:5.1f} s {rows/elapsed:5.0f} rows/s" )
        # create a combined "record" view over all created tables
        # NOTE(review): source indentation was lost - this view creation is
        # assumed to run only when the database is (re)built; confirm against
        # the original file
        tableList=sqlDB.getTableList()
        viewDDL=Schema.getGeneralViewDDL(tableList, "record")
        if debug:
            print(viewDDL)
        sqlDB.execute(viewDDL)
    return sqlDB
def asDictOfLod(self,limit:int=1000,delim:str=',',progressSteps:int=None,expectedTotal:int=None):
    '''
    get the dblp data as a dict of list of dicts - effectively separating the content
    into table structures

    Args:
        limit(int): maximum amount of records to process
        delim(str): the delimiter to use for splitting attributes with multiple values (e.g. author)
        progressSteps(int): if set the interval at which to print a progress dot
        expectedTotal(int): the expected Total number

    Returns:
        dict: maps record kind (e.g. "article", "proceedings") to a list of row dicts
    '''
    index=0
    progress=Progress(progressSteps,expectedTotal,msg="Parsing dblp xml dump",showMemory=True)
    # level tracks the XML nesting depth: 1=root, 2=record, 3=record field
    level=0
    dictOfLod={}
    # current accumulates the name/value pairs of the record being parsed
    current={}
    levelCount=Counter()
    for event, elem in self.iterParser():
        if event == 'start':
            level += 1
            levelCount[level]+=1
            if level==2:
                # a new record starts - its tag is the record kind
                kind=elem.tag
                if not kind in dictOfLod:
                    dictOfLod[kind]=[]
                lod=dictOfLod[kind]
                # copy the attributes (if any)
                if hasattr(elem, "attrib"):
                    current = {**current, **elem.attrib}
            elif level==3:
                name=elem.tag
                newvalue=elem.text
                # is there already an entry for the given name
                # (multi-valued fields such as author are joined with delim)
                if name in current:
                    oldvalue=current[name]
                    newvalue=f"{oldvalue}{delim}{newvalue}"
                # set the name/value pair
                # NOTE(review): `kind` below is set on the enclosing level-2
                # start event; assumes well-formed dblp XML - confirm
                current[name]=newvalue
                if (kind=="proceedings") and (name=="title") and (elem.text is None):
                    print(f"{elem.sourceline:6}:{elem.tag} - None text")
                    pass
            elif level>=4:
                # interesting things happen here ...
                # sub/sup i and so on see dblp xml faq
                pass
        elif event == 'end':
            if level==2:
                # record complete - append the accumulated fields and reset
                lod.append(current)
                progress.next()
                kind=elem.tag
                self.checkRow(kind,progress.count,current)
                current={}
                if progress.count>=limit:
                    break
            level -= 1
            # free the processed subtree to keep memory bounded
            self.clear_element(elem)
        index+=1
        if self.debug:
            pass
    if progress is not None:
        progress.done()
    return dictOfLod
/My-CountriesAPI-1234567-1.0.tar.gz/My-CountriesAPI-1234567-1.0/my_countries_api_1234567/controllers/base_controller.py | from my_countries_api_1234567.api_helper import APIHelper
from my_countries_api_1234567.http.http_context import HttpContext
from my_countries_api_1234567.http.requests_client import RequestsClient
from my_countries_api_1234567.exceptions.api_exception import APIException
class BaseController(object):
    """All controllers inherit from this base class.

    Attributes:
        http_client (HttpClient): The HttpClient which a specific controller
            instance will use. By default all the controller objects share
            the same HttpClient. A user can use his own custom HttpClient
            as well.
        http_call_back (HttpCallBack): An object which holds call back
            methods to be called before and after the execution of an
            HttpRequest.
        global_headers (dict): The global headers of the API which are sent
            with every request.
    """

    # shared by all controller instances unless a custom client is injected
    http_client = RequestsClient()
    http_call_back = None

    global_headers = {
        'user-agent': 'APIMATIC 2.0'
    }

    def __init__(self, client=None, call_back=None):
        # PEP 8: compare against None with identity (`is not`), not `!=`
        if client is not None:
            self.http_client = client
        if call_back is not None:
            self.http_call_back = call_back

    def validate_parameters(self, **kwargs):
        """Validates required parameters of an endpoint.

        Args:
            kwargs (dict): A dictionary of the required parameters.

        Raises:
            ValueError: if any required parameter is None.
        """
        for name, value in kwargs.items():
            if value is None:
                raise ValueError("Required parameter {} cannot be None.".format(name))

    def execute_request(self, request, binary=False):
        """Executes an HttpRequest.

        Args:
            request (HttpRequest): The HttpRequest to execute.
            binary (bool): A flag which should be set to True if
                a binary response is expected.

        Returns:
            HttpContext: The HttpContext of the request. It contains,
            both, the request itself and the HttpResponse object.
        """
        # Invoke the on before request HttpCallBack if specified
        if self.http_call_back is not None:
            self.http_call_back.on_before_request(request)

        # Add global headers to request
        request.headers = APIHelper.merge_dicts(self.global_headers, request.headers)

        # Invoke the API call to fetch the response.
        func = self.http_client.execute_as_binary if binary else self.http_client.execute_as_string
        response = func(request)
        context = HttpContext(request, response)

        # Invoke the on after response HttpCallBack if specified
        if self.http_call_back is not None:
            self.http_call_back.on_after_response(context)

        return context

    def validate_response(self, context):
        """Validates an HTTP response by checking for global errors.

        Args:
            context (HttpContext): The HttpContext of the API call.

        Raises:
            APIException: if the response status code is outside [200, 208].
        """
        if (context.response.status_code < 200) or (context.response.status_code > 208):  # [200, 208] = HTTP OK
            raise APIException('HTTP response not OK.', context)
/KiKit-1.3.0-py3-none-any.whl/kikit/plugin.py | from kikit.actionPlugins import importAllPlugins # type: ignore
from typing import Any, Dict, Iterable
from kikit.panelize import Panel
from kikit.substrate import Substrate
from pcbnewTransition import pcbnew
from shapely.geometry import LineString
# a preset maps section names to {option name: option value} dictionaries
Preset = Dict[str, Dict[str, Any]]
class HookPlugin:
    """
    This type of plugin has a number of callbacks that are invoked during the
    panelization process. The plugin can tweak the process by modifying the
    panel. Inherit from this class and override the callbacks listed below.

    The same instance of the plugin object is used for invoking all of the
    callbacks. So you can safely store information between the calls.

    If you want to know the precise order of operation, please refer to the
    function kikit.panelize_ui:doPanelization.

    All callbacks default to doing nothing, so subclasses only need to
    override the ones they care about.
    """
    def __init__(self, userArg: str, board: pcbnew.BOARD,
                 preset: Dict[str, Dict[str, Any]]) -> None:
        """
        The constructor of the hook plugin will always receive a single string
        from the user, the source design and the presets Dictionary.
        """
        self.userArg = userArg  # raw user-supplied plugin argument string
        self.board = board      # the source design
        self.preset = preset    # effective panelization presets

    def prePanelSetup(self, panel: Panel) -> None:
        """
        This callback is invoked just after a panel instance was created and no
        operations were performed on it.
        """
        pass

    def afterPanelSetup(self, panel: Panel) -> None:
        """
        This callback is invoked after the panel has inherited design setting,
        properties and the title block.
        """
        pass

    def afterLayout(self, panel: Panel, substrates: Iterable[Substrate]) -> None:
        """
        This callback is invoked after the boards are placed in panel and before
        the partition line is constructed. substrates is an iterable of
        individual boards substrates in the panel
        """
        pass

    def afterTabs(self, panel: Panel, tabCuts: Iterable[LineString],
                  backboneCuts: Iterable[LineString]) -> None:
        """
        This callback is invoked after the tabs have been formed.
        """
        pass

    def afterFraming(self, panel: Panel, frameCuts: Iterable[LineString]) -> None:
        """
        This callback is invoked after the frame was build and before any frame
        decorators (cuts, fiducials) were placed.
        """
        pass

    def afterCuts(self, panel: Panel) -> None:
        """
        This callback is invoked after the cuts were rendered.
        """
        pass

    def finish(self, panel: Panel) -> None:
        """
        This callback is invoked after the panel is finished, just before
        debugging information is collected and the panel is saved.
        """
        pass
class LayoutPlugin:
    """
    Base class for plugins that build user-specified board layouts.
    Subclasses must implement buildLayout and may override the partition line
    and extra-cut construction.
    """
    def __init__(self, preset: Preset, userArg: str, netPattern: str,
                 refPattern: str, vspace: int, hspace: int, rotation: int) -> None:
        # store the layout configuration handed over by the panelization UI
        self.preset = preset
        self.userArg = userArg
        self.netPattern = netPattern
        self.refPattern = refPattern
        self.vspace = vspace
        self.hspace = hspace
        self.rotation = rotation

    def buildLayout(self, panel: Panel, inputFile: str,
                    sourceArea: pcbnew.BOX2I) -> Iterable[Substrate]:
        """
        Append the individual boards to the panel and return an iterable of
        the substrates of those boards. Must be provided by subclasses.
        """
        raise NotImplementedError("Layout plugin has to define buildLayout")

    def buildPartitionLine(self, panel: Panel, framingSubstrates: Iterable[Substrate]) -> None:
        """
        Construct the partition line of the panel. The extra substrates passed
        in represent the soon-to-be frame of the panel.
        """
        return panel.buildPartitionLineFromBB(framingSubstrates)

    def buildExtraCuts(self, panel: Panel) -> Iterable[LineString]:
        """
        Provide additional cuts (e.g., from an internal backbone). Tab cuts
        are handled elsewhere; the default is no extra cuts.
        """
        return []
class FramingPlugin:
    """
    This type of plugin can build custom framing
    """
    def __init__(self, preset: Preset, userArg: str) -> None:
        self.preset = preset    # effective panelization presets
        self.userArg = userArg  # raw user-supplied plugin argument string

    def buildFraming(self, panel: Panel) -> Iterable[LineString]:
        """
        This function should append frame to the panel and return list of cuts.
        """
        raise NotImplementedError("FramingPlugin has to define buildFraming")

    def buildDummyFramingSubstrates(self, substrates: Iterable[Substrate]) -> Iterable[Substrate]:
        """
        This function should build dummy substrates that emulate the
        soon-to-be-frame. These substrates are used for partition line
        computation.
        """
        raise NotImplementedError("FramingPlugin has to define buildDummyFramingSubstrates")
class TabsPlugin:
    """
    Base class for plugins that create custom tabs. Override
    buildTabAnnotations (preferred); buildTabs only needs overriding when the
    annotation mechanism is not sufficient.
    """
    def __init__(self, preset: Preset, userArg: str) -> None:
        # keep the preset and the raw user argument for the build step
        self.preset = preset
        self.userArg = userArg

    def buildTabAnnotations(self, panel: Panel) -> None:
        """
        Append tab annotations to the panel; the rendering itself happens
        automatically afterwards.
        """
        raise NotImplementedError("Tabs plugin has to provide buildTabAnnotations when it doesn't override buildTabs")

    def buildTabs(self, panel: Panel) -> Iterable[LineString]:
        """
        Build the tabs directly: reset existing annotations, let the subclass
        place new ones and render them. Most subclasses override
        buildTabAnnotations instead of this method.
        """
        panel.clearTabsAnnotations()
        self.buildTabAnnotations(panel)
        return panel.buildTabsFromAnnotations(self.preset["tabs"]["fillet"])
class CutsPlugin:
    """
    This plugin renders tabs (LineStrings) into board features. The cuts are
    divided into two types so you can, e.g., inset you tab cuts.
    """
    def __init__(self, preset: Preset, userArg: str) -> None:
        self.preset = preset    # effective panelization presets
        self.userArg = userArg  # raw user-supplied plugin argument string

    def renderTabCuts(self, panel: Panel, cuts: Iterable[LineString]) -> None:
        """
        Render tab cuts into the panel.
        """
        raise NotImplementedError("Cuts plugin has to provide renderTabCuts")

    def renderOtherCuts(self, panel: Panel, cuts: Iterable[LineString]) -> None:
        """
        Render any other type of cuts (frame, backbone, etc.)
        """
        raise NotImplementedError("Cuts plugin has to provide renderOtherCuts")
class ToolingPlugin:
    """
    This plugin places tooling holes on the board frame.
    """
    def __init__(self, preset: Preset, userArg: str) -> None:
        self.preset = preset    # effective panelization presets
        self.userArg = userArg  # raw user-supplied plugin argument string

    def buildTooling(self, panel: Panel) -> None:
        """
        Add tooling holes
        """
        raise NotImplementedError("Tooling plugin has to provide buildTooling")
class FiducialsPlugin:
    """
    This plugin places fiducials holes on the board frame.
    """
    def __init__(self, preset: Preset, userArg: str) -> None:
        self.preset = preset    # effective panelization presets
        self.userArg = userArg  # raw user-supplied plugin argument string

    def buildFiducials(self, panel: Panel) -> None:
        """
        Add fiducials
        """
        raise NotImplementedError("Fiducials plugin has to provide buildFiducials")
class TextVariablePlugin:
    """
    This plugin provides text variables the user can use in text fields.
    """
    def __init__(self, board: pcbnew.BOARD) -> None:
        self.board = board  # the source board the variables are derived from

    def variables(self) -> Dict[str, Any]:
        """
        This function should return a dictionary from variable names to their
        values. The values don't have to be strings – it can be anything
        convertible to string. Especially, if calculating of the value is
        expensive, you can use kikit.text.Formatter to postpone the value
        computation to the moment when it is used from user text.
        """
        # default implementation provides no variables
        return {}
/GxSphinx-1.0.0.tar.gz/GxSphinx-1.0.0/sphinx/environment/collectors/title.py | from typing import Any, Dict, Set
from docutils import nodes
from sphinx.application import Sphinx
from sphinx.environment import BuildEnvironment
from sphinx.environment.collectors import EnvironmentCollector
from sphinx.transforms import SphinxContentsFilter
class TitleCollector(EnvironmentCollector):
    """title collector for sphinx.environment."""

    def clear_doc(self, app: Sphinx, env: BuildEnvironment, docname: str) -> None:
        # forget both title variants for the removed document
        env.titles.pop(docname, None)
        env.longtitles.pop(docname, None)

    def merge_other(self, app: Sphinx, env: BuildEnvironment,
                    docnames: Set[str], other: BuildEnvironment) -> None:
        # copy the titles computed by the other (parallel) environment
        for docname in docnames:
            env.titles[docname] = other.titles[docname]
            env.longtitles[docname] = other.longtitles[docname]

    def process_doc(self, app: Sphinx, doctree: nodes.document) -> None:
        """Add a title node to the document (just copy the first section title),
        and store that title in the environment.
        """
        titlenode = nodes.title()
        longtitlenode = titlenode
        if 'title' in doctree:
            # an explicit title directive wins for the HTML <title> tag only
            longtitlenode = nodes.title()
            longtitlenode += nodes.Text(doctree['title'])
        # the first section heading, if any, becomes the document title
        sections = doctree.traverse(nodes.section)
        if sections:
            visitor = SphinxContentsFilter(doctree)
            sections[0][0].walkabout(visitor)
            titlenode += visitor.get_entry_text()
        else:
            # document has no title
            titlenode += nodes.Text('<no title>')
        app.env.titles[app.env.docname] = titlenode
        app.env.longtitles[app.env.docname] = longtitlenode
def setup(app: Sphinx) -> Dict[str, Any]:
    # register the collector with the Sphinx application
    app.add_env_collector(TitleCollector)

    # declare that both reading and writing are parallel-safe
    metadata = {
        'version': 'builtin',
        'parallel_read_safe': True,
        'parallel_write_safe': True,
    }
    return metadata
/Drupdates-1.5.2.tar.gz/Drupdates-1.5.2/drupdates/sitebuild.py | import git, os, copy
from os.path import expanduser
from drupdates.utils import Utils
from drupdates.settings import Settings
from drupdates.settings import DrupdatesError
from drupdates.drush import Drush
from git import Repo
class DrupdatesBuildError(DrupdatesError):
    """ Parent Drupdates site build error, raised when a site repository
    fails to build or stand up. """
class Sitebuild(object):
    """ Build out the repository folder. """

    def __init__(self, site_name, ssh, working_dir):
        # Args:
        #   site_name(str): name of the site / git remote
        #   ssh(str): ssh URL of the site's git repository
        #   working_dir(str): parent directory the site folder is created in
        self.settings = Settings()
        self._site_name = site_name
        self.site_dir = os.path.join(working_dir, self._site_name)
        self.ssh = ssh
        self.utilities = Utils()
        # files drush marks read-only that file_cleanup() later chmods
        self.si_files = copy.copy(self.settings.get('drushSiFiles'))

    def build(self):
        """ Core build method.

        Wipes any previous checkout, does a shallow fetch of the working
        branch, stands the site up via drush and verifies it bootstraps.

        Returns:
            str: success message

        Raises:
            DrupdatesBuildError: if any step of the build fails
        """
        working_branch = self.settings.get('workingBranch')
        try:
            Utils.remove_dir(self.site_dir)
        except DrupdatesError as remove_error:
            raise DrupdatesBuildError(20, remove_error.msg)
        self.utilities.sys_commands(self, 'preBuildCmds')
        repository = Repo.init(self.site_dir)
        remote = git.Remote.create(repository, self._site_name, self.ssh)
        try:
            # shallow fetch: only the tip of the working branch is needed
            remote.fetch(working_branch, depth=1)
        except git.exc.GitCommandError as error:
            msg = "{0}: Could not checkout {1}. \n".format(self._site_name, working_branch)
            msg += "Error: {0}".format(error)
            raise DrupdatesBuildError(20, msg)
        git_repo = repository.git
        git_repo.checkout('FETCH_HEAD', b=working_branch)
        self.utilities.load_dir_settings(self.site_dir)
        self.standup_site()
        try:
            # `drush st` verifies the site actually bootstraps
            repo_status = Drush.call(['st'], self._site_name, True)
        except DrupdatesError as st_error:
            raise DrupdatesBuildError(20, st_error.msg)
        finally:
            # always relax the file permissions drush locked down
            self.file_cleanup()
        if not 'bootstrap' in repo_status:
            msg = "{0} failed to Stand-up properly after running drush qd".format(self._site_name)
            raise DrupdatesBuildError(20, msg)
        self.utilities.sys_commands(self, 'postBuildCmds')
        return "Site build for {0} successful".format(self._site_name)

    def standup_site(self):
        """ Using the drush core-quick-drupal (qd) command stand-up a Drupal site.

        This will:
        - Perform site install with sqlite.
        - If needed, build webroot from a make file.
        - Install any sub sites (ie multi-sites)
        - Ensure that all the files in the web root are writable.

        Raises:
            DrupdatesBuildError: if a required make file cannot be found
            DrupdatesError: propagated from the underlying drush calls
        """
        qd_settings = self.settings.get('qdCmds')
        # copy so the cached settings list is not mutated below
        qd_cmds = copy.copy(qd_settings)
        backup_dir = Utils.check_dir(self.settings.get('backupDir'))
        qd_cmds += ['--backup-dir=' + backup_dir]
        try:
            # backups are forced on: drop a user-configured --no-backup flag
            qd_cmds.remove('--no-backup')
        except ValueError:
            pass
        if self.settings.get('useMakeFile'):
            make_file = self.utilities.find_make_file(self._site_name, self.site_dir)
            if make_file:
                qd_cmds += ['--makefile=' + make_file]
            else:
                msg = "Can't find make file in {0} for {1}".format(self.site_dir, self._site_name)
                raise DrupdatesBuildError(20, msg)
        if self.settings.get('buildSource') == 'make':
            # building from a make file: don't reuse the existing code base
            qd_cmds.remove('--use-existing')
        try:
            Drush.call(qd_cmds, self._site_name)
            # stand up any multi-site (sub site) installs as well
            sub_sites = Drush.get_sub_site_aliases(self._site_name)
            for alias, data in sub_sites.items():
                Drush.call(qd_cmds, alias)
                # Add sub site settings.php to list of file_cleanup() files.
                sub_site_st = Drush.call(['st'], alias, True)
                self.si_files.append(sub_site_st['site'] + '/settings.php')
                self.si_files.append(sub_site_st['files'] + '/.htaccess')
                self.si_files.append(sub_site_st['site'])
        except DrupdatesError as standup_error:
            raise standup_error

    def file_cleanup(self):
        """ Drush sets the folder permissions for some file to be 0444, convert to 0777.

        Raises:
            DrupdatesBuildError: if a permission change fails
        """
        drush_dd = Drush.call(['dd', '@drupdates.' + self._site_name])
        site_webroot = drush_dd[0]
        for name in self.si_files:
            complete_name = os.path.join(site_webroot, name)
            if os.path.isfile(complete_name) or os.path.isdir(complete_name):
                try:
                    os.chmod(complete_name, 0o777)
                except OSError:
                    msg = "Couldn't change file permission for {0}".format(complete_name)
                    raise DrupdatesBuildError(20, msg)
/Dead_Link_Checker-1.0.0-py3-none-any.whl/src/DLChecker.py | import sys
import re
try:
from src import DLFunctions
except ModuleNotFoundError:
import DLFunctions
# regular expression used to recognise URLs (defined once in DLFunctions)
regex = DLFunctions.regex
# module-level aliases for the link result lists collected by DLFunctions
goodLinks = DLFunctions.goodLinks
badLinks = DLFunctions.badLinks
jsonArr = DLFunctions.jsonArr
unknownLinks = DLFunctions.unknownLinks
def main_wrapper():
    """Dispatch on the first command line argument: version/help flags,
    JSON export, good/bad/all/ignore file checks, the telescope mode, or the
    default URL-and-file checker."""
    if len(sys.argv) <= 1:
        # no arguments at all: just show the usage information
        DLFunctions.help_dead_link_check()
        return
    option = sys.argv[1]
    if re.search("^-[vV]", option):
        print("Program name: Dead-URL-Check")
        print("Version: 1.0.1 by Mintae Kim")
    elif re.search("^-[hH]", option):
        DLFunctions.help_dead_link_check()
    elif re.search("^--[jJ]", option):
        print("URL JSON file is created...")
        DLFunctions.create_JSON(sys.argv[2])
        print(jsonArr)
    elif re.search("^--good", option):
        print("Good URL Checker is activated")
        DLFunctions.file_chekcer(sys.argv[2], "g")
    elif re.search("^--bad", option):
        print("Bad URL Checker is activated")
        DLFunctions.file_chekcer(sys.argv[2], "b")
    elif re.search("^--all", option):
        print("All URL Checker is activated")
        DLFunctions.file_chekcer(sys.argv[2], "a")
    elif re.search("^--ignore", option):
        DLFunctions.file_chekcer(sys.argv[3], "i")
        DLFunctions.check_result()
    elif re.search("^--t", option):
        print("Telescope url checker is activated")
        DLFunctions.telescope_url_check()
        DLFunctions.file_chekcer("telescope.txt", "a")
    else:
        print("URL Checker is activated")
        for argument in sys.argv:
            if re.search(regex, argument):
                # looks like a URL: check it directly
                DLFunctions.check_dead_links(argument, "a")
            else:
                # otherwise treat it as a file containing URLs
                DLFunctions.file_chekcer(argument, "a")
        DLFunctions.check_result()
# --- Main ---
# Entry point: main_wrapper() inspects the command line arguments and
# dispatches to the "help", "version", URL checker or file checker modes.
if __name__ == "__main__":
    main_wrapper()
/AMQPStorm-2.10.6.tar.gz/AMQPStorm-2.10.6/amqpstorm/heartbeat.py |
import logging
import threading
from amqpstorm.exception import AMQPConnectionError
LOGGER = logging.getLogger(__name__)
class Heartbeat(object):
"""Internal Heartbeat handler."""
def __init__(self, interval, send_heartbeat_impl, timer=threading.Timer):
self.send_heartbeat_impl = send_heartbeat_impl
self.timer_impl = timer
self._lock = threading.Lock()
self._running = threading.Event()
self._timer = None
self._exceptions = None
self._reads_since_check = 0
self._writes_since_check = 0
self._interval = interval
self._threshold = 0
def register_read(self):
"""Register that a frame has been received.
:return:
"""
self._reads_since_check += 1
def register_write(self):
"""Register that a frame has been sent.
:return:
"""
self._writes_since_check += 1
def start(self, exceptions):
"""Start the Heartbeat Checker.
:param list exceptions:
:return:
"""
if not self._interval:
return False
self._running.set()
with self._lock:
self._threshold = 0
self._reads_since_check = 0
self._writes_since_check = 0
self._exceptions = exceptions
LOGGER.debug('Heartbeat Checker Started')
return self._start_new_timer()
def stop(self):
"""Stop the Heartbeat Checker.
:return:
"""
self._running.clear()
with self._lock:
if self._timer:
self._timer.cancel()
self._timer = None
def _check_for_life_signs(self):
"""Check Connection for life signs.
First check if any data has been sent, if not send a heartbeat
to the remote server.
If we have not received any data what so ever within two
intervals, we need to raise an exception so that we can
close the connection.
:rtype: bool
"""
if not self._running.is_set():
return False
if self._writes_since_check == 0:
self.send_heartbeat_impl()
self._lock.acquire()
try:
if self._reads_since_check == 0:
self._threshold += 1
if self._threshold >= 2:
self._running.clear()
self._raise_or_append_exception()
return False
else:
self._threshold = 0
finally:
self._reads_since_check = 0
self._writes_since_check = 0
self._lock.release()
return self._start_new_timer()
def _raise_or_append_exception(self):
"""The connection is presumably dead and we need to raise or
append an exception.
If we have a list for exceptions, append the exception and let
the connection handle it, if not raise the exception here.
:return:
"""
message = (
'Connection dead, no heartbeat or data received in >= '
'%ds' % (
self._interval * 2
)
)
why = AMQPConnectionError(message)
if self._exceptions is None:
raise why
self._exceptions.append(why)
def _start_new_timer(self):
"""Create a timer that will be used to periodically check the
connection for heartbeats.
:return:
"""
if not self._running.is_set():
return False
self._timer = self.timer_impl(
interval=self._interval,
function=self._check_for_life_signs
)
self._timer.daemon = True
self._timer.start()
return True | PypiClean |
/MezzanineFor1.7-3.1.10.tar.gz/MezzanineFor1.7-3.1.10/mezzanine/twitter/migrations/0001_initial.py | from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Query',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('type', models.CharField(max_length=10, verbose_name='Type', choices=[('user', 'User'), ('list', 'List'), ('search', 'Search')])),
('value', models.CharField(max_length=140, verbose_name='Value')),
('interested', models.BooleanField(default=True, verbose_name='Interested')),
],
options={
'ordering': ('-id',),
'verbose_name': 'Twitter query',
'verbose_name_plural': 'Twitter queries',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Tweet',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('remote_id', models.CharField(max_length=50, verbose_name='Twitter ID')),
('created_at', models.DateTimeField(null=True, verbose_name='Date/time')),
('text', models.TextField(null=True, verbose_name='Message')),
('profile_image_url', models.URLField(null=True, verbose_name='Profile image URL')),
('user_name', models.CharField(max_length=100, null=True, verbose_name='User name')),
('full_name', models.CharField(max_length=100, null=True, verbose_name='Full name')),
('retweeter_profile_image_url', models.URLField(null=True, verbose_name='Profile image URL (Retweeted by)')),
('retweeter_user_name', models.CharField(max_length=100, null=True, verbose_name='User name (Retweeted by)')),
('retweeter_full_name', models.CharField(max_length=100, null=True, verbose_name='Full name (Retweeted by)')),
('query', models.ForeignKey(related_name='tweets', to='twitter.Query')),
],
options={
'ordering': ('-created_at',),
'verbose_name': 'Tweet',
'verbose_name_plural': 'Tweets',
},
bases=(models.Model,),
),
] | PypiClean |
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/mdnd/dropMode/VerticalDropMode.js.uncompressed.js | define("dojox/mdnd/dropMode/VerticalDropMode", [
"dojo/_base/kernel",
"dojo/_base/declare",
"dojo/_base/html",
"dojo/_base/array",
"dojox/mdnd/AreaManager"
],function(dojo){
var vdm = dojo.declare(
"dojox.mdnd.dropMode.VerticalDropMode",
null,
{
// summary:
// Enabled a type of calcul for Dnd.
// Default class to find the nearest target.
// _oldXPoint: Integer
// used to save a X position
_oldXPoint: null,
// _oldYPoint: Integer
// used to save a Y position
_oldYPoint: null,
// _oldBehaviour: String
// see <getDragPoint>
_oldBehaviour: "up",
addArea: function(/*Array*/areas, /*Object*/object){
// summary:
// Add a DnD Area into an array sorting by the x position.
// areas:
// array of areas
// object:
// data type of a DndArea
// returns:
// a sorted area
//console.log("dojox.mdnd.dropMode.VerticalDropMode ::: addArea");
var length = areas.length;
var position = dojo.position(object.node, true);
object.coords = {'x':position.x, 'y':position.y};
if(length == 0){
areas.push(object);
}
else{
var x = object.coords.x;
for(var i = 0; i < length; i++){
if(x < areas[i].coords.x){
for(var j = length-1; j >= i; j--)
areas[j + 1] = areas[j];
areas[i] = object;
break;
}
}
if(i == length){
areas.push(object);
}
}
return areas; // Array
},
updateAreas: function(/*Array*/areaList){
// summary:
// Refresh intervals between areas to determinate the nearest area to drop an item.
// Algorithm :
// the marker should be the vertical line passing by the
// central point between two contiguous areas.
// Note:
// If the page has only one targetArea, it's not necessary to calculate coords.
// areaList:
// array of areas
//console.log("dojox.mdnd.dropMode.VerticalDropMode ::: initAreas");
var length = areaList.length;
if(length > 1){
var currentRight, nextLeft;
for(var i = 0; i < length; i++){
var area = areaList[i];
var nextArea;
area.coords.x1 = -1;
area.coords.x2 = -1;
if(i == 0){
nextArea = areaList[i+1];
this._updateArea(area);
this._updateArea(nextArea);
currentRight = area.coords.x + area.node.offsetWidth;
nextLeft = nextArea.coords.x;
area.coords.x2 = currentRight + (nextLeft-currentRight)/2;
}
else if(i == length-1){
area.coords.x1 = areaList[i-1].coords.x2;
}
else{
nextArea = areaList[i+1];
this._updateArea(nextArea);
currentRight = area.coords.x + area.node.offsetWidth;
nextLeft = nextArea.coords.x;
area.coords.x1 = areaList[i-1].coords.x2;
area.coords.x2 = currentRight + (nextLeft-currentRight)/2;
}
}
}
},
_updateArea : function(/*Object*/area){
// summary:
// update the DnD area object (i.e. update coordinates of its DOM node)
// area:
// the DnD area
// tags:
// protected
//console.log("dojox.mdnd.dropMode.VerticalDropMode ::: _updateArea");
var position = dojo.position(area.node, true);
area.coords.x = position.x;
area.coords.y = position.y;
},
initItems: function(/*Object*/area){
// summary:
// initialize the horizontal line in order to determinate the drop zone.
// area:
// the DnD area
//console.log("dojox.mdnd.dropMode.VerticalDropMode ::: initItems");
dojo.forEach(area.items, function(obj){
//get the vertical middle of the item
var node = obj.item.node;
var position = dojo.position(node, true);
var y = position.y + position.h/2;
obj.y = y;
});
area.initItems = true;
},
refreshItems: function(/*Object*/area, /*Integer*/indexItem, /*Object*/size, /*Boolean*/added){
// summary:
// take into account the drop indicator DOM element in order to compute horizontal lines
// area:
// a DnD area object
// indexItem:
// index of a draggable item
// size:
// dropIndicator size
// added:
// boolean to know if a dropIndicator has been added or deleted
//console.log("dojox.mdnd.dropMode.VerticalDropMode ::: refreshItems");
if(indexItem == -1){
return;
}
else if(area && size && size.h){
var height = size.h;
if(area.margin){
height += area.margin.t;
}
var length = area.items.length;
for(var i = indexItem; i < length; i++){
var item = area.items[i];
if(added){
item.y += height;
}
else{
item.y -= height;
}
}
}
},
getDragPoint: function(/*Object*/coords, /*Object*/size, /*Object*/mousePosition){
// summary:
// return coordinates of the draggable item
// description:
// return for:
// - X point : the middle
// - Y point : search if the user goes up or goes down with his mouse.
// - Up : top of the draggable item
// - Down : bottom of the draggable item
// coords:
// an object encapsulating X and Y position
// size:
// an object encapsulating width and height values
// mousePosition:
// coordinates of mouse
// returns:
// an object of coordinates
// example : {'x':10,'y':10}
//console.log("dojox.mdnd.dropMode.VerticalDropMode ::: getDragPoint");
var y = coords.y;
if(this._oldYPoint){
if(y > this._oldYPoint){
this._oldBehaviour = "down";
y += size.h;
}
else
if(y <= this._oldYPoint){
this._oldBehaviour = "up";
}
}
this._oldYPoint = y;
return {
'x': coords.x + (size.w / 2),
'y': y
}; // Object
},
getTargetArea: function(/*Array*/areaList, /*Object*/ coords, /*integer*/currentIndexArea ){
// summary:
// get the nearest DnD area.
// Coordinates are basically provided by the <getDragPoint> method.
// areaList:
// a list of DnD areas objects
// coords:
// coordinates [x,y] of the dragItem
// currentIndexArea:
// an index representing the active DnD area
// returns:
// the index of the DnD area
//console.log("dojox.mdnd.dropMode.VerticalDropMode ::: getTargetArea");
var index = 0;
var x = coords.x;
var end = areaList.length;
if(end > 1){
var start = 0, direction = "right", compute = false;
if(currentIndexArea == -1 || arguments.length < 3){
// first time : Need to search the nearest area in all areas.
compute = true;
}
else{
// check if it's always the same area
if(this._checkInterval(areaList, currentIndexArea, x)){
index = currentIndexArea;
}
else{
if(this._oldXPoint < x){
start = currentIndexArea + 1;
}
else{
start = currentIndexArea - 1;
end = 0;
direction = "left";
}
compute = true;
}
}
if(compute){
if(direction === "right"){
for(var i = start; i < end; i++){
if(this._checkInterval(areaList, i, x)){
index = i;
break;
}
}
}
else{
for(var i = start; i >= end; i--){
if(this._checkInterval(areaList, i, x)){
index = i;
break;
}
}
}
}
}
this._oldXPoint = x;
return index; // Integer
},
_checkInterval: function(/*Array*/areaList, /*Integer*/index, /*Coord*/x){
// summary:
// check if the dragNode is in the interval.
// The x coordinate is basically provided by the <getDragPoint> method.
// areaList:
// a list of DnD areas objects
// index:
// index of a DnD area (to get the interval)
// x:
// coordinate x, of the dragNode
// returns:
// true if the dragNode is in intervall
// tags:
// protected
var coords = areaList[index].coords;
if(coords.x1 == -1){
if(x <= coords.x2){
return true;
}
}
else
if(coords.x2 == -1){
if(x > coords.x1){
return true;
}
}
else{
if(coords.x1 < x && x <= coords.x2){
return true;
}
}
return false; // Boolean
},
getDropIndex: function(/*Object*/ targetArea, /*Object*/ coords){
// summary:
// Return the index where the drop has to be placed.
// targetArea:
// a DnD area object
// coords:
// coordinates [x,y] of the draggable item
// returns:
// a number
// or -1 if the area has no children or the drop index represents the last position in to the area
//console.log("dojox.mdnd.dropMode.VerticalDropMode ::: getDropIndex");
var length = targetArea.items.length;
var coordinates = targetArea.coords;
var y = coords.y;
if(length > 0){
// course all children in the target area.
for(var i = 0; i < length; i++){
// compare y value with y value of children
if(y < targetArea.items[i].y){
return i; // Integer
}
else{
if(i == length-1){
return -1;
}
}
}
}
return -1;
},
destroy: function(){
	// summary:
	//		Cleanup hook; intentionally empty, can be overwritten by
	//		subclasses or owners of the drop mode.
}
});
//------------
//Singleton
//------------
dojox.mdnd.areaManager()._dropMode = new dojox.mdnd.dropMode.VerticalDropMode();
return vdm;
}); | PypiClean |
/Ds_Style-0.0.1-py3-none-any.whl/Ds_Style/Ds_Style.py | import os
from time import sleep as timeout
import time
import sys
####################################
class Loading:
    # Console loading-bar and counter animations.  The helpers below carry no
    # ``self`` parameter and are invoked directly on the class, e.g.
    # ``Loading.LD()``.
    # NOTE(review): indentation in this source is mangled; the three helpers
    # after LD3 are assumed to belong to ``Loading`` — confirm against the
    # released package layout.
    def LD(Txt=str('Loading'), A='\033[1;37m┊', Start='\033[1;31m▊', End='\033[1;37m▒', B='\033[1;37m┊', Time=0.1, Repeat=40, TxtC='\033[1;37m'):
        # Draw a growing bar of ``Start`` blocks over a track of ``End`` blocks,
        # redrawing the same console line via '\r'.
        for i in range(0, Repeat):
            i += 1
            cs = len(Txt)
            sv = Repeat + cs + 1          # track length: full bar + label + 1
            txt = End * sv
            f = i * Start                 # filled portion after i steps
            print(txt + B, end='\r')
            print(TxtC + Txt + A + '{}'.format(f), end='\r')
            time.sleep(Time)

    def LD3(Txt='txt', Ds='=', A='[', Start=' ', End=' ', B=']', Number=10, Time=0.1, Repeat=4, TxtC='\033[1;37m'):
        # Bracketed variant ("Txt [ ...=") repeated ``Repeat`` times.
        for i in range(Repeat):
            for x in range(Number):
                x += 1
                cs = len(Txt)
                sv = Number + cs + 3
                txt = End * sv
                f = x * Start
                ss = str(Ds)
                print(txt, B, end='\r')
                print(TxtC + Txt, A, '{}'.format(f) + ss, end='\r')
                time.sleep(Time)

    def loading(Txt="Txt...", Time=0.1, Repeat=5):
        # Classic |/-\ spinner appended to ``Txt``.
        for x in range(Repeat):
            txt = Txt                     # kept from the original (unused)
            ss = "|"
            sc = "/"
            sd = "-"
            sf = "\\"
            time.sleep(Time)
            print(Txt + ss, end='\r')
            time.sleep(Time)
            print(Txt + sc, end='\r')
            time.sleep(Time)
            print(Txt + sd, end='\r')
            time.sleep(Time)
            print(Txt + sf, end='\r')
            time.sleep(Time)

    def counterup(Txt='Txt...', Number=10, Time=0.1, Txt2="% ", Repeat=5):
        # Count 1..Number, printed as "Txt <i> Txt2", repeated ``Repeat`` times.
        for x in range(Repeat):
            for i in range(Number):
                i += 1
                time.sleep(Time)
                print(Txt, i, Txt2, end='\r')
                time.sleep(Time)

    def counterdown(Txt='Txt..', Number=10, Txt2='% ', Time=0.1, Repeat=1):
        # Count Number..0 (inclusive), printed as "Txt <n> Txt2".
        txt = str(Txt)
        txt2 = str(Txt2)
        for i in range(Repeat):
            for x in range(0, Number + 1):
                ss = int(Number - x)
                timeout(Time)             # ``timeout`` is the module's alias for time.sleep
                print(txt, ss, txt2, end='\r')
                time.sleep(Time)
###################################################
class Animation :
    # Typewriter-style text animations.  The methods use the class name
    # ``Animation`` as the receiver parameter in place of ``self`` (original
    # convention of this library, kept as-is).
    def __init__(self, Txt=' Txt..'):
        # Txt: the string to animate.
        self.Txt = Txt

    def SlowIndex(Animation, Time=0.001):
        # Print the text one character at a time with ``Time`` delay between chars.
        txt = Animation.Txt
        for x in txt:
            time.sleep(Time)
            print(x, end='')

    def SlowText(Animation, Time=0.1):
        # Same effect as SlowIndex, but flushes stdout after every character so
        # it also animates when stdout is block-buffered (pipes, some terminals).
        for chat in Animation.Txt:
            sys.stdout.write(chat)
            sys.stdout.flush()
            time.sleep(Time)

    def Text_Line(Animation, Time=0.1, Repeat=1, CLT='\033[1;37m', CUL='\033[1;37m'):
        # Sweep an upper-cased "highlight" across the line, redrawing in place
        # via '\r'.  CUL colors the highlighted character, CLT the rest.
        txt = Animation.Txt
        cs = len(txt)
        for n in range(Repeat):
            time.sleep(Time)
            print(CUL + txt[0].upper() + CLT + txt[1::].lower(), end='\r')
            for x in range(0, cs):
                v = x + 1
                time.sleep(Time)
                # the same frame is printed twice on purpose (doubles the dwell time)
                print(CLT + txt[0:x].lower() + CUL + txt[x].upper() + CLT + txt[v::].lower(), end='\r')
                time.sleep(Time)
                print(CLT + txt[0:x].lower() + CUL + txt[x].upper() + CLT + txt[v::].lower(), end='\r')
                time.sleep(Time)
            print(CLT + txt.lower(), end='\r')
            time.sleep(Time)
################################################
class Ds_Style:
    """Print text items inside rounded Unicode boxes, 1-3 boxes per row.

    Construct with a single iterable of strings: ``Ds_Style(['a', 'b'])``.

    Fixes versus the original:
    - ``Colorsd7`` NameError typo in the Equal/cols=2 leftover branch.
    - ``tap``/``taps`` and the Equal=True border strings were only assigned
      inside the pairing loops, so every odd-leftover branch raised NameError
      when the item list was shorter than the column count; they are now
      hoisted before the loops (printed output for previously-working inputs
      is unchanged).
    - Unused padding locals (``sd2``/``ssA``/``bg``) removed.
    """

    def __init__(self, *Txt):
        # NOTE: the API expects ONE iterable argument; ``list(*Txt)`` unpacks
        # it (two or more positional arguments would raise TypeError).
        self.Txt = Txt
        for var in self.Txt:
            self.Txt = list(*Txt)

    def Style(Ds_Style, cols=2, Taps=0, Color='\033[1;31m', Space=0, Equal=False, TxtC='\033[1;37m', plus=''):
        """Print every stored item framed in a rounded box.

        cols: 1, 2 or 3 boxes per printed row.
        Taps: left margin (spaces) before each row.
        Space: gap (spaces) between boxes in a row.
        Equal: when True all boxes share the width of the longest item.
        plus: optional extra line printed after each row of boxes.
        """
        # FIX: hoisted so remainder branches cannot hit NameError.
        tap = ' ' * Space
        taps = ' ' * Taps
        if Equal == False:
            if cols == 1:
                ss = len(Ds_Style.Txt)
                txt = Ds_Style.Txt
                for x in range(0, ss):
                    ssv = len(txt[x])
                    sd1 = str('─') * ssv; sd3 = str('╭'); sd4 = str('╮'); sd5 = str('╰'); sd6 = str('╯'); sd7 = str(Color + '│' + TxtC)
                    print(taps + Color + sd3 + sd1 + sd4); print(taps + Color + sd7 + txt[x] + Color + sd7); print(taps + Color + sd5 + sd1 + sd6)
                    vip = str(plus)
                    if vip != '':
                        print(vip)
        if Equal == False:
            if cols == 2:
                ss = len(Ds_Style.Txt)
                bb = ss % 2
                s7 = ss - bb
                if bb % 2 == bb:
                    txt = Ds_Style.Txt
                    for x in range(0, s7, 2):
                        mk = len(txt[x]); ssc = str('─') * mk; ssB = str('╭'); ssC = str('╮'); ssD = str('╰'); ssE = str('╯')
                        sd = x + 1; sr = len(txt[sd]); sd1 = str('─') * sr; sd3 = str('╭')
                        sd4 = str('╮'); sd5 = str('╰'); sd6 = str('╯'); sd7 = str(Color + '│' + TxtC)
                        print(taps + Color + ssB + ssc + ssC + tap + sd3 + sd1 + sd4); print(taps + Color + sd7 + txt[x] + Color + sd7 + tap + sd7 + txt[sd] + Color + sd7); print(taps + Color + ssD + ssc + ssE + tap + sd5 + sd1 + sd6)
                        vip = str(plus)
                        if vip != '':
                            print(vip)
                    for i in range(bb):
                        if bb == 1:
                            lk = len(txt[-1]); sd1 = str('─') * lk; sd3 = str('╭'); sd4 = str('╮'); sd5 = str('╰'); sd6 = str('╯'); sd7 = str(Color + '│' + TxtC)
                            print(taps + Color + sd3 + sd1 + sd4); print(taps + Color + sd7 + txt[-1] + Color + sd7); print(taps + Color + sd5 + sd1 + sd6)
                        if bb == 2:  # dead branch (ss % 2 is never 2); kept for parity
                            lk = len(txt[-2]); sd1 = str('─') * lk; sd3 = str('╭'); sd4 = str('╮'); sd5 = str('╰'); sd6 = str('╯'); sd7 = str(Color + '│' + TxtC)
                            tito = len(txt[-1]); ssc = str('─') * tito; ssB = str('╭'); ssC = str('╮'); ssD = str('╰'); ssE = str('╯')
                            print(taps + Color + sd3 + sd1 + sd4 + tap + ssB + ssc + ssC); print(taps + Color + sd7 + txt[-2] + Color + sd7 + tap + sd7 + txt[-1] + Color + sd7); print(taps + Color + sd5 + sd1 + sd6 + tap + ssD + ssc + ssE)
                        break
        if Equal == False:
            if cols == 3:
                ss = len(Ds_Style.Txt)
                bb = ss % 3
                s7 = ss - bb
                if bb % 3 == bb:
                    txt = Ds_Style.Txt
                    for x in range(0, s7, 3):
                        mk = len(txt[x]); ssc = str('─') * mk; ssB = str('╭'); ssC = str('╮'); ssD = str('╰'); ssE = str('╯')
                        sd = x + 1; sr = len(txt[sd]); sd1 = str('─') * sr; sd3 = str('╭')
                        sd4 = str('╮'); sd5 = str('╰'); sd6 = str('╯'); sd7 = str(Color + '│' + TxtC)
                        sx = sd + 1
                        sks = len(txt[sx])
                        xz = str('─') * sks; xzz = str('╭'); dxz = str('╮')
                        zza = str('╰'); zzx = str('╯')
                        print(taps + Color + ssB + ssc + ssC + tap + sd3 + sd1 + sd4 + tap + xzz + xz + dxz)
                        print(taps + Color + sd7 + txt[x] + Color + sd7 + tap + sd7 + txt[sd] + Color + sd7 + tap + sd7 + txt[sx] + Color + sd7)
                        print(taps + Color + ssD + ssc + ssE + tap + sd5 + sd1 + sd6 + tap + zza + xz + zzx)
                        vip = str(plus)
                        if vip != '':
                            print(vip)
                    for i in range(bb):
                        if bb == 1:
                            lk = len(txt[-1]); sd1 = str('─') * lk; sd3 = str('╭'); sd4 = str('╮'); sd5 = str('╰'); sd6 = str('╯'); sd7 = str(Color + '│' + TxtC)
                            # NOTE: the original prints with a comma here (extra
                            # space before the text); preserved.
                            print(taps + Color + sd3 + sd1 + sd4); print(taps + Color + sd7, txt[-1] + Color + sd7); print(taps + Color + sd5 + sd1 + sd6)
                        if bb == 2:
                            lk = len(txt[-2]); sd1 = str('─') * lk; sd3 = str('╭'); sd4 = str('╮'); sd5 = str('╰'); sd6 = str('╯'); sd7 = str(Color + '│' + TxtC)
                            tito = len(txt[-1]); ssc = str('─') * tito; ssB = str('╭'); ssC = str('╮'); ssD = str('╰'); ssE = str('╯')
                            print(taps + Color + sd3 + sd1 + sd4 + tap + ssB + ssc + ssC); print(taps + Color + sd7 + txt[-2] + Color + sd7 + tap + sd7 + txt[-1] + Color + sd7); print(taps + Color + sd5 + sd1 + sd6 + tap + ssD + ssc + ssE)
                        break
        if Equal == True:
            if cols == 1:
                max1 = 0; num = 0
                txt = Ds_Style.Txt
                sv = len(txt)
                for x in range(0, sv):
                    num = len(txt[x])
                    if num > max1:
                        max1 = num
                ss = max1 + 2
                for n in range(0, sv):
                    vb = len(txt[n])
                    smm = ss - vb + vb    # == ss; kept from the original
                    sd1 = str('─') * ss; sd3 = str('╭'); sd4 = str('╮'); sd5 = str('╰'); sd6 = str('╯'); sd7 = str(Color + '│' + TxtC)
                    print(taps + Color + sd3 + sd1 + sd4); print(taps + Color + sd7 + txt[n].center(smm) + Color + sd7); print(taps + Color + sd5 + sd1 + sd6)
                    vip = str(plus)
                    if vip != '':
                        print(vip)
        if Equal == True:
            if cols == 2:
                ss = len(Ds_Style.Txt)
                bb = ss % 2
                s7 = ss - bb
                if bb % 2 == bb:
                    txt = Ds_Style.Txt
                    mm = len(Ds_Style.Txt)
                    max1 = 0; num = 0
                    for n in range(0, mm):
                        num = len(txt[n])
                        if num > max1:
                            max1 = num
                    ss = max1 + 2
                    # FIX: border parts hoisted out of the loop so the leftover
                    # branch below also works when the loop never runs
                    # (previously: NameError for a single-item list).
                    sd1 = str('─') * ss; sd3 = str('╭'); sd4 = str('╮'); sd5 = str('╰'); sd6 = str('╯'); sd7 = str(Color + '│' + TxtC)
                    for x in range(0, s7, 2):
                        mk = len(txt[x]); ssc = ss - mk + mk
                        sd = x + 1; sr = len(txt[sd]); cv = ss - sr + sr
                        print(taps + Color + sd3 + sd1 + sd4 + tap + sd3 + sd1 + sd4); print(taps + Color + sd7 + txt[x].center(ssc) + Color + sd7 + tap + sd7 + txt[sd].center(cv) + Color + sd7); print(taps + Color + sd5 + sd1 + sd6 + tap + sd5 + sd1 + sd6)
                        vip = str(plus)
                        if vip != '':
                            print(vip)
                    for i in range(bb):
                        if bb == 1:
                            lk = len(txt[-1]); snc = ss - lk + lk
                            print(taps + Color + sd3 + sd1 + sd4); print(taps + Color + sd7 + txt[-1].center(snc) + Color + sd7)
                            print(taps + Color + sd5 + sd1 + sd6)
                        if bb == 2:
                            # Dead branch (bb is never 2).  FIX: the original
                            # read ``Colorsd7`` (NameError) instead of ``Color + sd7``.
                            lk = len(txt[-2]); snc = ss - lk + lk
                            tito = len(txt[-1]); trg = ss - tito + tito
                            print(taps + Color + sd3 + sd1 + sd4 + tap + sd3 + sd1 + sd4); print(taps + Color + sd7 + txt[-2].center(snc) + Color + sd7 + tap + sd7, txt[-1].center(trg) + Color + sd7)
                            print(taps + Color + sd5 + sd1 + sd6 + tap + sd5 + sd1 + sd6)
                        break
        if Equal == True:
            if cols == 3:
                ss = len(Ds_Style.Txt)
                bb = ss % 3
                s7 = ss - bb
                if bb % 3 == bb:
                    txt = Ds_Style.Txt
                    mm = len(Ds_Style.Txt)
                    max1 = 0; num = 0
                    for n in range(0, mm):
                        num = len(txt[n])
                        if num > max1:
                            max1 = num
                    ss = max1 + 2
                    # FIX: hoisted (see cols == 2 above).
                    sd1 = str('─') * ss; sd3 = str('╭'); sd4 = str('╮'); sd5 = str('╰'); sd6 = str('╯'); sd7 = str(Color + '│' + TxtC)
                    for x in range(0, s7, 3):
                        mk = len(txt[x]); ssc = ss - mk + mk
                        sd = x + 1; sr = len(txt[sd]); cv = ss - sr + sr
                        ccz = x + 2; vss = len(txt[ccz]); bhy = ss - vss + vss
                        print(taps + Color + sd3 + sd1 + sd4 + tap + sd3 + sd1 + sd4 + tap + sd3 + sd1 + sd4); print(taps + Color + sd7 + txt[x].center(ssc) + Color + sd7 + tap + sd7 + txt[sd].center(cv) + Color + sd7 + tap + sd7 + txt[ccz].center(bhy) + Color + sd7)
                        print(taps + Color + sd5 + sd1 + sd6 + tap + sd5 + sd1 + sd6 + tap + sd5 + sd1 + sd6)
                        vip = str(plus)
                        if vip != '':
                            print(vip)
                    for i in range(bb):
                        if bb == 1:
                            lk = len(txt[-1]); snc = ss - lk + lk
                            print(taps + Color + sd3 + sd1 + sd4); print(taps + Color + sd7 + txt[-1].center(snc) + Color + sd7)
                            print(taps + Color + sd5 + sd1 + sd6)
                        if bb == 2:
                            lk = len(txt[-2]); snc = ss - lk + lk
                            tito = len(txt[-1]); trg = ss - tito + tito
                            print(taps + Color + sd3 + sd1 + sd4 + tap + sd3 + sd1 + sd4); print(taps + Color + sd7 + txt[-2].center(snc) + Color + sd7 + tap + sd7 + txt[-1].center(trg) + Color + sd7)
                            print(taps + Color + sd5 + sd1 + sd6 + tap + sd5 + sd1 + sd6)
                        break

    def Center(Ds_Style, cols=3, Taps=0, Color='\033[1;31m', Space=0, TxtC='\033[1;37m', plus=''):
        """Like :meth:`Style` with ``Equal=True``, but the odd leftover box is
        printed shifted toward the centre of the row instead of flush left.
        Supports ``cols`` of 2 or 3.
        """
        # FIX: hoisted (the original assigned these inside the loops, raising
        # NameError whenever the list was shorter than the column count).
        tap = ' ' * Space
        taps = ' ' * Taps
        if cols == 3:
            ss = len(Ds_Style.Txt)
            bb = ss % 3
            s7 = ss - bb
            if bb % 3 == bb:
                txt = Ds_Style.Txt
                mm = len(Ds_Style.Txt)
                max1 = 0; num = 0
                for n in range(0, mm):
                    num = len(txt[n])
                    if num > max1:
                        max1 = num
                ss = max1 + 2
                sd1 = str('─') * ss; sd3 = str('╭'); sd4 = str('╮'); sd5 = str('╰'); sd6 = str('╯'); sd7 = str(Color + '│' + TxtC)
                for x in range(0, s7, 3):
                    mk = len(txt[x]); ssc = ss - mk + mk
                    sd = x + 1; sr = len(txt[sd]); cv = ss - sr + sr
                    ccz = x + 2; vss = len(txt[ccz]); bhy = ss - vss + vss
                    print(taps + Color + sd3 + sd1 + sd4 + tap + sd3 + sd1 + sd4 + tap + sd3 + sd1 + sd4); print(taps + Color + sd7 + txt[x].center(ssc) + Color + sd7 + tap + sd7 + txt[sd].center(cv) + Color + sd7 + tap + sd7 + txt[ccz].center(bhy) + Color + sd7)
                    print(taps + Color + sd5 + sd1 + sd6 + tap + sd5 + sd1 + sd6 + tap + sd5 + sd1 + sd6)
                    vip = str(plus)
                    if vip != '':
                        print(vip)
                for i in range(bb):
                    if bb == 1:
                        lk = len(txt[-1]); snc = ss - lk + lk
                        pp = max1 + 4
                        mm = ' ' * pp
                        # The original branched on ``len(txt[sd]) % 3 == 1`` using
                        # loop variables; guarded so a short list no longer crashes.
                        if s7 > 0 and len(txt[x + 1]) % 3 == 1:
                            print(taps + Color + mm + sd3 + sd1 + sd4); print(taps + Color + mm + sd7 + txt[-1].center(snc) + Color + sd7)
                            print(taps + Color + mm + sd5 + sd1 + sd6)
                        else:
                            # original printed with a comma here (extra space
                            # before the text); preserved.
                            print(taps + Color + mm + sd3 + sd1 + sd4); print(taps + Color + mm + sd7, txt[-1].center(snc) + Color + sd7)
                            print(taps + Color + mm + sd5 + sd1 + sd6)
                    if bb == 2:
                        lk = len(txt[-2]); snc = ss - lk + lk
                        tito = len(txt[-1]); trg = ss - tito + tito
                        print(taps + Color + sd3 + sd1 + sd4 + tap + sd3 + sd1 + sd4); print(taps + Color + sd7 + txt[-2].center(snc) + Color + sd7 + tap + sd7 + txt[-1].center(trg) + Color + sd7)
                        print(taps + Color + sd5 + sd1 + sd6 + tap + sd5 + sd1 + sd6)
                    break
        if cols == 2:
            ss = len(Ds_Style.Txt)
            bb = ss % 2
            s7 = ss - bb
            if bb % 2 == bb:
                txt = Ds_Style.Txt
                mm = len(Ds_Style.Txt)
                max1 = 0; num = 0
                for n in range(0, mm):
                    num = len(txt[n])
                    if num > max1:
                        max1 = num
                ss = max1 + 2
                sd1 = str('─') * ss; sd3 = str('╭'); sd4 = str('╮'); sd5 = str('╰'); sd6 = str('╯'); sd7 = str(Color + '│' + TxtC)
                for x in range(0, s7, 2):
                    mk = len(txt[x]); ssc = ss - mk + mk
                    sd = x + 1; sr = len(txt[sd]); cv = ss - sr + sr
                    print(taps + Color + sd3 + sd1 + sd4 + tap + sd3 + sd1 + sd4); print(taps + Color + sd7 + txt[x].center(ssc) + Color + sd7 + tap + sd7 + txt[sd].center(cv) + Color + sd7)
                    print(taps + Color + sd5 + sd1 + sd6 + tap + sd5 + sd1 + sd6)
                    vip = str(plus)
                    if vip != '':
                        print(vip)
                for i in range(bb):
                    if bb == 1:
                        lk = len(txt[-1]); snc = ss - lk + lk
                        pp = max1 // 2
                        jj = max1 - pp + 3
                        mm = str(' ') * jj
                        print(taps + Color + mm + sd3 + sd1 + sd4); print(taps + Color + mm + sd7 + txt[-1].center(snc) + Color + sd7)
                        print(taps + Color + mm + sd5 + sd1 + sd6)
                    if bb == 2:
                        lk = len(txt[-2]); snc = ss - lk + lk
                        tito = len(txt[-1]); trg = ss - tito + tito
                        print(taps + Color + sd3 + sd1 + sd4 + tap + sd3 + sd1 + sd4); print(taps + Color + sd7 + txt[-2].center(snc) + Color + sd7 + tap + sd7 + txt[-1].center(trg) + Color + sd7)
                        print(taps + Color + sd5 + sd1 + sd6 + tap + sd5 + sd1 + sd6)
                    break
##########################################
class My_Style:
    """Like :class:`Ds_Style`, but every border character is configurable
    (``Ds1``/``Ds3``/``Ds5``/``Ds6`` corners, ``Ds2`` horizontal edge,
    ``Ds4`` vertical bar).

    Fixes versus the original: ``tap``/``taps`` and the Equal=True border
    strings were only assigned inside the pairing loops, so the odd-leftover
    branches raised NameError when the item list was shorter than the column
    count; they are now hoisted (output unchanged for working inputs).
    Unused locals removed.
    """

    def __init__(self, *Txt):
        # Expects a single iterable of strings (see Ds_Style.__init__).
        self.Txt = Txt
        for x in Txt:
            self.Txt = list(*Txt)

    def Square(My_Style, cols=2, Color='\033[1;31m', TxtC='\033[1;37m', Taps=0, Space=0, Equal=True, Ds1='╭',
               Ds2='─', Ds3='╮', Ds4='│', Ds5='╰', Ds6='╯', plus=''):
        """Print the stored items in ``cols`` columns of boxes drawn with the
        given border characters.  ``Equal=True`` gives all boxes the width of
        the longest item; ``plus`` is an optional extra line after each row.
        """
        # FIX: hoisted so remainder branches cannot hit NameError.
        tap = ' ' * Space
        taps = ' ' * Taps
        if Equal == False:
            if cols == 1:
                ss = len(My_Style.Txt)
                txt = My_Style.Txt
                for x in range(0, ss):
                    ssv = len(txt[x])
                    sd1 = str(Ds2) * ssv; sd3 = str(Ds1); sd4 = str(Ds3); sd5 = str(Ds5); sd6 = str(Ds6); sd7 = str(Color + Ds4 + TxtC)
                    print(taps + Color + sd3 + sd1 + sd4); print(taps + Color + sd7 + txt[x] + Color + sd7); print(taps + Color + sd5 + sd1 + sd6)
                    vip = str(plus)
                    if vip != '':
                        print(vip)
        if Equal == False:
            if cols == 2:
                ss = len(My_Style.Txt)
                bb = ss % 2
                s7 = ss - bb
                if bb % 2 == bb:
                    txt = My_Style.Txt
                    for x in range(0, s7, 2):
                        mk = len(txt[x]); ssc = str(Ds2) * mk; ssB = str(Ds1); ssC = str(Ds3); ssD = str(Ds5); ssE = str(Ds6)
                        sd = x + 1; sr = len(txt[sd]); sd1 = str(Ds2) * sr; sd3 = str(Ds1)
                        sd4 = str(Ds3); sd5 = str(Ds5); sd6 = str(Ds6); sd7 = str(Color + Ds4 + TxtC)
                        print(taps + Color + ssB + ssc + ssC + tap + sd3 + sd1 + sd4); print(taps + Color + sd7 + txt[x] + Color + sd7 + tap + sd7 + txt[sd] + Color + sd7); print(taps + Color + ssD + ssc + ssE + tap + sd5 + sd1 + sd6)
                        vip = str(plus)
                        if vip != '':
                            print(vip)
                    for i in range(bb):
                        if bb == 1:
                            lk = len(txt[-1]); sd1 = str(Ds2) * lk; sd3 = str(Ds1); sd4 = str(Ds3); sd5 = str(Ds5); sd6 = str(Ds6); sd7 = str(Color + Ds4 + TxtC)
                            print(taps + Color + sd3 + sd1 + sd4); print(taps + Color + sd7 + txt[-1] + Color + sd7); print(taps + Color + sd5 + sd1 + sd6)
                        if bb == 2:  # dead branch (ss % 2 is never 2); kept for parity
                            lk = len(txt[-2]); sd1 = str(Ds2) * lk; sd3 = str(Ds1); sd4 = str(Ds3); sd5 = str(Ds5); sd6 = str(Ds6); sd7 = str(Color + Ds4 + TxtC)
                            tito = len(txt[-1]); ssc = str(Ds2) * tito; ssB = str(Ds1); ssC = str(Ds3); ssD = str(Ds5); ssE = str(Ds6)
                            print(taps + Color + sd3 + sd1 + sd4 + tap + ssB + ssc + ssC); print(taps + Color + sd7 + txt[-2] + Color + sd7 + tap + sd7 + txt[-1] + Color + sd7); print(taps + Color + sd5 + sd1 + sd6 + tap + ssD + ssc + ssE)
                        break
        if Equal == False:
            if cols == 3:
                ss = len(My_Style.Txt)
                bb = ss % 3
                s7 = ss - bb
                if bb % 3 == bb:
                    txt = My_Style.Txt
                    for x in range(0, s7, 3):
                        mk = len(txt[x]); ssc = str(Ds2) * mk; ssB = str(Ds1); ssC = str(Ds3); ssD = str(Ds5); ssE = str(Ds6)
                        sd = x + 1; sr = len(txt[sd]); sd1 = str(Ds2) * sr; sd3 = str(Ds1)
                        sd4 = str(Ds3); sd5 = str(Ds5); sd6 = str(Ds6); sd7 = str(Color + Ds4 + TxtC)
                        sx = sd + 1
                        sks = len(txt[sx])
                        xz = str(Ds2) * sks; xzz = str(Ds1); dxz = str(Ds3)
                        zza = str(Ds5); zzx = str(Ds6)
                        print(taps + Color + ssB + ssc + ssC + tap + sd3 + sd1 + sd4 + tap + xzz + xz + dxz)
                        print(taps + Color + sd7 + txt[x] + Color + sd7 + tap + sd7 + txt[sd] + Color + sd7 + tap + sd7 + txt[sx] + Color + sd7)
                        print(taps + Color + ssD + ssc + ssE + tap + sd5 + sd1 + sd6 + tap + zza + xz + zzx)
                        vip = str(plus)
                        if vip != '':
                            print(vip)
                    for i in range(bb):
                        if bb == 1:
                            lk = len(txt[-1]); sd1 = str(Ds2) * lk; sd3 = str(Ds1); sd4 = str(Ds3); sd5 = str(Ds5); sd6 = str(Ds6); sd7 = str(Color + Ds4 + TxtC)
                            print(taps + Color + sd3 + sd1 + sd4); print(taps + Color + sd7 + txt[-1] + Color + sd7); print(taps + Color + sd5 + sd1 + sd6)
                        if bb == 2:
                            lk = len(txt[-2]); sd1 = str(Ds2) * lk; sd3 = str(Ds1); sd4 = str(Ds3); sd5 = str(Ds5); sd6 = str(Ds6); sd7 = str(Color + Ds4 + TxtC)
                            tito = len(txt[-1]); ssc = str(Ds2) * tito; ssB = str(Ds1); ssC = str(Ds3); ssD = str(Ds5); ssE = str(Ds6)
                            print(taps + Color + sd3 + sd1 + sd4 + tap + ssB + ssc + ssC); print(taps + Color + sd7 + txt[-2] + Color + sd7 + tap + sd7 + txt[-1] + Color + sd7); print(taps + Color + sd5 + sd1 + sd6 + tap + ssD + ssc + ssE)
                        break
        if Equal == True:
            if cols == 1:
                max1 = 0; num = 0
                txt = My_Style.Txt
                sv = len(txt)
                for x in range(0, sv):
                    num = len(txt[x])
                    if num > max1:
                        max1 = num
                ss = max1 + 2
                for n in range(0, sv):
                    vb = len(txt[n])
                    smm = ss - vb + vb    # == ss; kept from the original
                    sd1 = str(Ds2) * ss; sd3 = str(Ds1); sd4 = str(Ds3); sd5 = str(Ds5); sd6 = str(Ds6); sd7 = str(Color + Ds4 + TxtC)
                    print(taps + Color + sd3 + sd1 + sd4); print(taps + Color + sd7 + txt[n].center(smm) + Color + sd7); print(taps + Color + sd5 + sd1 + sd6)
                    vip = str(plus)
                    if vip != '':
                        print(vip)
        if Equal == True:
            if cols == 2:
                ss = len(My_Style.Txt)
                bb = ss % 2
                s7 = ss - bb
                if bb % 2 == bb:
                    txt = My_Style.Txt
                    mm = len(My_Style.Txt)
                    max1 = 0; num = 0
                    for n in range(0, mm):
                        num = len(txt[n])
                        if num > max1:
                            max1 = num
                    ss = max1 + 2
                    # FIX: hoisted so the single-leftover branch works even when
                    # the paired loop below never runs.
                    sd1 = str(Ds2) * ss; sd3 = str(Ds1); sd4 = str(Ds3); sd5 = str(Ds5); sd6 = str(Ds6); sd7 = str(Color + Ds4 + TxtC)
                    for x in range(0, s7, 2):
                        mk = len(txt[x]); ssc = ss - mk + mk
                        sd = x + 1; sr = len(txt[sd]); cv = ss - sr + sr
                        print(taps + Color + sd3 + sd1 + sd4 + tap + sd3 + sd1 + sd4); print(taps + Color + sd7 + txt[x].center(ssc) + Color + sd7 + tap + sd7 + txt[sd].center(cv) + Color + sd7); print(taps + Color + sd5 + sd1 + sd6 + tap + sd5 + sd1 + sd6)
                        vip = str(plus)
                        if vip != '':
                            print(vip)
                    for i in range(bb):
                        if bb == 1:
                            lk = len(txt[-1]); snc = ss - lk + lk
                            print(taps + Color + sd3 + sd1 + sd4); print(taps + Color + sd7 + txt[-1].center(snc) + Color + sd7)
                            print(taps + Color + sd5 + sd1 + sd6)
                        if bb == 2:  # dead branch; kept for parity
                            lk = len(txt[-2]); snc = ss - lk + lk
                            tito = len(txt[-1]); trg = ss - tito + tito
                            print(taps + Color + sd3 + sd1 + sd4 + tap + sd3 + sd1 + sd4); print(taps + Color + sd7 + txt[-2].center(snc) + Color + sd7 + tap + sd7 + txt[-1].center(trg) + Color + sd7)
                            print(taps + Color + sd5 + sd1 + sd6 + tap + sd5 + sd1 + sd6)
                        break
        if Equal == True:
            if cols == 3:
                ss = len(My_Style.Txt)
                bb = ss % 3
                s7 = ss - bb
                if bb % 3 == bb:
                    txt = My_Style.Txt
                    mm = len(My_Style.Txt)
                    max1 = 0; num = 0
                    for n in range(0, mm):
                        num = len(txt[n])
                        if num > max1:
                            max1 = num
                    ss = max1 + 2
                    # FIX: hoisted (see cols == 2 above).
                    sd1 = str(Ds2) * ss; sd3 = str(Ds1); sd4 = str(Ds3); sd5 = str(Ds5); sd6 = str(Ds6); sd7 = str(Color + Ds4 + TxtC)
                    for x in range(0, s7, 3):
                        mk = len(txt[x]); ssc = ss - mk + mk
                        sd = x + 1; sr = len(txt[sd]); cv = ss - sr + sr
                        ccz = x + 2; vss = len(txt[ccz]); bhy = ss - vss + vss
                        print(taps + Color + sd3 + sd1 + sd4 + tap + sd3 + sd1 + sd4 + tap + sd3 + sd1 + sd4); print(taps + Color + sd7 + txt[x].center(ssc) + Color + sd7 + tap + sd7 + txt[sd].center(cv) + Color + sd7 + tap + sd7 + txt[ccz].center(bhy) + Color + sd7)
                        print(taps + Color + sd5 + sd1 + sd6 + tap + sd5 + sd1 + sd6 + tap + sd5 + sd1 + sd6)
                        vip = str(plus)
                        if vip != '':
                            print(vip)
                    for i in range(bb):
                        if bb == 1:
                            lk = len(txt[-1]); snc = ss - lk + lk
                            print(taps + Color + sd3 + sd1 + sd4); print(taps + Color + sd7 + txt[-1].center(snc) + Color + sd7)
                            print(taps + Color + sd5 + sd1 + sd6)
                        if bb == 2:
                            lk = len(txt[-2]); snc = ss - lk + lk
                            tito = len(txt[-1]); trg = ss - tito + tito
                            print(taps + Color + sd3 + sd1 + sd4 + tap + sd3 + sd1 + sd4); print(taps + Color + sd7 + txt[-2].center(snc) + Color + sd7 + tap + sd7 + txt[-1].center(trg) + Color + sd7)
                            print(taps + Color + sd5 + sd1 + sd6 + tap + sd5 + sd1 + sd6)
                        break

    def Center(My_Style, cols=3, Taps=0, Color='\033[1;31m', Space=0, TxtC='\033[1;37m', Ds1='╭', Ds2='─', Ds3='╮', Ds4='│', Ds5='╰', Ds6='╯', plus=''):
        """Like :meth:`Square` with ``Equal=True``, but the odd leftover box is
        printed shifted toward the centre of the row.  Supports ``cols`` 2 or 3.
        """
        # FIX: hoisted (previously assigned inside the loops).
        tap = ' ' * Space
        taps = ' ' * Taps
        if cols == 3:
            ss = len(My_Style.Txt)
            bb = ss % 3
            s7 = ss - bb
            if bb % 3 == bb:
                txt = My_Style.Txt
                mm = len(My_Style.Txt)
                max1 = 0; num = 0
                for n in range(0, mm):
                    num = len(txt[n])
                    if num > max1:
                        max1 = num
                ss = max1 + 2
                sd1 = str(Ds2) * ss; sd3 = str(Ds1); sd4 = str(Ds3); sd5 = str(Ds5); sd6 = str(Ds6); sd7 = str(Color + Ds4 + TxtC)
                for x in range(0, s7, 3):
                    mk = len(txt[x]); ssc = ss - mk + mk
                    sd = x + 1; sr = len(txt[sd]); cv = ss - sr + sr
                    ccz = x + 2; vss = len(txt[ccz]); bhy = ss - vss + vss
                    print(taps + Color + sd3 + sd1 + sd4 + tap + sd3 + sd1 + sd4 + tap + sd3 + sd1 + sd4); print(taps + Color + sd7 + txt[x].center(ssc) + Color + sd7 + tap + sd7 + txt[sd].center(cv) + Color + sd7 + tap + sd7 + txt[ccz].center(bhy) + Color + sd7)
                    print(taps + Color + sd5 + sd1 + sd6 + tap + sd5 + sd1 + sd6 + tap + sd5 + sd1 + sd6)
                    vip = str(plus)
                    if vip != '':
                        print(vip)
                for i in range(bb):
                    if bb == 1:
                        lk = len(txt[-1]); snc = ss - lk + lk
                        pp = max1 + 4
                        mm = ' ' * pp
                        # The original tested ``len(txt[sd]) % 3 == 1`` here,
                        # but both branches printed identically, so the test is
                        # dropped (this also fixes a NameError when fewer than
                        # three items exist).
                        print(taps + Color + mm + sd3 + sd1 + sd4); print(taps + Color + mm + sd7 + txt[-1].center(snc) + Color + sd7)
                        print(taps + Color + mm + sd5 + sd1 + sd6)
                    if bb == 2:
                        lk = len(txt[-2]); snc = ss - lk + lk
                        tito = len(txt[-1]); trg = ss - tito + tito
                        print(taps + Color + sd3 + sd1 + sd4 + tap + sd3 + sd1 + sd4); print(taps + Color + sd7 + txt[-2].center(snc) + Color + sd7 + tap + sd7 + txt[-1].center(trg) + Color + sd7)
                        print(taps + Color + sd5 + sd1 + sd6 + tap + sd5 + sd1 + sd6)
                    break
        if cols == 2:
            ss = len(My_Style.Txt)
            bb = ss % 2
            s7 = ss - bb
            if bb % 2 == bb:
                txt = My_Style.Txt
                mm = len(My_Style.Txt)
                max1 = 0; num = 0
                for n in range(0, mm):
                    num = len(txt[n])
                    if num > max1:
                        max1 = num
                ss = max1 + 2
                sd1 = str(Ds2) * ss; sd3 = str(Ds1); sd4 = str(Ds3); sd5 = str(Ds5); sd6 = str(Ds6); sd7 = str(Color + Ds4 + TxtC)
                for x in range(0, s7, 2):
                    mk = len(txt[x]); ssc = ss - mk + mk
                    sd = x + 1; sr = len(txt[sd]); cv = ss - sr + sr
                    print(taps + Color + sd3 + sd1 + sd4 + tap + sd3 + sd1 + sd4); print(taps + Color + sd7 + txt[x].center(ssc) + Color + sd7 + tap + sd7 + txt[sd].center(cv) + Color + sd7)
                    print(taps + Color + sd5 + sd1 + sd6 + tap + sd5 + sd1 + sd6)
                    vip = str(plus)
                    if vip != '':
                        print(vip)
                for i in range(bb):
                    if bb == 1:
                        lk = len(txt[-1]); snc = ss - lk + lk
                        pp = max1 // 2
                        jj = max1 - pp + 3
                        mm = str(' ') * jj
                        print(taps + Color + mm + sd3 + sd1 + sd4); print(taps + Color + mm + sd7 + txt[-1].center(snc) + Color + sd7)
                        print(taps + Color + mm + sd5 + sd1 + sd6)
                    if bb == 2:
                        lk = len(txt[-2]); snc = ss - lk + lk
                        tito = len(txt[-1]); trg = ss - tito + tito
                        print(taps + Color + sd3 + sd1 + sd4 + tap + sd3 + sd1 + sd4); print(taps + Color + sd7 + txt[-2].center(snc) + Color + sd7 + tap + sd7 + txt[-1].center(trg) + Color + sd7)
                        print(taps + Color + sd5 + sd1 + sd6 + tap + sd5 + sd1 + sd6)
                    break
#########################################
# Dark_Storm #
######################################### | PypiClean |
/KD_Lib-0.0.32.tar.gz/KD_Lib-0.0.32/KD_Lib/KD/vision/KA/LSR.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from KD_Lib.KD.common import BaseClass
class LabelSmoothReg(BaseClass):
    """
    Implementation of the label smoothening regularization technique from the paper
    "Preparing Lessons: Improve Knowledge Distillation with Better Supervision"
    https://arxiv.org/abs/1911.07471

    :param teacher_model (torch.nn.Module): Teacher model
    :param student_model (torch.nn.Module): Student model
    :param train_loader (torch.utils.data.DataLoader): Dataloader for training
    :param val_loader (torch.utils.data.DataLoader): Dataloader for validation/testing
    :param optimizer_teacher (torch.optim.*): Optimizer used for training teacher
    :param optimizer_student (torch.optim.*): Optimizer used for training student
    :param correct_prob(float): The probability which is given to the correct class
    :param loss_fn (torch.nn.Module): Loss Function used for distillation
    :param temp (float): Temperature parameter for distillation
    :param ka_weight (float): Weight (0 to 1) given to knowledge adjusted loss.
    :param device (str): Device used for training; 'cpu' for cpu and 'cuda' for gpu
    :param log (bool): True if logging required
    :param logdir (str): Directory for storing logs
    """

    def __init__(
        self,
        teacher_model,
        student_model,
        train_loader,
        val_loader,
        optimizer_teacher,
        optimizer_student,
        correct_prob=0.90,
        # NOTE(review): a shared nn.KLDivLoss instance as a default argument is
        # a mutable default; harmless here only because the module is stateless.
        loss_fn=nn.KLDivLoss(reduction="batchmean"),
        temp=20.0,
        ka_weight=0.85,
        device="cpu",
        log=False,
        logdir="./Experiments",
    ):
        # ka_weight is stored by the base class as ``distil_weight``.
        super(LabelSmoothReg, self).__init__(
            teacher_model,
            student_model,
            train_loader,
            val_loader,
            optimizer_teacher,
            optimizer_student,
            loss_fn=loss_fn,
            temp=temp,
            distil_weight=ka_weight,
            device=device,
            log=log,
            logdir=logdir,
        )

        # Probability mass assigned to the ground-truth class when the teacher
        # prediction disagrees with the label (remainder is spread uniformly).
        self.correct_prob = correct_prob

    def calculate_kd_loss(self, y_pred_student, y_pred_teacher, y_true):
        """
        Applies label smoothing with teacher outputs to compare with student.

        :param y_pred_student (Tensor): Predicted outputs from the student network
        :param y_pred_teacher (Tensor): Predicted outputs from the teacher network
        :param y_true (Tensor): True labels
        """
        num_classes = y_pred_teacher.shape[1]
        soft_pred_student = F.softmax(y_pred_student / self.temp, dim=1)
        with torch.no_grad():
            soft_pred_teacher = F.softmax(y_pred_teacher / self.temp, dim=1)
            # Build the "knowledge adjusted" target distribution row by row.
            activated_label = torch.zeros(soft_pred_teacher.shape).to(self.device)
            for i in range(soft_pred_teacher.shape[0]):
                t_label = torch.argmax(soft_pred_teacher[i])
                if t_label == y_true[i]:
                    # Teacher is right: use its soft distribution as the target.
                    activated_label[i] = soft_pred_teacher[i]
                else:
                    # Teacher is wrong: fall back to a label-smoothed one-hot
                    # target with ``correct_prob`` on the true class.
                    activated_label[i] = (1 - self.correct_prob) / (num_classes - 1)
                    activated_label[i][y_true[i]] = self.correct_prob
        # NOTE(review): nn.KLDivLoss expects log-probabilities as its *input*
        # (first) argument and probabilities as the *target* (second); here two
        # softmax outputs are passed with the target first — verify the
        # intended ordering against the paper / KD_Lib conventions.
        ka_loss = (self.temp * self.temp) * self.loss_fn(
            activated_label, soft_pred_student
        )
        ce_loss = self.temp * nn.CrossEntropyLoss()(y_pred_student / self.temp, y_true)
        # Convex combination; distil_weight holds the constructor's ka_weight.
        return (1 - self.distil_weight) * ce_loss + self.distil_weight * ka_loss
/KalturaApiClient-19.3.0.tar.gz/KalturaApiClient-19.3.0/KalturaClient/Plugins/Caption.py | from __future__ import absolute_import
from .Core import *
from ..Base import (
getXmlNodeBool,
getXmlNodeFloat,
getXmlNodeInt,
getXmlNodeText,
KalturaClientPlugin,
KalturaEnumsFactory,
KalturaObjectBase,
KalturaObjectFactory,
KalturaParams,
KalturaServiceBase,
)
########## enums ##########
# @package Kaltura
# @subpackage Client
class KalturaCaptionAssetStatus(object):
    """Integer status codes describing a caption asset's lifecycle."""

    ERROR = -1
    QUEUED = 0
    READY = 2
    DELETED = 3
    IMPORTING = 7
    EXPORTING = 9

    def __init__(self, value):
        """Wrap a raw status code received from or sent to the API."""
        self.value = value

    def getValue(self):
        """Return the wrapped raw status code."""
        return self.value
# @package Kaltura
# @subpackage Client
class KalturaCaptionAssetOrderBy(object):
    """Sort-order tokens for caption-asset list requests ('+' asc, '-' desc)."""

    CREATED_AT_ASC = "+createdAt"
    DELETED_AT_ASC = "+deletedAt"
    SIZE_ASC = "+size"
    UPDATED_AT_ASC = "+updatedAt"
    CREATED_AT_DESC = "-createdAt"
    DELETED_AT_DESC = "-deletedAt"
    SIZE_DESC = "-size"
    UPDATED_AT_DESC = "-updatedAt"

    def __init__(self, value):
        """Wrap a raw order-by token."""
        self.value = value

    def getValue(self):
        """Return the wrapped order-by token."""
        return self.value
# @package Kaltura
# @subpackage Client
class KalturaCaptionParamsOrderBy(object):
    """Sort-order enum for caption-params lists; the API defines no members,
    so this only wraps whatever raw token the server accepts."""

    def __init__(self, value):
        """Wrap a raw order-by token."""
        self.value = value

    def getValue(self):
        """Return the wrapped order-by token."""
        return self.value
# @package Kaltura
# @subpackage Client
class KalturaCaptionSource(object):
    """String codes identifying where a caption asset originated."""

    UNKNOWN = "0"
    ZOOM = "1"
    WEBEX = "2"

    def __init__(self, value):
        """Wrap a raw source code."""
        self.value = value

    def getValue(self):
        """Return the wrapped raw source code."""
        return self.value
# @package Kaltura
# @subpackage Client
class KalturaCaptionType(object):
    """String codes for the caption file formats the API understands."""

    SRT = "1"
    DFXP = "2"
    WEBVTT = "3"
    CAP = "4"
    SCC = "5"

    def __init__(self, value):
        """Wrap a raw caption-type code."""
        self.value = value

    def getValue(self):
        """Return the wrapped raw caption-type code."""
        return self.value
########## classes ##########
# @package Kaltura
# @subpackage Client
class KalturaCaptionAsset(KalturaAsset):
    """Caption asset attached to an entry: the caption file plus its language,
    format, source and player-display metadata.

    Generated Kaltura client object: construction takes keyword fields (all
    defaulting to NotImplemented = "not set"), fromXml() deserializes a
    response node, toParams() serializes the writable fields for a request.
    """

    def __init__(self,
            id=NotImplemented,
            entryId=NotImplemented,
            partnerId=NotImplemented,
            version=NotImplemented,
            size=NotImplemented,
            tags=NotImplemented,
            fileExt=NotImplemented,
            createdAt=NotImplemented,
            updatedAt=NotImplemented,
            deletedAt=NotImplemented,
            description=NotImplemented,
            partnerData=NotImplemented,
            partnerDescription=NotImplemented,
            actualSourceAssetParamsIds=NotImplemented,
            sizeInBytes=NotImplemented,
            captionParamsId=NotImplemented,
            language=NotImplemented,
            languageCode=NotImplemented,
            isDefault=NotImplemented,
            label=NotImplemented,
            format=NotImplemented,
            source=NotImplemented,
            status=NotImplemented,
            parentId=NotImplemented,
            accuracy=NotImplemented,
            displayOnPlayer=NotImplemented,
            associatedTranscriptIds=NotImplemented):
        # Base-class fields are forwarded positionally, in KalturaAsset order.
        KalturaAsset.__init__(self,
            id,
            entryId,
            partnerId,
            version,
            size,
            tags,
            fileExt,
            createdAt,
            updatedAt,
            deletedAt,
            description,
            partnerData,
            partnerDescription,
            actualSourceAssetParamsIds,
            sizeInBytes)

        # The Caption Params used to create this Caption Asset
        # @var int
        # @insertonly
        self.captionParamsId = captionParamsId

        # The language of the caption asset content
        # @var KalturaLanguage
        self.language = language

        # The language of the caption asset content
        # @var KalturaLanguageCode
        # @readonly
        self.languageCode = languageCode

        # Is default caption asset of the entry
        # @var KalturaNullableBoolean
        self.isDefault = isDefault

        # Friendly label
        # @var string
        self.label = label

        # The caption format
        # @var KalturaCaptionType
        # @insertonly
        self.format = format

        # The source of the asset
        # @var KalturaCaptionSource
        # @insertonly
        self.source = source

        # The status of the asset
        # @var KalturaCaptionAssetStatus
        # @readonly
        self.status = status

        # The parent id of the asset
        # @var string
        # @insertonly
        self.parentId = parentId

        # The Accuracy of the caption content
        # @var int
        self.accuracy = accuracy

        # Whether the caption is shown in the player
        # (original generated comment wrongly repeated "Accuracy" here)
        # @var bool
        self.displayOnPlayer = displayOnPlayer

        # List of associated transcript asset id's, comma separated
        # @var string
        self.associatedTranscriptIds = associatedTranscriptIds

    # Maps wire attribute name -> XML node parser used by fromXml().
    PROPERTY_LOADERS = {
        'captionParamsId': getXmlNodeInt,
        'language': (KalturaEnumsFactory.createString, "KalturaLanguage"),
        'languageCode': (KalturaEnumsFactory.createString, "KalturaLanguageCode"),
        'isDefault': (KalturaEnumsFactory.createInt, "KalturaNullableBoolean"),
        'label': getXmlNodeText,
        'format': (KalturaEnumsFactory.createString, "KalturaCaptionType"),
        'source': (KalturaEnumsFactory.createString, "KalturaCaptionSource"),
        'status': (KalturaEnumsFactory.createInt, "KalturaCaptionAssetStatus"),
        'parentId': getXmlNodeText,
        'accuracy': getXmlNodeInt,
        'displayOnPlayer': getXmlNodeBool,
        'associatedTranscriptIds': getXmlNodeText,
    }

    def fromXml(self, node):
        """Populate base-class fields, then this class's, from an XML node."""
        KalturaAsset.fromXml(self, node)
        self.fromXmlImpl(node, KalturaCaptionAsset.PROPERTY_LOADERS)

    def toParams(self):
        """Serialize the writable (non-@readonly) fields to a request map."""
        kparams = KalturaAsset.toParams(self)
        kparams.put("objectType", "KalturaCaptionAsset")
        kparams.addIntIfDefined("captionParamsId", self.captionParamsId)
        kparams.addStringEnumIfDefined("language", self.language)
        kparams.addIntEnumIfDefined("isDefault", self.isDefault)
        kparams.addStringIfDefined("label", self.label)
        kparams.addStringEnumIfDefined("format", self.format)
        kparams.addStringEnumIfDefined("source", self.source)
        kparams.addStringIfDefined("parentId", self.parentId)
        kparams.addIntIfDefined("accuracy", self.accuracy)
        kparams.addBoolIfDefined("displayOnPlayer", self.displayOnPlayer)
        kparams.addStringIfDefined("associatedTranscriptIds", self.associatedTranscriptIds)
        return kparams

    def getCaptionParamsId(self):
        return self.captionParamsId

    def setCaptionParamsId(self, newCaptionParamsId):
        self.captionParamsId = newCaptionParamsId

    def getLanguage(self):
        return self.language

    def setLanguage(self, newLanguage):
        self.language = newLanguage

    def getLanguageCode(self):
        # read-only on the server side: no setter is generated
        return self.languageCode

    def getIsDefault(self):
        return self.isDefault

    def setIsDefault(self, newIsDefault):
        self.isDefault = newIsDefault

    def getLabel(self):
        return self.label

    def setLabel(self, newLabel):
        self.label = newLabel

    def getFormat(self):
        return self.format

    def setFormat(self, newFormat):
        self.format = newFormat

    def getSource(self):
        return self.source

    def setSource(self, newSource):
        self.source = newSource

    def getStatus(self):
        # read-only on the server side: no setter is generated
        return self.status

    def getParentId(self):
        return self.parentId

    def setParentId(self, newParentId):
        self.parentId = newParentId

    def getAccuracy(self):
        return self.accuracy

    def setAccuracy(self, newAccuracy):
        self.accuracy = newAccuracy

    def getDisplayOnPlayer(self):
        return self.displayOnPlayer

    def setDisplayOnPlayer(self, newDisplayOnPlayer):
        self.displayOnPlayer = newDisplayOnPlayer

    def getAssociatedTranscriptIds(self):
        return self.associatedTranscriptIds

    def setAssociatedTranscriptIds(self, newAssociatedTranscriptIds):
        self.associatedTranscriptIds = newAssociatedTranscriptIds
# @package Kaltura
# @subpackage Client
class KalturaCaptionParams(KalturaAssetParams):
    """Parameters describing how a caption asset is created (language, format, label,
    default flag and an optional source params id). Generated Kaltura API model."""
    def __init__(self,
            id=NotImplemented,
            partnerId=NotImplemented,
            name=NotImplemented,
            systemName=NotImplemented,
            description=NotImplemented,
            createdAt=NotImplemented,
            isSystemDefault=NotImplemented,
            tags=NotImplemented,
            requiredPermissions=NotImplemented,
            sourceRemoteStorageProfileId=NotImplemented,
            remoteStorageProfileIds=NotImplemented,
            mediaParserType=NotImplemented,
            sourceAssetParamsIds=NotImplemented,
            language=NotImplemented,
            isDefault=NotImplemented,
            label=NotImplemented,
            format=NotImplemented,
            sourceParamsId=NotImplemented):
        KalturaAssetParams.__init__(self,
            id,
            partnerId,
            name,
            systemName,
            description,
            createdAt,
            isSystemDefault,
            tags,
            requiredPermissions,
            sourceRemoteStorageProfileId,
            remoteStorageProfileIds,
            mediaParserType,
            sourceAssetParamsIds)
        # The language of the caption content
        # @var KalturaLanguage
        # @insertonly
        self.language = language
        # Is default caption asset of the entry
        # @var KalturaNullableBoolean
        self.isDefault = isDefault
        # Friendly label
        # @var string
        self.label = label
        # The caption format
        # @var KalturaCaptionType
        # @insertonly
        self.format = format
        # Id of the caption params or the flavor params to be used as source for the caption creation
        # @var int
        self.sourceParamsId = sourceParamsId
    # Maps XML element names to the loader callable (or enum factory tuple) used by fromXml.
    PROPERTY_LOADERS = {
        'language': (KalturaEnumsFactory.createString, "KalturaLanguage"),
        'isDefault': (KalturaEnumsFactory.createInt, "KalturaNullableBoolean"),
        'label': getXmlNodeText,
        'format': (KalturaEnumsFactory.createString, "KalturaCaptionType"),
        'sourceParamsId': getXmlNodeInt,
    }
    def fromXml(self, node):
        """Populate this object from an XML response node (base fields first)."""
        KalturaAssetParams.fromXml(self, node)
        self.fromXmlImpl(node, KalturaCaptionParams.PROPERTY_LOADERS)
    def toParams(self):
        """Serialize the writable fields into a KalturaParams request object."""
        kparams = KalturaAssetParams.toParams(self)
        kparams.put("objectType", "KalturaCaptionParams")
        kparams.addStringEnumIfDefined("language", self.language)
        kparams.addIntEnumIfDefined("isDefault", self.isDefault)
        kparams.addStringIfDefined("label", self.label)
        kparams.addStringEnumIfDefined("format", self.format)
        kparams.addIntIfDefined("sourceParamsId", self.sourceParamsId)
        return kparams
    # Generated accessors.
    def getLanguage(self):
        return self.language
    def setLanguage(self, newLanguage):
        self.language = newLanguage
    def getIsDefault(self):
        return self.isDefault
    def setIsDefault(self, newIsDefault):
        self.isDefault = newIsDefault
    def getLabel(self):
        return self.label
    def setLabel(self, newLabel):
        self.label = newLabel
    def getFormat(self):
        return self.format
    def setFormat(self, newFormat):
        self.format = newFormat
    def getSourceParamsId(self):
        return self.sourceParamsId
    def setSourceParamsId(self, newSourceParamsId):
        self.sourceParamsId = newSourceParamsId
# @package Kaltura
# @subpackage Client
class KalturaCaptionPlaybackPluginData(KalturaObjectBase):
    """Caption track metadata delivered to players (label, format, language and
    download URLs). Generated Kaltura API model."""
    def __init__(self,
            label=NotImplemented,
            format=NotImplemented,
            language=NotImplemented,
            webVttUrl=NotImplemented,
            url=NotImplemented,
            isDefault=NotImplemented,
            languageCode=NotImplemented):
        KalturaObjectBase.__init__(self)
        # @var string
        self.label = label
        # @var string
        self.format = format
        # @var string
        self.language = language
        # @var string
        self.webVttUrl = webVttUrl
        # @var string
        self.url = url
        # @var bool
        self.isDefault = isDefault
        # @var string
        self.languageCode = languageCode
    # Maps XML element names to the loader callable used by fromXml.
    PROPERTY_LOADERS = {
        'label': getXmlNodeText,
        'format': getXmlNodeText,
        'language': getXmlNodeText,
        'webVttUrl': getXmlNodeText,
        'url': getXmlNodeText,
        'isDefault': getXmlNodeBool,
        'languageCode': getXmlNodeText,
    }
    def fromXml(self, node):
        """Populate this object from an XML response node (base fields first)."""
        KalturaObjectBase.fromXml(self, node)
        self.fromXmlImpl(node, KalturaCaptionPlaybackPluginData.PROPERTY_LOADERS)
    def toParams(self):
        """Serialize the writable fields into a KalturaParams request object."""
        kparams = KalturaObjectBase.toParams(self)
        kparams.put("objectType", "KalturaCaptionPlaybackPluginData")
        kparams.addStringIfDefined("label", self.label)
        kparams.addStringIfDefined("format", self.format)
        kparams.addStringIfDefined("language", self.language)
        kparams.addStringIfDefined("webVttUrl", self.webVttUrl)
        kparams.addStringIfDefined("url", self.url)
        kparams.addBoolIfDefined("isDefault", self.isDefault)
        kparams.addStringIfDefined("languageCode", self.languageCode)
        return kparams
    # Generated accessors.
    def getLabel(self):
        return self.label
    def setLabel(self, newLabel):
        self.label = newLabel
    def getFormat(self):
        return self.format
    def setFormat(self, newFormat):
        self.format = newFormat
    def getLanguage(self):
        return self.language
    def setLanguage(self, newLanguage):
        self.language = newLanguage
    def getWebVttUrl(self):
        return self.webVttUrl
    def setWebVttUrl(self, newWebVttUrl):
        self.webVttUrl = newWebVttUrl
    def getUrl(self):
        return self.url
    def setUrl(self, newUrl):
        self.url = newUrl
    def getIsDefault(self):
        return self.isDefault
    def setIsDefault(self, newIsDefault):
        self.isDefault = newIsDefault
    def getLanguageCode(self):
        return self.languageCode
    def setLanguageCode(self, newLanguageCode):
        self.languageCode = newLanguageCode
# @package Kaltura
# @subpackage Client
class KalturaCaptionAssetListResponse(KalturaListResponse):
    """Paged list response wrapping KalturaCaptionAsset objects. Generated Kaltura API model."""
    def __init__(self,
            totalCount=NotImplemented,
            objects=NotImplemented):
        KalturaListResponse.__init__(self,
            totalCount)
        # @var array of KalturaCaptionAsset
        # @readonly
        self.objects = objects
    # Maps XML element names to the loader callable used by fromXml.
    PROPERTY_LOADERS = {
        'objects': (KalturaObjectFactory.createArray, 'KalturaCaptionAsset'),
    }
    def fromXml(self, node):
        """Populate this object from an XML response node (base fields first)."""
        KalturaListResponse.fromXml(self, node)
        self.fromXmlImpl(node, KalturaCaptionAssetListResponse.PROPERTY_LOADERS)
    def toParams(self):
        """Serialize into a KalturaParams object; objects is read-only, so only the type tag is sent."""
        kparams = KalturaListResponse.toParams(self)
        kparams.put("objectType", "KalturaCaptionAssetListResponse")
        return kparams
    def getObjects(self):
        return self.objects
# @package Kaltura
# @subpackage Client
class KalturaCaptionParamsListResponse(KalturaListResponse):
    """Paged list response wrapping KalturaCaptionParams objects. Generated Kaltura API model."""
    def __init__(self,
            totalCount=NotImplemented,
            objects=NotImplemented):
        KalturaListResponse.__init__(self,
            totalCount)
        # @var array of KalturaCaptionParams
        # @readonly
        self.objects = objects
    # Maps XML element names to the loader callable used by fromXml.
    PROPERTY_LOADERS = {
        'objects': (KalturaObjectFactory.createArray, 'KalturaCaptionParams'),
    }
    def fromXml(self, node):
        """Populate this object from an XML response node (base fields first)."""
        KalturaListResponse.fromXml(self, node)
        self.fromXmlImpl(node, KalturaCaptionParamsListResponse.PROPERTY_LOADERS)
    def toParams(self):
        """Serialize into a KalturaParams object; objects is read-only, so only the type tag is sent."""
        kparams = KalturaListResponse.toParams(self)
        kparams.put("objectType", "KalturaCaptionParamsListResponse")
        return kparams
    def getObjects(self):
        return self.objects
# @package Kaltura
# @subpackage Client
class KalturaConvertCaptionAssetJobData(KalturaJobData):
    """Job payload for converting a caption asset from one caption format to another.
    Generated Kaltura API model."""
    def __init__(self,
            captionAssetId=NotImplemented,
            fileLocation=NotImplemented,
            fileEncryptionKey=NotImplemented,
            fromType=NotImplemented,
            toType=NotImplemented):
        KalturaJobData.__init__(self)
        # @var string
        self.captionAssetId = captionAssetId
        # @var string
        self.fileLocation = fileLocation
        # @var string
        self.fileEncryptionKey = fileEncryptionKey
        # @var string
        self.fromType = fromType
        # @var string
        self.toType = toType
    # Maps XML element names to the loader callable used by fromXml.
    PROPERTY_LOADERS = {
        'captionAssetId': getXmlNodeText,
        'fileLocation': getXmlNodeText,
        'fileEncryptionKey': getXmlNodeText,
        'fromType': getXmlNodeText,
        'toType': getXmlNodeText,
    }
    def fromXml(self, node):
        """Populate this object from an XML response node (base fields first)."""
        KalturaJobData.fromXml(self, node)
        self.fromXmlImpl(node, KalturaConvertCaptionAssetJobData.PROPERTY_LOADERS)
    def toParams(self):
        """Serialize the writable fields into a KalturaParams request object."""
        kparams = KalturaJobData.toParams(self)
        kparams.put("objectType", "KalturaConvertCaptionAssetJobData")
        kparams.addStringIfDefined("captionAssetId", self.captionAssetId)
        kparams.addStringIfDefined("fileLocation", self.fileLocation)
        kparams.addStringIfDefined("fileEncryptionKey", self.fileEncryptionKey)
        kparams.addStringIfDefined("fromType", self.fromType)
        kparams.addStringIfDefined("toType", self.toType)
        return kparams
    # Generated accessors.
    def getCaptionAssetId(self):
        return self.captionAssetId
    def setCaptionAssetId(self, newCaptionAssetId):
        self.captionAssetId = newCaptionAssetId
    def getFileLocation(self):
        return self.fileLocation
    def setFileLocation(self, newFileLocation):
        self.fileLocation = newFileLocation
    def getFileEncryptionKey(self):
        return self.fileEncryptionKey
    def setFileEncryptionKey(self, newFileEncryptionKey):
        self.fileEncryptionKey = newFileEncryptionKey
    def getFromType(self):
        return self.fromType
    def setFromType(self, newFromType):
        self.fromType = newFromType
    def getToType(self):
        return self.toType
    def setToType(self, newToType):
        self.toType = newToType
# @package Kaltura
# @subpackage Client
class KalturaCopyCaptionsJobData(KalturaJobData):
    """Job payload for copying captions onto an entry, either in full or per-clip.
    Generated Kaltura API model."""
    def __init__(self,
            entryId=NotImplemented,
            clipsDescriptionArray=NotImplemented,
            fullCopy=NotImplemented):
        KalturaJobData.__init__(self)
        # entry Id
        # @var string
        self.entryId = entryId
        # an array of source start time and duration
        # @var array of KalturaClipDescription
        self.clipsDescriptionArray = clipsDescriptionArray
        # @var bool
        self.fullCopy = fullCopy
    # Maps XML element names to the loader callable used by fromXml.
    PROPERTY_LOADERS = {
        'entryId': getXmlNodeText,
        'clipsDescriptionArray': (KalturaObjectFactory.createArray, 'KalturaClipDescription'),
        'fullCopy': getXmlNodeBool,
    }
    def fromXml(self, node):
        """Populate this object from an XML response node (base fields first)."""
        KalturaJobData.fromXml(self, node)
        self.fromXmlImpl(node, KalturaCopyCaptionsJobData.PROPERTY_LOADERS)
    def toParams(self):
        """Serialize the writable fields into a KalturaParams request object."""
        kparams = KalturaJobData.toParams(self)
        kparams.put("objectType", "KalturaCopyCaptionsJobData")
        kparams.addStringIfDefined("entryId", self.entryId)
        kparams.addArrayIfDefined("clipsDescriptionArray", self.clipsDescriptionArray)
        kparams.addBoolIfDefined("fullCopy", self.fullCopy)
        return kparams
    # Generated accessors.
    def getEntryId(self):
        return self.entryId
    def setEntryId(self, newEntryId):
        self.entryId = newEntryId
    def getClipsDescriptionArray(self):
        return self.clipsDescriptionArray
    def setClipsDescriptionArray(self, newClipsDescriptionArray):
        self.clipsDescriptionArray = newClipsDescriptionArray
    def getFullCopy(self):
        return self.fullCopy
    def setFullCopy(self, newFullCopy):
        self.fullCopy = newFullCopy
# @package Kaltura
# @subpackage Client
class KalturaParseMultiLanguageCaptionAssetJobData(KalturaJobData):
    """Job payload for parsing a multi-language caption asset into per-language assets.
    Generated Kaltura API model. NOTE: 'multiLanaguage' is misspelled in the wire
    protocol itself and must not be corrected here, or serialization will break."""
    def __init__(self,
            multiLanaguageCaptionAssetId=NotImplemented,
            entryId=NotImplemented,
            fileLocation=NotImplemented,
            fileEncryptionKey=NotImplemented):
        KalturaJobData.__init__(self)
        # @var string
        self.multiLanaguageCaptionAssetId = multiLanaguageCaptionAssetId
        # @var string
        self.entryId = entryId
        # @var string
        self.fileLocation = fileLocation
        # @var string
        self.fileEncryptionKey = fileEncryptionKey
    # Maps XML element names to the loader callable used by fromXml.
    PROPERTY_LOADERS = {
        'multiLanaguageCaptionAssetId': getXmlNodeText,
        'entryId': getXmlNodeText,
        'fileLocation': getXmlNodeText,
        'fileEncryptionKey': getXmlNodeText,
    }
    def fromXml(self, node):
        """Populate this object from an XML response node (base fields first)."""
        KalturaJobData.fromXml(self, node)
        self.fromXmlImpl(node, KalturaParseMultiLanguageCaptionAssetJobData.PROPERTY_LOADERS)
    def toParams(self):
        """Serialize the writable fields into a KalturaParams request object."""
        kparams = KalturaJobData.toParams(self)
        kparams.put("objectType", "KalturaParseMultiLanguageCaptionAssetJobData")
        kparams.addStringIfDefined("multiLanaguageCaptionAssetId", self.multiLanaguageCaptionAssetId)
        kparams.addStringIfDefined("entryId", self.entryId)
        kparams.addStringIfDefined("fileLocation", self.fileLocation)
        kparams.addStringIfDefined("fileEncryptionKey", self.fileEncryptionKey)
        return kparams
    # Generated accessors.
    def getMultiLanaguageCaptionAssetId(self):
        return self.multiLanaguageCaptionAssetId
    def setMultiLanaguageCaptionAssetId(self, newMultiLanaguageCaptionAssetId):
        self.multiLanaguageCaptionAssetId = newMultiLanaguageCaptionAssetId
    def getEntryId(self):
        return self.entryId
    def setEntryId(self, newEntryId):
        self.entryId = newEntryId
    def getFileLocation(self):
        return self.fileLocation
    def setFileLocation(self, newFileLocation):
        self.fileLocation = newFileLocation
    def getFileEncryptionKey(self):
        return self.fileEncryptionKey
    def setFileEncryptionKey(self, newFileEncryptionKey):
        self.fileEncryptionKey = newFileEncryptionKey
# @package Kaltura
# @subpackage Client
class KalturaCaptionAssetBaseFilter(KalturaAssetFilter):
    """Filter criteria for caption-asset list queries: caption params id, format and
    status, on top of the inherited asset filter fields. Generated Kaltura API model."""
    def __init__(self,
            orderBy=NotImplemented,
            advancedSearch=NotImplemented,
            idEqual=NotImplemented,
            idIn=NotImplemented,
            entryIdEqual=NotImplemented,
            entryIdIn=NotImplemented,
            partnerIdEqual=NotImplemented,
            partnerIdIn=NotImplemented,
            sizeGreaterThanOrEqual=NotImplemented,
            sizeLessThanOrEqual=NotImplemented,
            tagsLike=NotImplemented,
            tagsMultiLikeOr=NotImplemented,
            tagsMultiLikeAnd=NotImplemented,
            createdAtGreaterThanOrEqual=NotImplemented,
            createdAtLessThanOrEqual=NotImplemented,
            updatedAtGreaterThanOrEqual=NotImplemented,
            updatedAtLessThanOrEqual=NotImplemented,
            deletedAtGreaterThanOrEqual=NotImplemented,
            deletedAtLessThanOrEqual=NotImplemented,
            typeIn=NotImplemented,
            captionParamsIdEqual=NotImplemented,
            captionParamsIdIn=NotImplemented,
            formatEqual=NotImplemented,
            formatIn=NotImplemented,
            statusEqual=NotImplemented,
            statusIn=NotImplemented,
            statusNotIn=NotImplemented):
        KalturaAssetFilter.__init__(self,
            orderBy,
            advancedSearch,
            idEqual,
            idIn,
            entryIdEqual,
            entryIdIn,
            partnerIdEqual,
            partnerIdIn,
            sizeGreaterThanOrEqual,
            sizeLessThanOrEqual,
            tagsLike,
            tagsMultiLikeOr,
            tagsMultiLikeAnd,
            createdAtGreaterThanOrEqual,
            createdAtLessThanOrEqual,
            updatedAtGreaterThanOrEqual,
            updatedAtLessThanOrEqual,
            deletedAtGreaterThanOrEqual,
            deletedAtLessThanOrEqual,
            typeIn)
        # @var int
        self.captionParamsIdEqual = captionParamsIdEqual
        # @var string
        self.captionParamsIdIn = captionParamsIdIn
        # @var KalturaCaptionType
        self.formatEqual = formatEqual
        # @var string
        self.formatIn = formatIn
        # @var KalturaCaptionAssetStatus
        self.statusEqual = statusEqual
        # @var string
        self.statusIn = statusIn
        # @var string
        self.statusNotIn = statusNotIn
    # Maps XML element names to the loader callable (or enum factory tuple) used by fromXml.
    PROPERTY_LOADERS = {
        'captionParamsIdEqual': getXmlNodeInt,
        'captionParamsIdIn': getXmlNodeText,
        'formatEqual': (KalturaEnumsFactory.createString, "KalturaCaptionType"),
        'formatIn': getXmlNodeText,
        'statusEqual': (KalturaEnumsFactory.createInt, "KalturaCaptionAssetStatus"),
        'statusIn': getXmlNodeText,
        'statusNotIn': getXmlNodeText,
    }
    def fromXml(self, node):
        """Populate this object from an XML response node (base fields first)."""
        KalturaAssetFilter.fromXml(self, node)
        self.fromXmlImpl(node, KalturaCaptionAssetBaseFilter.PROPERTY_LOADERS)
    def toParams(self):
        """Serialize the writable fields into a KalturaParams request object."""
        kparams = KalturaAssetFilter.toParams(self)
        kparams.put("objectType", "KalturaCaptionAssetBaseFilter")
        kparams.addIntIfDefined("captionParamsIdEqual", self.captionParamsIdEqual)
        kparams.addStringIfDefined("captionParamsIdIn", self.captionParamsIdIn)
        kparams.addStringEnumIfDefined("formatEqual", self.formatEqual)
        kparams.addStringIfDefined("formatIn", self.formatIn)
        kparams.addIntEnumIfDefined("statusEqual", self.statusEqual)
        kparams.addStringIfDefined("statusIn", self.statusIn)
        kparams.addStringIfDefined("statusNotIn", self.statusNotIn)
        return kparams
    # Generated accessors.
    def getCaptionParamsIdEqual(self):
        return self.captionParamsIdEqual
    def setCaptionParamsIdEqual(self, newCaptionParamsIdEqual):
        self.captionParamsIdEqual = newCaptionParamsIdEqual
    def getCaptionParamsIdIn(self):
        return self.captionParamsIdIn
    def setCaptionParamsIdIn(self, newCaptionParamsIdIn):
        self.captionParamsIdIn = newCaptionParamsIdIn
    def getFormatEqual(self):
        return self.formatEqual
    def setFormatEqual(self, newFormatEqual):
        self.formatEqual = newFormatEqual
    def getFormatIn(self):
        return self.formatIn
    def setFormatIn(self, newFormatIn):
        self.formatIn = newFormatIn
    def getStatusEqual(self):
        return self.statusEqual
    def setStatusEqual(self, newStatusEqual):
        self.statusEqual = newStatusEqual
    def getStatusIn(self):
        return self.statusIn
    def setStatusIn(self, newStatusIn):
        self.statusIn = newStatusIn
    def getStatusNotIn(self):
        return self.statusNotIn
    def setStatusNotIn(self, newStatusNotIn):
        self.statusNotIn = newStatusNotIn
# @package Kaltura
# @subpackage Client
class KalturaCaptionParamsBaseFilter(KalturaAssetParamsFilter):
    """Filter criteria for caption-params list queries: caption format, on top of the
    inherited asset-params filter fields. Generated Kaltura API model."""
    def __init__(self,
            orderBy=NotImplemented,
            advancedSearch=NotImplemented,
            idEqual=NotImplemented,
            idIn=NotImplemented,
            systemNameEqual=NotImplemented,
            systemNameIn=NotImplemented,
            isSystemDefaultEqual=NotImplemented,
            tagsEqual=NotImplemented,
            formatEqual=NotImplemented,
            formatIn=NotImplemented):
        KalturaAssetParamsFilter.__init__(self,
            orderBy,
            advancedSearch,
            idEqual,
            idIn,
            systemNameEqual,
            systemNameIn,
            isSystemDefaultEqual,
            tagsEqual)
        # @var KalturaCaptionType
        self.formatEqual = formatEqual
        # @var string
        self.formatIn = formatIn
    # Maps XML element names to the loader callable (or enum factory tuple) used by fromXml.
    PROPERTY_LOADERS = {
        'formatEqual': (KalturaEnumsFactory.createString, "KalturaCaptionType"),
        'formatIn': getXmlNodeText,
    }
    def fromXml(self, node):
        """Populate this object from an XML response node (base fields first)."""
        KalturaAssetParamsFilter.fromXml(self, node)
        self.fromXmlImpl(node, KalturaCaptionParamsBaseFilter.PROPERTY_LOADERS)
    def toParams(self):
        """Serialize the writable fields into a KalturaParams request object."""
        kparams = KalturaAssetParamsFilter.toParams(self)
        kparams.put("objectType", "KalturaCaptionParamsBaseFilter")
        kparams.addStringEnumIfDefined("formatEqual", self.formatEqual)
        kparams.addStringIfDefined("formatIn", self.formatIn)
        return kparams
    # Generated accessors.
    def getFormatEqual(self):
        return self.formatEqual
    def setFormatEqual(self, newFormatEqual):
        self.formatEqual = newFormatEqual
    def getFormatIn(self):
        return self.formatIn
    def setFormatIn(self, newFormatIn):
        self.formatIn = newFormatIn
# @package Kaltura
# @subpackage Client
class KalturaCaptionAssetFilter(KalturaCaptionAssetBaseFilter):
    """Concrete caption-asset filter; adds no fields beyond its base, it only
    overrides the serialized objectType. Generated Kaltura API model."""
    def __init__(self,
            orderBy=NotImplemented,
            advancedSearch=NotImplemented,
            idEqual=NotImplemented,
            idIn=NotImplemented,
            entryIdEqual=NotImplemented,
            entryIdIn=NotImplemented,
            partnerIdEqual=NotImplemented,
            partnerIdIn=NotImplemented,
            sizeGreaterThanOrEqual=NotImplemented,
            sizeLessThanOrEqual=NotImplemented,
            tagsLike=NotImplemented,
            tagsMultiLikeOr=NotImplemented,
            tagsMultiLikeAnd=NotImplemented,
            createdAtGreaterThanOrEqual=NotImplemented,
            createdAtLessThanOrEqual=NotImplemented,
            updatedAtGreaterThanOrEqual=NotImplemented,
            updatedAtLessThanOrEqual=NotImplemented,
            deletedAtGreaterThanOrEqual=NotImplemented,
            deletedAtLessThanOrEqual=NotImplemented,
            typeIn=NotImplemented,
            captionParamsIdEqual=NotImplemented,
            captionParamsIdIn=NotImplemented,
            formatEqual=NotImplemented,
            formatIn=NotImplemented,
            statusEqual=NotImplemented,
            statusIn=NotImplemented,
            statusNotIn=NotImplemented):
        KalturaCaptionAssetBaseFilter.__init__(self,
            orderBy,
            advancedSearch,
            idEqual,
            idIn,
            entryIdEqual,
            entryIdIn,
            partnerIdEqual,
            partnerIdIn,
            sizeGreaterThanOrEqual,
            sizeLessThanOrEqual,
            tagsLike,
            tagsMultiLikeOr,
            tagsMultiLikeAnd,
            createdAtGreaterThanOrEqual,
            createdAtLessThanOrEqual,
            updatedAtGreaterThanOrEqual,
            updatedAtLessThanOrEqual,
            deletedAtGreaterThanOrEqual,
            deletedAtLessThanOrEqual,
            typeIn,
            captionParamsIdEqual,
            captionParamsIdIn,
            formatEqual,
            formatIn,
            statusEqual,
            statusIn,
            statusNotIn)
    # No additional XML-loaded properties beyond the base filter.
    PROPERTY_LOADERS = {
    }
    def fromXml(self, node):
        """Populate this object from an XML response node (base fields first)."""
        KalturaCaptionAssetBaseFilter.fromXml(self, node)
        self.fromXmlImpl(node, KalturaCaptionAssetFilter.PROPERTY_LOADERS)
    def toParams(self):
        """Serialize into a KalturaParams object, overriding only the type tag."""
        kparams = KalturaCaptionAssetBaseFilter.toParams(self)
        kparams.put("objectType", "KalturaCaptionAssetFilter")
        return kparams
# @package Kaltura
# @subpackage Client
class KalturaCaptionParamsFilter(KalturaCaptionParamsBaseFilter):
    """Concrete caption-params filter; adds no fields beyond its base, it only
    overrides the serialized objectType. Generated Kaltura API model."""
    def __init__(self,
            orderBy=NotImplemented,
            advancedSearch=NotImplemented,
            idEqual=NotImplemented,
            idIn=NotImplemented,
            systemNameEqual=NotImplemented,
            systemNameIn=NotImplemented,
            isSystemDefaultEqual=NotImplemented,
            tagsEqual=NotImplemented,
            formatEqual=NotImplemented,
            formatIn=NotImplemented):
        KalturaCaptionParamsBaseFilter.__init__(self,
            orderBy,
            advancedSearch,
            idEqual,
            idIn,
            systemNameEqual,
            systemNameIn,
            isSystemDefaultEqual,
            tagsEqual,
            formatEqual,
            formatIn)
    # No additional XML-loaded properties beyond the base filter.
    PROPERTY_LOADERS = {
    }
    def fromXml(self, node):
        """Populate this object from an XML response node (base fields first)."""
        KalturaCaptionParamsBaseFilter.fromXml(self, node)
        self.fromXmlImpl(node, KalturaCaptionParamsFilter.PROPERTY_LOADERS)
    def toParams(self):
        """Serialize into a KalturaParams object, overriding only the type tag."""
        kparams = KalturaCaptionParamsBaseFilter.toParams(self)
        kparams.put("objectType", "KalturaCaptionParamsFilter")
        return kparams
########## services ##########
# @package Kaltura
# @subpackage Client
class KalturaCaptionAssetService(KalturaServiceBase):
    """Retrieve information and invoke actions on caption Asset.

    Each method queues a service action on the shared client; in multi-request
    mode the queued handle is returned instead of an executed result.
    """
    def __init__(self, client = None):
        KalturaServiceBase.__init__(self, client)
    def add(self, entryId, captionAsset):
        """Add caption asset"""
        kparams = KalturaParams()
        kparams.addStringIfDefined("entryId", entryId)
        kparams.addObjectIfDefined("captionAsset", captionAsset)
        self.client.queueServiceActionCall("caption_captionasset", "add", "KalturaCaptionAsset", kparams)
        if self.client.isMultiRequest():
            return self.client.getMultiRequestResult()
        resultNode = self.client.doQueue()
        return KalturaObjectFactory.create(resultNode, 'KalturaCaptionAsset')
    def delete(self, captionAssetId):
        """Delete caption asset by its id"""
        kparams = KalturaParams()
        kparams.addStringIfDefined("captionAssetId", captionAssetId)
        self.client.queueServiceActionCall("caption_captionasset", "delete", "None", kparams)
        if self.client.isMultiRequest():
            return self.client.getMultiRequestResult()
        # The delete action returns no payload; just flush the queued call.
        self.client.doQueue()
    def export(self, assetId, storageProfileId):
        """manually export an asset"""
        kparams = KalturaParams()
        kparams.addStringIfDefined("assetId", assetId)
        kparams.addIntIfDefined("storageProfileId", storageProfileId)
        self.client.queueServiceActionCall("caption_captionasset", "export", "KalturaFlavorAsset", kparams)
        if self.client.isMultiRequest():
            return self.client.getMultiRequestResult()
        resultNode = self.client.doQueue()
        return KalturaObjectFactory.create(resultNode, 'KalturaFlavorAsset')
    def get(self, captionAssetId):
        """Get caption asset by its id"""
        kparams = KalturaParams()
        kparams.addStringIfDefined("captionAssetId", captionAssetId)
        self.client.queueServiceActionCall("caption_captionasset", "get", "KalturaCaptionAsset", kparams)
        if self.client.isMultiRequest():
            return self.client.getMultiRequestResult()
        resultNode = self.client.doQueue()
        return KalturaObjectFactory.create(resultNode, 'KalturaCaptionAsset')
    def getRemotePaths(self, id):
        """Get remote storage existing paths for the asset"""
        kparams = KalturaParams()
        kparams.addStringIfDefined("id", id)
        self.client.queueServiceActionCall("caption_captionasset", "getRemotePaths", "KalturaRemotePathListResponse", kparams)
        if self.client.isMultiRequest():
            return self.client.getMultiRequestResult()
        resultNode = self.client.doQueue()
        return KalturaObjectFactory.create(resultNode, 'KalturaRemotePathListResponse')
    def getUrl(self, id, storageId = NotImplemented):
        """Get download URL for the asset"""
        kparams = KalturaParams()
        kparams.addStringIfDefined("id", id)
        kparams.addIntIfDefined("storageId", storageId)
        self.client.queueServiceActionCall("caption_captionasset", "getUrl", "None", kparams)
        if self.client.isMultiRequest():
            return self.client.getMultiRequestResult()
        resultNode = self.client.doQueue()
        return getXmlNodeText(resultNode)
    def list(self, filter = NotImplemented, pager = NotImplemented):
        """List caption Assets by filter and pager"""
        kparams = KalturaParams()
        kparams.addObjectIfDefined("filter", filter)
        kparams.addObjectIfDefined("pager", pager)
        self.client.queueServiceActionCall("caption_captionasset", "list", "KalturaCaptionAssetListResponse", kparams)
        if self.client.isMultiRequest():
            return self.client.getMultiRequestResult()
        resultNode = self.client.doQueue()
        return KalturaObjectFactory.create(resultNode, 'KalturaCaptionAssetListResponse')
    def serve(self, captionAssetId):
        """Serves caption by its id; returns the serve URL rather than executing the call"""
        kparams = KalturaParams()
        kparams.addStringIfDefined("captionAssetId", captionAssetId)
        self.client.queueServiceActionCall('caption_captionasset', 'serve', None, kparams)
        return self.client.getServeUrl()
    def serveAsJson(self, captionAssetId):
        """Serves caption file as Json by its ID; returns the serve URL"""
        kparams = KalturaParams()
        kparams.addStringIfDefined("captionAssetId", captionAssetId)
        self.client.queueServiceActionCall('caption_captionasset', 'serveAsJson', None, kparams)
        return self.client.getServeUrl()
    def serveByEntryId(self, entryId, captionParamId = NotImplemented):
        """Serves caption by entry id and caption params id; returns the serve URL"""
        kparams = KalturaParams()
        kparams.addStringIfDefined("entryId", entryId)
        kparams.addIntIfDefined("captionParamId", captionParamId)
        self.client.queueServiceActionCall('caption_captionasset', 'serveByEntryId', None, kparams)
        return self.client.getServeUrl()
    def serveWebVTT(self, captionAssetId, segmentDuration = 30, segmentIndex = NotImplemented, localTimestamp = 10000):
        """Serves caption by its id converting it to segmented WebVTT; returns the serve URL"""
        kparams = KalturaParams()
        kparams.addStringIfDefined("captionAssetId", captionAssetId)
        kparams.addIntIfDefined("segmentDuration", segmentDuration)
        kparams.addIntIfDefined("segmentIndex", segmentIndex)
        kparams.addIntIfDefined("localTimestamp", localTimestamp)
        self.client.queueServiceActionCall('caption_captionasset', 'serveWebVTT', None, kparams)
        return self.client.getServeUrl()
    def setAsDefault(self, captionAssetId):
        """Marks the caption as default and removes that mark from all other caption assets of the entry."""
        kparams = KalturaParams()
        kparams.addStringIfDefined("captionAssetId", captionAssetId)
        self.client.queueServiceActionCall("caption_captionasset", "setAsDefault", "None", kparams)
        if self.client.isMultiRequest():
            return self.client.getMultiRequestResult()
        # The setAsDefault action returns no payload; just flush the queued call.
        self.client.doQueue()
    def setContent(self, id, contentResource):
        """Update content of caption asset"""
        kparams = KalturaParams()
        kparams.addStringIfDefined("id", id)
        kparams.addObjectIfDefined("contentResource", contentResource)
        self.client.queueServiceActionCall("caption_captionasset", "setContent", "KalturaCaptionAsset", kparams)
        if self.client.isMultiRequest():
            return self.client.getMultiRequestResult()
        resultNode = self.client.doQueue()
        return KalturaObjectFactory.create(resultNode, 'KalturaCaptionAsset')
    def update(self, id, captionAsset):
        """Update caption asset"""
        kparams = KalturaParams()
        kparams.addStringIfDefined("id", id)
        kparams.addObjectIfDefined("captionAsset", captionAsset)
        self.client.queueServiceActionCall("caption_captionasset", "update", "KalturaCaptionAsset", kparams)
        if self.client.isMultiRequest():
            return self.client.getMultiRequestResult()
        resultNode = self.client.doQueue()
        return KalturaObjectFactory.create(resultNode, 'KalturaCaptionAsset')
# @package Kaltura
# @subpackage Client
class KalturaCaptionParamsService(KalturaServiceBase):
    """Add & Manage Caption Params.

    Each method queues a service action on the shared client; in multi-request
    mode the queued handle is returned instead of an executed result.
    """
    def __init__(self, client = None):
        KalturaServiceBase.__init__(self, client)
    def add(self, captionParams):
        """Add new Caption Params"""
        kparams = KalturaParams()
        kparams.addObjectIfDefined("captionParams", captionParams)
        self.client.queueServiceActionCall("caption_captionparams", "add", "KalturaCaptionParams", kparams)
        if self.client.isMultiRequest():
            return self.client.getMultiRequestResult()
        resultNode = self.client.doQueue()
        return KalturaObjectFactory.create(resultNode, 'KalturaCaptionParams')
    def delete(self, id):
        """Delete Caption Params by ID"""
        kparams = KalturaParams()
        kparams.addIntIfDefined("id", id)
        self.client.queueServiceActionCall("caption_captionparams", "delete", "None", kparams)
        if self.client.isMultiRequest():
            return self.client.getMultiRequestResult()
        # The delete action returns no payload; just flush the queued call.
        self.client.doQueue()
    def get(self, id):
        """Get Caption Params by ID"""
        kparams = KalturaParams()
        kparams.addIntIfDefined("id", id)
        self.client.queueServiceActionCall("caption_captionparams", "get", "KalturaCaptionParams", kparams)
        if self.client.isMultiRequest():
            return self.client.getMultiRequestResult()
        resultNode = self.client.doQueue()
        return KalturaObjectFactory.create(resultNode, 'KalturaCaptionParams')
    def list(self, filter = NotImplemented, pager = NotImplemented):
        """List Caption Params by filter with paging support (By default - all system default params will be listed too)"""
        kparams = KalturaParams()
        kparams.addObjectIfDefined("filter", filter)
        kparams.addObjectIfDefined("pager", pager)
        self.client.queueServiceActionCall("caption_captionparams", "list", "KalturaCaptionParamsListResponse", kparams)
        if self.client.isMultiRequest():
            return self.client.getMultiRequestResult()
        resultNode = self.client.doQueue()
        return KalturaObjectFactory.create(resultNode, 'KalturaCaptionParamsListResponse')
    def update(self, id, captionParams):
        """Update Caption Params by ID"""
        kparams = KalturaParams()
        kparams.addIntIfDefined("id", id)
        kparams.addObjectIfDefined("captionParams", captionParams)
        self.client.queueServiceActionCall("caption_captionparams", "update", "KalturaCaptionParams", kparams)
        if self.client.isMultiRequest():
            return self.client.getMultiRequestResult()
        resultNode = self.client.doQueue()
        return KalturaObjectFactory.create(resultNode, 'KalturaCaptionParams')
########## main ##########
class KalturaCaptionClientPlugin(KalturaClientPlugin):
    """Client plugin registering the caption services, enums and object types."""
    # Lazily-created singleton instance.
    instance = None
    # @return KalturaCaptionClientPlugin
    @staticmethod
    def get():
        """Return the singleton plugin instance, creating it on first access."""
        # PEP 8: identity comparison with None, not equality.
        if KalturaCaptionClientPlugin.instance is None:
            KalturaCaptionClientPlugin.instance = KalturaCaptionClientPlugin()
        return KalturaCaptionClientPlugin.instance
    # @return array<KalturaServiceBase>
    def getServices(self):
        """Map service aliases to their service classes."""
        return {
            'captionAsset': KalturaCaptionAssetService,
            'captionParams': KalturaCaptionParamsService,
        }
    def getEnums(self):
        """Map enum names to their enum classes for response unmarshalling."""
        return {
            'KalturaCaptionAssetStatus': KalturaCaptionAssetStatus,
            'KalturaCaptionAssetOrderBy': KalturaCaptionAssetOrderBy,
            'KalturaCaptionParamsOrderBy': KalturaCaptionParamsOrderBy,
            'KalturaCaptionSource': KalturaCaptionSource,
            'KalturaCaptionType': KalturaCaptionType,
        }
    def getTypes(self):
        """Map objectType names to their model classes for response unmarshalling."""
        return {
            'KalturaCaptionAsset': KalturaCaptionAsset,
            'KalturaCaptionParams': KalturaCaptionParams,
            'KalturaCaptionPlaybackPluginData': KalturaCaptionPlaybackPluginData,
            'KalturaCaptionAssetListResponse': KalturaCaptionAssetListResponse,
            'KalturaCaptionParamsListResponse': KalturaCaptionParamsListResponse,
            'KalturaConvertCaptionAssetJobData': KalturaConvertCaptionAssetJobData,
            'KalturaCopyCaptionsJobData': KalturaCopyCaptionsJobData,
            'KalturaParseMultiLanguageCaptionAssetJobData': KalturaParseMultiLanguageCaptionAssetJobData,
            'KalturaCaptionAssetBaseFilter': KalturaCaptionAssetBaseFilter,
            'KalturaCaptionParamsBaseFilter': KalturaCaptionParamsBaseFilter,
            'KalturaCaptionAssetFilter': KalturaCaptionAssetFilter,
            'KalturaCaptionParamsFilter': KalturaCaptionParamsFilter,
        }
    # @return string
    def getName(self):
        """Return the plugin's registration name."""
        return 'caption'
/CephQeSdk-1.0.0.tar.gz/CephQeSdk-1.0.0/src/RhcsQeSdk/core/cli/ceph/ceph.py | import logging
import RhcsQeSdk.core.cli.fabfile as fabfile
from RhcsQeSdk.core.cli.ceph.auth import Auth
from RhcsQeSdk.core.cli.ceph.balancer import Balancer
from RhcsQeSdk.core.cli.ceph.cephadm import CephAdm
from RhcsQeSdk.core.cli.ceph.config import Config
from RhcsQeSdk.core.cli.ceph.config_key import ConfigKey
from RhcsQeSdk.core.cli.ceph.health import Health
from RhcsQeSdk.core.cli.ceph.mds import Mds
from RhcsQeSdk.core.cli.ceph.mgr import Mgr
from RhcsQeSdk.core.cli.ceph.mon import Mon
from RhcsQeSdk.core.cli.ceph.orch.orch import Orch
from RhcsQeSdk.core.cli.ceph.osd import Osd
# Module-level logger: DEBUG-level stream handler with timestamp/level/location format.
# NOTE(review): addHandler runs at import time; module caching prevents duplicate
# handlers on re-import, but re-executing this module would attach a second handler.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(
    "%(asctime)s - %(levelname)s - %(name)s:%(lineno)d - %(message)s"
)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
stream_handler.setLevel(logging.DEBUG)
logger.addHandler(stream_handler)
class Ceph:
"""This module provides CLI interface for deployment and maintenance of ceph cluster."""
    def __init__(self, base_cmd=""):
        """Build the `ceph` command wrapper and wire up its sub-command helpers.

        Args:
            base_cmd (str): prefix prepended to "ceph" (e.g. a container-exec
                prefix); empty by default.
        """
        self.base_cmd = f"{base_cmd}ceph"
        # Sub-command helpers; each receives the composed base command so the
        # generated CLI strings share the same prefix.
        self.config = Config(self.base_cmd)
        self.osd = Osd(self.base_cmd)
        self.auth = Auth(self.base_cmd)
        self.cephadm = CephAdm(self.base_cmd)
        self.orch = Orch(self.base_cmd)
        self.health = Health(self.base_cmd)
        self.balancer = Balancer(self.base_cmd)
        self.configkey = ConfigKey(self.base_cmd)
        self.mds = Mds(self.base_cmd)
        self.mon = Mon(self.base_cmd)
        self.mgr = Mgr(self.base_cmd)
    def version(self, **kw):
        """
        Runs `ceph version` to display the mon daemon version.

        Args:
            kw(Dict): outer dict whose "kw" key holds the call options;
                the "env_config" option (if present) is forwarded to fabfile
                as the execution configuration.

        Example::

            Supported Keys:
                None

        Returns:
            Dict(str)
              A mapping of host strings to the given task's return value for that host's execution run.
        """
        # Unwrap the nested options dict used throughout this SDK.
        kw = kw.get("kw")
        cmd = self.base_cmd + " version"
        logger.info(f"Running command {cmd}")
        return fabfile.run_command(cmd, config=kw.get("env_config"))
def status(self, **kw):
"""
This method is used to show cluster status.
Args:
None
Returns:
Dict(str)
A mapping of host strings to the given task's return value for that host's execution run.
"""
kw = kw.get("kw")
cmd = self.base_cmd + " status"
logger.info(f"Running command {cmd}")
return fabfile.run_command(cmd, config=kw.get("env_config"))
def quorum_status(self, **kw):
"""
This method is used to report the status of monitor quorum.
Args:
None
Returns:
Dict(str)
A mapping of host strings to the given task's return value for that host's execution run.
"""
kw = kw.get("kw")
cmd = self.base_cmd + " quorum_status"
logger.info(f"Running command {cmd}")
return fabfile.run_command(cmd, config=kw.get("env_config"))
def daemon(self, **kw):
"""Submit admin-socket commands.
Args:
kw(dict): Key/value pairs that needs to be provided to the installer
Example::
Supported keys:
daemon-name(str): takes name of the daemon
path-to-socket-file(str): takes path to the socket file
command(str): takes input the command
Returns:
Dict(str)
A mapping of host strings to the given task’s return value for that host’s execution run
"""
kw = kw.get("kw")
daemon_name = kw.get("daemon-name", "")
path_to_socket_file = kw.get("path-to-socket-file", "")
command = kw.get("command", "")
cmd = self.base_cmd + f" daemon {daemon_name}{path_to_socket_file} {command}"
logger.info(f"Running command {cmd}")
return fabfile.run_command(cmd, config=kw.get("env_config"))
def log(self, **kw):
"""Log supplied text to the monitor log.
Args:
kw(dict): Key/value pairs that needs to be provided to the installer
Example::
Supported keys:
logtext(str): takes the logtext
command(str): takes input the command
Returns:
Dict(str)
A mapping of host strings to the given task’s return value for that host’s execution run
"""
kw = kw.get("kw")
logtext = kw.get("logtext", "")
command = kw.get("command", "")
cmd = self.base_cmd + f" log {command} [{logtext}]"
logger.info(f"Running command {cmd}")
return fabfile.run_command(cmd, config=kw.get("env_config"))
def df(self, **kw):
"""
The command df will display the cluster's free space status.
Args:
detail(str): to show more information about the cluster.
Returns:
Dict(str)
A mapping of host strings to the given task's return value for that host's execution run.
"""
kw = kw.get("kw")
cmd = self.base_cmd + " df" + (" detail" if kw.get("detail") else "")
logger.info(f"Running command {cmd}")
return fabfile.run_command(cmd, config=kw.get("env_config")) | PypiClean |
/parts/Top/Hats/Turban.py | def Turban(color):
return (
'<mask id="mask0_0_775" style="mask-type:alpha" maskUnits="userSpaceOnUse" x="74" y="98" width="118" height="99">'
' <path fill-rule="evenodd" clip-rule="evenodd"'
' d="M133.498 165.841C122.124 166.219 117.05 171.721 113.229 166.13C110.361 161.932 111.561 154.874 114.241 150.903C118.054 145.251 123.227 147.985 129.01 147.34C130.583 147.165 132.163 146.723 133.498 146C134.834 146.723 136.414 147.165 137.986 147.34C143.77 147.985 148.943 145.251 152.756 150.903C155.435 154.874 156.635 161.932 153.767 166.13C149.946 171.721 144.873 165.463 133.498 165.841ZM188.72 98C185.336 112.075 183.781 126.434 181.328 140.671C180.816 143.639 180.257 146.596 179.662 149.55C179.538 150.17 179.415 152.473 178.811 152.764C176.982 153.648 173.254 148.947 172.257 147.885C169.754 145.219 167.272 142.529 164.223 140.437C158.063 136.21 150.85 133.711 143.345 133.118C140.205 132.869 135.959 133.303 133 135.11C130.041 133.303 125.795 132.869 122.654 133.118C115.149 133.711 107.937 136.21 101.777 140.437C98.7278 142.529 96.2462 145.219 93.7425 147.885C92.7457 148.947 89.0182 153.648 87.1891 152.764C86.5853 152.473 86.4623 150.17 86.3375 149.55C85.7432 146.596 85.1835 143.639 84.6722 140.671C82.219 126.434 80.6643 112.075 77.2805 98C76.2959 98 75.4321 116.748 75.3223 118.495C74.8751 125.589 74.353 132.525 75.0202 139.626C76.1705 151.875 77.3696 167.234 86.5918 176.588C94.9247 185.039 107.023 186.806 117.459 192.141C118.802 192.828 120.584 193.676 122.506 194.371C124.531 195.934 128.546 197 133.172 197C138.024 197 142.205 195.827 144.12 194.138C145.801 193.493 147.345 192.753 148.541 192.141C158.976 186.805 171.075 185.039 179.408 176.588C188.63 167.234 189.829 151.875 190.98 139.626C191.647 132.525 191.125 125.589 190.678 118.495C190.568 116.748 189.704 98 188.72 98Z"'
' fill="white" />'
'</mask>'
'<path fill-rule="evenodd" clip-rule="evenodd"'
' d="M190.47 97.5C191.471 95.0906 192 92.5798 192 90C192 71.7746 165.585 57 133 57C100.415 57 74 71.7746 74 90C74 92.5798 74.5293 95.0906 75.5304 97.5C81.6019 82.8879 105.028 72 133 72C160.972 72 184.398 82.8879 190.47 97.5Z"'
' fill="#EDECE3" />'
'<path fill-rule="evenodd" clip-rule="evenodd"'
' d="M49.0002 94.3235C48.9335 133.499 78.0002 141 78.0002 141C72.5578 91.4478 101.536 75.8486 124.529 63.4715C127.469 61.8887 130.312 60.3587 132.971 58.8171C135.641 60.3664 138.497 61.904 141.452 63.4952C164.429 75.8686 193.418 91.4794 188 141C188 141 217.066 132.54 217 94.3235C216.918 47.1483 164.851 3 135 3C134.326 3 133.656 3.02963 132.992 3.08807C132.333 3.02963 131.668 3 131 3C101.074 3 49.0804 47.1483 49.0002 94.3235Z"'
' fill="#124C74" />'
'<mask id="mask1_0_775" style="mask-type:alpha" maskUnits="userSpaceOnUse" x="49" y="3" width="168" height="138">'
' <path fill-rule="evenodd" clip-rule="evenodd"'
' d="M49.0002 94.3235C48.9335 133.499 78.0002 141 78.0002 141C72.5578 91.4478 101.536 75.8486 124.529 63.4715C127.469 61.8887 130.312 60.3587 132.971 58.8171C135.641 60.3664 138.497 61.904 141.452 63.4952C164.429 75.8686 193.418 91.4794 188 141C188 141 217.066 132.54 217 94.3235C216.918 47.1483 164.851 3 135 3C134.326 3 133.656 3.02963 132.992 3.08807C132.333 3.02963 131.668 3 131 3C101.074 3 49.0804 47.1483 49.0002 94.3235Z"'
' fill="white" />'
'</mask>'
'<g mask="url(#mask1_0_775)">'
' <rect x="1" width="264" height="280" fill="{color}" />'
'</g>'
'<path fill-rule="evenodd" clip-rule="evenodd"'
' d="M49.0134 95.8992C49.7161 133.701 78.0002 141 78.0002 141C78.0002 141 48.9335 133.934 49.0002 97.0294C49.0008 96.6525 49.0052 96.2757 49.0134 95.8992ZM77.3339 129.68C77.4832 91.8227 103.508 78.6258 124.529 67.9659C135.534 62.3853 145.168 57.5 149 50.1358C153.126 42.8892 154.39 36.1953 153.646 30.4681C153.141 34.8352 151.67 39.5668 149 44.5441C145.168 52.3615 135.534 57.5475 124.529 63.4715C103.387 74.8525 77.1834 88.9578 77.3339 129.68Z"'
' fill="black" fill-opacity="0.16" />'
).format(color=color) | PypiClean |
/Muntjac-1.1.2.tar.gz/Muntjac-1.1.2/muntjac/data/util/default_item_sorter.py | from muntjac.data.util.item_sorter import IItemSorter
class DefaultItemSorter(IItemSorter):
    """Provides a default implementation of an IItemSorter.

    The C{DefaultItemSorter} adheres to the L{ISortable.sort} rules and
    sorts the container according to the properties given using
    L{setSortProperties}. A Comparator is used for comparing the individual
    C{Property} values. The comparator can be set using the constructor. If
    no comparator is provided a default comparator is used.
    """

    def __init__(self, propertyValueComparator=None):
        """Constructs a DefaultItemSorter which uses the C{Comparator}
        indicated by the C{propertyValueComparator} parameter for
        comparing C{Property} values. Uses the default C{Comparator}
        for comparing C{Property} values if propertyValueComparator is None.

        @param propertyValueComparator:
                 The comparator to use when comparing individual
                 C{Property} values
        """
        self._sortPropertyIds = None
        self._sortDirections = None
        self._container = None
        # Fall back to the default comparator directly, instead of the
        # former recursive ``DefaultItemSorter.__init__`` self-call which
        # re-ran the whole initializer just to set one attribute.
        if propertyValueComparator is None:
            propertyValueComparator = DefaultPropertyValueComparator()
        self._propertyValueComparator = propertyValueComparator

    def __call__(self, o1, o2):
        """Allow the sorter itself to be used as a comparison callable."""
        return self.compare(o1, o2)

    def compare(self, o1, o2):
        """Compare the items with ids C{o1} and C{o2} using the sort
        properties and directions set via L{setSortProperties}.

        @return: a negative, zero, or positive integer per the usual
            comparator contract.
        """
        item1 = self._container.getItem(o1)
        item2 = self._container.getItem(o2)

        # Items can be null if the container is filtered. Null is considered
        # "less" than not-null.
        # NOTE(review): the return values below actually order a None item1
        # AFTER a non-None item2 (and vice versa); kept as-is to preserve
        # the established sort order.
        if item1 is None:
            if item2 is None:
                return 0
            else:
                return 1
        elif item2 is None:
            return -1

        # The first sort property that distinguishes the items decides.
        for i in range(len(self._sortPropertyIds)):
            result = self.compareProperty(self._sortPropertyIds[i],
                    self._sortDirections[i], item1, item2)
            if result != 0:
                return result

        return 0

    def compareProperty(self, propertyId, sortDirection, item1, item2):
        """Compares the property indicated by C{propertyId} in the items
        indicated by C{item1} and C{item2} for order. Returns a negative
        integer, zero, or a positive integer as the property value in
        the first item is less than, equal to, or greater than the property
        value in the second item. If the C{sortDirection} is false the
        returned value is negated.

        The comparator set for this C{DefaultItemSorter} is used for
        comparing the two property values.

        @param propertyId:
                   The property id for the property that is used for
                   comparison.
        @param sortDirection:
                   The direction of the sort. A false value negates the
                   result.
        @param item1:
                   The first item to compare.
        @param item2:
                   The second item to compare.
        @return: a negative, zero, or positive integer if the property value
                 in the first item is less than, equal to, or greater than
                 the property value in the second item. Negated if
                 C{sortDirection} is false.
        """
        property1 = item1.getItemProperty(propertyId)
        property2 = item2.getItemProperty(propertyId)

        # Get the values to compare; a missing property counts as None.
        value1 = None if property1 is None else property1.getValue()
        value2 = None if property2 is None else property2.getValue()

        # A descending direction simply swaps the operand order.
        if sortDirection:
            return self._propertyValueComparator.compare(value1, value2)
        return self._propertyValueComparator.compare(value2, value1)

    def setSortProperties(self, container, propertyId, ascending):
        """Remember C{container} and the sortable property ids / directions
        to be used by subsequent L{compare} calls.

        Non-sortable property ids are silently dropped; a missing direction
        entry defaults to ascending (True).
        """
        self._container = container

        # Removes any non-sortable property ids.
        ids = []
        orders = []
        sortable = container.getSortableContainerPropertyIds()
        for i in range(len(propertyId)):
            if propertyId[i] in sortable:
                ids.append(propertyId[i])
                orders.append(bool(ascending[i]) if i < len(ascending)
                              else True)

        self._sortPropertyIds = ids
        self._sortDirections = orders
class DefaultPropertyValueComparator(object):
    """Provides a default comparator used for comparing L{Property} values.

    The C{DefaultPropertyValueComparator} assumes all non-None objects it
    compares are mutually comparable; None is ordered before any non-None
    value.
    """

    def __call__(self, o1, o2):
        # Bug fix: previously compared ``o1`` with itself, so callable-style
        # invocation always returned 0.
        return self.compare(o1, o2)

    def compare(self, o1, o2):
        """Return -1, 0 or 1 as C{o1} is less than, equal to, or greater
        than C{o2}. None is considered less than any non-None value.
        """
        # Normal non-null comparison. ``(a > b) - (a < b)`` is the portable
        # replacement for the Python-2-only ``cmp`` builtin.
        if o1 is not None and o2 is not None:
            return (o1 > o2) - (o1 < o2)
        if o1 == o2:
            # Objects are equal if both are null.
            return 0
        # null is less than non-null.
        return -1 if o1 is None else 1
/Aitomatic-Contrib-23.8.10.3.tar.gz/Aitomatic-Contrib-23.8.10.3/src/aito/iot_mgmt/data/models.py | import warnings
from django.db.models import (
Model,
CharField, DateField, FloatField, IntegerField,
JSONField,
ForeignKey, ManyToManyField,
PROTECT)
from django.db.models.signals import m2m_changed, pre_delete
from aito.iot_mgmt.utils import MAX_CHAR_LEN, clean_lower_str, clean_upper_str # noqa: E501
# pylint: disable=line-too-long
class LogicalDataType(Model):
    """Logical Data Type: a uniquely-named catalog entry."""

    # Unique, indexed name; normalized to lower case on save.
    name = CharField(
        verbose_name='Logical Data Type',
        blank=False,
        null=False,
        unique=True,
        db_index=True,
        max_length=MAX_CHAR_LEN)

    class Meta:
        """Metadata."""

        verbose_name = 'Logical Data Type'
        verbose_name_plural = 'Logical Data Types'
        ordering = ('name',)

    def __str__(self):
        """Return string repr."""
        return 'LogicalDataTp ' + self.name.upper()

    def save(self, *args, **kwargs):
        """Lower-case the name before persisting."""
        self.name = clean_lower_str(self.name)
        super().save(*args, **kwargs)
class NumericMeasurementUnit(Model):
    """Numeric Measurement Unit: a uniquely-named unit catalog entry."""

    # Unique, indexed unit name; only surrounding whitespace is stripped on
    # save (case is preserved, unlike the other name fields in this module).
    name = CharField(
        verbose_name='Numeric Measurement Unit',
        blank=False,
        null=False,
        unique=True,
        db_index=True,
        max_length=MAX_CHAR_LEN)

    class Meta:
        """Metadata."""

        verbose_name = 'Numeric Measurement Unit'
        verbose_name_plural = 'Numeric Measurement Units'
        ordering = ('name',)

    def __str__(self):
        """Return string repr."""
        return 'NumMeasureUnit "{}"'.format(self.name)

    def save(self, *args, **kwargs):
        """Trim surrounding whitespace from the name before persisting."""
        self.name = self.name.strip()
        super().save(*args, **kwargs)
class EquipmentDataFieldType(Model):
    """Equipment Data Field Type: a uniquely-named catalog entry."""

    # Unique, indexed name; normalized to lower case on save.
    name = CharField(
        verbose_name='Equipment Data Field Type',
        blank=False,
        null=False,
        unique=True,
        db_index=True,
        max_length=MAX_CHAR_LEN)

    class Meta:
        """Metadata."""

        verbose_name = 'Equipment Data Field Type'
        verbose_name_plural = 'Equipment Data Field Types'
        ordering = ('name',)

    def __str__(self):
        """Return string repr."""
        return 'EqDataFldTp ' + self.name.upper()

    def save(self, *args, **kwargs):
        """Lower-case the name before persisting."""
        self.name = clean_lower_str(self.name)
        super().save(*args, **kwargs)
class EquipmentGeneralType(Model):
    """Equipment General Type: a uniquely-named equipment category."""

    # Unique, indexed name; normalized to lower case on save.
    name = CharField(
        verbose_name='Equipment General Type',
        blank=False,
        null=False,
        unique=True,
        db_index=True,
        max_length=MAX_CHAR_LEN)

    class Meta:
        """Metadata."""

        verbose_name = 'Equipment General Type'
        verbose_name_plural = 'Equipment General Types'
        ordering = ('name',)

    def __str__(self):
        """Return string repr."""
        return 'EqGenTp ' + self.name.upper()

    def save(self, *args, **kwargs):
        """Lower-case the name before persisting."""
        self.name = clean_lower_str(self.name)
        super().save(*args, **kwargs)
class EquipmentDataField(Model):
    """Equipment Data Field.

    A named data field of an Equipment General Type, carrying its field
    type, optional logical data type and measurement unit, numeric-null
    sentinels, an optional value range, and the Equipment Unique Types
    linked to it.
    """

    # Reverse-accessor names used by the relations declared below.
    RELATED_NAME = 'equipment_data_fields'
    RELATED_QUERY_NAME = 'equipment_data_field'

    # Default numeric-null sentinels (presumably readings at/beyond these
    # bounds are treated as null by consumers — TODO confirm).
    DEFAULT_UPPER_NUMERIC_NULL = 2 ** 30  # << MaxInt = 2 ** 31 - 1
    DEFAULT_LOWER_NUMERIC_NULL = -DEFAULT_UPPER_NUMERIC_NULL

    equipment_general_type = \
        ForeignKey(
            to=EquipmentGeneralType,
            related_name=RELATED_NAME,
            related_query_name=RELATED_QUERY_NAME,
            blank=False,
            null=False,
            on_delete=PROTECT)

    # Field name; unique per equipment_general_type (see Meta), lower-cased
    # on save.
    name = \
        CharField(
            verbose_name='Equipment Data Field',
            blank=False,
            null=False,
            db_index=True,
            max_length=MAX_CHAR_LEN)

    equipment_data_field_type = \
        ForeignKey(
            to=EquipmentDataFieldType,
            related_name=RELATED_NAME,
            related_query_name=RELATED_QUERY_NAME,
            blank=False,
            null=False,
            on_delete=PROTECT)

    # Optional logical type of the values; None renders as 'UNTYPED' in
    # __str__.
    logical_data_type = \
        ForeignKey(
            to=LogicalDataType,
            related_name=RELATED_NAME,
            related_query_name=RELATED_QUERY_NAME,
            blank=True,
            null=True,
            on_delete=PROTECT)

    numeric_measurement_unit = \
        ForeignKey(
            to=NumericMeasurementUnit,
            related_name=RELATED_NAME,
            related_query_name=RELATED_QUERY_NAME,
            blank=True,
            null=True,
            on_delete=PROTECT)

    lower_numeric_null = \
        FloatField(
            blank=False,
            null=False,
            default=DEFAULT_LOWER_NUMERIC_NULL)

    upper_numeric_null = \
        FloatField(
            blank=False,
            null=False,
            default=DEFAULT_UPPER_NUMERIC_NULL)

    # Optional value range; None means unspecified.
    min_val = \
        FloatField(
            blank=True,
            null=True)

    max_val = \
        FloatField(
            blank=True,
            null=True)

    equipment_unique_types = \
        ManyToManyField(
            to='EquipmentUniqueType',
            related_name=RELATED_NAME + '_reverse',
            related_query_name=RELATED_QUERY_NAME,
            blank=True)

    class Meta:
        """Metadata."""

        verbose_name = 'Equipment Data Field'
        verbose_name_plural = 'Equipment Data Fields'

        unique_together = 'equipment_general_type', 'name'

        ordering = 'equipment_general_type', 'name'

    def __str__(self):
        """Return string repr."""
        return ((f'{self.equipment_general_type.name.upper()} '
                 f'[{self.equipment_data_field_type.name}] '
                 f'{self.name} [') +
                (self.logical_data_type.name
                 if self.logical_data_type
                 else 'UNTYPED') +
                (f', unit {self.numeric_measurement_unit.name.upper()}'
                 if self.numeric_measurement_unit and self.numeric_measurement_unit.name  # noqa: E501
                 else '') +
                f', nulls ({self.lower_numeric_null}, {self.upper_numeric_null})' +  # noqa: E501
                (''
                 if self.min_val is None
                 else f', min {self.min_val}') +
                (''
                 if self.max_val is None
                 else f', max {self.max_val}') +
                ']')

    def save(self, *args, **kwargs):
        """Lower-case the name, then save."""
        self.name = clean_lower_str(self.name)
        super().save(*args, **kwargs)
class EquipmentUniqueTypeGroup(Model):
    """Equipment Unique Type Group.

    A named group of Equipment Unique Types sharing one Equipment General
    Type. Its ``equipment_data_fields`` relation is recomputed by the
    m2m-changed signal receivers in this module as the union of the member
    unique types' data fields.
    """

    # Reverse-accessor names used by the relations declared below.
    RELATED_NAME = 'equipment_unique_type_groups'
    RELATED_QUERY_NAME = 'equipment_unique_type_group'

    equipment_general_type = \
        ForeignKey(
            to=EquipmentGeneralType,
            related_name=RELATED_NAME,
            related_query_name=RELATED_QUERY_NAME,
            blank=False,
            null=False,
            on_delete=PROTECT)

    # Globally-unique group name; lower-cased on save.
    name = \
        CharField(
            verbose_name='Equipment Unique Type Group',
            blank=False,
            null=False,
            unique=True,
            db_index=True,
            max_length=MAX_CHAR_LEN)

    equipment_unique_types = \
        ManyToManyField(
            to='EquipmentUniqueType',
            related_name=RELATED_NAME + '_reverse',
            related_query_name=RELATED_QUERY_NAME,
            blank=True)

    # Derived relation, maintained by the signal receivers below.
    equipment_data_fields = \
        ManyToManyField(
            to=EquipmentDataField,
            related_name=RELATED_NAME,
            related_query_name=RELATED_QUERY_NAME,
            blank=True)

    class Meta:
        """Metadata."""

        verbose_name = 'Equipment Unique Type Group'
        verbose_name_plural = 'Equipment Unique Type Groups'

        ordering = 'equipment_general_type', 'name'

    def __str__(self):
        """Return string repr."""
        return (f'{self.equipment_general_type.name.upper()} '
                f'UnqTpGrp {self.name.upper()}')

    def save(self, *args, **kwargs):
        """Lower-case the name, then save."""
        self.name = clean_lower_str(self.name)
        super().save(*args, **kwargs)
class EquipmentUniqueType(Model):
    """Equipment Unique Type.

    A specific, uniquely-named variant of an Equipment General Type, linked
    to the data fields it carries and the groups it belongs to. Both M2M
    relations reuse the through tables declared on the counterpart models,
    so either side sees the same links.
    """

    # Reverse-accessor names used by the relations declared below.
    RELATED_NAME = 'equipment_unique_types'
    RELATED_QUERY_NAME = 'equipment_unique_type'

    equipment_general_type = \
        ForeignKey(
            to=EquipmentGeneralType,
            related_name=RELATED_NAME,
            related_query_name=RELATED_QUERY_NAME,
            blank=False,
            null=False,
            on_delete=PROTECT)

    # Globally-unique type name; lower-cased on save.
    name = \
        CharField(
            verbose_name='Equipment Unique Type',
            blank=False,
            null=False,
            unique=True,
            db_index=True,
            max_length=MAX_CHAR_LEN)

    # Shares EquipmentDataField.equipment_unique_types' through table.
    equipment_data_fields = \
        ManyToManyField(
            to=EquipmentDataField,
            through=EquipmentDataField.equipment_unique_types.through,
            related_name=RELATED_NAME + '_reverse',
            related_query_name=RELATED_QUERY_NAME,
            blank=True)

    # Shares EquipmentUniqueTypeGroup.equipment_unique_types' through table.
    equipment_unique_type_groups = \
        ManyToManyField(
            to=EquipmentUniqueTypeGroup,
            through=EquipmentUniqueTypeGroup.equipment_unique_types.through,
            related_name=RELATED_NAME + '_reverse',
            related_query_name=RELATED_QUERY_NAME,
            blank=True)

    class Meta:
        """Metadata."""

        verbose_name = 'Equipment Unique Type'
        verbose_name_plural = 'Equipment Unique Types'

        ordering = 'equipment_general_type', 'name'

    def __str__(self):
        """Return string repr."""
        return (f'{self.equipment_general_type.name.upper()} '
                f'UnqTp {self.name.upper()}')

    def save(self, *args, **kwargs):
        """Lower-case the name, then save."""
        self.name = clean_lower_str(self.name)
        super().save(*args, **kwargs)
def equipment_unique_types_equipment_data_fields_m2m_changed(
        sender, instance, action, reverse, model, pk_set, using,
        *args, **kwargs):
    """Receiver for M2M changes on EquipmentUniqueType.equipment_data_fields.

    - ``pre_add``: silently drops from ``pk_set`` any related objects whose
      Equipment General Type differs from ``instance``'s.
    - ``post_add`` / ``post_remove``: recomputes each affected Equipment
      Unique Type Group's data fields as the union of its member unique
      types' data fields.
    - ``pre_clear``: performs the same recomputation before the links are
      removed (``pk_set`` is not available during clears).
    """
    # pylint: disable=too-many-arguments,too-many-branches,too-many-locals
    # pylint: disable=unused-argument
    if action == 'pre_add':
        # Relation targets must share the instance's Equipment General Type.
        invalid_objs = \
            model.objects \
            .filter(pk__in=pk_set) \
            .exclude(equipment_general_type=instance.equipment_general_type)

        if invalid_objs:
            warnings.warn(
                message=(f'*** {instance}: CANNOT ADD INVALID {invalid_objs} '
                         'WITH DIFFERENT EQUIPMENT GENERAL TYPE(S) ***'))

            # Mutating pk_set during pre_add prevents the invalid additions.
            pk_set.difference_update(
                i['pk']
                for i in invalid_objs.values('pk'))

    elif action in ('post_add', 'post_remove') and pk_set:
        if (model is EquipmentDataField) and \
                instance.equipment_unique_type_groups.count():
            # Forward direction: ``instance`` is an EquipmentUniqueType whose
            # data fields changed; refresh every group containing it.
            equipment_unique_type_groups_to_update = \
                instance.equipment_unique_type_groups.all()

            print(
                f'{instance}: Changed Equipment Data Fields: {action.upper()}:'
                f' Updating Equipment Data Fields of {equipment_unique_type_groups_to_update}...'  # noqa: E501
            )

            for equipment_unique_type_group_to_update in \
                    equipment_unique_type_groups_to_update:
                # Group fields := union of all member unique types' fields.
                equipment_unique_type_group_to_update.equipment_data_fields.set(  # noqa: E501
                    equipment_unique_type_group_to_update.equipment_unique_types.all()[0].equipment_data_fields.all().union(  # noqa: E501
                        *(equipment_unique_type.equipment_data_fields.all()
                          for equipment_unique_type in
                          equipment_unique_type_group_to_update.equipment_unique_types.all()[1:]),  # noqa: E501
                        all=False),
                    clear=False)

        elif model is EquipmentUniqueType:
            # Reverse direction: ``instance`` is an EquipmentDataField;
            # refresh the groups of every added/removed unique type.
            changed_equipment_unique_types = \
                model.objects.filter(pk__in=pk_set)

            equipment_unique_type_groups_to_update = \
                changed_equipment_unique_types[0].equipment_unique_type_groups.all().union(  # noqa: E501
                    *(equipment_unique_type.equipment_unique_type_groups.all()
                      for equipment_unique_type in changed_equipment_unique_types[1:]),  # noqa: E501
                    all=False)

            if equipment_unique_type_groups_to_update:
                print(
                    f'{instance}: Changed Equipment Unique Types: '
                    f'{action.upper()}: Updating Equipment Data Fields of '
                    f'{equipment_unique_type_groups_to_update} Related to '
                    f'Added/Removed {changed_equipment_unique_types}...')

                for equipment_unique_type_group_to_update in \
                        equipment_unique_type_groups_to_update:
                    equipment_unique_type_group_to_update.equipment_data_fields.set(  # noqa: E501
                        equipment_unique_type_group_to_update.equipment_unique_types.all()[0].equipment_data_fields.all().union(  # noqa: E501
                            *(equipment_unique_type.equipment_data_fields.all()
                              for equipment_unique_type in
                              equipment_unique_type_group_to_update.equipment_unique_types.all()[1:]),  # noqa: E501
                            all=False),
                        clear=False)

    elif action == 'pre_clear':
        if (model is EquipmentDataField) and \
                instance.equipment_unique_type_groups.count():
            # ``instance`` (an EquipmentUniqueType) is clearing all its data
            # fields: rebuild each containing group's fields from the OTHER
            # member unique types, or clear the group if it has no others.
            equipment_unique_type_groups_to_update = \
                instance.equipment_unique_type_groups.all()

            print(
                f'*** {instance}: CLEARING Equipment Data Fields: '
                f'{action.upper()}: Updating Equipment Data Fields of '
                f'{equipment_unique_type_groups_to_update}... ***')

            for equipment_unique_type_group_to_update in \
                    equipment_unique_type_groups_to_update:
                remaining_equipment_unique_types = (
                    equipment_unique_type_group_to_update
                    .equipment_unique_types.exclude(pk=instance.pk))

                if remaining_equipment_unique_types.count():
                    equipment_unique_type_group_to_update.equipment_data_fields.set(  # noqa: E501
                        remaining_equipment_unique_types[0].equipment_data_fields.all().union(  # noqa: E501
                            *(remaining_equipment_unique_type.equipment_data_fields.all()  # noqa: E501
                              for remaining_equipment_unique_type in
                              remaining_equipment_unique_types[1:]),
                            all=False),
                        clear=False)

                else:
                    print(
                        f'*** {instance}: CLEARING Equipment Data Fields: '
                        f'{action.upper()}: CLEARING Equipment Data Fields '
                        f'of {equipment_unique_type_groups_to_update}... ***')
                    equipment_unique_type_group_to_update.equipment_data_fields.clear()  # noqa: E501

        elif (model is EquipmentUniqueType) and \
                instance.equipment_unique_types.count():
            # ``instance`` (an EquipmentDataField) is clearing its unique
            # types: rebuild every affected group's fields, excluding this
            # field from the unique types about to lose it.
            equipment_unique_types_to_clear = \
                instance.equipment_unique_types.all()

            equipment_unique_type_groups_to_update = \
                equipment_unique_types_to_clear[0].equipment_unique_type_groups.all().union(  # noqa: E501
                    *(equipment_unique_type_to_clear.equipment_unique_type_groups.all()  # noqa: E501
                      for equipment_unique_type_to_clear in
                      equipment_unique_types_to_clear[1:]),
                    all=False)

            if equipment_unique_type_groups_to_update:
                print(
                    f'*** {instance}: CLEARING Equipment Unique Types: '
                    f'{action.upper()}: Updating Equipment Data Fields of '
                    f'{equipment_unique_type_groups_to_update} Related to '
                    f'{equipment_unique_types_to_clear} to Clear...')

                for equipment_unique_type_group_to_update in \
                        equipment_unique_type_groups_to_update:
                    first_equipment_unique_type = (
                        equipment_unique_type_group_to_update
                        .equipment_unique_types.all()[0])

                    equipment_unique_type_group_to_update.equipment_data_fields.set(  # noqa: E501
                        (first_equipment_unique_type.equipment_data_fields.exclude(pk=instance.pk)  # noqa: E501
                         if first_equipment_unique_type in equipment_unique_types_to_clear  # noqa: E501
                         else first_equipment_unique_type.equipment_data_fields.all()).union(  # noqa: E501
                            *((equipment_unique_type_group_equipment_unique_type.equipment_data_fields.exclude(pk=instance.pk)  # noqa: E501
                               if equipment_unique_type_group_equipment_unique_type in equipment_unique_types_to_clear  # noqa: E501
                               else equipment_unique_type_group_equipment_unique_type.equipment_data_fields.all())  # noqa: E501
                              for equipment_unique_type_group_equipment_unique_type in  # noqa: E501
                              equipment_unique_type_group_to_update.equipment_unique_types.all()[1:]),  # noqa: E501
                            all=False),
                        clear=False)


# Register for changes to EquipmentUniqueType.equipment_data_fields.
m2m_changed.connect(
    receiver=equipment_unique_types_equipment_data_fields_m2m_changed,
    sender=EquipmentUniqueType.equipment_data_fields.through,
    weak=True,
    dispatch_uid=None,
    apps=None)
def equipment_unique_type_groups_equipment_unique_types_m2m_changed(
        sender, instance, action, reverse, model, pk_set, using,
        *args, **kwargs):
    """Receiver for M2M changes on
    EquipmentUniqueTypeGroup.equipment_unique_types.

    Validates on ``pre_add`` that added members share the Equipment General
    Type, then on add/remove/clear recomputes the affected groups' data
    fields as the union of their (remaining) members' data fields.
    """
    # pylint: disable=too-many-arguments,too-many-branches,unused-argument
    if action == 'pre_add':
        # Members must share the instance's Equipment General Type.
        invalid_objs = (
            model.objects
            .filter(pk__in=pk_set)
            .exclude(equipment_general_type=instance.equipment_general_type))

        if invalid_objs:
            warnings.warn(
                message=(f'*** {instance}: CANNOT ADD INVALID {invalid_objs} '
                         'WITH DIFFERENT EQUIPMENT GENERAL TYPE(S) ***'))

            # Mutating pk_set during pre_add prevents the invalid additions.
            pk_set.difference_update(
                i['pk']
                for i in invalid_objs.values('pk'))

    elif action in ('post_add', 'post_remove') and pk_set:
        if model is EquipmentUniqueType:
            # ``instance`` is a group whose membership changed.
            if instance.equipment_unique_types.count():
                print(f'{instance}: Changed Equipment Unique Types: '
                      f'{action.upper()}: Updating Data Fields...')
                instance.equipment_data_fields.set(
                    instance.equipment_unique_types.all()[0].equipment_data_fields.all().union(  # noqa: E501
                        *(equipment_unique_type.equipment_data_fields.all()
                          for equipment_unique_type in
                          instance.equipment_unique_types.all()[1:]),
                        all=False),
                    clear=False)

            else:
                # No members remain: the group carries no data fields.
                print(f'*** {instance}: REMOVED Equipment Unique Types: '
                      f'{action.upper()}: CLEARING Data Fields... ***')
                instance.equipment_data_fields.clear()

        elif model is EquipmentUniqueTypeGroup:
            # Reverse direction: ``instance`` is a unique type added to /
            # removed from the groups identified by ``pk_set``.
            equipment_unique_type_groups_to_update = \
                model.objects.filter(pk__in=pk_set)

            print(f'{instance}: Changed Equipment Unique Type Groups: '
                  f'{action.upper()}: Updating Data Fields of Added/Removed '
                  f'{equipment_unique_type_groups_to_update}...')

            for equipment_unique_type_group_to_update in \
                    equipment_unique_type_groups_to_update:
                if equipment_unique_type_group_to_update.equipment_unique_types.count():  # noqa: E501
                    equipment_unique_type_group_to_update.equipment_data_fields.set(  # noqa: E501
                        equipment_unique_type_group_to_update.equipment_unique_types.all()[0].equipment_data_fields.all().union(  # noqa: E501
                            *(equipment_unique_type.equipment_data_fields.all()
                              for equipment_unique_type in
                              equipment_unique_type_group_to_update.equipment_unique_types.all()[1:]),  # noqa: E501
                            all=False),
                        clear=False)

                else:
                    print(f'*** {equipment_unique_type_group_to_update}: '
                          f'REMOVED Equipment Unique Types: {action.upper()}: '
                          'CLEARING Data Fields... ***')
                    equipment_unique_type_group_to_update.equipment_data_fields.clear()  # noqa: E501

    elif action == 'pre_clear':
        if model is EquipmentUniqueType:
            # The group is about to lose all its members.
            print(f'*** {instance}: CLEARING Equipment Unique Types: '
                  f'{action.upper()}: CLEARING Data Fields... ***')
            instance.equipment_data_fields.clear()

        elif (model is EquipmentUniqueTypeGroup) and \
                instance.equipment_unique_type_groups.count():
            # ``instance`` (a unique type) is leaving all its groups:
            # rebuild each group's fields from the remaining members.
            equipment_unique_type_groups_to_update = \
                instance.equipment_unique_type_groups.all()

            print(f'{instance}: CLEARING Equipment Unique Type Groups: '
                  f'{action.upper()}: Updating Data Fields of '
                  f'{equipment_unique_type_groups_to_update} to Clear...')

            for equipment_unique_type_group_to_update in \
                    equipment_unique_type_groups_to_update:
                remaining_equipment_unique_types = (
                    equipment_unique_type_group_to_update
                    .equipment_unique_types.exclude(pk=instance.pk))

                if remaining_equipment_unique_types.count():
                    equipment_unique_type_group_to_update.equipment_data_fields.set(  # noqa: E501
                        remaining_equipment_unique_types.all()[0].equipment_data_fields.all().union(  # noqa: E501
                            *(equipment_unique_type.equipment_data_fields.all()
                              for equipment_unique_type in
                              remaining_equipment_unique_types[1:]),
                            all=False),
                        clear=False)

                else:
                    print(f'*** {equipment_unique_type_group_to_update}: '
                          f'REMOVING Equipment Unique Types: {action.upper()}:'
                          f' CLEARING Data Fields... ***')
                    equipment_unique_type_group_to_update.equipment_data_fields.clear()  # noqa: E501


# Register for changes to EquipmentUniqueTypeGroup.equipment_unique_types.
m2m_changed.connect(
    receiver=equipment_unique_type_groups_equipment_unique_types_m2m_changed,
    sender=EquipmentUniqueTypeGroup.equipment_unique_types.through,
    weak=True,
    dispatch_uid=None,
    apps=None)
def equipment_unique_type_pre_delete(sender, instance, using, *args, **kwargs):
    """Pre-delete receiver for EquipmentUniqueType.

    Before a unique type is deleted, recompute the data fields of every
    Equipment Unique Type Group it belongs to from the remaining member
    unique types (clearing the group's fields when no members remain).
    """
    # pylint: disable=unused-argument
    if instance.equipment_unique_type_groups.count():
        equipment_unique_type_groups_to_update = \
            instance.equipment_unique_type_groups.all()

        print(f'*** DELETING {instance}: '
              'Updating Data Streams of '
              f'{equipment_unique_type_groups_to_update}... ***'  # noqa: E501
              )

        for equipment_unique_type_group_to_update in \
                equipment_unique_type_groups_to_update:
            # BUG FIX: exclude() must run on the single group being updated
            # (the loop variable), not on the whole queryset — a QuerySet
            # has no ``equipment_unique_types`` attribute, so the original
            # raised AttributeError on every pre-delete with memberships.
            remaining_equipment_unique_types = (
                equipment_unique_type_group_to_update.equipment_unique_types
                .exclude(pk=instance.pk))

            if remaining_equipment_unique_types.count():
                # Group fields := union of the remaining members' fields.
                equipment_unique_type_group_to_update.equipment_data_fields.set(  # noqa: E501
                    remaining_equipment_unique_types.all()[0].equipment_data_fields.all().union(  # noqa: E501
                        *(equipment_unique_type.equipment_data_fields.all()
                          for equipment_unique_type in
                          remaining_equipment_unique_types[1:]),
                        all=False),
                    clear=False)

            else:
                print(f'*** DELETING {instance}: '
                      f'CLEARING Data Streams of {equipment_unique_type_group_to_update}... ***'  # noqa: E501
                      )
                equipment_unique_type_group_to_update.equipment_data_fields.clear()  # noqa: E501


# Register so group data fields stay consistent on unique-type deletion.
pre_delete.connect(
    receiver=equipment_unique_type_pre_delete,
    sender=EquipmentUniqueType,
    weak=True,
    dispatch_uid=None,
    apps=None)
class EquipmentFacility(Model):
    """Equipment Facility: a uniquely-named site with free-form info."""

    # Reverse-accessor names for relations pointing at this model.
    RELATED_NAME = 'equipment_facilities'
    RELATED_QUERY_NAME = 'equipment_facility'

    # Unique, indexed facility name; lower-cased on save.
    name = CharField(
        verbose_name='Equipment Facility',
        blank=False,
        null=False,
        unique=True,
        db_index=True,
        max_length=MAX_CHAR_LEN)

    # Free-form facility metadata.
    info = JSONField(
        blank=True,
        null=True)

    class Meta:
        """Metadata."""

        verbose_name = 'Equipment Facility'
        verbose_name_plural = 'Equipment Facilities'
        ordering = ('name',)

    def __str__(self):
        """Return string repr."""
        return 'EqFacility "{}"'.format(self.name)

    def save(self, *args, **kwargs):
        """Lower-case the name before persisting."""
        self.name = clean_lower_str(self.name)
        super().save(*args, **kwargs)
class EquipmentInstance(Model):
    """Equipment Instance.

    One piece of equipment: its general type, optional unique type and
    facility, a globally-unique name, free-form info, and its group
    memberships.
    """

    # Reverse-accessor names used by the relations declared below.
    RELATED_NAME = 'equipment_instances'
    RELATED_QUERY_NAME = 'equipment_instance'

    equipment_general_type = \
        ForeignKey(
            to=EquipmentGeneralType,
            related_name=RELATED_NAME,
            related_query_name=RELATED_QUERY_NAME,
            blank=False,
            null=False,
            on_delete=PROTECT)

    # Optional; must match equipment_general_type (enforced in ``save``).
    equipment_unique_type = \
        ForeignKey(
            to=EquipmentUniqueType,
            related_name=RELATED_NAME,
            related_query_name=RELATED_QUERY_NAME,
            blank=True,
            null=True,
            on_delete=PROTECT)

    equipment_facility = \
        ForeignKey(
            to=EquipmentFacility,
            related_name=RELATED_NAME,
            related_query_name=RELATED_QUERY_NAME,
            blank=True,
            null=True,
            on_delete=PROTECT)

    # Globally-unique instance name; lower-cased on save.
    name = \
        CharField(
            verbose_name='Equipment Instance',
            blank=False,
            null=False,
            unique=True,
            db_index=True,
            max_length=MAX_CHAR_LEN)

    # Free-form instance metadata.
    info = \
        JSONField(
            blank=True,
            null=True)

    equipment_unique_type_groups = \
        ManyToManyField(
            to=EquipmentUniqueTypeGroup,
            related_name=RELATED_NAME,
            related_query_name=RELATED_QUERY_NAME,
            blank=True)

    class Meta:
        """Metadata."""

        verbose_name = 'Equipment Instance'
        verbose_name_plural = 'Equipment Instances'

        ordering = 'equipment_general_type', 'equipment_unique_type', 'name'

    def __str__(self):
        """Return string repr."""
        return (self.equipment_general_type.name.upper() +
                (f' UnqTp {self.equipment_unique_type.name}'
                 if self.equipment_unique_type
                 else '') +
                f' #{self.name}')

    def save(self, *args, **kwargs):
        """Save."""
        self.name = clean_lower_str(self.name)

        # A unique type belonging to a different general type would be
        # inconsistent: warn and detach it instead of persisting the
        # mismatch.
        if self.equipment_unique_type and (
                self.equipment_unique_type.equipment_general_type !=
                self.equipment_general_type):
            warnings.warn(
                message=(f'*** EQUIPMENT INSTANCE #{self.name}: '
                         f'EQUIPMENT UNIQUE TYPE {self.equipment_unique_type} '
                         'NOT OF EQUIPMENT GENERAL TYPE '
                         f'{self.equipment_general_type} ***'))

            self.equipment_unique_type = None

        super().save(*args, **kwargs)
class EquipmentSystem(Model):
    """A named system of equipment instances observed on a given date."""

    RELATED_NAME = 'equipment_systems'
    RELATED_QUERY_NAME = 'equipment_system'

    # Optional location of the whole system.
    equipment_facility = ForeignKey(
        to=EquipmentFacility,
        related_name=RELATED_NAME,
        related_query_name=RELATED_QUERY_NAME,
        blank=True,
        null=True,
        on_delete=PROTECT)

    # NOTE(review): default=None on a null=False CharField forces an explicit
    # name at save time -- confirm this is the intended behavior.
    name = CharField(
        verbose_name='Equipment System',
        blank=False,
        null=False,
        default=None,
        db_index=True,
        max_length=MAX_CHAR_LEN)

    # Observation date; part of the uniqueness constraint with ``name``.
    date = DateField(
        blank=False,
        null=False,
        db_index=True)

    equipment_instances = ManyToManyField(
        to=EquipmentInstance,
        related_name=RELATED_NAME,
        related_query_name=RELATED_QUERY_NAME,
        blank=True)

    class Meta:
        """Django model metadata."""
        verbose_name = 'Equipment System'
        verbose_name_plural = 'Equipment Systems'
        unique_together = ('name', 'date')
        ordering = ('equipment_facility', 'name', 'date')

    def __str__(self):
        """e.g. ``name @ EqFacility "facility" on YYYY-MM-DD``."""
        facility_part = (f' @ EqFacility "{self.equipment_facility.name}"'
                         if self.equipment_facility
                         else '')
        return f'{self.name}{facility_part} on {self.date}'

    def save(self, *args, **kwargs):
        """Normalize ``name`` before persisting."""
        self.name = clean_lower_str(self.name)
        super().save(*args, **kwargs)
class EquipmentUniqueTypeGroupDataFieldProfile(Model):
"""Equipment Unique Type Group Data Field Profile."""
RELATED_NAME = 'equipment_unique_type_group_data_field_profiles'
RELATED_QUERY_NAME = 'equipment_unique_type_group_data_field_profile'
equipment_unique_type_group = \
ForeignKey(
to=EquipmentUniqueTypeGroup,
related_name=RELATED_NAME,
related_query_name=RELATED_QUERY_NAME,
blank=False,
null=False,
on_delete=PROTECT)
equipment_data_field = \
ForeignKey(
to=EquipmentDataField,
related_name=RELATED_NAME,
related_query_name=RELATED_QUERY_NAME,
blank=False,
null=False,
on_delete=PROTECT)
to_date = \
DateField(
blank=True,
null=True,
db_index=True)
valid_proportion = \
FloatField(
blank=False,
null=False)
n_distinct_values = \
IntegerField(
blank=False,
null=False)
distinct_values = \
JSONField(
blank=True,
null=True)
sample_min = \
FloatField(
blank=True,
null=True)
outlier_rst_min = \
FloatField(
blank=True,
null=True)
sample_quartile = \
FloatField(
blank=True,
null=True)
sample_median = \
FloatField(
blank=True,
null=True)
sample_3rd_quartile = \
FloatField(
blank=True,
null=True)
outlier_rst_max = \
FloatField(
blank=True,
null=True)
sample_max = \
FloatField(
blank=True,
null=True)
class Meta:
"""Metadata."""
verbose_name = 'Equipment Unique Type Group Data Field Profile'
verbose_name_plural = 'Equipment Unique Type Group Data Field Profiles'
unique_together = \
'equipment_unique_type_group', \
'equipment_data_field', \
'to_date'
ordering = \
'equipment_unique_type_group', \
'equipment_data_field', \
'-to_date' | PypiClean |
/DIRestPlus-0.2.2-py3-none-any.whl/direstplus/iwind.py | from direstplus import api
from flask_restplus import Resource, reqparse
from WindPy import w
import pandas as pd
import logging
from datetime import datetime, date
from direstplus.exceptions import RequestError
logger = logging.getLogger(__name__)
# Date/time formats used when serializing WindPy results to JSON.
STR_FORMAT_DATE = '%Y-%m-%d'
STR_FORMAT_DATETIME_WIND = '%Y-%m-%d %H:%M:%S'  # 2017-03-06 00:00:00
# Wind uses 1900-01-01 as a "no data" placeholder; any date/datetime at or
# before this sentinel is treated as missing (serialized as None).
UN_AVAILABLE_DATETIME = datetime.strptime('1900-01-01', STR_FORMAT_DATE)
UN_AVAILABLE_DATE = UN_AVAILABLE_DATETIME.date()
# NOTE(review): ``header`` appears unused within this module -- confirm
# before removing.
header = {'Content-Type': 'application/json'}
rec = api.namespace('wind', description='wind接口')
# Known WindPy error codes (negative integers) -> human-readable messages.
ERROR_CODE_MSG_DIC = {
    -40522005: "不支持的万得代码",
    -40522003: "非法请求",
    -40521004: "请求发送失败。无法发送请求,请连接网络",
    -40520007: "没有可用数据",
    -40521009: "数据解码失败。检查输入参数是否正确,如:日期参数注意大小月月末及短二月",
    -40521010: "网络超时",
    -40522017: "数据提取量超限",
}
# ---------------------------------------------------------------------------
# Request parsers: one per endpoint, mirroring the keyword arguments of the
# corresponding WindPy function.  The parsed args are forwarded verbatim as
# ``w.<func>(**args)``, so argument names must match WindPy's signatures.
# NOTE(review): several ``help`` strings below look copy-pasted from the wset
# parser (e.g. "数据集名称" for ``codes``); confirm before relying on the
# generated Swagger documentation.
# ---------------------------------------------------------------------------
# w.wset: data-set retrieval (e.g. sector constituents).
receive_wset_parser = reqparse.RequestParser().add_argument(
    'tablename', type=str, required=True, help="数据集名称"
).add_argument(
    'options', type=str, help="可选参数"
)
# w.wsd: daily time series.
receive_wsd_parser = reqparse.RequestParser().add_argument(
    'codes', type=str, required=True, help="数据集名称"
).add_argument(
    'fields', type=str, help="指标"
).add_argument(
    'beginTime', type=str, help="开始时间"
).add_argument(
    'endTime', type=str, help="截止时间"
).add_argument(
    'options', type=str, help="可选参数"
)
# w.wsi: intraday bars.
receive_wsi_parser = reqparse.RequestParser().add_argument(
    'codes', type=str, required=True, help="数据集名称"
).add_argument(
    'fields', type=str, help="指标"
).add_argument(
    'beginTime', type=str, help="开始时间"
).add_argument(
    'endTime', type=str, help="截止时间"
).add_argument(
    'options', type=str, help="可选参数"
)
# w.wss: snapshot of per-security fields.
receive_wss_parser = reqparse.RequestParser().add_argument(
    'codes', type=str, required=True, help="数据集名称"
).add_argument(
    'fields', type=str, help="指标"
).add_argument(
    'options', type=str, help="可选参数"
)
# w.tdaysoffset: trading-day arithmetic.
tdays_offset_parser = reqparse.RequestParser().add_argument(
    'offsets', type=str, required=True, help="偏移值"
).add_argument(
    'beginTime', type=str, help="基准时间"
).add_argument(
    'options', type=str, help="可选参数"
)
# w.tdays: trading-day calendar between two dates.
tdays_parser = reqparse.RequestParser().add_argument(
    'beginTime', type=str, help="开始时间"
).add_argument(
    'endTime', type=str, help="结束时间"
).add_argument(
    'options', type=str, help="可选参数"
)
# w.wsq: real-time quotes.
receive_wsq_parser = reqparse.RequestParser().add_argument(
    'codes', type=str, required=True, help="数据集名称"
).add_argument(
    'fields', type=str, help="指标"
).add_argument(
    'options', type=str, help="可选参数"
)
# w.wst: tick-level data.
receive_wst_parser = reqparse.RequestParser().add_argument(
    'codes', type=str, required=True, help="数据集名称"
).add_argument(
    'fields', type=str, help="指标"
).add_argument(
    'beginTime', type=str, help="开始时间"
).add_argument(
    'endTime', type=str, help="截止时间"
).add_argument(
    'options', type=str, help="可选参数"
)
# w.edb: economic database series (no ``fields`` argument).
receive_edb_parser = reqparse.RequestParser().add_argument(
    'codes', type=str, required=True, help="数据集名称"
).add_argument(
    'beginTime', type=str, help="开始时间"
).add_argument(
    'endTime', type=str, help="截止时间"
).add_argument(
    'options', type=str, help="可选参数"
)
def format_2_date_str(dt):
    """Serialize *dt* as a ``YYYY-MM-DD`` string.

    ``None`` and strings pass through untouched; ``date``/``datetime``
    values at or before the Wind "unavailable" sentinel (1900-01-01) map to
    ``None``; any other type is returned unchanged.  Exact type checks are
    deliberate so ``date`` and ``datetime`` are distinguished.
    """
    if dt is None:
        return None
    dt_type = type(dt)
    if dt_type == str:
        return dt
    if dt_type == date:
        return dt.strftime(STR_FORMAT_DATE) if dt > UN_AVAILABLE_DATE else None
    if dt_type == datetime:
        return dt.strftime(STR_FORMAT_DATE) if dt > UN_AVAILABLE_DATETIME else None
    return dt
def format_2_datetime_str(dt):
    """Serialize *dt* with time resolution where available.

    ``None`` and strings pass through; ``date`` values serialize as
    ``YYYY-MM-DD`` and ``datetime`` values as ``YYYY-MM-DD HH:MM:SS``;
    values at or before the Wind "unavailable" sentinel (1900-01-01) map to
    ``None``; any other type is returned unchanged.
    """
    if dt is None:
        return None
    dt_type = type(dt)
    if dt_type == str:
        return dt
    if dt_type == date:
        return dt.strftime(STR_FORMAT_DATE) if dt > UN_AVAILABLE_DATE else None
    if dt_type == datetime:
        return (dt.strftime(STR_FORMAT_DATETIME_WIND)
                if dt > UN_AVAILABLE_DATETIME
                else None)
    return dt
@rec.route('/wset/')
class ReceiveWSET(Resource):
    """POST endpoint wrapping WindPy ``w.wset`` (data-set retrieval)."""

    @rec.expect(receive_wset_parser)
    def post(self):
        """
        json str:{"tablename": "sectorconstituent", "options": "date=2017-03-21;sectorid=1000023121000000"}
        :return: the Wind result as a nested dict ``{code: {field: value}}``
        """
        args = receive_wset_parser.parse_args()
        logger.info('/wset/ args:%s' % args)
        ret_data = w.wset(**args)
        # Reconnect so *subsequent* requests succeed if the session dropped.
        # NOTE(review): the reconnect runs after the request above; confirm
        # this ordering is intentional.
        if not w.isconnected():
            w.start()
        # NOTE(review): indexing the WindPy result object with ['options']
        # looks suspicious -- args['options'] may have been intended; confirm.
        if ret_data['options'] == "":
            ret_data['options'] = None
        error_code = ret_data.ErrorCode
        if error_code != 0:
            # NOTE(review): setdefault() inserts unknown codes into the shared
            # ERROR_CODE_MSG_DIC; dict.get() would avoid mutating it.
            msg = ERROR_CODE_MSG_DIC.setdefault(error_code, "")
            logger.error('wset(%s) ErrorCode=%d %s' % (args, error_code, msg))
            raise RequestError(msg, None, error_code)
        data_count = len(ret_data.Data)
        # Convert any datetime/date column in Data to ISO date strings.
        for n_data in range(data_count):
            data = ret_data.Data[n_data]
            data_len2 = len(data)
            if data_len2 > 0:
                # Find the first non-None value (or the last value if all
                # entries are None).
                for item_check in data:
                    if item_check is not None:
                        break
                # If the column holds datetime/date values, stringify it.
                if item_check is not None and type(item_check) in (datetime, date):
                    ret_data.Data[n_data] = [format_2_date_str(dt) for dt in data]
                    logger.info('%d column["%s"] date to str', n_data, ret_data.Fields[n_data])
        # Fields as rows, security codes as columns; to_dict() then yields
        # {code: {field: value}}.
        ret_df = pd.DataFrame(ret_data.Data, index=ret_data.Fields, columns=ret_data.Codes)
        ret_dic = ret_df.to_dict()
        return ret_dic
@rec.route('/wsd/')
class ReceiveWSD(Resource):
    """POST endpoint wrapping WindPy ``w.wsd`` (daily time series)."""

    @rec.expect(receive_wsd_parser)
    def post(self):
        """
        json str:{"codes": "603555.SH", "fields": "close,pct_chg", "begin_time": "2017-01-04", "end_time": "2017-02-28", "options": "PriceAdj=F"}
        :return: the Wind result as a nested dict; orientation depends on
            whether one code, one date, or many of both were requested
        """
        args = receive_wsd_parser.parse_args()
        logger.info('/wsd/ args:%s' % args)
        ret_data = w.wsd(**args)
        # Reconnect so subsequent requests succeed if the session dropped.
        if not w.isconnected():
            w.start()
        # NOTE(review): indexing the WindPy result object with ['options']
        # looks suspicious -- args['options'] may have been intended; confirm.
        if ret_data['options'] == "":
            ret_data['options'] = None
        error_code = ret_data.ErrorCode
        if error_code != 0:
            # NOTE(review): setdefault() mutates the shared message dict for
            # unknown codes; dict.get() would be side-effect free.
            msg = ERROR_CODE_MSG_DIC.setdefault(error_code, "")
            logger.error('wsd(%s) ErrorCode=%d %s' % (args, error_code, msg))
            raise RequestError(msg, None, error_code)
        # Convert any datetime/date column in Data to ISO date strings.
        data_len = len(ret_data.Data)
        for n_data in range(data_len):
            data = ret_data.Data[n_data]
            data_len2 = len(data)
            if data_len2 > 0:
                # Find the first non-None value (or the last if all are None).
                for item_check in data:
                    if item_check is not None:
                        break
                # If the column holds datetime/date values, stringify it.
                if item_check is not None and type(item_check) in (datetime, date):
                    ret_data.Data[n_data] = [format_2_date_str(dt) for dt in data]
                    logger.info('%d column["%s"] date to str', n_data, ret_data.Fields[n_data])
        # Assemble a DataFrame; orientation mirrors how WindPy shapes Data:
        if len(ret_data.Codes) == 1:
            # Single security: fields x dates.
            ret_df = pd.DataFrame(ret_data.Data, index=ret_data.Fields,
                                  columns=[format_2_date_str(dt) for dt in ret_data.Times])
        elif len(ret_data.Times) == 1:
            # Single date: fields x codes.
            ret_df = pd.DataFrame(ret_data.Data, index=ret_data.Fields,
                                  columns=ret_data.Codes)
        else:
            # Many codes and dates (single field): codes x dates.
            ret_df = pd.DataFrame(ret_data.Data, index=ret_data.Codes,
                                  columns=[format_2_date_str(dt) for dt in ret_data.Times])
        ret_dic = ret_df.to_dict()
        return ret_dic
@rec.route('/wsi/')
class ReceiveWSI(Resource):
    """POST endpoint wrapping WindPy ``w.wsi`` (intraday bars)."""

    @rec.expect(receive_wsi_parser)
    def post(self):
        """
        json str:{"codes": "RU1801.SHF", "fields": "open,high,low,close,volume,amt,oi", "begin_time": "2017-12-11 09:00:00", "end_time": "2017-12-11 10:27:41", "options": ""}
        :return: the Wind result as a nested dict ``{timestamp: {field: value}}``
        """
        args = receive_wsi_parser.parse_args()
        logger.info('/wsi/ args:%s' % args)
        ret_data = w.wsi(**args)
        # Reconnect so subsequent requests succeed if the session dropped.
        if not w.isconnected():
            w.start()
        # NOTE(review): indexing the WindPy result object with ['options']
        # looks suspicious -- args['options'] may have been intended; confirm.
        if ret_data['options'] == "":
            ret_data['options'] = None
        error_code = ret_data.ErrorCode
        if error_code != 0:
            # NOTE(review): setdefault() mutates the shared message dict for
            # unknown codes; dict.get() would be side-effect free.
            msg = ERROR_CODE_MSG_DIC.setdefault(error_code, "")
            logger.error('wsi(%s) ErrorCode=%d %s' % (
                args, error_code, msg))
            raise RequestError(msg, None, error_code)
        # Convert any datetime/date column in Data to timestamp strings.
        data_len = len(ret_data.Data)
        for n_data in range(data_len):
            data = ret_data.Data[n_data]
            data_len2 = len(data)
            if data_len2 > 0:
                # Find the first non-None value (or the last if all are None).
                for item_check in data:
                    if item_check is not None:
                        break
                # If the column holds datetime/date values, stringify it.
                if item_check is not None and type(item_check) in (datetime, date):
                    ret_data.Data[n_data] = [format_2_datetime_str(dt) for dt in data]
                    logger.info('%d column["%s"] date to str', n_data, ret_data.Fields[n_data])
        # Fields as rows, bar timestamps as columns.
        ret_df = pd.DataFrame(ret_data.Data, index=ret_data.Fields,
                              columns=[format_2_datetime_str(dt) for dt in ret_data.Times])
        ret_dic = ret_df.to_dict()
        return ret_dic
@rec.route('/wss/')
class ReceiveWSS(Resource):
    """POST endpoint wrapping WindPy ``w.wss`` (per-security snapshot)."""

    @rec.expect(receive_wss_parser)
    def post(self):
        """
        json str:{"codes": "XT1522613.XT", "fields": "fund_setupdate,fund_maturitydate,fund_mgrcomp,fund_existingyear,fund_ptmyear,fund_type,fund_fundmanager", "options": ""}
        :return: the Wind result as a nested dict ``{code: {field: value}}``
        """
        args = receive_wss_parser.parse_args()
        logger.info('/wss/ args:%s', args)
        ret_data = w.wss(**args)
        # Reconnect so subsequent requests succeed if the session dropped.
        if not w.isconnected():
            w.start()
        # NOTE(review): indexing the WindPy result object with ['options']
        # looks suspicious -- args['options'] may have been intended; confirm.
        if ret_data['options'] == "":
            ret_data['options'] = None
        error_code = ret_data.ErrorCode
        if error_code != 0:
            # NOTE(review): setdefault() mutates the shared message dict for
            # unknown codes; dict.get() would be side-effect free.
            msg = ERROR_CODE_MSG_DIC.setdefault(error_code, "")
            logger.error('wss(%s) ErrorCode=%d %s' % (args, error_code, msg))
            raise RequestError(msg, None, error_code)
        # Convert datetime/date columns in Data to date strings.
        # NOTE(review): unlike the wsd handler, only data[0] is type-checked
        # here, so a column whose first entry is None is left unconverted.
        data_len = len(ret_data.Data)
        logger.debug('ret_data.Data len:%d', data_len)
        logger.debug('ret_data.Codes : %s', ret_data.Codes)
        for n_data in range(data_len):
            data = ret_data.Data[n_data]
            data_len2 = len(data)
            if data_len2 > 0:
                if type(data[0]) in (datetime, date):
                    ret_data.Data[n_data] = [format_2_date_str(dt) for dt in data]
                    logger.info('%d column["%s"] date to str', n_data, ret_data.Fields[n_data])
        # Fields as rows, security codes as columns.
        ret_df = pd.DataFrame(ret_data.Data, index=ret_data.Fields, columns=ret_data.Codes)
        ret_dic = ret_df.to_dict()
        return ret_dic
@rec.route('/tdaysoffset/')
class ReceiveTdaysoffset(Resource):
    """POST endpoint wrapping WindPy ``w.tdaysoffset`` (trading-day offset)."""

    @rec.expect(tdays_offset_parser)
    def post(self):
        """
        json str:{"offset": "1", "begin_time": "2017-3-31", "options": ""}
        :return: ``{"Date": "YYYY-MM-DD"}`` (empty string if Wind returned nothing)

        NOTE(review): the example key "offset" does not match the parser's
        required argument name "offsets" -- the example looks stale; confirm.
        """
        args = tdays_offset_parser.parse_args()
        logger.info('/tdaysoffset/ args:%s', args)
        ret_data = w.tdaysoffset(**args)
        # Reconnect so subsequent requests succeed if the session dropped.
        if not w.isconnected():
            w.start()
        # NOTE(review): indexing the WindPy result object with ['options']
        # looks suspicious -- args['options'] may have been intended; confirm.
        if ret_data['options'] == "":
            ret_data['options'] = None
        error_code = ret_data.ErrorCode
        if error_code != 0:
            # NOTE(review): setdefault() mutates the shared message dict for
            # unknown codes; dict.get() would be side-effect free.
            msg = ERROR_CODE_MSG_DIC.setdefault(error_code, "")
            logger.error(
                'tdaysoffset("%s") ErrorCode=%d %s' % (args, error_code, msg))
            raise RequestError(msg, None, error_code)
        # The result is a single date in Data[0][0]; serialize it.
        if len(ret_data.Data) > 0 and len(ret_data.Data[0]) > 0:
            date_str = format_2_date_str(ret_data.Data[0][0])
        else:
            logger.warning('tdaysoffset(%s) No value return' % args)
            date_str = ''
        ret_dic = {'Date': date_str}
        return ret_dic
@rec.route('/tdays/')
class ReceiveTdays(Resource):
    """POST endpoint wrapping WindPy ``w.tdays`` (trading-day calendar)."""

    @rec.expect(tdays_parser)
    def post(self):
        """
        json str:{"begin_time": "2017-3-31", "end_time": "2017-3-31", "options": ""}
        :return: list of trading dates as ``YYYY-MM-DD`` strings (possibly empty)
        :raises RequestError: when Wind reports a non-zero error code; on a
            network timeout the Wind session is restarted first
        """
        args = tdays_parser.parse_args()
        logger.info('/tdays/ args:%s', args)
        ret_data = w.tdays(**args)
        # Reconnect so subsequent requests succeed if the session dropped.
        if not w.isconnected():
            w.start()
        # NOTE(review): indexing the WindPy result object with ['options']
        # looks suspicious -- args['options'] may have been intended; confirm.
        if ret_data['options'] == "":
            ret_data['options'] = None
        error_code = ret_data.ErrorCode
        if error_code != 0:
            # get() instead of setdefault(): do not pollute the shared dict
            # with empty messages for unknown codes.
            msg = ERROR_CODE_MSG_DIC.get(error_code, "")
            logger.error('tdays(%s) ErrorCode=%d %s' % (args, error_code, msg))
            # BUG FIX: Wind error codes are negative (see ERROR_CODE_MSG_DIC);
            # the original compared against +40521010, so the reconnect branch
            # could never run on a network timeout.
            if error_code == -40521010:
                w.close()
                w.start()
                logger.warning('网络连接超时,端口重新启动')
            raise RequestError(msg, None, error_code)
        # Serialize the returned dates (Data[0]) to ISO strings.
        if len(ret_data.Data) > 0 and len(ret_data.Data[0]) > 0:
            ret_dic = [format_2_date_str(d) for d in ret_data.Data[0]]
        else:
            logger.warning('tdays(%s) No value return' % args)
            ret_dic = []
        return ret_dic
@rec.route('/wsq/')
class ReceiveWSQ(Resource):
    """POST endpoint wrapping WindPy ``w.wsq`` (real-time quote snapshot)."""

    @rec.expect(receive_wsq_parser)
    def post(self):
        """
        json str:{"codes": "600008.SH,600010.SH,600017.SH", "fields": "rt_open,rt_low_limit", "options": ""}
        :return: the Wind result as a nested dict ``{code: {field: value}}``
        :raises RequestError: when Wind reports a non-zero error code
        """
        args = receive_wsq_parser.parse_args()
        logger.info('/wsq/ args:%s', args)
        ret_data = w.wsq(**args)
        # Reconnect so subsequent requests succeed if the session dropped.
        if not w.isconnected():
            w.start()
        # NOTE(review): indexing the WindPy result object with ['options']
        # looks suspicious -- args['options'] may have been intended; confirm.
        if ret_data['options'] == "":
            ret_data['options'] = None
        error_code = ret_data.ErrorCode
        if error_code != 0:
            # get() instead of setdefault(): do not pollute the shared dict.
            msg = ERROR_CODE_MSG_DIC.get(error_code, "")
            # BUG FIX: the original used  % args  with three %-placeholders,
            # which raised TypeError whenever an error was being reported.
            logger.error('wsq(%s) ErrorCode=%d %s' % (args, error_code, msg))
            raise RequestError(msg, None, error_code)
        # Convert datetime/date columns in Data to date strings (only the
        # first value's type is checked, matching the wss handler).
        data_len = len(ret_data.Data)
        logger.debug('ret_data.Data len:%d', data_len)
        logger.debug('ret_data.Codes : %s', ret_data.Codes)
        for n_data in range(data_len):
            data = ret_data.Data[n_data]
            if len(data) > 0 and type(data[0]) in (datetime, date):
                ret_data.Data[n_data] = [format_2_date_str(dt) for dt in data]
                logger.info('%d column["%s"] date to str', n_data, ret_data.Fields[n_data])
        # Fields as rows, security codes as columns.
        ret_df = pd.DataFrame(ret_data.Data, index=ret_data.Fields, columns=ret_data.Codes)
        ret_dic = ret_df.to_dict()
        return ret_dic
@rec.route('/wst/')
class ReceiveWST(Resource):
    """POST endpoint wrapping WindPy ``w.wst`` (tick-level data)."""

    @rec.expect(receive_wst_parser)
    def post(self):
        """
        json str:{"codes": "600008.SH, "fields": "ask1,bid1,asize1,bsize1,volume,amt,pre_close,open,high,low,last", "begin_time": "2017-01-04", "end_time": "2017-02-28", "options": ""}
        :return: the Wind result as a nested dict ``{timestamp: {field: value}}``
        """
        args = receive_wst_parser.parse_args()
        logger.info('/wst/ args:%s', args)
        ret_data = w.wst(**args)
        # Reconnect so subsequent requests succeed if the session dropped.
        if not w.isconnected():
            w.start()
        # NOTE(review): indexing the WindPy result object with ['options']
        # looks suspicious -- args['options'] may have been intended; confirm.
        if ret_data['options'] == "":
            ret_data['options'] = None
        error_code = ret_data.ErrorCode
        if error_code != 0:
            # NOTE(review): setdefault() mutates the shared message dict for
            # unknown codes; dict.get() would be side-effect free.
            msg = ERROR_CODE_MSG_DIC.setdefault(error_code, "")
            logger.error('wst(%s) ErrorCode=%d %s' % (args, error_code, msg))
            raise RequestError(msg, None, error_code)
        # Convert any datetime/date column in Data to timestamp strings.
        data_len = len(ret_data.Data)
        for n_data in range(data_len):
            data = ret_data.Data[n_data]
            data_len2 = len(data)
            if data_len2 > 0:
                # Find the first non-None value (or the last if all are None).
                for item_check in data:
                    if item_check is not None:
                        break
                # If the column holds datetime/date values, stringify it.
                if item_check is not None and type(item_check) in (datetime, date):
                    ret_data.Data[n_data] = [format_2_datetime_str(dt) for dt in data]
                    logger.info('%d column["%s"] date to str', n_data, ret_data.Fields[n_data])
        # Fields as rows, tick timestamps as columns.
        ret_df = pd.DataFrame(ret_data.Data, index=ret_data.Fields,
                              columns=[format_2_datetime_str(dt) for dt in ret_data.Times])
        ret_dic = ret_df.to_dict()
        return ret_dic
@rec.route('/edb/')
class ReceiveEDB(Resource):
    """POST endpoint wrapping WindPy ``w.edb`` (economic database series)."""

    @rec.expect(receive_edb_parser)
    def post(self):
        """
        json str:{"codes": "M0017126,M0017127,M0017128", "begin_time": "2016-11-10", "end_time": "2017-11-10", "options": "Fill=Previous"}
        :return: the Wind result as a nested dict ``{date: {code: value}}``
        :raises RequestError: when Wind reports a non-zero error code
        """
        args = receive_edb_parser.parse_args()
        logger.info('/edb/ args:%s', args)
        ret_data = w.edb(**args)
        # Reconnect so subsequent requests succeed if the session dropped.
        if not w.isconnected():
            w.start()
        # NOTE(review): indexing the WindPy result object with ['options']
        # looks suspicious -- args['options'] may have been intended; confirm.
        if ret_data['options'] == "":
            ret_data['options'] = None
        error_code = ret_data.ErrorCode
        if error_code != 0:
            # get() instead of setdefault(): do not pollute the shared dict.
            msg = ERROR_CODE_MSG_DIC.get(error_code, "")
            # BUG FIX: the original log message said 'wst(...)' in this edb
            # handler -- corrected for accurate diagnostics.
            logger.error('edb(%s) ErrorCode=%d %s' % (args, error_code, msg))
            raise RequestError(msg, None, error_code)
        # Convert any datetime/date column in Data to date strings.
        data_len = len(ret_data.Data)
        for n_data in range(data_len):
            data = ret_data.Data[n_data]
            data_len2 = len(data)
            if data_len2 > 0:
                # Find the first non-None value (or the last if all are None).
                for item_check in data:
                    if item_check is not None:
                        break
                if item_check is not None and type(item_check) in (datetime, date):
                    ret_data.Data[n_data] = [format_2_date_str(dt) for dt in data]
                    logger.info('%d column["%s"] date to str', n_data, ret_data.Fields[n_data])
        # BUG FIX: the original indexed the frame with an undefined local
        # ``codes`` (its assignment was commented out), raising NameError on
        # every successful request; derive the index from the request args.
        codes = [code.strip() for code in args['codes'].split(',')]
        ret_df = pd.DataFrame(ret_data.Data, index=codes,
                              columns=[format_2_date_str(dt) for dt in ret_data.Times])
        ret_dic = ret_df.to_dict()
        return ret_dic
/Fern2-1.4.1.tar.gz/Fern2-1.4.1/fern/data/data_utils.py | """data utils"""
from typing import *
import re
import pickle
import pathlib
import pandas as pd
from fern.utils import check_path
from fern.data import FernDataFrame
def save_to_csv(data: Union[pd.DataFrame, FernDataFrame], path: Union[str, pathlib.Path]):
    """
    save data to path
    Args:
        data: the data frame to save
        path: path where data save
    Raises:
        ValueError: if *data* is empty (nothing was loaded before saving)
    """
    # Ensure the parent directory exists / the target is writable.
    check_path(path)
    if data.empty:
        raise ValueError('You should get source data before save')
    # Wrap in FernDataFrame so its save() handles the CSV serialization.
    data = FernDataFrame(data)
    data.save(path)
def load_from_csv(path: Union[str, pathlib.Path], index_col: Optional[str] = None, eval_col: Optional[List[str]] = None):
    """
    load data from path
    Args:
        path: path where data save
        index_col: column to promote to the frame's index, if given
        eval_col: columns whose values should be restored from their string
            representation (CSV round-trips everything as strings)
    Returns:
        the loaded ``pd.DataFrame``
    """
    data = pd.read_csv(path)
    if index_col:
        data = data.set_index(index_col)
    if isinstance(eval_col, list):
        for col in eval_col:
            # NOTE(review): eval() executes arbitrary expressions read from
            # the CSV -- ast.literal_eval would be safer; confirm the saved
            # data is always trusted before relying on this.
            data.loc[:, col] = data[col].map(eval)
    return data
def save_to_pickle(data: Any, path: Union[str, pathlib.Path]):
    """
    save data to path
    Args:
        data: the object to pickle
        path: path where data save
    Raises:
        ValueError: if *data* is None (nothing was loaded before saving)
    """
    # Ensure the parent directory exists / the target is writable.
    check_path(path)
    if data is None:
        raise ValueError('You should get source data before save')
    with open(path, 'wb') as f:
        # protocol=4: supports large objects and is readable on Python 3.4+.
        pickle.dump(data, f, protocol=4)
def load_from_pickle(path: Union[str, pathlib.Path]):
    """
    Load and return a previously pickled object.
    Args:
        path: file the object was pickled to
    Returns:
        the unpickled object
    """
    with open(path, 'rb') as file_obj:
        return pickle.load(file_obj)
def read_words(words_path):
    """
    Read user words, stop words or a word library from *words_path*.
    Lines beginning with ``#`` or consisting entirely of white space
    characters are ignored; remaining lines are stripped, lower-cased and
    de-duplicated.
    Parameters
    ----------
    words_path : str, Path, None
        words path; ``None`` or a non-existent path yields an empty list
    Returns
    -------
    list[str]
        de-duplicated word list, sorted for deterministic ordering
    """
    if words_path is None or not pathlib.Path(words_path).exists():
        return []
    seen = set()
    with open(words_path, mode='r', encoding='utf-8') as f:
        for line in f:
            line = line.strip().lower()
            if line and line[0] != '#':
                seen.add(line)
    # sorted(): the original returned list(set(...)), whose order varies
    # between runs (str hash randomization); a deterministic order keeps any
    # downstream word->index mapping reproducible.
    return sorted(seen)
def read_regex_words(words_path):
    """
    Read words written as regular expressions and compile them.
    Parameters
    ----------
    words_path : str, Path, None
        words path
    Returns
    -------
    list[re.Pattern]
        one compiled pattern per word line
    """
    return [re.compile(pattern) for pattern in read_words(words_path)]
def read_library_size(path):
    """
    Return the number of entries in a word/label library file.
    Blank lines and ``#`` comment lines are skipped automatically (the
    parsing is delegated to :func:`read_words`).
    Parameters
    ----------
    path : str, pathlib.Path
        word library path
    Returns
    -------
    int
        number of distinct entries in the library
    """
    return len(read_words(path))
/Euphorie-15.0.2.tar.gz/Euphorie-15.0.2/src/euphorie/client/resources/oira/script/chunks/95800.06fc1451fd938f84dbfd.min.js | "use strict";(self.webpackChunk_patternslib_patternslib=self.webpackChunk_patternslib_patternslib||[]).push([[95800],{78774:function(n,e,t){var o=t(87537),s=t.n(o),r=t(23645),a=t.n(r),l=t(61667),i=t.n(l),c=new URL(t(82763),t.b),u=a()(s()),h=i()(c);u.push([n.id,".hljs{display:block;overflow-x:auto;padding:.5em;background:#b7a68e url("+h+")}.hljs-keyword,.hljs-selector-tag,.hljs-literal{color:#059;font-weight:bold}.hljs,.hljs-subst{color:#363c69}.hljs-string,.hljs-title,.hljs-section,.hljs-type,.hljs-attribute,.hljs-symbol,.hljs-bullet,.hljs-built_in,.hljs-addition,.hljs-variable,.hljs-template-tag,.hljs-template-variable,.hljs-link,.hljs-name{color:#2c009f}.hljs-comment,.hljs-quote,.hljs-meta,.hljs-deletion{color:#802022}.hljs-keyword,.hljs-selector-tag,.hljs-literal,.hljs-doctag,.hljs-title,.hljs-section,.hljs-type,.hljs-name,.hljs-strong{font-weight:bold}.hljs-emphasis{font-style:italic}","",{version:3,sources:["webpack://./node_modules/highlight.js/styles/brown-paper.css"],names:[],mappings:"AAMA,MACE,aAAA,CACA,eAAA,CACA,YAAA,CACA,0DAAA,CAGF,+CAGE,UAAA,CACA,gBAAA,CAGF,kBAEE,aAAA,CAGF,0MAcE,aAAA,CAGF,oDAIE,aAAA,CAGF,yHASE,gBAAA,CAGF,eACE,iBAAA",sourcesContent:["/*\n\nBrown Paper style from goldblog.com.ua (c) Zaripov Yura <yur4ik7@ukr.net>\n\n*/\n\n.hljs {\n display: block;\n overflow-x: auto;\n padding: 0.5em;\n background:#b7a68e url(./brown-papersq.png);\n}\n\n.hljs-keyword,\n.hljs-selector-tag,\n.hljs-literal {\n color:#005599;\n font-weight:bold;\n}\n\n.hljs,\n.hljs-subst {\n color: #363c69;\n}\n\n.hljs-string,\n.hljs-title,\n.hljs-section,\n.hljs-type,\n.hljs-attribute,\n.hljs-symbol,\n.hljs-bullet,\n.hljs-built_in,\n.hljs-addition,\n.hljs-variable,\n.hljs-template-tag,\n.hljs-template-variable,\n.hljs-link,\n.hljs-name {\n color: #2c009f;\n}\n\n.hljs-comment,\n.hljs-quote,\n.hljs-meta,\n.hljs-deletion {\n color: 
#802022;\n}\n\n.hljs-keyword,\n.hljs-selector-tag,\n.hljs-literal,\n.hljs-doctag,\n.hljs-title,\n.hljs-section,\n.hljs-type,\n.hljs-name,\n.hljs-strong {\n font-weight: bold;\n}\n\n.hljs-emphasis {\n font-style: italic;\n}\n"],sourceRoot:""}]),e.Z=u},23645:function(n){n.exports=function(n){var e=[];return e.toString=function(){return this.map((function(e){var t="",o=void 0!==e[5];return e[4]&&(t+="@supports (".concat(e[4],") {")),e[2]&&(t+="@media ".concat(e[2]," {")),o&&(t+="@layer".concat(e[5].length>0?" ".concat(e[5]):""," {")),t+=n(e),o&&(t+="}"),e[2]&&(t+="}"),e[4]&&(t+="}"),t})).join("")},e.i=function(n,t,o,s,r){"string"==typeof n&&(n=[[null,n,void 0]]);var a={};if(o)for(var l=0;l<this.length;l++){var i=this[l][0];null!=i&&(a[i]=!0)}for(var c=0;c<n.length;c++){var u=[].concat(n[c]);o&&a[u[0]]||(void 0!==r&&(void 0===u[5]||(u[1]="@layer".concat(u[5].length>0?" ".concat(u[5]):""," {").concat(u[1],"}")),u[5]=r),t&&(u[2]?(u[1]="@media ".concat(u[2]," {").concat(u[1],"}"),u[2]=t):u[2]=t),s&&(u[4]?(u[1]="@supports (".concat(u[4],") {").concat(u[1],"}"),u[4]=s):u[4]="".concat(s)),e.push(u))}},e}},61667:function(n){n.exports=function(n,e){return e||(e={}),n?(n=String(n.__esModule?n.default:n),/^['"].*['"]$/.test(n)&&(n=n.slice(1,-1)),e.hash&&(n+=e.hash),/["'() \t\n]|(%20)/.test(n)||e.needQuotes?'"'.concat(n.replace(/"/g,'\\"').replace(/\n/g,"\\n"),'"'):n):n}},87537:function(n){n.exports=function(n){var e=n[1],t=n[3];if(!t)return e;if("function"==typeof btoa){var o=btoa(unescape(encodeURIComponent(JSON.stringify(t)))),s="sourceMappingURL=data:application/json;charset=utf-8;base64,".concat(o),r="/*# ".concat(s," */");return[e].concat([r]).join("\n")}return[e].join("\n")}},95800:function(n,e,t){t.r(e);var o=t(93379),s=t.n(o),r=t(7795),a=t.n(r),l=t(3565),i=t.n(l),c=t(19216),u=t.n(c),h=t(44589),p=t.n(h),f=t(78774),d={};d.styleTagTransform=p(),d.setAttributes=i(),d.insert=function(n){var 
e=document.head.querySelectorAll("*")[0];e?document.head.insertBefore(n,e):document.head.append(n)},d.domAPI=a(),d.insertStyleElement=u();s()(f.Z,d);e.default=f.Z&&f.Z.locals?f.Z.locals:void 0},93379:function(n){var e=[];function t(n){for(var t=-1,o=0;o<e.length;o++)if(e[o].identifier===n){t=o;break}return t}function o(n,o){for(var r={},a=[],l=0;l<n.length;l++){var i=n[l],c=o.base?i[0]+o.base:i[0],u=r[c]||0,h="".concat(c," ").concat(u);r[c]=u+1;var p=t(h),f={css:i[1],media:i[2],sourceMap:i[3],supports:i[4],layer:i[5]};if(-1!==p)e[p].references++,e[p].updater(f);else{var d=s(f,o);o.byIndex=l,e.splice(l,0,{identifier:h,updater:d,references:1})}a.push(h)}return a}function s(n,e){var t=e.domAPI(e);t.update(n);return function(e){if(e){if(e.css===n.css&&e.media===n.media&&e.sourceMap===n.sourceMap&&e.supports===n.supports&&e.layer===n.layer)return;t.update(n=e)}else t.remove()}}n.exports=function(n,s){var r=o(n=n||[],s=s||{});return function(n){n=n||[];for(var a=0;a<r.length;a++){var l=t(r[a]);e[l].references--}for(var i=o(n,s),c=0;c<r.length;c++){var u=t(r[c]);0===e[u].references&&(e[u].updater(),e.splice(u,1))}r=i}}},19216:function(n){n.exports=function(n){var e=document.createElement("style");return n.setAttributes(e,n.attributes),n.insert(e,n.options),e}},3565:function(n,e,t){n.exports=function(n){var e=t.nc;e&&n.setAttribute("nonce",e)}},7795:function(n){n.exports=function(n){if("undefined"==typeof document)return{update:function(){},remove:function(){}};var e=n.insertStyleElement(n);return{update:function(t){!function(n,e,t){var o="";t.supports&&(o+="@supports (".concat(t.supports,") {")),t.media&&(o+="@media ".concat(t.media," {"));var s=void 0!==t.layer;s&&(o+="@layer".concat(t.layer.length>0?" 
".concat(t.layer):""," {")),o+=t.css,s&&(o+="}"),t.media&&(o+="}"),t.supports&&(o+="}");var r=t.sourceMap;r&&"undefined"!=typeof btoa&&(o+="\n/*# sourceMappingURL=data:application/json;base64,".concat(btoa(unescape(encodeURIComponent(JSON.stringify(r))))," */")),e.styleTagTransform(o,n,e.options)}(e,n,t)},remove:function(){!function(n){if(null===n.parentNode)return!1;n.parentNode.removeChild(n)}(e)}}}},44589:function(n){n.exports=function(n,e){if(e.styleSheet)e.styleSheet.cssText=n;else{for(;e.firstChild;)e.removeChild(e.firstChild);e.appendChild(document.createTextNode(n))}}},82763:function(n,e,t){n.exports=t.p+"527af3e99ad07977aac0.png"}}]);
//# sourceMappingURL=95800.06fc1451fd938f84dbfd.min.js.map | PypiClean |
/ExifReader-0.1.1-py3-none-any.whl/exifreader/utils.py | from fractions import Fraction
def ord_(dta):
if isinstance(dta, str):
return ord(dta)
return dta
def make_string(seq):
"""
Don't throw an exception when given an out of range character.
"""
string = ''
for c in seq:
# Screen out non-printing characters
try:
if 32 <= c and c < 256:
string += chr(c)
except TypeError:
pass
# If no printing chars
if not string:
return str(seq)
return string
def make_string_uc(seq):
"""
Special version to deal with the code in the first 8 bytes of a user comment.
First 8 bytes gives coding system e.g. ASCII vs. JIS vs Unicode.
"""
seq = seq[8:]
# Of course, this is only correct if ASCII, and the standard explicitly
# allows JIS and Unicode.
return make_string(seq)
def get_gps_coords(tags):
lng_ref_tag_name = "GPS GPSLongitudeRef"
lng_tag_name = "GPS GPSLongitude"
lat_ref_tag_name = "GPS GPSLatitudeRef"
lat_tag_name = "GPS GPSLatitude"
# Check if these tags are present
gps_tags = [
lng_ref_tag_name,
lng_tag_name,
lat_tag_name,
lat_tag_name]
for tag in gps_tags:
if tag not in tags.keys():
return None
lng_ref_val = tags[lng_ref_tag_name].values
lng_coord_val = [c.decimal() for c in tags[lng_tag_name].values]
lat_ref_val = tags[lat_ref_tag_name].values
lat_coord_val = [c.decimal() for c in tags[lat_tag_name].values]
lng_coord = sum([c / 60**i for i, c in enumerate(lng_coord_val)])
lng_coord *= (-1)**(lng_ref_val == "W")
lat_coord = sum([c / 60**i for i, c in enumerate(lat_coord_val)])
lat_coord *= (-1)**(lat_ref_val == "S")
return (lat_coord, lng_coord)
class Ratio(Fraction):
    """
    Ratio object that eventually will be able to reduce itself to lowest
    common denominator for printing.

    Unlike Fraction, a zero denominator is tolerated at construction time
    (the raw numerator/denominator are stored as-is); only operations that
    need the value, such as decimal(), will then fail.
    """

    # We're immutable, so use __new__ not __init__
    def __new__(cls, numerator=0, denominator=None):
        try:
            self = super(Ratio, cls).__new__(cls, numerator, denominator)
        except ZeroDivisionError:
            # Keep the degenerate x/0 ratio instead of raising: build a bare
            # instance and store the raw parts on Fraction's private slots.
            self = super(Ratio, cls).__new__(cls)
            self._numerator = numerator
            self._denominator = denominator
        return self

    # Fix: the original assigned to "__new__.doc", which just created a
    # useless attribute instead of copying Fraction's constructor docstring.
    __new__.__doc__ = Fraction.__new__.__doc__

    def __repr__(self):
        return str(self)

    @property
    def num(self):
        # Backward-compatible alias for Fraction.numerator.
        return self.numerator

    @property
    def den(self):
        # Backward-compatible alias for Fraction.denominator.
        return self.denominator

    def decimal(self):
        """Return the ratio as a float (raises ZeroDivisionError for x/0)."""
        return float(self)
# --- PypiClean corpus separator: next file is ChemGAPP_Big/Condition_Variance.py ---
# In[ ]:
import argparse
import pandas as pd
import numpy as np
import scipy.stats as stats
def get_options():
    """Parse the command-line arguments for the variance calculation.

    Returns an argparse namespace with ``InputFile`` and ``OutputFile``.
    """
    arg_parser = argparse.ArgumentParser(
        description="The variance of replicate colony sizes is calculated for each plate and these variance values are averaged for each plate within a condition.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    arg_parser.add_argument(
        "-i", "--InputFile",
        help="The normalised csv file from Check_Normalisation.py",
    )
    arg_parser.add_argument(
        "-o", "--OutputFile",
        help="A CSV file of the average variances for each condition.",
    )
    return arg_parser.parse_args()
def main():
    """Compute per-plate replicate variances and average them per condition.

    Reads the normalised CSV (2-level row index, 4-level column header),
    writes a CSV with the mean variance for each (condition, batch) pair and
    returns that DataFrame.
    """
    options = get_options()
    inputfile = options.InputFile
    outputfile = options.OutputFile
    m = pd.read_csv(inputfile,
                    index_col=[0, 1],
                    header=[0, 1, 2, 3])
    m.columns = m.columns.swaplevel(2, 3)
    # DataFrame with the same index as the input normalised dataset file.
    Var_DF = pd.DataFrame(index=m.index)
    conditions = {x[0:3] for x in m.columns}
    rounds = 0
    # Split into source plate, batch and condition, then compare the
    # variance between the replicates.
    for c in sorted(conditions):
        # Progress counter, printed so long runs show activity.
        rounds = rounds + 1
        print(rounds)
        df1 = m.xs(c, axis=1, drop_level=False)
        ar1 = np.array(df1)
        ar2 = np.array([])
        for j in range(0, len(ar1)):
            # Row variance, ignoring NaNs; all-NaN rows get NaN.
            # Fix: the original appended the string "nan" here, silently
            # turning the whole array into strings.
            if np.count_nonzero(~np.isnan(ar1[j])) == 0:
                var = np.nan
            else:
                var = np.nanvar(ar1[j])
            ar2 = np.append(ar2, var)
        # Append this condition's variances as a new column.
        ar_df = pd.DataFrame(ar2, index=m.index)
        Var_DF = pd.concat([Var_DF, ar_df], axis=1)
    # Column names are the (source plate, batch, condition) triples.
    Var_DF.columns = pd.MultiIndex.from_tuples(sorted(conditions))
    # Average variance for each plate within a condition.
    # Fix: DataFrame.append() was removed in pandas 2.0; build the rows as
    # dicts and construct the DataFrames in one go instead.
    plate_rows = []
    for f in Var_DF.columns:
        plate_rows.append({
            'Condition': f[1],
            'Batch': f[2],
            'Plate': f[0],
            'Average Variance': np.nanmean(Var_DF[f].values.astype(float)),
        })
    ave_Var_plate = pd.DataFrame(
        plate_rows,
        columns=['Condition', 'Batch', 'Plate', 'Average Variance'])
    ave_Var_plate = ave_Var_plate.set_index(['Condition', 'Batch', 'Plate'])
    cond3 = {x[0:2] for x in ave_Var_plate.index}
    # Mean across the different source plates of each (condition, batch).
    cond_rows = []
    for cd3 in sorted(cond3):
        dfVC = ave_Var_plate.xs(cd3, axis=0, drop_level=False)
        cond_rows.append({
            'Condition': cd3[0],
            'Batch': cd3[1],
            'Average Variance': dfVC['Average Variance'].mean(),
        })
    ave_Var_cond = pd.DataFrame(
        cond_rows,
        columns=['Condition', 'Batch', 'Average Variance'])
    ave_Var_cond.to_csv(outputfile, index=False)
    return ave_Var_cond
if __name__ == "__main__":
    main()

# --- PypiClean corpus separator: next file is kikit/drc_ui.py ---
import click
from enum import Enum
class ReportLevel(Enum):
    """Severity levels a DRC report entry can have."""

    warning = "warning"
    error = "error"

    def __str__(self):
        # Render as the bare value so the level reads naturally in CLI help.
        return self.value
class EnumType(click.Choice):
    """click.Choice variant that accepts and returns members of an Enum."""

    def __init__(self, enum: Enum, case_sensitive=False):
        self.__enum = enum
        # The user-visible choices are the enum member values.
        choices = [item.value for item in enum]
        super().__init__(choices=choices, case_sensitive=case_sensitive)

    def convert(self, value, param, ctx):
        # Already-converted members (and None) pass straight through.
        if value is None or isinstance(value, Enum):
            return value
        # Validate against the string choices, then map back to the Enum.
        return self.__enum(super().convert(value, param, ctx))
@click.group()
def drc():
    """
    Validate design rules of the board
    """
    # Click group container; subcommands (e.g. `run`) are attached below.
    pass
@click.command()
@click.argument("boardfile", type=click.Path(dir_okay=False))
@click.option("--useMm/--useInch", default=True)
@click.option("--strict/--weak", default=False,
    help="Check all track errors")
@click.option("--ignoreExcluded/--reportExcluded", default=True,
    help="Report items that are excluded")
@click.option("--level", type=EnumType(ReportLevel), default=ReportLevel.error,
    help="Minimum severity to report")
def run(boardfile, usemm, ignoreexcluded, strict, level):
    """
    Check DRC rules. If no rules are violated, the process exits with code 0.
    If any errors are detected, the process exits with non-zero return code and
    prints DRC report on the standard output.
    """
    from kikit.drc import runImpl
    import sys
    from pcbnewTransition import pcbnew
    from kikit.common import fakeKiCADGui
    app = fakeKiCADGui()
    try:
        board = pcbnew.LoadBoard(boardfile)
        # failed is the number of violations; doubles as the exit code.
        failed = runImpl(board, usemm, ignoreexcluded, strict, level, lambda x: print(x))
        if not failed:
            print("No DRC errors found.")
        else:
            print("Found some DRC violations. See the report above.")
        sys.exit(failed)
    except Exception as e:
        # Fix: the original did `raise e` first, which made the error
        # message and exit(1) below unreachable dead code.
        sys.stderr.write("An error occurred: " + str(e) + "\n")
        sys.exit(1)
drc.add_command(run)

# --- PypiClean corpus separator: next file is nuitka/tools/watch/__main__.py ---
import os
import sys
from optparse import OptionParser
from nuitka.containers.OrderedDicts import OrderedDict
from nuitka.PythonVersions import getTestExecutionPythonVersions
from nuitka.tools.testing.Common import extractNuitkaVersionFromFilePath
from nuitka.Tracing import OurLogger
from nuitka.TreeXML import fromFile
from nuitka.utils.Execution import check_call, executeProcess
from nuitka.utils.FileOperations import (
changeTextFileContents,
getFileContents,
getFileList,
listDir,
makePath,
relpath,
withDirectoryChange,
)
from nuitka.utils.Hashing import getFileContentsHash
from nuitka.utils.InstalledPythons import findPythons
from nuitka.utils.Utils import isLinux, isMacOS, isWin32Windows
from nuitka.utils.Yaml import parseYaml
# TODO: Command line interface
# Recompile policy for existing results: "newer" recompiles only when the
# Nuitka version is newer or the pipfile changed; "force" always recompiles.
nuitka_update_mode = "newer"
# Logger used for all watch progress output.
watch_logger = OurLogger("", base_style="blue")
def _compareNuitkaVersions(version_a, version_b):
def _numberize(version):
return tuple(int(d) for d in version.split("rc")[0].split("."))
return _numberize(version_a) < _numberize(version_b)
def scanCases(path):
    """Recursively yield every "case.yml" file at or below *path*."""
    case_file = os.path.join(path, "case.yml")
    if os.path.exists(case_file):
        yield case_file
    # Recurse into sub-directories; listDir yields (full_path, name) pairs.
    for sub_path, _sub_name in listDir(path):
        if os.path.isdir(sub_path):
            for nested_case in scanCases(sub_path):
                yield nested_case
def selectPythons(python_version_req, anaconda):
    """Yield at most one acceptable installed Python per known version.

    :param python_version_req: optional Python expression over
        ``python_version`` (a hex version int, e.g. 0x370) taken from the
        case yaml file; must evaluate true for a Python to be selected.
    :param anaconda: when falsy, Anaconda installations are skipped.
    """
    # installed_pythons (module global, filled in main()) maps a version
    # string to the list of candidate installations for that version.
    for _python_version_str, installed_python_for_version in installed_pythons.items():
        for installed_python in installed_python_for_version:
            if not anaconda and installed_python.isAnacondaPython():
                continue
            if python_version_req is not None:
                # We trust the case yaml files, pylint: disable=eval-used
                if not eval(
                    python_version_req,
                    None,
                    {"python_version": installed_python.getHexVersion()},
                ):
                    continue
            yield installed_python
            # Only the first acceptable install per version is used.
            break
def selectOS(os_values):
    """Return which of the given OS names matches the current host.

    Exits with an error on unknown names; returns None when the host OS is
    not among *os_values*.
    """
    known_names = ("Linux", "Win32", "macOS")
    for os_name in os_values:
        if os_name not in known_names:
            watch_logger.sysexit("Illegal value for OS: %s" % os_name)
    if "Linux" in os_values and isLinux():
        return "Linux"
    if "Win32" in os_values and isWin32Windows():
        return "Win32"
    if "macOS" in os_values and isMacOS():
        return "macOS"
    return None
def getPlatformRequirements(installed_python, case_data):
    """Return the pip requirements a case needs on this Python/platform.

    Starts from the case's own requirements and appends Nuitka's
    housekeeping dependencies depending on interpreter version and OS.
    """
    # spell-checker: ignore orderedset,imageio
    requirements = list(case_data["requirements"])
    hex_version = installed_python.getHexVersion()

    # Onefile-only dependencies are not currently exercised in watches.
    needs_onefile = False

    if hex_version >= 0x370:
        requirements.append("ordered-set >= 4.1.0")
    if hex_version < 0x300:
        requirements.append("subprocess32")
    if needs_onefile and hex_version >= 0x370:
        requirements.append("zstandard >= 0.15")

    # Older Pythons use the compiled orderedset package where available.
    if hex_version < 0x370 and os.name != "nt" and sys.platform != "darwin":
        requirements.append("orderedset >= 2.0.3")
    if hex_version < 0x370 and sys.platform == "darwin":
        requirements.append("orderedset >= 2.0.3")

    # For icon conversion.
    if case_data.get("icons", "no") == "yes":
        requirements.append("imageio")

    return requirements
def _updatePipenvFile(installed_python, case_data, dry_run, result_path):
    """Write or refresh the Pipfile for one case/Python combination.

    Returns ``(changed, filename)`` where *changed* says whether the content
    differs from what is on disk (with *dry_run*, the file is only compared,
    not written).
    """
    pipenv_filename = os.path.join(result_path, "Pipfile")
    pipenv_package_requirements = []
    for requirement in getPlatformRequirements(
        installed_python=installed_python, case_data=case_data
    ):
        # Ignore spaces in requirements.
        requirement = requirement.replace(" ", "")
        if all(char not in requirement for char in "=><"):
            # Unversioned requirement: any version will do.
            pipenv_package_requirements.append('%s = "*"' % requirement)
        else:
            # Split "name>=1.2" into name and version spec at the first
            # comparison-operator character.
            operator_index = min(
                requirement.find(char) for char in "=><" if char in requirement
            )
            pipenv_package_requirements.append(
                '%s = "%s"'
                % (requirement[:operator_index], requirement[operator_index:])
            )
    # TODO: Other indexes, e.g. nvidia might be needed too
    changed_pipenv_file = changeTextFileContents(
        pipenv_filename,
        """\
[[source]]
name = "pypi"
url = "https://pypi.org/simple"
verify_ssl = true
[requires]
python_version = "%(python_version)s"
[packages]
%(pipenv_package_requirements)s
"""
        % {
            "pipenv_package_requirements": "\n".join(pipenv_package_requirements),
            "python_version": installed_python.getPythonVersion(),
        },
        compare_only=dry_run,
    )
    return changed_pipenv_file, pipenv_filename
def _updatePipenvLockFile(
    installed_python, dry_run, pipenv_filename_full, no_pipenv_update
):
    """Create or refresh the pipenv virtual environment for a case.

    Without a lock file, a fresh "pipenv install" is performed. With one,
    "pipenv update" runs unless updates are disabled or this is a dry run.
    """
    if not os.path.exists("Pipfile.lock"):
        # No lock file yet: do a fresh install of the environment.
        watch_logger.info(
            "Working with pipenv file '%s' to install virtualenv, may take a while."
            % pipenv_filename_full
        )
        check_call(
            [
                installed_python.getPythonExe(),
                "-m",
                "pipenv",
                "install",
                "--python",
                installed_python.getPythonExe(),
            ]
        )
        return
    if no_pipenv_update:
        watch_logger.info(
            "Keeping existing lock file with pipenv file '%s'."
            % pipenv_filename_full
        )
    elif not dry_run:
        watch_logger.info(
            "Working with pipenv file '%s' to update virtualenv, may take a while."
            % pipenv_filename_full
        )
        check_call(
            [
                installed_python.getPythonExe(),
                "-m",
                "pipenv",
                "update",
                "--python",
                installed_python.getPythonExe(),
            ]
        )
def _compileCase(case_data, case_dir, installed_python):
    """Compile one case with Nuitka inside its pipenv and smoke-test it.

    Runs Nuitka via "pipenv run", producing a diffable compilation report
    that embeds the current Pipfile.lock hash; for non-interactive cases the
    resulting binary is executed once and its output captured to files.
    """
    check_call(
        [
            installed_python.getPythonExe(),
            "-m",
            "pipenv",
            "run",
            "python",
            nuitka_binary,
            os.path.join(case_dir, case_data["filename"]),
            "--report=compilation-report.xml",
            "--report-diffable",
            # Embed the environment hash so later runs can detect changes.
            "--report-user-provided=pipenv_hash=%s"
            % getFileContentsHash("Pipfile.lock"),
        ]
    )
    if case_data["interactive"] == "no":
        # Locate the produced binary; exactly one is expected.
        binaries = getFileList(
            ".",
            ignore_filenames=("__constants.bin",),
            only_suffixes=(".exe" if os.name == "nt" else ".bin"),
        )
        if len(binaries) != 1:
            sys.exit("Error, failed to identify created binary.")
        stdout, stderr, exit_nuitka = executeProcess([binaries[0]])
        if exit_nuitka != 0:
            sys.exit(
                "Error, failed to execute %s with code %d." % (binaries[0], exit_nuitka)
            )
        # Keep the runtime output for comparison between runs.
        with open("compiled-stdout.txt", "wb") as output:
            output.write(stdout)
        with open("compiled-stderr.txt", "wb") as output:
            output.write(stderr)
def _updateCase(
    case_dir, case_data, dry_run, no_pipenv_update, installed_python, result_path
):
    """Prepare one case's pipenv environment and recompile it if needed.

    Whether a recompile happens depends on the global nuitka_update_mode,
    the Nuitka version recorded in the previous compilation report and the
    hash of the Pipfile.lock it was built with.
    """
    # Not good for dry run, but tough life.
    makePath(result_path)
    # Update the pipenv file in any case, ought to be stable but we follow
    # global changes this way.
    changed_pipenv_file, pipenv_filename = _updatePipenvFile(
        installed_python=installed_python,
        case_data=case_data,
        dry_run=dry_run,
        result_path=result_path,
    )
    pipenv_filename_full = os.path.join(case_dir, pipenv_filename)
    if dry_run and changed_pipenv_file:
        watch_logger.info("Would create pipenv file '%s'." % pipenv_filename_full)
        return
    with withDirectoryChange(result_path):
        # Update or create lockfile of pipenv.
        _updatePipenvLockFile(
            installed_python=installed_python,
            dry_run=dry_run,
            pipenv_filename_full=pipenv_filename_full,
            no_pipenv_update=no_pipenv_update,
        )
        # Check if compilation is required.
        if os.path.exists("compilation-report.xml"):
            # Compare the previous report's Nuitka version and environment
            # hash against the current ones.
            old_report_root = fromFile("compilation-report.xml")
            existing_hash = getFileContentsHash("Pipfile.lock")
            old_report_root_hash = (
                old_report_root.find("user-data").find("pipenv_hash").text
            )
            old_nuitka_version = old_report_root.attrib["nuitka_version"]
            if nuitka_update_mode == "force":
                need_compile = True
            elif nuitka_update_mode == "newer":
                # Recompile when Nuitka is newer than the recorded version.
                if _compareNuitkaVersions(old_nuitka_version, nuitka_version):
                    need_compile = True
                else:
                    # Same or older Nuitka: recompile only on pipfile change.
                    if existing_hash != old_report_root_hash:
                        watch_logger.info(
                            "Recompilation with identical Nuitka for '%s' due to changed pipfile."
                            % pipenv_filename_full
                        )
                        need_compile = True
                    elif old_nuitka_version == nuitka_version:
                        watch_logger.info(
                            "Skipping compilation with identical Nuitka for '%s'."
                            % pipenv_filename_full
                        )
                        need_compile = False
                    else:
                        watch_logger.info(
                            "Skipping compilation of old Nuitka %s result with Nuitka %s for '%s'."
                            % (
                                old_nuitka_version,
                                nuitka_version,
                                pipenv_filename_full,
                            )
                        )
                        need_compile = False
            else:
                need_compile = False
        else:
            # No previous report: always compile.
            need_compile = True
        if need_compile:
            _compileCase(
                case_data=case_data,
                case_dir=case_dir,
                installed_python=installed_python,
            )
def updateCase(case_dir, case_data, dry_run, no_pipenv_update):
    """Run one watch case for every applicable installed Python."""
    case_name = case_data["case"]
    # Skip cases that do not apply to the host operating system.
    os_name = selectOS(case_data["os"])
    if os_name is None:
        return
    # Skip cases that need a newer Nuitka than the one under test.
    nuitka_min_version = case_data.get("nuitka")
    if nuitka_min_version is not None and _compareNuitkaVersions(
        nuitka_version, nuitka_min_version
    ):
        return
    wants_anaconda = case_data["anaconda"] == "yes"
    version_req = case_data.get("python_version_req")
    for installed_python in selectPythons(
        anaconda=wants_anaconda, python_version_req=version_req
    ):
        watch_logger.info("Consider with Python %s." % installed_python)
        # Results are kept per case, Python version and OS.
        result_path = "result/%(case_name)s/%(python_version)s-%(os_name)s" % {
            "case_name": case_name,
            "os_name": os_name,
            "python_version": installed_python.getPythonVersion(),
        }
        _updateCase(
            case_dir=case_dir,
            case_data=case_data,
            dry_run=dry_run,
            no_pipenv_update=no_pipenv_update,
            installed_python=installed_python,
            result_path=result_path,
        )
def updateCases(case_dir, dry_run, no_pipenv_update):
    """Run every case described in the local "case.yml" file."""
    all_cases = parseYaml(getFileContents("case.yml", mode="rb"))
    for case_data in all_cases:
        updateCase(
            case_dir=case_dir,
            case_data=case_data,
            dry_run=dry_run,
            no_pipenv_update=no_pipenv_update,
        )
# Populated in main(): maps a version string to the Pythons found for it.
installed_pythons = OrderedDict()
# Path to the Nuitka binary and its version string, both set in main().
nuitka_binary = None
nuitka_version = None
def main():
    """Entry point: locate Nuitka, parse options, and run all watch cases."""
    global nuitka_binary  # shared for all run, pylint: disable=global-statement
    nuitka_binary = os.path.normpath(
        os.path.join(os.path.dirname(__file__), "..", "..", "..", "bin", "nuitka")
    )
    parser = OptionParser()
    parser.add_option(
        "--dry-run",
        # Fix: this was action="store_false", which with a default of False
        # made the flag a no-op; it must set dry_run to True.
        action="store_true",
        dest="dry_run",
        default=False,
        help="""\
Do not change anything, just report what would be done. Default %default.""",
    )
    parser.add_option(
        "--python-version",
        action="append",
        dest="python_versions",
        default=[],
        help="""\
Python versions to consider, by default all supported versions in descending order or in given order.""",
    )
    parser.add_option(
        "--nuitka-binary",
        action="store",
        dest="nuitka_binary",
        default=nuitka_binary,
        help="""\
Nuitka binary to compile with. Defaults to one near the nuitka-watch usage.""",
    )
    parser.add_option(
        "--no-pipenv-update",
        action="store_true",
        dest="no_pipenv_update",
        default=False,
        help="""\
Do not update the pipenv environment. Best to see only effect of Nuitka update. Default %default.""",
    )
    options, positional_args = parser.parse_args()
    assert len(positional_args) <= 1, positional_args
    # The watch directory may be given as the only positional argument.
    if positional_args and os.path.isdir(positional_args[0]):
        base_dir = positional_args[0]
    else:
        base_dir = os.getcwd()
    # Collect the installed Pythons to consider, newest first by default.
    for python_version in options.python_versions or reversed(
        getTestExecutionPythonVersions()
    ):
        installed_pythons[python_version] = findPythons(python_version)
    nuitka_binary = os.path.abspath(os.path.expanduser(options.nuitka_binary))
    assert os.path.exists(nuitka_binary)
    global nuitka_version  # singleton, pylint: disable=global-statement
    nuitka_version = extractNuitkaVersionFromFilePath(
        os.path.join(os.path.dirname(nuitka_binary), "..", "nuitka", "Version.py")
    )
    watch_logger.info("Working with Nuitka %s." % nuitka_version)
    base_dir = os.path.abspath(base_dir)
    with withDirectoryChange(base_dir):
        for case_filename in scanCases(base_dir):
            case_relpath = relpath(case_filename, start=base_dir)
            watch_logger.info(
                "Consider watch cases from Yaml file '%s'." % case_relpath
            )
            with withDirectoryChange(os.path.dirname(case_filename)):
                updateCases(
                    os.path.dirname(case_filename),
                    dry_run=options.dry_run,
                    no_pipenv_update=options.no_pipenv_update,
                )
if __name__ == "__main__":
    main()

# --- PypiClean corpus separator: next file is fb_client/ui/gui_class.py ---
import sys
from PyQt5 import QtWidgets, QtGui
from fb_client.accounts.account import UserManager
from fb_client.core.handlers import GuiReciever
from fb_client.protocol.jim import Messages
from fb_client.utils.utils import get_path
from .ui_files.client_ui import Ui_MainWindow
from .dialogs import EnterDialog, HelpDialog, AboutDialog
from .acc_settings import AccSetiingsDialog
class UserGUI(QtWidgets.QMainWindow):
    """Main chat window of the client GUI.

    Wraps the generated Qt UI, handles login, the contact list and
    sending/receiving chat messages over *socket*.
    """

    # Shared JIM protocol message builder for all windows.
    messages = Messages()

    def __init__(self, socket, parent=None):
        super().__init__()
        # Client state; filled in after the login dialog succeeds.
        self.user = None
        self.socket = socket
        self.thread = None
        self.contacts = []
        # Icons for contact entries and for the window itself.
        self.icon_contact = QtGui.QIcon()
        self.icon_contact.addPixmap(QtGui.QPixmap(get_path("finger-man.png")),
                                    QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.icon = QtGui.QIcon()
        self.icon.addPixmap(QtGui.QPixmap(get_path("icon.png")),
                            QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.initUI()

    def initUI(self):
        """Build the window and wire up all menu/button/list signals."""
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.setWindowIcon(self.icon)
        self.center()
        self.start_chat()
        self.set_avatar()
        # MENU
        self.ui.actionExit.triggered.connect(self.quit)
        self.ui.actionAccSet.triggered.connect(self.acc_settings)
        self.ui.actionHelp.triggered.connect(self.get_help)
        self.ui.actionAbout.triggered.connect(self.about)
        # Buttons and chatContacts
        self.ui.addButton.clicked.connect(self.add_contact)
        self.ui.delButton.clicked.connect(self.del_contact)
        self.ui.sendButton.clicked.connect(self.send)
        self.ui.chatText.returnPressed.connect(self.send)
        self.ui.contactListWidget.itemDoubleClicked.connect(self.add_privat)

    def center(self):
        """Move the window to the center of the screen."""
        screen = QtWidgets.QDesktopWidget().screenGeometry()
        size = self.geometry()
        # NOTE(review): the offsets are floats; some PyQt5 releases require
        # ints for move() -- confirm on the targeted Qt version.
        self.move((screen.width()-size.width())/2,
                  (screen.height()-size.height())/2)

    def start_chat(self):
        """Show the login dialog and load the initial chat state.

        On success: sends a presence message, shows the server greeting,
        fills in the user name and the contact list.
        """
        enter_dialog = EnterDialog(parent=self)
        enter = enter_dialog.exec()
        if enter == QtWidgets.QDialog.Accepted:
            data = enter_dialog.data
            self.user = data["user"]
            presence = self.messages.presence_msg(account_name=self.user.username)
            self.socket.send_msg(presence)
            data_msg = self.socket.get_msg()
            response = data_msg.get('alert', None)
            # The server greeting is rendered with a red "SERVER#" prefix.
            msg = """<p style="margin-top:0px; margin-bottom:0px;
            margin-left:0px; margin-right:0px;
            -qt-block-indent:0; text-indent:0px;">
            <span style="color:red;">SERVER#</span>
            {}</p>""".format(response)
            self.ui.chatWindow.insertHtml(msg)
            self.ui.username.setText(self.user.username)
            contacts = data_msg.get('contacts', None)
            if contacts:
                self.contacts = contacts
                self.get_contacts()

    def acc_settings(self):
        """Open the account settings dialog; update the avatar on accept."""
        user_manager = UserManager()
        acc = AccSetiingsDialog(self.user, parent=self)
        dialog = acc.exec()
        if dialog == QtWidgets.QDialog.Accepted:
            user_manager.create_avatar(acc.fname['fname'], self.user)
            self.set_avatar()

    def set_avatar(self):
        """Show the user's avatar, falling back to the default icon."""
        if self.user.avatar is None:
            self.ui.avatar.setPixmap(QtGui.QPixmap(get_path("icon.png")))
        else:
            self.ui.avatar.setPixmap(QtGui.QPixmap(self.user.avatar))

    def get_help(self):
        """Show the dialog describing the chat."""
        hd = HelpDialog(parent=self)
        hd.exec()

    def about(self):
        """Show the dialog describing the program."""
        ab = AboutDialog(parent=self)
        ab.exec()

    def get_chat(self):
        """Return the chat window widget (used by the receiver thread)."""
        return self.ui.chatWindow

    def get_contacts(self):
        """Re-populate the contact list widget from self.contacts."""
        self.ui.contactListWidget.clear()
        for contact in self.contacts:
            item = QtWidgets.QListWidgetItem()
            item.setIcon(self.icon_contact)
            item.setText(contact)
            self.ui.contactListWidget.addItem(item)

    def add_contact(self):
        """Ask for a contact name and add it to the contact list."""
        contact_name, ok = QtWidgets.QInputDialog.getText(self,
                                                          'Новый контакт',
                                                          'Имя(nickname):')
        if ok:
            add_contact = self.messages.edit_contact('add',
                                                     self.user.username,
                                                     contact_name)
            self.socket.send_msg(add_contact)
            self.contacts.append(contact_name)
            self.get_contacts()

    def del_contact(self):
        """Remove the currently selected contact."""
        item = self.ui.contactListWidget.currentIndex()
        contact_name = item.data()
        del_contact = self.messages.edit_contact('del',
                                                 self.user.username,
                                                 contact_name)
        self.socket.send_msg(del_contact)
        self.contacts.remove(contact_name)
        self.get_contacts()

    def add_privat(self):
        """On double click, pre-fill the private-message command."""
        name = self.ui.contactListWidget.currentIndex().data()
        self.ui.chatText.setStyleSheet('color: purple')
        # Fix: setText() returns None; the original bound its result to an
        # unused variable.
        self.ui.chatText.setText('/ш {}'.format(name))

    def send(self):
        """Send the typed message, broadcast or private ("/ш <name> ...")."""
        # TODO: switch from 'broadcast' to '#all' once the server supports it.
        action = 'broadcast'
        to = '#all'
        name = self.user.username
        msg = self.ui.chatText.text()
        if msg.startswith('/ш'):
            # "/ш <name> <text>" addresses a single recipient.
            action = 'msg'
            line = msg.split()
            to = line[1]
            msg = ' '.join(line[2:])
        message = self.messages.get_user_msg(action, msg, to=to, name=name)
        self.socket.send_msg(message)
        show_msg = """<p><span style="color:green;">Вы#</span>
            {}</p>""".format(msg)
        self.ui.chatWindow.append(show_msg)
        self.ui.chatText.clear()
        self.ui.chatText.setStyleSheet('color: black')

    def quit(self):
        """Notify the server that the client disconnects, then close."""
        message = self.messages.action(username=self.user.username)
        self.socket.send_msg(message)
        if self.thread:
            self.thread.exit()
        # Fix: this line originally ended in stray "| PypiClean" text,
        # which raised a NameError at runtime.
        self.close()
# --- PypiClean corpus separator: next file is fetch_cord/run_rpc.py ---
from typing import Callable, Dict
from pypresence import Presence, exceptions
import time, sys
# import info about system
from .args import parse_args
from .config import ConfigError, load_config
from .computer.Computer import Computer
args = parse_args()
class Run_rpc:
    """Drives one or more pypresence Presence connections in a loop."""

    rpcs: Dict[str, Presence]
    config: Dict
    loops: Dict[str, Callable[['Run_rpc', str, Computer], None]]  # Cannot use Run_rpc for type hinting unless doing the __future__.annotations import
    loops_indexes: Dict[int, str]
    poll_rate: int
    update: Callable

    def __init__(self):
        self.rpcs = {}
        try:
            self.config = load_config()
        except ConfigError as e:
            # Fix: the original %-formatted a string with no placeholder,
            # which raised TypeError instead of printing; also make sure
            # self.config exists so later reads do not fail.
            print("Error loading config file, using default values: %s" % str(e))
            self.config = {}

    def set_loop(
        self, loops: Dict, loops_indexes: Dict, update: Callable, poll_rate: int = 3
    ):
        """Register the per-service loop callables and the refresh callback."""
        self.loops = loops
        self.loops_indexes = loops_indexes
        self.poll_rate = poll_rate
        self.update = update

    def run_loop(self, computer: Computer):
        """Cycle forever over the registered loops, reconnecting as needed."""
        try:
            loop = 0
            while True:
                for i in range(len(self.loops_indexes)):
                    # Refresh system info every poll_rate iterations.
                    if loop == self.poll_rate:
                        self.update()
                        loop = 0
                    try:
                        client_id, func = self.loops[self.loops_indexes[i]]
                        if args.debug:
                            print(self.rpcs)
                            print(
                                "{} not in : {}".format(
                                    self.loops_indexes[i],
                                    self.loops_indexes[i] not in self.rpcs,
                                )
                            )
                        # Lazily create and connect one Presence per loop key.
                        if self.loops_indexes[i] not in self.rpcs:
                            self.rpcs[self.loops_indexes[i]] = Presence(client_id)
                            self.try_connect(self.loops_indexes[i])
                        func(self, self.loops_indexes[i], computer)
                        loop += 1
                    except ConnectionResetError:
                        # Discord went away; reconnect this presence.
                        self.try_connect(self.loops_indexes[i])
        except KeyboardInterrupt:
            print("Closing connection.")
            sys.exit(0)

    def try_connect(self, key: str):
        """Connect the Presence for *key*, retrying while Discord is closed."""
        while True:
            try:
                if args.debug:
                    print('try_connect(key="{}") on {}'.format(key, self.rpcs[key]))
                self.rpcs[key].connect()
                break
            except ConnectionRefusedError:
                print(
                    "RPC connection refused (is Discord open?); trying again in 30 seconds"
                )
                time.sleep(30)

    def try_clear(self, key: str):
        """Close the Presence for *key*.

        pypresence's clear() no longer works, so closing the connection is
        the only reliable way to drop the status.
        """
        self.rpcs[key].close()

    def try_update(
        self,
        key: str,
        state,
        details,
        large_image,
        large_text,
        small_image,
        small_text,
        start,
    ):
        """Push a status update to the Presence for *key*, ignoring stale IDs."""
        try:
            if args.debug:
                print('try_update(key="{}") on {}'.format(key, self.rpcs[key]))
            self.rpcs[key].update(
                state=state,
                details=details,
                large_image=large_image,
                large_text=large_text,
                small_image=small_image,
                small_text=small_text,
                start=start,
            )
        # ConnectionResetError is here to avoid crashing if Discord is still
        # just starting.
        except (ConnectionResetError, exceptions.InvalidID):
            pass
# --- PypiClean corpus separator: next file is fair_dynamic_rec rankers/linear_submodular_bandit ---
import numpy as np
from .abstract_ranker import AbstractRanker
class LSB1(AbstractRanker):
    """Linear submodular bandit ranker (LSBGreedy-style, NIPS'11).

    Greedily builds a ranked list whose items maximise an upper confidence
    bound on topical-coverage gain, and updates a linear model from click
    feedback using the Woodbury matrix identity for fast inverse updates.
    """

    def __init__(self, config, dataObj, parameters=None):
        super(LSB1, self).__init__(config, dataObj)
        self.n_samples = np.zeros(dataObj.n_users)
        self.n_clicks = np.zeros(dataObj.n_users)
        # Dimensionality of the topical feature space.
        self.dim = self.dataObj.feature_data['train_item_topical_features'].shape[1]
        self.prng = np.random.RandomState(seed=config.seed)
        # Exploration weight and noise level.
        self.alpha = float(parameters["alpha"]["value"])
        self.sigma = float(parameters["sigma"]["value"])
        # Counts updates skipped because of a singular matrix.
        self.ill_matrix_counter = 0
        # NOTE(review): theta is kept per user, but b/M/MInv below are shared
        # across all users -- confirm this asymmetry is intended.
        self.theta = np.ones((self.dataObj.n_users, self.dim))  # d-dimensional
        self.b = np.zeros(self.dim)  # d
        self.M = np.eye(self.dim)  # d by d
        self.MInv = np.eye(self.dim)  # for fast matrix inverse computation, d by d
        # Scratch values kept from the last update (for ill-conditioned cases).
        self.b_tmp = np.zeros(self.dim)
        self.MInv_tmp = np.zeros((self.dim, self.dim))
        self.batch_features = None

    def get_ranking(self, batch_users, sampled_item=None, round=None):
        """
        :param batch_users: user ids to produce rankings for.
        :return: rankings: array of ranked item ids, one row per user.
        """
        rankings = np.zeros((len(batch_users), self.config.list_size), dtype=int)
        self.batch_features = np.zeros((len(batch_users), self.config.list_size, self.dim))
        # Small random tie-breaker keeps argmax from favouring low indices.
        tie_breaker = self.prng.rand(len(self.dataObj.feature_data['train_item_topical_features']))
        for i in range(len(batch_users)):
            coverage = np.zeros(self.dim)
            ranking = []
            for j in range(self.config.list_size):
                # Conditional topic coverage of each item given the list so
                # far; Eq. (3) of the NIPS 11 paper (lines 8-11).
                gain_in_topic_coverage = self.conditional_coverage(x=self.dataObj.feature_data['train_item_topical_features'], coverage=coverage)
                # Confidence bound term alpha * sqrt(x M^-1 x^T).
                cb = self.alpha * np.sqrt(np.multiply(np.dot(gain_in_topic_coverage, self.MInv), gain_in_topic_coverage).sum(axis=1))
                score = np.dot(gain_in_topic_coverage, self.theta[batch_users[i]])
                ucb = score + cb + 1e-6 * tie_breaker
                winner = np.argmax(ucb)
                # Skip items already placed in the list.
                while winner in ranking:
                    ucb[winner] = -np.inf
                    winner = np.argmax(ucb)
                ranking.append(winner)
                self.batch_features[i][j] = gain_in_topic_coverage[winner]
                coverage = self.ranking_coverage(self.dataObj.feature_data['train_item_topical_features'][ranking])
            rankings[i] = np.asarray(ranking)
        return rankings

    def update(self, batch_users, rankings, clicks):
        """Update the linear model from click feedback.

        Uses the Woodbury matrix identity so MInv is refreshed without a
        full re-inversion (Lines 3-5 of Algorithm 1 of NIPS 11).
        """
        for i in range(len(batch_users)):
            _clicks, _batch_features = self.__collect_feedback(clicks, i)
            # x * M^-1 * x^T
            xmx = np.dot(_batch_features, np.dot(self.MInv, _batch_features.T))
            # (1/sigma I + xmx)^-1; skip the whole update if not invertible.
            try:
                tmp_inv = np.linalg.inv(1 / self.sigma * np.eye(len(_batch_features)) + xmx)
            except np.linalg.LinAlgError:
                self.ill_matrix_counter += 1
                return
            # M^-1 * x^T
            MInv_xT = self.MInv.dot(_batch_features.T)
            # MInv_xT * tmp_inv * MInv_xT^T is the correction term.
            self.MInv_tmp = np.dot(np.dot(MInv_xT, tmp_inv), MInv_xT.T)
            self.MInv -= self.MInv_tmp
            self.M += self.sigma * _batch_features.T.dot(_batch_features)
            # Accumulate click feedback into b and refresh this user's theta.
            self.b_tmp = np.dot(_clicks, _batch_features)
            self.b += self.b_tmp
            self.theta[batch_users[i]] = np.dot(self.MInv, self.b)
            self.n_samples[batch_users[i]] += len(_clicks)
            self.n_clicks[batch_users[i]] += sum(_clicks)

    def __collect_feedback(self, clicks, batch_user_id):
        """Truncate feedback according to the configured click model.

        :return: (observed clicks, matching feature rows).
        """
        if self.config.feedback_model == 'cascade':
            # Cascade assumption: only items up to and including the first
            # click are observed.
            if np.sum(clicks[batch_user_id]) == 0:
                return clicks[batch_user_id], self.batch_features[batch_user_id]
            first_click = np.where(clicks[batch_user_id])[0][0]
            return clicks[batch_user_id][:first_click + 1], self.batch_features[batch_user_id][:first_click + 1]
        elif self.config.feedback_model == 'dcm':
            # DCM: items up to and including the last click are observed.
            if np.sum(clicks[batch_user_id]) == 0:
                return clicks[batch_user_id], self.batch_features[batch_user_id]
            last_click = np.where(clicks[batch_user_id])[0][-1]
            return clicks[batch_user_id][:last_click + 1], self.batch_features[batch_user_id][:last_click + 1]
        else:
            # All items are observed. Fix: this line originally ended in
            # stray "| PypiClean" text, which raised a NameError at runtime.
            return clicks[batch_user_id], self.batch_features[batch_user_id]
# --- PypiClean corpus separator: next file is django/utils/cache.py ---
import time
from collections import defaultdict
from django.conf import settings
from django.core.cache import caches
from django.http import HttpResponse, HttpResponseNotModified
from django.utils.crypto import md5
from django.utils.http import http_date, parse_etags, parse_http_date_safe, quote_etag
from django.utils.log import log_response
from django.utils.regex_helper import _lazy_re_compile
from django.utils.timezone import get_current_timezone_name
from django.utils.translation import get_language
cc_delim_re = _lazy_re_compile(r"\s*,\s*")
def patch_cache_control(response, **kwargs):
"""
Patch the Cache-Control header by adding all keyword arguments to it.
The transformation is as follows:
* All keyword parameter names are turned to lowercase, and underscores
are converted to hyphens.
* If the value of a parameter is True (exactly True, not just a
true value), only the parameter name is added to the header.
* All other parameters are added with their value, after applying
str() to it.
"""
def dictitem(s):
t = s.split("=", 1)
if len(t) > 1:
return (t[0].lower(), t[1])
else:
return (t[0].lower(), True)
def dictvalue(*t):
if t[1] is True:
return t[0]
else:
return "%s=%s" % (t[0], t[1])
cc = defaultdict(set)
if response.get("Cache-Control"):
for field in cc_delim_re.split(response.headers["Cache-Control"]):
directive, value = dictitem(field)
if directive == "no-cache":
# no-cache supports multiple field names.
cc[directive].add(value)
else:
cc[directive] = value
# If there's already a max-age header but we're being asked to set a new
# max-age, use the minimum of the two ages. In practice this happens when
# a decorator and a piece of middleware both operate on a given view.
if "max-age" in cc and "max_age" in kwargs:
kwargs["max_age"] = min(int(cc["max-age"]), kwargs["max_age"])
# Allow overriding private caching and vice versa
if "private" in cc and "public" in kwargs:
del cc["private"]
elif "public" in cc and "private" in kwargs:
del cc["public"]
for k, v in kwargs.items():
directive = k.replace("_", "-")
if directive == "no-cache":
# no-cache supports multiple field names.
cc[directive].add(v)
else:
cc[directive] = v
directives = []
for directive, values in cc.items():
if isinstance(values, set):
if True in values:
# True takes precedence.
values = {True}
directives.extend([dictvalue(directive, value) for value in values])
else:
directives.append(dictvalue(directive, values))
cc = ", ".join(directives)
response.headers["Cache-Control"] = cc
def get_max_age(response):
    """
    Return the max-age from the response Cache-Control header as an integer,
    or None if it wasn't found or wasn't an integer.
    """
    if not response.has_header("Cache-Control"):
        return None
    directives = dict(
        _to_tuple(part)
        for part in cc_delim_re.split(response.headers["Cache-Control"])
    )
    # Missing key or a non-integer value both mean "no usable max-age".
    try:
        return int(directives["max-age"])
    except (ValueError, TypeError, KeyError):
        return None
def set_response_etag(response):
    """Set the ETag header from an MD5 of the response body, when possible."""
    # Streaming responses have no materialized content to hash; empty
    # bodies get no ETag either.
    if not response.streaming and response.content:
        digest = md5(response.content, usedforsecurity=False).hexdigest()
        response.headers["ETag"] = quote_etag(digest)
    return response
def _precondition_failed(request):
    """Return (and log) a 412 Precondition Failed response."""
    failed = HttpResponse(status=412)
    log_response(
        "Precondition Failed: %s",
        request.path,
        response=failed,
        request=request,
    )
    return failed
def _not_modified(request, response=None):
    """
    Return a 304 Not Modified response, copying the relevant headers and
    cookies from *response* when one is given.
    """
    not_modified = HttpResponseNotModified()
    if response:
        # RFC 9110 Section 15.4.5 requires these headers (plus Last-Modified)
        # to be preserved on a 304.
        preserved = (
            "Cache-Control",
            "Content-Location",
            "Date",
            "ETag",
            "Expires",
            "Last-Modified",
            "Vary",
        )
        for header in preserved:
            if header in response:
                not_modified.headers[header] = response.headers[header]
        # Cookies must be propagated to the client even on a 304, per the
        # original cookie specification:
        # https://curl.haxx.se/rfc/cookie_spec.html
        not_modified.cookies = response.cookies
    return not_modified
def get_conditional_response(request, etag=None, last_modified=None, response=None):
    """
    Evaluate the request's conditional headers (If-Match, If-Unmodified-Since,
    If-None-Match, If-Modified-Since) against the given *etag* /
    *last_modified* values, following the precondition evaluation order of
    RFC 9110 Section 13.2.2.

    Return a 412 response, a 304 response, or the given *response* (possibly
    None) unchanged.
    """
    # Only return conditional responses on successful requests.
    if response and not (200 <= response.status_code < 300):
        return response
    # Get HTTP request headers.
    if_match_etags = parse_etags(request.META.get("HTTP_IF_MATCH", ""))
    if_unmodified_since = request.META.get("HTTP_IF_UNMODIFIED_SINCE")
    # parse_http_date_safe() yields None for malformed dates, disabling the check.
    if_unmodified_since = if_unmodified_since and parse_http_date_safe(
        if_unmodified_since
    )
    if_none_match_etags = parse_etags(request.META.get("HTTP_IF_NONE_MATCH", ""))
    if_modified_since = request.META.get("HTTP_IF_MODIFIED_SINCE")
    if_modified_since = if_modified_since and parse_http_date_safe(if_modified_since)
    # Evaluation of request preconditions below follows RFC 9110 Section
    # 13.2.2.
    # Step 1: Test the If-Match precondition.
    if if_match_etags and not _if_match_passes(etag, if_match_etags):
        return _precondition_failed(request)
    # Step 2: Test the If-Unmodified-Since precondition.
    if (
        not if_match_etags
        and if_unmodified_since
        and not _if_unmodified_since_passes(last_modified, if_unmodified_since)
    ):
        return _precondition_failed(request)
    # Step 3: Test the If-None-Match precondition.
    if if_none_match_etags and not _if_none_match_passes(etag, if_none_match_etags):
        if request.method in ("GET", "HEAD"):
            return _not_modified(request, response)
        else:
            return _precondition_failed(request)
    # Step 4: Test the If-Modified-Since precondition.
    if (
        not if_none_match_etags
        and if_modified_since
        and not _if_modified_since_passes(last_modified, if_modified_since)
        and request.method in ("GET", "HEAD")
    ):
        return _not_modified(request, response)
    # Step 5: Test the If-Range precondition (not supported).
    # Step 6: Return original response since there isn't a conditional response.
    return response
def _if_match_passes(target_etag, etags):
"""
Test the If-Match comparison as defined in RFC 9110 Section 13.1.1.
"""
if not target_etag:
# If there isn't an ETag, then there can't be a match.
return False
elif etags == ["*"]:
# The existence of an ETag means that there is "a current
# representation for the target resource", even if the ETag is weak,
# so there is a match to '*'.
return True
elif target_etag.startswith("W/"):
# A weak ETag can never strongly match another ETag.
return False
else:
# Since the ETag is strong, this will only return True if there's a
# strong match.
return target_etag in etags
def _if_unmodified_since_passes(last_modified, if_unmodified_since):
"""
Test the If-Unmodified-Since comparison as defined in RFC 9110 Section
13.1.4.
"""
return last_modified and last_modified <= if_unmodified_since
def _if_none_match_passes(target_etag, etags):
"""
Test the If-None-Match comparison as defined in RFC 9110 Section 13.1.2.
"""
if not target_etag:
# If there isn't an ETag, then there isn't a match.
return True
elif etags == ["*"]:
# The existence of an ETag means that there is "a current
# representation for the target resource", so there is a match to '*'.
return False
else:
# The comparison should be weak, so look for a match after stripping
# off any weak indicators.
target_etag = target_etag.strip("W/")
etags = (etag.strip("W/") for etag in etags)
return target_etag not in etags
def _if_modified_since_passes(last_modified, if_modified_since):
"""
Test the If-Modified-Since comparison as defined in RFC 9110 Section
13.1.3.
"""
return not last_modified or last_modified > if_modified_since
def patch_response_headers(response, cache_timeout=None):
    """
    Add HTTP caching headers to the given HttpResponse: Expires and
    Cache-Control.
    Each header is only added if it isn't already set.
    cache_timeout is in seconds. The CACHE_MIDDLEWARE_SECONDS setting is used
    by default.
    """
    if cache_timeout is None:
        cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
    # max-age may not be negative; clamp to zero.
    cache_timeout = max(cache_timeout, 0)
    if not response.has_header("Expires"):
        response.headers["Expires"] = http_date(time.time() + cache_timeout)
    patch_cache_control(response, max_age=cache_timeout)
def add_never_cache_headers(response):
    """
    Add headers to a response to indicate that a page should never be cached.
    """
    # A negative timeout is clamped to 0 by patch_response_headers(),
    # producing an already-expired Expires header and max-age=0.
    patch_response_headers(response, cache_timeout=-1)
    patch_cache_control(
        response, no_cache=True, no_store=True, must_revalidate=True, private=True
    )
def patch_vary_headers(response, newheaders):
    """
    Add (or update) the "Vary" header in the given HttpResponse object.
    newheaders is a list of header names that should be in "Vary". If headers
    contains an asterisk, then "Vary" header will consist of a single asterisk
    '*'. Otherwise, existing headers in "Vary" aren't removed.
    """
    # Preserve the original ordering: cache implementations may hash the raw
    # Vary value, so reordering would change cache keys.
    vary_headers = (
        cc_delim_re.split(response.headers["Vary"])
        if response.has_header("Vary")
        else []
    )
    # Header names are case-insensitive, so compare lowercased.
    seen = {header.lower() for header in vary_headers}
    vary_headers += [
        header for header in newheaders if header.lower() not in seen
    ]
    if "*" in vary_headers:
        response.headers["Vary"] = "*"
    else:
        response.headers["Vary"] = ", ".join(vary_headers)
def has_vary_header(response, header_query):
    """
    Check to see if the response has a given header name in its Vary header.
    """
    if not response.has_header("Vary"):
        return False
    # Header names are case-insensitive, so compare lowercased.
    return header_query.lower() in {
        header.lower() for header in cc_delim_re.split(response.headers["Vary"])
    }
def _i18n_cache_key_suffix(request, cache_key):
    """If necessary, add the current locale or time zone to the cache key."""
    if settings.USE_I18N:
        # LocaleMiddleware (or another middleware) may have set LANGUAGE_CODE
        # on the request; otherwise fall back to the active language, which in
        # turn can fall back to settings.LANGUAGE_CODE.
        language = getattr(request, "LANGUAGE_CODE", get_language())
        cache_key = "%s.%s" % (cache_key, language)
    if settings.USE_TZ:
        cache_key = "%s.%s" % (cache_key, get_current_timezone_name())
    return cache_key
def _generate_cache_key(request, method, headerlist, key_prefix):
    """Return a cache key from the headers given in the header list."""
    hasher = md5(usedforsecurity=False)
    # Fold the values of the varied-on request headers into the key.
    for value in (request.META.get(header) for header in headerlist):
        if value is not None:
            hasher.update(value.encode())
    url_hash = md5(request.build_absolute_uri().encode("ascii"), usedforsecurity=False)
    cache_key = "views.decorators.cache.cache_page.%s.%s.%s.%s" % (
        key_prefix,
        method,
        url_hash.hexdigest(),
        hasher.hexdigest(),
    )
    return _i18n_cache_key_suffix(request, cache_key)
def _generate_cache_header_key(key_prefix, request):
    """Return a cache key for the header cache."""
    url_hash = md5(
        request.build_absolute_uri().encode("ascii"), usedforsecurity=False
    ).hexdigest()
    return _i18n_cache_key_suffix(
        request,
        "views.decorators.cache.cache_header.%s.%s" % (key_prefix, url_hash),
    )
def get_cache_key(request, key_prefix=None, method="GET", cache=None):
    """
    Return a cache key based on the request URL and query. It can be used
    in the request phase because it pulls the list of headers to take into
    account from the global URL registry and uses those to build a cache key
    to check against.
    If there isn't a headerlist stored, return None, indicating that the page
    needs to be rebuilt.
    """
    if key_prefix is None:
        key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
    if cache is None:
        cache = caches[settings.CACHE_MIDDLEWARE_ALIAS]
    headerlist = cache.get(_generate_cache_header_key(key_prefix, request))
    if headerlist is None:
        # No learned header list yet; the page must be rebuilt first.
        return None
    return _generate_cache_key(request, method, headerlist, key_prefix)
def learn_cache_key(request, response, cache_timeout=None, key_prefix=None, cache=None):
    """
    Learn what headers to take into account for some request URL from the
    response object. Store those headers in a global URL registry so that
    later access to that URL will know what headers to take into account
    without building the response object itself. The headers are named in the
    Vary header of the response, but we want to prevent response generation.
    The list of headers to use for cache key generation is stored in the same
    cache as the pages themselves. If the cache ages some data out of the
    cache, this just means that we have to build the response once to get at
    the Vary header and so at the list of headers to use for the cache key.
    """
    if key_prefix is None:
        key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
    if cache_timeout is None:
        cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
    if cache is None:
        cache = caches[settings.CACHE_MIDDLEWARE_ALIAS]
    cache_key = _generate_cache_header_key(key_prefix, request)
    if response.has_header("Vary"):
        # If i18n is used, the generated cache key is already suffixed with
        # the current locale, so the raw value of Accept-Language would be
        # redundant and would store the same content under multiple keys.
        # See #18191 for details.
        is_accept_language_redundant = settings.USE_I18N
        headerlist = sorted(
            "HTTP_" + header
            for header in (
                h.upper().replace("-", "_")
                for h in cc_delim_re.split(response.headers["Vary"])
            )
            if header != "ACCEPT_LANGUAGE" or not is_accept_language_redundant
        )
    else:
        # Without a Vary header an empty header list still has to be stored
        # so later requests can build a key from request.build_absolute_uri().
        headerlist = []
    cache.set(cache_key, headerlist, cache_timeout)
    return _generate_cache_key(request, request.method, headerlist, key_prefix)
def _to_tuple(s):
t = s.split("=", 1)
if len(t) == 2:
return t[0].lower(), t[1]
return t[0].lower(), True | PypiClean |
/NetworkSim-0.2.2.tar.gz/NetworkSim-0.2.2/examples/.ipynb_checkpoints/simulation_wrapper-checkpoint.ipynb | ```
from NetworkSim.simulation.setup.simulator import Simulator
from NetworkSim.architecture.setup.model import Model
from NetworkSim.architecture.base.network import Network
model = Model(network=Network(num_nodes=5))
simulator = Simulator(model=model, until=1000)
simulator.initialise()
simulator.RAM
simulator.run()
simulator.model.control_ring.packet_record_df
simulator.model.data_rings[0].packet_record_df
simulator.receiver[1].received_control_packet_df
simulator.receiver[1].received_data_packet_df
simulator.receiver[1].queue_df
simulator.model.get_data_packet_duration()
simulator.latency_df
simulator.latency_df.plot(y='Latency')
```
| PypiClean |
/Febiss-0.9.0.tar.gz/Febiss-0.9.0/febiss/utilities/io_handling.py | __copyright__ = """
This code is licensed under the MIT license.
Copyright University Innsbruck, Institute for General, Inorganic, and Theoretical Chemistry, Podewitz Group
See LICENSE for details
"""
from typing import Union
import numpy as np
from febiss.utilities.structures import *
def read_pdb(pdb, solute: Solute, water: Water):
    """Populate *solute* and *water* from the ATOM/HETATM records in *pdb*.

    HETATM records are treated as water molecules; oxygen rows carry the
    (sign-flipped) value from the B-factor column. ATOM records build the
    solute with placeholder values of 0.0.
    """
    with open(pdb, 'r') as handle:
        for line in handle:
            # 'ATOM' is a substring of 'HETATM', so HETATM must be tested first.
            if 'HETATM' in line:
                fields = line.split()
                water.elements.append(fields[-1])
                water.atoms.append(np.array([float(v) for v in fields[-6:-3]]))
                if fields[-1] == "O":
                    energy = -1 * float(fields[-2])
                    water.values.append(energy)
                    # One all_values entry per atom of the molecule (O + 2 H).
                    water.all_values.append(energy)
                    water.all_values.append(energy)
                    water.all_values.append(energy)
                elif fields[-1] != 'H':
                    raise NotImplementedError("ERROR: NON-WATER HETATM present in pdb file")
            elif 'ATOM' in line:
                fields = line.split()
                solute.elements.append(fields[-1])
                solute.atoms.append(np.array([float(v) for v in fields[-6:-3]]))
                solute.values.append(0.0)
    solute.atoms = np.asarray(solute.atoms)
    solute.determine_polar_hydrogen_and_non_hydrogen()
    water.atoms = np.asarray(water.atoms)
    water.sort_by_value()
def write_pdb(pdb: str, structure: "Union[Solute, Water]", solute: bool = False):
    """Write the atoms of *structure* to *pdb* in PDB format.

    When *solute* is True the file is (re)created and records use the ATOM
    tag with residue SOL; otherwise water records (HETATM, residue FEB) are
    appended and atom numbering continues after the file's existing lines.

    :param pdb: path of the PDB file to write.
    :param structure: object providing ``elements``, ``atoms`` and ``values``.
    :param solute: True to start a new file for the solute, False to append
        water molecules.
    """
    if solute:
        atomcounter = 1
        mode = 'w'
    else:
        # Continue atom numbering after the records already in the file.
        # Bug fix: the original ``len(open(pdb).readlines())`` leaked the
        # file handle; count lines inside a context manager instead.
        with open(pdb, 'r') as existing:
            atomcounter = sum(1 for _ in existing) + 1
        mode = 'a'
    with open(pdb, mode) as f:
        for count, (ele, atom) in enumerate(zip(structure.elements, structure.atoms)):
            j = []
            if solute:
                j.append('ATOM'.ljust(6))  # record tag
            else:
                j.append('HETATM'.ljust(6))  # record tag
            j.append(str(atomcounter + count).rjust(5))  # atom serial number
            j.append(ele.center(4))  # atom name
            if solute:
                j.append('SOL'.ljust(3))  # residue name
            else:
                j.append('FEB'.ljust(3))  # residue name
            j.append('A'.rjust(1))  # chain identifier
            if solute:
                j.append('1'.rjust(4))  # residue number
            else:
                j.append('2'.rjust(4))  # residue number
            j.append(str('%8.3f' % (float(atom[0]))).rjust(8))  # x
            j.append(str('%8.3f' % (float(atom[1]))).rjust(8))  # y
            j.append(str('%8.3f' % (float(atom[2]))).rjust(8))  # z
            j.append(str('%6.2f' % 1.0).rjust(6))  # occupancy
            value = float(structure.values[count])
            if value == 0.0:
                # Solute placeholder: avoid writing "-0.00".
                j.append(str('%7.2f' % value).ljust(7))  # delta G
            else:
                j.append(str('%7.2f' % (-1 * value)).ljust(7))  # delta G
            j.append(ele.rjust(12))  # element symbol
            f.write("%s%s %s %s %s%s %s%s%s%s%s%s\n" % (
                j[0], j[1], j[2], j[3], j[4], j[5], j[6], j[7], j[8], j[9], j[10], j[11]))
def write_style_file() -> str:
    """Write the PyMOL styling script 'style.pml' to the working directory.

    Returns the name of the file written.
    """
    filename = 'style.pml'
    # One PyMOL command per output line, in the original order.
    commands = (
        'hide everything',
        'show sticks',
        'set stick_radius, .15',
        'set sphere_scale, .18',
        'set sphere_scale, .13, elem H',
        'set bg_rgb=[1, 1, 1]',
        'set stick_quality, 50',
        'set sphere_quality, 4',
        'color gray35, elem C',
        'color red, elem O',
        'color blue, elem N',
        'color gray98, elem H',
        'set ray_texture, 2',
        'set antialias, 3',
        'set ambient, 0.5',
        'set spec_count, 5',
        'set shininess, 50',
        'set specular, 1',
        'set reflect, .1',
        'set cartoon_ring_finder, 4',
        'set cartoon_ring_mode,1',
        'set cartoon_ring_transparency, 0.6',
        'set cartoon_ring_color, black',
        'show cartoon',
        'set h_bond_cutoff_center, 3.5',
        'set h_bond_cutoff_edge, 3.5',
        'set h_bond_max_angle, 135',
        'set dash_gap, .25',
        'set dash_length, .02',
        'set dash_round_ends, 1',
        'set dash_radius, .05',
        'set opaque_background, off',
        'set stick_h_scale, 1',
        'set label_digits, 2',
        'label ele o and resn FEB, b',
        'select solute, not resn "FEB"',
        'select waterMolecules, resn "FEB"',
        'distance solute-water, solute, waterMolecules, cutoff=3.2, mode=2',
        'set dash_color, green',
        'spectrum b, magenta_white_yellow, ele o and resn FEB',
        'hide labels, solute-water',
        'center',
    )
    with open(filename, 'w') as f:
        f.write('\n'.join(commands) + '\n')
    return filename
/Botic-1.1.4.tar.gz/Botic-1.1.4/botic/trader/hodlstoploss.py | import time
from decimal import Decimal
import typing as t
import datetime
from .base import BaseTrader
from ..util import str2bool, parse_datetime
from ..exchange.exceptions import ExchangeSellLimitError
class HodlStopLoss(BaseTrader):
    """HodlStopLoss trader.

    Places market buys (:meth:`_maybe_buy_sell`), tracks them in
    ``self.data``, and, when ``stoploss_enable`` is configured, exits
    positions via :meth:`_run_stoploss` according to the configured
    stop-loss strategy (:meth:`_check_sell_orders`).
    """
    # pylint: disable=too-many-instance-attributes
    # pylint: disable=attribute-defined-outside-init
    # pylint: disable=no-member
    # pylint: disable=too-many-statements
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-function-args
    def __init__(self, config) -> None:
        """Initialize runtime state; market data is filled in per iteration
        by :meth:`run_trading_algorithm`."""
        super().__init__(config)
        # Market/account snapshot, refreshed on every algorithm run.
        self.usd_decimal_places = None
        self.size_decimal_places = None
        self.current_price = None
        self.current_price_target = None
        self.taker_fee = None
        self.maker_fee = None
        self.usd_volume = None
        self.product_info = None
        self.current_price_increase = None
        self.wallet = None
        self.can_buy = False
        # Timestamp of the last status log line (rate limiting).
        self._rate_limit_log = time.time()
        # Exchange time of the last filled buy (used for the buy cooldown).
        self._last_buy_time = None
        # Coins a pure buy-and-hold of the initial wallet would have bought.
        self._hodl_value = None
def configure(self) -> None:
self.max_outstanding_sells = int(self.max_outstanding_sells)
self.max_buys_per_hour = int(self.max_buys_per_hour)
self.sell_target = Decimal(self.sell_target)/100
self.buy_barrier = Decimal(self.buy_barrier)/100
self.buy_percent = Decimal(self.buy_percent)/100
self.buy_max = Decimal(self.buy_max)
self.buy_min = Decimal(self.buy_min)
self.stoploss_enable = str2bool(self.stoploss_enable)
self.stoploss_percent = Decimal(self.stoploss_percent)/100
self.stoploss_seconds = int(self.stoploss_seconds)
self.stoploss_strategy = str(self.stoploss_strategy)
    def _time2datetime(self) -> datetime.datetime:
        """Return the exchange's current epoch time as a naive local datetime."""
        return datetime.datetime.fromtimestamp(self.exchange.get_time())
    def run_trading_algorithm(self) -> None:
        """Run one iteration of the strategy.

        Refreshes market/account state from the exchange, recomputes the
        price target, possibly places a buy, checks open sells, then logs a
        rate-limited status line.
        """
        # Refresh market and account state for this iteration.
        self.product_info = self.exchange.get_product_info()
        self.current_price = self.exchange.get_price()
        self.maker_fee, self.taker_fee, self.usd_volume = self.exchange.get_fees()
        self.size_decimal_places, self.usd_decimal_places = self.exchange.get_precisions()
        self.wallet = self.exchange.get_usd_wallet()
        self._get_current_price_target()
        self.can_buy = self._check_if_can_buy()
        self._maybe_buy_sell()
        self._check_sell_orders()
        hold_value = self.exchange.get_hold_value()
        # Record a buy-and-hold baseline the first time through, for the
        # "hodl-value" comparison logged below.
        if not self._hodl_value:
            self._hodl_value = self.wallet / self.current_price
        # Rate-limit the status line to at most one every 0.5 seconds.
        if time.time() - self._rate_limit_log > 0.5:
            self._rate_limit_log = time.time()
            total_value = hold_value + self.wallet
            # NOTE(review): reads the exchange's private ``_coins`` attribute;
            # consider exposing a public accessor on the exchange instead.
            self.logit(
                'wallet:{:2f} open:{} price:{} coins:{:.4f} held:{} canbuy:{} total-value:{} hodl-value:{}'.format(
                    self.wallet, self._total_open_orders, self.current_price, self.exchange._coins, hold_value,
                    self.can_buy, total_value,
                    round(self._hodl_value * self.current_price)),
                custom_datetime=self._time2datetime())
@property
def _total_open_orders(self) -> int:
total = 0
for _, order in self.data.items():
if not order['completed'] and order['sell_order']:
total += 1
return total
@property
def _total_sells_in_past_hour(self) -> int:
current_time = self.exchange.get_time()
last_hour_time = current_time - (60 * 60)
total = 0
for _, order in self.data.items():
if order['time'] >= last_hour_time:
total += 1
return total
def _get_current_price_target(self) -> Decimal:
current_percent_increase = (self.maker_fee + self.taker_fee) + (self.sell_target)
self.current_price_target = round(
self.current_price * current_percent_increase + self.current_price,
self.usd_decimal_places
)
self.current_price_increase = self.current_price * current_percent_increase
return self.current_price_target
    def _check_if_can_buy(self) -> bool:
        """Check orders if a sell price is <= current_price_target.
        If so, this means no buy is allowed until that order is filled or out of range.
        Only allow within the fee range though to keep buy/sells further apart.

        Also enforces: a 24h cooldown after the last buy, the per-hour buy
        limit, and the maximum number of outstanding sells.
        """
        can = True
        self._get_current_price_target()
        current_time = self.exchange.get_time()
        # 24-hour cooldown after the last filled buy (timestamps are the
        # exchange's epoch seconds).
        if self._last_buy_time and current_time - self._last_buy_time < 86400:
            can = False
        # Check how many buys were placed in past hour and total open
        if self._total_sells_in_past_hour > self.max_buys_per_hour:
            self.logit('WARNING: max_buys_per_hour({}) hit'.format(self.max_buys_per_hour),
                       custom_datetime=self._time2datetime())
            return False
        # Don't count other orders now, only ones being tracked here
        # if len(self.open_sells) >= self.max_outstanding_sells:
        if self._total_open_orders >= self.max_outstanding_sells:
            self.logit('WARNING: max_outstanding_sells hit ({} of {})'.format(
                self._total_open_orders, self.max_outstanding_sells),
                custom_datetime=self._time2datetime()
            )
            return False
        for _, order in self.data.items(): # self.open_sells:
            if order['completed']:
                continue
            sell_order = order['sell_order']
            if not sell_order:
                continue
            if not 'price' in sell_order:
                continue
            sell_price = Decimal(sell_order['price'])
            fees = self.maker_fee + self.taker_fee
            barrier = self.buy_barrier
            # Shift each open sell down by barrier + fees; if the current
            # target is inside that band, buying again is disallowed.
            adjusted_sell_price = round(
                sell_price - ((Decimal(barrier) + fees) * sell_price),
                self.usd_decimal_places
            )
            if adjusted_sell_price <= self.current_price_target:
                can = False
                break
        return can
    def _maybe_buy_sell(self) -> None:
        """Place a market buy when allowed, then poll until it settles.

        Sizing: ``buy_percent`` of the wallet, clamped to
        [``buy_min``, ``buy_max``] and to the product's minimum funds/size.
        The filled order is recorded in ``self.data`` keyed by order id.
        """
        assert self.wallet is not None, 'Wallet must be set.'
        assert self.current_price is not None, 'Current price must be set.'
        if not self.can_buy:
            return
        # Check if USD wallet has enough available
        if self.wallet < Decimal(self.product_info.min_market_funds):
            return
        # Calculate & check if size is big enough (sometimes its not if wallet is too small)
        buy_amount = round(
            Decimal(self.buy_percent) * Decimal(self.wallet), self.usd_decimal_places
        )
        buy_size = round(Decimal(buy_amount) / self.current_price, self.size_decimal_places)
        if buy_size <= self.product_info.base_min_size:
            buy_amount = self.buy_min
            buy_size = round(Decimal(buy_amount) / self.current_price, self.size_decimal_places)
        # Check if USD wallet has enough available
        if buy_amount < Decimal(self.product_info.min_market_funds):
            self.logit('WARNING: Buy amount too small (<${}): {}'.format(
                self.product_info.min_market_funds, buy_amount),
                custom_datetime=self._time2datetime()
            )
            buy_amount = self.buy_min
            buy_size = round(Decimal(buy_amount) / self.current_price, self.size_decimal_places)
            self.logit('DEFAULT_BUY_SIZE_TO_MIN: {} {}'.format(buy_amount, buy_size),
                       custom_datetime=self._time2datetime())
        # Make sure buy_amount is within buy_min/max
        if buy_amount < self.buy_min:
            self.logit('WARNING: buy_min hit. Setting to min.',
                       custom_datetime=self._time2datetime())
            buy_amount = self.buy_min
        elif buy_amount > self.buy_max:
            self.logit('WARNING: buy_max hit. Setting to max.',
                       custom_datetime=self._time2datetime())
            buy_amount = self.buy_max
        if Decimal(self.wallet) < Decimal(self.buy_min):
            return
        # adjust size to fit with fee
        # NOTE(review): buy_size is only logged below — buy_market() is
        # called with buy_amount (USD), not with the size. Confirm intended.
        buy_size = round(
            Decimal(buy_size) - Decimal(buy_size) * Decimal(self.taker_fee),
            self.size_decimal_places
        )
        self.logit('BUY: price:{} amount:{} size:{}'.format(
            self.current_price, buy_amount, buy_size),
            custom_datetime=self._time2datetime()
        )
        response = self.exchange.buy_market(buy_amount)
        self.logit('BUY-RESPONSE: {}'.format(response), custom_datetime=self._time2datetime())
        # A 'message' key indicates an error payload from the exchange.
        if 'message' in response:
            self.logit('WARNING: Failed to buy', custom_datetime=self._time2datetime())
            return
        order_id = response['id']
        errors = 0
        self.last_buy = None
        # Wait until order is completely filled
        if order_id in self.data:
            self.logit('ERROR: order_id exists in data. ????: {}'.format(order_id),
                       custom_datetime=self._time2datetime())
        self.data[order_id] = {
            'first_status': response, 'last_status': None, 'time': self.exchange.get_time(),
            'sell_order': None, 'sell_order_completed': None,
            'completed': False, 'profit_usd': None
        }
        self.write_data()
        done = False
        status_errors = 0
        buy = {}
        # Poll the order until it settles, or until too many errors occur.
        while 1:
            try:
                buy = self.exchange.get_order(order_id)
                self.data[order_id]['last_status'] = buy
                self.write_data()
                if 'settled' in buy:
                    if buy['settled']:
                        self.logit('FILLED: size:{} funds:{}'.format(
                            buy['filled_size'], buy['funds']),
                            custom_datetime=self._time2datetime())
                        self.last_buy = buy
                        done = True
                        break
                else:
                    # Track repeated status failures; persistent failures
                    # eventually count as hard errors and abort the loop.
                    self._handle_failed_order_status(order_id, buy, status_errors)
                    status_errors += 1
                    if status_errors > 10:
                        errors += 1
            except Exception as err:
                self.logit('WARNING: get_order() failed:' + str(err),
                           custom_datetime=self._time2datetime())
                errors += 1
                time.sleep(10)
            if errors > 5:
                self.logit('WARNING: Failed to get order. Manual intervention needed.: {}'.format(
                    order_id),
                    custom_datetime=self._time2datetime())
                break
            time.sleep(2)
        # Buy order done, now place sell
        if done:
            msg = 'BUY-FILLED: size:{} funds:{}\n'.format(buy['filled_size'], buy['funds'])
            self.logit(msg, custom_datetime=self._time2datetime())
            self.write_data()
            self.last_buy = None
            self._last_buy_time = self.exchange.get_time()
        else:
            # buy was placed but could not get order status
            # NOTE(review): if the loop aborted before any successful
            # get_order(), ``buy`` is still {} and the 'filled_size' lookup
            # below raises KeyError — confirm and guard.
            if 'message' in buy:
                msg = 'BUY-PLACED-NOSTATUS: {}\n'.format(buy['message'])
            else:
                msg = 'BUY-PLACED-NOSTATUS: size:{} funds:{}\n'.format(
                    buy['filled_size'], buy['funds'])
            self.logit(msg, custom_datetime=self._time2datetime())
            self.send_email('BUY-ERROR', msg=msg)
def _handle_failed_order_status(self, order_id: str, status: t.Mapping[str, t.Any]) -> None:
if 'message' in status:
self.logit('WARNING: Failed to get order status: {}'.format(status['message']),
custom_datetime=self._time2datetime())
self.logit(
'WARNING: Order status error may be temporary, due to coinbase issues or exchange '
'delays. Check: https://status.pro.coinbase.com',
custom_datetime=self._time2datetime()
)
else:
self.logit('WARNING: Failed to get order status: {}'.format(order_id),
custom_datetime=self._time2datetime())
time.sleep(10)
def _run_stoploss(self, buy_order_id: t.AnyStr) -> None:
""" Cancel sell order, place new market sell to fill immediately
get response and update data
"""
print('-----------------------')
print('STOPLOSS_FOR: {}'.format(buy_order_id))
print('-----------------------')
info = self.data[buy_order_id]
sell = info['first_status']
# cancel
#response = self.exchange.cancel(sell['id'])
#self.logit('STOPLOSS: CANCEL-RESPONSE: {}'.format(response),
# custom_datetime=self._time2datetime())
# new order
response = self.exchange.sell_market(sell['filled_size'])
self.data[buy_order_id]['sell_order'] = response
self.write_data()
self.logit('STOPLOSS: SELL-RESPONSE: {}'.format(response),
custom_datetime=self._time2datetime())
order_id = response['id']
done = False
errors = 0
status_errors = 0
while 1:
try:
status = self.exchange.get_order(order_id)
self.data[buy_order_id]['sell_order'] = status
self.write_data()
if 'settled' in status:
if status['settled']:
self.logit('SELL-FILLED: {}'.format(status),
custom_datetime=self._time2datetime())
self.data[buy_order_id]['sell_order_completed'] = status
self.data[buy_order_id]['completed'] = True
self.write_data()
done = True
break
else:
self.handle_failed_order_status(order_id, status)
status_errors += 1
if status_errors > 10:
errors += 1
except Exception as err:
self.logit('WARNING: get_order() failed:' + str(err),
custom_datetime=self._time2datetime())
errors += 1
time.sleep(8)
if errors > 5:
self.logit('WARNING: Failed to get order. Manual intervention needed.: {}'.format(
order_id),
custom_datetime=self._time2datetime())
break
time.sleep(2)
if not done:
self.logit(
'ERROR: Failed to get_order() for stoploss. This is a TODO item on how to handle',
custom_datetime=self._time2datetime()
)
    def _check_sell_orders(self) -> None:
        """ Check if any sell orders have completed.

        For each tracked, uncompleted buy: discard corrupted sell orders,
        record profit when a sell finished, and (when enabled) trigger the
        stop-loss according to ``stoploss_strategy``
        ('report' / 'both' / 'either').
        """
        # pylint: disable=too-many-locals
        # pylint: disable=bare-except
        for buy_order_id, info in self.data.items():
            if self.data[buy_order_id]['completed']:
                continue
            #if not info['sell_order']:
            #    continue
            # A 'message' key inside the stored sell order means the exchange
            # returned an error payload instead of an order.
            if 'sell_order' in info and info['sell_order'] and 'message' in info['sell_order']:
                self.logit(
                    'WARNING: Corrupted sell order, mark as done: {}'.format(info['sell_order']),
                    custom_datetime=self._time2datetime())
                self.data[buy_order_id]['completed'] = True
                self.data[buy_order_id]['sell_order'] = None
                self.write_data()
                self.send_email('SELL-CORRUPTED',
                                msg='WARNING: Corrupted sell order, mark as done: {}'.format(
                                    info['sell_order'])
                                )
                # NOTE(review): this blocks the whole trading loop for an
                # hour after a corrupted sell — confirm that is intended.
                time.sleep(3600)
                continue
            if 'sell_order' in info and info['sell_order']:
                sell = self.exchange.get_order(info['sell_order']['id'])
                if 'message' in sell:
                    self.logit('WARNING: Failed to get sell order status (retrying later): {}'.format(
                        sell['message']), custom_datetime=self._time2datetime())
                    # Give up on orders whose status has failed for > 2 hours.
                    if self.exchange.get_time() - info['time'] > 60 * 60 * 2:
                        self.logit('WARNING: Failed to get order status:',
                                   custom_datetime=self._time2datetime())
                        self.logit('WARNING: Writing as done/error since it has been > 2 hours.',
                                   custom_datetime=self._time2datetime())
                        self.data[buy_order_id]['completed'] = True
                        self.write_data()
                    continue
                if 'status' in sell and sell['status'] != 'open':
                    # calculate profit from buy to sell
                    # done, remove buy/sell
                    self.data[buy_order_id]['completed'] = True
                    self.data[buy_order_id]['sell_order_completed'] = sell
                    if sell['status'] == 'done':
                        try:
                            first_time = self.data[buy_order_id]['first_status']['created_at']
                        except:
                            first_time = None
                        sell_value = Decimal(sell['executed_value'])
                        #sell_filled_size = Decimal(sell['filled_size'])
                        #buy_filled_size = Decimal(info['last_status']['filled_size'])
                        buy_value = Decimal(info['last_status']['executed_value'])
                        buy_sell_diff = round(sell_value - buy_value, 2)
                        # Duration is measured from the buy's created_at when
                        # available, otherwise from the sell's done_at.
                        if first_time:
                            done_at = time.mktime(
                                time.strptime(parse_datetime(first_time), '%Y-%m-%dT%H:%M:%S'))
                        else:
                            done_at = time.mktime(
                                time.strptime(parse_datetime(sell['done_at']), '%Y-%m-%dT%H:%M:%S'))
                        self.data[buy_order_id]['profit_usd'] = buy_sell_diff
                        msg = 'SOLD: duration:{:.2f} bought:{} sold:{} profit:{}'.format(
                            self.exchange.get_time() - done_at,
                            round(buy_value, 2),
                            round(sell_value, 2),
                            buy_sell_diff
                        )
                        self.logit(msg, custom_datetime=self._time2datetime())
                        self.send_email('SOLD', msg=msg)
                    else:
                        self.logit('SOLD-WITH-OTHER-STATUS: {}'.format(sell['status']),
                                   custom_datetime=self._time2datetime())
                    self.write_data()
                if self.stoploss_enable:
                    created_at = time.mktime(
                        time.strptime(parse_datetime(info['first_status']['created_at']), '%Y-%m-%dT%H:%M:%S'))
                    duration = self.exchange.get_time() - created_at
                    bought_price = round(
                        Decimal(info['last_status']['executed_value']) /
                        Decimal(info['last_status']['filled_size']),
                        4
                    )
                    # oops, had this backwards
                    #(bought_price-self.current_price) / bought_price
                    # Fractional price change since the buy (negative = loss).
                    percent_change = (self.current_price - bought_price) / bought_price
                    stop_seconds = False
                    stop_percent = False
                    if duration >= self.stoploss_seconds:
                        stop_seconds = True
                    if percent_change <= self.stoploss_percent:
                        stop_percent = True
                    # 'report' only logs; 'both' requires time AND loss
                    # thresholds; 'either' triggers on either threshold.
                    if (stop_seconds or stop_percent) and self.stoploss_strategy == 'report':
                        self.logit('STOPLOSS: percent:{} duration:{}'.format(
                            percent_change, duration), custom_datetime=self._time2datetime())
                    if self.stoploss_strategy == 'both' and stop_percent and stop_seconds:
                        self.logit('STOPLOSS: strategy:{} percent:{} bought_price:{} cur_price:{} duration:{}'.format(
                            self.stoploss_strategy,
                            percent_change, bought_price, self.current_price,
                            duration,
                        ), custom_datetime=self._time2datetime())
                        self._run_stoploss(buy_order_id)
                    elif self.stoploss_strategy == 'either' and (stop_percent or stop_seconds):
                        self.logit('STOPLOSS: strategy:{} percent:{} bought_price:{} cur_price:{} duration:{}'.format(
                            self.stoploss_strategy,
                            percent_change, bought_price, self.current_price,
                            duration,
                        ), custom_datetime=self._time2datetime())
                        self._run_stoploss(buy_order_id)
/MaterialDjango-0.2.5.tar.gz/MaterialDjango-0.2.5/bower_components/iron-resizable-behavior/.github/ISSUE_TEMPLATE.md | <!-- Instructions: https://github.com/PolymerElements/iron-resizable-behavior/CONTRIBUTING.md#filing-issues -->
### Description
<!-- Example: The `paper-foo` element causes the page to turn pink when clicked. -->
### Expected outcome
<!-- Example: The page stays the same color. -->
### Actual outcome
<!-- Example: The page turns pink. -->
### Live Demo
<!-- Example: https://jsbin.com/cagaye/edit?html,output -->
### Steps to reproduce
<!-- Example
1. Put a `paper-foo` element in the page.
2. Open the page in a web browser.
3. Click the `paper-foo` element.
-->
### Browsers Affected
<!-- Check all that apply -->
- [ ] Chrome
- [ ] Firefox
- [ ] Safari 9
- [ ] Safari 8
- [ ] Safari 7
- [ ] Edge
- [ ] IE 11
- [ ] IE 10
| PypiClean |
/Djblets-3.3.tar.gz/Djblets-3.3/docs/releasenotes/0.6.7.rst | ===========================
Djblets 0.6.7 Release Notes
===========================
**Release date**: January 9, 2011
djblets.datagrid
================
* The datagrids now use a RequestContext when rendering cells, allowing
the columns or templates to access data from context processors.
djblets.siteconfig
==================
* The form body of a siteconfig settings page can now be replaced.
It's now stored in the "form_content" block.
* SiteConfigurationManager no longer crashes if trying to clear
the cache for a SiteConfiguration that no longer exists.
djblets.testing
===============
* The Selenium test suite has been updated to support Django 1.2's
multi-database support. Previously, fixtures would fail to load
if using the new ``settings.DATABASES`` variable.
djblets.util
============
* The ``@augment_method_from`` decorator wasn't properly calling up the
decorator chain, preventing some decorators from being invoked. This
has been fixed to ensure all decorators are called.
djblets.webapi
==============
* Due to the ``@augment_method_from`` breakage listed above, webapi
decorators could fail to add their own checks, causing various
problems in field checking and authentication. This is now fixed.
* The Permission Denied (HTTP 403) errors being returned weren't
sufficient for clients that weren't authenticated. Now, an
unauthenticated client will instead see Not Logged In (HTTP 401)
errors.
* The ``HTTP_AUTHORIZATION`` header is now checked on all requests. When
provided by the client, it will be used for authentication. This
means that clients can now force a login from their very first
request on, instead of requiring a HTTP 401 Unauthorized being
sent out first.
This will also prevent multiple logins across different requests
from the same client, when the ``HTTP_AUTHORIZATION`` header is passed
on each request. This makes requests less heavy-weight and prevents
the last_login timestamp on the User from being updated on each
request.
As part of this change, any webapps manually using the
``@webapi_login_required`` decorator without the new resource code
will no longer support HTTP Basic auth. However, this was never
a supported feature anyway, and was more there by accident.
* The ``api_format`` parameter in requests is now treated specially
and doesn't trigger any invalid attribute errors during field
validation.
* :py:meth:`WebAPIResource.delete` now uses get_object instead of fetching
the object directly, which simplifies the function and guarantees
that the correct object is used (especially when a resource
overrides ``get_object``).
* Redirects now preserve any special parameters (``callback``,
``_method``, ``expand``, and ``api_format``) passed to the request.
This works around problems in HTTP implementations that don't
allow the caller to know that redirects occurred (such as major
browsers), which would lead to this information being stripped and
the wrong results being returned.
* The ``expand`` parameter for expanding links in payloads is now
supported for POST and PUT requests.
Contributors
============
* Christian Hammond
* David Trowbridge
| PypiClean |
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/develop/aa_cnn_mnist.py | from aa_deep_learning.AADeepLearning import AADeepLearning
from aa_deep_learning.AADeepLearning.datasets import mnist
from aa_deep_learning.AADeepLearning.datasets import np_utils
import numpy as np
# Fix the RNG seed so weight initialisation (and therefore training) is reproducible.
np.random.seed(0)
# MNIST ships pre-split into 60,000 training and 10,000 test samples; the
# loader downloads the data automatically when it is not already present.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Reshape to NCHW layout: (sample count, colour channels, height, width).
x_train = x_train.reshape(x_train.shape[0], 1, 28, 28)
x_test = x_test.reshape(x_test.shape[0], 1, 28, 28)
# Cast the pixel data to float32.
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# Normalise pixel intensities from [0, 255] into [0, 1].
x_train /= 255
x_test /= 255
# Ten-way classification: one-hot encode the integer class labels (0-9).
y_train = np_utils.to_categorical(y_train, 10)
y_test = np_utils.to_categorical(y_test, 10)
# Training configuration.
config = {
    # Initial learning rate.
    "learning_rate": 0.001,
    # Optimisation strategy: sgd/momentum/rmsprop/adam
    "optimizer": "adam",
    # Momentum coefficient used by the momentum optimiser; the 0.9 default
    # rarely needs adjusting.
    "momentum_coefficient": 0.9,
    # Total number of training iterations.
    "number_iteration": 100,
    # Samples per mini-batch.
    "batch_size": 16,
    # Print progress every this many iterations.
    "display": 10,
}
# Network structure; data flows through the layers from top to bottom.
net = [
    {
        # Layer name.
        "name": "convolutional_1",
        # Layer type: convolution.
        "type": "convolutional",
        # Number of convolution kernels.
        "kernel_number": 1,
        # Kernel height.
        "kernel_height": 2,
        # Kernel width.
        "kernel_width": 2,
        # Zero padding: 1 pads one ring of zeros around the image, 2 pads two, etc.
        "padding": 1,
        # Stride: 1 slides the kernel one pixel horizontally/vertically per step, etc.
        "stride": 1,
        # Weight initialisation: gaussian/xavier/msra
        "weight_init": "msra"
    },
    {
        # Layer name.
        "name": "relu_1",
        # Layer type: activation.
        "type": "relu"
    },
    {
        # Layer name.
        "name": "pooling_1",
        # Layer type: pooling.
        "type": "pooling",
        # Mode: max (max pooling) / average (average pooling).
        "mode": "max",
        # Pooling kernel height.
        "kernel_height": 2,
        # Pooling kernel width.
        "kernel_width": 2,
        # Stride: 1 slides the window one pixel horizontally/vertically per step, etc.
        "stride": 1
    },
    {
        # Layer name (unconstrained).
        "name": "flatten_1",
        # Flattens conv output into the shape the dense layers expect; used
        # between the input/conv layers and the fully connected layers,
        # e.g. (60000, 1, 28, 28) -> (784, 60000).
        "type": "flatten"
    },
    {
        # Layer name.
        "name": "fully_connected_1",
        # Layer type: fully connected.
        "type": "fully_connected",
        # Number of neurons.
        "neurons_number": 256,
        # Weight initialisation: msra/xavier/gaussian
        "weight_init": "msra"
    },
    {
        # Layer name.
        "name": "relu_2",
        # Layer type: activation (relu, sigmoid or tanh).
        "type": "relu"
    },
    {
        # Layer name.
        "name": "fully_connected_2",
        # Layer type: fully connected.
        "type": "fully_connected",
        # Ten output neurons because this is a ten-class problem.
        "neurons_number": 10,
        # Weight initialisation: msra/xavier/gaussian
        "weight_init": "msra"
    },
    {
        # Layer name.
        "name": "softmax_1",
        # Classification layer: emits the final ten-class probability distribution.
        "type": "softmax"
    }
]
# Build the model from the layer list and configuration.
AA = AADeepLearning(net=net, config=config)
# Train the model.
AA.train(x_train=x_train, y_train=y_train)
# Evaluate on the test set; score holds per-class probabilities, accuracy the hit rate.
score, accuracy = AA.predict(x_test=x_test, y_test=y_test)
print("test set accuracy:", accuracy)
/Mantissa-0.9.0.tar.gz/Mantissa-0.9.0/xmantissa/webnav.py |
from epsilon.structlike import record
from zope.interface import implements
from nevow.inevow import IQ
from nevow import url
from nevow.stan import NodeNotFound
from xmantissa.ixmantissa import ITab
from xmantissa.fragmentutils import dictFillSlots
class TabMisconfiguration(Exception):
    """
    Raised when two navigation plugins supply conflicting item factory
    information for the same tab.
    """
    def __init__(self, info, tab):
        super(TabMisconfiguration, self).__init__(
            "Inconsistent tab item factory information", info, tab)
# Mutable aggregate used by getTabs() below to merge the Tab data contributed
# by multiple navigation plugins before the final Tab instances are built.
# ``authoritative`` defaults to None (tri-state: None / True / False).
TabInfo = record('priority storeID children linkURL authoritative',
                 authoritative=None)
class Tab(object):
    """
    Represent part or all of the layout of a single navigation tab.

    @ivar name: This tab's name.

    @type storeID: C{int}
    @ivar storeID: The Axiom store identifier of the Item to which the user
    should be directed when this tab is activated.

    @ivar priority: A float between 0 and 1 indicating the relative ordering
    of this tab amongst its peers.  Higher priorities sort sooner.

    @ivar children: A tuple of tabs beneath this one.

    @ivar authoritative: A flag indicating whether this instance of the
    conceptual tab with this name takes precedent over any other instance of
    the conceptual tab with this name.  It is an error for two instances of
    the same conceptual tab to be authoritative.

    @type linkURL: C{NoneType} or C{str}
    @ivar linkURL: If not C{None}, the location to which the user should be
    directed when this tab is activated; overrides whatever C{storeID} would
    produce.
    """
    _item = None
    implements(ITab)

    def __init__(self, name, storeID, priority, children=(),
                 authoritative=True, linkURL=None):
        self.name = name
        self.storeID = storeID
        self.priority = priority
        self.children = tuple(children)
        self.authoritative = authoritative
        self.linkURL = linkURL

    def __repr__(self):
        marker = self.authoritative and '*' or ''
        return '<%s%s %r/%0.3f %r [%r]>' % (marker,
                                            self.__class__.__name__,
                                            self.name,
                                            self.priority,
                                            self.storeID,
                                            self.children)

    def __iter__(self):
        # Tabs are containers of *named* children only; refuse iteration.
        raise TypeError("%r are not iterable" % (self.__class__.__name__,))

    def __getitem__(self, key):
        """Retrieve a sub-tab from this tab by name.
        """
        found = None
        for child in self.children:
            if child.name == key:
                assert found is None, "children mis-specified for " + repr(self)
                found = child
        if found is None:
            raise KeyError(key)
        return found

    def pathFromItem(self, item, avatar):
        """
        @param item: A thing that we linked to, and such.

        @return: a list of [child, grandchild, great-grandchild, ...] that
        indicates a path from me to the navigation for that item, or [] if
        there is no path from here to there.
        """
        for child in self.children:
            childPath = child.pathFromItem(item, avatar)
            if childPath:
                return [self] + childPath
        # No descendant matched; the path is just this tab if this tab's own
        # item is the one being sought.
        if self.loadForAvatar(avatar) is item:
            return [self]
        return []
def getTabs(navElements):
    """
    Merge the tabs offered by every navigation plugin in C{navElements} into
    a single priority-sorted list of L{Tab} instances.
    """
    # XXX TODO: multiple levels of nesting, this is hard-coded to 2.

    # Maps a primary tab name to the TabInfo accumulating its merged state.
    merged = {}

    for element in navElements:
        for tab in element.getTabs():
            info = merged.get(tab.name)
            if info is None:
                # NOTE(review): the first plugin's ``authoritative`` flag is
                # not recorded here (TabInfo defaults it to None) -- confirm
                # this is intentional.
                merged[tab.name] = TabInfo(
                    priority=tab.priority,
                    storeID=tab.storeID,
                    children=list(tab.children),
                    linkURL=tab.linkURL)
                continue
            if info.authoritative:
                if tab.authoritative:
                    # Two authoritative definitions of one tab is an error.
                    raise TabMisconfiguration(info, tab)
            elif tab.authoritative:
                # An authoritative tab overrides previously merged data.
                info.authoritative = True
                info.priority = tab.priority
                info.storeID = tab.storeID
                info.linkURL = tab.linkURL
            info.children.extend(tab.children)

    # Highest priority sorts first.
    def descendingPriority(o):
        return -o.priority

    resultTabs = []
    for (name, info) in merged.iteritems():
        info.children.sort(key=descendingPriority)
        resultTabs.append(
            Tab(name, info.storeID, info.priority, info.children,
                linkURL=info.linkURL))
    resultTabs.sort(key=descendingPriority)
    return resultTabs
def setTabURLs(tabs, webTranslator):
    """
    Fill in the C{linkURL} attribute of each L{Tab} in C{tabs} (and,
    recursively, of its children) that does not already have one, using the
    tab's store ID and the given translator.

    @param tabs: sequence of L{Tab} instances
    @param webTranslator: L{xmantissa.ixmantissa.IWebTranslator}
    implementor

    @return: None
    """
    for tab in tabs:
        tab.linkURL = tab.linkURL or webTranslator.linkTo(tab.storeID)
        setTabURLs(tab.children, webTranslator)
def getSelectedTab(tabs, forURL):
    """
    Return the tab that should be selected when the current resource lives
    at C{forURL}.  Call me after L{setTabURLs}.

    An exact match on a tab's C{linkURL} wins outright; failing that, the
    tab with the longest C{linkURL} that is a path prefix of C{forURL} is
    chosen.  Returns C{None} when nothing matches.

    @param tabs: sequence of L{Tab} instances
    @param forURL: L{nevow.url.URL}

    @return: L{Tab} instance, or C{None}
    """
    # Flatten the tab tree, preorder (tab before its children).
    flattened = []
    pending = list(tabs)
    while pending:
        tab = pending.pop(0)
        flattened.append(tab)
        pending[0:0] = list(tab.children)

    target = '/' + forURL.path

    for tab in flattened:
        if tab.linkURL == target:
            return tab

    # No exact hit: prefer the longest link that prefixes the current URL.
    flattened.sort(key=lambda tab: len(tab.linkURL), reverse=True)
    for tab in flattened:
        prefix = tab.linkURL
        if not prefix.endswith('/'):
            prefix += '/'
        if target.startswith(prefix):
            return tab
def startMenu(translator, navigation, tag):
    """
    Drop-down menu-style navigation view.

    For each primary navigation element, a copy of the I{tab} pattern is
    loaded from C{tag}; its I{href} slot is filled with the element's URL,
    its I{name} slot with the element's user-visible name, and its I{kids}
    slot with the rendered secondary navigation.

    Each level of secondary navigation is rendered into a copy of the
    I{subtabs} pattern whose I{kids} slot is filled with a self-similar
    structure.

    @type translator: L{IWebTranslator} provider
    @type navigation: L{list} of L{Tab}
    @rtype: {nevow.stan.Tag}
    """
    setTabURLs(navigation, translator)
    loadPattern = IQ(tag).onePattern

    def renderTabs(tabs):
        for tab in tabs:
            if tab.children:
                kids = loadPattern('subtabs').fillSlots(
                    'kids', renderTabs(tab.children))
            else:
                kids = ''
            yield dictFillSlots(
                loadPattern('tab'),
                dict(href=tab.linkURL, name=tab.name, kids=kids))

    return tag.fillSlots('tabs', renderTabs(navigation))
def settingsLink(translator, settings, tag):
    """
    Render the URL of the settings page into C{tag}.
    """
    link = translator.linkTo(settings.storeID)
    return tag[link]
# This is somewhat redundant with startMenu. The selected/not feature of this
# renderer should be added to startMenu and then templates can just use that
# and this can be deleted.
def applicationNavigation(ctx, translator, navigation):
    """
    Horizontal, primary-only navigation view.
    For the navigation element currently being viewed, copies of the
    I{selected-app-tab} and I{selected-tab-contents} patterns will be
    loaded from the tag. For all other navigation elements, copies of the
    I{app-tab} and I{tab-contents} patterns will be loaded.
    For either case, the former pattern will have its I{name} slot filled
    with the name of the navigation element and its I{tab-contents} slot
    filled with the latter pattern. The latter pattern will have its
    I{href} slot filled with a link to the corresponding navigation
    element.
    The I{tabs} slot on the tag will be filled with all the
    I{selected-app-tab} or I{app-tab} pattern copies.
    @type ctx: L{nevow.context.WebContext}
    @type translator: L{IWebTranslator} provider
    @type navigation: L{list} of L{Tab}
    @rtype: {nevow.stan.Tag}
    """
    # Make sure every tab has a destination URL before rendering.
    setTabURLs(navigation, translator)
    # Determine which tab corresponds to the URL currently being viewed.
    selectedTab = getSelectedTab(navigation,
                                 url.URL.fromContext(ctx))
    getp = IQ(ctx.tag).onePattern
    tabs = []
    for tab in navigation:
        # A tab renders with the "selected" pattern variants when it (or one
        # of its direct children) is the currently selected tab.
        if tab == selectedTab or selectedTab in tab.children:
            p = 'selected-app-tab'
            contentp = 'selected-tab-contents'
        else:
            p = 'app-tab'
            contentp = 'tab-contents'
        childTabs = []
        for subtab in tab.children:
            # The "subtab" pattern is optional in the template; when it is
            # absent, child tabs are simply not rendered.
            try:
                subtabp = getp("subtab")
            except NodeNotFound:
                continue
            childTabs.append(
                dictFillSlots(subtabp, {
                    'name': subtab.name,
                    'href': subtab.linkURL,
                    'tab-contents': getp("subtab-contents")
                    }))
        tabs.append(dictFillSlots(
            getp(p),
            {'name': tab.name,
             'tab-contents': getp(contentp).fillSlots(
                    'href', tab.linkURL),
             'subtabs': childTabs}))
    # Fill the top-level "tabs" slot with all rendered tab copies.
    ctx.tag.fillSlots('tabs', tabs)
    return ctx.tag
/HBT_IP_Test-1.0.1-py3-none-any.whl/HBT_IP_Test/libs/isom/python/IsomObject_pb2.py |
# Generated protocol buffer code for IsomObject.proto (package
# Honeywell.Security.ISOM).  Machine-generated by protoc -- regenerate from
# the .proto file instead of editing by hand (see the
# @@protoc_insertion_point markers).
import sys
# Py2/Py3 shim: descriptor byte strings must be latin1-encoded on Python 3.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import IsomCommonHeaders_pb2 as IsomCommonHeaders__pb2
# File descriptor carrying the serialized FileDescriptorProto for
# IsomObject.proto; the message/enum descriptors below index into it.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='IsomObject.proto',
  package='Honeywell.Security.ISOM',
  syntax='proto2',
  serialized_options=None,
  serialized_pb=_b('\n\x10IsomObject.proto\x12\x17Honeywell.Security.ISOM\x1a\x17IsomCommonHeaders.proto\"C\n\x13\x41uthorizationHeader\x12\x10\n\x08userName\x18\x0b \x01(\t\x12\x10\n\x08password\x18\x0c \x01(\t*\x08\x08\xc0\x84=\x10\xe0\x91\x43\"\xac\x01\n\x0c\x44omainObject\x12\x12\n\nobjectType\x18\x0b \x01(\x04\x12)\n\x03uri\x18\x0c \x01(\x0b\x32\x1c.Honeywell.Security.ISOM.URI\x12@\n\nauthHeader\x18\r \x01(\x0b\x32,.Honeywell.Security.ISOM.AuthorizationHeader\x12\x11\n\tsessionId\x18\x0e \x01(\t*\x08\x08\xc0\x84=\x10\xe0\x91\x43\"\xce\x01\n\x11IsomRequestObject\x12\x15\n\rtransactionId\x18\x0b \x01(\x04\x12\x38\n\tdomainObj\x18\x0c \x01(\x0b\x32%.Honeywell.Security.ISOM.DomainObject\x12I\n\x13payloadTransferMode\x18\r \x01(\x0e\x32,.Honeywell.Security.ISOM.PayloadTransferMode\x12\x13\n\x0bpayloadSize\x18\x0e \x01(\x04*\x08\x08\xc0\x84=\x10\xe0\x91\x43\"\xab\x01\n\x12IsomResponseObject\x12\x15\n\rtransactionId\x18\x0b \x01(\x04\x12\x14\n\x0cresponseCode\x18\x0c \x01(\x04\x12I\n\x13payloadTransferMode\x18\r \x01(\x0e\x32,.Honeywell.Security.ISOM.PayloadTransferMode\x12\x13\n\x0bpayloadSize\x18\x0e \x01(\x04*\x08\x08\xc0\x84=\x10\xe0\x91\x43*B\n\x13PayloadTransferMode\x12\x0e\n\nBy_PayLoad\x10\x0b\x12\x0b\n\x07\x42y_File\x10\x0c\x12\x0e\n\nBy_Chunked\x10\r')
  ,
  dependencies=[IsomCommonHeaders__pb2.DESCRIPTOR,])
# Top-level enum PayloadTransferMode (how a payload accompanies a request).
_PAYLOADTRANSFERMODE = _descriptor.EnumDescriptor(
  name='PayloadTransferMode',
  full_name='Honeywell.Security.ISOM.PayloadTransferMode',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='By_PayLoad', index=0, number=11,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='By_File', index=1, number=12,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='By_Chunked', index=2, number=13,
      serialized_options=None,
      type=None),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=697,
  serialized_end=763,
)
_sym_db.RegisterEnumDescriptor(_PAYLOADTRANSFERMODE)
PayloadTransferMode = enum_type_wrapper.EnumTypeWrapper(_PAYLOADTRANSFERMODE)
# Module-level aliases for the enum values.
By_PayLoad = 11
By_File = 12
By_Chunked = 13
# Descriptor for message AuthorizationHeader (userName/password credential
# pair; proto2, extendable in the 1000000-1100000 range).
_AUTHORIZATIONHEADER = _descriptor.Descriptor(
  name='AuthorizationHeader',
  full_name='Honeywell.Security.ISOM.AuthorizationHeader',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='userName', full_name='Honeywell.Security.ISOM.AuthorizationHeader.userName', index=0,
      number=11, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='password', full_name='Honeywell.Security.ISOM.AuthorizationHeader.password', index=1,
      number=12, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=True,
  syntax='proto2',
  extension_ranges=[(1000000, 1100000), ],
  oneofs=[
  ],
  serialized_start=70,
  serialized_end=137,
)
# Descriptor for message DomainObject (objectType, uri, authHeader, sessionId).
_DOMAINOBJECT = _descriptor.Descriptor(
  name='DomainObject',
  full_name='Honeywell.Security.ISOM.DomainObject',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='objectType', full_name='Honeywell.Security.ISOM.DomainObject.objectType', index=0,
      number=11, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='uri', full_name='Honeywell.Security.ISOM.DomainObject.uri', index=1,
      number=12, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='authHeader', full_name='Honeywell.Security.ISOM.DomainObject.authHeader', index=2,
      number=13, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='sessionId', full_name='Honeywell.Security.ISOM.DomainObject.sessionId', index=3,
      number=14, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=True,
  syntax='proto2',
  extension_ranges=[(1000000, 1100000), ],
  oneofs=[
  ],
  serialized_start=140,
  serialized_end=312,
)
# Descriptor for message IsomRequestObject (transaction id, embedded
# DomainObject, payload transfer mode and size).
_ISOMREQUESTOBJECT = _descriptor.Descriptor(
  name='IsomRequestObject',
  full_name='Honeywell.Security.ISOM.IsomRequestObject',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='transactionId', full_name='Honeywell.Security.ISOM.IsomRequestObject.transactionId', index=0,
      number=11, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='domainObj', full_name='Honeywell.Security.ISOM.IsomRequestObject.domainObj', index=1,
      number=12, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='payloadTransferMode', full_name='Honeywell.Security.ISOM.IsomRequestObject.payloadTransferMode', index=2,
      number=13, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=11,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='payloadSize', full_name='Honeywell.Security.ISOM.IsomRequestObject.payloadSize', index=3,
      number=14, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=True,
  syntax='proto2',
  extension_ranges=[(1000000, 1100000), ],
  oneofs=[
  ],
  serialized_start=315,
  serialized_end=521,
)
# Descriptor for message IsomResponseObject (transaction id, response code,
# payload transfer mode and size).
_ISOMRESPONSEOBJECT = _descriptor.Descriptor(
  name='IsomResponseObject',
  full_name='Honeywell.Security.ISOM.IsomResponseObject',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='transactionId', full_name='Honeywell.Security.ISOM.IsomResponseObject.transactionId', index=0,
      number=11, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='responseCode', full_name='Honeywell.Security.ISOM.IsomResponseObject.responseCode', index=1,
      number=12, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='payloadTransferMode', full_name='Honeywell.Security.ISOM.IsomResponseObject.payloadTransferMode', index=2,
      number=13, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=11,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='payloadSize', full_name='Honeywell.Security.ISOM.IsomResponseObject.payloadSize', index=3,
      number=14, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=True,
  syntax='proto2',
  extension_ranges=[(1000000, 1100000), ],
  oneofs=[
  ],
  serialized_start=524,
  serialized_end=695,
)
# Wire up cross-references between descriptors, register everything with the
# default symbol database, and build the concrete message classes.
_DOMAINOBJECT.fields_by_name['uri'].message_type = IsomCommonHeaders__pb2._URI
_DOMAINOBJECT.fields_by_name['authHeader'].message_type = _AUTHORIZATIONHEADER
_ISOMREQUESTOBJECT.fields_by_name['domainObj'].message_type = _DOMAINOBJECT
_ISOMREQUESTOBJECT.fields_by_name['payloadTransferMode'].enum_type = _PAYLOADTRANSFERMODE
_ISOMRESPONSEOBJECT.fields_by_name['payloadTransferMode'].enum_type = _PAYLOADTRANSFERMODE
DESCRIPTOR.message_types_by_name['AuthorizationHeader'] = _AUTHORIZATIONHEADER
DESCRIPTOR.message_types_by_name['DomainObject'] = _DOMAINOBJECT
DESCRIPTOR.message_types_by_name['IsomRequestObject'] = _ISOMREQUESTOBJECT
DESCRIPTOR.message_types_by_name['IsomResponseObject'] = _ISOMRESPONSEOBJECT
DESCRIPTOR.enum_types_by_name['PayloadTransferMode'] = _PAYLOADTRANSFERMODE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AuthorizationHeader = _reflection.GeneratedProtocolMessageType('AuthorizationHeader', (_message.Message,), {
  'DESCRIPTOR' : _AUTHORIZATIONHEADER,
  '__module__' : 'IsomObject_pb2'
  # @@protoc_insertion_point(class_scope:Honeywell.Security.ISOM.AuthorizationHeader)
  })
_sym_db.RegisterMessage(AuthorizationHeader)
DomainObject = _reflection.GeneratedProtocolMessageType('DomainObject', (_message.Message,), {
  'DESCRIPTOR' : _DOMAINOBJECT,
  '__module__' : 'IsomObject_pb2'
  # @@protoc_insertion_point(class_scope:Honeywell.Security.ISOM.DomainObject)
  })
_sym_db.RegisterMessage(DomainObject)
IsomRequestObject = _reflection.GeneratedProtocolMessageType('IsomRequestObject', (_message.Message,), {
  'DESCRIPTOR' : _ISOMREQUESTOBJECT,
  '__module__' : 'IsomObject_pb2'
  # @@protoc_insertion_point(class_scope:Honeywell.Security.ISOM.IsomRequestObject)
  })
_sym_db.RegisterMessage(IsomRequestObject)
IsomResponseObject = _reflection.GeneratedProtocolMessageType('IsomResponseObject', (_message.Message,), {
  'DESCRIPTOR' : _ISOMRESPONSEOBJECT,
  '__module__' : 'IsomObject_pb2'
  # @@protoc_insertion_point(class_scope:Honeywell.Security.ISOM.IsomResponseObject)
  })
_sym_db.RegisterMessage(IsomResponseObject)
# @@protoc_insertion_point(module_scope)
/CleanAdminDjango-1.5.3.1.tar.gz/CleanAdminDjango-1.5.3.1/django/contrib/databrowse/datastructures.py | from __future__ import unicode_literals
from django.db import models
from django.utils import formats
from django.utils.text import capfirst
from django.utils.encoding import smart_text, force_str, iri_to_uri
from django.db.models.query import QuerySet
from django.utils.encoding import python_2_unicode_compatible
# Placeholder rendered in place of NULL / missing field values.
EMPTY_VALUE = '(None)'
# Maximum number of characters of an instance's text representation shown by
# EasyInstance.__str__ before truncating with an ellipsis.
DISPLAY_SIZE = 100
class EasyModel(object):
    """
    Databrowse-friendly wrapper around a Django model class, bound to the
    databrowse site on which the model was registered.
    """
    def __init__(self, site, model):
        self.site = site
        self.model = model
        self.model_list = list(site.registry.keys())
        self.verbose_name = model._meta.verbose_name
        self.verbose_name_plural = model._meta.verbose_name_plural

    def __repr__(self):
        return force_str('<EasyModel for %s>' % self.model._meta.object_name)

    def model_databrowse(self):
        "Returns the ModelDatabrowse class for this model."
        return self.site.registry[self.model]

    def url(self):
        meta = self.model._meta
        return '%s%s/%s/' % (self.site.root_url, meta.app_label,
                             meta.module_name)

    def objects(self, **kwargs):
        return self.get_query_set().filter(**kwargs)

    def get_query_set(self):
        # Re-class the default queryset so iteration yields EasyInstances.
        qs = self.model._default_manager.get_query_set()
        easy_qs = qs._clone(klass=EasyQuerySet)
        easy_qs._easymodel = self
        return easy_qs

    def object_by_pk(self, pk):
        return EasyInstance(self, self.model._default_manager.get(pk=pk))

    def sample_objects(self):
        # Expose at most three example instances.
        for obj in self.model._default_manager.all()[:3]:
            yield EasyInstance(self, obj)

    def field(self, name):
        try:
            model_field = self.model._meta.get_field(name)
        except models.FieldDoesNotExist:
            return None
        return EasyField(self, model_field)

    def fields(self):
        meta = self.model._meta
        return [EasyField(self, f) for f in (meta.fields + meta.many_to_many)]
class EasyField(object):
    """
    Pairs one model field with its EasyModel, for URL generation and choice
    enumeration.
    """
    def __init__(self, easy_model, field):
        self.model = easy_model
        self.field = field

    def __repr__(self):
        return force_str('<EasyField for %s.%s>' % (
            self.model.model._meta.object_name, self.field.name))

    def choices(self):
        for value, label in self.field.choices:
            yield EasyChoice(self.model, self, value, label)

    def url(self):
        # Fields with choices get their own browsable page; relations link
        # to the related model's listing; anything else has no URL (None).
        site = self.model.site
        meta = self.model.model._meta
        if self.field.choices:
            return '%s%s/%s/%s/' % (site.root_url, meta.app_label,
                                    meta.module_name, self.field.name)
        if self.field.rel:
            return '%s%s/%s/' % (site.root_url, meta.app_label,
                                 meta.module_name)
class EasyChoice(object):
    """
    One (value, label) pair from a field's ``choices``, carrying enough
    context to build a URL filtering the model on that value.
    """
    def __init__(self, easy_model, field, value, label):
        self.model = easy_model
        self.field = field
        self.value = value
        self.label = label

    def __repr__(self):
        return force_str('<EasyChoice for %s.%s>' % (
            self.model.model._meta.object_name, self.field.name))

    def url(self):
        site = self.model.site
        meta = self.model.model._meta
        return '%s%s/%s/%s/%s/' % (site.root_url, meta.app_label,
                                   meta.module_name, self.field.field.name,
                                   iri_to_uri(self.value))
@python_2_unicode_compatible
class EasyInstance(object):
    """
    Wrapper around one model instance, exposing its fields and related
    objects in a databrowse-friendly form.
    """
    def __init__(self, easy_model, instance):
        self.model = easy_model
        self.instance = instance

    def __repr__(self):
        return force_str('<EasyInstance for %s (%s)>' % (
            self.model.model._meta.object_name, self.instance._get_pk_val()))

    def __str__(self):
        # Truncate very long representations for display.
        val = smart_text(self.instance)
        if len(val) > DISPLAY_SIZE:
            val = val[:DISPLAY_SIZE] + '...'
        return val

    def pk(self):
        return self.instance._get_pk_val()

    def url(self):
        meta = self.model.model._meta
        return '%s%s/%s/objects/%s/' % (self.model.site.root_url,
                                        meta.app_label, meta.module_name,
                                        iri_to_uri(self.pk()))

    def fields(self):
        """
        Generator that yields EasyInstanceFields for each field in this
        EasyInstance's model.
        """
        meta = self.model.model._meta
        for field in meta.fields + meta.many_to_many:
            yield EasyInstanceField(self.model, self, field)

    def related_objects(self):
        """
        Generator that yields dictionaries of all models that have this
        EasyInstance's model as a ForeignKey or ManyToManyField, along with
        lists of related objects.
        """
        meta = self.model.model._meta
        related = (meta.get_all_related_objects() +
                   meta.get_all_related_many_to_many_objects())
        for rel_object in related:
            if rel_object.model not in self.model.model_list:
                # Only expose models registered with this databrowse site.
                continue
            em = EasyModel(self.model.site, rel_object.model)
            accessor = getattr(self.instance, rel_object.get_accessor_name())
            yield {
                'model': em,
                'related_field': rel_object.field.verbose_name,
                'object_list': [EasyInstance(em, i) for i in accessor.all()],
            }
class EasyInstanceField(object):
    """
    One field's value on one model instance, with helpers to render the
    value(s) and the URL(s) they should link to.
    """
    def __init__(self, easy_model, instance, field):
        self.model, self.field, self.instance = easy_model, field, instance
        # Raw attribute value pulled off the underlying model instance.
        self.raw_value = getattr(instance.instance, field.name)

    def __repr__(self):
        return force_str('<EasyInstanceField for %s.%s>' % (self.model.model._meta.object_name, self.field.name))

    def values(self):
        """
        Returns a list of values for this field for this instance. It's a list
        so we can accommodate many-to-many fields.
        """
        if self.field.rel:
            if isinstance(self.field.rel, models.ManyToOneRel):
                objs = getattr(self.instance.instance, self.field.name)
            elif isinstance(self.field.rel, models.ManyToManyRel): # ManyToManyRel
                return list(getattr(self.instance.instance, self.field.name).all())
        elif self.field.choices:
            # Display the human-readable label for the stored choice value.
            objs = dict(self.field.choices).get(self.raw_value, EMPTY_VALUE)
        elif isinstance(self.field, models.DateField) or isinstance(self.field, models.TimeField):
            if self.raw_value:
                # DateTimeField subclasses DateField, so it must be checked
                # before falling back to the plain date format.
                if isinstance(self.field, models.DateTimeField):
                    objs = capfirst(formats.date_format(self.raw_value, 'DATETIME_FORMAT'))
                elif isinstance(self.field, models.TimeField):
                    objs = capfirst(formats.time_format(self.raw_value, 'TIME_FORMAT'))
                else:
                    objs = capfirst(formats.date_format(self.raw_value, 'DATE_FORMAT'))
            else:
                objs = EMPTY_VALUE
        elif isinstance(self.field, models.BooleanField) or isinstance(self.field, models.NullBooleanField):
            objs = {True: 'Yes', False: 'No', None: 'Unknown'}[self.raw_value]
        else:
            objs = self.raw_value
        return [objs]

    def urls(self):
        "Returns a list of (value, URL) tuples."
        # The urls() hook of each registered plugin gets first crack; the
        # first plugin returning a non-None result wins outright.
        for plugin_name, plugin in self.model.model_databrowse().plugins.items():
            urls = plugin.urls(plugin_name, self)
            if urls is not None:
                return zip(self.values(), urls)
        if self.field.rel:
            m = EasyModel(self.model.site, self.field.rel.to)
            if self.field.rel.to in self.model.model_list:
                lst = []
                for value in self.values():
                    if value is None:
                        continue
                    url = '%s%s/%s/objects/%s/' % (self.model.site.root_url, m.model._meta.app_label, m.model._meta.module_name, iri_to_uri(value._get_pk_val()))
                    lst.append((smart_text(value), url))
            else:
                # Related model not registered with this site: no links.
                lst = [(value, None) for value in self.values()]
        elif self.field.choices:
            lst = []
            for value in self.values():
                url = '%s%s/%s/fields/%s/%s/' % (self.model.site.root_url, self.model.model._meta.app_label, self.model.model._meta.module_name, self.field.name, iri_to_uri(self.raw_value))
                lst.append((value, url))
        elif isinstance(self.field, models.URLField):
            val = list(self.values())[0]
            lst = [(val, iri_to_uri(val))]
        else:
            lst = [(list(self.values())[0], None)]
        return lst
class EasyQuerySet(QuerySet):
    """
    When creating (or cloning to) an `EasyQuerySet`, make sure to set the
    `_easymodel` variable to the related `EasyModel`.
    """
    def iterator(self, *args, **kwargs):
        # Wrap every object produced by the stock QuerySet iterator.
        base = super(EasyQuerySet, self).iterator(*args, **kwargs)
        for obj in base:
            yield EasyInstance(self._easymodel, obj)

    def _clone(self, *args, **kwargs):
        clone = super(EasyQuerySet, self)._clone(*args, **kwargs)
        clone._easymodel = self._easymodel
        return clone
/dragonflow-4.0.0.tar.gz/dragonflow-4.0.0/rally-jobs/README.rst | Rally job related files
=======================
This directory contains rally tasks and plugins that are run by OpenStack CI.
Structure
---------
* plugins - directory where you can add rally plugins. Almost everything in
Rally is a plugin. Benchmark context, Benchmark scenario, SLA checks, Generic
cleanup resources, ....
* extra - all files from this directory will be copied to the gate machines, so
  you are able to use absolute paths in rally tasks.
  Files will be located in ~/.rally/extra/*
* dragonflow.yaml is a task that is run in gates against OpenStack with
Neutron service configured with Dragonflow plugin
Useful links
------------
* More about Rally: https://rally.readthedocs.org/en/latest/
* Rally release notes: https://rally.readthedocs.org/en/latest/release_notes.html
* How to add rally-gates: https://rally.readthedocs.org/en/latest/gates.html
* About plugins: https://rally.readthedocs.org/en/latest/plugins.html
* Plugin samples: https://github.com/openstack/rally/tree/master/samples/plugins
| PypiClean |
/INGInious-0.8.7.tar.gz/INGInious-0.8.7/inginious/frontend/static/js/codemirror/mode/nginx/nginx.js |
(function(mod) {
if (typeof exports == "object" && typeof module == "object") // CommonJS
mod(require("../../lib/codemirror"));
else if (typeof define == "function" && define.amd) // AMD
define(["../../lib/codemirror"], mod);
else // Plain browser env
mod(CodeMirror);
})(function(CodeMirror) {
"use strict";
CodeMirror.defineMode("nginx", function(config) {
function words(str) {
var obj = {}, words = str.split(" ");
for (var i = 0; i < words.length; ++i) obj[words[i]] = true;
return obj;
}
var keywords = words(
/* ngxDirectiveControl */ "break return rewrite set" +
/* ngxDirective */ " accept_mutex accept_mutex_delay access_log add_after_body add_before_body add_header addition_types aio alias allow ancient_browser ancient_browser_value auth_basic auth_basic_user_file auth_http auth_http_header auth_http_timeout autoindex autoindex_exact_size autoindex_localtime charset charset_types client_body_buffer_size client_body_in_file_only client_body_in_single_buffer client_body_temp_path client_body_timeout client_header_buffer_size client_header_timeout client_max_body_size connection_pool_size create_full_put_path daemon dav_access dav_methods debug_connection debug_points default_type degradation degrade deny devpoll_changes devpoll_events directio directio_alignment empty_gif env epoll_events error_log eventport_events expires fastcgi_bind fastcgi_buffer_size fastcgi_buffers fastcgi_busy_buffers_size fastcgi_cache fastcgi_cache_key fastcgi_cache_methods fastcgi_cache_min_uses fastcgi_cache_path fastcgi_cache_use_stale fastcgi_cache_valid fastcgi_catch_stderr fastcgi_connect_timeout fastcgi_hide_header fastcgi_ignore_client_abort fastcgi_ignore_headers fastcgi_index fastcgi_intercept_errors fastcgi_max_temp_file_size fastcgi_next_upstream fastcgi_param fastcgi_pass_header fastcgi_pass_request_body fastcgi_pass_request_headers fastcgi_read_timeout fastcgi_send_lowat fastcgi_send_timeout fastcgi_split_path_info fastcgi_store fastcgi_store_access fastcgi_temp_file_write_size fastcgi_temp_path fastcgi_upstream_fail_timeout fastcgi_upstream_max_fails flv geoip_city geoip_country google_perftools_profiles gzip gzip_buffers gzip_comp_level gzip_disable gzip_hash gzip_http_version gzip_min_length gzip_no_buffer gzip_proxied gzip_static gzip_types gzip_vary gzip_window if_modified_since ignore_invalid_headers image_filter image_filter_buffer image_filter_jpeg_quality image_filter_transparency imap_auth imap_capabilities imap_client_buffer index ip_hash keepalive_requests keepalive_timeout kqueue_changes kqueue_events 
large_client_header_buffers limit_conn limit_conn_log_level limit_rate limit_rate_after limit_req limit_req_log_level limit_req_zone limit_zone lingering_time lingering_timeout lock_file log_format log_not_found log_subrequest map_hash_bucket_size map_hash_max_size master_process memcached_bind memcached_buffer_size memcached_connect_timeout memcached_next_upstream memcached_read_timeout memcached_send_timeout memcached_upstream_fail_timeout memcached_upstream_max_fails merge_slashes min_delete_depth modern_browser modern_browser_value msie_padding msie_refresh multi_accept open_file_cache open_file_cache_errors open_file_cache_events open_file_cache_min_uses open_file_cache_valid open_log_file_cache output_buffers override_charset perl perl_modules perl_require perl_set pid pop3_auth pop3_capabilities port_in_redirect postpone_gzipping postpone_output protocol proxy proxy_bind proxy_buffer proxy_buffer_size proxy_buffering proxy_buffers proxy_busy_buffers_size proxy_cache proxy_cache_key proxy_cache_methods proxy_cache_min_uses proxy_cache_path proxy_cache_use_stale proxy_cache_valid proxy_connect_timeout proxy_headers_hash_bucket_size proxy_headers_hash_max_size proxy_hide_header proxy_ignore_client_abort proxy_ignore_headers proxy_intercept_errors proxy_max_temp_file_size proxy_method proxy_next_upstream proxy_pass_error_message proxy_pass_header proxy_pass_request_body proxy_pass_request_headers proxy_read_timeout proxy_redirect proxy_send_lowat proxy_send_timeout proxy_set_body proxy_set_header proxy_ssl_session_reuse proxy_store proxy_store_access proxy_temp_file_write_size proxy_temp_path proxy_timeout proxy_upstream_fail_timeout proxy_upstream_max_fails random_index read_ahead real_ip_header recursive_error_pages request_pool_size reset_timedout_connection resolver resolver_timeout rewrite_log rtsig_overflow_events rtsig_overflow_test rtsig_overflow_threshold rtsig_signo satisfy secure_link_secret send_lowat send_timeout sendfile sendfile_max_chunk 
server_name_in_redirect server_names_hash_bucket_size server_names_hash_max_size server_tokens set_real_ip_from smtp_auth smtp_capabilities smtp_client_buffer smtp_greeting_delay so_keepalive source_charset ssi ssi_ignore_recycled_buffers ssi_min_file_chunk ssi_silent_errors ssi_types ssi_value_length ssl ssl_certificate ssl_certificate_key ssl_ciphers ssl_client_certificate ssl_crl ssl_dhparam ssl_engine ssl_prefer_server_ciphers ssl_protocols ssl_session_cache ssl_session_timeout ssl_verify_client ssl_verify_depth starttls stub_status sub_filter sub_filter_once sub_filter_types tcp_nodelay tcp_nopush thread_stack_size timeout timer_resolution types_hash_bucket_size types_hash_max_size underscores_in_headers uninitialized_variable_warn use user userid userid_domain userid_expires userid_mark userid_name userid_p3p userid_path userid_service valid_referers variables_hash_bucket_size variables_hash_max_size worker_connections worker_cpu_affinity worker_priority worker_processes worker_rlimit_core worker_rlimit_nofile worker_rlimit_sigpending worker_threads working_directory xclient xml_entities xslt_stylesheet xslt_typesdrew@li229-23"
);
var keywords_block = words(
/* ngxDirectiveBlock */ "http mail events server types location upstream charset_map limit_except if geo map"
);
var keywords_important = words(
/* ngxDirectiveImportant */ "include root server server_name listen internal proxy_pass memcached_pass fastcgi_pass try_files"
);
var indentUnit = config.indentUnit, type;
function ret(style, tp) {type = tp; return style;}
function tokenBase(stream, state) {
stream.eatWhile(/[\w\$_]/);
var cur = stream.current();
if (keywords.propertyIsEnumerable(cur)) {
return "keyword";
}
else if (keywords_block.propertyIsEnumerable(cur)) {
return "variable-2";
}
else if (keywords_important.propertyIsEnumerable(cur)) {
return "string-2";
}
/**/
var ch = stream.next();
if (ch == "@") {stream.eatWhile(/[\w\\\-]/); return ret("meta", stream.current());}
else if (ch == "/" && stream.eat("*")) {
state.tokenize = tokenCComment;
return tokenCComment(stream, state);
}
else if (ch == "<" && stream.eat("!")) {
state.tokenize = tokenSGMLComment;
return tokenSGMLComment(stream, state);
}
else if (ch == "=") ret(null, "compare");
else if ((ch == "~" || ch == "|") && stream.eat("=")) return ret(null, "compare");
else if (ch == "\"" || ch == "'") {
state.tokenize = tokenString(ch);
return state.tokenize(stream, state);
}
else if (ch == "#") {
stream.skipToEnd();
return ret("comment", "comment");
}
else if (ch == "!") {
stream.match(/^\s*\w*/);
return ret("keyword", "important");
}
else if (/\d/.test(ch)) {
stream.eatWhile(/[\w.%]/);
return ret("number", "unit");
}
else if (/[,.+>*\/]/.test(ch)) {
return ret(null, "select-op");
}
else if (/[;{}:\[\]]/.test(ch)) {
return ret(null, ch);
}
else {
stream.eatWhile(/[\w\\\-]/);
return ret("variable", "variable");
}
}
function tokenCComment(stream, state) {
var maybeEnd = false, ch;
while ((ch = stream.next()) != null) {
if (maybeEnd && ch == "/") {
state.tokenize = tokenBase;
break;
}
maybeEnd = (ch == "*");
}
return ret("comment", "comment");
}
function tokenSGMLComment(stream, state) {
var dashes = 0, ch;
while ((ch = stream.next()) != null) {
if (dashes >= 2 && ch == ">") {
state.tokenize = tokenBase;
break;
}
dashes = (ch == "-") ? dashes + 1 : 0;
}
return ret("comment", "comment");
}
function tokenString(quote) {
return function(stream, state) {
var escaped = false, ch;
while ((ch = stream.next()) != null) {
if (ch == quote && !escaped)
break;
escaped = !escaped && ch == "\\";
}
if (!escaped) state.tokenize = tokenBase;
return ret("string", "string");
};
}
  return {
    // Fresh parser state: default tokenizer, optional base indentation, and
    // an empty context stack that tracks nested blocks/rules.
    startState: function(base) {
      return {tokenize: tokenBase,
              baseIndent: base || 0,
              stack: []};
    },

    token: function(stream, state) {
      if (stream.eatSpace()) return null;
      type = null;
      // The active tokenizer reports the token's logical type via ret(),
      // which assigns the module-level `type`.
      var style = state.tokenize(stream, state);

      // `context` is the innermost open construct, if any.
      var context = state.stack[state.stack.length-1];
      if (type == "hash" && context == "rule") style = "atom";
      else if (style == "variable") {
        if (context == "rule") style = "number";
        else if (!context || context == "@media{") style = "tag";
      }

      // Maintain the context stack from the token's structural type.
      if (context == "rule" && /^[\{\};]$/.test(type))
        state.stack.pop();
      if (type == "{") {
        if (context == "@media") state.stack[state.stack.length-1] = "@media{";
        else state.stack.push("{");
      }
      else if (type == "}") state.stack.pop();
      else if (type == "@media") state.stack.push("@media");
      else if (context == "{" && type != "comment") state.stack.push("rule");
      return style;
    },

    // Indent one unit per open construct; a closing brace dedents (two
    // levels when it also terminates an implicit "rule" context).
    indent: function(state, textAfter) {
      var n = state.stack.length;
      if (/^\}/.test(textAfter))
        n -= state.stack[state.stack.length-1] == "rule" ? 2 : 1;
      return state.baseIndent + n * indentUnit;
    },

    electricChars: "}"
  };
});
CodeMirror.defineMIME("text/x-nginx-conf", "nginx");
}); | PypiClean |
/LinSATNet-0.0.8.tar.gz/LinSATNet-0.0.8/README.md | # LinSATNet
This is the official implementation of our ICML 2023 paper "LinSATNet: The Positive Linear Satisfiability Neural Networks".
* [[paper]](https://runzhong.wang/files/icml2023_LinSATNet.pdf)
With LinSATNet, you can enforce the satisfiability of general **positive linear constraints** to the output of neural networks.

The LinSAT layer is fully differentiable, and the gradients are exactly computed. Our implementation now supports PyTorch.
You can install it by
```shell
pip install linsatnet
```
And get started by
```python
from LinSATNet import linsat_layer
```
### Table of contents
- [LinSATNet](#linsatnet)
* [A Quick Example](#a-quick-example)
* [API Reference](#api-reference)
+ [The ``linsat_layer`` function](#the-linsat_layer-function)
+ [Some practical notes](#some-practical-notes)
* [How it works?](#how-it-works-)
+ [Classic Sinkhorn with single-set marginals](#classic-sinkhorn-with-single-set-marginals)
+ [Extended Sinkhorn with multi-set marginals](#extended-sinkhorn-with-multi-set-marginals)
+ [Transforming positive linear constraints into marginals](#transforming-positive-linear-constraints-into-marginals)
- [Encoding neural network's output](#encoding-neural-networks-output)
- [From linear constraints to marginals](#from-linear-constraints-to-marginals)
* [More Complicated Use Cases (appeared in our paper)](#more-complicated-use-cases-appeared-in-our-paper)
+ [I. Neural Solver for Traveling Salesman Problem with Extra Constraints](#i-neural-solver-for-traveling-salesman-problem-with-extra-constraints)
+ [II. Partial Graph Matching with Outliers on Both Sides](#ii-partial-graph-matching-with-outliers-on-both-sides)
+ [III. Portfolio Allocation](#iii-portfolio-allocation)
* [Citation](#citation)
## A Quick Example
There is a quick example if you run ``LinSATNet/linsat.py`` directly. In this
example, the doubly-stochastic constraint is enforced for 3x3 variables.
To run the example, first clone the repo:
```shell
git clone https://github.com/Thinklab-SJTU/LinSATNet.git
```
Go into the repo, and run the example code:
```shell
cd LinSATNet
python LinSATNet/linsat.py
```
In this example, we try to enforce doubly-stochastic constraint to a 3x3 matrix.
The doubly-stochastic constraint means that all rows and columns of the matrix
should sum to 1.
The 3x3 matrix is flattened into a vector, and the following positive
linear constraints are considered (for $\mathbf{E}\mathbf{x}=\mathbf{f}$):
```python
E = torch.tensor(
[[1, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 1],
[1, 0, 0, 1, 0, 0, 1, 0, 0],
[0, 1, 0, 0, 1, 0, 0, 1, 0],
[0, 0, 1, 0, 0, 1, 0, 0, 1]], dtype=torch.float32
)
f = torch.tensor([1, 1, 1, 1, 1, 1], dtype=torch.float32)
```
We randomly init ``w`` and regard it as the output of some neural networks:
```python
w = torch.rand(9) # w could be the output of neural network
w = w.requires_grad_(True)
```
We also have a "ground-truth target" for the output of ``linsat_layer``, which
is an orthogonal matrix in this example:
```python
x_gt = torch.tensor(
[1, 0, 0,
0, 1, 0,
0, 0, 1], dtype=torch.float32
)
```
The forward/backward passes of LinSAT follow the standard PyTorch style and are
readily integrated into existing deep learning pipelines.
The forward pass:
```python
linsat_outp = linsat_layer(w, E=E, f=f, tau=0.1, max_iter=10, dummy_val=0)
```
The backward pass:
```python
loss = ((linsat_outp - x_gt) ** 2).sum()
loss.backward()
```
We can also do gradient-based optimization over ``w`` to make the output of
``linsat_layer`` closer to ``x_gt``. This is what's happening when you train a
neural network.
```python
niters = 10
opt = torch.optim.SGD([w], lr=0.1, momentum=0.9)
for i in range(niters):
x = linsat_layer(w, E=E, f=f, tau=0.1, max_iter=10, dummy_val=0)
cv = torch.matmul(E, x.t()).t() - f.unsqueeze(0)
loss = ((x - x_gt) ** 2).sum()
loss.backward()
opt.step()
opt.zero_grad()
print(f'{i}/{niters}\n'
f' underlying obj={torch.sum(w * x)},\n'
f' loss={loss},\n'
f' sum(constraint violation)={torch.sum(cv[cv > 0])},\n'
f' x={x},\n'
f' constraint violation={cv}')
```
And you are likely to see the loss decreasing during the gradient steps.
## API Reference
To use LinSATNet in your own project, make sure you have the package installed:
```shell
pip install linsatnet
```
and import the package at the beginning of your code:
```python
from LinSATNet import linsat_layer
```
### The ``linsat_layer`` function
> **LinSATNet.linsat_layer**(x, A=None, b=None, C=None, d=None, E=None, f=None, tau=0.05, max_iter=100, dummy_val=0, mode='v1', no_warning=False) [[source]](https://github.com/Thinklab-SJTU/LinSATNet/blob/main/LinSATNet/linsat.py#L11)
LinSAT layer enforces positive linear constraints to the input ``x`` and
projects it with the constraints
$$\mathbf{A} \mathbf{x} <= \mathbf{b}, \mathbf{C} \mathbf{x} >= \mathbf{d}, \mathbf{E} \mathbf{x} = \mathbf{f}$$
and all elements in $\mathbf{A}, \mathbf{b}, \mathbf{C}, \mathbf{d}, \mathbf{E}, \mathbf{f}$ must be non-negative.
**Parameters:**
* ``x``: PyTorch tensor of size ($n_v$), it can optionally have a batch size ($b \times n_v$)
* ``A``, ``C``, ``E``: PyTorch tensor of size ($n_c \times n_v$), constraint matrix on the left hand side
* ``b``, ``d``, ``f``: PyTorch tensor of size ($n_c$), constraint vector on the right hand side
* ``tau``: (``default=0.05``) parameter to control the discreteness of the projection. Smaller value leads to more discrete (harder) results, larger value leads to more continuous (softer) results.
* ``max_iter``: (``default=100``) max number of iterations
* ``dummy_val``: (``default=0``) the value of dummy variables appended to the input vector
* ``mode``: (``default='v1'``) EXPERIMENTAL the mode of LinSAT kernel. ``v2`` is sometimes faster than ``v1``.
* ``no_warning``: (``default=False``) turn off warning message
**return:** PyTorch tensor of size ($n_v$) or ($b \times n_v$), the projected variables
Notations:
* $b$ means the batch size.
* $n_c$ means the number of constraints ($\mathbf{A}$, $\mathbf{C}$, $\mathbf{E}$ may have different $n_c$)
* $n_v$ means the number of variables
### Some practical notes
1. You must ensure that your input constraints have a non-empty feasible space.
Otherwise, ``linsat_layer`` will not converge.
2. You may tune the value of ``tau`` for your specific tasks. Monitor the output
of LinSAT so that the "smoothness" of the output meets your task. Reasonable
choices of ``tau`` may range from ``1e-4`` to ``100`` in our experience.
3. Be careful of potential numerical issues. Sometimes ``A x <= 1`` does not
work, but ``A x <= 0.999`` works.
4. The input vector ``x`` may have a batch dimension, but the constraints can
not have a batch dimension. The constraints should be consistent for all data in
one batch.
## How it works?
Here we introduce the mechanism inside LinSAT. It works by extending the
Sinkhorn algorithm to multiple sets of marginals (to our best knowledge, we are
the first to study Sinkhorn with multi-sets of marginals). The positive linear
constraints are then enforced by transforming the constraints into marginals.
For more details and formal proofs, please refer to
[our paper](https://runzhong.wang/files/icml2023_LinSATNet.pdf).
### Classic Sinkhorn with single-set marginals
Let's start with the classic Sinkhorn algorithm. Given non-negative score matrix
$`\mathbf{S}\in\mathbb{R}_{\geq 0}^{m\times n}`$ and a set of marginal
distributions on rows $`\mathbf{v}\in \mathbb{R}_{\geq 0}^m`$ and columns
$`\mathbf{u} \in \mathbb{R}_{\geq 0}^n`$, where
$$\sum_{i=1}^m v_i = \sum_{j=1}^n u_j = h,$$
the Sinkhorn algorithm outputs a normalized matrix
$`\mathbf{\Gamma}\in[0,1]^{m\times n}`$ so that
$$\sum_{i=1}^m \Gamma_{i,j}u_{j}=u_j, \sum_{j=1}^n \Gamma_{i,j}u_{j}=v_i.$$
Conceptually, $`\Gamma_{i,j}`$ means the **proportion** of $`u_j`$ moved to $`v_i`$.
> If you are seeing the math formulas not rendered correctly, it is [an issue of github](https://github.com/orgs/community/discussions/17051).
> Please refer to [our main paper](https://runzhong.wang/files/icml2023_LinSATNet.pdf) for better view.
The algorithm steps are:
Initialize $`\Gamma_{i,j}=\frac{s_{i,j}}{\sum_{i=1}^m s_{i,j}}`$
$`\quad`$**repeat**:
$`\qquad{\Gamma}_{i,j}^{\prime} = \frac{{\Gamma}_{i,j}v_{i}}{\sum_{j=1}^n {\Gamma}_{i,j}u_{j}}`$; $`\triangleright`$ normalize w.r.t. $`\mathbf{v}`$
$`\qquad{\Gamma}_{i,j} = \frac{{\Gamma}_{i,j}^{\prime}u_{j}}{\sum_{i=1}^m {\Gamma}_{i,j}^{\prime}u_{j}}`$; $`\triangleright`$ normalize w.r.t. $`\mathbf{u}`$
$`\quad`$**until** convergence.
> Note that the above formulation is modified from the conventional Sinkhorn
formulation. $`\Gamma_{i,j}u_j`$ is equivalent to the elements in the "transport"
matrix in papers such as [(Cuturi 2013)](https://arxiv.org/pdf/1306.0895v1.pdf).
We prefer this new formulation as it generalize smoothly to Sinkhorn with
multi-set marginals in the following.
>
> To make a clearer comparison, the transportation matrix in [(Cuturi 2013)](https://arxiv.org/pdf/1306.0895v1.pdf)
is $`\mathbf{P}\in\mathbb{R}_{\geq 0}^{m\times n}`$, and the constraints are
$$\sum_{i=1}^m P_{i,j}=u_{j},\quad \sum_{j=1}^n P_{i,j}=v_{i}$$
$`P_{i,j}`$ means the _exact mass_ moved from $`u_{j}`$ to $`v_{i}`$.
>
> The algorithm steps are:
>
> Initialize $`\Gamma_{i,j}=\frac{s_{i,j}}{\sum_{i=1}^m s_{i,j}}`$
>
> $`\quad`$**repeat**:
>
> $`\qquad{P}_{i,j}^{\prime} = \frac{P_{i,j}v_{i}}{\sum_{j=1}^n {P}_{i,j}}`$; $`\triangleright`$ normalize w.r.t. $`\mathbf{v}`$
>
> $`\qquad{P}_{i,j} = \frac{{P}_{i,j}^{\prime}u_j}{\sum_{i=1}^m {P}_{i,j}^{\prime}}`$; $`\triangleright`$ normalize w.r.t. $`\mathbf{u}`$
>
> $`\quad`$**until** convergence.
### Extended Sinkhorn with multi-set marginals
We discover that the Sinkhorn algorithm can generalize to multiple sets of
marginals.
Recall that $`\Gamma_{i,j}\in[0,1]`$ means the proportion of $`u_i`$ moved to
$`v_j`$. Interestingly, it yields the same formulation if we simply replace
$`\mathbf{u},\mathbf{v}`$ by another set of marginal distributions, suggesting
the potential of extending the Sinkhorn algorithm to multiple sets of marginal
distributions. Denote that there are $k$ sets of marginal distributions that are
jointly enforced to fit more complicated real-world scenarios. The sets of
marginal distributions are
$`\mathbf{u}_\eta\in \mathbb{R}_{\geq 0}^n, \mathbf{v}_\eta\in \mathbb{R}_{\geq 0}^m`$,
and we have:
$$\forall \eta\in \{1, \cdots,k\}: \sum_{i=1}^m v_{\eta,i}=\sum_{j=1}^n u_{\eta,j}=h_\eta.$$
It assumes the existence of a normalized $`\mathbf{Z} \in [0,1]^{m\times n}`$ s.t.
$$\forall \eta\in \{1,\cdots, k\}: \sum_{i=1}^m z_{i,j} u_{\eta,j}=u_{\eta,j}, \sum_{j=1}^n z_{i,j} u_{\eta,j}=v_{\eta,i}$$
i.e., the multiple sets of marginal distributions have a non-empty feasible
region (you may understand the meaning of "non-empty feasible region" after
reading the next section about how to handle positive linear constraints).
Multiple sets of marginal distributions could be jointly enforced by traversing
the Sinkhorn iterations over $k$ sets of marginal distributions.
The algorithm steps are:
Initialize $`\Gamma_{i,j}=\frac{s_{i,j}}{\sum_{i=1}^m s_{i,j}}`$
$`\quad`$**repeat**:
$`\qquad`$**for** $`\eta=1`$ **to** $k$ **do**
$`\quad\qquad{\Gamma}_{i,j}^{\prime} = \frac{{\Gamma}_{i,j}v_{\eta,i}}{\sum_{j=1}^n {\Gamma}_{i,j}u_{\eta,j}}`$; $`\triangleright`$ normalize w.r.t. $`\mathbf{v}_\eta`$
$`\quad\qquad{\Gamma}_{i,j} = \frac{{\Gamma}_{i,j}^{\prime}u_{\eta,j}}{\sum_{i=1}^m {\Gamma}_{i,j}^{\prime}u_{\eta,j}}`$; $`\triangleright`$ normalize w.r.t. $`\mathbf{u}_\eta`$
$`\qquad`$**end for**
$`\quad`$**until** convergence.
In [our paper](https://runzhong.wang/files/icml2023_LinSATNet.pdf), we prove
that the Sinkhorn algorithm for multi-set marginals shares the same convergence
pattern with the classic Sinkhorn, and its underlying formulation is also
similar to the classic Sinkhorn.
### Transforming positive linear constraints into marginals
Then we show how to transform the positive linear constraints into marginals,
which are handled by our proposed multi-set Sinkhorn.
#### Encoding neural network's output
For an $l$-length vector denoted as $`\mathbf{y}`$ (which can be the output of a
neural network, also it is the input to ``linsat_layer``), the following matrix
is built
$`\mathbf{W} = {y}_1 \quad {y}_2 \quad ... \quad {y}_l \quad \beta`$
$`\qquad \ \ \beta \ \quad \beta \ \quad ... \quad \ \beta \quad \ \beta`$
where $`\mathbf{W}`$ is of size $`2 \times (l+1)`$, and $`\beta`$ is the dummy
variable, the default is $`\beta=0`$. $`\mathbf{y}`$ is put at the upper-left
region of $`\mathbf{W}`$. The entropic regularizer is then enforced to control
discreteness and handle potential negative inputs:
$$\mathbf{S} = \exp \left(\frac{\mathbf{W}}{\tau}\right).$$
The score matrix $`\mathbf{S}`$ is taken as the input of Sinkhorn for multi-set
marginals.
#### From linear constraints to marginals
* **Packing constraint** $`\mathbf{A}\mathbf{x}\leq \mathbf{b}`$. Assuming that
there is only one constraint, we rewrite the constraint as
$$\sum_{i=1}^l a_ix_i \leq b.$$
Following the "transportation" view of Sinkhorn, the output $`\mathbf{x}`$
_moves_ at most $`b`$ unit of mass from $`a_1, a_2, \cdots, a_l`$, and the
dummy dimension allows the inequality by _moving_ mass from the dummy
dimension. It is also ensured that the sum of $`\mathbf{u}_p`$ equals the sum
of $`\mathbf{v}_p`$. The marginal distributions are defined as
```math
\mathbf{u}_p = \underbrace{\left[a_1 \quad a_2 \quad ...\quad a_l \quad b\right]}_{l \text{ dims}+1 \text{ dummy dim}}, \quad \mathbf{v}_p^\top = \left[b \quad \sum_{i=1}^l a_i \right]
```
* **Covering constraint** $`\mathbf{C}\mathbf{x}\geq \mathbf{d}`$. Assuming that
there is only one constraint, we rewrite the constraint as
$$\sum_{i=1}^l c_ix_i\geq d.$$ We introduce the multiplier
$$\gamma=\left\lfloor\sum_{i=1}^lc_i / d \right\rfloor$$
because we always have $$\sum_{i=1}^l c_i \geq d$$ (else the
constraint is infeasible), and we cannot reach the feasible solution where all
elements in $`\mathbf{x}`$ are 1s without this multiplier. Our formulation
ensures that at least $`d`$ unit of mass is _moved_ from $`c_1, c_2, \cdots, c_l`$
by $`\mathbf{x}`$, thus representing the covering constraint of "greater than".
It is also ensured that the sum of $`\mathbf{u}_c`$ equals the sum of
$`\mathbf{v}_c`$. The marginal distributions are defined as
```math
\mathbf{u}_c = \underbrace{\left[c_1 \quad c_2 \quad ...\quad c_l \quad \gamma d\right]}_{l \text{ dims} + 1 \text{ dummy dim}}, \quad
\mathbf{v}_c^\top = \left[ (\gamma+1) d \quad \sum_{i=1}^l c_i - d \right]
```
* **Equality constraint** $`\mathbf{E}\mathbf{x}= \mathbf{f}`$. Representing the
equality constraint is more straightforward. Assuming that there is only one
constraint, we rewrite the constraint as $$\sum_{i=1}^l e_ix_i= f.$$ The
output $`\mathbf{x}`$ _moves_ $`e_1, e_2, \cdots, e_l`$ to $`f`$, and we need
no dummy element in $`\mathbf{u}_e`$ because it is an equality constraint. It
is also ensured that the sum of $`\mathbf{u}_e`$ equals the sum of
$`\mathbf{v}_e`$. The marginal distributions are defined as
```math
\mathbf{u}_e = \underbrace{\left[e_1 \quad e_2 \quad ...\quad e_l \quad 0\right]}_{l \text{ dims} + \text{dummy dim}=0}, \quad
\mathbf{v}_e^\top = \left[f \quad \sum_{i=1}^l e_i - f \right]
```
After encoding all constraints and stack them as multiple sets of marginals,
we can call the Sinkhorn algorithm for multi-set marginals to enforce the
constraints.
## More Complicated Use Cases (appeared in our paper)
### I. Neural Solver for Traveling Salesman Problem with Extra Constraints
The Traveling Salesman Problem (TSP) is a classic NP-hard problem. The standard
TSP aims at finding a cycle visiting all cities with minimal length, and
developing neural solvers for TSP receives increasing interest. Beyond standard
TSP, here we develop a neural solver for TSP with extra constraints using LinSAT
layer.
**Contributing author: Yunhao Zhang**
Details will be updated soon.
### II. Partial Graph Matching with Outliers on Both Sides
Standard graph matching (GM) assumes an outlier-free setting namely bijective
mapping. One-shot GM neural networks
[(Wang et al., 2022)](https://ieeexplore.ieee.org/abstract/document/9426408/)
effectively enforce the satisfiability of one-to-one matching constraint by
single-set Sinkhorn. Partial GM refers to the realistic case with outliers on
both sides so that only a partial set of nodes are matched. There lacks a
principled approach to enforce matching constraints for partial GM. The main
challenge for existing GM networks is that they cannot discard outliers because
the single-set Sinkhorn is outlier-agnostic and tends to match as many nodes as
possible. The only exception is BBGM
[(Rolinek et al., 2020)](https://link.springer.com/chapter/10.1007/978-3-030-58604-1_25)
which incorporates a traditional solver that can reject outliers, yet its
performance still has room for improvement.
**Contributing author: Ziao Guo**
To run the GM experiment, please follow the code and instructions in
[ThinkMatch/LinSAT](https://github.com/Thinklab-SJTU/ThinkMatch/tree/master/models/LinSAT).
### III. Portfolio Allocation
Predictive portfolio allocation is the process of selecting the best asset
allocation based on predictions of future financial markets. The goal is to
design an allocation plan to best trade-off between the return and the potential
risk (i.e. the volatility). In an allocation plan, each asset is assigned a
non-negative weight and all weights should sum to 1. Existing learning-based
methods [(Zhang et al., 2020)](https://arxiv.org/pdf/2005.13665.pdf),
[(Butler et al., 2021)](https://www.tandfonline.com/doi/abs/10.1080/14697688.2022.2162432)
only consider the sum-to-one constraint without introducing personal preference
or expert knowledge. In contrast, we achieve such flexibility for the target
portfolio via positive linear constraints: a mix of covering and equality
constraints, which is widely considered for its real-world demand.
**Contributing author: Tianyi Chen**
To run the portfolio experiment, please follow the code and instructions in
[``portfolio_exp/``](portfolio_exp).
## Citation
If you find our paper/code useful in your research, please cite
```
@inproceedings{WangICML23,
title={LinSATNet: The Positive Linear Satisfiability Neural Networks},
author={Wang, Runzhong and Zhang, Yunhao and Guo, Ziao and Chen, Tianyi and Yang, Xiaokang and Yan, Junchi},
booktitle={International Conference on Machine Learning (ICML)},
year={2023}
}
```
| PypiClean |
/NlpToolkit-Hmm-Cy-1.0.4.tar.gz/NlpToolkit-Hmm-Cy-1.0.4/README.md | Hidden Markov Models
============
Video Lectures
============
[<img src="https://github.com/StarlangSoftware/Hmm/blob/master/video1.jpg" width="50%">](https://youtu.be/zHj5mK3jcyk)[<img src="https://github.com/StarlangSoftware/Hmm/blob/master/video2.jpg" width="50%">](https://youtu.be/LM0ld3UKCEs)
For Developers
============
You can also see [Python](https://github.com/starlangsoftware/Hmm-Py), [Java](https://github.com/starlangsoftware/Hmm), [C++](https://github.com/starlangsoftware/Hmm-CPP), [Swift](https://github.com/starlangsoftware/Hmm-Swift), [Js](https://github.com/starlangsoftware/Hmm-Js), or [C#](https://github.com/starlangsoftware/Hmm-CS) repository.
## Requirements
* [Python 3.7 or higher](#python)
* [Git](#git)
### Python
To check if you have a compatible version of Python installed, use the following command:
python -V
You can find the latest version of Python [here](https://www.python.org/downloads/).
### Git
Install the [latest version of Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git).
## Pip Install
pip3 install NlpToolkit-Hmm-Cy
## Download Code
In order to work on code, create a fork from GitHub page.
Use Git for cloning the code to your local or below line for Ubuntu:
git clone <your-fork-git-link>
A directory called Hmm will be created. Or you can use below link for exploring the code:
git clone https://github.com/starlangsoftware/Hmm-Py.git
## Open project with Pycharm IDE
Steps for opening the cloned project:
* Start IDE
* Select **File | Open** from main menu
* Choose `Hmm-PY` file
* Select open as project option
* Couple of seconds, dependencies will be downloaded.
Detailed Description
============
+ [Hmm](#hmm)
## Hmm
To create an Hmm model:

	Hmm(self, states: set, observations: list, emittedSymbols: list)

To obtain the most likely state sequence with the Viterbi algorithm:

	viterbi(self, s: list) -> list
| PypiClean |
/Flask-API.yandex-0.6.2.1.tar.gz/Flask-API.yandex-0.6.2.1/flask_api/mediatypes.py | from __future__ import unicode_literals
class MediaType(object):
    """
    A single parsed media type, e.g. "application/json; indent=4".

    Exposes the main type, sub type and a dict of parameters, plus
    precedence ordering and superset ("satisfies") tests used during
    content negotiation.
    """
    def __init__(self, media_type):
        self.main_type, self.sub_type, self.params = self._parse(media_type)

    @property
    def full_type(self):
        """Return the "main/sub" portion of the media type, without params."""
        return self.main_type + '/' + self.sub_type

    @property
    def precedence(self):
        """
        Precedence is determined by how specific a media type is:

        3. 'type/subtype; param=val'
        2. 'type/subtype'
        1. 'type/*'
        0. '*/*'
        """
        if self.main_type == '*':
            return 0
        elif self.sub_type == '*':
            return 1
        elif not self.params or list(self.params.keys()) == ['q']:
            # A bare quality factor ('q') does not make the type more specific.
            return 2
        return 3

    def satisfies(self, other):
        """
        Returns `True` if this media type is a superset of `other`.
        Some examples of cases where this holds true:

        'application/json; version=1.0' >= 'application/json; version=1.0'
        'application/json' >= 'application/json; indent=4'
        'text/*' >= 'text/plain'
        '*/*' >= 'text/plain'
        """
        # Every parameter we specify (other than 'q') must match exactly.
        for key in self.params.keys():
            if key != 'q' and other.params.get(key, None) != self.params.get(key, None):
                return False

        if self.sub_type != '*' and other.sub_type != '*' and other.sub_type != self.sub_type:
            return False

        if self.main_type != '*' and other.main_type != '*' and other.main_type != self.main_type:
            return False

        return True

    def _parse(self, media_type):
        """
        Parse a media type string, like "application/json; indent=4" into a
        three-tuple, like: ('application', 'json', {'indent': '4'})
        """
        full_type, sep, param_string = media_type.partition(';')
        params = {}
        for token in param_string.strip().split(','):
            key, sep, value = [s.strip() for s in token.partition('=')]
            if value.startswith('"') and value.endswith('"'):
                # Strip surrounding double quotes from quoted parameter values.
                value = value[1:-1]
            if key:
                params[key] = value
        main_type, sep, sub_type = [s.strip() for s in full_type.partition('/')]
        return (main_type, sub_type, params)

    def __repr__(self):
        return "<%s '%s'>" % (self.__class__.__name__, str(self))

    def __str__(self):
        """
        Return a canonical string representing the media type.
        Note that this ensures the params are sorted.
        """
        if self.params:
            params_str = ', '.join([
                '%s="%s"' % (key, val)
                for key, val in sorted(self.params.items())
            ])
            return self.full_type + '; ' + params_str
        return self.full_type

    def __hash__(self):
        return hash(str(self))

    def __eq__(self, other):
        # Fix: comparing against a non-MediaType used to raise
        # AttributeError. Returning NotImplemented lets Python fall back to
        # the other operand's comparison (and ultimately to inequality).
        if not isinstance(other, MediaType):
            return NotImplemented
        # Compare two MediaType instances, ignoring parameter ordering.
        return (
            self.full_type == other.full_type and
            self.params == other.params
        )
def parse_accept_header(accept):
    """
    Parse the value of a client's Accept header, and return a list of sets
    of the media types it included, ordered by precedence.

    For example, 'application/json, application/xml, */*' would return:

    [
        set([<MediaType "application/xml">, <MediaType "application/json">]),
        set([<MediaType "*/*">])
    ]
    """
    # Four buckets, one per precedence level (3 = most specific ... 0 = */*).
    buckets = [set() for _ in range(4)]
    for token in accept.split(','):
        media_type = MediaType(token.strip())
        buckets[3 - media_type.precedence].add(media_type)
    # Drop empty precedence levels from the result.
    return [bucket for bucket in buckets if bucket]
/Django-clear-s2s-0.1.5.tar.gz/Django-clear-s2s-0.1.5/README.rst | =============================
Django Clear S2S
=============================
.. image:: https://badge.fury.io/py/Django-clear-s2s.svg
:target: https://badge.fury.io/py/Django-clear-s2s
.. image:: https://travis-ci.org/sal-git/Django-clear-s2s.svg?branch=master
:target: https://travis-ci.org/sal-git/Django-clear-s2s
.. image:: https://codecov.io/gh/sal-git/Django-clear-s2s/branch/master/graph/badge.svg
:target: https://codecov.io/gh/sal-git/Django-clear-s2s
Your project description goes here
Documentation
-------------
The full documentation is at https://Django-clear-s2s.readthedocs.io.
Quickstart
----------
Install Django Clear S2S::
pip install Django-clear-s2s
Add it to your `INSTALLED_APPS`:
.. code-block:: python
INSTALLED_APPS = (
...
'django_clear_s2s.apps.DjangoClearS2sConfig',
...
)
Add Django Clear S2S's URL patterns:
.. code-block:: python
from django_clear_s2s import urls as django_clear_s2s_urls
urlpatterns = [
...
url(r'^', include(django_clear_s2s_urls)),
...
]
Features
--------
* TODO
Running Tests
-------------
Does the code actually work?
::
source <YOURVIRTUALENV>/bin/activate
(myenv) $ pip install tox
(myenv) $ tox
Development commands
---------------------
::
pip install -r requirements_dev.txt
invoke -l
Credits
-------
Tools used in rendering this package:
* Cookiecutter_
* `cookiecutter-djangopackage`_
.. _Cookiecutter: https://github.com/audreyr/cookiecutter
.. _`cookiecutter-djangopackage`: https://github.com/pydanny/cookiecutter-djangopackage
| PypiClean |
/Nuitka_fixed-1.1.2-cp310-cp310-win_amd64.whl/nuitka/build/inline_copy/lib/scons-4.4.0/SCons/dblite.py | import os
import pickle
import shutil
import time
from SCons.compat import PICKLE_PROTOCOL
# Module-level tuning flags; callers may flip these at runtime.
KEEP_ALL_FILES = False  # when True, sync() keeps a timestamped copy of every written db file
IGNORE_CORRUPT_DBFILES = False  # when True, a corrupt db is discarded with a warning instead of raising
def corruption_warning(filename):
    """Local warning for corrupt db.

    Used for self-tests. SCons overwrites this with a
    different warning function in SConsign.py.
    """
    message = "Warning: Discarding corrupt database:"
    print(message, filename)
# File suffixes: the persistent database file and the temp file used for
# atomic writes (write temp, then rename over the real db).
DBLITE_SUFFIX = '.dblite'
TMP_SUFFIX = '.tmp'
class dblite:
    """
    A minimal dbm-style persistent dictionary (str keys -> bytes values)
    stored as a single pickle file.

    Squirrel away references to the functions in various modules
    that we'll use when our __del__() method calls our sync() method
    during shutdown. We might get destroyed when Python is in the midst
    of tearing down the different modules we import in an essentially
    arbitrary order, and some of the various modules's global attributes
    may already be wiped out from under us.

    See the discussion at:
    http://mail.python.org/pipermail/python-bugs-list/2003-March/016877.html
    """

    # Cached references to module-level callables (see docstring for why).
    _open = open
    _pickle_dump = staticmethod(pickle.dump)
    _pickle_protocol = PICKLE_PROTOCOL
    try:
        _os_chown = os.chown
    except AttributeError:
        # os.chown does not exist on this platform (e.g. Windows).
        _os_chown = None
    _os_replace = os.replace
    _os_chmod = os.chmod
    _shutil_copyfile = shutil.copyfile
    _time_time = time.time

    def __init__(self, file_base_name, flag, mode):
        # dbm-style open flags: "r" read-only (the default), "w" read/write,
        # "c" create if missing, "n" always start from an empty database.
        assert flag in (None, "r", "w", "c", "n")
        if flag is None:
            flag = "r"

        base, ext = os.path.splitext(file_base_name)
        if ext == DBLITE_SUFFIX:
            # There's already a suffix on the file name, don't add one.
            self._file_name = file_base_name
            self._tmp_name = base + TMP_SUFFIX
        else:
            self._file_name = file_base_name + DBLITE_SUFFIX
            self._tmp_name = file_base_name + TMP_SUFFIX

        self._flag = flag
        self._mode = mode
        self._dict = {}  # in-memory contents; flushed to disk by sync()
        self._needs_sync = False  # set on mutation, cleared by sync()

        if self._os_chown is not None and (os.geteuid() == 0 or os.getuid() == 0):
            # running as root; chown back to current owner/group when done
            try:
                statinfo = os.stat(self._file_name)
                self._chown_to = statinfo.st_uid
                self._chgrp_to = statinfo.st_gid
            except OSError:
                # db file doesn't exist yet.
                # Check os.environ for SUDO_UID, use if set
                self._chown_to = int(os.environ.get('SUDO_UID', -1))
                self._chgrp_to = int(os.environ.get('SUDO_GID', -1))
        else:
            self._chown_to = -1  # don't chown
            self._chgrp_to = -1  # don't chgrp

        if self._flag == "n":
            # NOTE(review): the third positional argument of the open builtin
            # is *buffering*, not a permission mode -- this passes self._mode
            # (e.g. 0o666) as the buffer size; confirm upstream intent.
            with self._open(self._file_name, "wb", self._mode):
                pass  # just make sure it exists
        else:
            try:
                f = self._open(self._file_name, "rb")
            except IOError as e:
                # Missing file is only acceptable under "c" (create).
                if self._flag != "c":
                    raise e
                with self._open(self._file_name, "wb", self._mode):
                    pass  # just make sure it exists
            else:
                # Load the whole pickled dictionary into memory.
                p = f.read()
                f.close()
                if len(p) > 0:
                    try:
                        self._dict = pickle.loads(p, encoding='bytes')
                    except (pickle.UnpicklingError, EOFError, KeyError):
                        # Note how we catch KeyErrors too here, which might happen
                        # when we don't have cPickle available (default pickle
                        # throws it).
                        if IGNORE_CORRUPT_DBFILES:
                            corruption_warning(self._file_name)
                        else:
                            raise

    def close(self):
        # Flush any pending changes before the object goes away.
        if self._needs_sync:
            self.sync()

    def __del__(self):
        self.close()

    def sync(self):
        """Write the in-memory dictionary to disk atomically: pickle into a
        temp file, then rename it over the real database file."""
        self._check_writable()
        with self._open(self._tmp_name, "wb", self._mode) as f:
            self._pickle_dump(self._dict, f, self._pickle_protocol)

        try:
            self._os_replace(self._tmp_name, self._file_name)
        except PermissionError:
            # If we couldn't replace due to perms, try to change and retry.
            # This is mainly for Windows - on POSIX the file permissions
            # don't matter, the os.replace would have worked anyway.
            # We're giving up if the retry fails, just let the Python
            # exception abort us.
            try:
                self._os_chmod(self._file_name, 0o777)
            except PermissionError:
                pass
            self._os_replace(self._tmp_name, self._file_name)

        if self._os_chown is not None and self._chown_to > 0:  # don't chown to root or -1
            try:
                self._os_chown(self._file_name, self._chown_to, self._chgrp_to)
            except OSError:
                pass

        self._needs_sync = False

        if KEEP_ALL_FILES:
            # Debug aid: keep a timestamped snapshot of every synced database.
            self._shutil_copyfile(
                self._file_name,
                self._file_name + "_" + str(int(self._time_time()))
            )

    def _check_writable(self):
        # Guard used by every mutating operation.
        if self._flag == "r":
            raise IOError("Read-only database: %s" % self._file_name)

    def __getitem__(self, key):
        return self._dict[key]

    def __setitem__(self, key, value):
        # Keys must be str and values bytes so the pickled db stays portable.
        self._check_writable()
        if not isinstance(key, str):
            raise TypeError("key `%s' must be a string but is %s" % (key, type(key)))
        if not isinstance(value, bytes):
            raise TypeError("value `%s' must be a bytes but is %s" % (value, type(value)))
        self._dict[key] = value
        self._needs_sync = True

    def keys(self):
        return list(self._dict.keys())

    def __contains__(self, key):
        return key in self._dict

    def __iter__(self):
        return iter(self._dict)

    def __len__(self):
        return len(self._dict)
def open(file, flag=None, mode=0o666):
    """Open a dblite database (dbm-style factory function).

    Intentionally shadows the ``open`` builtin to mirror the ``dbm``
    module interface; the builtin remains reachable via ``dblite._open``.
    """
    return dblite(file, flag, mode)
def _exercise():
    """Smoke-test the dblite implementation.

    Walks through the "n"/"c"/"r"/"w" open flags, the str/bytes type
    checks, corrupt-database handling and the missing-file error path,
    using a throwaway 'tmp.dblite' in the current directory.  The steps
    are strictly order-dependent.
    """
    # "n": always create a fresh, empty database.
    db = open("tmp", "n")
    assert len(db) == 0
    db["foo"] = b"bar"
    assert db["foo"] == b"bar"
    db.sync()

    # "c": open existing (create if missing); previous contents persist.
    db = open("tmp", "c")
    assert len(db) == 1, len(db)
    assert db["foo"] == b"bar"
    db["bar"] = b"foo"
    assert db["bar"] == b"foo"
    db.sync()

    # "r": read-only; sync() must refuse to write.
    db = open("tmp", "r")
    assert len(db) == 2, len(db)
    assert db["foo"] == b"bar"
    assert db["bar"] == b"foo"
    try:
        db.sync()
    except IOError as e:
        assert str(e) == "Read-only database: tmp.dblite"
    else:
        raise RuntimeError("IOError expected.")
    db = open("tmp", "w")
    assert len(db) == 2, len(db)
    db["ping"] = b"pong"
    db.sync()

    # Keys must be str ...
    try:
        db[(1, 2)] = "tuple"
    except TypeError as e:
        assert str(e) == "key `(1, 2)' must be a string but is <class 'tuple'>", str(e)
    else:
        raise RuntimeError("TypeError exception expected")

    # ... and values must be bytes.
    try:
        db["list"] = [1, 2]
    except TypeError as e:
        assert str(e) == "value `[1, 2]' must be a bytes but is <class 'list'>", str(e)
    else:
        raise RuntimeError("TypeError exception expected")

    db = open("tmp", "r")
    assert len(db) == 3, len(db)

    db = open("tmp", "n")
    assert len(db) == 0, len(db)
    # Truncate the db file (empty file is tolerated) ...
    dblite._open("tmp.dblite", "w")
    db = open("tmp", "r")

    # ... then corrupt it with a stray byte, which must raise by default.
    dblite._open("tmp.dblite", "w").write("x")
    try:
        db = open("tmp", "r")
    except pickle.UnpicklingError:
        pass
    else:
        raise RuntimeError("pickle exception expected.")

    # With IGNORE_CORRUPT_DBFILES set, corruption is discarded with a warning.
    global IGNORE_CORRUPT_DBFILES
    IGNORE_CORRUPT_DBFILES = True
    db = open("tmp", "r")
    assert len(db) == 0, len(db)
    os.unlink("tmp.dblite")

    # "w" requires the database file to already exist.
    try:
        db = open("tmp", "w")
    except IOError as e:
        assert str(e) == "[Errno 2] No such file or directory: 'tmp.dblite'", str(e)
    else:
        raise RuntimeError("IOError expected.")

    print("Completed _exercise()")
if __name__ == "__main__":
    # Run the self-test when executed directly.
    _exercise()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: | PypiClean |
/Mathics_Django-6.0.0-py3-none-any.whl/mathics_django/web/media/js/mathjax/jax/output/HTML-CSS/fonts/Gyre-Pagella/Normal/Regular/Main.js | MathJax.OutputJax["HTML-CSS"].FONTDATA.FONTS.GyrePagellaMathJax_Normal={directory:"Normal/Regular",family:"GyrePagellaMathJax_Normal",testString:"\u00A0\u210E\uD835\uDC00\uD835\uDC01\uD835\uDC02\uD835\uDC03\uD835\uDC04\uD835\uDC05\uD835\uDC06\uD835\uDC07\uD835\uDC08\uD835\uDC09\uD835\uDC0A\uD835\uDC0B\uD835\uDC0C",32:[0,0,250,0,0],160:[0,0,250,0,0],8462:[733,9,500,10,471],119808:[686,3,778,24,757],119809:[681,3,667,39,611],119810:[695,17,722,44,695],119811:[681,3,833,35,786],119812:[681,3,611,39,577],119813:[681,3,556,28,539],119814:[695,17,833,47,776],119815:[681,3,833,36,796],119816:[681,3,389,39,350],119817:[681,213,389,-11,350],119818:[681,3,778,39,763],119819:[681,3,611,39,577],119820:[681,10,1000,32,968],119821:[681,16,833,35,798],119822:[695,17,833,47,787],119823:[681,3,611,39,594],119824:[695,184,833,47,787],119825:[681,3,722,39,708],119826:[695,17,611,57,559],119827:[681,3,667,17,650],119828:[681,17,778,26,760],119829:[681,3,778,20,763],119830:[686,3,1000,17,988],119831:[695,3,667,17,650],119832:[695,3,667,15,660],119833:[681,3,667,24,627],119834:[471,17,500,40,478],119835:[720,17,611,10,556],119836:[471,17,444,37,414],119837:[720,17,611,42,577],119838:[471,17,500,42,461],119839:[720,3,389,34,381],119840:[471,266,556,26,535],119841:[720,3,611,24,587],119842:[666,3,333,34,298],119843:[666,266,333,3,233],119844:[720,3,611,21,597],119845:[720,3,333,24,296],119846:[471,3,889,24,864],119847:[471,3,611,24,587],119848:[471,17,556,40,517],119849:[471,258,611,29,567],119850:[471,258,611,52,589],119851:[471,3,389,30,389],119852:[471,17,444,39,405],119853:[632,17,333,22,324],119854:[471,17,611,25,583],119855:[459,3,556,11,545],119856:[471,3,833,13,820],119857:[471,3,500,20,483],119858:[459,266,556,10,546],119859:[457,3,500,16,464],119860:[705,3,722,-19,677],119861:[692,3,611,26,559],119862:[706,18,667,45,651],119863:[692,3,778,28,7
41],119864:[692,3,611,30,570],119865:[692,3,556,0,548],119866:[706,18,722,50,694],119867:[692,3,778,-3,800],119868:[692,3,333,7,354],119869:[692,206,333,-35,358],119870:[692,3,667,13,683],119871:[692,3,556,16,523],119872:[692,18,944,-19,940],119873:[692,11,778,2,804],119874:[706,18,778,53,748],119875:[692,3,611,9,594],119876:[706,201,778,53,748],119877:[692,3,667,9,639],119878:[706,18,556,42,506],119879:[692,3,611,53,635],119880:[692,19,778,88,798],119881:[692,8,722,75,754],119882:[700,8,944,71,980],119883:[692,3,722,20,734],119884:[705,3,667,52,675],119885:[692,3,667,20,637],119886:[482,11,444,4,406],119887:[733,11,463,37,433],119888:[482,11,407,25,389],119889:[733,11,500,17,483],119890:[482,11,389,15,374],119891:[733,276,278,-162,413],119892:[482,276,500,-37,498],119894:[670,9,278,34,266],119895:[670,276,278,-70,273],119896:[733,9,444,8,449],119897:[733,9,278,36,251],119898:[482,9,778,24,740],119899:[482,9,556,24,514],119900:[482,11,444,17,411],119901:[482,276,500,-7,465],119902:[482,276,463,24,432],119903:[482,9,389,26,384],119904:[482,11,389,9,345],119905:[646,9,333,41,310],119906:[482,11,556,32,512],119907:[482,11,500,21,477],119908:[482,11,722,21,699],119909:[482,11,500,9,484],119910:[482,276,500,-8,490],119911:[482,11,444,-1,416],119912:[683,3,722,-35,685],119913:[682,3,667,8,629],119914:[695,17,685,69,695],119915:[682,3,778,0,747],119916:[681,3,611,11,606],119917:[681,3,556,-6,593],119918:[695,17,778,72,750],119919:[681,3,778,-12,826],119920:[681,3,389,-1,412],119921:[681,207,389,-29,417],119922:[681,3,722,-10,746],119923:[681,3,611,26,578],119924:[681,17,944,-23,985],119925:[681,3,778,-2,829],119926:[695,17,833,76,794],119927:[681,3,667,11,673],119928:[695,222,833,76,794],119929:[681,3,722,4,697],119930:[695,17,556,50,517],119931:[681,3,611,56,674],119932:[681,17,778,83,825],119933:[681,3,667,67,745],119934:[689,3,1000,67,1073],119935:[681,3,722,-9,772],119936:[695,3,611,54,675],119937:[681,3,667,1,676],119938:[470,17,556,44,519],119939:[726,17,537,44,494],
119940:[469,17,444,32,436],119941:[726,17,556,38,550],119942:[469,17,444,28,418],119943:[726,271,333,-130,449],119944:[469,271,500,-50,529],119945:[726,17,556,22,522],119946:[675,17,333,26,301],119947:[675,271,333,-64,311],119948:[726,17,556,34,528],119949:[726,17,333,64,318],119950:[469,17,833,19,803],119951:[469,17,556,17,521],119952:[469,17,556,48,502],119953:[469,271,556,-21,516],119954:[469,271,537,32,513],119955:[469,17,389,20,411],119956:[469,17,444,25,406],119957:[636,17,389,42,409],119958:[469,17,556,22,521],119959:[469,17,556,19,513],119960:[469,17,833,27,802],119961:[469,17,500,-8,500],119962:[469,271,556,13,541],119963:[469,17,500,31,470],120484:[482,9,278,34,241],120485:[482,276,278,-70,228],120488:[686,3,748,6,739],120489:[681,3,659,31,603],120490:[681,3,562,31,542],120491:[686,3,662,25,637],120492:[681,3,606,31,569],120493:[681,3,670,25,628],120494:[681,3,822,31,791],120495:[695,17,831,47,787],120496:[681,3,389,40,351],120497:[681,3,761,31,755],120498:[686,3,748,6,739],120499:[681,10,1009,38,974],120500:[681,16,822,31,794],120501:[681,3,719,42,676],120502:[695,17,832,46,786],120503:[681,3,822,31,791],120504:[681,3,611,31,586],120505:[695,17,831,47,787],120506:[681,3,669,25,628],120507:[681,3,673,20,653],120508:[695,3,675,15,660],120509:[681,3,833,47,787],120510:[695,3,620,-8,625],120511:[681,3,742,4,738],120512:[695,3,827,27,804],120513:[676,13,662,25,637],120514:[469,17,563,43,563],120515:[718,272,617,71,576],120516:[469,232,571,-14,572],120517:[718,17,482,41,440],120518:[471,17,491,41,467],120519:[718,232,491,45,468],120520:[469,271,569,5,499],120521:[695,17,550,49,502],120522:[469,17,359,79,349],120523:[469,17,623,22,601],120524:[718,19,613,10,603],120525:[469,271,608,16,601],120526:[469,17,533,-9,494],120527:[718,232,476,54,477],120528:[469,17,539,41,496],120529:[493,17,777,55,754],120530:[469,271,570,69,529],120531:[469,232,486,48,464],120532:[482,17,660,54,637],120533:[493,17,618,32,594],120534:[469,17,538,-5,495],120535:[469,271,727,41,684],120
536:[469,232,654,22,656],120537:[636,271,728,-5,687],120538:[469,17,802,41,759],120539:[740,17,571,47,512],120540:[471,17,576,69,536],120541:[695,17,602,22,580],120542:[469,17,693,39,654],120543:[633,268,722,41,680],120544:[469,271,561,70,519],120545:[559,17,803,41,760],120546:[700,3,744,-35,697],120547:[692,3,624,33,601],120548:[692,3,539,-17,609],120549:[700,3,616,-33,572],120550:[692,3,615,7,640],120551:[692,3,661,-23,705],120552:[692,3,819,-17,878],120553:[709,20,833,67,813],120554:[692,3,334,-17,393],120555:[692,3,698,-18,761],120556:[700,3,720,-46,685],120557:[692,13,934,-22,987],120558:[692,20,836,-18,885],120559:[692,3,693,16,683],120560:[709,20,833,66,811],120561:[692,3,819,-17,878],120562:[692,3,592,-17,627],120563:[709,20,833,67,813],120564:[692,3,696,4,672],120565:[692,3,602,79,666],120566:[705,3,634,78,717],120567:[692,3,833,71,806],120568:[700,3,643,-31,704],120569:[692,3,767,94,832],120570:[709,3,822,4,799],120571:[690,13,616,80,684],120572:[482,11,537,22,496],120573:[711,277,582,7,534],120574:[482,226,571,14,589],120575:[711,11,458,48,450],120576:[484,11,444,39,401],120577:[711,226,454,47,539],120578:[482,276,526,46,506],120579:[711,11,492,71,493],120580:[482,9,285,54,264],120581:[482,9,518,61,526],120582:[711,12,569,-32,543],120583:[482,276,596,32,549],120584:[482,12,499,41,517],120585:[711,226,456,48,540],120586:[482,11,484,53,454],120587:[493,11,677,68,705],120588:[482,276,524,-6,495],120589:[482,226,472,38,454],120590:[494,11,582,52,639],120591:[493,11,559,68,594],120592:[482,11,528,56,510],120593:[482,276,638,50,610],120594:[482,226,557,-44,588],120595:[646,276,646,48,640],120596:[482,11,765,42,759],120597:[733,9,545,64,526],120598:[482,11,489,54,491],120599:[711,11,553,57,581],120600:[483,17,660,72,609],120601:[644,274,637,54,605],120602:[482,276,535,55,492],120603:[548,11,765,42,759],120604:[686,3,733,-35,690],120605:[681,3,646,22,627],120606:[681,3,551,-10,609],120607:[686,3,649,-13,593],120608:[681,3,594,22,625],120609:[681,3,657,-16,692],12
0610:[681,3,806,-10,855],120611:[695,17,814,56,799],120612:[681,3,381,-1,424],120613:[681,3,746,-10,795],120614:[686,3,733,-35,690],120615:[681,10,989,-3,1035],120616:[681,16,806,-10,858],120617:[681,3,705,22,693],120618:[695,17,815,55,798],120619:[681,3,806,-10,855],120620:[681,3,599,-10,630],120621:[695,17,814,56,799],120622:[681,3,656,-16,640],120623:[681,3,660,84,719],120624:[695,3,661,84,726],120625:[681,3,816,61,795],120626:[695,3,608,-48,691],120627:[681,3,727,77,803],120628:[695,3,810,-4,806],120629:[676,13,649,95,701],120630:[469,17,536,20,514],120631:[718,272,588,-21,551],120632:[469,232,544,12,585],120633:[718,17,459,34,483],120634:[471,17,468,24,444],120635:[718,232,468,40,525],120636:[469,271,542,41,523],120637:[695,17,524,61,526],120638:[469,17,342,48,313],120639:[469,17,593,55,570],120640:[718,19,584,-29,552],120641:[469,271,579,3,551],120642:[469,17,508,27,527],120643:[718,232,453,49,534],120644:[469,17,513,33,495],120645:[493,17,740,61,778],120646:[469,271,543,-13,526],120647:[469,232,463,36,451],120648:[482,17,629,46,664],120649:[493,17,589,65,626],120650:[469,17,512,33,507],120651:[469,271,692,31,675],120652:[469,232,623,-42,582],120653:[636,271,693,33,690],120654:[469,17,764,37,759],120655:[740,17,544,70,529],120656:[471,17,549,64,538],120657:[695,17,573,59,618],120658:[469,17,660,67,609],120659:[633,268,688,36,667],120660:[469,271,534,54,517],120661:[559,17,765,37,760],120782:[660,17,500,33,468],120783:[670,3,500,35,455],120784:[660,3,500,25,472],120785:[660,17,500,22,458],120786:[672,3,500,12,473],120787:[656,17,500,42,472],120788:[660,17,500,37,469],120789:[656,3,500,46,493],120790:[660,17,500,34,467],120791:[660,17,500,31,463]};MathJax.Callback.Queue(["initFont",MathJax.OutputJax["HTML-CSS"],"GyrePagellaMathJax_Normal"],["loadComplete",MathJax.Ajax,MathJax.OutputJax["HTML-CSS"].fontDir+"/Normal/Regular/Main.js"]); | PypiClean |
/MLatom-2.3.3.tar.gz/MLatom-2.3.3/README.md | # Brief Introduction
A Package for Atomistic Simulations with Machine Learning
**manual**: http://mlatom.com/manual/
**tutorial**: http://mlatom.com/tutorial/
# Tasks Performed by MLatom
A brief overview of MLatom capabilities (see the links above for a more up-to-date version). See the sections below for more details.
## Tasks
- Estimating accuracy of ML models.
- Creating ML model and saving it to a file.
- Loading existing ML model from a file and performing ML calculations with this model.
- ML-accelerated calculation of absorption spectra within nuclear ensemble approach
- Learning curves
- ML-two photon absorption
## Data Set Operations
- Converting XYZ coordinates into an input vector (molecular descriptor) for ML.
- Sampling subsets from a data set.
# Sampling
- none: simply splitting the data set into the training, test, and, if necessary, training set into the subtraining and validation sets (in this order) without changing the order of indices.
- random sampling.
- user-defined: requests MLatom to read indices for the training, test, and, if necessary, for the subtraining and validation sets from files.
- [ structure-based sampling ](http://mlatom.com/self-correcting-machine-learning-and-structure-based-sampling/)
- from unsliced and sliced data
- [ farthest-point traversal iterative procedure ](https://en.wikipedia.org/wiki/Farthest-first_traversal), which starts from two points farthest apart.
# ML Algorithm
[ Kernel ridge regression](https://web.stanford.edu/~hastie/ElemStatLearn/) with the following kernels:
- [ Gaussian ](https://doi.org/10.1103/PhysRevLett.108.058301).
- [ Laplacian ](https://doi.org/10.1103/PhysRevLett.108.058301).
- exponential.
- [ Matérn ](http://dx.doi.org/10.1198/jasa.2010.tm09420) ([ details of implementation ](http://dx.doi.org/10.1021/acs.jpclett.8b02469)).
Permutationally invariant kernel and self-correction are also supported.
# Hybrid QM/ML Approaches
[ Δ-machine learning ](http://dx.doi.org/10.1021/acs.jctc.5b00099).
# Molecular Descriptors
- [ Coulomb matrix ](https://doi.org/10.1103/PhysRevLett.108.058301)
- [ sorted by norms of its rows ](http://dx.doi.org/10.1021/ct400195d);
- unsorted;
- permuted.
- [ Normalized inverse internuclear distances (RE descriptor)](http://mlatom.com/self-correcting-machine-learning-and-structure-based-sampling/)
- sorted for user-defined atoms by the sum of their nuclear repulsions to all other atoms;
- unsorted;
- permuted.
# ML models
The [ KREG (Kernel-ridge-regression using RE descriptor and the Gaussian kernel function )](http://dx.doi.org/10.1021/acs.jpclett.8b02469) model is the default ML method.
## General-purpose ML models
- AIQM1 (requires interfaces to other programs as described in http://MLatom.com/AIQM1)
- Models available via interface to [TorchANI](https://doi.org/10.1021/acs.jcim.0c00451)
- ANI-1x
- ANI-1ccx
- ANI-2x
# Model Validation
[ ML model can be validated (generalization error can be estimated) in several ways: ](https://web.stanford.edu/~hastie/ElemStatLearn/)
- on a hold-out test set not used for training. Both training and test sets can be sampled in one of the ways described above;
- by performing N-fold cross-validation. User can define the number of folds N. If N is equal to the number of data points, leave-one-out cross-validation is performed. Only random or no sampling can be used for cross-validation.
- by performing leave-one-out cross-validation (special case of N-fold cross-validation).
MLatom prints out mean absolute error (MAE), mean signed error (MSE), root-mean-squared error (RMSE), mean values of reference and estimated values, largest positive and negative outliers, correlation coefficient and its squared value R2 as well as coefficients of linear regression and corresponding standard deviations.
# Hyperparameter Tuning
Gaussian, Laplacian, and Matérn kernels have σ and λ tunable hyperparameters. MLatom can determine them by performing user-defined number of iterations of hyperparameter optimization on a logarithmic grid. User can adjust number of grid points, starting and finishing points on the grid. Hyperparameter are tuned to minimize either mean absolute error or root-mean-square error as defined by the user. [ Hyperparameters can be tuned to minimize ](https://web.stanford.edu/~hastie/ElemStatLearn/)
- the error of the ML model trained on the subtraining set in a hold-out validation set. Both subtraining and validation sets are parts of the training set, which can be used at the end with optimal parameters for training the final ML model. These sets ideally should not overlap and can be [ sampled ](http://mlatom.com/features/#Sampling) from the training set in one of the ways described above;
- N-fold cross-validation error. User can define the number of folds N. If N is equal to the number of data points, leave-one-out cross-validation is performed. Only random or no sampling can be used for cross-validation.
Note that hyperparameter tuning can be performed together with model validation. This means that for example one can perform outer loop of the cross-validation for model validation and tune hyperparameters via inner loop of the cross-validation.
Apart from natively implemented logarithmic grid search for hyperparameters, MLatom also provides the interface to the [ hyperopt package ](http://hyperopt.github.io/hyperopt/) implementing hyperparameter optimization using Bayesian methods with Tree-structured Parzen Estimator (TPE).
# First Derivatives
MLatom can be also used to estimate first derivatives from an ML model. Two scenarios are possible:
- partial derivatives are calculated for each dimension of given input vectors (analytical derivatives for Gaussian and Matern kernels);
- first derivatives are calculated in XYZ coordinates for input files containing molecular XYZ coordinates (analytical derivatives for the RE and Coulomb matrix descriptors).
- derivatives for interfaced models
# UV/vis spectra
MLatom can significantly accelerate the calculation of cross-section with the Nuclear Ensemble Approach (NEA).
In brief, this feature uses fewer QC calculations to achieve higher precision and reduce computational cost. You can find more details in this paper (please cite it when using this feature):
> Bao-Xin Xue, Mario Barbatti, Pavlo O. Dral, [ Machine Learning for Absorption Cross Sections ](https://doi.org/10.1021/acs.jpca.0c05310), J. Phys. Chem. A 2020, 124, 7199–7210. DOI: 10.1021/acs.jpca.0c05310.
# Interfaces to 3<sup>rd</sup>-party software
MLatom also provides interfaces to some third-party software where extra ML model types are natively implemented. It allows users to access other popular ML model types within MLatom's workflow. Currently available third-party model types are:
- [ANI](https://doi.org/10.1039/c6sc05720a) (through [TorchANI](https://doi.org/10.1021/acs.jcim.0c00451))
- [DeepPot-SE](https://papers.nips.cc/paper/2018/hash/e2ad76f2326fbc6b56a45a56c59fafdb-Abstract.html) and [DPMD](https://doi.org/10.1103/PhysRevLett.120.143001) (through [DeePMD-kit](https://doi.org/10.1016/j.cpc.2018.03.016))
- [GAP](https://doi.org/10.1103/Physrevlett.104.136403)-[SOAP](https://doi.org/10.1103/physrevb.87.184115) (through [GAP](www.libatoms.org) suite and [QUIP](http://github.com/libAtoms/QUIP))
- [PhysNet](https://doi.org/10.1021/acs.jctc.9b00181) (through [PhysNet](github.com/MMunibas/PhysNet))
- [sGDML](https://doi.org/10.1038/s41467-018-06169-2) (through [sGDML](www.sgdml.org))
# About Program
MLatom: a Package for Atomistic Simulations with Machine Learning
Version 2.3.3
http://mlatom.com/
Copyright (c) 2013-2022 Pavlo O. Dral
http://dr-dral.com/
All rights reserved. This work is licensed under the [Attribution-NonCommercial-NoDerivatives 4.0 International](http://creativecommons.org/licenses/by-nc-nd/4.0/) license. See LICENSE.CC-BY-NC-ND-4.0.
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
The software is provided "as is", without warranty of any kind, express or implied, including but not limited to the warranties of merchantability, fitness for a particular purpose and noninfringement. In no event shall the authors or copyright holders be liable for any claim, damages or other liability, whether in an action of contract, tort or otherwise, arising from, out of or in connection with the software or the use or other dealings in the software.
Cite as:
1. Pavlo O. Dral, J. Comput. Chem. 2019, 40, 2339-2347
2. Pavlo O. Dral, Fuchun Ge, Bao-Xin Xue, Yi-Fan Hou, Max Pinheiro Jr, Jianxing Huang, Mario Barbatti, Top. Curr. Chem. 2021, 379, 27
3. Pavlo O. Dral, Peikun Zheng, Bao-Xin Xue, Fuchun Ge, Yi-Fan Hou, Max Pinheiro Jr, Yuming Su, Yiheng Dai, Yangtao Chen, MLatom: A Package for Atomistic Simulations with Machine Learning, version 2.3.3, Xiamen University, Xiamen, China, 2013-2022.
# License
This work is licensed under the [Attribution-NonCommercial-NoDerivatives 4.0 International](http://creativecommons.org/licenses/by-nc-nd/4.0/) license. See LICENSE.CC-BY-NC-ND-4.0.
<a rel="license" href="http://creativecommons.org/licenses/by-nc-nd/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by-nc-nd/4.0/88x31.png" /></a>
| PypiClean |
/ImSwitchUC2-2.1.0.tar.gz/ImSwitchUC2-2.1.0/imswitch/imcontrol/model/interfaces/squid.py | import platform
import serial
import serial.tools.list_ports
import time
import numpy as np
import threading
from qtpy.QtCore import *
from qtpy.QtWidgets import *
from qtpy.QtGui import *
from imswitch.imcommon.model import initLogger
from imswitch.imcontrol.model.interfaces.squid_def import *
# add the user to the dialout group to avoid the need to use sudo
# done (7/20/2021) - removed the time.sleep in all functions (except for __init__) to
# make all callable functions non-blocking; instead, the user should call is_busy() to
# check whether the microcontroller has finished executing the most recent command
# to do (7/28/2021) - add functions for configuring the stepper motors
class SQUID():
def __init__(self, parent=None, port=None):
    """Open the serial link to the Squid microcontroller and start the
    background thread that reads status packets.

    :param parent: unused here; kept for Qt-style constructor compatibility
    :param port: serial device to use; if None, an Arduino is auto-detected
    """
    self.__logger = initLogger(self)
    self.serial = None
    self.platform_name = platform.system()
    # Fixed-size command/response frame lengths defined by the firmware.
    self.tx_buffer_length = MicrocontrollerDef.CMD_LENGTH
    self.rx_buffer_length = MicrocontrollerDef.MSG_LENGTH

    # Command bookkeeping, updated by the reader thread.
    self._cmd_id = 0
    self._cmd_id_mcu = None  # command id of mcu's last received command
    self._cmd_execution_status = None
    self.mcu_cmd_execution_in_progress = False

    # Cached stage / input state reported by the microcontroller.
    self.x_pos = 0  # unit: microstep or encoder resolution
    self.y_pos = 0  # unit: microstep or encoder resolution
    self.z_pos = 0  # unit: microstep or encoder resolution
    self.theta_pos = 0  # unit: microstep or encoder resolution
    self.button_and_switch_state = 0
    self.joystick_button_pressed = 0
    self.signal_joystick_button_pressed_event = False
    self.switch_state = 0

    self.last_command = None
    self.timeout_counter = 0

    # establish serial communication
    if port is None:
        port = self.autodetectSerial()
    try:
        self.serial = serial.Serial(port, 2000000)
    # NOTE(review): bare except hides real errors; consider narrowing to
    # serial.SerialException.
    except:
        # one more attempt to find the serial:
        port = self.autodetectSerial()
        self.serial = serial.Serial(port, 2000000)

    # Start the daemon thread that continuously reads incoming packets.
    self.new_packet_callback_external = None
    self.terminate_reading_received_packet_thread = False
    self.thread_read_received_packet = threading.Thread(target=self.read_received_packet, daemon=True)
    self.thread_read_received_packet.start()
def autodetectSerial(self):
    """Locate the serial port of an attached Arduino.

    Raises IOError when none is present; when several are found,
    the first one is used.
    """
    # AUTO-DETECT the Arduino! By Deepak
    candidates = [
        p.device
        for p in serial.tools.list_ports.comports()
        if 'Arduino' in p.description]
    if not candidates:
        raise IOError("No Arduino found")
    if len(candidates) > 1:
        self.__logger.debug('Multiple Arduinos found - using the first')
    else:
        self.__logger.debug('Using Arduino found at : {}'.format(candidates[0]))
    return candidates[0]
def close(self):
    """Stop the packet-reader thread, then close the serial port."""
    # Signal the reader loop to exit and wait for it before closing the
    # port, so the thread never reads from a closed handle.
    self.terminate_reading_received_packet_thread = True
    self.thread_read_received_packet.join()
    self.serial.close()
def turn_on_illumination(self):
    """Send the TURN_ON_ILLUMINATION command to the microcontroller."""
    packet = bytearray(self.tx_buffer_length)
    packet[1] = CMD_SET.TURN_ON_ILLUMINATION
    self.send_command(packet)
def turn_off_illumination(self):
    """Send the TURN_OFF_ILLUMINATION command to the microcontroller."""
    packet = bytearray(self.tx_buffer_length)
    packet[1] = CMD_SET.TURN_OFF_ILLUMINATION
    self.send_command(packet)
def set_laser(self, channel=0, intensity=0):
    """Set a single illumination channel to the given intensity.

    :param channel: 0 -> red, 1 -> green, 2 -> blue (per the r/g/b
        intensity variables forwarded below)
    :param intensity: intensity value forwarded to set_illumination
    """
    intensity_r = 0
    intensity_g = 0
    intensity_b = 0
    if channel == 0:
        intensity_r = intensity
    elif channel == 1:
        intensity_g = intensity
    elif channel == 2:
        # bugfix: this branch previously re-tested ``channel == 0``, so the
        # blue channel could never be selected on its own (and channel 0
        # set blue as well as red).
        intensity_b = intensity
    # assumes the channel index doubles as the illumination source id --
    # TODO confirm the channel -> illumination_source mapping
    illumination_source = channel
    self.set_illumination(illumination_source, intensity, r=intensity_r, g=intensity_g, b=intensity_b)
def set_illumination(self, illumination_source, intensity, r=None, g=None, b=None):
    """Send SET_ILLUMINATION with the intensity scaled from percent to 16 bits.

    The r/g/b keyword arguments are accepted for call-site compatibility but
    are not transmitted by this command (see set_illumination_led_matrix for
    per-color control).
    """
    scaled = int((intensity / 100) * 65535)
    packet = bytearray(self.tx_buffer_length)
    packet[1] = CMD_SET.SET_ILLUMINATION
    packet[2] = illumination_source
    packet[3] = scaled >> 8
    packet[4] = scaled & 0xff
    self.send_command(packet)
def set_illumination_led_matrix(self, illumination_source, r, g, b):
    """Send SET_ILLUMINATION_LED_MATRIX with r/g/b scaled from 0..1 to 0..255."""
    packet = bytearray(self.tx_buffer_length)
    packet[1] = CMD_SET.SET_ILLUMINATION_LED_MATRIX
    packet[2] = illumination_source
    # Clamp each scaled channel value into a single byte.
    for offset, channel_value in enumerate((r, g, b), start=3):
        packet[offset] = min(int(channel_value * 255), 255)
    self.send_command(packet)
def send_hardware_trigger(self, control_illumination=False, illumination_on_time_us=0, trigger_output_ch=0):
    """Send SEND_HARDWARE_TRIGGER for the given trigger output channel.

    The on-time is packed big-endian into four bytes; the MSB of byte 2
    flags whether illumination is controlled by the trigger.
    """
    on_time = int(illumination_on_time_us)
    packet = bytearray(self.tx_buffer_length)
    packet[1] = CMD_SET.SEND_HARDWARE_TRIGGER
    packet[2] = (control_illumination << 7) + trigger_output_ch  # MSB: whether illumination is controlled
    packet[3] = on_time >> 24
    packet[4] = (on_time >> 16) & 0xff
    packet[5] = (on_time >> 8) & 0xff
    packet[6] = on_time & 0xff
    self.send_command(packet)
def set_strobe_delay_us(self, strobe_delay_us, camera_channel=0):
cmd = bytearray(self.tx_buffer_length)
cmd[1] = CMD_SET.SET_STROBE_DELAY
cmd[2] = camera_channel
cmd[3] = strobe_delay_us >> 24
cmd[4] = (strobe_delay_us >> 16) & 0xff
cmd[5] = (strobe_delay_us >> 8) & 0xff
cmd[6] = strobe_delay_us & 0xff
self.send_command(cmd)
'''
def move_x(self,delta):
direction = int((np.sign(delta)+1)/2)
n_microsteps = abs(delta*Motion.STEPS_PER_MM_XY)
if n_microsteps > 65535:
n_microsteps = 65535
cmd = bytearray(self.tx_buffer_length)
cmd[0] = CMD_SET.MOVE_X
cmd[1] = direction
cmd[2] = int(n_microsteps) >> 8
cmd[3] = int(n_microsteps) & 0xff
self.serial.write(cmd)
'''
def move_x_usteps(self,usteps):
direction = STAGE_MOVEMENT_SIGN_X*np.sign(usteps)
n_microsteps_abs = abs(usteps)
# if n_microsteps_abs exceed the max value that can be sent in one go
while n_microsteps_abs >= (2**32)/2:
n_microsteps_partial_abs = (2**32)/2 - 1
n_microsteps_partial = direction*n_microsteps_partial_abs
payload = self._int_to_payload(n_microsteps_partial,4)
cmd = bytearray(self.tx_buffer_length)
cmd[1] = CMD_SET.MOVE_X
cmd[2] = payload >> 24
cmd[3] = (payload >> 16) & 0xff
cmd[4] = (payload >> 8) & 0xff
cmd[5] = payload & 0xff
self.send_command(cmd)
# while self.mcu_cmd_execution_in_progress == True:
# time.sleep(self._motion_status_checking_interval)
n_microsteps_abs = n_microsteps_abs - n_microsteps_partial_abs
n_microsteps = direction*n_microsteps_abs
payload = self._int_to_payload(n_microsteps,4)
cmd = bytearray(self.tx_buffer_length)
cmd[1] = CMD_SET.MOVE_X
cmd[2] = payload >> 24
cmd[3] = (payload >> 16) & 0xff
cmd[4] = (payload >> 8) & 0xff
cmd[5] = payload & 0xff
self.send_command(cmd)
# while self.mcu_cmd_execution_in_progress == True:
# time.sleep(self._motion_status_checking_interval)
def move_x_to_usteps(self,usteps):
payload = self._int_to_payload(STAGE_MOVEMENT_SIGN_X*usteps,4)
cmd = bytearray(self.tx_buffer_length)
cmd[1] = CMD_SET.MOVETO_X
cmd[2] = payload >> 24
cmd[3] = (payload >> 16) & 0xff
cmd[4] = (payload >> 8) & 0xff
cmd[5] = payload & 0xff
self.send_command(cmd)
'''
def move_y(self,delta):
direction = int((np.sign(delta)+1)/2)
n_microsteps = abs(delta*Motion.STEPS_PER_MM_XY)
if n_microsteps > 65535:
n_microsteps = 65535
cmd = bytearray(self.tx_buffer_length)
cmd[0] = CMD_SET.MOVE_Y
cmd[1] = direction
cmd[2] = int(n_microsteps) >> 8
cmd[3] = int(n_microsteps) & 0xff
self.serial.write(cmd)
'''
def move_y_usteps(self,usteps):
direction = STAGE_MOVEMENT_SIGN_Y*np.sign(usteps)
n_microsteps_abs = abs(usteps)
# if n_microsteps_abs exceed the max value that can be sent in one go
while n_microsteps_abs >= (2**32)/2:
n_microsteps_partial_abs = (2**32)/2 - 1
n_microsteps_partial = direction*n_microsteps_partial_abs
payload = self._int_to_payload(n_microsteps_partial,4)
cmd = bytearray(self.tx_buffer_length)
cmd[1] = CMD_SET.MOVE_Y
cmd[2] = payload >> 24
cmd[3] = (payload >> 16) & 0xff
cmd[4] = (payload >> 8) & 0xff
cmd[5] = payload & 0xff
self.send_command(cmd)
# while self.mcu_cmd_execution_in_progress == True:
# time.sleep(self._motion_status_checking_interval)
n_microsteps_abs = n_microsteps_abs - n_microsteps_partial_abs
n_microsteps = direction*n_microsteps_abs
payload = self._int_to_payload(n_microsteps,4)
cmd = bytearray(self.tx_buffer_length)
cmd[1] = CMD_SET.MOVE_Y
cmd[2] = payload >> 24
cmd[3] = (payload >> 16) & 0xff
cmd[4] = (payload >> 8) & 0xff
cmd[5] = payload & 0xff
self.send_command(cmd)
# while self.mcu_cmd_execution_in_progress == True:
# time.sleep(self._motion_status_checking_interval)
def move_y_to_usteps(self,usteps):
payload = self._int_to_payload(STAGE_MOVEMENT_SIGN_Y*usteps,4)
cmd = bytearray(self.tx_buffer_length)
cmd[1] = CMD_SET.MOVETO_Y
cmd[2] = payload >> 24
cmd[3] = (payload >> 16) & 0xff
cmd[4] = (payload >> 8) & 0xff
cmd[5] = payload & 0xff
self.send_command(cmd)
'''
def move_z(self,delta):
direction = int((np.sign(delta)+1)/2)
n_microsteps = abs(delta*Motion.STEPS_PER_MM_Z)
if n_microsteps > 65535:
n_microsteps = 65535
cmd = bytearray(self.tx_buffer_length)
cmd[0] = CMD_SET.MOVE_Z
cmd[1] = 1-direction
cmd[2] = int(n_microsteps) >> 8
cmd[3] = int(n_microsteps) & 0xff
self.serial.write(cmd)
'''
def move_z_usteps(self,usteps):
direction = STAGE_MOVEMENT_SIGN_Z*np.sign(usteps)
n_microsteps_abs = abs(usteps)
# if n_microsteps_abs exceed the max value that can be sent in one go
while n_microsteps_abs >= (2**32)/2:
n_microsteps_partial_abs = (2**32)/2 - 1
n_microsteps_partial = direction*n_microsteps_partial_abs
payload = self._int_to_payload(n_microsteps_partial,4)
cmd = bytearray(self.tx_buffer_length)
cmd[1] = CMD_SET.MOVE_Z
cmd[2] = payload >> 24
cmd[3] = (payload >> 16) & 0xff
cmd[4] = (payload >> 8) & 0xff
cmd[5] = payload & 0xff
self.send_command(cmd)
# while self.mcu_cmd_execution_in_progress == True:
# time.sleep(self._motion_status_checking_interval)
n_microsteps_abs = n_microsteps_abs - n_microsteps_partial_abs
n_microsteps = direction*n_microsteps_abs
payload = self._int_to_payload(n_microsteps,4)
cmd = bytearray(self.tx_buffer_length)
cmd[1] = CMD_SET.MOVE_Z
cmd[2] = payload >> 24
cmd[3] = (payload >> 16) & 0xff
cmd[4] = (payload >> 8) & 0xff
cmd[5] = payload & 0xff
self.send_command(cmd)
# while self.mcu_cmd_execution_in_progress == True:
# time.sleep(self._motion_status_checking_interval)
def move_z_to_usteps(self,usteps):
payload = self._int_to_payload(STAGE_MOVEMENT_SIGN_Z*usteps,4)
cmd = bytearray(self.tx_buffer_length)
cmd[1] = CMD_SET.MOVETO_Z
cmd[2] = payload >> 24
cmd[3] = (payload >> 16) & 0xff
cmd[4] = (payload >> 8) & 0xff
cmd[5] = payload & 0xff
self.send_command(cmd)
def move_theta_usteps(self,usteps):
direction = STAGE_MOVEMENT_SIGN_THETA*np.sign(usteps)
n_microsteps_abs = abs(usteps)
# if n_microsteps_abs exceed the max value that can be sent in one go
while n_microsteps_abs >= (2**32)/2:
n_microsteps_partial_abs = (2**32)/2 - 1
n_microsteps_partial = direction*n_microsteps_partial_abs
payload = self._int_to_payload(n_microsteps_partial,4)
cmd = bytearray(self.tx_buffer_length)
cmd[1] = CMD_SET.MOVE_THETA
cmd[2] = payload >> 24
cmd[3] = (payload >> 16) & 0xff
cmd[4] = (payload >> 8) & 0xff
cmd[5] = payload & 0xff
self.send_command(cmd)
# while self.mcu_cmd_execution_in_progress == True:
# time.sleep(self._motion_status_checking_interval)
n_microsteps_abs = n_microsteps_abs - n_microsteps_partial_abs
n_microsteps = direction*n_microsteps_abs
payload = self._int_to_payload(n_microsteps,4)
cmd = bytearray(self.tx_buffer_length)
cmd[1] = CMD_SET.MOVE_THETA
cmd[2] = payload >> 24
cmd[3] = (payload >> 16) & 0xff
cmd[4] = (payload >> 8) & 0xff
cmd[5] = payload & 0xff
self.send_command(cmd)
# while self.mcu_cmd_execution_in_progress == True:
# time.sleep(self._motion_status_checking_interval)
def home_x(self):
cmd = bytearray(self.tx_buffer_length)
cmd[1] = CMD_SET.HOME_OR_ZERO
cmd[2] = AXIS.X
cmd[3] = int((STAGE_MOVEMENT_SIGN_X+1)/2) # "move backward" if SIGN is 1, "move forward" if SIGN is -1
self.send_command(cmd)
# while self.mcu_cmd_execution_in_progress == True:
# time.sleep(self._motion_status_checking_interval)
# # to do: add timeout
def home_y(self):
cmd = bytearray(self.tx_buffer_length)
cmd[1] = CMD_SET.HOME_OR_ZERO
cmd[2] = AXIS.Y
cmd[3] = int((STAGE_MOVEMENT_SIGN_Y+1)/2) # "move backward" if SIGN is 1, "move forward" if SIGN is -1
self.send_command(cmd)
# while self.mcu_cmd_execution_in_progress == True:
# sleep(self._motion_status_checking_interval)
# # to do: add timeout
def home_z(self):
cmd = bytearray(self.tx_buffer_length)
cmd[1] = CMD_SET.HOME_OR_ZERO
cmd[2] = AXIS.Z
cmd[3] = int((STAGE_MOVEMENT_SIGN_Z+1)/2) # "move backward" if SIGN is 1, "move forward" if SIGN is -1
self.send_command(cmd)
# while self.mcu_cmd_execution_in_progress == True:
# time.sleep(self._motion_status_checking_interval)
# # to do: add timeout
def home_theta(self):
cmd = bytearray(self.tx_buffer_length)
cmd[1] = CMD_SET.HOME_OR_ZERO
cmd[2] = 3
cmd[3] = int((STAGE_MOVEMENT_SIGN_THETA+1)/2) # "move backward" if SIGN is 1, "move forward" if SIGN is -1
self.send_command(cmd)
# while self.mcu_cmd_execution_in_progress == True:
# time.sleep(self._motion_status_checking_interval)
# # to do: add timeout
def home_xy(self):
cmd = bytearray(self.tx_buffer_length)
cmd[1] = CMD_SET.HOME_OR_ZERO
cmd[2] = AXIS.XY
cmd[3] = int((STAGE_MOVEMENT_SIGN_X+1)/2) # "move backward" if SIGN is 1, "move forward" if SIGN is -1
cmd[4] = int((STAGE_MOVEMENT_SIGN_Y+1)/2) # "move backward" if SIGN is 1, "move forward" if SIGN is -1
self.send_command(cmd)
def zero_x(self):
cmd = bytearray(self.tx_buffer_length)
cmd[1] = CMD_SET.HOME_OR_ZERO
cmd[2] = AXIS.X
cmd[3] = HOME_OR_ZERO.ZERO
self.send_command(cmd)
# while self.mcu_cmd_execution_in_progress == True:
# time.sleep(self._motion_status_checking_interval)
# # to do: add timeout
def zero_y(self):
cmd = bytearray(self.tx_buffer_length)
cmd[1] = CMD_SET.HOME_OR_ZERO
cmd[2] = AXIS.Y
cmd[3] = HOME_OR_ZERO.ZERO
self.send_command(cmd)
# while self.mcu_cmd_execution_in_progress == True:
# sleep(self._motion_status_checking_interval)
# # to do: add timeout
def zero_z(self):
cmd = bytearray(self.tx_buffer_length)
cmd[1] = CMD_SET.HOME_OR_ZERO
cmd[2] = AXIS.Z
cmd[3] = HOME_OR_ZERO.ZERO
self.send_command(cmd)
# while self.mcu_cmd_execution_in_progress == True:
# time.sleep(self._motion_status_checking_interval)
# # to do: add timeout
def zero_theta(self):
cmd = bytearray(self.tx_buffer_length)
cmd[1] = CMD_SET.HOME_OR_ZERO
cmd[2] = AXIS.THETA
cmd[3] = HOME_OR_ZERO.ZERO
self.send_command(cmd)
# while self.mcu_cmd_execution_in_progress == True:
# time.sleep(self._motion_status_checking_interval)
# # to do: add timeout
def set_lim(self,limit_code,usteps):
cmd = bytearray(self.tx_buffer_length)
cmd[1] = CMD_SET.SET_LIM
cmd[2] = limit_code
payload = self._int_to_payload(usteps,4)
cmd[3] = payload >> 24
cmd[4] = (payload >> 16) & 0xff
cmd[5] = (payload >> 8) & 0xff
cmd[6] = payload & 0xff
self.send_command(cmd)
def set_limit_switch_polarity(self,axis,polarity):
cmd = bytearray(self.tx_buffer_length)
cmd[1] = CMD_SET.SET_LIM_SWITCH_POLARITY
cmd[2] = axis
cmd[3] = polarity
self.send_command(cmd)
def configure_motor_driver(self,axis,microstepping,current_rms,I_hold):
# current_rms in mA
# I_hold 0.0-1.0
cmd = bytearray(self.tx_buffer_length)
cmd[1] = CMD_SET.CONFIGURE_STEPPER_DRIVER
cmd[2] = axis
if microstepping == 1:
cmd[3] = 0
else:
cmd[3] = microstepping
cmd[4] = current_rms >> 8
cmd[5] = current_rms & 0xff
cmd[6] = int(I_hold*255)
self.send_command(cmd)
def set_max_velocity_acceleration(self,axis,velocity,acceleration):
# velocity: max 65535/100 mm/s
# acceleration: max 65535/10 mm/s^2
cmd = bytearray(self.tx_buffer_length)
cmd[1] = CMD_SET.SET_MAX_VELOCITY_ACCELERATION
cmd[2] = axis
cmd[3] = int(velocity*100) >> 8
cmd[4] = int(velocity*100) & 0xff
cmd[5] = int(acceleration*10) >> 8
cmd[6] = int(acceleration*10) & 0xff
self.send_command(cmd)
def set_leadscrew_pitch(self,axis,pitch_mm):
# pitch: max 65535/1000 = 65.535 (mm)
cmd = bytearray(self.tx_buffer_length)
cmd[1] = CMD_SET.SET_LEAD_SCREW_PITCH
cmd[2] = axis
cmd[3] = int(pitch_mm*1000) >> 8
cmd[4] = int(pitch_mm*1000) & 0xff
self.send_command(cmd)
def configure_actuators(self):
# lead screw pitch
self.set_leadscrew_pitch(AXIS.X,SCREW_PITCH_X_MM)
self.set_leadscrew_pitch(AXIS.Y,SCREW_PITCH_Y_MM)
self.set_leadscrew_pitch(AXIS.Z,SCREW_PITCH_Z_MM)
# stepper driver (microstepping,rms current and I_hold)
self.configure_motor_driver(AXIS.X,MICROSTEPPING_DEFAULT_X,X_MOTOR_RMS_CURRENT_mA,X_MOTOR_I_HOLD)
self.configure_motor_driver(AXIS.Y,MICROSTEPPING_DEFAULT_Y,Y_MOTOR_RMS_CURRENT_mA,Y_MOTOR_I_HOLD)
self.configure_motor_driver(AXIS.Z,MICROSTEPPING_DEFAULT_Z,Z_MOTOR_RMS_CURRENT_mA,Z_MOTOR_I_HOLD)
# max velocity and acceleration
self.set_max_velocity_acceleration(AXIS.X,MAX_VELOCITY_X_mm,MAX_ACCELERATION_X_mm)
self.set_max_velocity_acceleration(AXIS.Y,MAX_VELOCITY_Y_mm,MAX_ACCELERATION_Y_mm)
self.set_max_velocity_acceleration(AXIS.Z,MAX_VELOCITY_Z_mm,MAX_ACCELERATION_Z_mm)
# home switch
self.set_limit_switch_polarity(AXIS.X,X_HOME_SWITCH_POLARITY)
self.set_limit_switch_polarity(AXIS.Y,Y_HOME_SWITCH_POLARITY)
self.set_limit_switch_polarity(AXIS.Z,Z_HOME_SWITCH_POLARITY)
def ack_joystick_button_pressed(self):
cmd = bytearray(self.tx_buffer_length)
cmd[1] = CMD_SET.ACK_JOYSTICK_BUTTON_PRESSED
self.send_command(cmd)
def analog_write_onboard_DAC(self,dac,value):
cmd = bytearray(self.tx_buffer_length)
cmd[1] = CMD_SET.ANALOG_WRITE_ONBOARD_DAC
cmd[2] = dac
cmd[3] = (value >> 8) & 0xff
cmd[4] = value & 0xff
self.send_command(cmd)
def send_command(self,command):
self._cmd_id = (self._cmd_id + 1)%256
command[0] = self._cmd_id
# command[self.tx_buffer_length-1] = self._calculate_CRC(command)
self.serial.write(command)
self.mcu_cmd_execution_in_progress = True
self.last_command = command
self.timeout_counter = 0
def resend_last_command(self):
self.serial.write(self.last_command)
self.mcu_cmd_execution_in_progress = True
self.timeout_counter = 0
    def read_received_packet(self):
        """Background-thread loop: read and parse MCU status packets.

        Runs until ``terminate_reading_received_packet_thread`` is set.
        Each fixed-length packet updates the cached positions, the
        button/switch state, and the command-completion flag; a persistent
        id mismatch triggers a retransmission of the last command.
        NOTE(review): the loop busy-waits (no sleep) while the serial
        buffer is empty — presumably acceptable here, but worth confirming
        CPU usage on the target platform.
        """
        while self.terminate_reading_received_packet_thread == False:
            # wait to receive data
            if self.serial.in_waiting==0:
                continue
            # wait until only whole packets are buffered
            if self.serial.in_waiting % self.rx_buffer_length != 0:
                continue
            # get rid of old data: keep only the newest packet in the buffer
            num_bytes_in_rx_buffer = self.serial.in_waiting
            if num_bytes_in_rx_buffer > self.rx_buffer_length:
                # print('getting rid of old data')
                for i in range(num_bytes_in_rx_buffer-self.rx_buffer_length):
                    self.serial.read()
            # read the buffer one byte at a time into a list of ints
            msg=[]
            for i in range(self.rx_buffer_length):
                msg.append(ord(self.serial.read()))
            # parse the message
            '''
            - command ID (1 byte)
            - execution status (1 byte)
            - X pos (4 bytes)
            - Y pos (4 bytes)
            - Z pos (4 bytes)
            - Theta (4 bytes)
            - buttons and switches (1 byte)
            - reserved (4 bytes)
            - CRC (1 byte)
            '''
            self._cmd_id_mcu = msg[0]
            self._cmd_execution_status = msg[1]
            # the MCU echoes the id of the last command it finished; a match
            # with COMPLETED_WITHOUT_ERRORS clears the busy flag
            if (self._cmd_id_mcu == self._cmd_id) and (self._cmd_execution_status == CMD_EXECUTION_STATUS.COMPLETED_WITHOUT_ERRORS):
                if self.mcu_cmd_execution_in_progress == True:
                    self.mcu_cmd_execution_in_progress = False
                    print(' mcu command ' + str(self._cmd_id) + ' complete')
            # id mismatch: the MCU may have missed the command — count packets
            # and retransmit after more than 10 consecutive mismatches
            elif self._cmd_id_mcu != self._cmd_id and self.last_command != None:
                self.timeout_counter = self.timeout_counter + 1
                if self.timeout_counter > 10:
                    self.resend_last_command()
                    print(' *** resend the last command')
            # print('command id ' + str(self._cmd_id) + '; mcu command ' + str(self._cmd_id_mcu) + ' status: ' + str(msg[1]) )
            # decode the signed 4-byte position fields
            self.x_pos = self._payload_to_int(msg[2:6],MicrocontrollerDef.N_BYTES_POS) # unit: microstep or encoder resolution
            self.y_pos = self._payload_to_int(msg[6:10],MicrocontrollerDef.N_BYTES_POS) # unit: microstep or encoder resolution
            self.z_pos = self._payload_to_int(msg[10:14],MicrocontrollerDef.N_BYTES_POS) # unit: microstep or encoder resolution
            self.theta_pos = self._payload_to_int(msg[14:18],MicrocontrollerDef.N_BYTES_POS) # unit: microstep or encoder resolution
            self.button_and_switch_state = msg[18]
            # joystick button: detect the rising edge, then acknowledge it
            tmp = self.button_and_switch_state & (1 << BIT_POS_JOYSTICK_BUTTON)
            joystick_button_pressed = tmp > 0
            if self.joystick_button_pressed == False and joystick_button_pressed == True:
                self.signal_joystick_button_pressed_event = True
                self.ack_joystick_button_pressed()
            self.joystick_button_pressed = joystick_button_pressed
            # switch
            tmp = self.button_and_switch_state & (1 << BIT_POS_SWITCH)
            self.switch_state = tmp > 0
            # notify an external observer (if registered) of the fresh packet
            if self.new_packet_callback_external is not None:
                self.new_packet_callback_external(self)
def get_pos(self):
return self.x_pos, self.y_pos, self.z_pos, self.theta_pos
def get_button_and_switch_state(self):
return self.button_and_switch_state
def is_busy(self):
return self.mcu_cmd_execution_in_progress
def set_callback(self,function):
self.new_packet_callback_external = function
def _int_to_payload(self,signed_int,number_of_bytes):
if signed_int >= 0:
payload = signed_int
else:
payload = 2**(8*number_of_bytes) + signed_int # find two's completement
return payload
def _payload_to_int(self,payload,number_of_bytes):
signed = 0
for i in range(number_of_bytes):
signed = signed + int(payload[i])*(256**(number_of_bytes-1-i))
if signed >= 256**number_of_bytes/2:
signed = signed - 256**number_of_bytes
return signed
class Microcontroller_Simulation():
    """Software stand-in for the hardware Microcontroller class.

    Presents the same public interface but opens no serial port: motion
    commands update the cached positions immediately, and a background
    thread emulates the MCU status stream, marking every command complete
    ~0.05 s after it was "sent".
    """
    def __init__(self,parent=None):
        self.serial = None
        self.platform_name = platform.system()
        self.tx_buffer_length = MicrocontrollerDef.CMD_LENGTH
        self.rx_buffer_length = MicrocontrollerDef.MSG_LENGTH
        self._cmd_id = 0
        self._cmd_id_mcu = None # command id of mcu's last received command
        self._cmd_execution_status = None
        self.mcu_cmd_execution_in_progress = False
        self.x_pos = 0 # unit: microstep or encoder resolution
        self.y_pos = 0 # unit: microstep or encoder resolution
        self.z_pos = 0 # unit: microstep or encoder resolution
        self.theta_pos = 0 # unit: microstep or encoder resolution
        self.button_and_switch_state = 0
        self.joystick_button_pressed = 0
        self.signal_joystick_button_pressed_event = False
        self.switch_state = 0
        # for simulation
        self.timestamp_last_command = time.time() # for simulation only
        self._mcu_cmd_execution_status = None
        self.timer_update_command_execution_status = QTimer()
        self.timer_update_command_execution_status.timeout.connect(self._simulation_update_cmd_execution_status)
        self.new_packet_callback_external = None
        self.terminate_reading_received_packet_thread = False
        self.thread_read_received_packet = threading.Thread(target=self.read_received_packet, daemon=True)
        self.thread_read_received_packet.start()
    def close(self):
        """Stop the simulated packet-reader thread."""
        self.terminate_reading_received_packet_thread = True
        self.thread_read_received_packet.join()
    def move_x_usteps(self,usteps):
        """Simulate a relative X move by updating the cached position."""
        self.x_pos = self.x_pos + STAGE_MOVEMENT_SIGN_X*usteps
        cmd = bytearray(self.tx_buffer_length)
        self.send_command(cmd)
        print(' mcu command ' + str(self._cmd_id) + ': move x')
    def move_x_to_usteps(self,usteps):
        """Simulate an absolute X move."""
        self.x_pos = STAGE_MOVEMENT_SIGN_X*usteps
        cmd = bytearray(self.tx_buffer_length)
        self.send_command(cmd)
        print(' mcu command ' + str(self._cmd_id) + ': move x to')
    def move_y_usteps(self,usteps):
        """Simulate a relative Y move by updating the cached position."""
        self.y_pos = self.y_pos + STAGE_MOVEMENT_SIGN_Y*usteps
        cmd = bytearray(self.tx_buffer_length)
        self.send_command(cmd)
        print(' mcu command ' + str(self._cmd_id) + ': move y')
    def move_y_to_usteps(self,usteps):
        """Simulate an absolute Y move."""
        self.y_pos = STAGE_MOVEMENT_SIGN_Y*usteps
        cmd = bytearray(self.tx_buffer_length)
        self.send_command(cmd)
        print(' mcu command ' + str(self._cmd_id) + ': move y to')
    def move_z_usteps(self,usteps):
        """Simulate a relative Z move by updating the cached position."""
        self.z_pos = self.z_pos + STAGE_MOVEMENT_SIGN_Z*usteps
        cmd = bytearray(self.tx_buffer_length)
        self.send_command(cmd)
        print(' mcu command ' + str(self._cmd_id) + ': move z')
    def move_z_to_usteps(self,usteps):
        """Simulate an absolute Z move."""
        self.z_pos = STAGE_MOVEMENT_SIGN_Z*usteps
        cmd = bytearray(self.tx_buffer_length)
        self.send_command(cmd)
        print(' mcu command ' + str(self._cmd_id) + ': move z to')
    def move_theta_usteps(self,usteps):
        """Simulate a relative theta move.

        NOTE(review): unlike the real controller, no STAGE_MOVEMENT_SIGN_THETA
        is applied here — confirm whether that is intentional.
        """
        self.theta_pos = self.theta_pos + usteps
        cmd = bytearray(self.tx_buffer_length)
        self.send_command(cmd)
    def home_x(self):
        """Simulate homing X: position snaps to zero."""
        self.x_pos = 0
        cmd = bytearray(self.tx_buffer_length)
        self.send_command(cmd)
        print(' mcu command ' + str(self._cmd_id) + ': home x')
    def home_y(self):
        """Simulate homing Y: position snaps to zero."""
        self.y_pos = 0
        cmd = bytearray(self.tx_buffer_length)
        self.send_command(cmd)
        print(' mcu command ' + str(self._cmd_id) + ': home y')
    def home_z(self):
        """Simulate homing Z: position snaps to zero."""
        self.z_pos = 0
        cmd = bytearray(self.tx_buffer_length)
        self.send_command(cmd)
        print(' mcu command ' + str(self._cmd_id) + ': home z')
    def home_xy(self):
        """Simulate homing X and Y together."""
        self.x_pos = 0
        self.y_pos = 0
        cmd = bytearray(self.tx_buffer_length)
        self.send_command(cmd)
        print(' mcu command ' + str(self._cmd_id) + ': home xy')
    def home_theta(self):
        """Simulate homing theta."""
        self.theta_pos = 0
        cmd = bytearray(self.tx_buffer_length)
        self.send_command(cmd)
    def zero_x(self):
        """Simulate zeroing the X position counter."""
        self.x_pos = 0
        cmd = bytearray(self.tx_buffer_length)
        self.send_command(cmd)
        print(' mcu command ' + str(self._cmd_id) + ': zero x')
    def zero_y(self):
        """Simulate zeroing the Y position counter."""
        self.y_pos = 0
        cmd = bytearray(self.tx_buffer_length)
        self.send_command(cmd)
        print(' mcu command ' + str(self._cmd_id) + ': zero y')
    def zero_z(self):
        """Simulate zeroing the Z position counter."""
        self.z_pos = 0
        cmd = bytearray(self.tx_buffer_length)
        self.send_command(cmd)
        print(' mcu command ' + str(self._cmd_id) + ': zero z')
    def zero_theta(self):
        """Simulate zeroing the theta position counter."""
        self.theta_pos = 0
        cmd = bytearray(self.tx_buffer_length)
        self.send_command(cmd)
    def set_lim(self,limit_code,usteps):
        """Accept (and ignore) a software-limit command."""
        cmd = bytearray(self.tx_buffer_length)
        self.send_command(cmd)
    def configure_motor_driver(self,axis,microstepping,current_rms,I_hold):
        """Build the same CONFIGURE_STEPPER_DRIVER packet as the real class.

        current_rms in mA; I_hold is the holding-current fraction 0.0-1.0.
        """
        cmd = bytearray(self.tx_buffer_length)
        cmd[1] = CMD_SET.CONFIGURE_STEPPER_DRIVER
        cmd[2] = axis
        if microstepping == 1:
            cmd[3] = 0  # full-step mode is encoded as 0 on the wire
        else:
            cmd[3] = microstepping
        cmd[4] = current_rms >> 8
        cmd[5] = current_rms & 0xff
        cmd[6] = int(I_hold*255)
        self.send_command(cmd)
    def set_max_velocity_acceleration(self,axis,velocity,acceleration):
        """Build the same SET_MAX_VELOCITY_ACCELERATION packet as the real class.

        velocity: max 65535/100 mm/s; acceleration: max 65535/10 mm/s^2.
        """
        cmd = bytearray(self.tx_buffer_length)
        cmd[1] = CMD_SET.SET_MAX_VELOCITY_ACCELERATION
        cmd[2] = axis
        cmd[3] = int(velocity*100) >> 8
        cmd[4] = int(velocity*100) & 0xff
        cmd[5] = int(acceleration*10) >> 8
        cmd[6] = int(acceleration*10) & 0xff
        self.send_command(cmd)
    def set_leadscrew_pitch(self,axis,pitch_mm):
        """Build the same SET_LEAD_SCREW_PITCH packet as the real class.

        pitch: max 65535/1000 = 65.535 (mm).
        """
        cmd = bytearray(self.tx_buffer_length)
        cmd[1] = CMD_SET.SET_LEAD_SCREW_PITCH
        cmd[2] = axis
        cmd[3] = int(pitch_mm*1000) >> 8
        cmd[4] = int(pitch_mm*1000) & 0xff
        self.send_command(cmd)
    def set_limit_switch_polarity(self,axis,polarity):
        """Build the same SET_LIM_SWITCH_POLARITY packet as the real class."""
        cmd = bytearray(self.tx_buffer_length)
        cmd[1] = CMD_SET.SET_LIM_SWITCH_POLARITY
        cmd[2] = axis
        cmd[3] = polarity
        self.send_command(cmd)
    def configure_actuators(self):
        """Push the default actuator configuration (mirrors the real class)."""
        # lead screw pitch
        self.set_leadscrew_pitch(AXIS.X,SCREW_PITCH_X_MM)
        self.set_leadscrew_pitch(AXIS.Y,SCREW_PITCH_Y_MM)
        self.set_leadscrew_pitch(AXIS.Z,SCREW_PITCH_Z_MM)
        # stepper driver (microstepping,rms current and I_hold)
        self.configure_motor_driver(AXIS.X,MICROSTEPPING_DEFAULT_X,X_MOTOR_RMS_CURRENT_mA,X_MOTOR_I_HOLD)
        self.configure_motor_driver(AXIS.Y,MICROSTEPPING_DEFAULT_Y,Y_MOTOR_RMS_CURRENT_mA,Y_MOTOR_I_HOLD)
        self.configure_motor_driver(AXIS.Z,MICROSTEPPING_DEFAULT_Z,Z_MOTOR_RMS_CURRENT_mA,Z_MOTOR_I_HOLD)
        # max velocity and acceleration
        # BUGFIX: Y and Z previously reused MAX_VELOCITY_X_mm (copy/paste),
        # inconsistent with the real Microcontroller.configure_actuators.
        self.set_max_velocity_acceleration(AXIS.X,MAX_VELOCITY_X_mm,MAX_ACCELERATION_X_mm)
        self.set_max_velocity_acceleration(AXIS.Y,MAX_VELOCITY_Y_mm,MAX_ACCELERATION_Y_mm)
        self.set_max_velocity_acceleration(AXIS.Z,MAX_VELOCITY_Z_mm,MAX_ACCELERATION_Z_mm)
        # home switch
        self.set_limit_switch_polarity(AXIS.X,X_HOME_SWITCH_POLARITY)
        self.set_limit_switch_polarity(AXIS.Y,Y_HOME_SWITCH_POLARITY)
        self.set_limit_switch_polarity(AXIS.Z,Z_HOME_SWITCH_POLARITY)
    def analog_write_onboard_DAC(self,dac,value):
        """Build the same ANALOG_WRITE_ONBOARD_DAC packet as the real class."""
        cmd = bytearray(self.tx_buffer_length)
        cmd[1] = CMD_SET.ANALOG_WRITE_ONBOARD_DAC
        cmd[2] = dac
        cmd[3] = (value >> 8) & 0xff
        cmd[4] = value & 0xff
        self.send_command(cmd)
    def read_received_packet(self):
        """Background-thread loop: synthesize MCU status packets.

        Marks the pending command complete once 0.05 s have elapsed since
        it was issued, then builds a zero-filled message carrying only the
        command id and execution status.
        """
        while self.terminate_reading_received_packet_thread == False:
            # only for simulation - update the command execution status
            if time.time() - self.timestamp_last_command > 0.05: # in the simulation, assume all the operation takes 0.05s to complete
                if self._mcu_cmd_execution_status != CMD_EXECUTION_STATUS.COMPLETED_WITHOUT_ERRORS:
                    self._mcu_cmd_execution_status = CMD_EXECUTION_STATUS.COMPLETED_WITHOUT_ERRORS
                    print(' mcu command ' + str(self._cmd_id) + ' complete')
            # read and parse message (synthetic, all-zero payload)
            msg=[]
            for i in range(self.rx_buffer_length):
                msg.append(0)
            msg[0] = self._cmd_id
            msg[1] = self._mcu_cmd_execution_status
            self._cmd_id_mcu = msg[0]
            self._cmd_execution_status = msg[1]
            if (self._cmd_id_mcu == self._cmd_id) and (self._cmd_execution_status == CMD_EXECUTION_STATUS.COMPLETED_WITHOUT_ERRORS):
                self.mcu_cmd_execution_in_progress = False
            # print('mcu_cmd_execution_in_progress: ' + str(self.mcu_cmd_execution_in_progress))
            # positions are not decoded in the simulation; the move methods
            # update self.x_pos / y_pos / z_pos / theta_pos directly
            self.button_and_switch_state = msg[18]
            if self.new_packet_callback_external is not None:
                self.new_packet_callback_external(self)
            time.sleep(0.005) # simulate MCU packet transmission interval
    def turn_on_illumination(self):
        """Simulate switching the illumination on."""
        cmd = bytearray(self.tx_buffer_length)
        self.send_command(cmd)
        print(' mcu command ' + str(self._cmd_id) + ': turn on illumination')
    def turn_off_illumination(self):
        """Simulate switching the illumination off."""
        cmd = bytearray(self.tx_buffer_length)
        self.send_command(cmd)
        print(' mcu command ' + str(self._cmd_id) + ': turn off illumination')
    def set_illumination(self,illumination_source,intensity):
        """Simulate a SET_ILLUMINATION command (parameters ignored)."""
        cmd = bytearray(self.tx_buffer_length)
        self.send_command(cmd)
        print(' mcu command ' + str(self._cmd_id) + ': set illumination')
    def set_illumination_led_matrix(self,illumination_source,r,g,b):
        """Simulate a SET_ILLUMINATION_LED_MATRIX command (parameters ignored)."""
        cmd = bytearray(self.tx_buffer_length)
        self.send_command(cmd)
        print(' mcu command ' + str(self._cmd_id) + ': set illumination (led matrix)')
    def send_hardware_trigger(self,control_illumination=False,illumination_on_time_us=0,trigger_output_ch = 0):
        """Build the same SEND_HARDWARE_TRIGGER packet as the real class."""
        illumination_on_time_us = int(illumination_on_time_us)
        cmd = bytearray(self.tx_buffer_length)
        cmd[1] = CMD_SET.SEND_HARDWARE_TRIGGER
        cmd[2] = (control_illumination<<7) + trigger_output_ch # MSB: whether illumination is controlled
        cmd[3] = illumination_on_time_us >> 24
        cmd[4] = (illumination_on_time_us >> 16) & 0xff
        cmd[5] = (illumination_on_time_us >> 8) & 0xff
        cmd[6] = illumination_on_time_us & 0xff
        self.send_command(cmd)
    def set_strobe_delay_us(self, strobe_delay_us, camera_channel=0):
        """Build the same SET_STROBE_DELAY packet as the real class."""
        print('set strobe delay')
        cmd = bytearray(self.tx_buffer_length)
        cmd[1] = CMD_SET.SET_STROBE_DELAY
        cmd[2] = camera_channel
        cmd[3] = strobe_delay_us >> 24
        cmd[4] = (strobe_delay_us >> 16) & 0xff
        cmd[5] = (strobe_delay_us >> 8) & 0xff
        cmd[6] = strobe_delay_us & 0xff
        self.send_command(cmd)
    def get_pos(self):
        """Return the cached (x, y, z, theta) positions."""
        return self.x_pos, self.y_pos, self.z_pos, self.theta_pos
    def get_button_and_switch_state(self):
        """Return the (always zero in simulation) button/switch byte."""
        return self.button_and_switch_state
    def set_callback(self,function):
        """Register a callable invoked with `self` after each synthetic packet."""
        self.new_packet_callback_external = function
    def is_busy(self):
        """Return True while the simulated command is still 'executing'."""
        return self.mcu_cmd_execution_in_progress
    def send_command(self,command):
        """Stamp a command id and record the send time for the simulation."""
        self._cmd_id = (self._cmd_id + 1)%256
        command[0] = self._cmd_id
        # command[self.tx_buffer_length-1] = self._calculate_CRC(command)
        self.mcu_cmd_execution_in_progress = True
        # for simulation: the reader thread flips the status after 0.05 s
        self._mcu_cmd_execution_status = CMD_EXECUTION_STATUS.IN_PROGRESS
        # self.timer_update_command_execution_status.setInterval(2000)
        # self.timer_update_command_execution_status.start()
        # timer cannot be started from another thread
        self.timestamp_last_command = time.time()
    def _simulation_update_cmd_execution_status(self):
        # print('simulation - MCU command execution finished')
        # self._mcu_cmd_execution_status = CMD_EXECUTION_STATUS.COMPLETED_WITHOUT_ERRORS
        # self.timer_update_command_execution_status.stop()
        pass # timer cannot be started from another thread
# models/MLP/train.py
import glob
import os
import pandas as pd
import numpy as np
import sys
sys.path.insert(-1, '/scratch/yd105/ML_MM_Benchmark')
# Torch
# Own
import flag_reader
from utils import data_reader
from class_wrapper import Network
from model_maker import Forward
from utils.helper_functions import put_param_into_folder, write_flags_and_BVE
def training_from_flag(flags):
    """Run one full training session driven by a parsed flags object.

    Steps: (1) load data, (2) build the network, (3) train,
    (4) record the flags together with the best validation error.

    :param flags: the training flags read from command line or parameter.py
    :return: None
    """
    if flags.use_cpu_only:
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # hide all GPUs from the framework
    # Get the data
    train_loader, test_loader = data_reader.read_data(flags)
    # When inputs are normalized, the geometry boundary must match [-1, 1]
    if flags.normalize_input:
        flags.geoboundary_norm = [-1, 1, -1, 1]
    print("Boundary is set at:", flags.geoboundary)
    print("Making network now")
    # Build the network wrapper around the Forward model
    ntwk = Network(Forward, flags, train_loader, test_loader)
    trainable_params = sum(p.numel() for p in ntwk.model.parameters() if p.requires_grad)
    print("Total learning parameter is: %d" % trainable_params)
    print("Start training now...")
    ntwk.train()
    # Persist the flags plus the best validation error (pickled) for bookkeeping
    write_flags_and_BVE(flags, ntwk.best_validation_loss, ntwk.ckpt_dir)
    # put_param_into_folder(ntwk.ckpt_dir)
def importData(flags):
    """Load feature/label arrays from the Yang CSV training files.

    Reads every .csv under <flags.data_dir>/Yang/dataIn, taking the
    columns in flags.x_range as features and flags.y_range as labels.
    Prints per-feature min/max and, as a side effect, writes the merged
    arrays to data_x.csv / data_y.csv in the working directory.

    :return: (features, labels) as float32 numpy arrays
    """
    directory = os.path.join(flags.data_dir, 'Yang', 'dataIn')
    x_range = flags.x_range
    y_range = flags.y_range
    train_data_files = [f for f in os.listdir(directory) if f.endswith('.csv')]
    print(train_data_files)
    # accumulate data points across all files
    ftr = []
    lbl = []
    for file_name in train_data_files:
        print(x_range)
        file_path = os.path.join(directory, file_name)
        # NOTE: each file is parsed twice, once per column range
        ftr_array = pd.read_csv(file_path, delimiter=',', header=None, usecols=x_range)
        lbl_array = pd.read_csv(file_path, delimiter=',', header=None, usecols=y_range)
        for params, curve in zip(ftr_array.values, lbl_array.values):
            ftr.append(params)
            lbl.append(curve)
    ftr = np.array(ftr, dtype='float32')
    lbl = np.array(lbl, dtype='float32')
    for i in range(len(ftr[0, :])):
        print('For feature {}, the max is {} and min is {}'.format(i, np.max(ftr[:, i]), np.min(ftr[:, i])))
    print(ftr.shape, lbl.shape)
    np.savetxt('data_x.csv', ftr, delimiter=',')
    np.savetxt('data_y.csv', lbl, delimiter=',')
    return ftr, lbl
def data_check():
    """Sanity-check the data_x.csv / data_y.csv dumps in the working directory."""
    x = pd.read_csv('data_x.csv', delimiter=',', header=None).to_numpy()
    y = pd.read_csv('data_y.csv', delimiter=',', header=None).to_numpy()
    print(x.shape, y.shape, x.dtype, y.dtype)
    return
if __name__ == '__main__':
    # Read the training hyper-parameters from the command line / parameter.py
    flags = flag_reader.read_flag()
    # Launch a full training run with those flags
    training_from_flag(flags)
# altanalyze/import_scripts/BAMtoGeneVariants.py
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""This script can be run on its own to extract a single BAM file at a time or
indirectly by multiBAMtoBED.py to extract exon.bed files (Tophat format)
from many BAM files in a single directory at once. Requires an exon.bed reference
file for exon coordinates (genomic bins for which to sum unique read counts).
Excludes junction reads within each interval"""
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import pysam
import copy
import time
import getopt
def findGeneVariants(species,symbols,bam_dir,variants=None):
    """Discover and quantify sequence variants in a BAM file.

    If gene symbols are supplied, their genomic coordinates are looked up;
    otherwise coordinates are read from the tab-delimited `variants` file.
    Variants seen in more than 3 reads are kept and then quantified against
    background via pileupAnalysis. (Python 2 code - uses print statements.)
    """
    global insertion_db
    insertion_db={}
    print symbols
    print bam_dir
    if len(symbols)>0:
        ### Search for genes not for coordinates
        search_locations = geneCoordinates(species,symbols)
    else:
        ### Search for coordinates and not genes
        search_locations = variantCoordinates(variants)
    ### Discover the variants
    variant_db = findVariants(bam_dir,search_locations)
    variant_filtered_db={}
    for var in variant_db:
        #print var, variant_db[var]
        # keep variants supported by more than 3 reads
        if variant_db[var]>3:
            #print var,variant_db[var]
            variant_filtered_db[var] = variant_db[var]
    ### Quantify the variants versus background
    pileupAnalysis(bam_dir,variant_filtered_db)
def variantCoordinates(variants):
    """Load genomic search intervals from a tab-delimited variant file.

    Each non-empty line of *variants* must contain four tab-separated
    fields: chromosome, start, end, symbol.  A 'chr' prefix is added to
    the chromosome name when it is missing; strand is not encoded in the
    file, so it is reported as 'NA'.

    Fixes relative to the original implementation:
      * the input file handle is now closed (it previously leaked)
      * the 'rU' open mode (removed in Python 3.11) and the Python-2-only
        string.split helper are no longer used
      * blank lines are skipped instead of raising on tuple unpacking

    Returns a list of [chr, strand, start, end, symbol] lists (all fields
    kept as strings, matching the original behavior).
    """
    search_locations = []
    with open(variants) as contents:
        for line in contents:
            line = line.rstrip()
            if not line:
                continue  # tolerate blank/trailing lines
            chr, start, end, symbol = line.split('\t')
            if not chr.startswith('chr'):
                chr = 'chr' + chr
            strand = 'NA'  # strand is unknown for user-supplied coordinates
            search_locations.append([chr, strand, start, end, symbol])
    return search_locations
def geneCoordinates(species,symbols):
    """Resolve gene symbols to genomic intervals via Ensembl annotations.

    species -- Ensembl species code passed to EnsemblImport.
    symbols -- list of gene symbols to look up.

    Returns a list of [chr, strand, start, end, symbol] entries; symbols
    that cannot be resolved (or that map to an unusually long chromosome
    name, taken to be a scaffold/patch reference) are reported and skipped.
    """
    genes=[]
    # Local import: EnsemblImport is only needed for symbol-based searches.
    from build_scripts import EnsemblImport
    ensembl_annotation_db = EnsemblImport.reimportEnsemblAnnotations(species,symbolKey=True)
    for symbol in symbols:
        if symbol in ensembl_annotation_db:
            ens_geneid = ensembl_annotation_db[symbol]
            genes.append((ens_geneid,symbol))
        else:
            print symbol, 'not found'
    ### Get gene genomic locations
    gene_location_db = EnsemblImport.getEnsemblGeneLocations(species,'RNASeq','key_by_array')
    search_locations=[]
    for (gene,symbol) in genes:
        chr,strand,start,end = gene_location_db[gene]
        #if symbol == 'SRSF10': chr = 'chr1'; strand = '-'; start = '24295573'; end = '24306953'
        # Names longer than 6 chars (e.g. 'chr11_gl000202_random') are not
        # primary chromosomes; exclude them from the search.
        if len(chr)>6: print symbol, 'bad chromosomal reference:',chr
        else:
            search_locations.append([chr,strand,start,end,symbol])
    return search_locations
def findVariants(bam_dir,search_locations,multi=False):
    """Scan the BAM file over each search interval and count, per genomic
    position, the reads supporting an insertion (from the CIGAR string) or
    a mismatch/deletion (from the MD tag).

    bam_dir          -- path to an indexed BAM file.
    search_locations -- list of [chr, strand, start, stop, symbol] entries.
    multi            -- when True, suppress the timing printout
                        (multi-sample batch mode).

    Returns {(chr, pos, symbol): supporting-read count}.  Positions that
    arose from insertions are additionally recorded in the module-global
    insertion_db (initialized by findGeneVariants).
    """
    start_time = time.time()
    bamfile = pysam.Samfile(bam_dir, "rb" )
    output_bed_rows=0
    #https://www.biostars.org/p/76119/
    variant_db={}
    reference_rows=0
    # NOTE(review): this handle is opened but never written in this function;
    # the same path is re-opened (and overwritten) later by pileupAnalysis().
    o = open (string.replace(bam_dir,'.bam','__variant.txt'),"w")
    for (chr,strand,start,stop,symbol) in search_locations: ### read each line one-at-a-time rather than loading all in memory
        read_count=0
        reference_rows+=1
        stop=int(stop)+100 ### buffer for single variants
        start=int(start)-100 ### buffer for single variants
        for alignedread in bamfile.fetch(chr, int(start),int(stop)):
            # MD tag: run-length encoding of matches/mismatches/deletions
            # relative to the reference.
            md = alignedread.opt('MD')
            omd = md
            # CIGAR operation codes for this read (0=M, 1=I, 2=D, 3=N, ...).
            codes = map(lambda x: x[0],alignedread.cigar)
            cigarstring = alignedread.cigarstring
            #print symbol,cigarstring
            if 1 in codes and alignedread.pos:
                ### Thus an insertion is present
                cigarstring = alignedread.cigarstring
                chr = bamfile.getrname(alignedread.rname)
                pos = alignedread.pos
                def getInsertions(cigarList,X):
                    # Walk the CIGAR; advance along the reference for
                    # reference-consuming ops (M=0, N=3) and convert each
                    # insertion (code 1) into a genomic coordinate.
                    cummulative=0
                    coordinates=[]
                    for (code,seqlen) in cigarList:
                        if code == 0 or code == 3:
                            cummulative+=seqlen
                        if code == 1:
                            coordinates.append(X+cummulative)
                    return coordinates
                coordinates = getInsertions(alignedread.cigar,pos)
                """
                print pos
                print coordinates
                print alignedread.seq
                print codes
                print alignedread.cigar
                print cigarstring
                print md;sys.exit()
                """
                for pos in coordinates:
                    try: variant_db[chr,pos,symbol]+=1
                    except Exception: variant_db[chr,pos,symbol] = 1
                    insertion_db[chr,pos]=[]
                # Insertion reads are fully handled; skip the MD-tag path.
                continue
            try:
                int(md) ### If an integer, no mismatches or deletions present
                continue
            except Exception:
                #print chr, int(start),int(stop)
                #print alignedread.get_reference_sequence()
                #print alignedread.seq
                # Collapse every reference base letter to 'A' so the MD tag
                # can be split into its numeric matching-run components.
                md = string.replace(md,'C','A')
                md = string.replace(md,'G','A')
                md = string.replace(md,'T','A')
                md = string.split(md,'A')
            pos = alignedread.pos
            chr = bamfile.getrname(alignedread.rname)
            #if omd == '34^GA16': print md, pos
            for i in md[:-1]:
                try:
                    pos+=int(i)+1
                except Exception:
                    if i == '':
                        pos+=+1
                    elif '^' in i: ### position is equal to the last position
                        # NOTE(review): deletion segments in the MD tag look
                        # like '34^GA16'; only the leading match length is
                        # advanced here -- confirm deletion-length handling.
                        pos+=int(string.split(i,'^')[0])+1
                        #pass
                #if 'CGGATCC' in alignedread.seq: print string.split(alignedread.seq,'CGGATCC')[1],[pos]
                try: variant_db[chr,pos,symbol]+=1
                except Exception: variant_db[chr,pos,symbol] = 1
            #codes = map(lambda x: x[0],alignedread.cigar)
            output_bed_rows+=1
    o.close()
    bamfile.close()
    if multi==False:
        print time.time()-start_time, 'seconds to assign reads for %d entries from %d reference entries' % (output_bed_rows,reference_rows)
    #print variant_db;sys.exit()
    return variant_db
def pileupAnalysis(bam_dir,search_locations,multi=False):
    """For every candidate variant position, pile up all covering reads,
    tabulate per-base frequencies and write one summary line per reported
    site to <bam>__variant.txt.

    bam_dir          -- path to an indexed BAM file.
    search_locations -- {(chr, pos, symbol): supporting-read count}, the
                        pre-filtered dictionary produced by findVariants().
    multi            -- when True, suppress the timing printout.
    """
    start_time = time.time()
    bamfile = pysam.Samfile(bam_dir, "rb" )
    reference_rows=0
    output_bed_rows=0
    #https://www.biostars.org/p/76119/
    variant_db={}
    # Output report; this overwrites the file handle findVariants() created.
    o = open (string.replace(bam_dir,'.bam','__variant.txt'),"w")
    entries = ['chr','position','rare-allele frq','type','depth','gene','variant_info','alt_frq']
    o.write(string.join(entries,'\t')+'\n')
    #print 'Analyzing',len(search_locations),'variants'
    for (chr,pos,symbol) in search_locations: ### read each line one-at-a-time rather than loading all in memory
        pos = int(pos)
        read_count=0
        reference_rows+=1
        nucleotide_frequency={}
        for pileupcolumn in bamfile.pileup(chr,pos,pos+1):
            # Skip columns outside desired range
            #print pos, pileupcolumn.pos, pileupcolumn.cigarstring, pileupcolumn.alignment.pos
            # pileup() yields every column touched by overlapping reads, so
            # restrict to the 0-based column matching this position.
            if pileupcolumn.pos == (pos-1):
                for pileupread in pileupcolumn.pileups:
                    try: nt = pileupread.alignment.query_sequence[pileupread.query_position]
                    except Exception,e:
                        # query_position indexing fails for reads with an
                        # indel at this column; classify via the CIGAR.
                        if 'D' in pileupread.alignment.cigarstring:
                            nt = 'del'
                        else:
                            nt = 'ins'
                    try: nucleotide_frequency[nt]+=1
                    except Exception: nucleotide_frequency[nt]=1
        nt_freq_list=[]
        nt_freq_list_tuple=[]
        for nt in nucleotide_frequency:
            nt_freq_list.append(nucleotide_frequency[nt])
            nt_freq_list_tuple.append([nucleotide_frequency[nt],nt])
        s = sum(nt_freq_list)  # total pileup depth at this position
        nt_freq_list.sort()
        nt_freq_list_tuple.sort()
        try:
            # Rare-allele frequency: reads supporting the variant (counted
            # earlier by findVariants) over total depth here.
            frq = float(search_locations[chr,pos,symbol])/s ### This fixes that (number of insertions from before)
        except Exception: frq = '1.000000'; print symbol, pos, nucleotide_frequency, search_locations[chr,pos,symbol]
        if (chr,pos) in insertion_db:
            #print 'insertion', chr, pos
            call = 'insertion'
            ### For insertions if the inserted base matches the reference base, incorrect freq will be reported
        elif 'del' in nucleotide_frequency:
            #frq = float(nt_freq_list[-2])/s
            call = 'del'
        else:
            #frq = float(nt_freq_list[-2])/s
            call = 'mismatch'
        # Report only multi-allelic sites (or any insertion) whose rare-allele
        # frequency exceeds 1%.
        # NOTE(review): on the exception path above frq is the *string*
        # '1.000000'; in Python 2 a str > float comparison is always True.
        if len(nt_freq_list)>1 or call == 'insertion':
            if frq>0.01:
                frq = str(frq)[:4]
                most_frequent_frq,most_frequent_nt = nt_freq_list_tuple[-1]
                try:
                    second_most_frequent_frq,second_most_frequent_nt = nt_freq_list_tuple[-2]
                    alt_frq = str(float(second_most_frequent_frq)/most_frequent_frq)
                except Exception:
                    # Only one allele observed (insertion case).
                    second_most_frequent_frq = 'NA'; second_most_frequent_nt='NA'
                    alt_frq = 'NA'
                variant_info = most_frequent_nt+'('+str(most_frequent_frq)+')|'+second_most_frequent_nt+'('+str(second_most_frequent_frq)+')'
                entries = [chr,str(pos),str(frq),call,str(s),symbol,variant_info,alt_frq]
                o.write(string.join(entries,'\t')+'\n')
                output_bed_rows+=1
    o.close()
    bamfile.close()
    if multi==False:
        print time.time()-start_time, 'seconds to assign reads for %d entries from %d reference entries' % (output_bed_rows,reference_rows)
if __name__ == "__main__":
    #bam_dir = "H9.102.2.6.bam"
    #reference_dir = 'H9.102.2.6__exon.bed'
    ################ Command-line arguments ################
    # Options:
    #   --i       path to a single (indexed) BAM file (required)
    #   --species Ensembl species code, used together with --g
    #   --g       gene symbol to search (repeatable)
    #   --v       tab-delimited variant coordinate file (used when no --g)
    symbols=[]
    variantFile = None
    if len(sys.argv[1:])<=1: ### Indicates that there are insufficient number of command-line arguments
        print "Warning! Please designate a BAM file as input in the command-line"
        print "Example: python BAMtoExonBED.py --i /Users/me/sample1.bam --r /Users/me/Hs_exon-cancer_hg19.bed"
        sys.exit()
    else:
        options, remainder = getopt.getopt(sys.argv[1:],'', ['i=','species=','g=','v='])
        for opt, arg in options:
            if opt == '--i': bam_dir=arg ### A single BAM file location (full path)
            elif opt == '--species': species=arg
            elif opt == '--g': symbols.append(arg)
            elif opt == '--v': variantFile = arg
            else:
                print "Warning! Command-line argument: %s not recognized. Exiting..." % opt; sys.exit()
        # NOTE(review): species and bam_dir are unbound if --species/--i were
        # omitted, which raises NameError here -- confirm intended CLI contract.
        findGeneVariants(species,symbols,bam_dir,variants=variantFile)
/Lantz-0.3.zip/Lantz-0.3/docs/guides/defaults.rst | .. _defaults_dictionary:
=======================
The DEFAULTS dictionary
=======================
Different instruments require different communication settings such as baud rate, end-of-message characters, etc. The :attr:`DEFAULTS` dictionary provides a way to customize resource initialization at the :class:`MessageBasedDriver` level, avoiding tedious customization in all instances.
It is easier to see it with an example. Let's start with simple case::
class MyDriver(MessageBasedDriver):
DEFAULTS = {
'COMMON': {'write_termination': '\n'}
}
The 'COMMON' key tells :class:`MessageBasedDriver` that 'write_termination' should be set to '\n' for all interface types (USB, GPIB, etc.).
But in certain cases, different resource types might require different settings::
DEFAULTS = {
'ASRL': {'write_termination': '\n',
'read_termination': '\r',
'baud_rate': 9600},
'USB': {'write_termination': '\n',
            'read_termination': '\n'}
}
This specifies a dictionary of settings for an ASRL (serial) resource and a different for USB. We might make this more concise::
DEFAULTS = {
'ASRL': {'read_termination': '\r',
'baud_rate': 9600},
    'USB': {'read_termination': '\n'},
'COMMON': {'write_termination': '\n'}
}
When you require a USB resource, Lantz will combine the USB and COMMON settings.
The interface type is not the only thing that defines the resource. For example TCPIP device can be a INSTR or SOCKET. You can also specify this in a tuple::
DEFAULTS = {
'INSTR': {'read_termination': '\r'},
    'SOCKET': {'read_termination': '\n'},
'COMMON': {'write_termination': '\n'}
}
This will specify that 'read_termination' will be set to '\r' for all INSTR resources. If you want to specify it only for TCPIP, use a tuple like this::
DEFAULTS = {
    ('TCPIP', 'INSTR'): {'read_termination': '\r'},
    'SOCKET': {'read_termination': '\n'},
'COMMON': {'write_termination': '\n'}
}
Overriding on initialization
----------------------------
You can override the defaults when you instantiate the instrument by passing these values as keyword arguments::
inst = MyDriver('TCPIP::localhost::5678::INSTR', read_termination='\t')
Colliding values
----------------
When multiple values are given for the same setting (for example 'read_termination' appears in both USB and COMMON) and a USB resource is requested, the following order defines the precedence:
- user provided keyword arguments.
- settings for (instrument_type, resource_type).
- settings for instrument_type: ASRL, USB, GPIB, TCPIP
- settings for resource_type: SOCKET, INSTR, RAW
- settings for COMMON
The rule is: more specific has precedence.
Valid settings
--------------
If you provide an invalid setting, you will get an exception upon initialization. The valid settings are defined by `Attributes per resource in PyVISA`_
.. _Attributes per resource in PyVISA: http://pyvisa.readthedocs.org/en/master/api/resources.html
| PypiClean |
/FlaskCms-0.0.4.tar.gz/FlaskCms-0.0.4/flask_cms/static/js/ckeditor/samples/plugins/htmlwriter/assets/outputforflash/swfobject.js | var swfobject=function(){function u(){if(!s){try{var a=d.getElementsByTagName("body")[0].appendChild(d.createElement("span"));a.parentNode.removeChild(a)}catch(b){return}s=!0;for(var a=x.length,c=0;c<a;c++)x[c]()}}function L(a){s?a():x[x.length]=a}function M(a){if(typeof m.addEventListener!=i)m.addEventListener("load",a,!1);else if(typeof d.addEventListener!=i)d.addEventListener("load",a,!1);else if(typeof m.attachEvent!=i)U(m,"onload",a);else if("function"==typeof m.onload){var b=m.onload;m.onload=
function(){b();a()}}else m.onload=a}function V(){var a=d.getElementsByTagName("body")[0],b=d.createElement(r);b.setAttribute("type",y);var c=a.appendChild(b);if(c){var f=0;(function(){if(typeof c.GetVariable!=i){var g=c.GetVariable("$version");g&&(g=g.split(" ")[1].split(","),e.pv=[parseInt(g[0],10),parseInt(g[1],10),parseInt(g[2],10)])}else if(10>f){f++;setTimeout(arguments.callee,10);return}a.removeChild(b);c=null;D()})()}else D()}function D(){var a=p.length;if(0<a)for(var b=0;b<a;b++){var c=p[b].id,
f=p[b].callbackFn,g={success:!1,id:c};if(0<e.pv[0]){var d=n(c);if(d)if(z(p[b].swfVersion)&&!(e.wk&&312>e.wk))t(c,!0),f&&(g.success=!0,g.ref=E(c),f(g));else if(p[b].expressInstall&&F()){g={};g.data=p[b].expressInstall;g.width=d.getAttribute("width")||"0";g.height=d.getAttribute("height")||"0";d.getAttribute("class")&&(g.styleclass=d.getAttribute("class"));d.getAttribute("align")&&(g.align=d.getAttribute("align"));for(var h={},d=d.getElementsByTagName("param"),j=d.length,k=0;k<j;k++)"movie"!=d[k].getAttribute("name").toLowerCase()&&
(h[d[k].getAttribute("name")]=d[k].getAttribute("value"));G(g,h,c,f)}else W(d),f&&f(g)}else if(t(c,!0),f){if((c=E(c))&&typeof c.SetVariable!=i)g.success=!0,g.ref=c;f(g)}}}function E(a){var b=null;if((a=n(a))&&"OBJECT"==a.nodeName)typeof a.SetVariable!=i?b=a:(a=a.getElementsByTagName(r)[0])&&(b=a);return b}function F(){return!A&&z("6.0.65")&&(e.win||e.mac)&&!(e.wk&&312>e.wk)}function G(a,b,c,f){A=!0;H=f||null;N={success:!1,id:c};var g=n(c);if(g){"OBJECT"==g.nodeName?(w=I(g),B=null):(w=g,B=c);a.id=
O;if(typeof a.width==i||!/%$/.test(a.width)&&310>parseInt(a.width,10))a.width="310";if(typeof a.height==i||!/%$/.test(a.height)&&137>parseInt(a.height,10))a.height="137";d.title=d.title.slice(0,47)+" - Flash Player Installation";f=e.ie&&e.win?"ActiveX":"PlugIn";f="MMredirectURL="+m.location.toString().replace(/&/g,"%26")+"&MMplayerType="+f+"&MMdoctitle="+d.title;b.flashvars=typeof b.flashvars!=i?b.flashvars+("&"+f):f;e.ie&&(e.win&&4!=g.readyState)&&(f=d.createElement("div"),c+="SWFObjectNew",f.setAttribute("id",
c),g.parentNode.insertBefore(f,g),g.style.display="none",function(){g.readyState==4?g.parentNode.removeChild(g):setTimeout(arguments.callee,10)}());J(a,b,c)}}function W(a){if(e.ie&&e.win&&4!=a.readyState){var b=d.createElement("div");a.parentNode.insertBefore(b,a);b.parentNode.replaceChild(I(a),b);a.style.display="none";(function(){4==a.readyState?a.parentNode.removeChild(a):setTimeout(arguments.callee,10)})()}else a.parentNode.replaceChild(I(a),a)}function I(a){var b=d.createElement("div");if(e.win&&
e.ie)b.innerHTML=a.innerHTML;else if(a=a.getElementsByTagName(r)[0])if(a=a.childNodes)for(var c=a.length,f=0;f<c;f++)!(1==a[f].nodeType&&"PARAM"==a[f].nodeName)&&8!=a[f].nodeType&&b.appendChild(a[f].cloneNode(!0));return b}function J(a,b,c){var f,g=n(c);if(e.wk&&312>e.wk)return f;if(g)if(typeof a.id==i&&(a.id=c),e.ie&&e.win){var o="",h;for(h in a)a[h]!=Object.prototype[h]&&("data"==h.toLowerCase()?b.movie=a[h]:"styleclass"==h.toLowerCase()?o+=' class="'+a[h]+'"':"classid"!=h.toLowerCase()&&(o+=" "+
h+'="'+a[h]+'"'));h="";for(var j in b)b[j]!=Object.prototype[j]&&(h+='<param name="'+j+'" value="'+b[j]+'" />');g.outerHTML='<object classid="clsid:D27CDB6E-AE6D-11cf-96B8-444553540000"'+o+">"+h+"</object>";C[C.length]=a.id;f=n(a.id)}else{j=d.createElement(r);j.setAttribute("type",y);for(var k in a)a[k]!=Object.prototype[k]&&("styleclass"==k.toLowerCase()?j.setAttribute("class",a[k]):"classid"!=k.toLowerCase()&&j.setAttribute(k,a[k]));for(o in b)b[o]!=Object.prototype[o]&&"movie"!=o.toLowerCase()&&
(a=j,h=o,k=b[o],c=d.createElement("param"),c.setAttribute("name",h),c.setAttribute("value",k),a.appendChild(c));g.parentNode.replaceChild(j,g);f=j}return f}function P(a){var b=n(a);b&&"OBJECT"==b.nodeName&&(e.ie&&e.win?(b.style.display="none",function(){if(4==b.readyState){var c=n(a);if(c){for(var f in c)"function"==typeof c[f]&&(c[f]=null);c.parentNode.removeChild(c)}}else setTimeout(arguments.callee,10)}()):b.parentNode.removeChild(b))}function n(a){var b=null;try{b=d.getElementById(a)}catch(c){}return b}
function U(a,b,c){a.attachEvent(b,c);v[v.length]=[a,b,c]}function z(a){var b=e.pv,a=a.split(".");a[0]=parseInt(a[0],10);a[1]=parseInt(a[1],10)||0;a[2]=parseInt(a[2],10)||0;return b[0]>a[0]||b[0]==a[0]&&b[1]>a[1]||b[0]==a[0]&&b[1]==a[1]&&b[2]>=a[2]?!0:!1}function Q(a,b,c,f){if(!e.ie||!e.mac){var g=d.getElementsByTagName("head")[0];if(g){c=c&&"string"==typeof c?c:"screen";f&&(K=l=null);if(!l||K!=c)f=d.createElement("style"),f.setAttribute("type","text/css"),f.setAttribute("media",c),l=g.appendChild(f),
e.ie&&(e.win&&typeof d.styleSheets!=i&&0<d.styleSheets.length)&&(l=d.styleSheets[d.styleSheets.length-1]),K=c;e.ie&&e.win?l&&typeof l.addRule==r&&l.addRule(a,b):l&&typeof d.createTextNode!=i&&l.appendChild(d.createTextNode(a+" {"+b+"}"))}}}function t(a,b){if(R){var c=b?"visible":"hidden";s&&n(a)?n(a).style.visibility=c:Q("#"+a,"visibility:"+c)}}function S(a){return null!=/[\\\"<>\.;]/.exec(a)&&typeof encodeURIComponent!=i?encodeURIComponent(a):a}var i="undefined",r="object",y="application/x-shockwave-flash",
O="SWFObjectExprInst",m=window,d=document,q=navigator,T=!1,x=[function(){T?V():D()}],p=[],C=[],v=[],w,B,H,N,s=!1,A=!1,l,K,R=!0,e=function(){var a=typeof d.getElementById!=i&&typeof d.getElementsByTagName!=i&&typeof d.createElement!=i,b=q.userAgent.toLowerCase(),c=q.platform.toLowerCase(),f=c?/win/.test(c):/win/.test(b),c=c?/mac/.test(c):/mac/.test(b),b=/webkit/.test(b)?parseFloat(b.replace(/^.*webkit\/(\d+(\.\d+)?).*$/,"$1")):!1,g=!+"\v1",e=[0,0,0],h=null;if(typeof q.plugins!=i&&typeof q.plugins["Shockwave Flash"]==
r){if((h=q.plugins["Shockwave Flash"].description)&&!(typeof q.mimeTypes!=i&&q.mimeTypes[y]&&!q.mimeTypes[y].enabledPlugin))T=!0,g=!1,h=h.replace(/^.*\s+(\S+\s+\S+$)/,"$1"),e[0]=parseInt(h.replace(/^(.*)\..*$/,"$1"),10),e[1]=parseInt(h.replace(/^.*\.(.*)\s.*$/,"$1"),10),e[2]=/[a-zA-Z]/.test(h)?parseInt(h.replace(/^.*[a-zA-Z]+(.*)$/,"$1"),10):0}else if(typeof m.ActiveXObject!=i)try{var j=new ActiveXObject("ShockwaveFlash.ShockwaveFlash");if(j&&(h=j.GetVariable("$version")))g=!0,h=h.split(" ")[1].split(","),
e=[parseInt(h[0],10),parseInt(h[1],10),parseInt(h[2],10)]}catch(k){}return{w3:a,pv:e,wk:b,ie:g,win:f,mac:c}}();(function(){e.w3&&((typeof d.readyState!=i&&"complete"==d.readyState||typeof d.readyState==i&&(d.getElementsByTagName("body")[0]||d.body))&&u(),s||(typeof d.addEventListener!=i&&d.addEventListener("DOMContentLoaded",u,!1),e.ie&&e.win&&(d.attachEvent("onreadystatechange",function(){"complete"==d.readyState&&(d.detachEvent("onreadystatechange",arguments.callee),u())}),m==top&&function(){if(!s){try{d.documentElement.doScroll("left")}catch(a){setTimeout(arguments.callee,
0);return}u()}}()),e.wk&&function(){s||(/loaded|complete/.test(d.readyState)?u():setTimeout(arguments.callee,0))}(),M(u)))})();(function(){e.ie&&e.win&&window.attachEvent("onunload",function(){for(var a=v.length,b=0;b<a;b++)v[b][0].detachEvent(v[b][1],v[b][2]);a=C.length;for(b=0;b<a;b++)P(C[b]);for(var c in e)e[c]=null;e=null;for(var f in swfobject)swfobject[f]=null;swfobject=null})})();return{registerObject:function(a,b,c,f){if(e.w3&&a&&b){var d={};d.id=a;d.swfVersion=b;d.expressInstall=c;d.callbackFn=
f;p[p.length]=d;t(a,!1)}else f&&f({success:!1,id:a})},getObjectById:function(a){if(e.w3)return E(a)},embedSWF:function(a,b,c,d,g,o,h,j,k,m){var n={success:!1,id:b};e.w3&&!(e.wk&&312>e.wk)&&a&&b&&c&&d&&g?(t(b,!1),L(function(){c+="";d+="";var e={};if(k&&typeof k===r)for(var l in k)e[l]=k[l];e.data=a;e.width=c;e.height=d;l={};if(j&&typeof j===r)for(var p in j)l[p]=j[p];if(h&&typeof h===r)for(var q in h)l.flashvars=typeof l.flashvars!=i?l.flashvars+("&"+q+"="+h[q]):q+"="+h[q];if(z(g))p=J(e,l,b),e.id==
b&&t(b,!0),n.success=!0,n.ref=p;else{if(o&&F()){e.data=o;G(e,l,b,m);return}t(b,!0)}m&&m(n)})):m&&m(n)},switchOffAutoHideShow:function(){R=!1},ua:e,getFlashPlayerVersion:function(){return{major:e.pv[0],minor:e.pv[1],release:e.pv[2]}},hasFlashPlayerVersion:z,createSWF:function(a,b,c){if(e.w3)return J(a,b,c)},showExpressInstall:function(a,b,c,d){e.w3&&F()&&G(a,b,c,d)},removeSWF:function(a){e.w3&&P(a)},createCSS:function(a,b,c,d){e.w3&&Q(a,b,c,d)},addDomLoadEvent:L,addLoadEvent:M,getQueryParamValue:function(a){var b=
d.location.search||d.location.hash;if(b){/\?/.test(b)&&(b=b.split("?")[1]);if(null==a)return S(b);for(var b=b.split("&"),c=0;c<b.length;c++)if(b[c].substring(0,b[c].indexOf("="))==a)return S(b[c].substring(b[c].indexOf("=")+1))}return""},expressInstallCallback:function(){if(A){var a=n(O);a&&w&&(a.parentNode.replaceChild(w,a),B&&(t(B,!0),e.ie&&e.win&&(w.style.display="block")),H&&H(N));A=!1}}}}(); | PypiClean |
class FramesetArgument(object):
    """A single argument of a PropBank frameset: its type label, its textual
    definition and an optional thematic function."""

    # Private, name-mangled attributes (assigned in __init__):
    __argument_type: str  # type label, e.g. "ARG0"
    __definition: str     # human-readable definition text
    __function: str       # thematic function (may be None)
def __init__(self,
argumentType: str,
definition: str,
function: str = None):
"""
A constructor of FramesetArgument class which takes argumentType and definition as input and initializes
corresponding attributes
PARAMETERS
----------
argumentType : str
ArgumentType of the frameset argument
definition : str
Definition of the frameset argument
function : str
Function of the frameset argument
"""
self.__argument_type = argumentType
self.__definition = definition
self.__function = function
def getArgumentType(self) -> str:
"""
Accessor for argumentType.
RETURNS
-------
str
argumentType.
"""
return self.__argument_type
def getDefinition(self) -> str:
"""
Accessor for definition.
RETURNS
-------
str
definition.
"""
return self.__definition
def getFunction(self) -> str:
"""
Accessor for function.
RETURNS
-------
str
function.
"""
return self.__function
def setDefinition(self, definition: str):
"""
Mutator for definition.
PARAMETERS
----------
definition : str
definition to set.
"""
self.__definition = definition
def setFunction(self, function: str):
"""
Mutator for definition.
PARAMETERS
----------
function : str
function to set.
"""
self.__function = function
def __str__(self) -> str:
"""
__str__ converts an FramesetArgument to a string. It returns argument string which is in the form of
argumentType:definition
RETURNS
-------
str
string form of frameset argument
"""
return self.__argument_type + ":" + self.__definition | PypiClean |
/Flask-ThriftClient-0.2.0.tar.gz/Flask-ThriftClient-0.2.0/flask_thriftclient/__init__.py | from thrift.transport import TSocket, THttpClient, TTransport, TZlibTransport, TSSLSocket
from thrift.protocol import TBinaryProtocol, TCompactProtocol
try:
#only available from thrift >= 0.9.1
from thrift.protocol import TJSONProtocol
HAS_JSON_PROTOCOL = True
except ImportError:
HAS_JSON_PROTOCOL = False
from urlparse import urlparse
from functools import wraps
from contextlib import contextmanager
class ThriftClient(object):
    """
    Flask ThriftClient
    ##################

    Introduction
    ============

    This extension provides a simple integration with
    `Thrift <https://thrift.apache.org>`_ RPC servers.

    .. code:: python

        from flask import Flask
        from flask_thriftclient import ThriftClient

        from MyGeneratedThriftCode import MyService

        app = Flask(__name__)
        app.config["THRIFTCLIENT_TRANSPORT"] = "tcp://127.0.0.1:9090"
        thriftclient = ThriftClient(MyService.Client, app)

        @app.route("/")
        def home():
            data = thriftclient.client.mymethod()
            return data

    Transport
    =========

    Thrift endpoints are defined in the configuration variable
    THRIFTCLIENT_TRANSPORT as an URL. The default transport is
    tcp://localhost:9090

    Available url schemes are:

    tcp: use TCP socket as transport, you have to define the server
    address and port. If the port isn't defined, 9090 will be used

    Example:

    * tcp://127.0.0.1
    * tcp://localhost:1234/

    http: use HTTP protocol as transport. Examples:

    * http://myservice.local/

    unix: use unix sockets as transport; as this scheme follows URI format,
    it *MUST* have either no or three "/" before the socket path

    * unix:///tmp/mysocket #absolute path
    * unix:/tmp/mysocket #absolute path
    * unix:./mysocket #relative path

    SSL
    ===

    You may enable SSL for transport communications by using the *'s'*
    version of a url scheme:

    tcp <=> tcps
    http <=> https
    unix <=> unixs

    examples:

    * https://myserver/
    * unixs:/tmp/mysocket
    * tcps://localhost:5533/

    Two options are related to SSL transport:

    THRIFTCLIENT_SSL_VALIDATE: True if the certificate has to be validated
    (default True)

    THRIFTCLIENT_SSL_CA_CERTS: path to the SSL certificate (default None)

    Note that you *MUST* set one of these options:

    .. code:: python

        app.config["THRIFTCLIENT_SSL_VALIDATE"] = False
        app.config["THRIFTCLIENT_TRANSPORT"] = "https://127.0.0.1/"

        #or

        app.config["THRIFTCLIENT_SSL_CA_CERTS"] = "./cacert.pem"
        app.config["THRIFTCLIENT_TRANSPORT"] = "https://127.0.0.1/"

    Protocol
    ========

    You may define which protocol must be used by setting the parameter
    *THRIFTCLIENT_PROTOCOL*. The default protocol is Binary.

    Available parameters are:

    ThriftClient.BINARY or "BINARY" : use the binary protocol
    ThriftClient.COMPACT or "COMPACT" : use the compact protocol
    ThriftClient.JSON or "JSON" : use the JSON protocol; note that this
    protocol is only available for thrift >= 0.9.1

    Connection
    ==========

    By default the application will open then close the transport for each
    request. This can be overridden by setting *THRIFTCLIENT_ALWAYS_CONNECT*
    to False.

    When THRIFTCLIENT_ALWAYS_CONNECT is set to False there are 3 ways to
    handle your connections:

    - you can call transport.close and transport.open manually
    - you can use the autoconnect decorator
    - you can use the connect "with" context

    .. code:: python

        app = Flask(__name__)
        app.config["THRIFTCLIENT_TRANSPORT"] = "tcp://127.0.0.1:9090"
        app.config["THRIFTCLIENT_ALWAYS_CONNECT"] = False
        thriftclient = ThriftClient(MyService.Client, app)

        @app.route("/with_autoconnect")
        @thriftclient.autoconnect
        def with_autoconnect():
            data = thriftclient.client.mymethod()
            return data

        @app.route("/with_context")
        def with_context():
            with thriftclient.connect():
                data = thriftclient.client.mymethod()
                return data

        @app.route("/with_manual_connection")
        def with_manual_connection():
            thriftclient.transport.open()
            data = thriftclient.client.mymethod()
            thriftclient.transport.close()
            return data

    Options
    =======

    Other options are:

    THRIFTCLIENT_BUFFERED: use buffered transport (default False)

    THRIFTCLIENT_ZLIB: use zlib compressed transport (default False)
    """

    # Accepted values for the THRIFTCLIENT_PROTOCOL configuration key.
    BINARY = "BINARY"
    COMPACT = "COMPACT"
    if HAS_JSON_PROTOCOL:
        # TJSONProtocol only exists in thrift >= 0.9.1.
        JSON = "JSON"
def __init__(self, interface, app=None, config=None):
self.interface = interface
self.protocol = None
self.transport = None
self.client = None
self.config = config
self.alwaysConnect = True
if app is not None:
self.init_app(app)
    def init_app(self, app, config=None):
        """Bind this client to a Flask application.

        Fills in default values for every THRIFTCLIENT_* configuration key,
        builds the transport/protocol/client stack, and -- unless
        THRIFTCLIENT_ALWAYS_CONNECT is disabled -- registers per-request
        hooks that open the transport before and close it after each request.

        config: optional explicit configuration mapping; falls back to the
        one given at construction time, then to app.config.
        """
        if not config:
            config = self.config
        if not config:
            config = app.config

        # Default values for every supported option.
        config.setdefault("THRIFTCLIENT_TRANSPORT", "tcp://localhost:9090")
        config.setdefault("THRIFTCLIENT_PROTOCOL", ThriftClient.BINARY)
        config.setdefault("THRIFTCLIENT_SSL_VALIDATE", True)
        config.setdefault("THRIFTCLIENT_SSL_CA_CERTS", None)
        config.setdefault("THRIFTCLIENT_BUFFERED", False)
        config.setdefault("THRIFTCLIENT_ZLIB", False)
        config.setdefault("THRIFTCLIENT_ALWAYS_CONNECT", True)

        self._set_client(app, config)

        # _set_client() has just refreshed self.alwaysConnect from the config.
        if self.alwaysConnect:
            @app.before_request
            def before_request():
                assert(self.client is not None)
                assert(self.transport is not None)
                try:
                    self.transport.open()
                except TTransport.TTransportException:
                    raise RuntimeError("Unable to connect to thrift server")

            @app.teardown_request
            def after_request(response):
                # teardown_request runs even when the view raised.
                self.transport.close()
@contextmanager
def connect(self):
assert(self.client is not None)
assert(self.transport is not None)
try:
self.transport.open()
except TTransport.TTransportException:
raise RuntimeError("Unable to connect to thrift server")
yield
self.transport.close()
def autoconnect(self, func):
"""
when using THRIFTCLIENT_ALWAYS_CONNECT at false, this decorator allows
to connect to the thrift service automatically for a single function
"""
@wraps(func)
def onCall(*args, **kwargs):
#we don't want to connect twice
if self.alwaysConnect:
return func(*args, **kwargs)
with self.connect():
return func(*args, **kwargs)
return onCall
def _set_client(self, app, config):
#configure thrift thransport
if config["THRIFTCLIENT_TRANSPORT"] is None:
raise RuntimeError("THRIFTCLIENT_TRANSPORT MUST be specified")
uri = urlparse(config["THRIFTCLIENT_TRANSPORT"])
if uri.scheme == "tcp":
port = uri.port or 9090
self.transport = TSocket.TSocket(uri.hostname, port)
elif uri.scheme == "tcps":
port = uri.port or 9090
self.transport = TSSLSocket.TSSLSocket(
host=uri.hostname,
port=port,
validate=config["THRIFTCLIENT_SSL_VALIDATE"],
ca_certs=config["THRIFTCLIENT_SSL_CA_CERTS"],
)
elif uri.scheme in ["http", "https"]:
self.transport = THttpClient.THttpClient(config["THRIFTCLIENT_TRANSPORT"])
elif uri.scheme == "unix":
if uri.hostname is not None:
raise RuntimeError("unix socket MUST starts with either unix:/ or unix:///")
self.transport = TSocket.TSocket(unix_socket=uri.path)
elif uri.scheme == "unixs":
if uri.hostname is not None:
raise RuntimeError("unixs socket MUST starts with either unixs:/ or unixs:///")
self.transport = TSSLSocket.TSSLSocket(
validate = config["THRIFTCLIENT_SSL_VALIDATE"],
ca_certs = config["THRIFTCLIENT_SSL_CA_CERTS"],
unix_socket = uri.path)
else:
raise RuntimeError(
"invalid configuration for THRIFTCLIENT_TRANSPORT: {transport}"
.format(transport = config["THRIFTCLIENT_TRANSPORT"])
)
#configure additionnal protocol layers
if config["THRIFTCLIENT_BUFFERED"] == True:
self.transport = TTransport.TBufferedTransport(self.transport)
if config["THRIFTCLIENT_ZLIB"] == True:
self.transport = TZlibTransport.TZlibTransport(self.transport)
#configure thrift protocol
if config["THRIFTCLIENT_PROTOCOL"] == ThriftClient.BINARY:
self.protocol = TBinaryProtocol.TBinaryProtocol(self.transport)
elif config["THRIFTCLIENT_PROTOCOL"] == ThriftClient.COMPACT:
self.protocol = TCompactProtocol.TCompactProtocol(self.transport)
elif HAS_JSON_PROTOCOL and config["THRIFTCLIENT_PROTOCOL"] == ThriftClient.JSON:
self.protocol = TJSONProtocol.TJSONProtocol(self.transport)
else:
raise RuntimeError(
"invalid configuration for THRIFTCLIENT_PROTOCOL: {protocol}"
.format(protocol = config["THRIFTCLIENT_PROTOCOL"])
)
#create the client from the interface
self.client = self.interface(self.protocol)
#configure auto connection
self.alwaysConnect = config["THRIFTCLIENT_ALWAYS_CONNECT"] | PypiClean |
/DeepXDE-1.9.3-py3-none-any.whl/deepxde/model.py | __all__ = ["LossHistory", "Model", "TrainState"]
import pickle
from collections import OrderedDict
import numpy as np
from . import config
from . import display
from . import gradients as grad
from . import losses as losses_module
from . import metrics as metrics_module
from . import optimizers
from . import utils
from .backend import backend_name, tf, torch, jax, paddle
from .callbacks import CallbackList
from .utils import list_to_str
class Model:
"""A ``Model`` trains a ``NN`` on a ``Data``.
Args:
data: ``deepxde.data.Data`` instance.
net: ``deepxde.nn.NN`` instance.
"""
def __init__(self, data, net):
self.data = data
self.net = net
self.opt_name = None
self.batch_size = None
self.callbacks = None
self.metrics = None
self.external_trainable_variables = []
self.train_state = TrainState()
self.losshistory = LossHistory()
self.stop_training = False
# Backend-dependent attributes
self.opt = None
# Tensor or callable
self.outputs = None
self.outputs_losses_train = None
self.outputs_losses_test = None
self.train_step = None
if backend_name == "tensorflow.compat.v1":
self.sess = None
self.saver = None
elif backend_name in ["pytorch", "paddle"]:
self.lr_scheduler = None
elif backend_name == "jax":
self.opt_state = None
self.params = None
    @utils.timing
    def compile(
        self,
        optimizer,
        lr=None,
        loss="MSE",
        metrics=None,
        decay=None,
        loss_weights=None,
        external_trainable_variables=None,
    ):
        """Configures the model for training.

        Args:
            optimizer: String name of an optimizer, or a backend optimizer class
                instance.
            lr (float): The learning rate. For L-BFGS, use
                ``dde.optimizers.set_LBFGS_options`` to set the hyperparameters.
            loss: If the same loss is used for all errors, then `loss` is a String name
                of a loss function or a loss function. If different errors use
                different losses, then `loss` is a list whose size is equal to the
                number of errors.
            metrics: List of metrics to be evaluated by the model during training.
            decay (tuple): Name and parameters of decay to the initial learning rate.
                One of the following options:

                - For backend TensorFlow 1.x:

                    - `inverse_time_decay <https://www.tensorflow.org/api_docs/python/tf/compat/v1/train/inverse_time_decay>`_: ("inverse time", decay_steps, decay_rate)
                    - `cosine_decay <https://www.tensorflow.org/api_docs/python/tf/compat/v1/train/cosine_decay>`_: ("cosine", decay_steps, alpha)

                - For backend TensorFlow 2.x:

                    - `InverseTimeDecay <https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/schedules/InverseTimeDecay>`_: ("inverse time", decay_steps, decay_rate)
                    - `CosineDecay <https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/schedules/CosineDecay>`_: ("cosine", decay_steps, alpha)

                - For backend PyTorch:

                    - `StepLR <https://pytorch.org/docs/stable/generated/torch.optim.lr_scheduler.StepLR.html>`_: ("step", step_size, gamma)
                    - `CosineAnnealingLR <https://pytorch.org/docs/stable/generated/torch.optim.lr_scheduler.CosineAnnealingLR.html>`_: ("cosine", T_max, eta_min)
                    - `InverseTimeLR <https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/schedules/InverseTimeDecay>`_: ("inverse time", decay_steps, decay_rate)
                    - `ExponentialLR <https://pytorch.org/docs/stable/generated/torch.optim.lr_scheduler.ExponentialLR.html>`_: ("exponential", gamma)
                    - `LambdaLR <https://pytorch.org/docs/stable/generated/torch.optim.lr_scheduler.LambdaLR.html>`_: ("lambda", lambda_fn: Callable[[step], float])

                - For backend PaddlePaddle:

                    - `InverseTimeDecay
                      <https://www.paddlepaddle.org.cn/documentation/docs/en/develop/api/paddle/optimizer/lr/InverseTimeDecay_en.html>`_:
                      ("inverse time", gamma)
            loss_weights: A list specifying scalar coefficients (Python floats) to
                weight the loss contributions. The loss value that will be minimized by
                the model will then be the weighted sum of all individual losses,
                weighted by the `loss_weights` coefficients.
            external_trainable_variables: A trainable ``dde.Variable`` object or a list
                of trainable ``dde.Variable`` objects. The unknown parameters in the
                physics systems that need to be recovered. If the backend is
                tensorflow.compat.v1, `external_trainable_variables` is ignored, and all
                trainable ``dde.Variable`` objects are automatically collected.
        """
        if config.rank == 0:
            print("Compiling model...")
        self.opt_name = optimizer
        loss_fn = losses_module.get(loss)
        self.losshistory.set_loss_weights(loss_weights)
        # Normalize external_trainable_variables to a (possibly empty) list.
        if external_trainable_variables is None:
            self.external_trainable_variables = []
        else:
            if backend_name == "tensorflow.compat.v1":
                print(
                    "Warning: For the backend tensorflow.compat.v1, "
                    "`external_trainable_variables` is ignored, and all trainable "
                    "``tf.Variable`` objects are automatically collected."
                )
            if not isinstance(external_trainable_variables, list):
                external_trainable_variables = [external_trainable_variables]
            self.external_trainable_variables = external_trainable_variables
        # Dispatch to the backend-specific compilation routine.
        if backend_name == "tensorflow.compat.v1":
            self._compile_tensorflow_compat_v1(lr, loss_fn, decay, loss_weights)
        elif backend_name == "tensorflow":
            self._compile_tensorflow(lr, loss_fn, decay, loss_weights)
        elif backend_name == "pytorch":
            self._compile_pytorch(lr, loss_fn, decay, loss_weights)
        elif backend_name == "jax":
            self._compile_jax(lr, loss_fn, decay, loss_weights)
        elif backend_name == "paddle":
            self._compile_paddle(lr, loss_fn, decay, loss_weights)
        # metrics may use model variables such as self.net, and thus are instantiated
        # after backend compile.
        metrics = metrics or []
        self.metrics = [metrics_module.get(m) for m in metrics]
    def _compile_tensorflow_compat_v1(self, lr, loss_fn, decay, loss_weights):
        """Build the TF1 graph: session, loss tensors, and train op."""
        if not self.net.built:
            self.net.build()
        # Create the session lazily; configuration depends on XLA/Horovod flags.
        if self.sess is None:
            if config.xla_jit:
                cfg = tf.ConfigProto()
                cfg.graph_options.optimizer_options.global_jit_level = (
                    tf.OptimizerOptions.ON_2
                )
                self.sess = tf.Session(config=cfg)
            elif config.hvd is not None:
                # One visible GPU per Horovod rank.
                cfg = tf.ConfigProto()
                cfg.gpu_options.visible_device_list = str(config.rank)
                self.sess = tf.Session(config=cfg)
            else:
                self.sess = tf.Session()
            self.saver = tf.train.Saver(max_to_keep=None)

        def losses(losses_fn):
            """Assemble the (regularized, weighted) loss vector as one tensor."""
            # Data losses
            losses = losses_fn(
                self.net.targets, self.net.outputs, loss_fn, self.net.inputs, self
            )
            if not isinstance(losses, list):
                losses = [losses]
            # Regularization loss
            if self.net.regularizer is not None:
                losses.append(tf.losses.get_regularization_loss())
            losses = tf.convert_to_tensor(losses)
            # Weighted losses
            if loss_weights is not None:
                losses *= loss_weights
            return losses

        losses_train = losses(self.data.losses_train)
        losses_test = losses(self.data.losses_test)
        total_loss = tf.math.reduce_sum(losses_train)

        # Tensors (fetched via session.run in _outputs/_outputs_losses).
        self.outputs = self.net.outputs
        self.outputs_losses_train = [self.net.outputs, losses_train]
        self.outputs_losses_test = [self.net.outputs, losses_test]
        self.train_step = optimizers.get(
            total_loss, self.opt_name, learning_rate=lr, decay=decay
        )
    def _compile_tensorflow(self, lr, loss_fn, decay, loss_weights):
        """Build eager/tf.function callables for outputs, losses, and train step."""

        @tf.function(jit_compile=config.xla_jit)
        def outputs(training, inputs):
            return self.net(inputs, training=training)

        def outputs_losses(training, inputs, targets, auxiliary_vars, losses_fn):
            self.net.auxiliary_vars = auxiliary_vars
            # Don't call outputs() decorated by @tf.function above, otherwise the
            # gradient of outputs wrt inputs will be lost here.
            outputs_ = self.net(inputs, training=training)
            # Data losses
            losses = losses_fn(targets, outputs_, loss_fn, inputs, self)
            if not isinstance(losses, list):
                losses = [losses]
            # Regularization loss
            if self.net.regularizer is not None:
                losses += [tf.math.reduce_sum(self.net.losses)]
            losses = tf.convert_to_tensor(losses)
            # Weighted losses
            if loss_weights is not None:
                losses *= loss_weights
            return outputs_, losses

        @tf.function(jit_compile=config.xla_jit)
        def outputs_losses_train(inputs, targets, auxiliary_vars):
            return outputs_losses(
                True, inputs, targets, auxiliary_vars, self.data.losses_train
            )

        @tf.function(jit_compile=config.xla_jit)
        def outputs_losses_test(inputs, targets, auxiliary_vars):
            return outputs_losses(
                False, inputs, targets, auxiliary_vars, self.data.losses_test
            )

        opt = optimizers.get(self.opt_name, learning_rate=lr, decay=decay)

        @tf.function(jit_compile=config.xla_jit)
        def train_step(inputs, targets, auxiliary_vars):
            # inputs and targets are np.ndarray and automatically converted to Tensor.
            with tf.GradientTape() as tape:
                losses = outputs_losses_train(inputs, targets, auxiliary_vars)[1]
                total_loss = tf.math.reduce_sum(losses)
            trainable_variables = (
                self.net.trainable_variables + self.external_trainable_variables
            )
            grads = tape.gradient(total_loss, trainable_variables)
            opt.apply_gradients(zip(grads, trainable_variables))

        def train_step_tfp(
            inputs, targets, auxiliary_vars, previous_optimizer_results=None
        ):
            # TFP-style (e.g. L-BFGS) optimizers drive the whole minimization
            # themselves via a loss closure, rather than a single gradient step.
            def build_loss():
                losses = outputs_losses_train(inputs, targets, auxiliary_vars)[1]
                return tf.math.reduce_sum(losses)

            trainable_variables = (
                self.net.trainable_variables + self.external_trainable_variables
            )
            return opt(trainable_variables, build_loss, previous_optimizer_results)

        # Callables
        self.outputs = outputs
        self.outputs_losses_train = outputs_losses_train
        self.outputs_losses_test = outputs_losses_test
        self.train_step = (
            train_step
            if not optimizers.is_external_optimizer(self.opt_name)
            else train_step_tfp
        )
    def _compile_pytorch(self, lr, loss_fn, decay, loss_weights):
        """Build PyTorch callables for outputs, losses, and the train step."""

        def outputs(training, inputs):
            self.net.train(mode=training)
            with torch.no_grad():
                if isinstance(inputs, tuple):
                    inputs = tuple(
                        map(lambda x: torch.as_tensor(x).requires_grad_(), inputs)
                    )
                else:
                    inputs = torch.as_tensor(inputs)
                    inputs.requires_grad_()
            # Clear cached Jacobians and Hessians.
            grad.clear()
            return self.net(inputs)

        def outputs_losses(training, inputs, targets, auxiliary_vars, losses_fn):
            self.net.auxiliary_vars = None
            if auxiliary_vars is not None:
                self.net.auxiliary_vars = torch.as_tensor(auxiliary_vars)
            self.net.train(mode=training)
            # requires_grad_() on inputs so PDE residuals can differentiate
            # outputs w.r.t. inputs.
            if isinstance(inputs, tuple):
                inputs = tuple(
                    map(lambda x: torch.as_tensor(x).requires_grad_(), inputs)
                )
            else:
                inputs = torch.as_tensor(inputs)
                inputs.requires_grad_()
            outputs_ = self.net(inputs)
            # Data losses
            if targets is not None:
                targets = torch.as_tensor(targets)
            losses = losses_fn(targets, outputs_, loss_fn, inputs, self)
            if not isinstance(losses, list):
                losses = [losses]
            losses = torch.stack(losses)
            # Weighted losses
            if loss_weights is not None:
                losses *= torch.as_tensor(loss_weights)
            # Clear cached Jacobians and Hessians.
            grad.clear()
            return outputs_, losses

        def outputs_losses_train(inputs, targets, auxiliary_vars):
            return outputs_losses(
                True, inputs, targets, auxiliary_vars, self.data.losses_train
            )

        def outputs_losses_test(inputs, targets, auxiliary_vars):
            return outputs_losses(
                False, inputs, targets, auxiliary_vars, self.data.losses_test
            )

        # Another way is using per-parameter options
        # https://pytorch.org/docs/stable/optim.html#per-parameter-options,
        # but not all optimizers (such as L-BFGS) support this.
        trainable_variables = (
            list(self.net.parameters()) + self.external_trainable_variables
        )
        if self.net.regularizer is None:
            self.opt, self.lr_scheduler = optimizers.get(
                trainable_variables, self.opt_name, learning_rate=lr, decay=decay
            )
        else:
            # L2 regularization is implemented via the optimizer's weight_decay.
            if self.net.regularizer[0] == "l2":
                self.opt, self.lr_scheduler = optimizers.get(
                    trainable_variables,
                    self.opt_name,
                    learning_rate=lr,
                    decay=decay,
                    weight_decay=self.net.regularizer[1],
                )
            else:
                raise NotImplementedError(
                    f"{self.net.regularizer[0]} regularization to be implemented for "
                    "backend pytorch."
                )

        def train_step(inputs, targets, auxiliary_vars):
            # Closure form supports both one-shot optimizers and L-BFGS, which
            # re-evaluates the loss multiple times per step.
            def closure():
                losses = outputs_losses_train(inputs, targets, auxiliary_vars)[1]
                total_loss = torch.sum(losses)
                self.opt.zero_grad()
                total_loss.backward()
                return total_loss

            self.opt.step(closure)
            if self.lr_scheduler is not None:
                self.lr_scheduler.step()

        # Callables
        self.outputs = outputs
        self.outputs_losses_train = outputs_losses_train
        self.outputs_losses_test = outputs_losses_test
        self.train_step = train_step
    def _compile_jax(self, lr, loss_fn, decay, loss_weights):
        """Build pure (jit-compiled) JAX functions for outputs, losses, train step."""
        # Initialize the network's parameters
        key = jax.random.PRNGKey(config.jax_random_seed)
        self.net.params = self.net.init(key, self.data.test()[0])
        # Model state = (network params, external trainable variables).
        self.params = [self.net.params, self.external_trainable_variables]
        # TODO: learning rate decay
        self.opt = optimizers.get(self.opt_name, learning_rate=lr)
        self.opt_state = self.opt.init(self.params)

        @jax.jit
        def outputs(params, training, inputs):
            return self.net.apply(params, inputs, training=training)

        def outputs_losses(params, training, inputs, targets, losses_fn):
            nn_params, ext_params = params
            # TODO: Add auxiliary vars

            def outputs_fn(inputs):
                return self.net.apply(nn_params, inputs, training=training)

            outputs_ = self.net.apply(nn_params, inputs, training=training)
            # Data losses
            # We use aux so that self.data.losses is a pure function.
            aux = [outputs_fn, ext_params] if ext_params else [outputs_fn]
            losses = losses_fn(targets, outputs_, loss_fn, inputs, self, aux=aux)
            # TODO: Add regularization loss, weighted losses
            if not isinstance(losses, list):
                losses = [losses]
            losses = jax.numpy.asarray(losses)
            return outputs_, losses

        @jax.jit
        def outputs_losses_train(params, inputs, targets):
            return outputs_losses(params, True, inputs, targets, self.data.losses_train)

        @jax.jit
        def outputs_losses_test(params, inputs, targets):
            return outputs_losses(params, False, inputs, targets, self.data.losses_test)

        @jax.jit
        def train_step(params, opt_state, inputs, targets):
            # Functional update: returns new params/opt_state instead of mutating.
            def loss_function(params):
                return jax.numpy.sum(outputs_losses_train(params, inputs, targets)[1])

            grad_fn = jax.grad(loss_function)
            grads = grad_fn(params)
            updates, new_opt_state = self.opt.update(grads, opt_state)
            new_params = optimizers.apply_updates(params, updates)
            return new_params, new_opt_state

        # Pure functions
        self.outputs = outputs
        self.outputs_losses_train = outputs_losses_train
        self.outputs_losses_test = outputs_losses_test
        self.train_step = train_step
    def _compile_paddle(self, lr, loss_fn, decay, loss_weights):
        """Build PaddlePaddle callables for outputs, losses, and the train step."""

        def outputs(training, inputs):
            if training:
                self.net.train()
            else:
                self.net.eval()
            with paddle.no_grad():
                # stop_gradient=False so PDE residuals can differentiate
                # outputs w.r.t. inputs.
                if isinstance(inputs, tuple):
                    inputs = tuple(
                        map(lambda x: paddle.to_tensor(x, stop_gradient=False), inputs)
                    )
                else:
                    inputs = paddle.to_tensor(inputs, stop_gradient=False)
                return self.net(inputs)

        def outputs_losses(training, inputs, targets, auxiliary_vars, losses_fn):
            self.net.auxiliary_vars = auxiliary_vars
            if training:
                self.net.train()
            else:
                self.net.eval()
            if isinstance(inputs, tuple):
                inputs = tuple(
                    map(lambda x: paddle.to_tensor(x, stop_gradient=False), inputs)
                )
            else:
                inputs = paddle.to_tensor(inputs, stop_gradient=False)
            outputs_ = self.net(inputs)
            # Data losses
            if targets is not None:
                targets = paddle.to_tensor(targets)
            losses = losses_fn(targets, outputs_, loss_fn, inputs, self)
            if not isinstance(losses, list):
                losses = [losses]
            # TODO: regularization
            losses = paddle.stack(losses, axis=0)
            # Weighted losses
            if loss_weights is not None:
                losses *= paddle.to_tensor(loss_weights)
            # Clear cached Jacobians and Hessians.
            grad.clear()
            return outputs_, losses

        def outputs_losses_train(inputs, targets, auxiliary_vars):
            return outputs_losses(
                True, inputs, targets, auxiliary_vars, self.data.losses_train
            )

        def outputs_losses_test(inputs, targets, auxiliary_vars):
            return outputs_losses(
                False, inputs, targets, auxiliary_vars, self.data.losses_test
            )

        trainable_variables = (
            list(self.net.parameters()) + self.external_trainable_variables
        )
        self.opt = optimizers.get(
            trainable_variables, self.opt_name, learning_rate=lr, decay=decay
        )

        def train_step(inputs, targets, auxiliary_vars):
            losses = outputs_losses_train(inputs, targets, auxiliary_vars)[1]
            total_loss = paddle.sum(losses)
            total_loss.backward()
            self.opt.step()
            self.opt.clear_grad()
            if self.lr_scheduler is not None:
                self.lr_scheduler.step()

        def train_step_lbfgs(inputs, targets, auxiliary_vars):
            # L-BFGS re-evaluates the loss via the closure multiple times per step.
            def closure():
                losses = outputs_losses_train(inputs, targets, auxiliary_vars)[1]
                total_loss = paddle.sum(losses)
                self.opt.clear_grad()
                total_loss.backward()
                return total_loss

            self.opt.step(closure)

        # Callables
        self.outputs = outputs
        self.outputs_losses_train = outputs_losses_train
        self.outputs_losses_test = outputs_losses_test
        self.train_step = (
            train_step
            if not optimizers.is_external_optimizer(self.opt_name)
            else train_step_lbfgs
        )
def _outputs(self, training, inputs):
if backend_name == "tensorflow.compat.v1":
feed_dict = self.net.feed_dict(training, inputs)
return self.sess.run(self.outputs, feed_dict=feed_dict)
if backend_name in ["tensorflow", "pytorch", "paddle"]:
outs = self.outputs(training, inputs)
elif backend_name == "jax":
outs = self.outputs(self.net.params, training, inputs)
return utils.to_numpy(outs)
    def _outputs_losses(self, training, inputs, targets, auxiliary_vars):
        """Forward pass returning (outputs, losses), both as NumPy arrays.

        Dispatches to the train or test loss computation based on `training`.
        """
        if training:
            outputs_losses = self.outputs_losses_train
        else:
            outputs_losses = self.outputs_losses_test
        if backend_name == "tensorflow.compat.v1":
            feed_dict = self.net.feed_dict(training, inputs, targets, auxiliary_vars)
            return self.sess.run(outputs_losses, feed_dict=feed_dict)
        if backend_name == "tensorflow":
            outs = outputs_losses(inputs, targets, auxiliary_vars)
        elif backend_name == "pytorch":
            # Freeze network weights during evaluation, then restore them.
            self.net.requires_grad_(requires_grad=False)
            outs = outputs_losses(inputs, targets, auxiliary_vars)
            self.net.requires_grad_()
        elif backend_name == "jax":
            # TODO: auxiliary_vars
            outs = outputs_losses(self.params, inputs, targets)
        elif backend_name == "paddle":
            outs = outputs_losses(inputs, targets, auxiliary_vars)
        return utils.to_numpy(outs[0]), utils.to_numpy(outs[1])
def _train_step(self, inputs, targets, auxiliary_vars):
if backend_name == "tensorflow.compat.v1":
feed_dict = self.net.feed_dict(True, inputs, targets, auxiliary_vars)
self.sess.run(self.train_step, feed_dict=feed_dict)
elif backend_name in ["tensorflow", "paddle"]:
self.train_step(inputs, targets, auxiliary_vars)
elif backend_name == "pytorch":
self.train_step(inputs, targets, auxiliary_vars)
elif backend_name == "jax":
# TODO: auxiliary_vars
self.params, self.opt_state = self.train_step(
self.params, self.opt_state, inputs, targets
)
self.net.params, self.external_trainable_variables = self.params
    @utils.timing
    def train(
        self,
        iterations=None,
        batch_size=None,
        display_every=1000,
        disregard_previous_best=False,
        callbacks=None,
        model_restore_path=None,
        model_save_path=None,
        epochs=None,
    ):
        """Trains the model.

        Args:
            iterations (Integer): Number of iterations to train the model, i.e., number
                of times the network weights are updated.
            batch_size: Integer, tuple, or ``None``.

                - If you solve PDEs via ``dde.data.PDE`` or ``dde.data.TimePDE``, do not use `batch_size`, and instead use
                  `dde.callbacks.PDEPointResampler
                  <https://deepxde.readthedocs.io/en/latest/modules/deepxde.html#deepxde.callbacks.PDEPointResampler>`_,
                  see an `example <https://github.com/lululxvi/deepxde/blob/master/examples/diffusion_1d_resample.py>`_.
                - For DeepONet in the format of Cartesian product, if `batch_size` is an Integer,
                  then it is the batch size for the branch input; if you want to also use mini-batch for the trunk net input,
                  set `batch_size` as a tuple, where the first number is the batch size for the branch net input
                  and the second number is the batch size for the trunk net input.
            display_every (Integer): Print the loss and metrics every this steps.
            disregard_previous_best: If ``True``, disregard the previous saved best
                model.
            callbacks: List of ``dde.callbacks.Callback`` instances. List of callbacks
                to apply during training.
            model_restore_path (String): Path where parameters were previously saved.
            model_save_path (String): Prefix of filenames created for the checkpoint.
            epochs (Integer): Deprecated alias to `iterations`. This will be removed in
                a future version.
        """
        # Deprecated `epochs` is honored only when `iterations` is not given.
        if iterations is None and epochs is not None:
            print(
                "Warning: epochs is deprecated and will be removed in a future version."
                " Use iterations instead."
            )
            iterations = epochs
        self.batch_size = batch_size
        self.callbacks = CallbackList(callbacks=callbacks)
        self.callbacks.set_model(self)
        if disregard_previous_best:
            self.train_state.disregard_best()
        if backend_name == "tensorflow.compat.v1":
            # Initialize variables on the first call; afterwards, only
            # initialize variables that are still uninitialized.
            if self.train_state.step == 0:
                self.sess.run(tf.global_variables_initializer())
                if config.hvd is not None:
                    bcast = config.hvd.broadcast_global_variables(0)
                    self.sess.run(bcast)
            else:
                utils.guarantee_initialized_variables(self.sess)

        if model_restore_path is not None:
            self.restore(model_restore_path, verbose=1)

        if config.rank == 0:
            print("Training model...\n")
        self.stop_training = False
        self.train_state.set_data_train(*self.data.train_next_batch(self.batch_size))
        self.train_state.set_data_test(*self.data.test())
        # Evaluate once before training so the history starts at step 0.
        self._test()
        self.callbacks.on_train_begin()
        if optimizers.is_external_optimizer(self.opt_name):
            # External optimizers (SciPy / TFP / L-BFGS) drive their own loop.
            if backend_name == "tensorflow.compat.v1":
                self._train_tensorflow_compat_v1_scipy(display_every)
            elif backend_name == "tensorflow":
                self._train_tensorflow_tfp()
            elif backend_name == "pytorch":
                self._train_pytorch_lbfgs()
            elif backend_name == "paddle":
                self._train_paddle_lbfgs()
        else:
            if iterations is None:
                raise ValueError("No iterations for {}.".format(self.opt_name))
            self._train_sgd(iterations, display_every)
        self.callbacks.on_train_end()

        if config.rank == 0:
            print("")
            display.training_display.summary(self.train_state)
        if model_save_path is not None:
            self.save(model_save_path, verbose=1)
        return self.losshistory, self.train_state
def _train_sgd(self, iterations, display_every):
for i in range(iterations):
self.callbacks.on_epoch_begin()
self.callbacks.on_batch_begin()
self.train_state.set_data_train(
*self.data.train_next_batch(self.batch_size)
)
self._train_step(
self.train_state.X_train,
self.train_state.y_train,
self.train_state.train_aux_vars,
)
self.train_state.epoch += 1
self.train_state.step += 1
if self.train_state.step % display_every == 0 or i + 1 == iterations:
self._test()
self.callbacks.on_batch_end()
self.callbacks.on_epoch_end()
if self.stop_training:
break
    def _train_tensorflow_compat_v1_scipy(self, display_every):
        """Run a SciPy-interface optimizer (e.g. L-BFGS-B) in the TF1 backend.

        The optimizer owns the whole minimization; progress is observed via
        `loss_callback`, which is invoked once per optimizer iteration.
        """

        def loss_callback(loss_train, loss_test, *args):
            # *args holds the fetched external trainable variable values, if any.
            self.train_state.epoch += 1
            self.train_state.step += 1
            if self.train_state.step % display_every == 0:
                self.train_state.loss_train = loss_train
                self.train_state.loss_test = loss_test
                self.train_state.metrics_test = None
                self.losshistory.append(
                    self.train_state.step,
                    self.train_state.loss_train,
                    self.train_state.loss_test,
                    None,
                )
                display.training_display(self.train_state)
            # VariableValue callbacks are serviced manually, since the SciPy
            # optimizer bypasses the normal epoch/batch callback hooks.
            for cb in self.callbacks.callbacks:
                if type(cb).__name__ == "VariableValue":
                    cb.epochs_since_last += 1
                    if cb.epochs_since_last >= cb.period:
                        cb.epochs_since_last = 0
                        print(
                            cb.model.train_state.epoch,
                            list_to_str(
                                [float(arg) for arg in args],
                                precision=cb.precision,
                            ),
                            file=cb.file,
                        )
                        cb.file.flush()

        self.train_state.set_data_train(*self.data.train_next_batch(self.batch_size))
        feed_dict = self.net.feed_dict(
            True,
            self.train_state.X_train,
            self.train_state.y_train,
            self.train_state.train_aux_vars,
        )
        # Fetch train/test losses (and external variables) on every iteration.
        fetches = [self.outputs_losses_train[1], self.outputs_losses_test[1]]
        if self.external_trainable_variables:
            fetches += self.external_trainable_variables
        self.train_step.minimize(
            self.sess,
            feed_dict=feed_dict,
            fetches=fetches,
            loss_callback=loss_callback,
        )
        self._test()
    def _train_tensorflow_tfp(self):
        """Run a TensorFlow Probability optimizer (L-BFGS) until maxiter/convergence."""
        # There is only one optimization step. If using multiple steps with/without
        # previous_optimizer_results, L-BFGS failed to reach a small error. The reason
        # could be that tfp.optimizer.lbfgs_minimize will start from scratch for each
        # call.
        n_iter = 0
        while n_iter < optimizers.LBFGS_options["maxiter"]:
            self.train_state.set_data_train(*self.data.train_next_batch(self.batch_size))
            results = self.train_step(
                self.train_state.X_train,
                self.train_state.y_train,
                self.train_state.train_aux_vars,
            )
            # One call may perform many internal iterations; count them all.
            n_iter += results.num_iterations.numpy()
            self.train_state.epoch += results.num_iterations.numpy()
            self.train_state.step += results.num_iterations.numpy()
            self._test()

            if results.converged or results.failed:
                break
    def _train_pytorch_lbfgs(self):
        """Run PyTorch L-BFGS until maxiter or until the iteration count stalls."""
        prev_n_iter = 0
        while prev_n_iter < optimizers.LBFGS_options["maxiter"]:
            self.callbacks.on_epoch_begin()
            self.callbacks.on_batch_begin()

            self.train_state.set_data_train(*self.data.train_next_batch(self.batch_size))
            self._train_step(
                self.train_state.X_train,
                self.train_state.y_train,
                self.train_state.train_aux_vars,
            )

            # L-BFGS tracks its own internal iteration count in the optimizer state.
            n_iter = self.opt.state_dict()["state"][0]["n_iter"]
            if prev_n_iter == n_iter:
                # Converged
                break

            self.train_state.epoch += n_iter - prev_n_iter
            self.train_state.step += n_iter - prev_n_iter
            prev_n_iter = n_iter
            self._test()

            self.callbacks.on_batch_end()
            self.callbacks.on_epoch_end()

            if self.stop_training:
                break
    def _train_paddle_lbfgs(self):
        """Run Paddle L-BFGS until maxiter or until the iteration count stalls."""
        prev_n_iter = 0
        while prev_n_iter < optimizers.LBFGS_options["maxiter"]:
            self.callbacks.on_epoch_begin()
            self.callbacks.on_batch_begin()

            self.train_state.set_data_train(*self.data.train_next_batch(self.batch_size))
            self._train_step(
                self.train_state.X_train,
                self.train_state.y_train,
                self.train_state.train_aux_vars,
            )

            # NOTE: unlike the PyTorch variant, Paddle's optimizer state is not
            # indexed by parameter group here.
            n_iter = self.opt.state_dict()["state"]["n_iter"]
            if prev_n_iter == n_iter:
                # Converged
                break

            self.train_state.epoch += n_iter - prev_n_iter
            self.train_state.step += n_iter - prev_n_iter
            prev_n_iter = n_iter
            self._test()

            self.callbacks.on_batch_end()
            self.callbacks.on_epoch_end()

            if self.stop_training:
                break
    def _test(self):
        """Evaluate losses/metrics on train and test data and record the history.

        Side effects: updates train_state, appends to losshistory, sets
        stop_training if any loss became NaN, and prints the progress on rank 0.
        """
        # TODO Now only print the training loss in rank 0. The correct way is to print the average training loss of all ranks.
        (
            self.train_state.y_pred_train,
            self.train_state.loss_train,
        ) = self._outputs_losses(
            True,
            self.train_state.X_train,
            self.train_state.y_train,
            self.train_state.train_aux_vars,
        )
        self.train_state.y_pred_test, self.train_state.loss_test = self._outputs_losses(
            False,
            self.train_state.X_test,
            self.train_state.y_test,
            self.train_state.test_aux_vars,
        )

        # Multi-output targets: apply every metric to every output component.
        if isinstance(self.train_state.y_test, (list, tuple)):
            self.train_state.metrics_test = [
                m(self.train_state.y_test[i], self.train_state.y_pred_test[i])
                for m in self.metrics
                for i in range(len(self.train_state.y_test))
            ]
        else:
            self.train_state.metrics_test = [
                m(self.train_state.y_test, self.train_state.y_pred_test)
                for m in self.metrics
            ]

        self.train_state.update_best()
        self.losshistory.append(
            self.train_state.step,
            self.train_state.loss_train,
            self.train_state.loss_test,
            self.train_state.metrics_test,
        )

        # Abort training on NaN losses instead of wasting further iterations.
        if (
            np.isnan(self.train_state.loss_train).any()
            or np.isnan(self.train_state.loss_test).any()
        ):
            self.stop_training = True

        if config.rank == 0:
            display.training_display(self.train_state)
    def predict(self, x, operator=None, callbacks=None):
        """Generates predictions for the input samples. If `operator` is ``None``,
        returns the network output, otherwise returns the output of the `operator`.

        Args:
            x: The network inputs. A Numpy array or a tuple of Numpy arrays.
            operator: A function takes arguments (`inputs`, `outputs`) or (`inputs`,
                `outputs`, `auxiliary_variables`) and outputs a tensor. `inputs` and
                `outputs` are the network input and output tensors, respectively.
                `auxiliary_variables` is the output of `auxiliary_var_function(x)`
                in `dde.data.PDE`. `operator` is typically chosen as the PDE (used to
                define `dde.data.PDE`) to predict the PDE residual.
            callbacks: List of ``dde.callbacks.Callback`` instances. List of callbacks
                to apply during prediction.
        """
        # Cast inputs to the configured floating-point precision.
        if isinstance(x, tuple):
            x = tuple(np.asarray(xi, dtype=config.real(np)) for xi in x)
        else:
            x = np.asarray(x, dtype=config.real(np))
        callbacks = CallbackList(callbacks=callbacks)
        callbacks.set_model(self)
        callbacks.on_predict_begin()

        # Fast path: plain forward pass, no operator to evaluate.
        if operator is None:
            y = self._outputs(False, x)
            callbacks.on_predict_end()
            return y

        # operator is not None
        if utils.get_num_args(operator) == 3:
            aux_vars = self.data.auxiliary_var_fn(x).astype(config.real(np))
        if backend_name == "tensorflow.compat.v1":
            if utils.get_num_args(operator) == 2:
                op = operator(self.net.inputs, self.net.outputs)
                feed_dict = self.net.feed_dict(False, x)
            elif utils.get_num_args(operator) == 3:
                op = operator(
                    self.net.inputs, self.net.outputs, self.net.auxiliary_vars
                )
                feed_dict = self.net.feed_dict(False, x, auxiliary_vars=aux_vars)
            y = self.sess.run(op, feed_dict=feed_dict)
        elif backend_name == "tensorflow":
            if utils.get_num_args(operator) == 2:

                @tf.function
                def op(inputs):
                    y = self.net(inputs)
                    return operator(inputs, y)

            elif utils.get_num_args(operator) == 3:

                @tf.function
                def op(inputs):
                    y = self.net(inputs)
                    return operator(inputs, y, aux_vars)

            y = op(x)
            y = utils.to_numpy(y)
        elif backend_name == "pytorch":
            self.net.eval()
            # requires_grad_ so the operator (e.g. a PDE residual) can take
            # gradients of outputs w.r.t. inputs.
            if isinstance(x, tuple):
                inputs = tuple(map(lambda x: torch.as_tensor(x).requires_grad_(), x))
            else:
                inputs = torch.as_tensor(x).requires_grad_()
            outputs = self.net(inputs)
            if utils.get_num_args(operator) == 2:
                y = operator(inputs, outputs)
            elif utils.get_num_args(operator) == 3:
                # TODO: Pytorch backend Implementation of Auxiliary variables.
                # y = operator(inputs, outputs, torch.as_tensor(aux_vars))
                raise NotImplementedError(
                    "Model.predict() with auxiliary variable hasn't been implemented "
                    "for backend pytorch."
                )
            # Clear cached Jacobians and Hessians.
            grad.clear()
            y = utils.to_numpy(y)
        elif backend_name == "paddle":
            self.net.eval()
            inputs = paddle.to_tensor(x, stop_gradient=False)
            outputs = self.net(inputs)
            if utils.get_num_args(operator) == 2:
                y = operator(inputs, outputs)
            elif utils.get_num_args(operator) == 3:
                # TODO: Paddle backend Implementation of Auxiliary variables.
                # y = operator(inputs, outputs, paddle.to_tensor(aux_vars))
                raise NotImplementedError(
                    "Model.predict() with auxiliary variable hasn't been implemented "
                    "for backend paddle."
                )
            y = utils.to_numpy(y)
        callbacks.on_predict_end()
        return y
# def evaluate(self, x, y, callbacks=None):
# """Returns the loss values & metrics values for the model in test mode."""
# raise NotImplementedError(
# "Model.evaluate to be implemented. Alternatively, use Model.predict."
# )
    def state_dict(self):
        """Returns a dictionary containing all variables.

        The key/value layout is backend-specific (TF1 variable names, Keras
        weight paths, or the native PyTorch/Paddle state dict).
        """
        if backend_name == "tensorflow.compat.v1":
            destination = OrderedDict()
            variables_names = [v.name for v in tf.global_variables()]
            values = self.sess.run(variables_names)
            for k, v in zip(variables_names, values):
                destination[k] = v
        elif backend_name == "tensorflow":
            # user-provided variables
            destination = {
                f"external_trainable_variable:{i}": v
                for (i, v) in enumerate(self.external_trainable_variables)
            }
            # the parameters of the net
            destination.update(self.net.get_weight_paths())
        elif backend_name in ["pytorch", "paddle"]:
            destination = self.net.state_dict()
        else:
            raise NotImplementedError(
                "state_dict hasn't been implemented for this backend."
            )
        return destination
    def save(self, save_path, protocol="backend", verbose=0):
        """Saves all variables to a disk file.

        Args:
            save_path (string): Prefix of filenames to save the model file.
            protocol (string): If `protocol` is "backend", save using the
                backend-specific method.

                - For "tensorflow.compat.v1", use `tf.train.Save <https://www.tensorflow.org/api_docs/python/tf/compat/v1/train/Saver#attributes>`_.
                - For "tensorflow", use `tf.keras.Model.save_weights <https://www.tensorflow.org/api_docs/python/tf/keras/Model#save_weights>`_.
                - For "pytorch", use `torch.save <https://pytorch.org/docs/stable/generated/torch.save.html>`_.
                - For "paddle", use `paddle.save <https://www.paddlepaddle.org.cn/documentation/docs/en/api/paddle/save_en.html>`_.

                If `protocol` is "pickle", save using the Python pickle module. Only the
                protocol "backend" supports ``restore()``.

        Returns:
            string: Path where model is saved.
        """
        # TODO: backend tensorflow
        # The epoch number is embedded in the filename so checkpoints don't clash.
        save_path = f"{save_path}-{self.train_state.epoch}"
        if protocol == "pickle":
            save_path += ".pkl"
            with open(save_path, "wb") as f:
                pickle.dump(self.state_dict(), f)
        elif protocol == "backend":
            if backend_name == "tensorflow.compat.v1":
                save_path += ".ckpt"
                self.saver.save(self.sess, save_path)
            elif backend_name == "tensorflow":
                save_path += ".ckpt"
                self.net.save_weights(save_path)
            elif backend_name == "pytorch":
                save_path += ".pt"
                # Save both network and optimizer state so training can resume.
                checkpoint = {
                    "model_state_dict": self.net.state_dict(),
                    "optimizer_state_dict": self.opt.state_dict(),
                }
                torch.save(checkpoint, save_path)
            elif backend_name == "paddle":
                save_path += ".pdparams"
                checkpoint = {
                    "model": self.net.state_dict(),
                    "opt": self.opt.state_dict(),
                }
                paddle.save(checkpoint, save_path)
            else:
                raise NotImplementedError(
                    "Model.save() hasn't been implemented for this backend."
                )
        if verbose > 0:
            print(
                "Epoch {}: saving model to {} ...\n".format(
                    self.train_state.epoch, save_path
                )
            )
        return save_path
    def restore(self, save_path, device=None, verbose=0):
        """Restore all variables from a disk file.

        Args:
            save_path (string): Path where model was previously saved.
            device (string, optional): Device to load the model on (e.g. "cpu","cuda:0"...). By default, the model is loaded on the device it was saved from.
        """
        # TODO: backend tensorflow
        if device is not None and backend_name != "pytorch":
            print(
                "Warning: device is only supported for backend pytorch. Model will be loaded on the device it was saved from."
            )
        if verbose > 0:
            print("Restoring model from {} ...\n".format(save_path))
        if backend_name == "tensorflow.compat.v1":
            self.saver.restore(self.sess, save_path)
        elif backend_name == "tensorflow":
            self.net.load_weights(save_path)
        elif backend_name == "pytorch":
            if device is not None:
                # map_location remaps tensors saved on another device.
                checkpoint = torch.load(save_path, map_location=torch.device(device))
            else:
                checkpoint = torch.load(save_path)
            self.net.load_state_dict(checkpoint["model_state_dict"])
            self.opt.load_state_dict(checkpoint["optimizer_state_dict"])
        elif backend_name == "paddle":
            checkpoint = paddle.load(save_path)
            self.net.set_state_dict(checkpoint["model"])
            self.opt.set_state_dict(checkpoint["opt"])
        else:
            raise NotImplementedError(
                "Model.restore() hasn't been implemented for this backend."
            )
def print_model(self):
"""Prints all trainable variables."""
# TODO: backend tensorflow, pytorch
if backend_name != "tensorflow.compat.v1":
raise NotImplementedError(
"state_dict hasn't been implemented for this backend."
)
variables_names = [v.name for v in tf.trainable_variables()]
values = self.sess.run(variables_names)
for k, v in zip(variables_names, values):
print("Variable: {}, Shape: {}".format(k, v.shape))
print(v)
class TrainState:
    """Tracks the data, per-step results, and best-so-far results of a training run."""

    def __init__(self):
        self.epoch = 0
        self.step = 0
        # Data currently used for training and testing.
        self.X_train = None
        self.y_train = None
        self.train_aux_vars = None
        self.X_test = None
        self.y_test = None
        self.test_aux_vars = None
        # Results produced at the current step (train side).
        self.loss_train = None
        self.y_pred_train = None
        # Results produced at the current step (test side).
        self.loss_test = None
        self.y_pred_test = None
        self.y_std_test = None
        self.metrics_test = None
        # Best results seen so far; "best" means minimum training loss.
        self.best_step = 0
        self.best_loss_train = np.inf
        self.best_loss_test = np.inf
        self.best_y = None
        self.best_ystd = None
        self.best_metrics = None

    def set_data_train(self, X_train, y_train, train_aux_vars=None):
        """Store the current training data (and optional auxiliary variables)."""
        self.X_train = X_train
        self.y_train = y_train
        self.train_aux_vars = train_aux_vars

    def set_data_test(self, X_test, y_test, test_aux_vars=None):
        """Store the current testing data (and optional auxiliary variables)."""
        self.X_test = X_test
        self.y_test = y_test
        self.test_aux_vars = test_aux_vars

    def update_best(self):
        """Record the current step as the best one if its summed train loss is the lowest so far."""
        current_loss = np.sum(self.loss_train)
        if current_loss < self.best_loss_train:
            self.best_step = self.step
            self.best_loss_train = current_loss
            self.best_loss_test = np.sum(self.loss_test)
            self.best_y = self.y_pred_test
            self.best_ystd = self.y_std_test
            self.best_metrics = self.metrics_test

    def disregard_best(self):
        """Forget the best train loss so that a later step can become the new best."""
        self.best_loss_train = np.inf
class LossHistory:
    """Accumulates the loss and metric values recorded over the course of training."""

    def __init__(self):
        self.steps = []
        self.loss_train = []
        self.loss_test = []
        self.metrics_test = []
        self.loss_weights = None

    def set_loss_weights(self, loss_weights):
        """Remember the weights applied to the individual loss terms."""
        self.loss_weights = loss_weights

    def append(self, step, loss_train, loss_test, metrics_test):
        """Record one training step; None test loss/metrics repeat the previous entry."""
        self.steps.append(step)
        self.loss_train.append(loss_train)
        # Carry the last recorded test values forward when none are supplied.
        self.loss_test.append(
            self.loss_test[-1] if loss_test is None else loss_test
        )
        self.metrics_test.append(
            self.metrics_test[-1] if metrics_test is None else metrics_test
        )
/AQoPA-0.9.5.tar.gz/AQoPA-0.9.5/aqopa/cmd.py | import optparse
import sys
import os
from aqopa import VERSION
from aqopa.bin import console, gui
def gui_command():
    """Launch the AQoPA graphical user interface and block until it exits."""
    # MainLoop() runs the wx event loop until the main window is closed.
    gui.AqopaApp(False).MainLoop()
def console_command():
    """Run an AQoPA simulation from the command line.

    Parses command-line options, validates that the model, metrics and
    configuration files were given and exist on disk, reads them, and hands
    their contents over to the console simulator.
    """
    parser = optparse.OptionParser()
    parser.usage = "%prog [options]"
    parser.add_option("-f", "--model-file", dest="model_file", metavar="FILE",
                      help="specifies model file")
    parser.add_option("-m", "--metrics-file", dest="metrics_file", metavar="FILE",
                      help="specifies file with metrics")
    parser.add_option("-c", "--config-file", dest="config_file", metavar="FILE",
                      help="specifies file with modules configuration")
    parser.add_option("-s", "--states", dest="save_states", action="store_true", default=False,
                      help="save states flow in a file")
    parser.add_option("-p", '--progressbar', dest="show_progressbar", action="store_true", default=False,
                      help="show the progressbar of the simulation")
    parser.add_option("-V", '--version', dest="show_version", action="store_true", default=False,
                      help="show version of AQoPA")
    parser.add_option("-d", "--debug", dest="debug", action="store_true", default=False,
                      help="DEBUG mode")
    (options, _args) = parser.parse_args()

    if options.show_version:
        # Parenthesized print works identically on Python 2 and Python 3.
        print("AQoPA (version %s)" % VERSION)
        sys.exit(0)

    # Each required input must be given and must exist on disk;
    # parser.error() prints the message and exits with status 2.
    required_files = [
        (options.model_file, "qopml model"),
        (options.metrics_file, "metrics"),
        (options.config_file, "configuration"),
    ]
    for path, label in required_files:
        if not path:
            parser.error("no %s file specified" % label)
        if not os.path.exists(path):
            parser.error("%s file '%s' does not exist" % (label, path))

    def read_file(path):
        # "with" guarantees the handle is closed even if reading fails.
        with open(path, 'r') as f:
            return f.read()

    qopml_model = read_file(options.model_file)
    qopml_metrics = read_file(options.metrics_file)
    qopml_config = read_file(options.config_file)

    console.run(qopml_model, qopml_metrics, qopml_config,
                save_states=options.save_states, debug=options.debug,
                show_progressbar=options.show_progressbar)
/ClipCap-1.0.0-py3-none-any.whl/clipcap/preprocess/reader.py |
from torch.utils.data.dataloader import default_collate
from torch.utils.data import DataLoader
from pathlib import Path
import io
def folder_to_keys(folder, media_file_extensions: list):
    """Index a folder of caption/media pairs by file stem.

    Args:
        folder: path to a folder searched recursively.
        media_file_extensions: media extensions (without the dot) to look for.

    Returns:
        (keys, text_files, image_files) where keys is the sorted list of stems
        having BOTH a .txt caption and a media file, and the two dicts map
        every found stem to its Path (not restricted to the shared keys).
    """
    path = Path(folder)
    text_files = {f.stem: f for f in path.glob("**/*.txt")}
    # Later extensions win when two media files share a stem (matches the
    # original flatten-then-overwrite behavior).
    image_files = {
        f.stem: f
        for filetype in media_file_extensions
        for f in path.glob(f"**/*.{filetype}")
    }
    # Only stems present in both indexes form usable (caption, media) pairs.
    keys = sorted(text_files.keys() & image_files.keys())
    return keys, text_files, image_files
def get_image_dataset():
    """Retrieve the ImageDataset class without importing torch at the top level."""
    from torch.utils.data import Dataset

    class ImageDataset(Dataset):
        """Pytorch Dataset exposing media tensors and caption text from a folder."""

        def __init__(
            self,
            sample_processor,
            folder,
            media_file_extensions,
            input_sampler=lambda a: a,
        ):
            super().__init__()
            keys, text_files, media_files = folder_to_keys(
                folder, media_file_extensions
            )
            # The sampler may subsample/reorder the usable keys.
            self.keys = input_sampler(keys)
            selected = set(self.keys)
            self.text_files = {k: f for k, f in text_files.items() if k in selected}
            self.media_files = {k: f for k, f in media_files.items() if k in selected}
            self.sample_processor = sample_processor

        def __len__(self):
            return len(self.keys)

        def __getitem__(self, index):
            key = self.keys[index]
            return {
                "data_tensor": self.sample_processor(self.media_files[key]),
                "text": self.text_files[key].read_text(),
            }

    return ImageDataset
def create_webdataset(
    urls,
    sample_processor,
    media_key="jpg",
    caption_key="txt",
    cache_path=None,
    input_sampler=lambda a: a,
):
    """Create a WebDataset reader, it can read a webdataset of image, text and json"""
    import webdataset as wds

    # The sampler may subsample/reorder the shard urls.
    urls = input_sampler(urls)
    dataset = wds.WebDataset(
        urls,
        cache_dir=cache_path,
        cache_size=10**10,
        handler=wds.handlers.warn_and_continue,
    )

    def has_required_keys(item):
        # Keep only samples that carry both the media payload and a caption.
        return caption_key in item and media_key in item

    def to_sample(item):
        tensor = sample_processor(io.BytesIO(item[media_key]))
        caption = item[caption_key].decode("utf-8")
        return {"data_tensor": tensor, "text": caption}

    return dataset.select(has_required_keys).map(
        to_sample, handler=wds.handlers.warn_and_continue
    )
def dataset_to_dataloader(dataset, batch_size, num_prepro_workers, input_format):
    """Create a pytorch dataloader from a dataset.

    Args:
        dataset: any dataset accepted by torch's DataLoader.
        batch_size (int): number of samples per batch.
        num_prepro_workers (int): number of preprocessing worker processes.
        input_format (str): "files" enables the None-filtering collate
            function; any other value uses default collation.

    Returns:
        A DataLoader over `dataset` (not shuffled).
    """

    def collate_fn(batch):
        # Drop samples that failed to load (returned as None) before collating.
        batch = [sample for sample in batch if sample is not None]
        return default_collate(batch)

    extra_kwargs = {}
    if num_prepro_workers > 0:
        # prefetch_factor is only legal with worker processes; passing it
        # together with num_workers=0 raises in recent torch versions.
        extra_kwargs["prefetch_factor"] = 2

    return DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_prepro_workers,
        pin_memory=True,
        collate_fn=collate_fn if input_format == "files" else None,
        **extra_kwargs,
    )
class FilesReader:
    """FilesReader batches media/caption pairs read from a folder on disk."""

    def __init__(
        self,
        sampler,
        sample_processor,
        input_dataset,
        media_file_extensions,
        batch_size,
        num_prepro_workers,
    ) -> None:
        super().__init__()
        # Build the folder-backed dataset, then wrap it in a dataloader that
        # filters out samples which failed to load ("files" collation).
        dataset_cls = get_image_dataset()
        dataset = dataset_cls(
            sample_processor, input_dataset, media_file_extensions, sampler
        )
        self.dataloader = dataset_to_dataloader(
            dataset, batch_size, num_prepro_workers, "files"
        )

    def __iter__(self):
        # Delegate iteration straight to the underlying dataloader.
        yield from self.dataloader
class WebdatasetReader:
    """WebdatasetReader batches media/caption samples streamed from a webdataset."""

    def __init__(
        self,
        sampler,
        sample_processor,
        input_dataset,
        batch_size,
        num_prepro_workers,
        wds_media_key="jpg",
        wds_caption_key="txt",
        cache_path=None,
    ):
        self.batch_size = batch_size
        # Build the streaming dataset, then wrap it in a dataloader using
        # default collation ("webdataset" mode).
        dataset = create_webdataset(
            input_dataset,
            sample_processor,
            media_key=wds_media_key,
            caption_key=wds_caption_key,
            cache_path=cache_path,
            input_sampler=sampler,
        )
        self.dataloader = dataset_to_dataloader(
            dataset, batch_size, num_prepro_workers, "webdataset"
        )

    def __iter__(self):
        # Delegate iteration straight to the underlying dataloader.
        yield from self.dataloader
/BRACoD-0.3.3.tar.gz/BRACoD-0.3.3/README.md | # BRACoD: Bayesian Regression Analysis of Compositional Data
### Installation
Installation in python:
pip install BRACoD
There is also an R interface, which requires the Python package to be installed. A helper function can install the Python package for you, but it may be easier to install it yourself with pip.
devtools::install_github("ajverster/BRACoD/BRACoD.R")
### Python Walkthrough
1. Simulate some data and normalize it
```python
import BRACoD
import numpy as np
sim_counts, sim_y, contributions = BRACoD.simulate_microbiome_counts(BRACoD.df_counts_obesity)
sim_relab = BRACoD.scale_counts(sim_counts)
```
2. Run BRACoD
```python
trace = BRACoD.run_bracod(sim_relab, sim_y, n_sample = 1000, n_burn=1000, njobs=4)
```
3. Examine the diagnostics
```python
BRACoD.convergence_tests(trace, sim_relab)
```
4. Examine the results
```python
df_results = BRACoD.summarize_trace(trace, sim_counts.columns, 0.3)
```
5. Compare the results to the simulated truth
```python
taxon_identified = df_results["taxon_num"].values
taxon_actual = np.where(contributions != 0)[0]
precision, recall, f1 = BRACoD.score(taxon_identified, taxon_actual)
print("Precision: {}, Recall: {}, F1: {}".format(precision, recall, f1))
```
6. Try with your real data. We have included some functions to help you threshold and process your data
```python
df_counts = BRACoD.threshold_count_data(BRACoD.df_counts_obesity)
df_rel = BRACoD.scale_counts(df_counts)
df_rel, Y = BRACoD.remove_null(df_rel, BRACoD.df_scfa_obesity["butyric"].values)
trace = BRACoD.run_bracod(df_rel, Y, n_sample = 1000, n_burn=1000, njobs=4)
df_results = BRACoD.summarize_trace(trace, df_rel.columns, 0.3)
```
### R Walkthrough
1. Simulate some data and normalize it
```R
library('BRACoD.R')
data(obesity)
r <- simulate_microbiome_counts(df_counts_obesity)
sim_counts <- r[[1]]
sim_y <- r[[2]]
contributions <- r[[3]]
sim_relab <- scale_counts(sim_counts)
```
2. Run BRACoD
```R
trace <- run_bracod(sim_relab, sim_y, n_sample = 1000, n_burn=1000, njobs=4)
```
3. Examine the diagnostics
```R
convergence_tests(trace, sim_relab)
```
4. Examine the results
```R
df_results <- summarize_trace(trace, colnames(sim_counts))
```
5. Compare the results to the simulated truth
```R
taxon_identified <- df_results$taxon_num
taxon_actual <- which(contributions != 0)
r <- score(taxon_identified, taxon_actual)
precision <- r[[1]]
recall <- r[[2]]
f1 <- r[[3]]
print(sprintf("Precision: %.2f, Recall: %.2f, F1: %.2f",precision, recall, f1))
```
6. Try with your real data. We have included some functions to help you threshold and process your data
```R
df_counts_obesity_sub <- threshold_count_data(df_counts_obesity)
df_rel <- scale_counts(df_counts_obesity_sub)
r <- remove_null(df_rel, df_scfa$butyric)
df_rel <- r[[1]]
Y <- r[[2]]
trace <- run_bracod(df_rel, Y, n_sample = 1000, n_burn=1000, njobs=4)
df_results <- summarize_trace(trace, colnames(df_counts_obesity_sub), 0.3)
```
| PypiClean |
/GReNaDIne-0.0.21.tar.gz/GReNaDIne-0.0.21/tutorials/Infer_dream5_E_coli_GRN_using_GENIE3.ipynb | ```
%matplotlib inline
import pandas as pd
```
# Load the dream5 dataset
Please download the following datasets from the [dream5 dedicated website](https://www.synapse.org/#!Synapse:syn3130840) (you need to create an account first):
+ `net3_expression_data.tsv`: E. coli gene expression data (MicroArray)
+ `net3_transcription_factors.tsv`: transcription factor genes
### Load the datasets
+ Load the gene expression dataset $X$
```
X = pd.read_csv("net3_expression_data.tsv",sep="\t").T# rows represent genes and columns represent conditions
```
+ Load the Transcription Factors list
```
tf = pd.read_csv("net3_transcription_factors.tsv",header=None)[0]
```
# Preprocessing
Apply a simple z-score gene-wise (axis=0)
```
from grenadine.Preprocessing.standard_preprocessing import z_score
X = z_score(X,axis=1)
```
# Infer the GRN
+ Load the score links function and GENIE3 method
```
from grenadine.Inference.inference import score_links
from grenadine.Inference.regression_predictors import GENIE3
```
+ Choose the parameters of the underlying Random Forest of the GENIE3 method
(the parameters are the same as those of [sklearn RandomForestRegressor](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html))
```
GENIE3_params = {"n_estimators":30,
'max_depth':3}
```
+ Score all the possible edges between Transcription Factors and Target Genes
```
score_matrix = score_links(X, GENIE3, tf, **GENIE3_params)
```
# Visualize the results
```
import matplotlib.pyplot as plt
import seaborn as sns
plt.imshow(score_matrix,aspect="auto")
```
# Rank the links according to their scores
```
from grenadine.Inference.inference import rank_GRN
ranking = rank_GRN(score_matrix)
ranking.head(20)
```
# Evaluate the Results
+ Download the gold standard dataset `DREAM5_NetworkInference_GoldStandard_Network3 - E. coli.tsv` from the [dream5 website](https://www.synapse.org/#!Synapse:syn2787213)
+ Load the gold standard
```
grn = pd.read_csv("DREAM5_NetworkInference_GoldStandard_Network3 - E. coli.tsv",sep="\t",header=None)
# Rename the columns and the index
grn.columns = ["TF","TG","IS_REGULATED"]
grn.index = grn["TF"]+"_"+grn["TG"]
# Drop duplicate rows
grn = grn.drop_duplicates()
```
+ Load the `evaluate_result` function
```
from grenadine.Evaluation.evaluation import evaluate_result
metrics = evaluate_result(score_matrix, grn, n_links=100000)
metrics
```
| PypiClean |
/GxSphinx-1.0.0.tar.gz/GxSphinx-1.0.0/doc/development/tutorials/todo.rst | Developing a "TODO" extension
=============================
The objective of this tutorial is to create a more comprehensive extension than
that created in :doc:`helloworld`. Whereas that guide just covered writing a
custom :term:`directive`, this guide adds multiple directives, along with custom
nodes, additional config values and custom event handlers. To this end, we will
cover a ``todo`` extension that adds capabilities to include todo entries in the
documentation, and to collect these in a central place. This is similar the
``sphinxext.todo`` extension distributed with Sphinx.
Overview
--------
.. note::
To understand the design of this extension, refer to
:ref:`important-objects` and :ref:`build-phases`.
We want the extension to add the following to Sphinx:
* A ``todo`` directive, containing some content that is marked with "TODO" and
only shown in the output if a new config value is set. Todo entries should not
be in the output by default.
* A ``todolist`` directive that creates a list of all todo entries throughout
the documentation.
For that, we will need to add the following elements to Sphinx:
* New directives, called ``todo`` and ``todolist``.
* New document tree nodes to represent these directives, conventionally also
called ``todo`` and ``todolist``. We wouldn't need new nodes if the new
directives only produced some content representable by existing nodes.
* A new config value ``todo_include_todos`` (config value names should start
with the extension name, in order to stay unique) that controls whether todo
entries make it into the output.
* New event handlers: one for the :event:`doctree-resolved` event, to replace
the todo and todolist nodes, and one for :event:`env-purge-doc` (the reason
for that will be covered later).
Prerequisites
-------------
As with :doc:`helloworld`, we will not be distributing this plugin via PyPI so
once again we need a Sphinx project to call this from. You can use an existing
project or create a new one using :program:`sphinx-quickstart`.
We assume you are using separate source (:file:`source`) and build
(:file:`build`) folders. Your extension file could be in any folder of your
project. In our case, let's do the following:
#. Create an :file:`_ext` folder in :file:`source`
#. Create a new Python file in the :file:`_ext` folder called :file:`todo.py`
Here is an example of the folder structure you might obtain:
.. code-block:: text
└── source
├── _ext
│ └── todo.py
├── _static
├── conf.py
├── somefolder
├── index.rst
├── somefile.rst
└── someotherfile.rst
Writing the extension
---------------------
Open :file:`todo.py` and paste the following code in it, all of which we will
explain in detail shortly:
.. literalinclude:: examples/todo.py
:language: python
:linenos:
This is a far more extensive extension than the one detailed in
:doc:`helloworld`; however, we will look at each piece step-by-step to explain
what's happening.
.. rubric:: The node classes
Let's start with the node classes:
.. literalinclude:: examples/todo.py
:language: python
:linenos:
:lines: 8-21
Node classes usually don't have to do anything except inherit from the standard
docutils classes defined in :mod:`docutils.nodes`. ``todo`` inherits from
``Admonition`` because it should be handled like a note or warning, ``todolist``
is just a "general" node.
.. note::
Many extensions will not have to create their own node classes and work fine
with the nodes already provided by `docutils
<http://docutils.sourceforge.net/docs/ref/doctree.html>`__ and :ref:`Sphinx
<nodes>`.
.. attention::
It is important to know that while you can extend Sphinx without
leaving your ``conf.py``, if you declare an inherited node right
there, you'll hit an unobvious :py:class:`PickleError`. So if
something goes wrong, please make sure that you put inherited nodes
into a separate Python module.
For more details, see:
- https://github.com/sphinx-doc/sphinx/issues/6751
- https://github.com/sphinx-doc/sphinx/issues/1493
- https://github.com/sphinx-doc/sphinx/issues/1424
.. rubric:: The directive classes
A directive class is a class deriving usually from
:class:`docutils.parsers.rst.Directive`. The directive interface is also
covered in detail in the `docutils documentation`_; the important thing is that
the class should have attributes that configure the allowed markup, and a
``run`` method that returns a list of nodes.
Looking first at the ``TodolistDirective`` directive:
.. literalinclude:: examples/todo.py
:language: python
:linenos:
:lines: 24-27
It's very simple, creating and returning an instance of our ``todolist`` node
class. The ``TodolistDirective`` directive itself has neither content nor
arguments that need to be handled. That brings us to the ``TodoDirective``
directive:
.. literalinclude:: examples/todo.py
:language: python
:linenos:
:lines: 30-53
Several important things are covered here. First, as you can see, we're now
subclassing the :class:`~sphinx.util.docutils.SphinxDirective` helper class
instead of the usual :class:`~docutils.parsers.rst.Directive` class. This
gives us access to the :ref:`build environment instance <important-objects>`
using the ``self.env`` property. Without this, we'd have to use the rather
convoluted ``self.state.document.settings.env``. Then, to act as a link target
(from ``TodolistDirective``), the ``TodoDirective`` directive needs to return a
target node in addition to the ``todo`` node. The target ID (in HTML, this will
be the anchor name) is generated by using ``env.new_serialno`` which returns a
new unique integer on each call and therefore leads to unique target names. The
target node is instantiated without any text (the first two arguments).
On creating admonition node, the content body of the directive are parsed using
``self.state.nested_parse``. The first argument gives the content body, and
the second one gives content offset. The third argument gives the parent node
of parsed result, in our case the ``todo`` node. Following this, the ``todo``
node is added to the environment. This is needed to be able to create a list of
all todo entries throughout the documentation, in the place where the author
puts a ``todolist`` directive. For this case, the environment attribute
``todo_all_todos`` is used (again, the name should be unique, so it is prefixed
by the extension name). It does not exist when a new environment is created, so
the directive must check and create it if necessary. Various information about
the todo entry's location are stored along with a copy of the node.
In the last line, the nodes that should be put into the doctree are returned:
the target node and the admonition node.
The node structure that the directive returns looks like this::
+--------------------+
| target node |
+--------------------+
+--------------------+
| todo node |
+--------------------+
\__+--------------------+
| admonition title |
+--------------------+
| paragraph |
+--------------------+
| ... |
+--------------------+
.. rubric:: The event handlers
Event handlers are one of Sphinx's most powerful features, providing a way to
do hook into any part of the documentation process. There are many events
provided by Sphinx itself, as detailed in :ref:`the API guide <events>`, and
we're going to use a subset of them here.
Let's look at the event handlers used in the above example. First, the one for
the :event:`env-purge-doc` event:
.. literalinclude:: examples/todo.py
:language: python
:linenos:
:lines: 56-61
Since we store information from source files in the environment, which is
persistent, it may become out of date when the source file changes. Therefore,
before each source file is read, the environment's records of it are cleared,
and the :event:`env-purge-doc` event gives extensions a chance to do the same.
Here we clear out all todos whose docname matches the given one from the
``todo_all_todos`` list. If there are todos left in the document, they will be
added again during parsing.
The other handler belongs to the :event:`doctree-resolved` event:
.. literalinclude:: examples/todo.py
:language: python
:linenos:
:lines: 64-103
The :event:`doctree-resolved` event is emitted at the end of :ref:`phase 3
(resolving) <build-phases>` and allows custom resolving to be done. The handler
we have written for this event is a bit more involved. If the
``todo_include_todos`` config value (which we'll describe shortly) is false,
all ``todo`` and ``todolist`` nodes are removed from the documents. If not,
``todo`` nodes just stay where and how they are. ``todolist`` nodes are
replaced by a list of todo entries, complete with backlinks to the location
where they come from. The list items are composed of the nodes from the
``todo`` entry and docutils nodes created on the fly: a paragraph for each
entry, containing text that gives the location, and a link (reference node
containing an italic node) with the backreference. The reference URI is built
by :meth:`sphinx.builders.Builder.get_relative_uri` which creates a suitable
URI depending on the used builder, and appending the todo node's (the target's)
ID as the anchor name.
.. rubric:: The ``setup`` function
.. currentmodule:: sphinx.application
As noted :doc:`previously <helloworld>`, the ``setup`` function is a requirement
and is used to plug directives into Sphinx. However, we also use it to hook up
the other parts of our extension. Let's look at our ``setup`` function:
.. literalinclude:: examples/todo.py
:language: python
:linenos:
:lines: 106-
The calls in this function refer to the classes and functions we added earlier.
What the individual calls do is the following:
* :meth:`~Sphinx.add_config_value` lets Sphinx know that it should recognize the
new *config value* ``todo_include_todos``, whose default value should be
``False`` (this also tells Sphinx that it is a boolean value).
If the third argument were ``'html'``, HTML documents would be fully rebuilt
if the config value changed. This is needed for config values that influence
reading (build :ref:`phase 1 (reading) <build-phases>`).
* :meth:`~Sphinx.add_node` adds a new *node class* to the build system. It also
can specify visitor functions for each supported output format. These visitor
functions are needed when the new nodes stay until :ref:`phase 4 (writing)
<build-phases>`. Since the ``todolist`` node is always replaced in
:ref:`phase 3 (resolving) <build-phases>`, it doesn't need any.
* :meth:`~Sphinx.add_directive` adds a new *directive*, given by name and class.
* Finally, :meth:`~Sphinx.connect` adds an *event handler* to the event whose
name is given by the first argument. The event handler function is called
with several arguments which are documented with the event.
With this, our extension is complete.
Using the extension
-------------------
As before, we need to enable the extension by declaring it in our
:file:`conf.py` file. There are two steps necessary here:
#. Add the :file:`_ext` directory to the `Python path`_ using
``sys.path.append``. This should be placed at the top of the file.
#. Update or create the :confval:`extensions` list and add the extension file
name to the list
In addition, we may wish to set the ``todo_include_todos`` config value. As
noted above, this defaults to ``False`` but we can set it explicitly.
For example:
.. code-block:: python
import os
import sys
sys.path.append(os.path.abspath("./_ext"))
extensions = ['todo']
todo_include_todos = False
You can now use the extension throughout your project. For example:
.. code-block:: rst
:caption: index.rst
Hello, world
============
.. toctree::
somefile.rst
someotherfile.rst
Hello world. Below is the list of TODOs.
.. todolist::
.. code-block:: rst
:caption: somefile.rst
foo
===
Some intro text here...
.. todo:: Fix this
.. code-block:: rst
:caption: someotherfile.rst
bar
===
Some more text here...
.. todo:: Fix that
Because we have configured ``todo_include_todos`` to ``False``, we won't
actually see anything rendered for the ``todo`` and ``todolist`` directives.
However, if we toggle this to true, we will see the output described
previously.
Further reading
---------------
For more information, refer to the `docutils`_ documentation and
:doc:`/extdev/index`.
.. _docutils: http://docutils.sourceforge.net/docs/
.. _Python path: https://docs.python.org/3/using/cmdline.html#envvar-PYTHONPATH
.. _docutils documentation: http://docutils.sourceforge.net/docs/ref/rst/directives.html
| PypiClean |
/FamcyDev-0.3.71-py3-none-any.whl/Famcy/node_modules/bower/packages/bower-registry-client/Client.js | var async = require('async');
var methods = require('./lib');
var Cache = require('./lib/util/Cache');
/**
 * Client for the bower registry.
 *
 * @param {Object} config Configuration as read by the bower-config module;
 *                        must contain a `registry` field.
 * @param {Object} logger Logger used by the client's methods.
 */
function RegistryClient(config, logger) {
    this._logger = logger;
    this._config = config;

    if (!this._config.registry) {
        throw new Error(
            'You need to pass config as read by bower-config module. Registry field is missing.'
        );
    }

    // Default the cache directory to the registry storage location, but only
    // when the caller did not set `cache` at all (own property check, so an
    // explicitly configured falsy value is respected).
    var hasOwnCache = Object.prototype.hasOwnProperty.call(this._config, 'cache');
    if (!hasOwnCache) {
        this._config.cache = this._config.storage
            ? this._config.storage.registry
            : null;
    }

    // Init the cache
    this._initCache();
}
// Attach the public API operations (implemented in ./lib) to the prototype,
// one method per registry operation.
RegistryClient.prototype.lookup = methods.lookup;
RegistryClient.prototype.search = methods.search;
RegistryClient.prototype.list = methods.list;
RegistryClient.prototype.register = methods.register;
RegistryClient.prototype.unregister = methods.unregister;
/**
 * Clear the persistent caches of the lookup, search and list methods.
 * `name` is forwarded to lookup/search only; list's cache is always
 * cleared wholesale. May be called as clearCache(callback).
 */
RegistryClient.prototype.clearCache = function(name, callback) {
    // Support the (callback) call form by shifting arguments.
    if (typeof name === 'function') {
        callback = name;
        name = null;
    }

    var tasks = [
        this.lookup.clearCache.bind(this, name),
        this.search.clearCache.bind(this, name),
        this.list.clearCache.bind(this)
    ];
    async.parallel(tasks, callback);
};
/**
 * Synchronously reset the in-memory caches of the lookup, search and list
 * methods. `name` is forwarded to lookup/search only; list's cache is always
 * reset wholesale. Returns `this` to allow chaining.
 */
RegistryClient.prototype.resetCache = function(name) {
    this.lookup.resetCache.call(this, name);
    this.search.resetCache.call(this, name);
    this.list.resetCache.call(this);
    return this;
};
/**
 * Static helper: drop the runtime (in-memory) caches by delegating to the
 * Cache utility. Affects all client instances, since caches are shared.
 */
RegistryClient.clearRuntimeCache = function() {
    Cache.clearRuntimeCache();
};
// -----------------------------
/**
 * Initialize this instance's cache object and each method's cache.
 * The per-directory cache lives on the constructor so that every client
 * instance pointing at the same cache directory shares a single cache.
 */
RegistryClient.prototype._initCache = function() {
    var dir = this._config.cache;

    // Lazily create the constructor-level registry of caches, then the
    // entry for this particular directory.
    var caches = this.constructor._cache = this.constructor._cache || {};
    this._cache = caches[dir] = caches[dir] || {};

    this.lookup.initCache.call(this);
    this.search.initCache.call(this);
    this.list.initCache.call(this);
};
// Expose the client constructor as the module's sole public interface.
module.exports = RegistryClient;
/Mathics-1.0.tar.gz/Mathics-1.0/mathics/web/media/js/inout.js | function showSave() {
requireLogin("You must login to save worksheets online.", function() {
showPopup($('save'));
});
}
function openWorksheet(name) {
hidePopup();
new Ajax.Request('/ajax/open/', {
method: 'post',
parameters: {
'name': name
},
onSuccess: function(transport) {
var response = transport.responseText.evalJSON();
if ($('document').visible())
setContent(response.content);
else
$('codetext').value = response.content;
}
})
}
function showOpen() {
requireLogin("You must login to open online worksheets.", function() {
new Ajax.Request('/ajax/getworksheets/', {
method: 'get',
onSuccess: function(transport) {
var response = transport.responseText.evalJSON();
var tbody = $('openFilelist');
tbody.deleteChildNodes();
response.worksheets.each(function(worksheet) {
tbody.appendChild($E('tr', $E('td',
$E('a', {'href': 'javascript:openWorksheet("' + worksheet.name + '")'},
$T(worksheet.name)
)
)));
});
showPopup($('open'));
}
});
});
}
function cancelSave() {
hidePopup();
}
function cancelOpen() {
hidePopup();
}
function save(overwrite) {
if (!overwrite)
overwrite = '';
var content;
if ($('document').visible())
content = getContent();
else
content = $('codetext').value;
submitForm('saveForm', '/ajax/save/', function(response) {
if (!checkLogin(response))
return;
cancelSave();
if (response.result == 'overwrite') {
showDialog("Overwrite worksheet", "There already exists a worksheet with the name '" +
response.form.values.name + "'. Do you want to overwrite it?",
'Yes, overwrite it', 'No, cancel', function() {
save(true);
});
}
}, {
'content': content,
'overwrite': overwrite
});
}
function switchCode() {
if ($('document').visible()) {
$('document').hide();
var content = getContent();
$('codetext').value = content;
$('code').show();
$('codelink').setText("Interactive mode");
} else {
var content = $('codetext').value;
setContent(content);
function load() {
$('code').hide();
$('document').show();
$('codelink').setText("View/edit code");
}
load();
}
}
function getContent() {
var queries = [];
$('queries').childElements().each(function(query) {
var item = {};
var textarea = query.select('textarea.request')[0];
item.request = textarea.value;
item.results = textarea.results;
queries.push(item);
});
var content = Object.toJSON(queries);
return content;
}
function setContent(content) {
$('queries').deleteChildNodes();
$('welcome').hide();
var queries = content.evalJSON();
queries.each(function(item) {
var li = createQuery(null, true, true);
li.textarea.value = item.request;
if( item.results != undefined ) {
setResult(li.ul, item.results);
li.textarea.results = item.results;
}
});
createSortable();
refreshInputSizes();
lastFocus = null;
if ($('queries').lastChild)
$('queries').lastChild.textarea.focus();
}
function createLink() {
var queries = new Array();
$('queries').childElements().each(function(query) {
var text = query.select('textarea.request')[0].getText();
queries[queries.length] = 'queries=' + encodeURIComponent(text);
});
var query = queries.join('&');
location.hash = '#' + btoa(query); //encodeURI(query);
}
// Populate the worksheet with the given query strings and submit them
// strictly one after another: each submitQuery() callback triggers the
// next submission, so server responses arrive in worksheet order.
function setQueries(queries) {
    var list = [];
    queries.each(function(query) {
        var li = createQuery(null, true, true);
        li.textarea.value = query;
        list.push({'li': li, 'query': query});
    });
    refreshInputSizes();
    // Sequential submission via chained callbacks; `index` walks `list`.
    function load(index) {
        if (index < list.length) {
            var item = list[index];
            submitQuery(item.li.textarea, function() {
                load(index + 1);
            });
        } else {
            // All queries answered: enable drag & drop and focus the last input.
            createSortable();
            lastFocus = null;
            if ($('queries').lastChild)
                $('queries').lastChild.textarea.focus();
        }
    }
    load(0);
}
// Restore queries encoded in the URL hash by createLink().
// Returns true when at least one query was loaded, false otherwise.
function loadLink() {
    var hash = location.hash;
    if (!hash || hash.length <= 1)
        return false;
    var queries = [];
    atob(hash.slice(1)).split('&').each(function(param) {
        if (!param.startsWith('queries='))
            return;
        var text = decodeURIComponent(param.slice(8));
        if (text != "")
            queries.push(text);
    });
    setQueries(queries);
    return queries.length > 0;
}
// Fill the worksheet with a fixed set of showcase queries demonstrating
// typesetting, 2D/3D plotting, symbolic math, matrices, and list operations.
// The strings are submitted verbatim, so they must stay exactly as written.
function showGallery() {
    setQueries([
        '1 + 2 - x * 3 x / y',
        'Sin[Pi]',
        'Plot[{Sin[x], Cos[x], Tan[x]}, {x, -3Pi, 3Pi}]',
        'Plot3D[Exp[x] Cos[y], {x, -2, 1}, {y, -Pi, 2 Pi}]',
        'translate[graphics_, {dx_,dy_,dz_}] := graphics /. Sphere[{x_,y_,z_}, r_] -> Sphere[{x+dx, y+dy, z+dz}, r]',
        'sierpinski[block_, size_] := translate[block, #*size*2]& /@ {{0,0,.6124}, {-.2886,-.5,-.204}, {-.2886,.5,-.204}, {.5774,0,-.204}}',
        'Graphics3D[{Yellow, First[Nest[{sierpinski[First[#], Last[#]], Last[#]*2}&, {Sphere[{0,0,0}, 1], 1}, 3]]}]',
        'N[E, 30]',
        'D[Sin[2x] + Log[x] ^ 2, x]',
        'Integrate[Tan[x] ^ 5, x]',
        'A = {{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}; MatrixForm[A]',
        'LinearSolve[A, {1, 1, 1}] // MatrixForm',
        'Eigenvalues[A]',
        '# ^ 2 & /@ Range[10]',
        'Graphics[Table[{EdgeForm[{GrayLevel[0, 0.5]}], Hue[(-11+q+10r)/72, 1, 1, 0.6], Disk[(8-r){Cos[2Pi q/12], Sin [2Pi q/12]}, (8-r)/3]}, {r, 6}, {q, 12}]]'
    ]);
}
/Django_patch-2.2.19-py3-none-any.whl/django/core/mail/backends/filebased.py |
import datetime
import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.mail.backends.console import (
EmailBackend as ConsoleEmailBackend,
)
class EmailBackend(ConsoleEmailBackend):
    """Email backend that appends outgoing messages to a log file.

    The target directory comes from the ``file_path`` argument or the
    ``EMAIL_FILE_PATH`` setting; it is validated (and created if missing)
    at construction time.
    """

    def __init__(self, *args, file_path=None, **kwargs):
        self._fname = None
        if file_path is None:
            file_path = getattr(settings, 'EMAIL_FILE_PATH', None)
        self.file_path = file_path
        # The configured path must be a string before it can be resolved.
        if not isinstance(self.file_path, str):
            raise ImproperlyConfigured('Path for saving emails is invalid: %r' % self.file_path)
        self.file_path = os.path.abspath(self.file_path)
        if os.path.exists(self.file_path):
            # An existing path must already be a directory.
            if not os.path.isdir(self.file_path):
                raise ImproperlyConfigured(
                    'Path for saving email messages exists, but is not a directory: %s' % self.file_path
                )
        else:
            # Otherwise, try to create the directory.
            try:
                os.makedirs(self.file_path)
            except OSError as err:
                raise ImproperlyConfigured(
                    'Could not create directory for saving email messages: %s (%s)' % (self.file_path, err)
                )
        # Either way the directory must be writable.
        if not os.access(self.file_path, os.W_OK):
            raise ImproperlyConfigured('Could not write to directory: %s' % self.file_path)
        # The console backend would default to stdout; force the stream to
        # None so open() lazily creates the log file instead.
        kwargs['stream'] = None
        super().__init__(*args, **kwargs)

    def write_message(self, message):
        out = self.stream
        out.write(message.message().as_bytes() + b'\n')
        out.write(b'-' * 79)
        out.write(b'\n')

    def _get_filename(self):
        """Return a unique file name."""
        if self._fname is None:
            timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
            self._fname = os.path.join(
                self.file_path, "%s-%s.log" % (timestamp, abs(id(self)))
            )
        return self._fname

    def open(self):
        # Returns True when a new stream was opened, False if already open.
        if self.stream is not None:
            return False
        self.stream = open(self._get_filename(), 'ab')
        return True

    def close(self):
        try:
            if self.stream is not None:
                self.stream.close()
        finally:
            self.stream = None
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/box_coders/faster_rcnn_box_coder.py | import tensorflow.compat.v1 as tf
from object_detection.core import box_coder
from object_detection.core import box_list
EPSILON = 1e-8
class FasterRcnnBoxCoder(box_coder.BoxCoder):
  """Faster RCNN box coder.

  Encodes/decodes boxes relative to anchors as [ty, tx, th, tw] targets,
  optionally scaled per coordinate.
  """

  def __init__(self, scale_factors=None):
    """Constructor for FasterRcnnBoxCoder.

    Args:
      scale_factors: List of 4 positive scalars to scale ty, tx, th and tw.
        If set to None, does not perform scaling. For Faster RCNN,
        the open-source implementation recommends using [10.0, 10.0, 5.0, 5.0].
    """
    if scale_factors:
      assert len(scale_factors) == 4
      assert all(scalar > 0 for scalar in scale_factors)
    self._scale_factors = scale_factors

  @property
  def code_size(self):
    # Four values per box: [ty, tx, th, tw].
    return 4

  def _encode(self, boxes, anchors):
    """Encode a box collection with respect to anchor collection.

    Args:
      boxes: BoxList holding N boxes to be encoded.
      anchors: BoxList of anchors.

    Returns:
      a tensor representing N anchor-encoded boxes of the format
      [ty, tx, th, tw].
    """
    ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()
    ycenter, xcenter, h, w = boxes.get_center_coordinates_and_sizes()
    # Guard against NaN from division by / log of zero-sized boxes.
    ha += EPSILON
    wa += EPSILON
    h += EPSILON
    w += EPSILON

    targets = [
        (ycenter - ycenter_a) / ha,      # ty
        (xcenter - xcenter_a) / wa,      # tx
        tf.log(h / ha),                  # th
        tf.log(w / wa),                  # tw
    ]
    # Scales location targets as used in paper for joint training.
    if self._scale_factors:
      targets = [t * s for t, s in zip(targets, self._scale_factors)]
    return tf.transpose(tf.stack(targets))

  def _decode(self, rel_codes, anchors):
    """Decode relative codes to boxes.

    Args:
      rel_codes: a tensor representing N anchor-encoded boxes.
      anchors: BoxList of anchors.

    Returns:
      boxes: BoxList holding N bounding boxes.
    """
    ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()

    ty, tx, th, tw = tf.unstack(tf.transpose(rel_codes))
    # Undo the per-coordinate scaling applied during encoding.
    if self._scale_factors:
      ty /= self._scale_factors[0]
      tx /= self._scale_factors[1]
      th /= self._scale_factors[2]
      tw /= self._scale_factors[3]
    h = tf.exp(th) * ha
    w = tf.exp(tw) * wa
    ycenter = ty * ha + ycenter_a
    xcenter = tx * wa + xcenter_a
    half_h = h / 2.
    half_w = w / 2.
    return box_list.BoxList(
        tf.transpose(tf.stack([ycenter - half_h, xcenter - half_w,
                               ycenter + half_h, xcenter + half_w])))
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dijit/ColorPalette.js | require({cache:{"url:dijit/templates/ColorPalette.html":"<div class=\"dijitInline dijitColorPalette\">\n\t<table dojoAttachPoint=\"paletteTableNode\" class=\"dijitPaletteTable\" cellSpacing=\"0\" cellPadding=\"0\" role=\"grid\">\n\t\t<tbody data-dojo-attach-point=\"gridNode\"></tbody>\n\t</table>\n</div>\n"}});
define("dijit/ColorPalette",["require","dojo/text!./templates/ColorPalette.html","./_Widget","./_TemplatedMixin","./_PaletteMixin","dojo/i18n","dojo/_base/Color","dojo/_base/declare","dojo/dom-class","dojo/dom-construct","dojo/_base/window","dojo/string","dojo/i18n!dojo/nls/colors","dojo/colors"],function(_1,_2,_3,_4,_5,_6,_7,_8,_9,_a,_b,_c){
var _d=_8("dijit.ColorPalette",[_3,_4,_5],{palette:"7x10",_palettes:{"7x10":[["white","seashell","cornsilk","lemonchiffon","lightyellow","palegreen","paleturquoise","lightcyan","lavender","plum"],["lightgray","pink","bisque","moccasin","khaki","lightgreen","lightseagreen","lightskyblue","cornflowerblue","violet"],["silver","lightcoral","sandybrown","orange","palegoldenrod","chartreuse","mediumturquoise","skyblue","mediumslateblue","orchid"],["gray","red","orangered","darkorange","yellow","limegreen","darkseagreen","royalblue","slateblue","mediumorchid"],["dimgray","crimson","chocolate","coral","gold","forestgreen","seagreen","blue","blueviolet","darkorchid"],["darkslategray","firebrick","saddlebrown","sienna","olive","green","darkcyan","mediumblue","darkslateblue","darkmagenta"],["black","darkred","maroon","brown","darkolivegreen","darkgreen","midnightblue","navy","indigo","purple"]],"3x4":[["white","lime","green","blue"],["silver","yellow","fuchsia","navy"],["gray","red","purple","black"]]},templateString:_2,baseClass:"dijitColorPalette",_dyeFactory:function(_e,_f,col){
return new this._dyeClass(_e,_f,col);
},buildRendering:function(){
this.inherited(arguments);
this._dyeClass=_8(_d._Color,{hc:_9.contains(_b.body(),"dijit_a11y"),palette:this.palette});
this._preparePalette(this._palettes[this.palette],_6.getLocalization("dojo","colors",this.lang));
}});
_d._Color=_8("dijit._Color",_7,{template:"<span class='dijitInline dijitPaletteImg'>"+"<img src='${blankGif}' alt='${alt}' class='dijitColorPaletteSwatch' style='background-color: ${color}'/>"+"</span>",hcTemplate:"<span class='dijitInline dijitPaletteImg' style='position: relative; overflow: hidden; height: 12px; width: 14px;'>"+"<img src='${image}' alt='${alt}' style='position: absolute; left: ${left}px; top: ${top}px; ${size}'/>"+"</span>",_imagePaths:{"7x10":_1.toUrl("./themes/a11y/colors7x10.png"),"3x4":_1.toUrl("./themes/a11y/colors3x4.png")},constructor:function(_10,row,col){
this._alias=_10;
this._row=row;
this._col=col;
this.setColor(_7.named[_10]);
},getValue:function(){
return this.toHex();
},fillCell:function(_11,_12){
var _13=_c.substitute(this.hc?this.hcTemplate:this.template,{color:this.toHex(),blankGif:_12,alt:this._alias,image:this._imagePaths[this.palette].toString(),left:this._col*-20-5,top:this._row*-20-5,size:this.palette=="7x10"?"height: 145px; width: 206px":"height: 64px; width: 86px"});
_a.place(_13,_11);
}});
return _d;
}); | PypiClean |
/CANberry-0.4.tar.gz/CANberry-0.4/canberry/bower_components/jquery/src/css.js | define([
"./core",
"./var/pnum",
"./core/access",
"./css/var/rmargin",
"./css/var/rnumnonpx",
"./css/var/cssExpand",
"./css/var/isHidden",
"./css/var/getStyles",
"./css/curCSS",
"./css/defaultDisplay",
"./css/addGetHookIf",
"./css/support",
"./data/var/data_priv",
"./core/init",
"./css/swap",
"./core/ready",
"./selector" // contains
], function( jQuery, pnum, access, rmargin, rnumnonpx, cssExpand, isHidden,
getStyles, curCSS, defaultDisplay, addGetHookIf, support, data_priv ) {
var
	// Swappable if display is none or starts with table except "table", "table-cell", or "table-caption"
	// See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display
	rdisplayswap = /^(none|table(?!-c[ea]).+)/,
	// Splits a value such as "12.5px" into its numeric part and unit
	rnumsplit = new RegExp( "^(" + pnum + ")(.*)$", "i" ),
	// Matches relative adjustments such as "+=10" / "-=2.5"
	rrelNum = new RegExp( "^([+-])=(" + pnum + ")", "i" ),
	// Styles applied temporarily so hidden elements can be measured
	cssShow = { position: "absolute", visibility: "hidden", display: "block" },
	// Computed values that the keyword "normal" resolves to
	cssNormalTransform = {
		letterSpacing: "0",
		fontWeight: "400"
	},
	cssPrefixes = [ "Webkit", "O", "Moz", "ms" ];
function vendorPropName( style, name ) {
// Shortcut for names that are not vendor prefixed
if ( name in style ) {
return name;
}
// Check for vendor prefixed names
var capName = name[0].toUpperCase() + name.slice(1),
origName = name,
i = cssPrefixes.length;
while ( i-- ) {
name = cssPrefixes[ i ] + capName;
if ( name in style ) {
return name;
}
}
return origName;
}
// Clamp a CSS dimension value to >= 0 after subtracting "subtract",
// re-attaching the original unit (defaulting to "px").
function setPositiveNumber( elem, value, subtract ) {
	var parsed = rnumsplit.exec( value );
	if ( !parsed ) {
		return value;
	}

	// Guard against undefined "subtract", e.g., when used as in cssHooks
	return Math.max( 0, parsed[ 1 ] - ( subtract || 0 ) ) + ( parsed[ 2 ] || "px" );
}
// Sum the box-model components (padding/border/margin) needed to convert
// between content-box and border-box measurements along one axis.
// "name" is "width" or "height"; "extra" selects the requested box.
function augmentWidthOrHeight( elem, name, extra, isBorderBox, styles ) {
	var i = extra === ( isBorderBox ? "border" : "content" ) ?
		// If we already have the right measurement, avoid augmentation
		// (start at 4 so the loop body never runs)
		4 :
		// Otherwise initialize for horizontal or vertical properties
		name === "width" ? 1 : 0,

		val = 0;

	// Step by 2 so only the two sides along the measured axis are visited
	for ( ; i < 4; i += 2 ) {
		// Both box models exclude margin, so add it if we want it
		if ( extra === "margin" ) {
			val += jQuery.css( elem, extra + cssExpand[ i ], true, styles );
		}

		if ( isBorderBox ) {
			// border-box includes padding, so remove it if we want content
			if ( extra === "content" ) {
				val -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles );
			}

			// At this point, extra isn't border nor margin, so remove border
			if ( extra !== "margin" ) {
				val -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles );
			}
		} else {
			// At this point, extra isn't content, so add padding
			val += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles );

			// At this point, extra isn't content nor padding, so add border
			if ( extra !== "padding" ) {
				val += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles );
			}
		}
	}

	return val;
}
// Measure an element's width or height in px: prefer offsetWidth/Height,
// fall back to computed then inline styles, then adjust for the requested
// box ("content", "padding", "border", or "margin") via augmentWidthOrHeight.
function getWidthOrHeight( elem, name, extra ) {

	// Start with offset property, which is equivalent to the border-box value
	var valueIsBorderBox = true,
		val = name === "width" ? elem.offsetWidth : elem.offsetHeight,
		styles = getStyles( elem ),
		isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box";

	// Some non-html elements return undefined for offsetWidth, so check for null/undefined
	// svg - https://bugzilla.mozilla.org/show_bug.cgi?id=649285
	// MathML - https://bugzilla.mozilla.org/show_bug.cgi?id=491668
	if ( val <= 0 || val == null ) {
		// Fall back to computed then uncomputed css if necessary
		val = curCSS( elem, name, styles );
		if ( val < 0 || val == null ) {
			val = elem.style[ name ];
		}

		// Computed unit is not pixels. Stop here and return.
		if ( rnumnonpx.test(val) ) {
			return val;
		}

		// Check for style in case a browser which returns unreliable values
		// for getComputedStyle silently falls back to the reliable elem.style
		valueIsBorderBox = isBorderBox &&
			( support.boxSizingReliable() || val === elem.style[ name ] );

		// Normalize "", auto, and prepare for extra
		val = parseFloat( val ) || 0;
	}

	// Use the active box-sizing model to add/subtract irrelevant styles
	return ( val +
		augmentWidthOrHeight(
			elem,
			name,
			extra || ( isBorderBox ? "border" : "content" ),
			valueIsBorderBox,
			styles
		)
	) + "px";
}
// Show or hide a collection of elements, remembering each element's previous
// display value in data storage under "olddisplay" so hide/show round-trips.
function showHide( elements, show ) {
	var display, elem, hidden,
		values = [],
		index = 0,
		length = elements.length;

	// First pass: record old display values (probing inline display where
	// needed); display writes are batched into the second pass.
	for ( ; index < length; index++ ) {
		elem = elements[ index ];
		if ( !elem.style ) {
			continue;
		}

		values[ index ] = data_priv.get( elem, "olddisplay" );
		display = elem.style.display;
		if ( show ) {
			// Reset the inline display of this element to learn if it is
			// being hidden by cascaded rules or not
			if ( !values[ index ] && display === "none" ) {
				elem.style.display = "";
			}

			// Set elements which have been overridden with display: none
			// in a stylesheet to whatever the default browser style is
			// for such an element
			if ( elem.style.display === "" && isHidden( elem ) ) {
				values[ index ] = data_priv.access( elem, "olddisplay", defaultDisplay(elem.nodeName) );
			}
		} else {
			hidden = isHidden( elem );

			if ( display !== "none" || !hidden ) {
				data_priv.set( elem, "olddisplay", hidden ? display : jQuery.css( elem, "display" ) );
			}
		}
	}

	// Set the display of most of the elements in a second loop
	// to avoid the constant reflow
	for ( index = 0; index < length; index++ ) {
		elem = elements[ index ];
		if ( !elem.style ) {
			continue;
		}
		if ( !show || elem.style.display === "none" || elem.style.display === "" ) {
			elem.style.display = show ? values[ index ] || "" : "none";
		}
	}

	return elements;
}
jQuery.extend({

	// Add in style property hooks for overriding the default
	// behavior of getting and setting a style property
	cssHooks: {
		opacity: {
			get: function( elem, computed ) {
				if ( computed ) {

					// We should always get a number back from opacity
					var ret = curCSS( elem, "opacity" );
					return ret === "" ? "1" : ret;
				}
			}
		}
	},

	// Don't automatically add "px" to these possibly-unitless properties
	cssNumber: {
		"columnCount": true,
		"fillOpacity": true,
		"flexGrow": true,
		"flexShrink": true,
		"fontWeight": true,
		"lineHeight": true,
		"opacity": true,
		"order": true,
		"orphans": true,
		"widows": true,
		"zIndex": true,
		"zoom": true
	},

	// Add in properties whose names you wish to fix before
	// setting or getting the value
	cssProps: {
		// "float" is a reserved word in older engines; map to cssFloat
		"float": "cssFloat"
	},

	// Get and set the style property on a DOM Node
	style: function( elem, name, value, extra ) {

		// Don't set styles on text and comment nodes
		if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) {
			return;
		}

		// Make sure that we're working with the right name
		var ret, type, hooks,
			origName = jQuery.camelCase( name ),
			style = elem.style;

		name = jQuery.cssProps[ origName ] || ( jQuery.cssProps[ origName ] = vendorPropName( style, origName ) );

		// Gets hook for the prefixed version, then unprefixed version
		hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ];

		// Check if we're setting a value
		if ( value !== undefined ) {
			type = typeof value;

			// Convert "+=" or "-=" to relative numbers (#7345)
			if ( type === "string" && (ret = rrelNum.exec( value )) ) {
				value = ( ret[1] + 1 ) * ret[2] + parseFloat( jQuery.css( elem, name ) );
				// Fixes bug #9237
				type = "number";
			}

			// Make sure that null and NaN values aren't set (#7116)
			// (value !== value is true only for NaN)
			if ( value == null || value !== value ) {
				return;
			}

			// If a number, add 'px' to the (except for certain CSS properties)
			if ( type === "number" && !jQuery.cssNumber[ origName ] ) {
				value += "px";
			}

			// Support: IE9-11+
			// background-* props affect original clone's values
			if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) {
				style[ name ] = "inherit";
			}

			// If a hook was provided, use that value, otherwise just set the specified value
			if ( !hooks || !("set" in hooks) || (value = hooks.set( elem, value, extra )) !== undefined ) {
				style[ name ] = value;
			}

		} else {
			// If a hook was provided get the non-computed value from there
			if ( hooks && "get" in hooks && (ret = hooks.get( elem, false, extra )) !== undefined ) {
				return ret;
			}

			// Otherwise just get the value from the style object
			return style[ name ];
		}
	},

	css: function( elem, name, extra, styles ) {
		var val, num, hooks,
			origName = jQuery.camelCase( name );

		// Make sure that we're working with the right name
		name = jQuery.cssProps[ origName ] || ( jQuery.cssProps[ origName ] = vendorPropName( elem.style, origName ) );

		// Try prefixed name followed by the unprefixed name
		hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ];

		// If a hook was provided get the computed value from there
		if ( hooks && "get" in hooks ) {
			val = hooks.get( elem, true, extra );
		}

		// Otherwise, if a way to get the computed value exists, use that
		if ( val === undefined ) {
			val = curCSS( elem, name, styles );
		}

		// Convert "normal" to computed value
		if ( val === "normal" && name in cssNormalTransform ) {
			val = cssNormalTransform[ name ];
		}

		// Make numeric if forced or a qualifier was provided and val looks numeric
		if ( extra === "" || extra ) {
			num = parseFloat( val );
			return extra === true || jQuery.isNumeric( num ) ? num || 0 : val;
		}
		return val;
	}
});
// Width/height hooks: measure via getWidthOrHeight() (temporarily showing
// swappable hidden elements), and clamp set values to >= 0.
jQuery.each([ "height", "width" ], function( i, name ) {
	jQuery.cssHooks[ name ] = {
		get: function( elem, computed, extra ) {
			if ( computed ) {

				// Certain elements can have dimension info if we invisibly show them
				// but it must have a current display style that would benefit
				return rdisplayswap.test( jQuery.css( elem, "display" ) ) && elem.offsetWidth === 0 ?
					jQuery.swap( elem, cssShow, function() {
						return getWidthOrHeight( elem, name, extra );
					}) :
					getWidthOrHeight( elem, name, extra );
			}
		},

		set: function( elem, value, extra ) {
			var styles = extra && getStyles( elem );
			return setPositiveNumber( elem, value, extra ?
				augmentWidthOrHeight(
					elem,
					name,
					extra,
					jQuery.css( elem, "boxSizing", false, styles ) === "border-box",
					styles
				) : 0
			);
		}
	};
});

// Support: Android 2.3
// marginRight is unreliable there (see support.reliableMarginRight);
// measure it with the element temporarily rendered as inline-block.
jQuery.cssHooks.marginRight = addGetHookIf( support.reliableMarginRight,
	function( elem, computed ) {
		if ( computed ) {
			return jQuery.swap( elem, { "display": "inline-block" },
				curCSS, [ elem, "marginRight" ] );
		}
	}
);

// These hooks are used by animate to expand properties
// (1-4 shorthand values expand to the four per-side properties via cssExpand)
jQuery.each({
	margin: "",
	padding: "",
	border: "Width"
}, function( prefix, suffix ) {
	jQuery.cssHooks[ prefix + suffix ] = {
		expand: function( value ) {
			var i = 0,
				expanded = {},

				// Assumes a single number if not a string
				parts = typeof value === "string" ? value.split(" ") : [ value ];

			for ( ; i < 4; i++ ) {
				expanded[ prefix + cssExpand[ i ] + suffix ] =
					parts[ i ] || parts[ i - 2 ] || parts[ 0 ];
			}

			return expanded;
		}
	};

	if ( !rmargin.test( prefix ) ) {
		jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber;
	}
});

jQuery.fn.extend({
	css: function( name, value ) {
		return access( this, function( elem, name, value ) {
			var styles, len,
				map = {},
				i = 0;

			// An array of property names returns a name -> value map
			if ( jQuery.isArray( name ) ) {
				styles = getStyles( elem );
				len = name.length;

				for ( ; i < len; i++ ) {
					map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles );
				}

				return map;
			}

			return value !== undefined ?
				jQuery.style( elem, name, value ) :
				jQuery.css( elem, name );
		}, name, value, arguments.length > 1 );
	},
	show: function() {
		return showHide( this, true );
	},
	hide: function() {
		return showHide( this );
	},
	toggle: function( state ) {
		if ( typeof state === "boolean" ) {
			return state ? this.show() : this.hide();
		}

		return this.each(function() {
			if ( isHidden( this ) ) {
				jQuery( this ).show();
			} else {
				jQuery( this ).hide();
			}
		});
	}
});
return jQuery;
}); | PypiClean |
/Django-Template-Preprocess-1.0.2.tar.gz/Django-Template-Preprocess-1.0.2/template_preprocess/processor.py | from django.conf import settings
from importlib import import_module
from template_preprocess.util.loader import Loader
from template_preprocess.util.content_type import filename_is_html
def process_sub_template(name, seen_templates):
    """Load the named template and run the preprocessor over it."""
    return process_template_content(
        Loader().get_template_content(name),
        seen_templates,
        subcall=True,
        is_html=filename_is_html(name),
    )
def process_template_content(content,
                             seen_templates=None,
                             subcall=False,
                             is_html=False):
    """Run every configured processor over the template content.

    Args:
        content: raw template source text.
        seen_templates: dict shared across recursive calls, used by the
            include/extends processors.
        subcall: True when this call is processing an included/extended
            template from within another template.
        is_html: whether the template is HTML; non-HTML templates skip
            processors flagged as ``html_only``.
    """
    # The basic strategy here is to build the template up to its full
    # included/extended size, then work on the minimizing or precomputing
    # content from there. That makes it multi-pass, but it avoids having a
    # dependency order.

    # If anything fails, just return the original template. Worst case is
    # django's default behavior.
    if seen_templates is None:
        seen_templates = {}

    original_content = content
    processors = get_processors()
    for processor in processors:
        try:
            method = processor["method"]
            only_html = processor["html_only"]
            if only_html and not is_html:
                continue

            content = method(content,
                             seen_templates=seen_templates,
                             template_processor=process_sub_template,
                             )
        except Exception as ex:
            # We want to return the original template content if there are any
            # errors. If we're processing an included/extended template, we
            # need to kick it back another level so the top-level call can
            # fall back.
            if subcall:
                raise
            return original_content

    return content
def get_default_config():
    """Return the default, ordered processor configuration."""
    html_only_methods = (
        "template_preprocess.process.compress_statics.process",
        "template_preprocess.process.html_minify.process",
        "template_preprocess.process.static.handle_static_tag",
    )
    config = [
        {"method": "template_preprocess.process.extends.handle_extends"},
        {"method": "template_preprocess.process.includes.handle_includes"},
    ]
    config.extend(
        {"method": method, "html_only": True} for method in html_only_methods
    )
    # minify won't minify content in <script> tags, so handlebars needs
    # to be the last thing done
    config.append({"method": "template_preprocess.process.handlebars.process"})
    return config
def get_processors():
    """Resolve the configured processor dotted paths into callables.

    Reads ``TEMPLATE_PREPROCESS_PROCESSORS`` from the Django settings
    (falling back to :func:`get_default_config`) and returns a list of
    ``{"method": callable, "html_only": bool}`` dicts.

    Raises:
        ImproperlyConfigured: if a module cannot be imported or does not
            define the named attribute.  (Fix: ``ImproperlyConfigured`` was
            previously raised here without ever being imported, turning any
            configuration error into a ``NameError``; it is now imported at
            the top of the module.)
    """
    config = getattr(settings,
                     "TEMPLATE_PREPROCESS_PROCESSORS",
                     get_default_config())

    processors = []
    for value in config:
        name = value["method"]
        module, attr = name.rsplit('.', 1)
        try:
            mod = import_module(module)
        except ImportError as e:
            raise ImproperlyConfigured('Error importing module %s: "%s"' %
                                       (module, str(e)))
        try:
            method = getattr(mod, attr)
        except AttributeError:
            raise ImproperlyConfigured('Module "%s" does not define a '
                                       '"%s" method' % (module, attr))

        processors.append({
            "method": method,
            # Treat any truthy "html_only" value as True, default False.
            "html_only": bool(value.get("html_only", False)),
        })

    return processors
/Flask-Statics-Helper-1.0.0.tar.gz/Flask-Statics-Helper-1.0.0/flask_statics/static/angular/i18n/angular-locale_gv.js | 'use strict';
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};
// Number of digits after the decimal point in the default string form of n.
function getDecimals(n) {
  var s = n + '';
  var dot = s.indexOf('.');
  return (dot == -1) ? 0 : s.length - dot - 1;
}
// CLDR plural operands: v = number of visible fraction digits (capped at 3
// unless opt_precision is given), f = the visible fraction digits as an int.
function getVF(n, opt_precision) {
  var v = (opt_precision === undefined) ? Math.min(getDecimals(n), 3)
                                        : opt_precision;
  var base = Math.pow(10, v);
  return {v: v, f: ((n * base) | 0) % base};
}
$provide.value("$locale", {
"DATETIME_FORMATS": {
"AMPMS": [
"a.m.",
"p.m."
],
"DAY": [
"Jedoonee",
"Jelhein",
"Jemayrt",
"Jercean",
"Jerdein",
"Jeheiney",
"Jesarn"
],
"MONTH": [
"Jerrey-geuree",
"Toshiaght-arree",
"Mayrnt",
"Averil",
"Boaldyn",
"Mean-souree",
"Jerrey-souree",
"Luanistyn",
"Mean-fouyir",
"Jerrey-fouyir",
"Mee Houney",
"Mee ny Nollick"
],
"SHORTDAY": [
"Jed",
"Jel",
"Jem",
"Jerc",
"Jerd",
"Jeh",
"Jes"
],
"SHORTMONTH": [
"J-guer",
"T-arree",
"Mayrnt",
"Avrril",
"Boaldyn",
"M-souree",
"J-souree",
"Luanistyn",
"M-fouyir",
"J-fouyir",
"M.Houney",
"M.Nollick"
],
"fullDate": "EEEE dd MMMM y",
"longDate": "dd MMMM y",
"medium": "MMM dd, y HH:mm:ss",
"mediumDate": "MMM dd, y",
"mediumTime": "HH:mm:ss",
"short": "dd/MM/yy HH:mm",
"shortDate": "dd/MM/yy",
"shortTime": "HH:mm"
},
"NUMBER_FORMATS": {
"CURRENCY_SYM": "\u00a3",
"DECIMAL_SEP": ".",
"GROUP_SEP": ",",
"PATTERNS": [
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 3,
"minFrac": 0,
"minInt": 1,
"negPre": "-",
"negSuf": "",
"posPre": "",
"posSuf": ""
},
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 2,
"minFrac": 2,
"minInt": 1,
"negPre": "\u00a4-",
"negSuf": "",
"posPre": "\u00a4",
"posSuf": ""
}
]
},
"id": "gv",
"pluralCat": function(n, opt_precision) { var i = n | 0; var vf = getVF(n, opt_precision); if (i == 1 && vf.v == 0) { return PLURAL_CATEGORY.ONE; } return PLURAL_CATEGORY.OTHER;}
});
}]); | PypiClean |
/Glances-3.4.0.3.tar.gz/Glances-3.4.0.3/docs/aoa/smart.rst | .. _smart:
SMART
=====
*Availability: all but Mac OS*
*Dependency: this plugin uses the optional pySMART Python lib*
This plugin is disabled by default. Please use the ``--enable-plugin smart``
option to enable it.
.. image:: ../_static/smart.png
Glances displays all the SMART attributes.
How to read the information:
- The first line display the name and model of the device
- The first column is the SMART attribute name
- The second column is the SMART attribute raw value
.. warning::
This plugin needs administrator rights. Please run Glances as root/admin.
| PypiClean |
/BenchExec-3.17.tar.gz/BenchExec-3.17/benchexec/tools/abc.py |
import re
import logging
import benchexec.result as result
import benchexec.tools.template
class Tool(benchexec.tools.template.BaseTool2):
    """
    Tool info for ABC: A System for Sequential Synthesis and Verification
    URL: https://people.eecs.berkeley.edu/~alanmi/abc/
    """

    def executable(self, tool_locator):
        return tool_locator.find_executable("abc", subdir="bin")

    def name(self):
        return "ABC"

    def cmdline(self, executable, options, task, rlimits):
        input_file = task.single_input_file
        # The default read method in ABC cannot process uninitialized
        # registers properly, so AIGER inputs (*.aig) go through the newer
        # `&r` (`&read`) command instead.
        if input_file.endswith(".aig"):
            return [executable, "-c", f"&r {input_file}; &put", *options]
        # Other formats (e.g. *.blif, *.bench, *.v, ...) use the default
        # read method.
        return [executable, *options, input_file]

    def determine_result(self, run):
        """Map ABC's output to a benchexec result status."""
        if run.was_timeout:
            return result.RESULT_TIMEOUT
        for line in run.output:
            if line.startswith(("Property proved", "Networks are equivalent")):
                return result.RESULT_TRUE_PROP
            if "was asserted in frame" in line or line.startswith(
                "Networks are NOT EQUIVALENT"
            ):
                return result.RESULT_FALSE_PROP
            if line.startswith("Networks are UNDECIDED"):
                return result.RESULT_UNKNOWN
        return result.RESULT_ERROR

    def get_value_from_output(self, output, identifier):
        """Return the first number following `identifier` in the output.

        The number may be an integer, a decimal, or in scientific notation.
        Repeated matches are skipped with a warning (they may indicate
        statistics from multiple sequential analyses).
        """
        integer = r"(\d+)"
        decimal = r"(\d+\.\d*|\d*\.\d+)"
        scientific = r"(\d\.?\d*[Ee][+\-]?\d+)"
        pattern = re.compile(
            re.escape(identifier)
            + r"\s*[:=]?\s*(-?("
            + integer
            + r"|"
            + decimal
            + r"|"
            + scientific
            + r"))(\s|$)"
        )
        value = None
        for line in output:
            found = pattern.search(line)
            if not found:
                continue
            if value is None:
                value = found.group(1)
            else:
                logging.warning(
                    "skipping repeated matches for identifier '%s': '%s'",
                    identifier,
                    line,
                )
        return value
/BIT_framework-0.0.2-py3-none-any.whl/BIT_DL/pytorch/modules/networks/network_base.py | import sys
from typing import Any, Dict, List, Optional, Union
import torch
from torch import nn
from BIT_DL.pytorch.core.layers import get_layer
from BIT_DL.pytorch.hyperparams import HParams
from BIT_DL.pytorch.module_base import ModuleBase
from BIT_DL.pytorch.utils.utils import uniquify_str
__all__ = [
"FeedForwardNetworkBase",
]
class FeedForwardNetworkBase(ModuleBase):
r"""Base class inherited by all feed-forward network classes.
Args:
hparams (dict, optional): Hyperparameters. Missing
hyperparameters will be set to default values. See
:meth:`default_hparams` for the hyperparameter structure and
default values.
See :meth:`forward` for the inputs and outputs.
"""
def __init__(self,
hparams: Optional[Union[HParams, Dict[str, Any]]] = None):
super().__init__(hparams)
self._layers = nn.ModuleList()
self._layer_names: List[str] = []
self._layers_by_name: Dict[str, nn.Module] = {}
self._layer_outputs: List[torch.Tensor] = []
self._layer_outputs_by_name: Dict[str, torch.Tensor] = {}
@staticmethod
def default_hparams() -> Dict[str, Any]:
r"""Returns a dictionary of hyperparameters with default values.
.. code-block:: python
{
"name": "NN"
}
"""
return {
"name": "NN"
}
def __repr__(self) -> str:
if len(list(self.modules())) == 1: # only contains `_layers`
return ModuleBase.__repr__(self._layers)
return super().__repr__()
def forward(self, # type: ignore
input: torch.Tensor) -> torch.Tensor:
r"""Feeds forward inputs through the network layers and returns outputs.
Args:
input: The inputs to the network. The requirements on inputs
depends on the first layer and subsequent layers in the
network.
Returns:
The output of the network.
"""
outputs = input
for layer in self._layers:
outputs = layer(outputs)
return outputs
def append_layer(self, layer: Union[nn.Module, HParams, Dict[str, Any]]):
r"""Appends a layer to the end of the network.
Args:
layer: A subclass of :torch_nn:`Module`, or a dict of layer
hyperparameters.
"""
layer_ = layer
if not isinstance(layer_, nn.Module):
layer_ = get_layer(hparams=layer_)
self._layers.append(layer_)
layer_name = uniquify_str(layer_.__class__.__name__, self._layer_names)
self._layer_names.append(layer_name)
self._layers_by_name[layer_name] = layer_
def has_layer(self, layer_name: str) -> bool:
r"""Returns `True` if the network with the name exists. Returns
`False` otherwise.
Args:
layer_name (str): Name of the layer.
"""
return layer_name in self._layers_by_name
def layer_by_name(self, layer_name: str) -> Optional[nn.Module]:
r"""Returns the layer with the name. Returns `None` if the layer name
does not exist.
Args:
layer_name (str): Name of the layer.
"""
return self._layers_by_name.get(layer_name, None)
@property
def layers_by_name(self) -> Dict[str, nn.Module]:
r"""A dictionary mapping layer names to the layers.
"""
return self._layers_by_name
@property
def layers(self) -> nn.ModuleList:
r"""A list of the layers.
"""
return self._layers
@property
def layer_names(self) -> List[str]:
r"""A list of uniquified layer names.
"""
return self._layer_names
def _build_layers(self,
                  layers: Optional[nn.ModuleList] = None,
                  layer_hparams: Optional[List[
                      Union[HParams, Dict[str, Any]]]] = None):
    r"""Builds layers.

    Either :attr:`layer_hparams` or :attr:`layers` must be
    provided. If both are given, :attr:`layers` will be used.

    Args:
        layers (optional): A list of layer instances supplied as an instance
            of :torch_nn:`ModuleList`.
        layer_hparams (optional): A list of layer hparams, each to which
            is fed to :func:`~BIT_DL.pytorch.core.layers.get_layer` to create
            the layer instance.

    Raises:
        ValueError: If neither ``layers`` nor ``layer_hparams`` is given.
    """
    if layers is not None:
        self._layers = layers
    else:
        if layer_hparams is None:
            # Fixed message: the parameter is named `layers`, not `layer`.
            raise ValueError(
                'Either `layers` or `layer_hparams` is required.')
        # Build each layer from its hyperparameter spec; the loop index of
        # the original enumerate() was unused, so iterate directly.
        self._layers = nn.ModuleList(
            [get_layer(hparams=hparams) for hparams in layer_hparams])
    for layer in self._layers:
        # Register every layer under a unique, human-readable name.
        layer_name = uniquify_str(layer.__class__.__name__,
                                  self._layer_names)
        self._layer_names.append(layer_name)
        self._layers_by_name[layer_name] = layer
/IsoScore-1.0.tar.gz/IsoScore-1.0/README.md | # IsoScore
This contains the Python3 implementation of IsoScore, which was originally
introduced in the 2021 paper by William Rudman, Nate Gillman, Taylor Rayne, and
Carsten Eickhoff. IsoScore is a tool which measures how uniformly a point cloud
utilizes the Euclidean space that it sits inside of. See the original paper for more information.
### How to use
The only dependencies are `numpy` and `sklearn`.
```python3
import numpy as np
from IsoScore import IsoScore
# Computing the IsoScore for a fuzzy ball in R^3
point_cloud_isotropic = np.random.normal(size=(3,100))
the_score = IsoScore.IsoScore(point_cloud_isotropic)
print(f"The IsoScore for 100 points sampled from this Gaussian ball in R^3 is {the_score},")
# Computing the IsoScore for points sampled from the line t \mapsto (t, 2t, 3t) in R^3
random_array = np.random.normal(size=100)
point_cloud_anisotropic = np.array([random_array, 2*random_array, 3*random_array])
the_score = IsoScore.IsoScore(point_cloud_anisotropic)
print(f"and the IsoScore for 100 points sampled from this line in R^3 is {the_score}.")
```
### License
This project is licensed under the MIT License.
| PypiClean |
/Downpour-0.2.tar.gz/Downpour-0.2/downpour/core/organizer.py | from downpour.download import Status
from downpour.core import models
from twisted.internet import defer, threads
from time import time
from datetime import datetime
from dateutil.parser import parse as parsedate
import logging, os, re, mimetypes, shutil
# Display labels for each supported media classification.
mediatypes = {
    'audio/music': 'Music',
    'audio/podcast': 'Podcasts',
    'audio/other': 'Other Audio',
    'video/movie': 'Movies',
    'video/tv': 'TV Series',
    'video/other': 'Other Video',
    'image/photos': 'Photos',
    'image/other': 'Other Images'
}

# MIME-type prefixes accepted for each classification; used by import_files
# to drop non-media files when a library is not configured to keep all files.
media_mimetypes = {
    'audio/music': ['audio/'],
    'audio/podcast': ['audio/'],
    'audio/other': ['audio/'],
    'video/movie': ['video/'],
    'video/tv': ['video/'],
    'video/other': ['video/'],
    'image/photos': ['image/'],
    'image/other': ['image/']
}

# Extensions that the stdlib `mimetypes` module may not recognize.
extra_mimetypes = {
    'mkv': 'video/x-matroska',
    'mka': 'audio/x-matroska',
}
# Regexes used to parse metadata (artist/album/track, movie name/year,
# series/season/episode, ...) out of downloaded file paths, tried in order
# per media type. The named groups map to the single-letter placeholders
# consumed by rename_patterns via pattern_replace().
match_patterns = {
    'audio/music': [
        # Artist - Album/Number - Track Name.ext
        re.compile(r'(?P<a>[^/]+?)[ _]*-[ _]*(?P<b>[^/]+)/(?P<t>[0-9]+)[ _]*-[ _]*(?P<n>[^/]+)\.(?P<x>\w+)$', re.IGNORECASE),
        # Artist - Album/Track Name.ext
        re.compile(r'(?P<a>[^/]+?)[ _]*-[ _]*(?P<b>[^/]+)/(?P<n>[^/]+)\.(?P<x>\w+)$', re.IGNORECASE),
        # Artist/Album/Number - Track Name.ext
        re.compile(r'(?P<a>[^/]+)/(?P<b>[^/]+)/(?P<t>[0-9]+)[ _]*-[ _]*(?P<n>[^/]+)\.(?P<x>\w+)$', re.IGNORECASE),
        # Artist/Album/Track Name.ext
        re.compile(r'(?P<a>[^/]+)/(?P<b>[^/]+)/(?P<n>[^/]+)\.(?P<x>\w+)$', re.IGNORECASE),
        # Artist - Track Name.ext
        re.compile(r'(?P<a>[^/]+?)[ _]*-[ _]*(?P<n>[^/]+)\.(?P<x>\w+)$', re.IGNORECASE),
        # Track Name.ext
        re.compile(r'(?P<n>[^/]+)\.(?P<x>\w+)$', re.IGNORECASE),
    ],
    'audio/podcast': [
        # TODO I don't use podcasts, need to look up examples
    ],
    'audio/other': [
    ],
    'video/movie': [
        # Movie Name (2009).avi
        # Movie.Name.2009.mp4
        # Movie.Name[2009].DVDRIP.XviD.avi
        re.compile(r'(?P<n>[^/]+?)\W?[\W\S](?P<y>[0-9]{4})[^/]*\.(?P<x>\w+)$', re.IGNORECASE),
        # Movie.Name.DVDRIP.XviD.avi
        re.compile(r'(?P<n>[^/]+)\Wdvdrip[^/]*\.(?P<x>\w+)$', re.IGNORECASE),
        re.compile(r'(?P<n>[^/]+)\Wb[rd]rip[^/]*\.(?P<x>\w+)$', re.IGNORECASE),
        # Movie Name.avi
        re.compile(r'(?P<n>[^/]+)\.(?P<x>\w+)$', re.IGNORECASE),
    ],
    'video/tv': [
        # s01e01.avi
        #re.compile(r's(?P<s>\d{1,2})\W?e(?P<e>\d{1,2}).*\.(?P<x>\w+)$', re.IGNORECASE),
        # 01x01.avi
        #re.compile(r'(?P<s>\d{1,2})x(?P<e>\d{1,2}).*\.(?P<x>\w+)$', re.IGNORECASE),
        # Show Name - Episode Title s01.e01.Episode.Title.avi
        re.compile(r'(?P<z>[\w \.]+?)\W*-\W*(?P<n>[\w \.]+?)\W*s(?P<s>\d{1,2})\W?e(?P<e>\d{1,2}).*\.(?P<x>\w+)$', re.IGNORECASE),
        # Show.Name.s01.e01.Episode.Title.avi
        # Show.Name.s01e01.Episode.Title.avi
        # Show_Name.s01e01_Episode_Title.avi
        # Show Name - s01e01 - Episode Title.avi
        re.compile(r'(?P<z>[\w -\.]+?)\W*s(?P<s>\d{1,2})\W?e(?P<e>\d{1,2})\W*(?P<n>[\w -\.]*\w)?.*\.(?P<x>\w+)$', re.IGNORECASE),
        # Show.Name.01x01.Episode.Title.avi
        # Show_Name_01x01_Episode_Title.avi
        # Show Name - 01x01 - Episode Title.avi
        re.compile(r'(?P<z>[\w -\.]+?)\W*(?P<s>\d{1,2})x(?P<e>\d{1,2})\W*(?P<n>[\w -\.]*\w)?.*\.(?P<x>\w+)$', re.IGNORECASE),
        # Show Name - s01e01.avi
        #re.compile(r'(?P<z>[\w -\.]+?)\W*s(?P<s>\d{1,2})\W?e(?P<e>\d{1,2}).*\.(?P<x>\w+)$', re.IGNORECASE),
        # Show Name - 01x01.avi
        #re.compile(r'(?P<z>[\w -\.]+?)\W*(?P<s>\d{1,2})x(?P<e>\d{1,2}).*\.(?P<x>\w+)$', re.IGNORECASE),
    ],
    'video/other': [
        # Show Name - Title - Date.ext
        re.compile(r'(?P<z>[\w \.]+?)\W*-\W*(?P<n>[\w \.]+?)\W*(?P<D>[0-9-\.]{3,}[0-9]).*\.(?P<x>\w+)$', re.IGNORECASE),
        # Show Name - Date - Title.ext
        re.compile(r'(?P<z>[\w -\.]+?)\W*(?P<D>[0-9-\.]{3,}[0-9])\W*(?P<n>[\w -\.]*\w)?.*\.(?P<x>\w+)$', re.IGNORECASE),
        # Show Name - Title.ext
        re.compile(r'(?P<z>[\w \.]+?)\W*-\W*(?P<n>[\w \.]+?).*\.(?P<x>\w+)$', re.IGNORECASE),
        # Title.ext
        re.compile(r'(?P<n>.*)\.(?P<x>\w+)$', re.IGNORECASE),
    ],
    'image/photos': [
    ],
    'image/other': [
    ]
}
# Output naming templates per media type, tried in the order listed.
# Placeholders (%a artist, %b album, %t track no., %n/%N title, %z/%Z series,
# %s/%S season, %e/%E episode, %y/%m/%d date parts, %D full date, %x ext, ...)
# are substituted by pattern_replace() with values from get_metadata().
rename_patterns = {
    'audio/music': [
        '%a/%b/%t - %n.%x',
        '%a/%b - %n.%x',
        '%a/%n.%x',
        '%a - %b/%t - %n.%x',
        '%a - %b - %n.%x',
        '%a - %n.%x'
    ],
    'audio/podcast': [
        '%z/%e - %n - %D.%x',
        '%z/%z %y-%m-%d %n.%x',
        '%z/%Z.%y.%m.%d.%N.%x',
        '%z/%z - %n.%x',
        '%z/%Z.%N.%x',
        '%n/%n.%x',
        '%n/%N.%x',
    ],
    'audio/other': [
    ],
    'video/movie': [
        '%n (%y).%x',
        '%N(%y).%x',
        '%n %x',
        '%N.%x'
    ],
    'video/tv': [
        '%z/Season %S/%z S%sE%e %n.%x',
        '%z/Season %S/%Z.s%s.e%e.%N.%x',
        '%z/%z S%sE%e %n.%x',
        '%z/%Z.s%s.e%e.%N.%x',
        '%z/S%sE%e %n.%x',
        '%z/s%s.e%e.%N.%x'
    ],
    'video/other': [
        '%z/%z - %y-%m-%d - %n.%x',
        '%z/%Z.%y.%m.%d.%N.%x',
        '%z/%z - %n.%x',
        '%z/%Z.%N.%x',
        '%z/%n.%x',
        '%z/%N.%x',
        '%n.%x',
        '%N.%x',
    ],
    'image/photos': [
        '%y/%m/%f.%x',
        '%y/%m/%d/%f.%x'
    ],
    'image/other': [
    ]
}
# Release-tag patterns stripped from parsed series/title metadata by
# normalize_metadata(). Each pattern consumes the tag and everything after it.
stopwords = [
    # Any three-letter (or more) acronym (HDTV, LOL, etc)
    re.compile(r'\W[A-Z]{3,}\b.*'),
    # DVDRIP/BDRIP tags
    re.compile(r'\Wdvdrip\b.*', re.IGNORECASE),
    re.compile(r'\Wb[rd]rip\b.*', re.IGNORECASE),
    # XviD tags
    re.compile(r'\Wxvid\b.*', re.IGNORECASE),
    # UNRATED
    re.compile(r'\Wunrated\b.*', re.IGNORECASE),
    # 1080p/720p
    # BUG FIX: original pattern was r'\[0-9]{3,4}p\b.*', which escaped the
    # opening bracket and therefore matched the literal text "[0-9]]]p"
    # instead of a resolution tag like ".1080p".
    re.compile(r'\W[0-9]{3,4}p\b.*', re.IGNORECASE),
]
# Post-process downloads to organize them into media libraries
# This should be _very_ fault-tolerant; it can be run multiple
# times (if a user changes which library they want a download
# to be assigned to, etc) and should handle updating previously
# processed downloads gracefully
def process_download(manager, download, client):
    """Post-process a completed download into the user's media libraries.

    Designed to be fault-tolerant: it can run multiple times for the same
    download (e.g. when the user reassigns it to a different library) and
    handles updating previously processed downloads gracefully.

    Args:
        manager: application manager providing library/work directories.
        download: the download model instance to organize.
        client: download client; supplies the file list on first import.

    Returns:
        A Deferred/DeferredList that fires when all file operations finish.
    """
    dfr = defer.succeed(True)
    library = None
    libraries = get_media_libraries(manager.get_libraries())
    if download.media_type:
        library = libraries[download.media_type]
    if not library:
        # No configured library: fall back to a synthetic pass-through
        # library that keeps every file at its original relative path (%p).
        library = models.Library()
        library.directory = None
        library.pattern = u'%p'
        library.keepall = True
    if download.imported:
        # Already imported once; re-run the import to move files according
        # to the (possibly changed) library settings.
        dfr = import_files(download, manager, library, firstRun=False)
    else:
        # New download: register each file reported by the client.
        # NOTE(review): `file['path']` is decoded from bytes -- presumably
        # the client returns byte strings; confirm per-client.
        for file in client.get_files():
            f = models.File()
            f.user = download.user
            f.download = download
            f.directory = None
            f.filename = file['path'].decode('utf8')
            f.size = file['size']
            f.media_type = download.media_type
            f.original_filename = file['path'].decode('utf8')
            f.added = time()
            download.files.add(f)
        dfr = import_files(download, manager, library, firstRun=True)
    return dfr
# Copy media into library
def import_files(download, manager, library, firstRun=True):
    """Copy (first run) or move (re-run) a download's files into the library.

    For each file: optionally filters out non-media files by MIME type,
    renders the library's rename pattern from parsed filename metadata, then
    schedules a threaded copy (first run, source is the work directory) or
    move (re-run, source is the library itself). Database updates happen in
    file_op_complete once each disk operation finishes.

    NOTE(review): indentation of the original was ambiguous; the
    remove-empty-dirs cleanup is assumed to apply only when not firstRun
    (i.e. when the file lived in the library) -- confirm against history.

    Returns:
        DeferredList of the scheduled file operations.
    """
    fmap = {}  # NOTE(review): unused; kept to avoid altering code in a doc pass
    dl = []
    targetdir = manager.get_library_directory()
    if library.directory:
        targetdir = '%s/%s' % (targetdir, library.directory)
    for file in download.files:
        fullpath = file.filename
        if firstRun:
            # First import: files still live in the download work directory.
            fullpath = '%s/%s' % (manager.get_work_directory(download), file.filename)
        else:
            # Re-import: files already live somewhere under the library root.
            if file.directory:
                fullpath = '%s/%s/%s' % (manager.get_library_directory(),
                                         file.directory, fullpath)
            else:
                fullpath = '%s/%s' % (manager.get_library_directory(), fullpath)
        # Skip unrecognized media files
        if not library.keepall:
            mimetype = mimetypes.guess_type(file.filename)[0]
            if not mimetype and file.filename.rfind('.') > -1:
                # Fall back to our own extension table for types the stdlib
                # mimetypes module does not know (e.g. .mkv).
                ext = file.filename[file.filename.rfind('.') + 1:]
                if ext in extra_mimetypes:
                    mimetype = extra_mimetypes[ext]
            matches = sum([1 for m in media_mimetypes[download.media_type]
                           if mimetype and mimetype.startswith(m)])
            if matches == 0:
                download.files.remove(file)
                if not firstRun:
                    # File was already in the library: delete it and prune
                    # any directories left empty by the removal.
                    os.remove(fullpath)
                    dir = os.path.dirname(fullpath)
                    while os.path.exists(dir) and not len(os.listdir(dir)):
                        os.rmdir(dir)
                        dir = os.path.dirname(dir)
                continue
        # Map filename to desired renaming pattern
        metadata = get_metadata(file.original_filename, download, fullpath)
        dest = pattern_replace(library.pattern, metadata)
        if dest:
            # Collapse '//' produced by empty placeholder substitutions.
            while dest.find('//') > -1:
                dest = dest.replace('//', '/')
        else:
            continue
        # Move file on disk (threaded so the reactor is not blocked)
        dfr = None
        if not firstRun:
            dfr = threads.deferToThread(move_file,
                fullpath, '%s/%s' % (targetdir, dest), trim_empty_dirs=True)
        else:
            dfr = threads.deferToThread(copy_file,
                fullpath, '%s/%s' % (targetdir, dest))
        # NOTE(review): `unicode` is a Python 2 builtin -- this module
        # appears to target Python 2 / Twisted.
        dfr.addCallback(file_op_complete, download, file, firstRun,
                        library.directory, unicode(dest), download.media_type)
        dl.append(dfr)
    return defer.DeferredList(dl)
# Update database
def file_op_complete(success, download, file, firstRun, newdir, newfile, newtype):
    """Record the outcome of a copy/move operation on the file model.

    On success the file's new library location and media type are stored;
    on a first-run failure the file is dropped from the download entirely.
    Failures on later runs leave the existing record untouched.
    """
    if not success:
        if firstRun:
            download.files.remove(file)
        return
    file.directory = newdir
    file.filename = newfile
    file.media_type = newtype
def get_metadata(path, source, filename=None):
    """Extract rename-pattern metadata from a downloaded file's path.

    Args:
        path: relative path of the file within the download.
        source: the download (or feed item) the file came from; its
            ``media_type`` selects which match_patterns to try, and its
            name (or its feed's name) seeds the series fields.
        filename: optional absolute path of the file on disk; when readable,
            real file metadata overrides what was parsed from the name.

    Returns:
        dict mapping single-letter placeholder keys (see rename_patterns)
        to string values or None.
    """
    metadata = {'a': None, 'b': None, 'd': None, 'D': None,
                'e': None, 'E': None, 'f': None, 'm': None, 'n': None,
                'N': None, 'p': path, 's': None, 'S': None, 't': '1',
                'T': '01', 'x': None, 'y': None, 'z': None, 'Z': None}
    # BUG FIX: the original rebound the `filename` parameter to the basename
    # here, so the on-disk metadata probe below always tested the bare
    # basename (relative to CWD) instead of the caller-supplied full path.
    basename = os.path.basename(path)
    pos = basename.rfind('.')
    if pos > -1:
        metadata['f'] = basename[:pos]
        metadata['x'] = basename[pos + 1:]
    else:
        metadata['f'] = basename
    # Parse metadata from filename using the media-type-specific patterns;
    # first pattern that matches wins.
    if source and source.media_type:
        for m in match_patterns[source.media_type]:
            match = m.search(path)
            if match:
                metadata.update(match.groupdict())
                break
    # Override with real metadata if file exists
    if filename and os.access(filename, os.R_OK):
        metadata.update(get_file_metadata(filename))
    # Source can be either feed or download
    name = None
    if hasattr(source, 'feed') and source.feed:
        name = source.feed.name
    elif hasattr(source, 'name'):
        name = source.name
    normalize_metadata(metadata, name)
    return metadata
# TODO Merge in real metadata from hachoir-metadata parser
def get_file_metadata(path):
    """Placeholder for on-disk metadata extraction.

    Currently returns an empty dict, so filename-derived metadata is never
    overridden by real file metadata.
    """
    return {}
def normalize_metadata(metadata, name=None):
    """Fill in and clean up derived metadata fields, in place.

    Synchronizes the space-separated (lowercase key) and dot-separated
    (uppercase key) variants of the series (z/Z) and title (n/N) fields,
    strips release-tag stopwords from them, zero-pads season/episode
    numbers, and normalizes dates to YYYY-MM-DD.

    Args:
        metadata: dict produced by get_metadata(); modified in place.
        name: optional authoritative series name that overrides anything
            parsed from the filename.
    """
    if name:
        metadata['z'] = name
        metadata['Z'] = metadata['z'].replace(' ', '.')
    elif metadata['z']:
        metadata['z'] = metadata['z'].replace('.', ' ')
        metadata['z'] = metadata['z'].replace('_', ' ')
        metadata['Z'] = metadata['z'].replace(' ', '.')
    elif metadata['Z']:
        metadata['Z'] = metadata['Z'].replace(' ', '.')
        metadata['Z'] = metadata['Z'].replace('_', '.')
        metadata['z'] = metadata['Z'].replace('.', ' ')
    # NOTE: the original had a fourth `elif name:` branch here; it was
    # unreachable (the `if name:` branch above already covers it), so it
    # has been removed.
    if metadata['n']:
        metadata['n'] = metadata['n'].replace('.', ' ')
        metadata['n'] = metadata['n'].replace('_', ' ')
        metadata['N'] = metadata['n'].replace(' ', '.')
    elif metadata['N']:
        metadata['N'] = metadata['N'].replace(' ', '.')
        metadata['N'] = metadata['N'].replace('_', '.')
        metadata['n'] = metadata['N'].replace('.', ' ')
    else:
        metadata['n'] = 'Unknown Title'
        metadata['N'] = 'Unknown.Title'
    # Strip trailing release tags (HDTV, DVDRIP, 1080p, ...) from names.
    for sw in stopwords:
        if metadata['z'] and sw.search(metadata['z']):
            metadata['z'] = sw.sub('', metadata['z'])
        if metadata['Z'] and sw.search(metadata['Z']):
            metadata['Z'] = sw.sub('', metadata['Z'])
        if metadata['n'] and sw.search(metadata['n']):
            metadata['n'] = sw.sub('', metadata['n'])
        if metadata['N'] and sw.search(metadata['N']):
            metadata['N'] = sw.sub('', metadata['N'])
    # Zero-padded (lowercase) and unpadded (uppercase) episode/season forms.
    if metadata['e']:
        e = int(metadata['e'])
        metadata['e'] = '%02d' % e
        metadata['E'] = '%d' % e
    if metadata['s']:
        s = int(metadata['s'])
        metadata['s'] = '%02d' % s
        metadata['S'] = '%d' % s
    if metadata['D']:
        # Normalize the full date and backfill year/month/day parts.
        d = parsedate(metadata['D'])
        metadata['D'] = d.strftime('%Y-%m-%d')
        if not metadata['y']:
            metadata['y'] = d.strftime('%Y')
        if not metadata['m']:
            metadata['m'] = d.strftime('%m')
        if not metadata['d']:
            metadata['d'] = d.strftime('%d')
    elif metadata['y']:
        # Only a year was parsed: default month/day to 01.
        if not metadata['d']:
            metadata['d'] = '01'
        if not metadata['m']:
            metadata['m'] = '01'
        metadata['D'] = '%s-%s-%s' % (metadata['y'], metadata['m'], metadata['d'])
def pattern_replace(pattern, values):
    """Substitute every '%<key>' placeholder in ``pattern`` with its value.

    Keys mapped to None are replaced with the empty string. Returns the
    substituted pattern string.
    """
    for key, value in values.items():
        pattern = pattern.replace('%' + key, '' if value is None else value)
    return pattern
def move_file(src, dest, trim_empty_dirs=False):
    """Move ``src`` to ``dest``, creating destination directories as needed.

    When ``trim_empty_dirs`` is set, source directories left empty by the
    move are removed, walking upward until a non-empty directory is found.
    Returns True on success, False on any failure (errors are swallowed
    by design -- callers treat this as best-effort).
    """
    try:
        parent = os.path.dirname(dest)
        if not os.path.exists(parent):
            os.makedirs(parent)
        shutil.move(src, dest)
        if trim_empty_dirs:
            folder = os.path.dirname(src)
            while os.path.exists(folder) and len(os.listdir(folder)) == 0:
                os.rmdir(folder)
                folder = os.path.dirname(folder)
    except Exception:
        return False
    return True
def remove_file(file, trim_empty_dirs=False):
    """Delete ``file`` from disk, optionally pruning emptied directories.

    Returns True on success, False on any failure.

    BUG FIX: the original trim loop referenced an undefined name ``src``,
    so enabling ``trim_empty_dirs`` always raised NameError (caught by the
    broad except), left the empty directories behind, and returned False
    even though the file itself had been removed.
    """
    try:
        os.remove(file)
        if trim_empty_dirs:
            srcdir = os.path.dirname(file)
            while os.path.exists(srcdir) and not len(os.listdir(srcdir)):
                os.rmdir(srcdir)
                srcdir = os.path.dirname(srcdir)
        return True
    except Exception:
        return False
def copy_file(src, dest):
    """Copy ``src`` to ``dest``, creating destination directories as needed.

    Returns True on success, False on any failure (errors are swallowed
    by design -- callers treat this as best-effort).
    """
    try:
        target_dir = os.path.dirname(dest)
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)
        shutil.copy(src, dest)
    except Exception:
        return False
    return True
def move_files(filemap, trim_empty_dirs=False):
    """Move each ``src -> dest`` pair in ``filemap``, creating dest dirs.

    BUG FIX: the original accepted ``trim_empty_dirs`` but ignored it and
    always pruned emptied source directories; the flag is now honored
    (matching move_file's behavior).

    Unlike move_file, errors are NOT swallowed here -- exceptions propagate
    to the caller.
    """
    for src in filemap:
        dest = filemap[src]
        destdir = os.path.dirname(dest)
        if not os.path.exists(destdir):
            os.makedirs(destdir)
        shutil.move(src, dest)
        if trim_empty_dirs:
            srcdir = os.path.dirname(src)
            while os.path.exists(srcdir) and not len(os.listdir(srcdir)):
                os.rmdir(srcdir)
                srcdir = os.path.dirname(srcdir)
def copy_files(filemap):
    """Copy each ``src -> dest`` pair in ``filemap``, creating dest dirs.

    Errors are not swallowed -- exceptions propagate to the caller.
    """
    for source, target in filemap.items():
        target_dir = os.path.dirname(target)
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)
        shutil.copy(source, target)
def get_media_types():
    """Return the mapping of media-type keys to display labels."""
    return mediatypes
def get_media_libraries(userlibs):
    """Map every known media type to the user's library configured for it.

    Types without a configured library map to None; when multiple user
    libraries claim the same type, the last one in ``userlibs`` wins.
    """
    libraries = {}
    for media_type in mediatypes:
        chosen = None
        for lib in userlibs:
            if lib.media_type == media_type:
                chosen = lib
        libraries[media_type] = chosen
    return libraries
def get_file_patterns():
    """Render example filenames for every rename pattern, keyed by media type.

    Fills each pattern's placeholders with representative sample values so a
    UI can show users what each naming pattern would produce.

    Returns:
        dict: media type -> {pattern string -> example rendering}.
    """
    patterndesc = {}
    # Sample values for each placeholder key.
    replacements = {
        'a': 'Artist',
        'b': 'Album',
        'd': '15',
        'D': '2009-10-15',
        'e': '03',
        'f': 'filename',
        'E': '3',
        'm': '10',
        'n': 'Media Title',
        'N': 'Media.Title',
        'p': 'filename.ext',
        's': '01',
        'S': '1',
        't': '05',
        'T': '5',
        'y': '2009',
        'z': 'Series Name',
        'Z': 'Series.Name',
        'x': 'ext',
    }
    for t in rename_patterns:
        patterndesc[t] = {}
        for p in rename_patterns[t]:
            patterndesc[t][p] = pattern_replace(p, replacements)
    return patterndesc
/Monzo%20API-0.3.0.tar.gz/Monzo API-0.3.0/monzo/endpoints/account.py | from __future__ import annotations
from datetime import datetime
from typing import List, Optional
from monzo.authentication import Authentication
from monzo.endpoints.balance import Balance
from monzo.endpoints.monzo import Monzo
from monzo.exceptions import MonzoHTTPError, MonzoPermissionsError
from monzo.helpers import create_date
# Account types accepted as a filter by the Monzo /accounts endpoint.
ACCOUNT_TYPES = [
    'uk_retail',
    'uk_retail_joint',
]

# Maps account-description prefixes to human-readable account type labels.
MONZO_ACCOUNT_TYPES = {
    'user_': 'Current Account',
    'monzoflex_': 'Flex',
    'monzoflexbackingloan_': 'Loan (Flex)',
    'loan_': 'Loan',
}
class Account(Monzo):
    """
    Class to manage accounts.

    Class provides methods to fetch accounts and related information. To properly utilise the
    class the fetch class method should be utilised.
    """

    __slots__ = ['_account_id', '_auth', '_balance', '_created', '_description', '_has_balance', '_closed']

    def __init__(self, auth: Authentication, account_id: str, description: str, created: datetime, closed: bool):
        """
        Initialize Account.

        Args:
            auth: Monzo authentication object
            account_id: ID of the account
            description: Description of the account
            created: Date and time the account was created
            closed: Boolean for account status
        """
        self._auth: Authentication = auth
        self._account_id: str = account_id
        self._balance: Optional[Balance] = None
        self._created: datetime = created
        self._description: str = description
        # Flipped to False on the first failed balance fetch so later calls
        # do not repeat an API request that cannot succeed.
        self._has_balance: bool = True
        self._closed: bool = closed
        super().__init__(auth=auth)

    @property
    def account_id(self) -> str:
        """
        Property for account_id.

        Returns:
            Account ID for the account
        """
        return self._account_id

    def account_type(self) -> str:
        """
        Identify the type of Monzo account from its description prefix.

        Note: this is a regular method (call it), not a property; the
        original docstring mislabelled it.

        Returns:
            Type of account mapped from MONZO_ACCOUNT_TYPES, default to UNKNOWN
        """
        # Iterate (prefix, label) pairs directly instead of calling .keys()
        # and re-indexing the dict for each candidate.
        return next(
            (
                label
                for prefix, label in MONZO_ACCOUNT_TYPES.items()
                if self.description.lower().startswith(prefix)
            ),
            'UNKNOWN',
        )

    def fetch_balance(self) -> Optional[Balance]:
        """
        Fetch the live balance.

        This carries out an API call to fetch a fresh balance, unless a
        previous attempt already failed with an HTTP/permissions error. If the
        originally fetched balance is good enough use the balance property.

        Returns:
            Balance object, or None when the balance could not be fetched
        """
        if self._has_balance:
            try:
                self._balance = Balance.fetch(auth=self._auth, account_id=self._account_id)
            except (MonzoHTTPError, MonzoPermissionsError):
                self._has_balance = False
        return self._balance

    @property
    def balance(self) -> Optional[Balance]:
        """
        Property for balance.

        If a balance has not been fetched yet this will trigger a fetch, otherwise it will return the already fetched
        balance. To always fetch the live balance use fetch_balance().

        Returns:
            Balance object
        """
        if not self._balance and self._has_balance:
            return self.fetch_balance()
        return self._balance

    @property
    def created(self) -> datetime:
        """
        Property for created.

        Returns:
            When the account was created
        """
        return self._created

    @property
    def description(self) -> str:
        """
        Property for description.

        Returns:
            Description for the account
        """
        return self._description

    @property
    def closed(self) -> bool:
        """
        Property for closed.

        Returns:
            Boolean for account status
        """
        return self._closed

    @classmethod
    def fetch(cls, auth: Authentication, account_type: str = '') -> List[Account]:
        """
        Implement and instantiates an Account object.

        Args:
            auth: Monzo authentication object
            account_type: Optional type of account required, must be in ACCOUNT_TYPES

        Returns:
            List of instantiated Account objects
        """
        data = {}
        if account_type and account_type.lower() in ACCOUNT_TYPES:
            data['account_type'] = account_type.lower()
        res = auth.make_request(path='/accounts', data=data)
        account_list = []
        for account_item in res['data']['accounts']:
            account = Account(
                auth=auth,
                account_id=account_item['id'],
                description=account_item['description'],
                created=create_date(account_item['created']),
                closed=account_item['closed'],
            )
            account_list.append(account)
        return account_list
/NLP_LIB_cpu-0.0.12.tar.gz/NLP_LIB_cpu-0.0.12/NLP_LIB/transforms/bert_sentencepiece_pretrain_wrapper.py | import sys
sys.path.append('.')
import numpy as np
import os
from NLP_LIB.nlp_core.data_transform_wrapper import DataTransformWrapper
import sentencepiece as spm
import random
import tensorflow as tf
import six
from tensorflow.keras import backend as K
from tensorflow.keras.layers import *
def create_int_feature(values):
    """Wrap an iterable of ints in a tf.train.Feature (int64_list) for TFRecords."""
    feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
    return feature
class BERTSPMExampleBuilder(object):
    """Given a stream of input text, creates pretraining examples.

    Accumulates SentencePiece-tokenized sentences until the target example
    length is reached (or a blank line marks a document boundary), then
    packs them into a two-segment BERT example as a tf.train.Example.
    """

    def __init__(self, spm_model, cls_id, sep_id, mask_id, max_length):
        # SentencePiece model used to turn raw text into token ids.
        self._spm_model = spm_model
        self._current_sentences = []
        self._current_length = 0
        self._max_length = max_length
        # Target length for the next example; occasionally randomized to a
        # shorter value (see _create_example) for length robustness.
        self._target_length = max_length
        self.cls_id = cls_id
        self.sep_id = sep_id
        self.mask_id = mask_id

    def add_line(self, line):
        """Adds a line of text to the current example being built.

        Returns a finished tf.train.Example when a document boundary or the
        target length is reached, otherwise None.
        """
        print('Add Line: ' + str(line))  # NOTE(review): debug output left in
        line = line.strip().replace("\n", " ")
        if (not line) and self._current_length != 0:  # empty lines separate docs
            return self._create_example()
        bert_tokids = self._spm_model.EncodeAsIds(line)
        #bert_tokens = self._tokenizer.tokenize(line)
        #bert_tokids = self._tokenizer.convert_tokens_to_ids(bert_tokens)
        self._current_sentences.append(bert_tokids)
        self._current_length += len(bert_tokids)
        if self._current_length >= self._target_length:
            return self._create_example()
        return None

    def _create_example(self):
        """Creates a pre-training example from the current list of sentences."""
        # small chance to only have one segment as in classification tasks
        # Because we have randomness here, we cannot separate file for input/output column
        # because it can create different data file for X and Y here!!.
        # To keep it in sync, BERT need column_id "0" for both input and output side,
        # But we will use "is_input" field in config to diffrentiate logic in transformation instead!
        if random.random() < 0.1:
            first_segment_target_length = 100000
        else:
            # -3 due to not yet having [CLS]/[SEP] tokens in the input text
            first_segment_target_length = (self._target_length - 3) // 2
        first_segment = []
        second_segment = []
        for sentence in self._current_sentences:
            # the sentence goes to the first segment if (1) the first segment is
            # empty, (2) the sentence doesn't put the first segment over length or
            # (3) 50% of the time when it does put the first segment over length
            if (len(first_segment) == 0 or
                    len(first_segment) + len(sentence) < first_segment_target_length or
                    (len(second_segment) == 0 and
                     len(first_segment) < first_segment_target_length and
                     random.random() < 0.5)):
                first_segment += sentence
            else:
                second_segment += sentence
        # trim to max_length while accounting for not-yet-added [CLS]/[SEP] tokens
        first_segment = first_segment[:self._max_length - 2]
        second_segment = second_segment[:max(0, self._max_length -
                                             len(first_segment) - 3)]
        # prepare to start building the next example
        self._current_sentences = []
        self._current_length = 0
        # small chance for random-length instead of max_length-length example
        if random.random() < 0.05:
            self._target_length = random.randint(5, self._max_length)
        else:
            self._target_length = self._max_length
        return self._make_dict_example(first_segment, second_segment)

    def _make_dict_example(self, first_segment, second_segment):
        """Converts two "segments" of text into a tf.train.Example."""
        # Layout: [CLS] seg1 [SEP] (seg2 [SEP])?, zero-padded to max_length;
        # segment_ids mark tokens belonging to the second segment with 1.
        input_ids = [self.cls_id] + first_segment + [self.sep_id]
        segment_ids = [0] * len(input_ids)
        if second_segment:
            input_ids += second_segment + [self.sep_id]
            segment_ids += [1] * (len(second_segment) + 1)
        input_mask = [1] * len(input_ids)
        input_ids += [0] * (self._max_length - len(input_ids))
        input_mask += [0] * (self._max_length - len(input_mask))
        segment_ids += [0] * (self._max_length - len(segment_ids))
        '''
        dict_example = {
          "input_ids": input_ids,
          "input_mask": input_mask,
          "segment_ids": segment_ids
        }
        return dict_example
        '''
        tf_example = tf.train.Example(features=tf.train.Features(feature={
            "input_ids": create_int_feature(input_ids),
            "input_mask": create_int_feature(input_mask),
            "segment_ids": create_int_feature(segment_ids)
        }))
        return tf_example
'''
class BERTFullDictExampleWriter(object):
"""Writes pre-training examples to disk."""
def __init__(self, job_id, vocab_file, output_dir, max_seq_length,
num_jobs, blanks_separate_docs, do_lower_case,
num_out_files=1000):
self._blanks_separate_docs = blanks_separate_docs
tokenizer = tokenization.FullTokenizer(
vocab_file=vocab_file,
do_lower_case=do_lower_case)
self._example_builder = BERTFullDictExampleBuilder(tokenizer, max_seq_length)
self._writers = []
for i in range(num_out_files):
if i % num_jobs == job_id:
output_fname = os.path.join(
output_dir, "pretrain_data.tfrecord-{:}-of-{:}".format(
i, num_out_files))
self._writers.append(tf.io.TFRecordWriter(output_fname))
self.n_written = 0
def write_examples(self, input_file):
"""Writes out examples from the provided input file."""
with tf.io.gfile.GFile(input_file) as f:
for line in f:
line = line.strip()
if line or self._blanks_separate_docs:
example = self._example_builder.add_line(line)
if example:
self._writers[self.n_written % len(self._writers)].write(
example.SerializeToString())
self.n_written += 1
example = self._example_builder.add_line("")
if example:
self._writers[self.n_written % len(self._writers)].write(
example.SerializeToString())
self.n_written += 1
def finish(self):
for writer in self._writers:
writer.close()
'''
class BERTSPMExampleWriter(object):
    """Writes pre-training examples to disk as sharded TFRecord files.

    Output is split across ``num_out_files`` shards; this job owns every
    ``num_jobs``-th shard starting at ``job_id``, and round-robins examples
    across the writers it owns.
    """

    def __init__(self, job_id, spm_model, output_dir, max_seq_length,
                 num_jobs, blanks_separate_docs, do_lower_case,
                 cls_id, sep_id, mask_id,
                 num_out_files=1):
        # NOTE: do_lower_case is accepted for interface compatibility but is
        # unused here -- lowercasing is the SentencePiece model's concern.
        self._blanks_separate_docs = blanks_separate_docs
        self._example_builder = BERTSPMExampleBuilder(
            spm_model, cls_id, sep_id, mask_id, max_seq_length)
        self._writers = []
        # exist_ok avoids crashing when the output directory already exists
        # (the original bare os.makedirs raised in that case).
        os.makedirs(output_dir, exist_ok=True)
        for i in range(num_out_files):
            if i % num_jobs == job_id:
                output_fname = os.path.join(
                    output_dir, "pretrain_data.tfrecord-{:}-of-{:}".format(
                        i, num_out_files))
                self._writers.append(tf.io.TFRecordWriter(output_fname))
        self.n_written = 0

    def write_examples(self, input_file):
        """Writes out examples from the provided input file."""
        with tf.io.gfile.GFile(input_file) as f:
            for line in f:
                line = line.strip()
                if line or self._blanks_separate_docs:
                    example = self._example_builder.add_line(line)
                    if example:
                        self._writers[self.n_written % len(self._writers)].write(
                            example.SerializeToString())
                        self.n_written += 1
        # Flush whatever partial example remains at end of file.
        example = self._example_builder.add_line("")
        if example:
            self._writers[self.n_written % len(self._writers)].write(
                example.SerializeToString())
            self.n_written += 1

    def finish(self):
        """Close all shard writers, flushing buffered records to disk."""
        for writer in self._writers:
            writer.close()
class BERTSentencePiecePretrainWrapper(DataTransformWrapper):
# When initialize DataTransformWrapper, we pass configuration and dataset object to constructor.
# For BERT pretrained dataset, we perform encoding at initialization step
# because we need to separate sentence as chunk 1, 2 and add segment id information.
# The encode function is used mainly in inference step only as all text will be in segment 0 only.
def __init__(self, config, dataset):
    """Initialize the wrapper: train/load the SentencePiece model and
    pre-generate BERT pretraining TFRecord files for the dataset.

    Args:
        config: dict with at least 'column_id' and 'max_seq_length';
            optionally 'max_dict_size' (default 15000).
        dataset: dataset object providing load_as_list(),
            get_local_data_dir() and get_trivial_token_separator().
    """
    super(BERTSentencePiecePretrainWrapper, self).__init__(config, dataset)
    print('dataset = ' + str(dataset))
    column_id = config['column_id']
    min_freq = 0  # NOTE(review): unused; kept to avoid altering code in a doc pass
    max_dict_size = 15000
    if 'max_dict_size' in config and config['max_dict_size'] is not None:
        max_dict_size = config['max_dict_size']
    self.max_dict_size = max_dict_size
    self.sentence_piece_processor = spm.SentencePieceProcessor()
    self.trivial_token_separator = dataset.get_trivial_token_separator()
    self.max_seq_length = config['max_seq_length']
    self.preaggregated_data_path = None
    self.preaggregated_validation_data_path = None
    self.aggregated_tensors = None
    print('Max Dictionary Size = ' + str(max_dict_size))
    print('Column ID = ' + str(column_id))
    # Step 1: Check and load dict
    # Load from dict from cache if possible
    local_data_dir = dataset.get_local_data_dir()
    print('local_data_dir = ' + str(local_data_dir))
    if not os.path.exists(local_data_dir):
        os.makedirs(local_data_dir)
    # Cache file paths are keyed by wrapper class, dict size and column id.
    local_dict_path_prefix = os.path.join(local_data_dir, 'dict_' +
                                          type(self).__name__ +
                                          '_dict' + str(max_dict_size))
    local_dict_vocab_path = local_dict_path_prefix + str(column_id) + '.vocab'
    local_dict_model_path = local_dict_path_prefix + str(column_id) + '.model'
    local_untokened_data_file = local_dict_path_prefix + str(column_id) + '.untoken'
    local_untokened_validation_data_file = local_dict_path_prefix + str(column_id) + '.valid.untoken'
    # We ensure that untokenized data file is available because we will use as inputs
    # to BERT example writer (For both training and validation dataset)
    if not os.path.exists(local_untokened_data_file) or not os.path.exists(local_dict_model_path):
        # Create untokened data file: re-join the dataset's token lists into
        # plain-text lines using the dataset's trivial separator.
        with open(local_untokened_data_file, 'w', encoding='utf-8') as fout:
            print('Constructing untokened document')
            (x, y, _, _) = dataset.load_as_list()
            # column_id selects input (0), output (1) or both (-1) columns.
            data = []
            if column_id == 0:
                data = [x]
            elif column_id == 1:
                data = [y]
            elif column_id == -1:
                data = [x, y]
            for each_data in data:
                for line in each_data:
                    untokened_line = ''
                    for word in line:
                        if len(untokened_line) > 0:
                            untokened_line = untokened_line + self.trivial_token_separator
                        untokened_line = untokened_line + word
                    fout.write(untokened_line + '\n')
        # Train sentence piece model (only on training data file).
        # Special ids: pad=0, unk=1, bos([CLS])=2, eos([SEP])=3, <MASK>=4.
        spm.SentencePieceTrainer.Train('--pad_id=0 --bos_id=2 --eos_id=3 --unk_id=1 --user_defined_symbols=<MASK> --input=' +
                                       local_untokened_data_file +
                                       ' --model_prefix=sp --vocab_size=' + str(max_dict_size) + ' --hard_vocab_limit=false')
        # Move sp.model / sp.vocab to the dict paths
        # NOTE(review): trainer writes into the current working directory.
        os.rename("sp.model", local_dict_model_path)
        os.rename("sp.vocab", local_dict_vocab_path)
        self.sentence_piece_processor.Load(local_dict_model_path)
    else:
        self.sentence_piece_processor.Load(local_dict_model_path)
    if not os.path.exists(local_untokened_validation_data_file):
        # Create untokened data file for validation dataset
        with open(local_untokened_validation_data_file, 'w', encoding='utf-8') as fout:
            print('Constructing untokened document')
            (_, _, x, y) = dataset.load_as_list()
            data = []
            if column_id == 0:
                data = [x]
            elif column_id == 1:
                data = [y]
            elif column_id == -1:
                data = [x, y]
            for each_data in data:
                for line in each_data:
                    untokened_line = ''
                    for word in line:
                        if len(untokened_line) > 0:
                            untokened_line = untokened_line + self.trivial_token_separator
                        untokened_line = untokened_line + word
                    fout.write(untokened_line + '\n')
    print('Dictionary size = ' + str(self.sentence_piece_processor.GetPieceSize()))
    # Step 2: Check and create data as 4 features set
    local_data_record_dir = os.path.join(local_data_dir, 'features_' +
                                         type(self).__name__ + '_dict' + str(max_dict_size)) + str(column_id) + '_len' + str(config['max_seq_length'])
    self.preaggregated_data_path = local_data_record_dir
    if not os.path.exists(local_data_record_dir):
        print('[INFO] Start generating TFRecord file from untokenned data file at: ' + local_data_record_dir)
        example_writer = BERTSPMExampleWriter(
            job_id=0,
            spm_model=self.sentence_piece_processor,
            output_dir=local_data_record_dir,
            max_seq_length=config['max_seq_length'],
            num_jobs=1,
            blanks_separate_docs=True,  # args.blanks_separate_docs,
            do_lower_case=True,  # args.do_lower_case
            cls_id=2,
            sep_id=3,
            mask_id=4
        )
        example_writer.write_examples(local_untokened_data_file)
        example_writer.finish()
        print('[INFO] Finished generating TFRecord (Training Dataset): ' + local_data_record_dir)
    local_validation_data_record_dir = os.path.join(local_data_dir, 'features_validation_' +
                                                    type(self).__name__ + '_dict' + str(max_dict_size)) + str(column_id) + '_len' + str(config['max_seq_length'])
    self.preaggregated_validation_data_path = local_validation_data_record_dir
    if not os.path.exists(local_validation_data_record_dir):
        print('[INFO] Start generating TFRecord file from untokenned data file at: ' + local_validation_data_record_dir)
        example_writer = BERTSPMExampleWriter(
            job_id=0,
            spm_model=self.sentence_piece_processor,
            output_dir=local_validation_data_record_dir,
            max_seq_length=config['max_seq_length'],
            num_jobs=1,
            blanks_separate_docs=True,  # args.blanks_separate_docs,
            do_lower_case=True,  # args.do_lower_case
            cls_id=2,
            sep_id=3,
            mask_id=4
        )
        example_writer.write_examples(local_untokened_validation_data_file)
        example_writer.finish()
        print('[INFO] Finished generating TFRecord (Training Dataset): ' + local_validation_data_record_dir)
# Step 3: Mask out some token and store as seperated label file
def startid(self): return 2
def endid(self): return 3
def maskid(self): return 4
# Function used for encode batch of string data into batch of encoded integer
    def encode(self, token_list, max_length = 999):
        """Encode a batch of token sequences into BERT-style model inputs.

        Each row is encoded with SentencePiece, prefixed with startid() ([CLS])
        and, when there is room, terminated with endid() ([SEP]).

        Args:
            token_list: batch of token sequences (each a sequence of strings).
            max_length: maximum sequence length; overridden by
                config['max_seq_length'] when present.

        Returns:
            [input_ids, input_mask, segment_ids] (each of shape
            (batch, max_length)) when config['is_input'] is True, otherwise
            only the input_ids array for the output side.
        """
        if 'max_seq_length' in self.config:
            max_length = self.config['max_seq_length']
        # When mask_last_token is set, the final token of each row is zeroed
        # out instead of emitting an end-of-sequence id (language-model use).
        mask_last_token = False
        if 'mask_last_token' in self.config:
            mask_last_token = self.config['mask_last_token']
        # This is to force placing special clf_id not exceed specific location (Such as len-1 in decoder only architecture because it trims the last token out)
        clf_id = None
        clf_pos_offset = None
        if 'clf_id' in self.config:
            clf_id = self.config['clf_id']
        if 'clf_pos_offset' in self.config:
            clf_pos_offset = self.config['clf_pos_offset']
        '''
        "input_ids": create_int_feature(input_ids),
        "input_mask": create_int_feature(input_mask),
        "segment_ids": create_int_feature(segment_ids)
        '''
        # Pre-allocate the three fixed-shape output tensors.
        input_ids = np.zeros((len(token_list), max_length), dtype='int32')
        input_mask = np.zeros((len(token_list), max_length), dtype='int32')
        segment_ids = np.zeros((len(token_list), max_length), dtype='int32')
        # Every row starts with the [CLS]/start id in position 0.
        input_ids[:,0] = self.startid()
        for i, x in enumerate(token_list):
            x = x[:max_length - 1]
            x = self.trivial_token_separator.join(x).strip()
            encoded_x = self.sentence_piece_processor.EncodeAsIds(x)
            # sys.stdout.buffer.write(x.encode('utf8'))
            # Ensure that we are not
            # exceeding the row capacity (position 0 is reserved for [CLS]).
            encoded_x = encoded_x[:max_length - 1]
            input_ids[i, 1:len(encoded_x) + 1] = encoded_x
            # If sentence is not end, then don't add end symbol at the end of encoded tokens
            # We have to mask out last token in some case (Language Model). Note that masked token can be endid() (predict end of sequence)
            if 1 + len(encoded_x) < max_length:
                if mask_last_token:
                    input_ids[i, 1 + len(encoded_x)] = 0
                    input_mask[i, 0:1 + len(encoded_x)] = 1
                else:
                    input_ids[i, 1 + len(encoded_x)] = self.endid()
                    input_mask[i, 0:1 + len(encoded_x)] = 1
            else:
                # Row exactly fills max_length, so there is no room for endid().
                if mask_last_token:
                    input_ids[i, len(encoded_x)] = 0
                    input_mask[i, 0:len(encoded_x)] = 1
                # NOTE(review): when the row fills max_length and
                # mask_last_token is False, input_mask is left all-zero for
                # this row — looks unintended; confirm against training code.
            # If clf_pos_offset is specified, we trim data to the length and set clf_id at the position
            if clf_pos_offset is not None:
                clf_pos = min(1 + len(encoded_x), max_length - 1 + clf_pos_offset)
                input_ids[i, clf_pos] = clf_id
                input_ids[i, clf_pos + 1:] = 0
            # print('Encoded Ids = ' + str(input_ids[i,:]))
        X = [
            input_ids,
            input_mask,
            segment_ids,
        ]
        if self.config['is_input'] == True:
            return X
        else:
            return X[0] # We need only 'input_ids' for output side
# Function used for decode batch of integers back to batch of string
def decode(self, id_list):
ret = []
for i, x in enumerate(id_list):
x = [int(n) for n in x]
text = self.sentence_piece_processor.DecodeIds(x)
ret.append(text)
return ret
# Function to return size of dictionary (key size)
def num(self):
return self.sentence_piece_processor.GetPieceSize()
# Function to return list of objects to differentiate cached of input/output that model will use.
# Basically it is configurations that effect encoded data.
def get_data_effected_configs(self):
mask_last_token = False
if 'mask_last_token' in self.config:
mask_last_token = self.config['mask_last_token']
clf_id = None
clf_pos_offset = None
if 'clf_id' in self.config:
clf_id = self.config['clf_id']
if 'clf_pos_offset' in self.config:
clf_pos_offset = self.config['clf_pos_offset']
clf_txt = ''
if clf_pos_offset is not None:
clf_txt = '_clf' + str(clf_id) + 'at' + str(clf_pos_offset)
max_seq_length_txt = ''
if 'max_seq_length' in self.config:
max_seq_length_txt = '_len' + str(self.config['max_seq_length'])
if mask_last_token:
return '_dict' + str(self.max_dict_size) + '_masklast' + clf_txt + max_seq_length_txt
else:
return '_dict' + str(self.max_dict_size) + '_' + clf_txt + max_seq_length_txt
# This function returns dimention of data it consumes.
# Ex: X = int[Count] => return 1
# Ex: X = [int[Count], int[Count]] => return 2
def get_data_dimension(self):
if self.config["is_input"] == True:
return 3 # [input_ids, input_mask, segment_ids]
else:
return 1 # Output also need only 'input_ids' tensors
# Function indicates of the data transform has aggregated transformation applied on raw dataset or not.
# Example is that BERT pretrained data transform will try to batch many lines of text from dataset.load_as_list()
# into single data row to maximize length of tranformed dataset.
# For such case, in model training, we should not use dataset.load_as_list() and call transform.encode one by one row
# but instead we should load already transformed data. The flag is to indicate which loading approach to be used.
# Note that encode/decode function should still be implemented because we will call it in online inference mode
# or non-pretrained mode (ex, during finetuning)
def is_data_preaggregated(self):
if self.config['is_pretrain'] == True:
return True
else:
return False
# If data is pre-aggregated, this function is called to load pre-aggregated data instead of calling dataset.load_as_list().
# Returns from this function should be (X, Y, X_valid, Y_valid) - or generator in future...
def load_preaggregated_data(self):
# Return objects of this function
X = None
Y = None
X_valid = None
Y_valid = None
# Load pre-aggregated training dataset
tfrecord_file_list = os.listdir(self.preaggregated_data_path)
tfrecord_file_list = [os.path.join(self.preaggregated_data_path, k) for k in tfrecord_file_list]
print('Pre-aggregated file list = ' + str(tfrecord_file_list))
reader = tf.TFRecordReader()
key, examples = reader.read(tf.train.string_input_producer(tfrecord_file_list, num_epochs=1)) # Only generate all data once
name_to_features = {
"input_ids": tf.io.FixedLenFeature([self.max_seq_length], tf.int64),
"input_mask": tf.io.FixedLenFeature([self.max_seq_length], tf.int64),
"segment_ids": tf.io.FixedLenFeature([self.max_seq_length], tf.int64),
}
parsed_example = tf.parse_single_example(examples, name_to_features)
parsed_example_values = list(parsed_example.values())
# Reuse Keras Session
sess = K.get_session()
# Just read all data into array for now.
# TODO: Implment generator to support very large dataset that is not fit into RAM
all_data = []
sess.run(tf.initialize_local_variables())
tf.train.start_queue_runners(sess=sess)
try:
while True:
data = sess.run(parsed_example_values)
for i in range(len(data)):
if len(all_data) <= i:
all_data.append([])
all_data[i].append(data[i])
except tf.errors.OutOfRangeError:
pass
all_data = [np.array(a) for a in all_data]
X = all_data
Y = all_data[0] # Y is only 'input_ids' tensor
K.clear_session() # sess object is not valid anymore after this
# Load pre-aggregated validation dataset
tfrecord_file_list = os.listdir(self.preaggregated_validation_data_path)
tfrecord_file_list = [os.path.join(self.preaggregated_validation_data_path, k) for k in tfrecord_file_list]
print('Pre-aggregated file list = ' + str(tfrecord_file_list))
reader = tf.TFRecordReader()
key, examples = reader.read(tf.train.string_input_producer(tfrecord_file_list, num_epochs=1)) # Only generate all data once
name_to_features = {
"input_ids": tf.io.FixedLenFeature([self.max_seq_length], tf.int64),
"input_mask": tf.io.FixedLenFeature([self.max_seq_length], tf.int64),
"segment_ids": tf.io.FixedLenFeature([self.max_seq_length], tf.int64),
}
parsed_example = tf.parse_single_example(examples, name_to_features)
parsed_example_values = list(parsed_example.values())
# Reuse Keras Session
sess = K.get_session()
# Just read all data into array for now.
# TODO: Implment generator to support very large dataset that is not fit into RAM
all_data = []
sess.run(tf.initialize_local_variables())
tf.train.start_queue_runners(sess=sess)
try:
while True:
data = sess.run(parsed_example_values)
for i in range(len(data)):
if len(all_data) <= i:
all_data.append([])
all_data[i].append(data[i])
except tf.errors.OutOfRangeError:
pass
all_data = [np.array(a) for a in all_data]
X_valid = all_data
Y_valid = all_data[0] # Y is only 'input_ids' tensor
K.clear_session() # sess object is not valid anymore after this
#print(len(X_valid))
#print(len(Y_valid))
return (X, Y, X_valid, Y_valid)
# Function indicates if there is dynamic preprocessing needed to be applied on data or not.
# Dynamic preprocessing is the logics those will be applied on data at starting of each epoch before feeding into to the model.
# Example for such situation is "BERT" which we want to "mask" some tokens out, but we want it to be dynamically random in each eopch,
# which mean for the same input string, we mask different tokens in each epoch of training.
# This actually can be done once in data pre-aggregation step that create multiply dataset with different mask,
# or can be done here dynamically on-the-fly without need to multiple training data rows.
def is_data_dynamically_aggregated(self):
# We want to perform tokens random masking for input side only...
if self.config["is_input"] == True:
return True
else:
return False # Output also need only 'input_ids' tensors
    def scatter_update(self, sequence, updates, positions):
        """Scatter-update a sequence.
        Args:
        sequence: A [batch_size, seq_len] or [batch_size, seq_len, depth] tensor
        updates: A tensor of size batch_size*seq_len(*depth)
        positions: A [batch_size, n_positions] tensor
        Returns: A tuple of two tensors. First is a [batch_size, seq_len] or
        [batch_size, seq_len, depth] tensor of "sequence" with elements at
        "positions" replaced by the values at "updates." Updates to index 0 are
        ignored. If there are duplicated positions the update is only applied once.
        Second is a [batch_size, seq_len] mask tensor of which inputs were updated.
        """
        shape = self.get_shape_list(sequence, expected_rank=[2, 3])
        depth_dimension = (len(shape) == 3)
        if depth_dimension:
            B, L, D = shape
        else:
            # Promote the rank-2 sequence to rank 3 with depth 1 so both
            # cases share the same scatter logic below.
            B, L = shape
            D = 1
            sequence = tf.expand_dims(sequence, -1)
        N = self.get_shape_list(positions)[1]
        # Convert per-row positions into indices of the flattened [B*L] view
        # by shifting each batch row by L * row_index.
        shift = tf.expand_dims(L * tf.range(B), -1)
        flat_positions = tf.reshape(positions + shift, [-1, 1])
        flat_updates = tf.reshape(updates, [-1, D])
        updates = tf.scatter_nd(flat_positions, flat_updates, [B * L, D])
        updates = tf.reshape(updates, [B, L, D])
        # updates_mask counts, per position, how many update entries landed on
        # it (duplicates accumulate and are normalized away below).
        flat_updates_mask = tf.ones([B * N], tf.int32)
        updates_mask = tf.scatter_nd(flat_positions, flat_updates_mask, [B * L])
        updates_mask = tf.reshape(updates_mask, [B, L])
        # Position 0 is never updated (see docstring: updates to index 0 are ignored).
        not_first_token = tf.concat([tf.zeros((B, 1), tf.int32),
                                     tf.ones((B, L - 1), tf.int32)], -1)
        updates_mask *= not_first_token
        updates_mask_3d = tf.expand_dims(updates_mask, -1)
        # account for duplicate positions
        if sequence.dtype == tf.float32:
            updates_mask_3d = tf.cast(updates_mask_3d, tf.float32)
            updates /= tf.maximum(1.0, updates_mask_3d)
        else:
            assert sequence.dtype == tf.int32
            updates = tf.math.floordiv(updates, tf.maximum(1, updates_mask_3d))
        updates_mask = tf.minimum(updates_mask, 1)
        updates_mask_3d = tf.minimum(updates_mask_3d, 1)
        # Blend: keep original values where the mask is 0, updates where it is 1.
        updated_sequence = (((1 - updates_mask_3d) * sequence) +
                            (updates_mask_3d * updates))
        if not depth_dimension:
            updated_sequence = tf.squeeze(updated_sequence, -1)
        return updated_sequence, updates_mask
def _get_candidates_mask(self, all_inputs,
disallow_from_mask=None):
"""Returns a mask tensor of positions in the input that can be masked out."""
input_ids, input_mask, segment_ids = all_inputs
ignore_ids = [self.startid(), self.endid(), self.maskid()]
candidates_mask = tf.ones_like(input_ids, tf.bool)
for ignore_id in ignore_ids:
candidates_mask &= tf.not_equal(input_ids, ignore_id)
candidates_mask &= tf.cast(input_mask, tf.bool)
if disallow_from_mask is not None:
candidates_mask &= ~disallow_from_mask
return candidates_mask
def assert_rank(self, tensor, expected_rank, name=None):
"""Raises an exception if the tensor rank is not of the expected rank.
Args:
tensor: A tf.Tensor to check the rank of.
expected_rank: Python integer or list of integers, expected rank.
name: Optional name of the tensor for the error message.
Raises:
ValueError: If the expected shape doesn't match the actual shape.
"""
if name is None:
name = tensor.name
expected_rank_dict = {}
if isinstance(expected_rank, six.integer_types):
expected_rank_dict[expected_rank] = True
else:
for x in expected_rank:
expected_rank_dict[x] = True
actual_rank = tensor.shape.ndims
if actual_rank not in expected_rank_dict:
scope_name = tf.get_variable_scope().name
raise ValueError(
"For the tensor `%s` in scope `%s`, the actual rank "
"`%d` (shape = %s) is not equal to the expected rank `%s`" %
(name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
    def get_shape_list(self, tensor, expected_rank=None, name=None):
        """Returns a list of the shape of tensor, preferring static dimensions.
        Args:
        tensor: A tf.Tensor object to find the shape of.
        expected_rank: (optional) int. The expected rank of `tensor`. If this is
        specified and the `tensor` has a different rank, and exception will be
        thrown.
        name: Optional name of the tensor for the error message.
        Returns:
        A list of dimensions of the shape of tensor. All static dimensions will
        be returned as python integers, and dynamic dimensions will be returned
        as tf.Tensor scalars.
        """
        # Plain numpy arrays / lists have fully static shapes; validate the
        # rank (if requested) and return the shape tuple directly.
        if isinstance(tensor, np.ndarray) or isinstance(tensor, list):
            shape = np.array(tensor).shape
            if isinstance(expected_rank, six.integer_types):
                assert len(shape) == expected_rank
            elif expected_rank is not None:
                assert len(shape) in expected_rank
            return shape
        if name is None:
            name = tensor.name
        if expected_rank is not None:
            self.assert_rank(tensor, expected_rank, name)
        shape = tensor.shape.as_list()
        # Collect the axes whose static size is unknown (None).
        non_static_indexes = []
        for (index, dim) in enumerate(shape):
            if dim is None:
                non_static_indexes.append(index)
        if not non_static_indexes:
            return shape
        # Fill each unknown axis with the corresponding dynamic tf.shape scalar.
        dyn_shape = tf.shape(tensor)
        for index in non_static_indexes:
            shape[index] = dyn_shape[index]
        return shape
    def mask(self, max_predictions_per_seq,
             all_inputs, mask_prob, proposal_distribution=1.0,
             disallow_from_mask=None, already_masked=None):
        """Implementation of dynamic masking. The optional arguments aren't needed for
        BERT/ELECTRA and are from early experiments in "strategically" masking out
        tokens instead of uniformly at random.
        Args:
        max_predictions_per_seq: maximum number of tokens to mask per example
        all_inputs: [input_ids, input_mask, segment_ids] tensors
        mask_prob: percent of tokens to mask
        proposal_distribution: for non-uniform masking can be a [B, L] tensor
        of scores for masking each position.
        disallow_from_mask: a boolean tensor of [B, L] of positions that should
        not be masked out
        already_masked: a boolean tensor of [B, N] of already masked-out tokens
        for multiple rounds of masking
        Returns: [masked input_ids, masked_lm_positions, masked_lm_ids,
        masked_lm_weights]
        """
        input_ids, input_mask, segment_ids = all_inputs
        # Get the batch size, sequence length, and max masked-out tokens
        N = max_predictions_per_seq
        B, L = self.get_shape_list(input_ids)
        # Find indices where masking out a token is allowed
        candidates_mask = self._get_candidates_mask(all_inputs, disallow_from_mask)
        # Set the number of tokens to mask out per example
        num_tokens = tf.cast(tf.reduce_sum(input_mask, -1), tf.float32)
        num_to_predict = tf.maximum(1, tf.minimum(
            N, tf.cast(tf.round(num_tokens * mask_prob), tf.int32)))
        # masked_lm_weights is 1 for the first num_to_predict slots, 0 for the
        # rest; it zeroes out unused prediction slots everywhere below.
        masked_lm_weights = tf.cast(tf.sequence_mask(num_to_predict, N), tf.float32)
        if already_masked is not None:
            masked_lm_weights *= (1 - already_masked)
        # Get a probability of masking each position in the sequence
        candidate_mask_float = tf.cast(candidates_mask, tf.float32)
        sample_prob = (proposal_distribution * candidate_mask_float)
        sample_prob /= tf.reduce_sum(sample_prob, axis=-1, keepdims=True)
        # Sample the positions to mask out
        sample_prob = tf.stop_gradient(sample_prob)
        sample_logits = tf.log(sample_prob)
        masked_lm_positions = tf.random.categorical(
            sample_logits, N, dtype=tf.int32)
        masked_lm_positions *= tf.cast(masked_lm_weights, tf.int32)
        # Get the ids of the masked-out tokens
        # (flatten input_ids to [B*L] and gather with row-shifted positions)
        shift = tf.expand_dims(L * tf.range(B), -1)
        flat_positions = tf.reshape(masked_lm_positions + shift, [-1, 1])
        masked_lm_ids = tf.gather_nd(tf.reshape(input_ids, [-1]),
                                     flat_positions)
        masked_lm_ids = tf.reshape(masked_lm_ids, [B, -1])
        masked_lm_ids *= tf.cast(masked_lm_weights, tf.int32)
        # Update the input ids
        # (only ~85% of the selected positions are replaced by [MASK];
        # the rest keep their original token)
        replace_with_mask_positions = masked_lm_positions * tf.cast(
            tf.less(tf.random.uniform([B, N]), 0.85), tf.int32)
        inputs_ids, _ = self.scatter_update(
            input_ids, tf.fill([B, N], self.maskid() ),
            replace_with_mask_positions)
        return [tf.stop_gradient(inputs_ids),
                masked_lm_positions,
                masked_lm_ids,
                masked_lm_weights]
# This function returns tensor operators in Keras layer form to perform dynamically aggregation on training data.
# Note that this will be added to calculation graph for to perform the operations on each input before feeding to model.
# (or append after model output in case of output transformation)
# We cannot perform it outside calculation graph because it will be much more slower and will break Keras training loop.
    def get_dynamically_aggregation_layer(self, all_input_tensors):
        """Wire the per-epoch dynamic masking ops into the Keras graph.

        The result is memoized in self.aggregated_tensors so the layer is
        built only once per transform instance.
        """
        # We want to perform tokens random masking for input side only...
        if self.aggregated_tensors is not None:
            return self.aggregated_tensors
        if self.config["is_input"] == True:
            print(all_input_tensors)
            # If we are not in pretrained mode, just do not mask input.
            # Set masked_lm_positions, masked_lm_weights as None
            if self.config["is_pretrain"] == False:
                # Get the batch size, sequence length, and max masked-out tokens
                mask_prob = 0.15
                max_predictions_per_seq = int((mask_prob + 0.005) * self.max_seq_length)
                N = max_predictions_per_seq
                B, L = self.get_shape_list(all_input_tensors[0])
                # Zero tensors keep the model's input signature identical to
                # pretraining mode while disabling the masking loss.
                null_masked_lm_ids = tf.zeros([B, N], dtype=tf.int32, name='null_masked_lm_ids')
                null_masked_lm_weights = tf.zeros([B, N], dtype=tf.float32, name='null_masked_lm_weights')
                self.aggregated_tensors = [*all_input_tensors, null_masked_lm_ids, null_masked_lm_weights]
                return self.aggregated_tensors
            def do_mask(all_inputs):
                # Runs inside a Keras Lambda layer, so the masking is
                # re-sampled on every forward pass (i.e. per batch/epoch).
                input_ids, input_mask, segment_ids = all_inputs
                #input_ids = tf.Print(input_ids, ['input_ids', tf.shape(input_ids), input_ids], summarize=32)
                #input_mask = tf.Print(input_mask, ['input_mask', tf.shape(input_mask), input_mask], summarize=32)
                mask_prob = 0.15
                max_predictions_per_seq = int((mask_prob + 0.005) * self.max_seq_length)
                updated_input_ids, masked_lm_positions, masked_lm_ids, masked_lm_weights = self.mask(max_predictions_per_seq, all_inputs, mask_prob)
                ''' For debugging purpose, assign fixed masked tokens
                updated_input_ids = tf.constant([[3, 6, 4 ,8, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=tf.int32)
                masked_lm_positions = tf.constant([[2, 4]], dtype=tf.int32)
                masked_lm_ids = tf.constant([[5, 9]], dtype=tf.int32)
                masked_lm_weights = tf.constant([[1.0, 1.0]], dtype=tf.float32)
                '''
                '''
                updated_input_ids = tf.Print(updated_input_ids, ['updated_input_ids', tf.shape(updated_input_ids), updated_input_ids], summarize=32)
                masked_lm_positions = tf.Print(masked_lm_positions, ['masked_lm_positions', tf.shape(masked_lm_positions), masked_lm_positions], summarize=32)
                masked_lm_ids = tf.Print(masked_lm_ids, ['masked_lm_ids', tf.shape(masked_lm_ids), masked_lm_ids], summarize=32)
                masked_lm_weights = tf.Print(masked_lm_weights, ['masked_lm_weights', tf.shape(masked_lm_weights), masked_lm_weights], summarize=32)
                '''
                return [updated_input_ids, input_mask, segment_ids, masked_lm_positions, masked_lm_weights]
            input_ids, input_mask, token_type_ids = all_input_tensors
            all_aggregated_tensors = Lambda(do_mask, name='bert_random_mask')([input_ids, input_mask, token_type_ids])
            self.aggregated_tensors = all_aggregated_tensors
            return all_aggregated_tensors
        else:
            # For output, we only need the 'input_ids' tensor
            self.aggregated_tensors = all_input_tensors
            return all_input_tensors
# Unit Test
# NOTE: these two prints execute at import time, not only under test.
print('-===================-')
print(__name__)
# The guard below only matches when the module is deliberately loaded under
# the name '__unittest__', so this self-test is effectively disabled by default.
if __name__ == '__unittest__':
    #if __name__ == '__main__' or __name__ == 'tensorflow.keras.initializers':
    print('=== UNIT TESTING ===')
    config = {
        "column_id": 0,
        "max_seq_length": 16,
        "is_input": True,
        "is_pretrain": True
    }
    from NLP_LIB.datasets.array_dataset_wrapper import ArrayDatasetWrapper
    # Tiny in-memory dataset: (X, Y, X_valid, Y_valid) token lists.
    dataset = ArrayDatasetWrapper({
        'values': [
            ['Hello', 'World','Hello', 'World','Hello', 'World','Hello', 'World','Hello', 'World'], # X
            ['Hello', 'World','Hello', 'World','Hello', 'World','Hello', 'World','Hello', 'World'], # Y
            ['Hella', 'Warld','aello', 'World','Hello', 'Uorld','Hello', 'WWrld','HellZ', 'World'], # X Valid
            ['Hello', 'World','Hello', 'World','Hello', 'World','Hello', 'World','Hello', 'World'], # Y Valid
        ]
    })
    # duplicate them
    transform = BERTSentencePiecePretrainWrapper(config, dataset)
    # Round-trip check: encode a pair of tokens, then decode the ids back.
    test_data = ['Hello', 'World']
    print('test_data = ' + str(test_data))
    encoded_data = transform.encode(test_data)
    print('encoded_data = ' + str(encoded_data))
    token_ids = encoded_data[0]
    print('token_ids = ' + str(token_ids))
    decoded_data = transform.decode(token_ids)
    print('decoded_data = ' + str(decoded_data))
    # Round-trip check for the pre-aggregated TFRecord loading path.
    X, Y, X_valid, Y_valid = transform.load_preaggregated_data()
    X_ids = X[0]
    print('X_ids = ' + str(X_ids))
    decoded_X = transform.decode(X_ids)
    print('decoded_X = ' + str(decoded_X))
    X_valid_ids = X_valid[0]
    print('X_valid_ids = ' + str(X_valid_ids))
    decoded_X_valid = transform.decode(X_valid_ids)
    print('decoded_X_valid = ' + str(decoded_X_valid))
    print('Finished')
/ClusterShell-1.9.1.tar.gz/ClusterShell-1.9.1/doc/txt/clustershell.rst | ClusterShell is an event-driven open source Python framework, designed to run
local or distant commands in parallel on server farms or on large Linux
clusters. It will take care of common issues encountered on HPC clusters, such
as operating on groups of nodes, running distributed commands using optimized
execution algorithms, as well as gathering results and merging identical
outputs, or retrieving return codes. ClusterShell takes advantage of existing
remote shell facilities already installed on your systems, like SSH.
User tools
----------
ClusterShell provides clush, clubak and cluset/nodeset, convenient command-line
tools that allow traditional shell scripts to benefit from some of the
library's features:
- **clush**: issue commands to cluster nodes and format output
Example of use:
::
$ clush -abL uname -r
node[32-49,51-71,80,82-150,156-159]: 2.6.18-164.11.1.el5
node[3-7,72-79]: 2.6.18-164.11.1.el5_lustre1.10.0.36
node[2,151-155]: 2.6.31.6-145.fc11.2.x86_64
See *man clush* for more details.
- **clubak**: improved dshbak to gather and sort dsh-like outputs
See *man clubak* for more details.
- **nodeset** (or **cluset**): compute advanced nodeset/nodegroup operations
Examples of use:
::
$ echo node160 node161 node162 node163 | nodeset -f
node[160-163]
$ nodeset -f node[0-7,32-159] node[160-163]
node[0-7,32-163]
$ nodeset -e node[160-163]
node160 node161 node162 node163
$ nodeset -f node[32-159] -x node33
node[32,34-159]
$ nodeset -f node[32-159] -i node[0-7,20-21,32,156-159]
node[32,156-159]
$ nodeset -f node[33-159] --xor node[32-33,156-159]
node[32,34-155]
$ nodeset -l
@oss
@mds
@io
@compute
$ nodeset -e @mds
node6 node7
See *man nodeset* (or *man cluset*) for more details.
Please visit the ClusterShell website_.
.. _website: http://cea-hpc.github.io/clustershell/
| PypiClean |
/11l-2021.3-py3-none-any.whl/_11l_to_cpp/tokenizer.py | R"""
После данной обработки отступы перестают играть роль — границу `scope` всегда определяют фигурные скобки.
Также здесь выполняется склеивание строк, и таким образом границу statement\утверждения задаёт либо символ `;`,
либо символ новой строки (при условии, что перед ним не стоит символ `…`!).
===============================================================================================================
Ошибки:
---------------------------------------------------------------------------------------------------------------
Error: `if/else/fn/loop/switch/type` scope is empty.
---------------------------------------------------------------------------------------------------------------
Существуют операторы, которые всегда требуют нового scope\блока, который можно обозначить двумя способами:
1. Начать следующую строку с отступом относительно предыдущей, например:
if condition\условие
scope\блок
2. Заключить блок\scope в фигурные скобки:
if condition\условие {scope\блок}
Примечание. При использовании второго способа блок\scope может иметь произвольный уровень отступа:
if condition\условие
{
scope\блок
}
---------------------------------------------------------------------------------------------------------------
Error: `if/else/fn/loop/switch/type` scope is empty, after applied implied line joining: ```...```
---------------------------------------------------------------------------------------------------------------
Сообщение об ошибке аналогично предыдущему, но выделено в отдельное сообщение об ошибке, так как может
возникать по вине ошибочного срабатывания автоматического склеивания строк (и показывается оно тогда, когда
было произведено склеивание строк в месте данной ошибки).
---------------------------------------------------------------------------------------------------------------
Error: mixing tabs and spaces in indentation: `...`
---------------------------------------------------------------------------------------------------------------
В одной строке для отступа используется смесь пробелов и символов табуляции.
Выберите что-либо одно (желательно сразу для всего файла): либо пробелы для отступа, либо табуляцию.
Примечание: внутри строковых литералов, в комментариях, а также внутри строк кода можно смешивать пробелы и
табуляцию. Эта ошибка генерируется только при проверке отступов (отступ — последовательность символов пробелов
или табуляции от самого начала строки до первого символа отличного от пробела и табуляции).
---------------------------------------------------------------------------------------------------------------
Error: inconsistent indentations: ```...```
---------------------------------------------------------------------------------------------------------------
В текущей строке кода для отступа используются пробелы, а в предыдущей строке — табуляция (либо наоборот).
[[[
Сообщение было предназначено для несколько другой ошибки: для любых двух соседних строк, если взять отступ
одной из них, то другой отступ должен начинаться с него же {если отступ текущей строки отличается от отступа
предыдущей, то:
1. Когда отступ текущей строки начинается на отступ предыдущей строки, это INDENT.
2. Когда отступ предыдущей строки начинается на отступ текущей строки, это DEDENT.
}. Например:
if a:
SSTABif b:
SSTABTABi = 0
SSTABSi = 0
Последняя пара строк не удовлетворяет этому требованию, так как ни строка ‘SSTABTAB’ не начинается на строку
‘SSTABS’, ни ‘SSTABS’ не начинается на ‘SSTABTAB’.
Эта проверка имела бы смысл в случае разрешения смешения пробелов и табуляции для отступа в пределах одной
строки (а это разрешено в Python). Но я решил отказаться от этой идеи, а лучшего текста сообщения для этой
ошибки не придумал.
]]]
---------------------------------------------------------------------------------------------------------------
Error: unindent does not match any outer indentation level
---------------------------------------------------------------------------------------------------------------
[-Добавить описание ошибки.-]
===============================================================================================================
"""
from enum import IntEnum
from typing import List, Tuple
# Alias to make signatures read as "single character" rather than "string".
Char = str
# 11l keywords: one-letter forms, Cyrillic one-letter forms, English words,
# and Russian words (four parallel groups).
keywords = ['V', 'C', 'I', 'E', 'F', 'L', 'N', 'R', 'S', 'T', 'X',
            'П', 'С', 'Е', 'И', 'Ф', 'Ц', 'Н', 'Р', 'В', 'Т', 'Х',
            'var', 'in', 'if', 'else', 'fn', 'loop', 'null', 'return', 'switch', 'type', 'exception',
            'перем', 'С', 'если', 'иначе', 'фн', 'цикл', 'нуль', 'вернуть', 'выбрать', 'тип', 'исключение']
#keywords.remove('C'); keywords.remove('С'); keywords.remove('in') # it is more convenient to consider C/in as an operator, not a keyword (however, this line is not necessary)
# new_scope_keywords = ['else', 'fn', 'if', 'loop', 'switch', 'type']
# Decided not to track new_scope_keywords at the lexer level because of loop.break and case in switch
empty_list_of_str : List[str] = []
# Operator tables indexed by operator length: binary_operators[n] holds all
# n-character binary operators (index 0 is a placeholder).
binary_operators : List[List[str]] = [empty_list_of_str, [str('+'), '-', '*', '/', '%', '^', '&', '|', '<', '>', '=', '?'], ['<<', '>>', '<=', '>=', '==', '!=', '+=', '-=', '*=', '/=', '%=', '&=', '|=', '^=', '->', '..', '.<', '.+', '<.', 'I/', 'Ц/', 'C ', 'С '], ['<<=', '>>=', '‘’=', '[+]', '[&]', '[|]', '(+)', '<.<', 'I/=', 'Ц/=', 'in ', '!C ', '!С '], ['[+]=', '[&]=', '[|]=', '(+)=', '!in ']]
unary_operators : List[List[str]] = [empty_list_of_str, [str('!')], ['++', '--'], ['(-)']]
# All operators sorted longest-first so the lexer can do maximal-munch matching.
sorted_operators = sorted(binary_operators[1] + binary_operators[2] + binary_operators[3] + binary_operators[4] + unary_operators[1] + unary_operators[2] + unary_operators[3], key = lambda x: len(x), reverse = True)
binary_operators[1].remove('^') # for `^L.break` support
binary_operators[2].remove('..') # for `L(n) 1..`
class Error(Exception):
    """Lexing error carrying the character offset(s) in the source where it occurred."""
    message : str  # human-readable description of the problem
    pos : int      # start offset of the offending span in the source string
    end : int      # end offset (initially equal to pos; may be widened by callers)

    def __init__(self, message, pos):
        # Bug fix: the original never called Exception.__init__, so str(e)
        # was empty and e.args was (); forward the message for normal
        # exception behavior while keeping the .message attribute.
        super().__init__(message)
        self.message = message
        self.pos = pos
        self.end = pos
class Token:
    """One lexeme: a half-open span [start, end) of the source plus a category.

    The token stores offsets rather than text, so the original source string
    must be supplied to recover the lexeme (see value()).
    """

    class Category(IntEnum):
        # Grouping in the spirit of the "other tokens" section of the Python
        # language reference's lexical analysis chapter.
        NAME = 0                    # identifier
        KEYWORD = 1
        CONSTANT = 2
        DELIMITER = 3               # i.e. separator
        OPERATOR = 4
        NUMERIC_LITERAL = 5
        STRING_LITERAL = 6
        STRING_CONCATENATOR = 7     # synthetic token between an adjacent string literal and identifier
        SCOPE_BEGIN = 8             # analogous to Python's INDENT token
        SCOPE_END = 9               # analogous to Python's DEDENT token
        STATEMENT_SEPARATOR = 10

    start : int
    end : int
    category : Category

    def __init__(self, start, end, category):
        self.start, self.end, self.category = start, end, category

    def __repr__(self):
        return f'{self.start}'

    def value(self, source):
        """The lexeme text, sliced out of the original source string."""
        return source[self.start : self.end]

    def to_str(self, source):
        """Human-readable form, mainly for debugging."""
        return f'Token({self.category!s}, "{self.value(source)}")'
def tokenize(source : str, implied_scopes : List[Tuple[Char, int]] = None, line_continuations : List[int] = None, comments : List[Tuple[int, int]] = None):
tokens : List[Token] = []
indentation_levels : List[Tuple[int, bool]] = []
nesting_elements : List[Tuple[Char, int]] = [] # логически этот стек можно объединить с indentation_levels, но так немного удобнее (конкретно: для проверок `nesting_elements[-1][0] != ...`)
i = 0
begin_of_line = True
indentation_tabs : bool
prev_linestart : int
def skip_multiline_comment():
nonlocal i, source, comments
comment_start = i
lbr = source[i+1]
rbr = {"‘": "’", "(": ")", "{": "}", "[": "]"}[lbr]
i += 2
nesting_level = 1
while True:
ch = source[i]
i += 1
if ch == lbr:
nesting_level += 1
elif ch == rbr:
nesting_level -= 1
if nesting_level == 0:
break
if i == len(source):
raise Error('there is no corresponding opening parenthesis/bracket/brace/qoute for `' + lbr + '`', comment_start+1)
if comments is not None:
comments.append((comment_start, i))
while i < len(source):
if begin_of_line: # at the beginning of each line, the line's indentation level is compared to the last indentation_levels [:1]
begin_of_line = False
linestart = i
tabs = False
spaces = False
while i < len(source):
if source[i] == ' ':
spaces = True
elif source[i] == "\t":
tabs = True
else:
break
i += 1
if i == len(source): # end of source
break
ii = i
if source[i:i+2] in (R'\‘', R'\(', R'\{', R'\['): # ]})’
skip_multiline_comment()
while i < len(source) and source[i] in " \t": # skip whitespace characters
i += 1
if i == len(source): # end of source
break
if source[i] in "\r\n" or source[i:i+2] in ('//', R'\\'): # lines with only whitespace and/or comments do not affect the indentation
continue
if source[i] in "{}": # Indentation level of lines starting with { or } is ignored
continue
if len(tokens) \
and tokens[-1].category == Token.Category.STRING_CONCATENATOR \
and source[i] in '"\'‘': # ’ and not source[i+1:i+2] in ({'"':'"', '‘':'’'}[source[i]],):
if line_continuations is not None:
line_continuations.append(tokens[-1].end)
if source[i:i+2] in ('""', '‘’'):
i += 2
continue
if len(tokens) \
and tokens[-1].category == Token.Category.STRING_LITERAL \
and source[i:i+2] in ('""', '‘’'):
if line_continuations is not None:
line_continuations.append(tokens[-1].end)
tokens.append(Token(i, i, Token.Category.STRING_CONCATENATOR))
i += 2
continue
if (len(tokens)
and tokens[-1].category == Token.Category.OPERATOR
and tokens[-1].value(source) in binary_operators[tokens[-1].end - tokens[-1].start] # ‘Every line of code which ends with any binary operator should be joined with the following line of code.’:[https://github.com/JuliaLang/julia/issues/2097#issuecomment-339924750][-339924750]<
and source[tokens[-1].end-4:tokens[-1].end] != '-> &'): # for `F symbol(id, bp = 0) -> &`
if line_continuations is not None:
line_continuations.append(tokens[-1].end)
continue
# if not (len(indentation_levels) and indentation_levels[-1][0] == -1): # сразу после символа `{` это [:правило] не действует ...а хотя не могу подобрать пример, который бы показывал необходимость такой проверки, а потому оставлю этот if закомментированным # }
if ((source[i ] in binary_operators[1]
or source[i:i+2] in binary_operators[2]
or source[i:i+3] in binary_operators[3]
or source[i:i+4] in binary_operators[4]) # [правило:] ‘Every line of code which begins with any binary operator should be joined with the previous line of code.’:[-339924750]<
and not (source[i ] in unary_operators[1] # Rude fix for:
or source[i:i+2] in unary_operators[2] # a=b
or source[i:i+3] in unary_operators[3]) # ++i // Plus symbol at the beginning here should not be treated as binary + operator, so there is no implied line joining
and (source[i] not in ('&', '-') or source[i+1:i+2] == ' ')): # Символы `&` и `-` обрабатываются по-особенному — склеивание строк происходит только если после одного из этих символов стоит пробел
if len(tokens) == 0:
raise Error('source can not starts with a binary operator', i)
if line_continuations is not None:
line_continuations.append(tokens[-1].end)
continue
if source[i:i+2] == R'\.': # // Support for constructions like: ||| You need just to add `\` at the each line starting from dot:
if len(tokens): # \\ result = abc.method1() ||| result = abc.method1()
i += 1 # \\ .method2() ||| \.method2()
#else: # with `if len(tokens): i += 1` there is no need for this else branch
# raise Error('unexpected character `\`')
if line_continuations is not None:
line_continuations.append(tokens[-1].end)
continue
if tabs and spaces:
next_line_pos = source.find("\n", i)
raise Error('mixing tabs and spaces in indentation: `' + source[linestart:i].replace(' ', 'S').replace("\t", 'TAB') + source[i:next_line_pos if next_line_pos != -1 else len(source)] + '`', i)
indentation_level = ii - linestart
if len(indentation_levels) and indentation_levels[-1][0] == -1: # сразу после символа `{` идёт новый произвольный отступ (понижение уровня отступа может быть полезно, если вдруг отступ оказался слишком большой), который действует вплоть до парного символа `}`
indentation_levels[-1] = (indentation_level, indentation_levels[-1][1]) #indentation_levels[-1][0] = indentation_level # || maybe this is unnecessary (actually it is necessary, see test "fn f()\n{\na = 1") // }
indentation_tabs = tabs
else:
prev_indentation_level = indentation_levels[-1][0] if len(indentation_levels) else 0
if indentation_level > 0 and prev_indentation_level > 0 and indentation_tabs != tabs:
e = i + 1
while e < len(source) and source[e] not in "\r\n":
e += 1
raise Error("inconsistent indentations:\n```\n" + prev_indentation_level*('TAB' if indentation_tabs else 'S') + source[prev_linestart:linestart]
+ (ii-linestart)*('TAB' if tabs else 'S') + source[ii:e] + "\n```", ii)
prev_linestart = ii
if indentation_level == prev_indentation_level: # [1:] [-1]:‘If it is equal, nothing happens.’ :)(: [:2]
if len(tokens) and tokens[-1].category != Token.Category.SCOPE_END:
tokens.append(Token(linestart-1, linestart, Token.Category.STATEMENT_SEPARATOR))
elif indentation_level > prev_indentation_level: # [2:] [-1]:‘If it is larger, it is pushed on the stack, and one INDENT token is generated.’ [:3]
if prev_indentation_level == 0: # len(indentation_levels) == 0 or indentation_levels[-1][0] == 0:
indentation_tabs = tabs # первоначальная/новая установка символа для отступа (либо табуляция, либо пробелы) производится только от нулевого уровня отступа
indentation_levels.append((indentation_level, False))
tokens.append(Token(linestart, ii, Token.Category.SCOPE_BEGIN))
if implied_scopes is not None:
implied_scopes.append((Char('{'), tokens[-2].end + (1 if source[tokens[-2].end] in " \n" else 0)))
else: # [3:] [-1]:‘If it is smaller, it ~‘must’ be one of the numbers occurring on the stack; all numbers on the stack that are larger are popped off, and for each number popped off a DEDENT token is generated.’ [:4]
while True:
if indentation_levels[-1][1]:
raise Error('too much unindent, what is this unindent intended for?', ii)
indentation_levels.pop()
tokens.append(Token(ii, ii, Token.Category.SCOPE_END))
if implied_scopes is not None:
implied_scopes.append((Char('}'), ii))
level = indentation_levels[-1][0] if len(indentation_levels) else 0 #level, explicit_scope_via_curly_braces = indentation_levels[-1] if len(indentation_levels) else [0, False]
if level == indentation_level:
break
if level < indentation_level:
raise Error('unindent does not match any outer indentation level', ii)
ch = source[i]
if ch in " \t":
i += 1 # just skip whitespace characters
elif ch in "\r\n":
#if newline_chars is not None: # rejected this code as it does not count newline characters inside comments and string literals
# newline_chars.append(i)
i += 1
if ch == "\r" and source[i:i+1] == "\n":
i += 1
if len(nesting_elements) == 0 or nesting_elements[-1][0] not in '([': # если мы внутри скобок, то начинать новую строку не нужно # ])
begin_of_line = True
elif (ch == '/' and source[i+1:i+2] == '/' ) \
or (ch == '\\' and source[i+1:i+2] == '\\'): # single-line comment
comment_start = i
i += 2
while i < len(source) and source[i] not in "\r\n":
i += 1
if comments is not None:
comments.append((comment_start, i))
elif ch == '\\' and source[i+1:i+2] in "‘({[": # multi-line comment # ]})’
skip_multiline_comment()
else:
def is_hexadecimal_digit(ch):
return '0' <= ch <= '9' or 'A' <= ch <= 'F' or 'a' <= ch <= 'f' or ch in 'абсдефАБСДЕФ'
operator_s = ''
# if ch in 'CС' and not (source[i+1:i+2].isalpha() or source[i+1:i+2].isdigit()): # without this check [and if 'C' is in binary_operators] when identifier starts with `C` (for example `Circle`), then this first letter of identifier is mistakenly considered as an operator
# operator_s = ch
# else:
for op in sorted_operators:
if source[i:i+len(op)] == op:
operator_s = op
break
lexem_start = i
i += 1
category : Token.Category
if operator_s != '':
i = lexem_start + len(operator_s)
if source[i-1] == ' ': # for correct handling of operator 'C '/'in ' in external tools (e.g. keyletters_to_keywords.py)
i -= 1
category = Token.Category.OPERATOR
elif ch.isalpha() or ch in ('_', '@'): # this is NAME/IDENTIFIER or KEYWORD
if ch == '@':
while i < len(source) and source[i] == '@':
i += 1
if i < len(source) and source[i] == '=':
i += 1
while i < len(source):
ch = source[i]
if not (ch.isalpha() or ch in '_?:' or '0' <= ch <= '9'):
break
i += 1
# Tokenize `fs:path:dirname` to ['fs:path', ':', 'dirname']
j = i - 1
while j > lexem_start:
if source[j] == ':':
i = j
break
j -= 1
if source[i:i+1] == '/' and source[i-1:i] in 'IЦ':
if source[i-2:i-1] == ' ':
category = Token.Category.OPERATOR
else:
raise Error('please clarify your intention by putting space character before or after `I`', i-1)
elif source[i:i+1] == "'": # this is a named argument, a raw string or a hexadecimal number
i += 1
if source[i:i+1] == ' ': # this is a named argument
category = Token.Category.NAME
elif source[i:i+1] in ('‘', "'"): # ’ # this is a raw string
i -= 1
category = Token.Category.NAME
else: # this is a hexadecimal number
while i < len(source) and (is_hexadecimal_digit(source[i]) or source[i] == "'"):
i += 1
if not (source[lexem_start+4:lexem_start+5] == "'" or source[i-3:i-2] == "'" or source[i-2:i-1] == "'"):
raise Error('digit separator in this hexadecimal number is located in the wrong place', lexem_start)
category = Token.Category.NUMERIC_LITERAL
elif source[lexem_start:i] in keywords:
if source[lexem_start:i] in ('V', 'П', 'var', 'перем'): # it is more convenient to consider V/var as [type] name, not a keyword
category = Token.Category.NAME
if source[i:i+1] == '&':
i += 1
elif source[lexem_start:i] in ('N', 'Н', 'null', 'нуль'):
category = Token.Category.CONSTANT
else:
category = Token.Category.KEYWORD
if source[i:i+1] == '.': # this is composite keyword like `L.break`
i += 1
while i < len(source) and (source[i].isalpha() or source[i] in '_.'):
i += 1
if source[lexem_start:i] in ('L.index', 'Ц.индекс', 'loop.index', 'цикл.индекс'): # for correct STRING_CONCATENATOR insertion
category = Token.Category.NAME
else:
category = Token.Category.NAME
elif '0' <= ch <= '9': # this is NUMERIC_LITERAL or CONSTANT 0B or 1B
if ch in '01' and source[i:i+1] in ('B', 'В') and not (is_hexadecimal_digit(source[i+1:i+2]) or source[i+1:i+2] == "'"):
i += 1
category = Token.Category.CONSTANT
else:
is_hex = False
while i < len(source) and is_hexadecimal_digit(source[i]):
if not ('0' <= source[i] <= '9'):
if source[i] in 'eE' and source[i+1:i+2] in ('-', '+'): # fix `1e-10`
break
is_hex = True
i += 1
next_digit_separator = 0
is_oct_or_bin = False
if i < len(source) and source[i] == "'":
if i - lexem_start in (2, 1): # special handling for 12'345/1'234 (чтобы это не считалось short/ultrashort hexadecimal number)
j = i + 1
while j < len(source) and is_hexadecimal_digit(source[j]):
if not ('0' <= source[j] <= '9'):
is_hex = True
j += 1
next_digit_separator = j - 1 - i
elif i - lexem_start == 4: # special handling for 1010'1111b (чтобы это не считалось hexadecimal number)
j = i + 1
while j < len(source) and ((is_hexadecimal_digit(source[j]) and not source[j] in 'bд') or source[j] == "'"): # I know, checking for `in 'bд'` is hacky
j += 1
if j < len(source) and source[j] in 'oоbд':
is_oct_or_bin = True
if i < len(source) and source[i] == "'" and ((i - lexem_start == 4 and not is_oct_or_bin) or (i - lexem_start in (2, 1) and (next_digit_separator != 3 or is_hex))): # this is a hexadecimal number
if i - lexem_start == 2: # this is a short hexadecimal number
while True:
i += 1
if i + 2 > len(source) or not is_hexadecimal_digit(source[i]) or not is_hexadecimal_digit(source[i+1]):
raise Error('wrong short hexadecimal number', lexem_start)
i += 2
if i < len(source) and is_hexadecimal_digit(source[i]):
raise Error('expected end of short hexadecimal number', i)
if source[i:i+1] != "'":
break
elif i - lexem_start == 1: # this is an ultrashort hexadecimal number
i += 1
if i + 1 > len(source) or not is_hexadecimal_digit(source[i]):
raise Error('wrong ultrashort hexadecimal number', lexem_start)
i += 1
if i < len(source) and is_hexadecimal_digit(source[i]):
raise Error('expected end of ultrashort hexadecimal number', i)
else:
i += 1
while i < len(source) and is_hexadecimal_digit(source[i]):
i += 1
if (i - lexem_start) % 5 == 4 and i < len(source):
if source[i] != "'":
if not is_hexadecimal_digit(source[i]):
break
raise Error('here should be a digit separator in hexadecimal number', i)
i += 1
if i < len(source) and source[i] == "'":
raise Error('digit separator in hexadecimal number is located in the wrong place', i)
if (i - lexem_start) % 5 != 4:
raise Error('after this digit separator there should be 4 digits in hexadecimal number', source.rfind("'", 0, i))
else:
while i < len(source) and ('0' <= source[i] <= '9' or source[i] in "'.eE"):
if source[i:i+2] in ('..', '.<', '.+'):
break
if source[i] in 'eE':
if source[i+1:i+2] in '-+':
i += 1
i += 1
if source[i:i+1] in ('o', 'о', 'b', 'д', 's', 'i'):
i += 1
elif "'" in source[lexem_start:i] and not '.' in source[lexem_start:i]: # float numbers do not checked for a while
number = source[lexem_start:i].replace("'", '')
number_with_separators = ''
j = len(number)
while j > 3:
number_with_separators = "'" + number[j-3:j] + number_with_separators
j -= 3
number_with_separators = number[0:j] + number_with_separators
if source[lexem_start:i] != number_with_separators:
raise Error('digit separator in this number is located in the wrong place (should be: '+ number_with_separators +')', lexem_start)
category = Token.Category.NUMERIC_LITERAL
elif ch == "'" and source[i:i+1] == ',': # this is a named-only arguments mark
i += 1
category = Token.Category.DELIMITER
elif ch == '"':
if source[i] == '"' \
and tokens[-1].category == Token.Category.STRING_CONCATENATOR \
and tokens[-2].category == Token.Category.STRING_LITERAL \
and tokens[-2].value(source)[0] == '‘': # ’ // for cases like r = abc‘some big ...’""
i += 1 # \\ ‘... string’
continue # [(
startqpos = i - 1
if len(tokens) and tokens[-1].end == startqpos and ((tokens[-1].category == Token.Category.NAME and tokens[-1].value(source)[-1] != "'") or tokens[-1].value(source) in (')', ']')):
tokens.append(Token(lexem_start, lexem_start, Token.Category.STRING_CONCATENATOR))
while True:
if i == len(source):
raise Error('unclosed string literal', startqpos)
ch = source[i]
i += 1
if ch == '\\':
if i == len(source):
continue
i += 1
elif ch == '"':
break
if source[i:i+1].isalpha() or source[i:i+1] in ('_', '@', ':', '‘', '('): # )’
tokens.append(Token(lexem_start, i, Token.Category.STRING_LITERAL))
tokens.append(Token(i, i, Token.Category.STRING_CONCATENATOR))
continue
category = Token.Category.STRING_LITERAL
elif ch in "‘'":
if source[i] == '’' \
and tokens[-1].category == Token.Category.STRING_CONCATENATOR \
and tokens[-2].category == Token.Category.STRING_LITERAL \
and tokens[-2].value(source)[0] == '"': # // for cases like r = abc"some big ..."‘’
i += 1 # \\ ‘... string’
continue # ‘[(
if len(tokens) and tokens[-1].end == i - 1 and ((tokens[-1].category == Token.Category.NAME and tokens[-1].value(source)[-1] != "'") or tokens[-1].value(source) in (')', ']')):
tokens.append(Token(lexem_start, lexem_start, Token.Category.STRING_CONCATENATOR))
if source[i] == '’': # for cases like `a‘’b`
i += 1
continue
i -= 1
while i < len(source) and source[i] == "'":
i += 1
if source[i:i+1] != '‘': # ’
raise Error('expected left single quotation mark', i)
startqpos = i
i += 1
nesting_level = 1
while True:
if i == len(source):
raise Error('unpaired left single quotation mark', startqpos)
ch = source[i]
i += 1
if ch == "‘":
nesting_level += 1
elif ch == "’":
nesting_level -= 1
if nesting_level == 0:
break
while i < len(source) and source[i] == "'":
i += 1
if source[i:i+1].isalpha() or source[i:i+1] in ('_', '@', ':', '"', '('): # )
tokens.append(Token(lexem_start, i, Token.Category.STRING_LITERAL))
tokens.append(Token(i, i, Token.Category.STRING_CONCATENATOR))
continue
category = Token.Category.STRING_LITERAL
elif ch == '{':
indentation_levels.append((-1, True))
nesting_elements.append((Char('{'), lexem_start)) # }
category = Token.Category.SCOPE_BEGIN
elif ch == '}':
if len(nesting_elements) == 0 or nesting_elements[-1][0] != '{':
raise Error('there is no corresponding opening brace for `}`', lexem_start)
nesting_elements.pop()
while indentation_levels[-1][1] != True:
tokens.append(Token(lexem_start, lexem_start, Token.Category.SCOPE_END))
if implied_scopes is not None: # {
implied_scopes.append((Char('}'), lexem_start))
indentation_levels.pop()
assert(indentation_levels.pop()[1] == True)
category = Token.Category.SCOPE_END
elif ch == ';':
category = Token.Category.STATEMENT_SEPARATOR
elif ch in (',', '.', ':'):
category = Token.Category.DELIMITER
elif ch in '([':
if source[lexem_start:lexem_start+3] == '(.)':
i += 2
category = Token.Category.NAME
else:
nesting_elements.append((ch, lexem_start))
category = Token.Category.DELIMITER
elif ch in '])': # ([
if len(nesting_elements) == 0 or nesting_elements[-1][0] != {']':'[', ')':'('}[ch]: # ])
raise Error('there is no corresponding opening parenthesis/bracket for `' + ch + '`', lexem_start)
nesting_elements.pop()
category = Token.Category.DELIMITER
else:
raise Error('unexpected character `' + ch + '`', lexem_start)
tokens.append(Token(lexem_start, i, category))
if len(nesting_elements):
raise Error('there is no corresponding closing parenthesis/bracket/brace for `' + nesting_elements[-1][0] + '`', nesting_elements[-1][1])
# [4:] [-1]:‘At the end of the file, a DEDENT token is generated for each number remaining on the stack that is larger than zero.’
while len(indentation_levels):
assert(indentation_levels[-1][1] != True)
tokens.append(Token(i, i, Token.Category.SCOPE_END))
if implied_scopes is not None: # {
implied_scopes.append((Char('}'), i-1 if source[-1] == "\n" else i))
indentation_levels.pop()
return tokens | PypiClean |
/FastAudioVisual-0.0.1.tar.gz/FastAudioVisual-0.0.1/README.md | # FastAudioVisual
## Usage
A cross-platform command-line tool that counts the number of lines and files under the current directory.
## Installation
You can install, upgrade, or uninstall count-line with these commands (without the `$`):
```
$ pip install count-line
$ pip install --upgrade count-line
$ pip uninstall count-line
```
## Help
```
usage: line.py [-h] [-s SUFFIX | -f FILTER] [-d]
count the amount of lines and files under the current directory
optional arguments:
-h, --help show this help message and exit
-s SUFFIX, --suffix SUFFIX
count by suffix file name, format: .suffix1.suffix2...
e.g: .cpp.py (without space)
-f FILTER, --filter FILTER
count without filter name, format: .suffix1.suffix2...
e.g: .cpp.py (without space)
-d, --detail show detail results
```
## Examples
1. Count all files under the current directory:
```
$ line
Search in /Users/macbook/Desktop/Examples1/
file count: 4
line count: 373
```
2. Count all files under the current directory with detail results:
```
$ line -d
Search in /Users/macbook/Desktop/Examples2/
========================================
文件后缀名 文件数 总行数
.py 5 397
.cpp 240 11346
总文件数: 245 总行数: 11743
========================================
```
3. Count only files with specified suffixes under the current directory, using `-s` to pass the suffixes as one parameter. When there is more than one suffix, join them without spaces. For example, to count C++ and Python files:
```
$ line -s .cpp.py
Search in /Users/macbook/Desktop/Examples3/
file count: 3
line count: 243
$ line -s .cpp.py -d
Search in /Users/macbook/Desktop/Examples3/
========================================
文件后缀名 文件数 总行数
.py 5 397
.cpp 240 11346
总文件数: 245 总行数: 11743
========================================
```
4. Count files under the current directory with filter:
```
$ line -f .py -d
Search in /Users/macbook/Desktop/Examples4/
========================================
文件后缀名 文件数 总行数
.cpp 240 11346
总文件数: 240 总行数: 11528
========================================
$ line -d
Search in /Users/macbook/Desktop/Examples4/
========================================
文件后缀名 文件数 总行数
.py 5 397
.cpp 240 11346
总文件数: 245 总行数: 11743
========================================
```
| PypiClean |
/Obsidian_Snippet_Manager-2.3.2-py3-none-any.whl/Obsidian_Snippeter/CLI.py | import argparse
import os
import sys
from glob import glob
from pathlib import Path
from urllib.parse import urlparse
from rich import print
from rich.console import Console
import Obsidian_Snippeter as manager
from Obsidian_Snippeter.src import environment
from Obsidian_Snippeter.src import github_action
def create_env():
    """
    Interactively create the configuration file for the snippet manager.

    Asks the user for:
    - Vault: absolute path to the Obsidian vault (must contain ``.obsidian``)
    - Snippet folder: absolute path to the folder that will contain the
      downloaded snippets (created, together with an empty ``exclude.yml``,
      when missing)

    Both paths are written to ``.obsidian-snippet-manager`` inside the
    installed package directory; the process then exits.

    :return: / (the function always terminates via ``sys.exit``)
    """
    # The configuration file lives inside the installed package directory.
    basedir = manager.__path__[0]
    console = Console()
    env_path = Path(f"{basedir}/.obsidian-snippet-manager")
    print(f"[bold]Creating environnement in [u]{env_path}[/][/]")
    vault = ""
    folder_snippet = ""
    # Keep asking until the answer is an existing directory that looks like a
    # vault (i.e. contains an `.obsidian` sub-folder).
    while (
        vault == ""
        or not os.path.isdir(vault)
        or not os.path.isdir(os.path.join(vault, ".obsidian"))
    ):
        vault = str(
            console.input(
                "Please provide your [u bold]obsidian vault[/] absolute path: "
            )
        )
    # Any non-empty answer is accepted; the folder is created below if needed.
    while folder_snippet == "":
        folder_snippet = str(
            console.input(
                "Please provide the [u bold]Snippet Manager Folder[/] absolute path: "
            )
        )
    if not os.path.isdir(Path(folder_snippet)):
        Path(folder_snippet).mkdir(exist_ok=True)
        console.print(
            f"[u bold]Snippet Manager Folder[/] created in [u]{folder_snippet}[/]."
        )
    # Make sure an (initially empty) exclude.yml exists in the snippet folder.
    excluded = os.path.join(folder_snippet, "exclude.yml")
    if not os.path.isfile(Path(excluded)):
        f = open(excluded, "w", encoding="utf-8")
        f.close()
    # Persist both paths as simple `key=value` lines.
    with open(env_path, "w", encoding="utf-8") as env:
        env.write(f"vault={vault}\n")
        env.write(f"folder_snippet={folder_snippet}\n")
    sys.exit("Environment created.")
def check_environnement():
    """
    Load the stored environment paths, re-creating the configuration
    interactively when they are missing or point to non-existent folders.

    :return: BASEDIR: Path / VAULT: Path
    """
    base_dir, vault = environment.get_environments()
    invalid = (
        len(str(base_dir)) == 0
        or len(str(vault)) == 0
        or not os.path.isdir(base_dir)
        or not os.path.isdir(vault)
    )
    if invalid:
        create_env()
    return base_dir, vault
def clone_message(repo_url, BASEDIR):
    """
    Clone *repo_url* and report the outcome with rich formatting.

    :param repo_url: GitHub repository URL
    :param BASEDIR: snippet-manager folder the repository is cloned into
    :return: working directory of the clone, or an error marker string
    """
    working_dir, message = github_action.git_clone(repo_url)
    repo_name = urlparse(repo_url).path[1:].split("/")[1]
    if message:
        print(f"[link={repo_url}]{repo_name}[/link] was cloned in [i u]{BASEDIR}.[/]")
    elif working_dir == "Already exists":
        print(f"[link={repo_url}]{repo_name}[/link] already exists !")
    else:
        print(f"[link={repo_url}]{repo_name}[/link] doesn't exists !")
    return working_dir
def pull_message(repo_path):
    """
    Run ``git pull`` in *repo_path* and print any error git reports.

    :param repo_path: path to a previously cloned repository
    :return: /
    """
    exc = github_action.git_pull(repo_path)
    if exc == "0":
        return
    print(f":warning: [red] Git returns an error :[/] {exc}")
def cli_exclude(BASEDIR, exclude_args, add):
    """Register permanent exclusions and return the complete exclusion list.

    :param BASEDIR: snippet-manager folder
    :param exclude_args: names excluded for this run only
    :param add: entries to exclude permanently (persisted through
        ``github_action.exclude_folder``) when they exist under *BASEDIR*
    :return: *exclude_args* plus the persisted exclusions read from
        ``exclude.yml``
    """
    if add is not None and len(add) > 0:
        # Renamed from `all`, which shadowed the builtin all().
        known_paths = [x for x in glob(os.path.join(BASEDIR, "**"), recursive=True)]
        for entry in add:
            # NOTE(review): `entry` is compared against full glob paths, so a
            # bare folder/file name will never match — confirm callers pass
            # absolute paths here.
            if entry in known_paths:
                github_action.exclude_folder(os.path.basename(entry))
    return exclude_args + github_action.read_exclude(BASEDIR)
def cli_clone(repo, BASEDIR, console, excluded, select):
    """Clone *repo* into *BASEDIR* and copy its CSS snippets into Obsidian.

    :param repo: URL of the GitHub repository to clone
    :param BASEDIR: snippet-manager folder receiving the clone
    :param console: rich console used for user feedback
    :param excluded: names to exclude from future updates (``.css`` appended
        when missing)
    :param select: when non-empty, copy only these CSS file names
    :return: /
    """
    repo_path = clone_message(repo, BASEDIR)
    # clone_message() returns "0" or "Already exists" on failure; only a
    # fresh clone is processed further.
    if repo_path != "0" and repo_path != "Already exists":
        if excluded is not None and len(excluded) > 0:
            # NOTE(review): every excluded entry gets a `.css` suffix here,
            # even when the user passed a repository name — confirm this is
            # what github_action.exclude_folder() expects.
            for i in excluded:
                if not i.endswith(".css"):
                    i = i + ".css"
                github_action.exclude_folder(i)
        if select is not None and len(select) > 0:
            # Copy only the requested CSS files from the cloned repository.
            all_file = [
                x
                for x in glob(os.path.join(repo_path, "**"), recursive=True)
                if x.endswith(".css")
            ]
            css_file = []
            for i in select:
                if not i.endswith(".css"):
                    i = i + ".css"
                pathfile = [x for x in all_file if os.path.basename(x) == i]
                if pathfile:
                    file = pathfile[0]
                    css_file.append(github_action.move_to_obsidian(file))
        else:
            # No selection: copy every CSS file found in the repository.
            css_file = github_action.move_to_obsidian(repo_path)
        if len(css_file) > 0:
            console.print(f"🎉 [u]{repo}[/] successfull added to Obsidian.")
            if excluded is not None and len(excluded) > 0:
                # Also exclude the repository itself from future updates.
                github_action.exclude_folder(repo_path)
        else:
            console.print(f"🤨 There is no CSS file in {repo}.")
def cli_update(repository_name, BASEDIR, only, console):
    """Update one cloned snippet repository and copy its CSS into Obsidian.

    :param repository_name: folder name of the repository to update
    :param BASEDIR: snippet-manager folder containing the cloned repositories
    :param only: optional list of CSS file names to copy (all when falsy)
    :param console: rich console used for user feedback
    :return: /
    """
    all_folder = [x for x in glob(os.path.join(str(BASEDIR), "**")) if os.path.isdir(x)]
    repo_name = [x for x in all_folder if os.path.basename(x) in repository_name]
    if len(repo_name) > 0:
        for i in repo_name:
            repo_path = Path(i)
            pull_message(repo_path)
            css_file = []
            if only:
                all_file = [
                    x
                    for x in glob(os.path.join(repo_path, "**"), recursive=True)
                    if x.endswith(".css")
                ]
                for j in only:
                    # Fix: normalise like `clone --select` does (endswith, not
                    # a substring test, which mis-handled names such as
                    # "foo.cssx").
                    if not j.endswith(".css"):
                        j = j + ".css"
                    file = [x for x in all_file if os.path.basename(x) == j]
                    if file:
                        css_file.append(github_action.move_to_obsidian(file[0]))
            else:
                css_file = github_action.move_to_obsidian(repo_path)
            if len(css_file) > 0:
                console.print(f"🎉 [u]{repository_name}[/] successfully updated.")
            else:
                console.print(f"🤨 There is no CSS file in [u]{repository_name}[/].")
    else:
        console.print(
            "[u]This repository doesn't exists[/]. Did you use the correct folder"
            " name ?"
        )
def cli_list(BASEDIR, console):
    """
    Print the repositories currently cloned in the snippet-manager folder.

    :param BASEDIR: snippet-manager folder
    :param console: rich console used for output
    :return: /
    """
    repo_names = [
        os.path.basename(path)
        for path in glob(os.path.join(str(BASEDIR), "**"))
        if os.path.isdir(path)
    ]
    count = len(repo_names)
    if count > 1:
        joined = "\n- ".join(repo_names)
        folder_msg = f"[u] The repository present are :[/]\n- {joined}"
    elif count == 1:
        only_name = "".join(repo_names)
        folder_msg = f"The repository present is [u]{only_name}[/]"
    else:
        folder_msg = f"[u]There is no repository in {BASEDIR}[/]"
    console.print(folder_msg)
def cli_update_all(BASEDIR, console, exclude):
    """
    Pull every cloned repository (unless excluded) and re-copy its CSS.

    :param BASEDIR: snippet-manager folder containing the clones
    :param console: rich console used for user feedback
    :param exclude: repository names that must not be updated
    :return: /
    """
    updated = []
    for folder in glob(os.path.join(str(BASEDIR), "**")):
        if not os.path.isdir(folder):
            continue
        # Only real git clones that are not excluded get updated.
        if not os.path.isdir(os.path.join(folder, ".git")):
            continue
        if os.path.basename(folder) in exclude:
            continue
        pull_message(folder)
        copied = github_action.move_to_obsidian(folder)
        if len(copied) > 0:
            updated.append(os.path.basename(folder))
    if len(updated) > 1:
        joined = "\n- ".join(updated)
        console.print(f"Successfull updated :\n- [u]{joined}[/]")
    elif len(updated) == 1:
        single = "".join(updated)
        console.print(f"Successfull updated [u]{single}[/]")
    else:
        console.print("🤨 There is no file to update in these repository")
def main():
    """
    Entry point of the CLI: parse the command line and dispatch to the
    clone / update / list / exclude / configuration sub-commands.

    :return: / (always terminates via ``sys.exit``)
    """

    class _HelpAction(argparse._HelpAction):
        # `--help` variant that also prints the help of every sub-command.
        def __call__(self, parser, namespace, values, option_string=None):
            parser.print_help()
            # retrieve subparsers from parser
            subparsers_actions = [
                action
                for action in parser._actions
                if isinstance(action, argparse._SubParsersAction)
            ]
            # there will probably only be one subparser_action,
            # but better save than sorry
            for subparsers_action in subparsers_actions:
                # get all subparsers and print help
                for choice, subparser in subparsers_action.choices.items():
                    print("{}".format(choice))
                    print(subparser.format_help())
            parser.exit()

    console = Console()
    parser = argparse.ArgumentParser(
        description="Git pull and copy the css files in .obsidian/snippet",
        add_help=False,
    )
    parser.add_argument(
        "--help", action=_HelpAction, help="show this help message and exit"
    )
    subparser = parser.add_subparsers(dest="cmd")
    parser_clone = subparser.add_parser(
        "clone", help="Clone a repository and add the snippet to Obsidian"
    )
    parser_clone.add_argument(
        "repository",
        help="Clone a new repository",
        action="store",
    )
    parser_clone.add_argument(
        "--excluded",
        "--e",
        "--no",
        help="Exclude this repository or file from update",
        action="store",
        nargs="*",
    )
    parser_clone.add_argument(
        "--select",
        "--s",
        help="Download only these snippets",
        action="store",
        nargs="*",
    )
    parser_update = subparser.add_parser(
        "update", help="Update a specific CSS snippet."
    )
    parser_update.add_argument(
        "--only",
        "--select",
        "--s",
        help="Use only selectionned file",
        action="store",
        nargs="+",
    )
    parser_update.add_argument(
        "repository_name",
        help="The repo you want to update",
        action="store",
    )
    # These two sub-commands take no arguments of their own.
    subparser.add_parser("configuration", help="Edit the configuration file")
    subparser.add_parser("list", help="List all Github Repository you cloned.")
    parser_exclude = subparser.add_parser(
        "exclude", help="Exclude repository from update"
    )
    parser_exclude.add_argument(
        "exclude", help="Exclude repository from the update", action="store", nargs="+"
    )
    parser_exclude.add_argument(
        "--add",
        help="Exclude everytime these file/repo from update",
        action="store",
        nargs="*",
    )
    args = parser.parse_args()
    # Bug fix: the sub-command is registered as "configuration" above, but the
    # old code compared args.cmd against "config", so the configuration
    # command silently fell through to the update-all branch.
    if args.cmd == "configuration":
        create_env()
        sys.exit()
    global_value = check_environnement()
    BASEDIR = global_value[0]
    exclude = []
    if args.cmd == "exclude":
        exclude = cli_exclude(BASEDIR, args.exclude, args.add)
    if args.cmd == "clone":
        cli_clone(args.repository, BASEDIR, console, args.excluded, args.select)
    elif args.cmd == "update":
        cli_update(args.repository_name, BASEDIR, args.only, console)
    elif args.cmd == "list":
        cli_list(BASEDIR, console)
    else:
        # Default (no sub-command, or after `exclude`): update everything.
        cli_update_all(BASEDIR, console, exclude)
    sys.exit()
if __name__ == "__main__":
main() | PypiClean |
/AyiinXd-0.0.8-cp311-cp311-macosx_10_9_universal2.whl/fipper/node_modules/@mapbox/node-pre-gyp/lib/util/napi.js | 'use strict';
const fs = require('fs');
module.exports = exports;
// Numeric [major, minor, patch] of the running Node version: strip the
// leading 'v', drop any pre-release suffix (e.g. '-nightly...'), then split
// on '.' and coerce each component to a number.
const versionArray = process.version
  .substr(1)
  .replace(/-.*$/, '')
  .split('.')
  .map((item) => {
    return +item;
  });
// Commands that expand_commands() repeats once per Node-API build version.
const napi_multiple_commands = [
  'build',
  'clean',
  'configure',
  'package',
  'publish',
  'reveal',
  'testbinary',
  'testpackage',
  'unpublish'
];

// Prefix used to smuggle a Node-API build version through command arguments.
const napi_build_version_tag = 'napi_build_version=';
module.exports.get_napi_version = function() {
  // Returns the non-zero numeric Node-API version, or undefined when the
  // running Node does not support Node-API.
  const reported = process.versions.napi; // can be undefined on old Node
  if (reported) {
    return reported;
  }
  // Fallbacks for Node versions predating process.versions.napi; this
  // mapping is frozen and should never need to be updated.
  if (versionArray[0] === 9 && versionArray[1] >= 3) {
    return 2; // 9.3.0+
  }
  if (versionArray[0] === 8) {
    return 1; // 8.0.0+
  }
  return undefined;
};
module.exports.get_napi_version_as_string = function(target) {
  // '' when Node-API is unsupported, otherwise the version as a string.
  const version = module.exports.get_napi_version(target);
  if (!version) {
    return '';
  }
  return '' + version;
};
module.exports.validate_package_json = function(package_json, opts) { // throws Error
  // Sanity-check the "binary" stanza of package.json for Node-API usage.

  const binary = package_json.binary;
  // Does each path contain a substitution template? (see pathOK below)
  const module_path_ok = pathOK(binary.module_path);
  const remote_path_ok = pathOK(binary.remote_path);
  const package_name_ok = pathOK(binary.package_name);
  const napi_build_versions = module.exports.get_napi_build_versions(package_json, opts, true);
  const napi_build_versions_raw = module.exports.get_napi_build_versions_raw(package_json);

  // Every declared napi_versions entry must be a positive integer.
  if (napi_build_versions) {
    napi_build_versions.forEach((napi_build_version)=> {
      if (!(parseInt(napi_build_version, 10) === napi_build_version && napi_build_version > 0)) {
        throw new Error('All values specified in napi_versions must be positive integers.');
      }
    });
  }

  // With napi_versions set, the '{napi_build_version}' template is mandatory
  // in module_path and in at least one of remote_path / package_name.
  if (napi_build_versions && (!module_path_ok || (!remote_path_ok && !package_name_ok))) {
    throw new Error('When napi_versions is specified; module_path and either remote_path or ' +
      "package_name must contain the substitution string '{napi_build_version}`.");
  }

  // Conversely, the template may only appear when napi_versions is declared.
  if ((module_path_ok || remote_path_ok || package_name_ok) && !napi_build_versions_raw) {
    throw new Error("When the substitution string '{napi_build_version}` is specified in " +
      'module_path, remote_path, or package_name; napi_versions must also be specified.');
  }

  // A Node-API-only package must have at least one buildable version here.
  if (napi_build_versions && !module.exports.get_best_napi_build_version(package_json, opts) &&
    module.exports.build_napi_only(package_json)) {
    throw new Error(
      'The Node-API version of this Node instance is ' + module.exports.get_napi_version(opts ? opts.target : undefined) + '. ' +
      'This module supports Node-API version(s) ' + module.exports.get_napi_build_versions_raw(package_json) + '. ' +
      'This Node instance cannot run this module.');
  }
  // Same check when none of the declared versions is supported at all.
  if (napi_build_versions_raw && !napi_build_versions && module.exports.build_napi_only(package_json)) {
    throw new Error(
      'The Node-API version of this Node instance is ' + module.exports.get_napi_version(opts ? opts.target : undefined) + '. ' +
      'This module supports Node-API version(s) ' + module.exports.get_napi_build_versions_raw(package_json) + '. ' +
      'This Node instance cannot run this module.');
  }
};
function pathOK(path) {
return path && (path.indexOf('{napi_build_version}') !== -1 || path.indexOf('{node_napi_label}') !== -1);
}
module.exports.expand_commands = function(package_json, opts, commands) {
  // Expand the requested commands so Node-API-aware commands run once per
  // buildable Node-API version; other commands pass through unchanged.
  const expanded_commands = [];
  const napi_build_versions = module.exports.get_napi_build_versions(package_json, opts);
  commands.forEach((command)=> {
    if (napi_build_versions && command.name === 'install') {
      // 'install' runs once, targeting the best (highest usable) version.
      const napi_build_version = module.exports.get_best_napi_build_version(package_json, opts);
      const args = napi_build_version ? [napi_build_version_tag + napi_build_version] : [];
      expanded_commands.push({ name: command.name, args: args });
    } else if (napi_build_versions && napi_multiple_commands.indexOf(command.name) !== -1) {
      // build/clean/package/... are duplicated, once per declared version,
      // each copy tagged with its napi_build_version argument.
      napi_build_versions.forEach((napi_build_version)=> {
        const args = command.args.slice();
        args.push(napi_build_version_tag + napi_build_version);
        expanded_commands.push({ name: command.name, args: args });
      });
    } else {
      expanded_commands.push(command);
    }
  });
  return expanded_commands;
};
// Returns the declared napi_versions this Node instance can actually build
// (deduplicated), or undefined when there are none.
module.exports.get_napi_build_versions = function(package_json, opts, warnings) { // opts may be undefined
  const log = require('npmlog');
  let napi_build_versions = [];
  const supported_napi_version = module.exports.get_napi_version(opts ? opts.target : undefined);
  // remove duplicates, verify each napi version can actually be built
  if (package_json.binary && package_json.binary.napi_versions) {
    package_json.binary.napi_versions.forEach((napi_version) => {
      const duplicated = napi_build_versions.indexOf(napi_version) !== -1;
      if (!duplicated && supported_napi_version && napi_version <= supported_napi_version) {
        napi_build_versions.push(napi_version);
      } else if (warnings && !duplicated && supported_napi_version) {
        log.info('This Node instance does not support builds for Node-API version', napi_version);
      }
    });
  }
  // Optionally collapse the list to just the highest buildable version.
  if (opts && opts['build-latest-napi-version-only']) {
    let latest_version = 0;
    napi_build_versions.forEach((napi_version) => {
      if (napi_version > latest_version) latest_version = napi_version;
    });
    napi_build_versions = latest_version ? [latest_version] : [];
  }
  return napi_build_versions.length ? napi_build_versions : undefined;
};
module.exports.get_napi_build_versions_raw = function(package_json) {
const napi_build_versions = [];
// remove duplicates
if (package_json.binary && package_json.binary.napi_versions) {
package_json.binary.napi_versions.forEach((napi_version) => {
if (napi_build_versions.indexOf(napi_version) === -1) {
napi_build_versions.push(napi_version);
}
});
}
return napi_build_versions.length ? napi_build_versions : undefined;
};
module.exports.get_command_arg = function(napi_build_version) {
  // e.g. 5 -> 'napi_build_version=5'
  return `${napi_build_version_tag}${napi_build_version}`;
};
module.exports.get_napi_build_version_from_command_args = function(command_args) {
  // Scan the arguments for the first 'napi_build_version=N' entry and return
  // N as a number; undefined when no such argument is present.
  const prefix = napi_build_version_tag;
  for (const arg of command_args) {
    if (arg.indexOf(prefix) === 0) {
      return parseInt(arg.substr(prefix.length), 10);
    }
  }
  return undefined;
};
module.exports.swap_build_dir_out = function(napi_build_version) {
  // Stash ./build away as the per-version build directory; no-op when the
  // version is falsy.
  if (!napi_build_version) {
    return;
  }
  const rm = require('rimraf');
  const versioned_dir = module.exports.get_build_dir(napi_build_version);
  rm.sync(versioned_dir);
  fs.renameSync('build', versioned_dir);
};
// Inverse of swap_build_dir_out: restore the per-Node-API-version temporary
// directory back to "build", removing any existing "build" dir first.
// No-op when napi_build_version is falsy. Filesystem side effects only.
module.exports.swap_build_dir_in = function(napi_build_version) {
    if (napi_build_version) {
        const rm = require('rimraf');
        rm.sync('build');
        fs.renameSync(module.exports.get_build_dir(napi_build_version), 'build');
    }
};
module.exports.get_build_dir = function(napi_build_version) {
return 'build-tmp-napi-v' + napi_build_version;
};
// Pick the highest Node-API version that the package declares AND the
// running Node instance supports; undefined when there is no such version.
module.exports.get_best_napi_build_version = function(package_json, opts) {
    const napi_build_versions = module.exports.get_napi_build_versions(package_json, opts);
    let best_napi_build_version = 0;
    if (napi_build_versions) {
        const our_napi_version = module.exports.get_napi_version(opts ? opts.target : undefined);
        // Fold: keep the largest declared version not exceeding ours.
        best_napi_build_version = napi_build_versions.reduce(
            (best, candidate) => (candidate > best && candidate <= our_napi_version) ? candidate : best,
            0);
    }
    return best_napi_build_version === 0 ? undefined : best_napi_build_version;
};
module.exports.build_napi_only = function(package_json) {
return package_json.binary && package_json.binary.package_name &&
package_json.binary.package_name.indexOf('{node_napi_label}') === -1;
}; | PypiClean |
import numpy
from oasys.util.oasys_util import write_surface_file
from orangecontrib.esrf.wofry.util.thin_object import WOThinObject, WOThinObject1D #TODO from wofryimpl....
from wofry.beamline.decorators import OpticalElementDecorator
class WOThinObjectCorrector(WOThinObject, OpticalElementDecorator):
    """2D thin-object corrector.

    Behaves like a :class:`WOThinObject`, but instead of reading the thickness
    mesh from a file it computes, from the incoming wavefront, the thickness
    profile a thin refractive object needs in order to turn that wavefront
    into a spherical wave focusing at ``focus_at`` meters.
    """
    def __init__(self, name="Undefined",
                 file_with_thickness_mesh="",
                 material="",
                 refraction_index_delta=1e-07,
                 att_coefficient=0.0,
                 correction_method=1,
                 focus_at=10.0,
                 wall_thickness=0.0,
                 apply_correction_to_wavefront=0,
                 file_with_thickness_mesh_flag=0,
                 ):
        # correction_method: 0 = zero profile, 1 = focus to waist
        # (see calculate_correction_profile).
        # refraction_index_delta / att_coefficient: presumably used only when
        # material == "External" (see to_python_code) -- TODO confirm against
        # the WOThinObject base class.
        super().__init__(name=name,
                         file_with_thickness_mesh=file_with_thickness_mesh,
                         material=material,
                         )
        self._correction_method = correction_method
        self._focus_at = focus_at  # focal distance (m)
        self._wall_thickness = wall_thickness  # constant thickness offset (m)
        self._apply_correction_to_wavefront = apply_correction_to_wavefront
        self._file_with_thickness_mesh_flag = file_with_thickness_mesh_flag
        self._refraction_index_delta = refraction_index_delta
        self._att_coefficient = att_coefficient
    def calculate_correction_profile(self, wavefront):
        """Compute the 2D corrector thickness profile for *wavefront*.

        Returns ``(profile, x, y)``. Optionally writes the profile to the
        configured mesh file and prints a fitted equivalent-lens radius.
        """
        photon_energy = wavefront.get_photon_energy()
        x = wavefront.get_coordinate_x()
        y = wavefront.get_coordinate_y()
        if self._correction_method == 0: # write file with zero profile
            profile = numpy.zeros((x.size, y.size))
        elif self._correction_method == 1: # focus to waist
            print("\n\n\n ========== parameters from optical element : ")
            print(self.info())
            refraction_index_delta, att_coefficient = self.get_refraction_index(photon_energy)
            # auxiliar spherical wavefront
            wavefront_model = wavefront.duplicate()
            wavefront_model.set_spherical_wave(radius=-self._focus_at, complex_amplitude=1.0,)
            # thickness = -(target phase - current phase) / (k * delta)
            phase_correction = numpy.angle( wavefront_model.get_complex_amplitude() / wavefront.get_complex_amplitude())
            profile = -phase_correction / wavefront.get_wavenumber() / refraction_index_delta
            profile += self._wall_thickness
            if self._file_with_thickness_mesh_flag:
                write_surface_file(profile.T, x, y, self.get_file_with_thickness_mesh(), overwrite=True)
                print("\nFile for OASYS " + self.get_file_with_thickness_mesh() + " written to disk.")
            # for info
            # H profile
            n = profile.shape[0]
            one_over_fraction_in_length = 10
            w = n // (2 * one_over_fraction_in_length)
            # NOTE(review): this samples column index w (near the array edge),
            # not the central column n // 2 -- confirm this is intended.
            profile_line = profile[:, w]
            xx = x[(n // 2 - w):(n // 2 + w)]
            yy = profile_line[(n // 2 - w):(n // 2 + w)]
            # Fit the slope of the profile derivative to estimate the radius
            # of curvature of an equivalent biconvex lens.
            yder = numpy.gradient(yy, xx)
            coeff = numpy.polyfit(xx, yder, 1)
            print("\n\n\n ========== fitted radius in the H profile center (over 1/%d of length): " % one_over_fraction_in_length)
            print("fitted lens (with two curved sides) of radius = %g m " % (2 / coeff[0]))
            print("which corresponds to a focal length of %g m " % (1 / coeff[0] / refraction_index_delta))
            # V profile
            n = profile.shape[1]
            one_over_fraction_in_length = 10
            w = n // (2 * one_over_fraction_in_length)
            # NOTE(review): row index w here as well -- see note above.
            profile_line = profile[w, :]
            xx = y[(n // 2 - w):(n // 2 + w)]
            yy = profile_line[(n // 2 - w):(n // 2 + w)]
            yder = numpy.gradient(yy, xx)
            coeff = numpy.polyfit(xx, yder, 1)
            print("\n\n\n ========== fitted radius in the V profile center (over 1/%d of length): " % one_over_fraction_in_length)
            print("fitted lens (with two curved sides) of radius = %g m " % (2 / coeff[0]))
            print("which corresponds to a focal length of %g m " % (1 / coeff[0] / refraction_index_delta))
        return profile, x, y
    def applyOpticalElement(self, wavefront, parameters=None, element_index=None):
        """Propagate *wavefront* through the corrector.

        The correction profile is always (re)computed (with its file/printing
        side effects); the wavefront is modified only when
        ``apply_correction_to_wavefront`` is nonzero.
        """
        profile, x, y = self.calculate_correction_profile(wavefront)
        if self._apply_correction_to_wavefront > 0:
            #TODO change this....
            output_wavefront = super().applyOpticalElement(wavefront, parameters=parameters, element_index=element_index)
        else:
            output_wavefront = wavefront
        return output_wavefront
    def to_python_code(self, data=None):
        """Return a python script snippet recreating this optical element.

        NOTE(review): ``correction_method`` is not emitted, so generated code
        relies on the constructor default -- confirm this is intended.
        """
        txt = ""
        txt += "\nfrom orangecontrib.esrf.wofry.util.thin_object_corrector import WOThinObjectCorrector #TODO update"
        txt += "\n"
        txt += "\noptical_element = WOThinObjectCorrector("
        txt += "\n    name='%s'," % self.get_name()
        txt += "\n    file_with_thickness_mesh_flag=%d," % self._file_with_thickness_mesh_flag
        txt += "\n    file_with_thickness_mesh='%s'," % self.get_file_with_thickness_mesh()
        txt += "\n    material='%s'," % self.get_material()
        if self.get_material() == "External":
            txt += "\n    refraction_index_delta=%g," % self._refraction_index_delta
            txt += "\n    att_coefficient=%g," % self._att_coefficient
        txt += "\n    focus_at=%g," % self._focus_at
        txt += "\n    wall_thickness=%g," % self._wall_thickness
        txt += "\n    apply_correction_to_wavefront=%d)" % self._apply_correction_to_wavefront
        txt += "\n"
        return txt
class WOThinObjectCorrector1D(WOThinObject1D, OpticalElementDecorator):
    """1D thin-object corrector.

    1D counterpart of :class:`WOThinObjectCorrector`: computes, from the
    incoming 1D wavefront, the thickness profile a thin refractive object
    needs to refocus the wavefront at ``focus_at`` meters, and can apply the
    resulting phase/amplitude change directly to the wavefront.
    """
    def __init__(self, name="Undefined",
                 file_with_thickness_mesh="",
                 material="",
                 refraction_index_delta=1e-07,
                 att_coefficient=0.0,
                 correction_method=1,
                 focus_at=10.0,
                 wall_thickness=0.0,
                 apply_correction_to_wavefront=0,
                 file_with_thickness_mesh_flag=0,
                 fit_fraction_in_length=0.1,
                 fit_filename="",
                 ):
        # correction_method: 0 = zero profile (plus wall), 1 = focus to waist.
        # fit_fraction_in_length: central fraction of the profile used for the
        # informational radius fit; fit_filename: optional output for the fit.
        super().__init__(name=name,
                         file_with_thickness_mesh=file_with_thickness_mesh,
                         material=material,
                         refraction_index_delta=refraction_index_delta,
                         att_coefficient=att_coefficient,
                         )
        self._correction_method = correction_method
        self._focus_at = focus_at  # focal distance (m)
        self._wall_thickness = wall_thickness  # minimum thickness offset (m)
        self._apply_correction_to_wavefront = apply_correction_to_wavefront
        self._file_with_thickness_mesh_flag = file_with_thickness_mesh_flag
        self._fit_fraction_in_length = fit_fraction_in_length
        self._fit_filename = fit_filename
    def calculate_correction_profile(self, wavefront):
        """Compute the 1D corrector thickness profile; returns ``(x, profile)``.

        Optionally writes the profile as a two-column text file and a small
        fit-report file (side effects controlled by the constructor flags).
        """
        photon_energy = wavefront.get_photon_energy()
        x = wavefront.get_abscissas()
        if self._correction_method == 0: # write file with zero profile
            profile = numpy.zeros_like(x)
            profile += self._wall_thickness
        elif self._correction_method == 1: # focus to waist
            print("\n\n\n ========== parameters from optical element : ")
            print(self.info())
            refraction_index_delta, att_coefficient = self.get_refraction_index(photon_energy)
            # auxiliar spherical wavefront
            target_wavefront = wavefront.duplicate()
            target_wavefront.set_spherical_wave(radius=-self._focus_at, complex_amplitude=1.0, )
            phase_input = wavefront.get_phase(unwrap=True)
            phase_target = target_wavefront.get_phase(unwrap=True)
            phase_correction = phase_target - phase_input
            # thickness = -(phase difference) / (k * delta)
            profile = - phase_correction / (wavefront.get_wavenumber() * refraction_index_delta)
            # Shift so the minimum thickness equals wall_thickness.
            profile -= profile.min()
            profile += self._wall_thickness
            if self._file_with_thickness_mesh_flag:
                f = open(self.get_file_with_thickness_mesh(), 'w')
                for i in range(x.size):
                    f.write("%g %g\n" % (x[i], profile[i]))
                f.close()
                print("\nFile 1D for OASYS " + self.get_file_with_thickness_mesh() + " written to disk.")
            # for info
            n = profile.size
            fraction_in_length = self._fit_fraction_in_length
            # Half-width (in samples) of the central fitting window.
            w = int((n * fraction_in_length) / 2)
            if w <= 1: w = 1
            xx = x[(n // 2 - w):(n // 2 + w)]
            yy = profile[(n // 2 - w):(n // 2 + w)]
            # Fit the slope of the profile derivative to estimate the radius
            # of curvature of an equivalent biconvex lens.
            yder = numpy.gradient(yy, xx)
            coeff = numpy.polyfit(xx, yder, 1)
            print("\n\n\n ========== fitted radius in the profile center (over %g of length): " % fraction_in_length)
            print("fitted lens (with two curved sides) of radius = %g m " % (2 / coeff[0]))
            print("which corresponds to a focal length of %g m " % (1 / coeff[0] / refraction_index_delta))
            if self._fit_filename != "":
                f = open(self._fit_filename, 'w')
                f.write("# ========== fitted radius in the profile center (over %g of length): \n" % fraction_in_length)
                f.write("# fitted lens (with two curved sides) of radius = %g m \n" % (2 / coeff[0]))
                f.write("# which corresponds to a focal length of %g m \n" % (1 / coeff[0] / refraction_index_delta))
                f.write("%g\n" % (2 / coeff[0]))
                f.write("%g\n" % (1 / coeff[0] / refraction_index_delta))
                f.close()
                print("File %s written to disk." % self._fit_filename)
        return x, profile
    def applyOpticalElement(self, wavefront, parameters=None, element_index=None):
        """Propagate *wavefront* through the corrector.

        Applies the thickness profile as an amplitude attenuation plus a phase
        shift when ``apply_correction_to_wavefront`` is nonzero; otherwise the
        wavefront is returned unchanged (profile side effects still happen).
        """
        x, profile = self.calculate_correction_profile(wavefront)
        refraction_index_delta, att_coefficient = self.get_refraction_index(wavefront.get_photon_energy())
        if self._apply_correction_to_wavefront > 0:
            amp_factors = numpy.exp(-1.0 * att_coefficient * profile / 2) # factor of 2 because it is amplitude
            phase_shifts = -1.0 * wavefront.get_wavenumber() * refraction_index_delta * profile
            output_wavefront = wavefront.duplicate()
            output_wavefront.rescale_amplitudes(amp_factors)
            output_wavefront.add_phase_shifts(phase_shifts)
        else:
            output_wavefront = wavefront
        return output_wavefront
    def to_python_code(self, data=None):
        """Return a python script snippet recreating this optical element.

        NOTE(review): ``correction_method`` is not emitted, so generated code
        relies on the constructor default -- confirm this is intended.
        """
        txt = ""
        txt += "\nfrom orangecontrib.esrf.wofry.util.thin_object_corrector import WOThinObjectCorrector1D #TODO update"
        txt += "\n"
        txt += "\noptical_element = WOThinObjectCorrector1D("
        txt += "\n    name='%s'," % self.get_name()
        txt += "\n    file_with_thickness_mesh_flag=%d," % self._file_with_thickness_mesh_flag
        txt += "\n    file_with_thickness_mesh='%s'," % self.get_file_with_thickness_mesh()
        txt += "\n    material='%s'," % self.get_material()
        if self.get_material() == "External":
            txt += "\n    refraction_index_delta=%g," % self._refraction_index_delta
            txt += "\n    att_coefficient=%g," % self._att_coefficient
        txt += "\n    focus_at=%g," % self._focus_at
        txt += "\n    wall_thickness=%g," % self._wall_thickness
        txt += "\n    apply_correction_to_wavefront=%d," % self._apply_correction_to_wavefront
        txt += "\n    fit_fraction_in_length=%g," % self._fit_fraction_in_length
        txt += "\n    fit_filename='%s')" % self._fit_filename
        txt += "\n"
        return txt
from ..utilities import ordered
from .basic import _aresame
from .cache import cacheit
from .evaluate import global_evaluate
from .expr import Expr
from .sympify import sympify
class AssocOp(Expr):
    """Associative operations, can separate noncommutative and
    commutative parts.
    (a op b) op c == a op (b op c) == a op b op c.
    Base class for Add and Mul.
    This is an abstract base class, concrete derived classes must define
    the attribute `identity`.
    """
    @cacheit
    def __new__(cls, *args, **options):
        """Create a new instance, canonicalizing the arguments.

        With ``evaluate=False`` the args are stored as given (no flattening,
        no identity removal); otherwise identity elements are dropped, the
        args are flattened via :meth:`flatten`, and an ``Order`` wrapper is
        returned when order symbols are produced.
        """
        from ..series import Order
        args = [sympify(a, strict=True) for a in args]
        if not options.pop('evaluate', global_evaluate[0]):
            return cls._from_args(args)
        else:
            # Identity elements (0 for Add, 1 for Mul) contribute nothing.
            args = [a for a in args if a is not cls.identity]
            if len(args) == 0:
                return cls.identity
            if len(args) == 1:
                return args[0]
            c_part, nc_part, order_symbols = cls.flatten(args)
            obj = cls._from_args(c_part + nc_part)
            if order_symbols is not None:
                return Order(obj, *order_symbols)  # pylint: disable=not-an-iterable
            return obj
    @classmethod
    def _from_args(cls, args):
        """Create new instance with already-processed args."""
        if len(args) == 0:
            return cls.identity
        elif len(args) == 1:
            return args[0]
        return super().__new__(cls, *args)
    def _new_rawargs(self, *args, **kwargs):
        """Create new instance of own class with args exactly as provided by
        caller but returning the self class identity if args is empty.
        This is handy when we want to optimize things, e.g.
        >>> e = Mul(3, x, y)
        >>> e.args
        (3, x, y)
        >>> Mul(*e.args[1:])
        x*y
        >>> e._new_rawargs(*e.args[1:]) # the same as above, but faster
        x*y
        Note: use this with caution. There is no checking of arguments at
        all. This is best used when you are rebuilding an Add or Mul after
        simply removing one or more terms. If modification which result,
        for example, in extra 1s being inserted (as when collecting an
        expression's numerators and denominators) they will not show up in
        the result but a Mul will be returned nonetheless:
        >>> m = (x*y)._new_rawargs(Integer(1), x)
        >>> m
        x
        >>> m == x
        False
        >>> m.is_Mul
        True
        Another issue to be aware of is that the commutativity of the result
        is based on the commutativity of self. If you are rebuilding the
        terms that came from a commutative object then there will be no
        problem, but if self was non-commutative then what you are
        rebuilding may now be commutative.
        Although this routine tries to do as little as possible with the
        input, getting the commutativity right is important, so this level
        of safety is enforced: commutativity will always be recomputed if
        self is non-commutative and kwarg `reeval=False` has not been
        passed.
        """
        return self._from_args(args)
    @classmethod
    def flatten(cls, seq):
        """Return seq so that none of the elements are of type `cls`.
        This is the vanilla routine that will be used if a class derived
        from AssocOp does not define its own flatten routine.
        """
        # apply associativity, no commutativity property is used
        new_seq = []
        for o in seq:
            if o.__class__ is cls:  # classes must match exactly
                # Splice nested same-class args into the sequence being
                # iterated, so they are processed (and possibly re-flattened).
                seq.extend(o.args)
            else:
                new_seq.append(o)
        return [], new_seq, None  # c_part, nc_part, order_symbols
    def _matches_commutative(self, expr, repl_dict={}):
        """
        Matches Add/Mul "pattern" to an expression "expr".

        NOTE(review): ``repl_dict={}`` is a mutable default argument; it is
        shared across calls, so this relies on the method never mutating it
        in place -- confirm before refactoring.

        repl_dict ... a dictionary of (wild: expression) pairs, that get
        returned with the results
        This function is the main workhorse for Add/Mul.
        For instance:
        >>> a = Wild('a')
        >>> b = Wild('b')
        >>> c = Wild('c')
        >>> (a + sin(b)*c)._matches_commutative(x + sin(y)*z)
        {a_: x, b_: y, c_: z}
        In the example above, "a+sin(b)*c" is the pattern, and "x+sin(y)*z" is
        the expression.
        The repl_dict contains parts that were already matched. For example
        here:
        >>> (x + sin(b)*c)._matches_commutative(x + sin(y)*z, repl_dict={a: x})
        {a_: x, b_: y, c_: z}
        the only function of the repl_dict is to return it in the
        result, e.g. if you omit it:
        >>> (x + sin(b)*c)._matches_commutative(x + sin(y)*z)
        {b_: y, c_: z}
        the "a: x" is not returned in the result, but otherwise it is
        equivalent.
        """
        # make sure expr is Expr if pattern is Expr
        from .expr import Add, Expr
        from .mul import Mul
        if isinstance(self, Expr) and not isinstance(expr, Expr):
            return
        # handle simple patterns
        if self == expr:
            return repl_dict
        d = self._matches_simple(expr, repl_dict)
        if d is not None:
            return d
        # eliminate exact part from pattern: (2+a+w1+w2)._matches(expr) -> (w1+w2)._matches(expr-a-2)
        from .function import WildFunction
        from .symbol import Wild
        wild_part = []
        exact_part = []
        for p in ordered(self.args):
            if p.has(Wild, WildFunction) and (not expr.has(p)):
                # not all Wild should stay Wilds, for example:
                # (w2+w3)._matches(w1) -> (w1+w3)._matches(w1) -> w3._matches(0)
                wild_part.append(p)
            else:
                exact_part.append(p)
        if exact_part:
            exact = self.func(*exact_part)
            free = expr.free_symbols
            if free and (exact.free_symbols - free):
                # there are symbols in the exact part that are not
                # in the expr; but if there are no free symbols, let
                # the matching continue
                return
            newpattern = self.func(*wild_part)
            # Remove the exact part from expr (subtraction for Add,
            # division for Mul) and match the remaining wild pattern.
            newexpr = self._combine_inverse(expr, exact)
            if all(isinstance(e, AssocOp) for e in [expr, newexpr]):
                if newexpr.count_ops() > expr.count_ops():
                    # Removing the exact part made things worse; give up.
                    return
            return newpattern._matches(newexpr, repl_dict)
        # now to real work ;)
        i = 0
        saw = set()
        # Repeatedly rewrite expr into equivalent (unevaluated) forms until a
        # match is found or a form repeats.
        while expr not in saw:
            saw.add(expr)
            expr_list = (self.identity,) + tuple(ordered(self.make_args(expr)))
            for last_op in reversed(expr_list):
                for w in reversed(wild_part):
                    d1 = w._matches(last_op, repl_dict)
                    if d1 is not None:
                        d2 = self.xreplace(d1)._matches(expr, d1)
                        if d2 is not None:
                            return d2
            if i == 0:
                if self.is_Mul:
                    # make e**i look like Mul
                    if expr.is_Pow and expr.exp.is_Integer:
                        if expr.exp > 0:
                            expr = Mul(*[expr.base, expr.base**(expr.exp - 1)], evaluate=False)
                        else:
                            expr = Mul(*[1/expr.base, expr.base**(expr.exp + 1)], evaluate=False)
                        i += 1
                        continue
                elif self.is_Add:
                    # make i*e look like Add
                    c, e = expr.as_coeff_Mul()
                    if abs(c) > 1:
                        if c > 0:
                            expr = Add(*[e, (c - 1)*e], evaluate=False)
                        else:
                            expr = Add(*[-e, (c + 1)*e], evaluate=False)
                        i += 1
                        continue
                # try collection on non-Wild symbols
                from ..simplify.radsimp import collect
                was = expr
                did = set()
                for w in reversed(wild_part):
                    c, w = w.as_coeff_mul(Wild)
                    free = c.free_symbols - did
                    if free:
                        did.update(free)
                        expr = collect(expr, free)
                if expr != was:
                    # NOTE(review): `i += 0` leaves the counter at 0 so the
                    # Pow/coefficient expansions above remain reachable on the
                    # next pass; confirm this is deliberate (it reads like a
                    # typo for `i += 1`).
                    i += 0
                    continue
            else:
                raise NotImplementedError
            break  # if we didn't continue, there is nothing more to do
    def _has_matcher(self):
        """Helper for .has()."""
        def _ncsplit(expr):
            # this is not the same as args_cnc because here
            # we don't assume expr is a Mul -- hence deal with args --
            # and always return a set.
            cpart, ncpart = [], []
            for arg in expr.args:
                if arg.is_commutative:
                    cpart.append(arg)
                else:
                    ncpart.append(arg)
            return set(cpart), ncpart
        c, nc = _ncsplit(self)
        cls = self.__class__
        def is_in(expr):
            # True when `self` is "contained in" expr: equal to it, or its
            # commutative part is a subset and its noncommutative part occurs
            # as a contiguous subsequence.
            if expr == self:
                return True
            elif isinstance(expr, cls):
                _c, _nc = _ncsplit(expr)
                if (c & _c) == c:
                    if not nc:
                        return True
                    elif len(nc) <= len(_nc):
                        for i in range(len(_nc) - len(nc)):
                            if _nc[i:i + len(nc)] == nc:
                                return True
                    return False
        return is_in
    def _eval_evalf(self, prec):
        """
        Evaluate the parts of self that are numbers; if the whole thing
        was a number with no functions it would have been evaluated, but
        it wasn't so we must judiciously extract the numbers and reconstruct
        the object. This is *not* simply replacing numbers with evaluated
        numbers. Nunmbers should be handled in the largest pure-number
        expression as possible. So the code below separates ``self`` into
        number and non-number parts and evaluates the number parts and
        walks the args of the non-number part recursively (doing the same
        thing).
        """
        from .add import Add
        from .function import AppliedUndef
        from .mul import Mul
        from .symbol import Symbol
        if isinstance(self, (Mul, Add)):
            x, tail = self.as_independent(Symbol, AppliedUndef)
            # if x is an AssocOp Function then the _evalf below will
            # call _eval_evalf (here) so we must break the recursion
            if not (tail is self.identity or
                    isinstance(x, AssocOp) and x.is_Function):
                # here, we have a number so we just call to _evalf with prec;
                # prec is not the same as n, it is the binary precision so
                # that's why we don't call to evalf.
                x = x._evalf(prec) if x is not self.identity else self.identity
                args = []
                for a in self.func.make_args(tail):
                    # here we call to _eval_evalf since we don't know what we
                    # are dealing with and all other _eval_evalf routines should
                    # be doing the same thing (i.e. taking binary prec and
                    # finding the evalf-able args)
                    newa = a._eval_evalf(prec)
                    if newa is None:
                        args.append(a)
                    else:
                        args.append(newa)
                if not _aresame(tuple(args), self.func.make_args(tail)):
                    tail = self.func(*args)
                return self.func(x, tail)
        # this is the same as above, but there were no pure-number args to
        # deal with
        args = []
        for a in self.args:
            # NOTE(review): evalf's first argument is decimal digits while
            # `prec` here is binary precision (see comment above) -- confirm
            # the mismatch is intentional/harmless on this path.
            newa = a.evalf(prec, strict=False)
            args.append(newa)
        if not _aresame(tuple(args), self.args):
            return self.func(*args)
        return self
    @classmethod
    def make_args(cls, expr):
        """
        Return a sequence of elements `args` such that cls(*args) == expr
        >>> Mul.make_args(x*y)
        (x, y)
        >>> Add.make_args(x*y)
        (x*y,)
        >>> set(Add.make_args(x*y + y))
        {y, x*y}
        """
        if isinstance(expr, cls):
            return expr.args
        else:
            return expr,
class ShortCircuit(Exception):
    """Helper exception to detect absorbing element among arguments.

    Raised by :meth:`LatticeOp._new_args_filter` when the ``zero`` element is
    encountered, so :meth:`LatticeOp.__new__` can return ``zero`` immediately.
    """
class LatticeOp(AssocOp):
    """
    Join/meet operations of an algebraic lattice[1].
    These binary operations are associative (op(op(a, b), c) = op(a, op(b, c))),
    commutative (op(a, b) = op(b, a)) and idempotent (op(a, a) = op(a) = a).
    Common examples are AND, OR, Union, Intersection, max or min. They have an
    identity element (op(identity, a) = a) and an absorbing element
    conventionally called zero (op(zero, a) = zero).
    This is an abstract base class, concrete derived classes must declare
    attributes zero and identity. All defining properties are then respected.
    >>> class MyJoin(LatticeOp):
    ...     zero = Integer(0)
    ...     identity = Integer(1)
    >>> MyJoin(2, 3) == MyJoin(3, 2)
    True
    >>> MyJoin(2, MyJoin(3, 4)) == MyJoin(2, 3, 4)
    True
    >>> MyJoin(0, 1, 4, 2, 3, 4)
    0
    >>> MyJoin(1, 2)
    2
    References
    ==========
    * https://en.wikipedia.org/wiki/Lattice_%28order%29
    """
    is_commutative = True
    def __new__(cls, *args, **options):
        # Idempotence + commutativity are realized by storing the args as a
        # frozenset; ShortCircuit signals that the absorbing `zero` element
        # was seen, making the whole expression equal to `zero`.
        args = (sympify(arg, strict=True) for arg in args)
        if options.pop('evaluate', global_evaluate[0]):
            try:
                _args = frozenset(cls._new_args_filter(args))
            except ShortCircuit:
                return sympify(cls.zero)
            if not _args:
                return sympify(cls.identity)
            elif len(_args) == 1:
                return set(_args).pop()
        else:
            _args = frozenset(args)
        # Bypass AssocOp.__new__ (which would re-flatten) and build directly
        # on Expr with the frozenset as the single stored argument.
        obj = super(AssocOp, cls).__new__(cls, _args)  # pylint: disable=bad-super-call
        obj._argset = _args
        return obj
    @classmethod
    def _new_args_filter(cls, arg_sequence, call_cls=None):
        """Generator filtering args.

        Drops identity elements, flattens nested same-class args, and raises
        :class:`ShortCircuit` when the absorbing ``zero`` element is found.
        """
        ncls = call_cls or cls
        for arg in arg_sequence:
            if arg == ncls.zero:
                raise ShortCircuit(arg)
            if arg == ncls.identity:
                continue
            if arg.func == ncls:
                for x in arg.args:
                    yield x
            else:
                yield arg
    @classmethod
    def make_args(cls, expr):
        """
        Return a sequence of elements `args` such that cls(*args) == expr
        >>> Mul.make_args(x*y)
        (x, y)
        >>> Add.make_args(x*y)
        (x*y,)
        >>> set(Add.make_args(x*y + y))
        {y, x*y}
        """
        if isinstance(expr, cls):
            return expr._argset
        else:
            return frozenset([expr])
    @property  # type: ignore[misc]
    @cacheit
    def args(self):
        # Present the internal frozenset in a canonical (sorted) order so
        # .args is deterministic across runs.
        return tuple(ordered(self._argset))
# Third-party backport of logging.config.dictConfig (dictionary-based logging configuration).
import logging.handlers
import re
import sys
import types
# Pattern for a valid (ASCII) Python identifier: a letter or underscore
# followed by letters, digits or underscores.
IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)


def valid_ident(s):
    """Return True if *s* is a valid Python identifier; raise ValueError otherwise."""
    if IDENTIFIER.match(s) is None:
        raise ValueError('Not a valid Python identifier: %r' % s)
    return True
#
# This function is defined in logging only in recent versions of Python
#
# Fallback: accept either a numeric logging level or a level *name* and
# return the numeric value, mirroring logging._checkLevel.
try:
    from logging import _checkLevel
except ImportError:
    def _checkLevel(level):
        if isinstance(level, int):
            rv = level
        elif str(level) == level:
            # NOTE(review): logging._levelNames exists on Python 2 / very
            # early Python 3 only; modern Python 3 split it into
            # _nameToLevel / _levelToName -- confirm the target interpreter
            # (this fallback branch is only reached when the import fails).
            if level not in logging._levelNames:
                raise ValueError('Unknown level: %r' % level)
            rv = logging._levelNames[level]
        else:
            raise TypeError('Level not an integer or a '
                            'valid string: %r' % level)
        return rv
# The ConvertingXXX classes are wrappers around standard Python containers,
# and they serve to convert any suitable values in the container. The
# conversion converts base dicts, lists and tuples to their wrapped
# equivalents, whereas strings which match a conversion format are converted
# appropriately.
#
# Each wrapper should have a configurator attribute holding the actual
# configurator to use for conversion.
class ConvertingDict(dict):
    """A dict wrapper that converts values on access via its configurator.

    Converted results replace the originals in the mapping, so conversion
    happens at most once per key; converted container wrappers are linked
    back to this dict through ``parent``/``key``.
    """

    def __getitem__(self, key):
        raw = dict.__getitem__(self, key)
        converted = self.configurator.convert(raw)
        if converted is not raw:
            # Cache the converted value so later accesses skip conversion.
            self[key] = converted
            if type(converted) in (ConvertingDict, ConvertingList,
                                   ConvertingTuple):
                converted.parent = self
                converted.key = key
        return converted

    def get(self, key, default=None):
        raw = dict.get(self, key, default)
        converted = self.configurator.convert(raw)
        if converted is not raw:
            self[key] = converted
            if type(converted) in (ConvertingDict, ConvertingList,
                                   ConvertingTuple):
                converted.parent = self
                converted.key = key
        return converted

    def pop(self, key, default=None):
        raw = dict.pop(self, key, default)
        converted = self.configurator.convert(raw)
        # The entry is gone from the dict, so only link the wrapper back.
        if converted is not raw and type(converted) in (
                ConvertingDict, ConvertingList, ConvertingTuple):
            converted.parent = self
            converted.key = key
        return converted
class ConvertingList(list):
    """A list wrapper that converts values on access via its configurator.

    Converted results replace the originals in-place so conversion happens
    at most once per index; converted container wrappers are linked back to
    this list through ``parent``/``key``.
    """

    def __getitem__(self, key):
        raw = list.__getitem__(self, key)
        converted = self.configurator.convert(raw)
        if converted is not raw:
            # Cache the converted value so later accesses skip conversion.
            self[key] = converted
            if type(converted) in (ConvertingDict, ConvertingList,
                                   ConvertingTuple):
                converted.parent = self
                converted.key = key
        return converted

    def pop(self, idx=-1):
        raw = list.pop(self, idx)
        converted = self.configurator.convert(raw)
        # The element is gone from the list, so only link the wrapper back.
        if converted is not raw and type(converted) in (
                ConvertingDict, ConvertingList, ConvertingTuple):
            converted.parent = self
        return converted
class ConvertingTuple(tuple):
    """A tuple wrapper that converts values on access via its configurator.

    Tuples are immutable, so converted results are NOT written back; each
    access re-converts. Converted container wrappers are still linked to
    their parent through ``parent``/``key``.
    """

    def __getitem__(self, key):
        raw = tuple.__getitem__(self, key)
        converted = self.configurator.convert(raw)
        if converted is not raw and type(converted) in (
                ConvertingDict, ConvertingList, ConvertingTuple):
            converted.parent = self
            converted.key = key
        return converted
class BaseConfigurator(object):
    """
    The configurator base class which defines some useful defaults.

    Holds the configuration as a ConvertingDict and implements the string
    conversion protocols ``ext://`` (resolve a dotted import path) and
    ``cfg://`` (look up a value inside the configuration itself).
    """
    # "proto://suffix" strings are dispatched through value_converters.
    CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
    # Tokens of the cfg:// mini-grammar: word, .word and [index] accesses.
    WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
    DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
    INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
    DIGIT_PATTERN = re.compile(r'^\d+$')
    value_converters = {
        'ext' : 'ext_convert',
        'cfg' : 'cfg_convert',
    }
    # We might want to use a different one, e.g. importlib
    importer = __import__
    def __init__(self, config):
        # Wrap the raw dict so nested values get converted on access.
        self.config = ConvertingDict(config)
        self.config.configurator = self
    def resolve(self, s):
        """
        Resolve strings to objects using standard import and attribute
        syntax.
        """
        name = s.split('.')
        used = name.pop(0)
        try:
            found = self.importer(used)
            for frag in name:
                used += '.' + frag
                try:
                    found = getattr(found, frag)
                except AttributeError:
                    # Not an attribute yet: try importing the longer dotted
                    # path (it may be a submodule) and retry.
                    self.importer(used)
                    found = getattr(found, frag)
            return found
        except ImportError:
            # Re-raise as ValueError but preserve cause and traceback.
            e, tb = sys.exc_info()[1:]
            v = ValueError('Cannot resolve %r: %s' % (s, e))
            v.__cause__, v.__traceback__ = e, tb
            raise v
    def ext_convert(self, value):
        """Default converter for the ext:// protocol."""
        return self.resolve(value)
    def cfg_convert(self, value):
        """Default converter for the cfg:// protocol.

        Parses expressions such as ``handlers.console`` or
        ``loggers[myapp][0]`` against self.config.
        """
        rest = value
        m = self.WORD_PATTERN.match(rest)
        if m is None:
            raise ValueError("Unable to convert %r" % value)
        else:
            rest = rest[m.end():]
            d = self.config[m.groups()[0]]
            #print d, rest
            while rest:
                m = self.DOT_PATTERN.match(rest)
                if m:
                    d = d[m.groups()[0]]
                else:
                    m = self.INDEX_PATTERN.match(rest)
                    if m:
                        idx = m.groups()[0]
                        if not self.DIGIT_PATTERN.match(idx):
                            d = d[idx]
                        else:
                            try:
                                n = int(idx)  # try as number first (most likely)
                                d = d[n]
                            except TypeError:
                                # Container is not integer-indexable;
                                # fall back to the string key.
                                d = d[idx]
                if m:
                    rest = rest[m.end():]
                else:
                    raise ValueError('Unable to convert '
                                     '%r at %r' % (value, rest))
        #rest should be empty
        return d
    def convert(self, value):
        """
        Convert values to an appropriate type. dicts, lists and tuples are
        replaced by their converting alternatives. Strings are checked to
        see if they have a conversion format and are converted if they do.
        """
        if not isinstance(value, ConvertingDict) and isinstance(value, dict):
            value = ConvertingDict(value)
            value.configurator = self
        elif not isinstance(value, ConvertingList) and isinstance(value, list):
            value = ConvertingList(value)
            value.configurator = self
        elif not isinstance(value, ConvertingTuple) and\
                isinstance(value, tuple):
            value = ConvertingTuple(value)
            value.configurator = self
        elif isinstance(value, basestring):  # str for py3k
            # NOTE(review): `basestring` makes this module Python 2 only
            # (consistent with the `except X, e` syntax used later in the
            # file) -- confirm the target interpreter.
            m = self.CONVERT_PATTERN.match(value)
            if m:
                d = m.groupdict()
                prefix = d['prefix']
                converter = self.value_converters.get(prefix, None)
                if converter:
                    suffix = d['suffix']
                    converter = getattr(self, converter)
                    value = converter(suffix)
        return value
    def configure_custom(self, config):
        """Configure an object with a user-supplied factory.

        '()' holds the factory (callable or dotted path); '.' holds optional
        attributes to set on the result; every other valid-identifier key is
        passed to the factory as a keyword argument.
        """
        c = config.pop('()')
        if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType:
            c = self.resolve(c)
        props = config.pop('.', None)
        # Check for valid identifiers
        kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
        result = c(**kwargs)
        if props:
            for name, value in props.items():
                setattr(result, name, value)
        return result
    def as_tuple(self, value):
        """Utility function which converts lists to tuples."""
        if isinstance(value, list):
            value = tuple(value)
        return value
class DictConfigurator(BaseConfigurator):
"""
Configure logging using a dictionary-like object to describe the
configuration.
"""
def configure(self):
"""Do the configuration."""
config = self.config
if 'version' not in config:
raise ValueError("dictionary doesn't specify a version")
if config['version'] != 1:
raise ValueError("Unsupported version: %s" % config['version'])
incremental = config.pop('incremental', False)
EMPTY_DICT = {}
logging._acquireLock()
try:
if incremental:
handlers = config.get('handlers', EMPTY_DICT)
# incremental handler config only if handler name
# ties in to logging._handlers (Python 2.7)
if sys.version_info[:2] == (2, 7):
for name in handlers:
if name not in logging._handlers:
raise ValueError('No handler found with '
'name %r' % name)
else:
try:
handler = logging._handlers[name]
handler_config = handlers[name]
level = handler_config.get('level', None)
if level:
handler.setLevel(_checkLevel(level))
except StandardError, e:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
loggers = config.get('loggers', EMPTY_DICT)
for name in loggers:
try:
self.configure_logger(name, loggers[name], True)
except StandardError, e:
raise ValueError('Unable to configure logger '
'%r: %s' % (name, e))
root = config.get('root', None)
if root:
try:
self.configure_root(root, True)
except StandardError, e:
raise ValueError('Unable to configure root '
'logger: %s' % e)
else:
disable_existing = config.pop('disable_existing_loggers', True)
logging._handlers.clear()
del logging._handlerList[:]
# Do formatters first - they don't refer to anything else
formatters = config.get('formatters', EMPTY_DICT)
for name in formatters:
try:
formatters[name] = self.configure_formatter(
formatters[name])
except StandardError, e:
raise ValueError('Unable to configure '
'formatter %r: %s' % (name, e))
# Next, do filters - they don't refer to anything else, either
filters = config.get('filters', EMPTY_DICT)
for name in filters:
try:
filters[name] = self.configure_filter(filters[name])
except StandardError, e:
raise ValueError('Unable to configure '
'filter %r: %s' % (name, e))
# Next, do handlers - they refer to formatters and filters
# As handlers can refer to other handlers, sort the keys
# to allow a deterministic order of configuration
handlers = config.get('handlers', EMPTY_DICT)
for name in sorted(handlers):
try:
handler = self.configure_handler(handlers[name])
handler.name = name
handlers[name] = handler
except StandardError, e:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
# Next, do loggers - they refer to handlers and filters
#we don't want to lose the existing loggers,
#since other threads may have pointers to them.
#existing is set to contain all existing loggers,
#and as we go through the new configuration we
#remove any which are configured. At the end,
#what's left in existing is the set of loggers
#which were in the previous configuration but
#which are not in the new configuration.
root = logging.root
existing = root.manager.loggerDict.keys()
#The list needs to be sorted so that we can
#avoid disabling child loggers of explicitly
#named loggers. With a sorted list it is easier
#to find the child loggers.
existing.sort()
#We'll keep the list of existing loggers
#which are children of named loggers here...
child_loggers = []
#now set up the new ones...
loggers = config.get('loggers', EMPTY_DICT)
for name in loggers:
if name in existing:
i = existing.index(name)
prefixed = name + "."
pflen = len(prefixed)
num_existing = len(existing)
i = i + 1 # look at the entry after name
while (i < num_existing) and\
(existing[i][:pflen] == prefixed):
child_loggers.append(existing[i])
i = i + 1
existing.remove(name)
try:
self.configure_logger(name, loggers[name])
except StandardError, e:
raise ValueError('Unable to configure logger '
'%r: %s' % (name, e))
#Disable any old loggers. There's no point deleting
#them as other threads may continue to hold references
#and by disabling them, you stop them doing any logging.
#However, don't disable children of named loggers, as that's
#probably not what was intended by the user.
for log in existing:
logger = root.manager.loggerDict[log]
if log in child_loggers:
logger.level = logging.NOTSET
logger.handlers = []
logger.propagate = True
elif disable_existing:
logger.disabled = True
# And finally, do the root logger
root = config.get('root', None)
if root:
try:
self.configure_root(root)
except StandardError, e:
raise ValueError('Unable to configure root '
'logger: %s' % e)
finally:
logging._releaseLock()
    def configure_formatter(self, config):
        """Configure a formatter from a dictionary.

        If the special '()' key names a custom factory, delegate to
        configure_custom(); otherwise build a plain logging.Formatter
        from the optional 'format' and 'datefmt' keys.
        Returns the configured formatter instance.
        """
        if '()' in config:
            factory = config['()'] # for use in exception handler
            try:
                result = self.configure_custom(config)
            except TypeError, te:
                if "'format'" not in str(te):
                    raise
                #Name of parameter changed from fmt to format.
                #Retry with old name.
                #This is so that code can be used with older Python versions
                #(e.g. by Django)
                config['fmt'] = config.pop('format')
                config['()'] = factory
                result = self.configure_custom(config)
        else:
            # No custom factory: standard Formatter from 'format'/'datefmt'.
            fmt = config.get('format', None)
            dfmt = config.get('datefmt', None)
            result = logging.Formatter(fmt, dfmt)
        return result
def configure_filter(self, config):
"""Configure a filter from a dictionary."""
if '()' in config:
result = self.configure_custom(config)
else:
name = config.get('name', '')
result = logging.Filter(name)
return result
    def add_filters(self, filterer, filters):
        """Add filters to a filterer from a list of names.

        Each name in *filters* must refer to a filter already configured
        in self.config['filters']; any lookup or attachment failure is
        re-raised as ValueError.
        """
        for f in filters:
            try:
                filterer.addFilter(self.config['filters'][f])
            except StandardError, e:
                raise ValueError('Unable to add filter %r: %s' % (f, e))
    def configure_handler(self, config):
        """Configure a handler from a dictionary.

        Pops the 'formatter', 'level' and 'filters' references out of the
        config, resolves either the custom '()' factory or the 'class'
        entry, special-cases handlers that need pre-processing of their
        arguments, instantiates the handler with the remaining keys, and
        finally applies formatter/level/filters to the new handler.
        """
        formatter = config.pop('formatter', None)
        if formatter:
            try:
                # Formatters are configured first, so the reference must
                # already exist in self.config['formatters'].
                formatter = self.config['formatters'][formatter]
            except StandardError, e:
                raise ValueError('Unable to set formatter '
                                 '%r: %s' % (formatter, e))
        level = config.pop('level', None)
        filters = config.pop('filters', None)
        if '()' in config:
            c = config.pop('()')
            # Resolve dotted names to callables; old-style classes
            # (types.ClassType, Python 2 only) are already callable.
            if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType:
                c = self.resolve(c)
            factory = c
        else:
            klass = self.resolve(config.pop('class'))
            #Special case for handler which refers to another handler
            if issubclass(klass, logging.handlers.MemoryHandler) and\
                'target' in config:
                try:
                    # Handler names are configured in sorted order, so the
                    # target handler must already be present.
                    config['target'] = self.config['handlers'][config['target']]
                except StandardError, e:
                    raise ValueError('Unable to set target handler '
                                     '%r: %s' % (config['target'], e))
            elif issubclass(klass, logging.handlers.SMTPHandler) and\
                'mailhost' in config:
                # SMTPHandler expects mailhost as (host, port) when a port
                # is supplied; as_tuple converts the list form.
                config['mailhost'] = self.as_tuple(config['mailhost'])
            elif issubclass(klass, logging.handlers.SysLogHandler) and\
                'address' in config:
                config['address'] = self.as_tuple(config['address'])
            factory = klass
        # Only keys that are valid Python identifiers become kwargs.
        kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
        try:
            result = factory(**kwargs)
        except TypeError, te:
            if "'stream'" not in str(te):
                raise
            #The argument name changed from strm to stream
            #Retry with old name.
            #This is so that code can be used with older Python versions
            #(e.g. by Django)
            kwargs['strm'] = kwargs.pop('stream')
            result = factory(**kwargs)
        if formatter:
            result.setFormatter(formatter)
        if level is not None:
            result.setLevel(_checkLevel(level))
        if filters:
            self.add_filters(result, filters)
        return result
    def add_handlers(self, logger, handlers):
        """Add handlers to a logger from a list of names.

        Each name in *handlers* must refer to a handler already configured
        in self.config['handlers']; failures are re-raised as ValueError.
        """
        for h in handlers:
            try:
                logger.addHandler(self.config['handlers'][h])
            except StandardError, e:
                raise ValueError('Unable to add handler %r: %s' % (h, e))
def common_logger_config(self, logger, config, incremental=False):
"""
Perform configuration which is common to root and non-root loggers.
"""
level = config.get('level', None)
if level is not None:
logger.setLevel(_checkLevel(level))
if not incremental:
#Remove any existing handlers
for h in logger.handlers[:]:
logger.removeHandler(h)
handlers = config.get('handlers', None)
if handlers:
self.add_handlers(logger, handlers)
filters = config.get('filters', None)
if filters:
self.add_filters(logger, filters)
def configure_logger(self, name, config, incremental=False):
"""Configure a non-root logger from a dictionary."""
logger = logging.getLogger(name)
self.common_logger_config(logger, config, incremental)
propagate = config.get('propagate', None)
if propagate is not None:
logger.propagate = propagate
def configure_root(self, config, incremental=False):
"""Configure a root logger from a dictionary."""
root = logging.getLogger()
self.common_logger_config(root, config, incremental)
# Configurator class used by dictConfig(); may be reassigned by callers
# that need to customise configuration behaviour.
dictConfigClass = DictConfigurator
def dictConfig(config):
    """Configure logging using a dictionary.

    Delegates to dictConfigClass (DictConfigurator by default), whose
    configure() method interprets the dictionary schema.
    """
    dictConfigClass(config).configure()
/ChemDataExtractor-IDE-1.3.2.tar.gz/ChemDataExtractor-IDE-1.3.2/chemdataextractor/doc/meta.py | from .element import BaseElement
import logging
log = logging.getLogger(__name__)
class MetaData(BaseElement):
    """Bibliographic metadata for a document (title, authors, DOI, ...).

    The constructor receives a dictionary and copies every key/value pair
    onto the instance via setattr(); read-only properties expose the
    individual fields.
    """

    def __init__(self, data):
        super(MetaData, self).__init__()
        self._data = data
        # Initialise every known field to None, then let the supplied
        # dictionary override whichever attributes it provides.
        for field in ('title', 'authors', 'publisher', 'journal', 'volume',
                      'issue', 'firstpage', 'lastpage', 'doi', 'date',
                      'language', 'pdf_url', 'html_url'):
            setattr(self, '_' + field, None)
        for key, value in data.items():
            setattr(self, key, value)

    def __repr__(self):
        # Show only the fields that actually carry a value.
        return str({k: v for k, v in self.data.items() if v})

    @property
    def records(self):
        return []

    def serialize(self):
        # Serialized form drops empty fields, mirroring __repr__.
        return {key: value for key, value in self.data.items() if value}

    @property
    def title(self):
        """The article title"""
        return self._title

    @property
    def authors(self):
        """The article Authors

        type:: list()
        """
        return self._authors

    @property
    def publisher(self):
        """The source publisher"""
        return self._publisher

    @property
    def journal(self):
        """The source journal"""
        return self._journal

    @property
    def volume(self):
        """The source volume"""
        return self._volume

    @property
    def issue(self):
        """The source issue"""
        return self._issue

    @property
    def firstpage(self):
        """The source first page title"""
        return self._firstpage

    @property
    def lastpage(self):
        """The source last page"""
        return self._lastpage

    @property
    def doi(self):
        """The source DOI"""
        return self._doi

    @property
    def pdf_url(self):
        """The source url to the PDF version"""
        return self._pdf_url

    @property
    def html_url(self):
        """The source url to the HTML version"""
        return self._html_url

    @property
    def date(self):
        """The source publish date"""
        return self._date

    @property
    def data(self):
        """Returns all data as a dict()"""
        # Keys are stored with leading underscores; strip them for output.
        return {key.lstrip('_'): value for key, value in self._data.items()}

    @property
    def abbreviation_definitions(self):
        return []

    @property
    def definitions(self):
        return []
/ApiRequestManager-1.0.5-py3-none-any.whl/src/Pipelines.py | from datetime import datetime
import requests
import time
from abc import ABC, abstractmethod
from src.RequestFactory import RequestFactory
class GenericPipeline(ABC):
    """Abstract Pipeline class

    All Pipeline class must inherit from this class; the read, process and
    write methods need to be overridden in the subclass.
    """
    _data = None

    def load_data(self, data):
        """Check that *data* is iterable and store it for run_pipe().

        Raises ValueError when the argument does not implement __iter__.
        """
        if not hasattr(data, '__iter__'):
            raise ValueError("PyPipeline data must be a Generator or a Sequence(implement __iter__ method)")
        self._data = data

    @abstractmethod
    def read(self, entry):
        """First stage: called for each element of the loaded data (parsing).

        Arguments:
            entry:
                a data element passed through this function in run_pipe
        """
        pass

    @abstractmethod
    def process(self, entry):
        """Second stage: called for each parsed element (transformations).

        Arguments:
            entry:
                a data element passed through this function in run_pipe
        """
        pass

    @abstractmethod
    def write(self, entry_pack):
        """Third stage: called with batches of processed elements.

        Arguments:
            entry_pack:
                a group of data elements passed by run_pipe (e.g. to be
                written to a database)
        """
        pass

    def run_pipe(self, transaction_rate=None):
        """Execute the pipe over the loaded data.

        Arguments:
            transaction_rate(Optional):
                Integer. Number of processed elements to accumulate before
                each write() call; 1 writes after every element. When the
                rate exceeds the data length, or when it is None, write()
                is called once at the end with all processed elements.
                Elements for which process() returns None are discarded.
        """
        # Clear the error cache left over from a previous run, if any.
        if hasattr(self, '_err_log'):
            self._err_log = []
        batch = []
        for entry in self._data:
            fragment = self.process(self.read(entry))
            if fragment is None:
                continue
            batch.append(fragment)
            if transaction_rate is not None and len(batch) == transaction_rate:
                self.write(batch)
                batch = []
        # Flush whatever is left (covers the no-rate case and remainders).
        if batch:
            self.write(batch)
class ApiPipeline(GenericPipeline, ABC):
    """ Abstract ApiPipeline

    All ApiPipeline class must inherit from this class
    methods read, process and write needs to be override in the subclass

    Arguments:

        request_factory(Required):
            RequestFactory instance (see the doc).
            A RequestFactory instance that will create all requests of the pipe

        sleeping_time(Optional):
            Float.
            If api calls need to be delayed, add the time in seconds you
            want that pipe sleep after each request to 'sleeping_time' argument
    """
    request_factory = None
    # Class-level default kept for backward compatibility only; __init__
    # gives each instance its own list (see bug note there).
    _err_log = []

    @property
    def err_log(self):
        """ List of errors occurred during the Pipe

        Log objects are 4-tuple like
        ("entry", "status_code_if_there_is", "datetime", "typeError")

        Errors caught are requests.exceptions.ConnectionError, Timeout, and HttpError
        """
        return [(str(err[0]), err[1], err[2], err[3]) for err in self._err_log]

    def err_params_log(self):
        """return error logs parameters to rerun the pipe with failed requests"""
        return [err[0].get_request_params() for err in self._err_log]

    def __init__(self, request_factory: RequestFactory, sleeping_time: float = None):
        if not isinstance(request_factory, RequestFactory):
            raise ValueError("request_factory argument needs to be an instance of RequestFactory")
        self.request_factory = request_factory
        self._sleeping_time = sleeping_time
        # Bug fix: previously every instance appended to the shared class
        # attribute, leaking error logs between unrelated pipelines.
        self._err_log = []

    def read(self, entry):
        """wrap request parameters in the requestFactory

        create a request with a data element passed in argument
        and the requestFactory. Data elements are not validated!

        Arguments:
            entry:
                a 2-tuple (end_url:string, params:dict), e.g.
                ("the end of the url", {"param_name": "param_val"})
                or ("the end of the url", None) if there are no params
                or (None, None) if there is no params and no end_url
        """
        return self.request_factory(*entry)

    def process(self, entry):
        """execute the request created by read() and sleep if needed

        If an error occurs during request execution a log object is added
        to the _err_log attribute. Log objects are 4-tuple like
        ("entry", "status_code_if_there_is", "datetime", "typeError").
        Errors caught are requests.exceptions.ConnectionError, Timeout,
        and HTTPError; in every error case None is returned.

        Arguments:
            entry:
                a request element produced by read()
        """
        start_time = time.time()
        result = None
        try:
            result = entry.get_response()
        except requests.exceptions.ConnectionError:
            self._err_log.append((entry, None, datetime.now(), "ConnectionError"))
        except requests.exceptions.Timeout:
            self._err_log.append((entry, None, datetime.now(), "TimeOut"))
        # Bug fix: only check the HTTP status when a response was actually
        # obtained; calling raise_for_status() on None used to crash the
        # pipe with an AttributeError after a ConnectionError/Timeout.
        if result is not None:
            try:
                result.raise_for_status()
            except requests.exceptions.HTTPError:
                self._err_log.append((entry, result.status_code, datetime.now(), "HttpError"))
                result = None
        if self._sleeping_time is not None and result is not None:
            # Sleep only for the remainder of the requested delay.
            run_time = time.time() - start_time
            if run_time < self._sleeping_time:
                time.sleep(self._sleeping_time - run_time)
        return result

    def __eq__(self, other):
        """Pipes with same request factories are equal"""
        return self.request_factory == other.request_factory

    def __hash__(self):
        """Pipes with same request factories have same hash"""
        return hash(self.request_factory)

    def __repr__(self):
        return f"{self.__class__.__name__}(%r, %r)" % (self.request_factory, self._sleeping_time)

    @abstractmethod
    def write(self, entry_pack):
        """called in third for groups of elements of the 'data' loaded (to write it in base for example)

        You need to override this method. Provide the behavior you want for this data after the processing

        Arguments:
            entry_pack:
                a group of requests_results that is passed through this function in run_pipe method
        """
        pass
/MJOLNIR-1.3.1.tar.gz/MJOLNIR-1.3.1/test/Instrument.py | from MJOLNIR.Geometry.Instrument import Instrument,prediction
import MJOLNIR.Geometry.Analyser as Analyser
import MJOLNIR.Geometry.Detector as Detector
import MJOLNIR.Geometry.Wedge as Wedge
from MJOLNIR.Data import Sample
import pytest
import numpy as np
import warnings
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import os
dataPath = 'samlpedata'
def test_Instrument_init():
    # A freshly constructed instrument sits at the origin and is uninitialized.
    instrument = Instrument()
    assert np.all(instrument.position == (0, 0, 0))
    detector = Detector.Detector(position=(1.0, 1, 0), direction=(1, 0, 0))
    analyser = Analyser.Analyser(position=(0.5, 0, 0), direction=(1, 0, 1))
    wedge = Wedge.Wedge(detectors=[detector, detector], analysers=analyser)
    instrument.wedges = [wedge, wedge]
    assert instrument.settings['Initialized'] == False
def test_Instrument_error():
    """Invalid constructor arguments, wedge assignments and appends must raise."""
    # Nonexistent instrument file.
    with pytest.raises(ValueError):
        Instrument(fileName='wrongDummyFile.bin')
    Instr = Instrument()
    Ana = Analyser.FlatAnalyser(position=(0.5, 0, 0), direction=(1, 0, 1))
    # An analyser (or list of analysers) is not a valid wedge assignment.
    with pytest.raises(AttributeError):
        Instr.wedges = Ana
    with pytest.raises(AttributeError):
        Instr.wedges = [Ana, Ana]
    # Appending anything that is not a Wedge must be rejected.
    with pytest.raises(AttributeError):
        Instr.append("Wrong object type")
    with pytest.raises(AttributeError):
        Instr.append(["List of", 3.0, "wrong objects"])
    # The settings dictionary cannot be replaced wholesale.
    with pytest.raises(NotImplementedError):
        Instr.settings = {'Name', 'New dictionary'}
def test_Instrument_warnings():
    instr = Instrument()
    wedge = Wedge.Wedge(position=(0.5, 0, 0))
    instr.wedges = wedge
    # From https://docs.python.org/3.1/library/warnings.html
    with warnings.catch_warnings(record=True) as caught:
        # Make sure every warning is always recorded.
        warnings.simplefilter("always")
        # Assigning wedges a second time appends and should warn.
        instr.wedges = wedge
        assert len(caught) == 1
        assert issubclass(caught[0].category, UserWarning)
        assert 'The list of wedges is not empty! Appending new wedges(s)' in str(caught[0].message)
def test_Instrument_append():
    instr = Instrument()
    wedge = Wedge.Wedge(position=(0.5, 0, 0))
    # Both list-append and single-append are supported.
    instr.append([wedge, wedge])
    instr.append(wedge)
    assert len(instr.wedges) == 3
def test_Instrument_plot():
    instr = Instrument()
    wedge = Wedge.Wedge(position=(0.5, 0, 0))
    det = Detector.TubeDetector1D(position=(1.0, 1, 0), direction=(1, 0, 0))
    ana = Analyser.FlatAnalyser(position=(0.5, 0, 0), direction=(1, 0, 1))
    wedge.append([det, ana])
    instr.append(wedge)
    # Only checks that 3D plotting runs without raising.
    plt.ioff()
    axis = plt.figure().add_subplot(projection='3d')
    instr.plot(axis)
def test_Instrument_Setting():
    instr = Instrument()
    # Settings behave like a mutable dictionary.
    instr.settings['SettingVersion'] = 1.0
    assert instr.settings['SettingVersion'] == 1.0
def test_Instrument_Initialization():
    """Initialization requires wedges with a complete detector pixel split."""
    Instr = Instrument()
    wedge = Wedge.Wedge(position=(0.5, 0, 0), concept='ManyToMany')
    pixels = 33
    split = [12]
    Det = Detector.TubeDetector1D(position=(1.0, 1, 0), direction=(1, 0, 0), pixels=pixels, split=split)
    Ana = Analyser.FlatAnalyser(position=(0.5, 0, 0), direction=(1, 0, 1))
    wedge.append([Det, Det, Ana, Ana, Ana])
    # Without any wedge appended, initialization and A4/Ef access must fail.
    with pytest.raises(ValueError):
        Instr.initialize()
    with pytest.raises(RuntimeError):
        Instr.A4
    with pytest.raises(RuntimeError):
        Instr.Ef
    Instr.append(wedge)
    # The split still does not cover the full pixel range.
    with pytest.raises(ValueError):
        Instr.initialize()
    Instr.wedges[0].detectors[0].split = [0, 12, 20, pixels]
    Instr.initialize()
    # A4 and Ef must agree on shape: 1 wedge x 2 detectors x pixels.
    assert len(Instr.A4) == 1
    assert len(Instr.A4[0]) == 2
    assert len(Instr.A4[0][0]) == pixels
    assert len(Instr.A4) == len(Instr.Ef)
    assert len(Instr.A4[0]) == len(Instr.Ef[0])
    assert len(Instr.A4[0][0]) == len(Instr.Ef[0][0])
    assert Instr.settings['Initialized'] == True
    # A4 and Ef are read-only after initialization.
    with pytest.raises(NotImplementedError):
        Instr.A4 = []
    with pytest.raises(NotImplementedError):
        Instr.Ef = []
def test_Instrument_saveload():
    # Round-trip: a saved instrument loads back equal to the original.
    instr = Instrument(position=(0, 1, 0))
    reloaded = Instrument()
    wedge = Wedge.Wedge(position=(0.5, 0, 0))
    det = Detector.TubeDetector1D(position=(1.0, 1, 0), direction=(1, 0, 0))
    ana = Analyser.FlatAnalyser(position=(0.5, 0, 0), direction=(1, 0, 1))
    wedge.append([det, ana])
    instr.append(wedge)
    tempFile = 'temp.bin'
    instr.save(tempFile)
    reloaded.load(tempFile)
    os.remove(tempFile)
    assert instr == reloaded
def test_parseXML(): # Improve this test!
    # Round-trip through the XML representation.
    tempFileName = '__temp__.xml'
    instr = Instrument()
    instr.settings['Author'] = 'Jakob Lass'
    wedge = Wedge.Wedge(position=(0.5, 0, 0))
    det = Detector.TubeDetector1D(position=(1.0, 1, 0), direction=(1, 0, 0))
    ana = Analyser.FlatAnalyser(position=(0.5, 0, 0), direction=(1, 0, 1))
    wedge.append([det, ana])
    # Two wedges appended as a list plus one appended singly.
    instr.append([wedge, wedge])
    instr.append(wedge)
    instr.saveXML(tempFileName)
    reloaded = Instrument(fileName=tempFileName)
    os.remove(tempFileName)
    assert instr == reloaded
def test_XML_errors():
    """Loading malformed instrument XML must raise the appropriate errors."""
    temp_file = 'Tempfile.xml'

    def load_from_string(file_string):
        # Persist the XML snippet and try to build an Instrument from it.
        with open(temp_file, 'w') as handle:
            handle.write(file_string)
        return Instrument(fileName=temp_file)

    try:
        # FlatAnalyser without a 'position' attribute -> ValueError.
        with pytest.raises(ValueError):
            load_from_string(
                "<?xml version='1.0'?>"
                "<Instrument Initialized='False' Author='Jakob Lass' Date ='16/03/18' position='0.0,0.0,0.0'>"
                "<Wedge position='0.0,0.0,0.0' concept='ManyToMany'>"
                "<FlatAnalyser direction='0.707,0.0,0.707' d_spacing='3.35' mosaicity='60' width='0.05' height='0.1'></FlatAnalyser>"
                "<TubeDetector1D position='1.198,0.0580,0.71' direction='0.998,0.04841,0.0' pixels='456' length='0.883' diameter='0.02' split='57, 114, 171, 228, 285, 342, 399'></TubeDetector1D>"
                "</Wedge>"
                "</Instrument>")
        # FlatAnalyser 'position' with only two components -> AttributeError.
        with pytest.raises(AttributeError):
            load_from_string(
                "<?xml version='1.0'?>"
                "<Instrument Initialized='False' Author='Jakob Lass' Date ='16/03/18' position='0.0,0.0,0.0'>"
                "<Wedge position='0.0,0.0,0.0' concept='ManyToMany'>"
                "<FlatAnalyser position='0.0580,0.71' direction='0.707,0.0,0.707' d_spacing='3.35' mosaicity='60' width='0.05' height='0.1'></FlatAnalyser>"
                "<TubeDetector1D position='1.198,0.0580,0.71' direction='0.998,0.04841,0.0' pixels='456' length='0.883' diameter='0.02' split='57, 114, 171, 228, 285, 342, 399'></TubeDetector1D>"
                "</Wedge>"
                "</Instrument>")
        # Top-level child that is not a Wedge -> ValueError.
        with pytest.raises(ValueError):
            load_from_string(
                "<?xml version='1.0'?>"
                "<Instrument Initialized='False' Author='Jakob Lass' Date ='16/03/18' position='0.0,0.0,0.0'>"
                "<FlatAnalyser position='0.0,0.0,0.0' concept='ManyToMany'>"
                "<FlatAnalyser position='0.0580,0.71' direction='0.707,0.0,0.707' d_spacing='3.35' mosaicity='60' width='0.05' height='0.1'></FlatAnalyser>"
                "<TubeDetector1D position='1.198,0.0580,0.71' direction='0.998,0.04841,0.0' pixels='456' length='0.883' diameter='0.02' split='57, 114, 171, 228, 285, 342, 399'></TubeDetector1D>"
                "</FlatAnalyser>"
                "</Instrument>")
    finally:
        # Clean up the temporary file even when an expectation fails.
        os.remove(temp_file)
def test_instrument_string_dummy(): # Todo: Improve test!
    instr = Instrument()
    # Only verifies that the string representation does not raise.
    str(instr)
    assert True
def test_instrument_create_xml():
    filename = 'temp'
    # Generate the default CAMEA description and read it back in.
    Instrument().generateCAMEAXML(filename)
    reloaded = Instrument(fileName=filename + '.xml')
    os.remove(filename + '.xml')
    assert len(reloaded.wedges) == 8
@pytest.mark.unit
def test_Normalization_tables(quick):
    """Calibration generation rejects bad binning tables, then succeeds."""
    Instr = Instrument(fileName=os.path.join('Data', 'CAMEA_Updated.xml'))
    Instr.initialize()
    NF = os.path.join(dataPath, 'camea2023n000083.hdf')
    #AF = 'TestData/1024/A4Normalization.h5'
    # No binning specified.
    with pytest.raises(AttributeError):
        Instr.generateCalibration(Vanadiumdatafile=NF, savelocation=os.path.join(dataPath, ''), plot=False, tables=[])
    # Wrong binning.
    with pytest.raises(AttributeError):
        Instr.generateCalibration(Vanadiumdatafile=NF, savelocation=os.path.join(dataPath, ''), plot=False, tables=['Nothing?'])
    # Full run uses several binnings; the quick run only one.
    tables = [1] if quick == True else [1, 3, 8]
    Instr.generateCalibration(Vanadiumdatafile=NF, savelocation=os.path.join(dataPath, ''), plot=False, tables=tables, sampleMass=4.7)
def test_Prediction():
    a3_start = 0.0
    a3_stop = 100
    a3_steps = 101
    Ei = 5.0
    a4_positions = [-36, -40]
    points = False
    # [H,K,L,A3,A4,0.0,0.0,Ei,Ef]
    HKL1 = np.array([1, 0, 0])
    HKL2 = np.array([0, 0, 1])
    A3R1 = 25.0
    A3R2 = 115.0
    #r1 = np.array([1,0,0,25.0,-24,0.0,0.0,Ei,Ei])
    #r2 = np.array([0,0,1,115.0,-24,0.0,0.0,Ei,Ei])
    cell = np.array([6.0, 6.0, 6.0, 90.0, 90.0, 90.0])
    sample = Sample.calculateSample(cell, HKL1, HKL2, A3R1=A3R1, A3R2=A3R2, Ei=Ei, Ef=Ei)
    plt.ion()
    # Run the coverage prediction for each supported instrument geometry.
    for instrument_name in ('CAMEA', 'MultiFLEXX', 'Bambus'):
        ax = prediction(A3Start=a3_start, A3Stop=a3_stop, A3Steps=a3_steps,
                        A4Positions=a4_positions, Ei=Ei, sample=sample,
                        points=points, instrument=instrument_name)
/DjangoDjangoAppCenter-0.0.11-py3-none-any.whl/AppCenter/simpleui/static/admin/simpleui-x/elementui/locale/lang/el.js | 'use strict';
exports.__esModule = true;
exports.default = {
el: {
colorpicker: {
confirm: 'Εντάξει',
clear: 'Καθαρισμός'
},
datepicker: {
now: 'Τώρα',
today: 'Σήμερα',
cancel: 'Ακύρωση',
clear: 'Καθαρισμός',
confirm: 'Εντάξει',
selectDate: 'Επιλέξτε ημέρα',
selectTime: 'Επιλέξτε ώρα',
startDate: 'Ημερομηνία Έναρξης',
startTime: 'Ωρα Έναρξης',
endDate: 'Ημερομηνία Λήξης',
endTime: 'Ωρα Λήξης',
prevYear: 'Προηγούμενο Έτος',
nextYear: 'Επόμενο Έτος',
prevMonth: 'Προηγούμενος Μήνας',
nextMonth: 'Επόμενος Μήνας',
year: 'Έτος',
month1: 'Ιανουάριος',
month2: 'Φεβρουάριος',
month3: 'Μάρτιος',
month4: 'Απρίλιος',
month5: 'Μάιος',
month6: 'Ιούνιος',
month7: 'Ιούλιος',
month8: 'Αύγουστος',
month9: 'Σεπτέμβριος',
month10: 'Οκτώβριος',
month11: 'Νοέμβριος',
month12: 'Δεκέμβριος',
// week: 'εβδομάδα',
weeks: {
sun: 'Κυρ',
mon: 'Δευ',
tue: 'Τρι',
wed: 'Τετ',
thu: 'Πεμ',
fri: 'Παρ',
sat: 'Σαβ'
},
months: {
jan: 'Ιαν',
feb: 'Φεβ',
mar: 'Μαρ',
apr: 'Απρ',
may: 'Μαϊ',
jun: 'Ιουν',
jul: 'Ιουλ',
aug: 'Αυγ',
sep: 'Σεπ',
oct: 'Οκτ',
nov: 'Νοε',
dec: 'Δεκ'
}
},
select: {
loading: 'Φόρτωση',
noMatch: 'Δεν βρέθηκαν αποτελέσματα',
noData: 'Χωρίς δεδομένα',
placeholder: 'Επιλογή'
},
cascader: {
noMatch: 'Δεν βρέθηκαν αποτελέσματα',
loading: 'Φόρτωση',
placeholder: 'Επιλογή',
noData: 'Χωρίς δεδομένα'
},
pagination: {
goto: 'Μετάβαση σε',
pagesize: '/σελίδα',
total: 'Σύνολο {total}',
pageClassifier: ''
},
messagebox: {
title: 'Μήνυμα',
confirm: 'Εντάξει',
cancel: 'Ακύρωση',
error: 'Άκυρη εισαγωγή'
},
upload: {
deleteTip: 'Πάτησε Διαγραφή για αφαίρεση',
delete: 'Διαγραφή',
preview: 'Προεπισκόπηση',
continue: 'Συνέχεια'
},
table: {
emptyText: 'Χωρίς Δεδομένα',
confirmFilter: 'Επιβεβαίωση',
resetFilter: 'Επαναφορά',
clearFilter: 'Όλα',
sumText: 'Σύνολο'
},
tree: {
emptyText: 'Χωρίς Δεδομένα'
},
transfer: {
noMatch: 'Δεν βρέθηκαν αποτελέσματα',
noData: 'Χωρίς δεδομένα',
titles: ['Λίστα 1', 'Λίστα 2'],
filterPlaceholder: 'Αναζήτηση',
noCheckedFormat: '{total} Αντικείμενα',
hasCheckedFormat: '{checked}/{total} επιλεγμένα'
},
image: {
error: 'FAILED' // to be translated
},
pageHeader: {
title: 'Back' // to be translated
}
}
}; | PypiClean |
/HyperKitty-1.3.7.tar.gz/HyperKitty-1.3.7/hyperkitty/static/hyperkitty/libs/bootstrap/javascripts/bootstrap.bundle.min.js | !function(t,e){"object"==typeof exports&&"undefined"!=typeof module?e(exports,require("jquery")):"function"==typeof define&&define.amd?define(["exports","jquery"],e):e((t=t||self).bootstrap={},t.jQuery)}(this,function(t,p){"use strict";function i(t,e){for(var n=0;n<e.length;n++){var i=e[n];i.enumerable=i.enumerable||!1,i.configurable=!0,"value"in i&&(i.writable=!0),Object.defineProperty(t,i.key,i)}}function s(t,e,n){return e&&i(t.prototype,e),n&&i(t,n),t}function l(o){for(var t=1;t<arguments.length;t++){var r=null!=arguments[t]?arguments[t]:{},e=Object.keys(r);"function"==typeof Object.getOwnPropertySymbols&&(e=e.concat(Object.getOwnPropertySymbols(r).filter(function(t){return Object.getOwnPropertyDescriptor(r,t).enumerable}))),e.forEach(function(t){var e,n,i;e=o,i=r[n=t],n in e?Object.defineProperty(e,n,{value:i,enumerable:!0,configurable:!0,writable:!0}):e[n]=i})}return o}p=p&&p.hasOwnProperty("default")?p.default:p;var e="transitionend";function n(t){var e=this,n=!1;return p(this).one(m.TRANSITION_END,function(){n=!0}),setTimeout(function(){n||m.triggerTransitionEnd(e)},t),this}var m={TRANSITION_END:"bsTransitionEnd",getUID:function(t){for(;t+=~~(1e6*Math.random()),document.getElementById(t););return t},getSelectorFromElement:function(t){var e=t.getAttribute("data-target");if(!e||"#"===e){var n=t.getAttribute("href");e=n&&"#"!==n?n.trim():""}try{return document.querySelector(e)?e:null}catch(t){return null}},getTransitionDurationFromElement:function(t){if(!t)return 0;var e=p(t).css("transition-duration"),n=p(t).css("transition-delay"),i=parseFloat(e),o=parseFloat(n);return i||o?(e=e.split(",")[0],n=n.split(",")[0],1e3*(parseFloat(e)+parseFloat(n))):0},reflow:function(t){return t.offsetHeight},triggerTransitionEnd:function(t){p(t).trigger(e)},supportsTransitionEnd:function(){return 
Boolean(e)},isElement:function(t){return(t[0]||t).nodeType},typeCheckConfig:function(t,e,n){for(var i in n)if(Object.prototype.hasOwnProperty.call(n,i)){var o=n[i],r=e[i],s=r&&m.isElement(r)?"element":(a=r,{}.toString.call(a).match(/\s([a-z]+)/i)[1].toLowerCase());if(!new RegExp(o).test(s))throw new Error(t.toUpperCase()+': Option "'+i+'" provided type "'+s+'" but expected type "'+o+'".')}var a},findShadowRoot:function(t){if(!document.documentElement.attachShadow)return null;if("function"!=typeof t.getRootNode)return t instanceof ShadowRoot?t:t.parentNode?m.findShadowRoot(t.parentNode):null;var e=t.getRootNode();return e instanceof ShadowRoot?e:null}};p.fn.emulateTransitionEnd=n,p.event.special[m.TRANSITION_END]={bindType:e,delegateType:e,handle:function(t){if(p(t.target).is(this))return t.handleObj.handler.apply(this,arguments)}};var o="alert",r="bs.alert",a="."+r,c=p.fn[o],h={CLOSE:"close"+a,CLOSED:"closed"+a,CLICK_DATA_API:"click"+a+".data-api"},u="alert",f="fade",d="show",g=function(){function i(t){this._element=t}var t=i.prototype;return t.close=function(t){var e=this._element;t&&(e=this._getRootElement(t)),this._triggerCloseEvent(e).isDefaultPrevented()||this._removeElement(e)},t.dispose=function(){p.removeData(this._element,r),this._element=null},t._getRootElement=function(t){var e=m.getSelectorFromElement(t),n=!1;return e&&(n=document.querySelector(e)),n||(n=p(t).closest("."+u)[0]),n},t._triggerCloseEvent=function(t){var e=p.Event(h.CLOSE);return p(t).trigger(e),e},t._removeElement=function(e){var n=this;if(p(e).removeClass(d),p(e).hasClass(f)){var t=m.getTransitionDurationFromElement(e);p(e).one(m.TRANSITION_END,function(t){return n._destroyElement(e,t)}).emulateTransitionEnd(t)}else this._destroyElement(e)},t._destroyElement=function(t){p(t).detach().trigger(h.CLOSED).remove()},i._jQueryInterface=function(n){return this.each(function(){var t=p(this),e=t.data(r);e||(e=new i(this),t.data(r,e)),"close"===n&&e[n](this)})},i._handleDismiss=function(e){return 
function(t){t&&t.preventDefault(),e.close(this)}},s(i,null,[{key:"VERSION",get:function(){return"4.3.1"}}]),i}();p(document).on(h.CLICK_DATA_API,'[data-dismiss="alert"]',g._handleDismiss(new g)),p.fn[o]=g._jQueryInterface,p.fn[o].Constructor=g,p.fn[o].noConflict=function(){return p.fn[o]=c,g._jQueryInterface};var _="button",v="bs.button",y="."+v,E=".data-api",b=p.fn[_],w="active",C="btn",T="focus",S='[data-toggle^="button"]',D='[data-toggle="buttons"]',I='input:not([type="hidden"])',A=".active",O=".btn",N={CLICK_DATA_API:"click"+y+E,FOCUS_BLUR_DATA_API:"focus"+y+E+" blur"+y+E},k=function(){function n(t){this._element=t}var t=n.prototype;return t.toggle=function(){var t=!0,e=!0,n=p(this._element).closest(D)[0];if(n){var i=this._element.querySelector(I);if(i){if("radio"===i.type)if(i.checked&&this._element.classList.contains(w))t=!1;else{var o=n.querySelector(A);o&&p(o).removeClass(w)}if(t){if(i.hasAttribute("disabled")||n.hasAttribute("disabled")||i.classList.contains("disabled")||n.classList.contains("disabled"))return;i.checked=!this._element.classList.contains(w),p(i).trigger("change")}i.focus(),e=!1}}e&&this._element.setAttribute("aria-pressed",!this._element.classList.contains(w)),t&&p(this._element).toggleClass(w)},t.dispose=function(){p.removeData(this._element,v),this._element=null},n._jQueryInterface=function(e){return this.each(function(){var t=p(this).data(v);t||(t=new n(this),p(this).data(v,t)),"toggle"===e&&t[e]()})},s(n,null,[{key:"VERSION",get:function(){return"4.3.1"}}]),n}();p(document).on(N.CLICK_DATA_API,S,function(t){t.preventDefault();var e=t.target;p(e).hasClass(C)||(e=p(e).closest(O)),k._jQueryInterface.call(p(e),"toggle")}).on(N.FOCUS_BLUR_DATA_API,S,function(t){var e=p(t.target).closest(O)[0];p(e).toggleClass(T,/^focus(in)?$/.test(t.type))}),p.fn[_]=k._jQueryInterface,p.fn[_].Constructor=k,p.fn[_].noConflict=function(){return p.fn[_]=b,k._jQueryInterface};var 
L="carousel",x="bs.carousel",P="."+x,H=".data-api",j=p.fn[L],R={interval:5e3,keyboard:!0,slide:!1,pause:"hover",wrap:!0,touch:!0},F={interval:"(number|boolean)",keyboard:"boolean",slide:"(boolean|string)",pause:"(string|boolean)",wrap:"boolean",touch:"boolean"},M="next",W="prev",U="left",B="right",q={SLIDE:"slide"+P,SLID:"slid"+P,KEYDOWN:"keydown"+P,MOUSEENTER:"mouseenter"+P,MOUSELEAVE:"mouseleave"+P,TOUCHSTART:"touchstart"+P,TOUCHMOVE:"touchmove"+P,TOUCHEND:"touchend"+P,POINTERDOWN:"pointerdown"+P,POINTERUP:"pointerup"+P,DRAG_START:"dragstart"+P,LOAD_DATA_API:"load"+P+H,CLICK_DATA_API:"click"+P+H},K="carousel",Q="active",V="slide",Y="carousel-item-right",z="carousel-item-left",X="carousel-item-next",G="carousel-item-prev",$="pointer-event",J=".active",Z=".active.carousel-item",tt=".carousel-item",et=".carousel-item img",nt=".carousel-item-next, .carousel-item-prev",it=".carousel-indicators",ot="[data-slide], [data-slide-to]",rt='[data-ride="carousel"]',st={TOUCH:"touch",PEN:"pen"},at=function(){function r(t,e){this._items=null,this._interval=null,this._activeElement=null,this._isPaused=!1,this._isSliding=!1,this.touchTimeout=null,this.touchStartX=0,this.touchDeltaX=0,this._config=this._getConfig(e),this._element=t,this._indicatorsElement=this._element.querySelector(it),this._touchSupported="ontouchstart"in document.documentElement||0<navigator.maxTouchPoints,this._pointerEvent=Boolean(window.PointerEvent||window.MSPointerEvent),this._addEventListeners()}var t=r.prototype;return 
t.next=function(){this._isSliding||this._slide(M)},t.nextWhenVisible=function(){!document.hidden&&p(this._element).is(":visible")&&"hidden"!==p(this._element).css("visibility")&&this.next()},t.prev=function(){this._isSliding||this._slide(W)},t.pause=function(t){t||(this._isPaused=!0),this._element.querySelector(nt)&&(m.triggerTransitionEnd(this._element),this.cycle(!0)),clearInterval(this._interval),this._interval=null},t.cycle=function(t){t||(this._isPaused=!1),this._interval&&(clearInterval(this._interval),this._interval=null),this._config.interval&&!this._isPaused&&(this._interval=setInterval((document.visibilityState?this.nextWhenVisible:this.next).bind(this),this._config.interval))},t.to=function(t){var e=this;this._activeElement=this._element.querySelector(Z);var n=this._getItemIndex(this._activeElement);if(!(t>this._items.length-1||t<0))if(this._isSliding)p(this._element).one(q.SLID,function(){return e.to(t)});else{if(n===t)return this.pause(),void this.cycle();var i=n<t?M:W;this._slide(i,this._items[t])}},t.dispose=function(){p(this._element).off(P),p.removeData(this._element,x),this._items=null,this._config=null,this._element=null,this._interval=null,this._isPaused=null,this._isSliding=null,this._activeElement=null,this._indicatorsElement=null},t._getConfig=function(t){return t=l({},R,t),m.typeCheckConfig(L,t,F),t},t._handleSwipe=function(){var t=Math.abs(this.touchDeltaX);if(!(t<=40)){var e=t/this.touchDeltaX;0<e&&this.prev(),e<0&&this.next()}},t._addEventListeners=function(){var e=this;this._config.keyboard&&p(this._element).on(q.KEYDOWN,function(t){return e._keydown(t)}),"hover"===this._config.pause&&p(this._element).on(q.MOUSEENTER,function(t){return e.pause(t)}).on(q.MOUSELEAVE,function(t){return e.cycle(t)}),this._config.touch&&this._addTouchEventListeners()},t._addTouchEventListeners=function(){var n=this;if(this._touchSupported){var 
e=function(t){n._pointerEvent&&st[t.originalEvent.pointerType.toUpperCase()]?n.touchStartX=t.originalEvent.clientX:n._pointerEvent||(n.touchStartX=t.originalEvent.touches[0].clientX)},i=function(t){n._pointerEvent&&st[t.originalEvent.pointerType.toUpperCase()]&&(n.touchDeltaX=t.originalEvent.clientX-n.touchStartX),n._handleSwipe(),"hover"===n._config.pause&&(n.pause(),n.touchTimeout&&clearTimeout(n.touchTimeout),n.touchTimeout=setTimeout(function(t){return n.cycle(t)},500+n._config.interval))};p(this._element.querySelectorAll(et)).on(q.DRAG_START,function(t){return t.preventDefault()}),this._pointerEvent?(p(this._element).on(q.POINTERDOWN,function(t){return e(t)}),p(this._element).on(q.POINTERUP,function(t){return i(t)}),this._element.classList.add($)):(p(this._element).on(q.TOUCHSTART,function(t){return e(t)}),p(this._element).on(q.TOUCHMOVE,function(t){var e;(e=t).originalEvent.touches&&1<e.originalEvent.touches.length?n.touchDeltaX=0:n.touchDeltaX=e.originalEvent.touches[0].clientX-n.touchStartX}),p(this._element).on(q.TOUCHEND,function(t){return i(t)}))}},t._keydown=function(t){if(!/input|textarea/i.test(t.target.tagName))switch(t.which){case 37:t.preventDefault(),this.prev();break;case 39:t.preventDefault(),this.next()}},t._getItemIndex=function(t){return this._items=t&&t.parentNode?[].slice.call(t.parentNode.querySelectorAll(tt)):[],this._items.indexOf(t)},t._getItemByDirection=function(t,e){var n=t===M,i=t===W,o=this._getItemIndex(e),r=this._items.length-1;if((i&&0===o||n&&o===r)&&!this._config.wrap)return e;var s=(o+(t===W?-1:1))%this._items.length;return-1===s?this._items[this._items.length-1]:this._items[s]},t._triggerSlideEvent=function(t,e){var n=this._getItemIndex(t),i=this._getItemIndex(this._element.querySelector(Z)),o=p.Event(q.SLIDE,{relatedTarget:t,direction:e,from:i,to:n});return p(this._element).trigger(o),o},t._setActiveIndicatorElement=function(t){if(this._indicatorsElement){var 
e=[].slice.call(this._indicatorsElement.querySelectorAll(J));p(e).removeClass(Q);var n=this._indicatorsElement.children[this._getItemIndex(t)];n&&p(n).addClass(Q)}},t._slide=function(t,e){var n,i,o,r=this,s=this._element.querySelector(Z),a=this._getItemIndex(s),l=e||s&&this._getItemByDirection(t,s),c=this._getItemIndex(l),h=Boolean(this._interval);if(o=t===M?(n=z,i=X,U):(n=Y,i=G,B),l&&p(l).hasClass(Q))this._isSliding=!1;else if(!this._triggerSlideEvent(l,o).isDefaultPrevented()&&s&&l){this._isSliding=!0,h&&this.pause(),this._setActiveIndicatorElement(l);var u=p.Event(q.SLID,{relatedTarget:l,direction:o,from:a,to:c});if(p(this._element).hasClass(V)){p(l).addClass(i),m.reflow(l),p(s).addClass(n),p(l).addClass(n);var f=parseInt(l.getAttribute("data-interval"),10);this._config.interval=f?(this._config.defaultInterval=this._config.defaultInterval||this._config.interval,f):this._config.defaultInterval||this._config.interval;var d=m.getTransitionDurationFromElement(s);p(s).one(m.TRANSITION_END,function(){p(l).removeClass(n+" "+i).addClass(Q),p(s).removeClass(Q+" "+i+" "+n),r._isSliding=!1,setTimeout(function(){return p(r._element).trigger(u)},0)}).emulateTransitionEnd(d)}else p(s).removeClass(Q),p(l).addClass(Q),this._isSliding=!1,p(this._element).trigger(u);h&&this.cycle()}},r._jQueryInterface=function(i){return this.each(function(){var t=p(this).data(x),e=l({},R,p(this).data());"object"==typeof i&&(e=l({},e,i));var n="string"==typeof i?i:e.slide;if(t||(t=new r(this,e),p(this).data(x,t)),"number"==typeof i)t.to(i);else if("string"==typeof n){if("undefined"==typeof t[n])throw new TypeError('No method named "'+n+'"');t[n]()}else e.interval&&e.ride&&(t.pause(),t.cycle())})},r._dataApiClickHandler=function(t){var e=m.getSelectorFromElement(this);if(e){var n=p(e)[0];if(n&&p(n).hasClass(K)){var 
i=l({},p(n).data(),p(this).data()),o=this.getAttribute("data-slide-to");o&&(i.interval=!1),r._jQueryInterface.call(p(n),i),o&&p(n).data(x).to(o),t.preventDefault()}}},s(r,null,[{key:"VERSION",get:function(){return"4.3.1"}},{key:"Default",get:function(){return R}}]),r}();p(document).on(q.CLICK_DATA_API,ot,at._dataApiClickHandler),p(window).on(q.LOAD_DATA_API,function(){for(var t=[].slice.call(document.querySelectorAll(rt)),e=0,n=t.length;e<n;e++){var i=p(t[e]);at._jQueryInterface.call(i,i.data())}}),p.fn[L]=at._jQueryInterface,p.fn[L].Constructor=at,p.fn[L].noConflict=function(){return p.fn[L]=j,at._jQueryInterface};var lt="collapse",ct="bs.collapse",ht="."+ct,ut=p.fn[lt],ft={toggle:!0,parent:""},dt={toggle:"boolean",parent:"(string|element)"},pt={SHOW:"show"+ht,SHOWN:"shown"+ht,HIDE:"hide"+ht,HIDDEN:"hidden"+ht,CLICK_DATA_API:"click"+ht+".data-api"},mt="show",gt="collapse",_t="collapsing",vt="collapsed",yt="width",Et="height",bt=".show, .collapsing",wt='[data-toggle="collapse"]',Ct=function(){function a(e,t){this._isTransitioning=!1,this._element=e,this._config=this._getConfig(t),this._triggerArray=[].slice.call(document.querySelectorAll('[data-toggle="collapse"][href="#'+e.id+'"],[data-toggle="collapse"][data-target="#'+e.id+'"]'));for(var n=[].slice.call(document.querySelectorAll(wt)),i=0,o=n.length;i<o;i++){var r=n[i],s=m.getSelectorFromElement(r),a=[].slice.call(document.querySelectorAll(s)).filter(function(t){return t===e});null!==s&&0<a.length&&(this._selector=s,this._triggerArray.push(r))}this._parent=this._config.parent?this._getParent():null,this._config.parent||this._addAriaAndCollapsedClass(this._element,this._triggerArray),this._config.toggle&&this.toggle()}var t=a.prototype;return t.toggle=function(){p(this._element).hasClass(mt)?this.hide():this.show()},t.show=function(){var 
// NOTE(review): Bootstrap v4.3.1 Collapse plugin (minified): body of show() (accordion-sibling handling, transition to expanded) followed by hide(). Generated build output — keep byte-identical; edit the unminified source.
t,e,n=this;if(!this._isTransitioning&&!p(this._element).hasClass(mt)&&(this._parent&&0===(t=[].slice.call(this._parent.querySelectorAll(bt)).filter(function(t){return"string"==typeof n._config.parent?t.getAttribute("data-parent")===n._config.parent:t.classList.contains(gt)})).length&&(t=null),!(t&&(e=p(t).not(this._selector).data(ct))&&e._isTransitioning))){var i=p.Event(pt.SHOW);if(p(this._element).trigger(i),!i.isDefaultPrevented()){t&&(a._jQueryInterface.call(p(t).not(this._selector),"hide"),e||p(t).data(ct,null));var o=this._getDimension();p(this._element).removeClass(gt).addClass(_t),this._element.style[o]=0,this._triggerArray.length&&p(this._triggerArray).removeClass(vt).attr("aria-expanded",!0),this.setTransitioning(!0);var r="scroll"+(o[0].toUpperCase()+o.slice(1)),s=m.getTransitionDurationFromElement(this._element);p(this._element).one(m.TRANSITION_END,function(){p(n._element).removeClass(_t).addClass(gt).addClass(mt),n._element.style[o]="",n.setTransitioning(!1),p(n._element).trigger(pt.SHOWN)}).emulateTransitionEnd(s),this._element.style[o]=this._element[r]+"px"}}},t.hide=function(){var t=this;if(!this._isTransitioning&&p(this._element).hasClass(mt)){var e=p.Event(pt.HIDE);if(p(this._element).trigger(e),!e.isDefaultPrevented()){var n=this._getDimension();this._element.style[n]=this._element.getBoundingClientRect()[n]+"px",m.reflow(this._element),p(this._element).addClass(_t).removeClass(gt).removeClass(mt);var i=this._triggerArray.length;if(0<i)for(var o=0;o<i;o++){var r=this._triggerArray[o],s=m.getSelectorFromElement(r);if(null!==s)p([].slice.call(document.querySelectorAll(s))).hasClass(mt)||p(r).addClass(vt).attr("aria-expanded",!1)}this.setTransitioning(!0);this._element.style[n]="";var 
a=m.getTransitionDurationFromElement(this._element);p(this._element).one(m.TRANSITION_END,function(){t.setTransitioning(!1),p(t._element).removeClass(_t).addClass(gt).trigger(pt.HIDDEN)}).emulateTransitionEnd(a)}}},t.setTransitioning=function(t){this._isTransitioning=t},t.dispose=function(){p.removeData(this._element,ct),this._config=null,this._parent=null,this._element=null,this._triggerArray=null,this._isTransitioning=null},t._getConfig=function(t){return(t=l({},ft,t)).toggle=Boolean(t.toggle),m.typeCheckConfig(lt,t,dt),t},t._getDimension=function(){return p(this._element).hasClass(yt)?yt:Et},t._getParent=function(){var t,n=this;m.isElement(this._config.parent)?(t=this._config.parent,"undefined"!=typeof this._config.parent.jquery&&(t=this._config.parent[0])):t=document.querySelector(this._config.parent);var e='[data-toggle="collapse"][data-parent="'+this._config.parent+'"]',i=[].slice.call(t.querySelectorAll(e));return p(i).each(function(t,e){n._addAriaAndCollapsedClass(a._getTargetFromElement(e),[e])}),t},t._addAriaAndCollapsedClass=function(t,e){var n=p(t).hasClass(mt);e.length&&p(e).toggleClass(vt,!n).attr("aria-expanded",n)},a._getTargetFromElement=function(t){var e=m.getSelectorFromElement(t);return e?document.querySelector(e):null},a._jQueryInterface=function(i){return this.each(function(){var t=p(this),e=t.data(ct),n=l({},ft,t.data(),"object"==typeof i&&i?i:{});if(!e&&n.toggle&&/show|hide/.test(i)&&(n.toggle=!1),e||(e=new a(this,n),t.data(ct,e)),"string"==typeof i){if("undefined"==typeof e[i])throw new TypeError('No method named "'+i+'"');e[i]()}})},s(a,null,[{key:"VERSION",get:function(){return"4.3.1"}},{key:"Default",get:function(){return ft}}]),a}();p(document).on(pt.CLICK_DATA_API,wt,function(t){"A"===t.currentTarget.tagName&&t.preventDefault();var n=p(this),e=m.getSelectorFromElement(this),i=[].slice.call(document.querySelectorAll(e));p(i).each(function(){var 
t=p(this),e=t.data(ct)?"toggle":n.data();Ct._jQueryInterface.call(t,e)})}),p.fn[lt]=Ct._jQueryInterface,p.fn[lt].Constructor=Ct,p.fn[lt].noConflict=function(){return p.fn[lt]=ut,Ct._jQueryInterface};for(var Tt="undefined"!=typeof window&&"undefined"!=typeof document,St=["Edge","Trident","Firefox"],Dt=0,It=0;It<St.length;It+=1)if(Tt&&0<=navigator.userAgent.indexOf(St[It])){Dt=1;break}var At=Tt&&window.Promise?function(t){var e=!1;return function(){e||(e=!0,window.Promise.resolve().then(function(){e=!1,t()}))}}:function(t){var e=!1;return function(){e||(e=!0,setTimeout(function(){e=!1,t()},Dt))}};function Ot(t){return t&&"[object Function]"==={}.toString.call(t)}function Nt(t,e){if(1!==t.nodeType)return[];var n=t.ownerDocument.defaultView.getComputedStyle(t,null);return e?n[e]:n}function kt(t){return"HTML"===t.nodeName?t:t.parentNode||t.host}function Lt(t){if(!t)return document.body;switch(t.nodeName){case"HTML":case"BODY":return t.ownerDocument.body;case"#document":return t.body}var e=Nt(t),n=e.overflow,i=e.overflowX,o=e.overflowY;return/(auto|scroll|overlay)/.test(n+o+i)?t:Lt(kt(t))}var xt=Tt&&!(!window.MSInputMethodContext||!document.documentMode),Pt=Tt&&/MSIE 10/.test(navigator.userAgent);function Ht(t){return 11===t?xt:10===t?Pt:xt||Pt}function jt(t){if(!t)return document.documentElement;for(var e=Ht(10)?document.body:null,n=t.offsetParent||null;n===e&&t.nextElementSibling;)n=(t=t.nextElementSibling).offsetParent;var i=n&&n.nodeName;return i&&"BODY"!==i&&"HTML"!==i?-1!==["TH","TD","TABLE"].indexOf(n.nodeName)&&"static"===Nt(n,"position")?jt(n):n:t?t.ownerDocument.documentElement:document.documentElement}function Rt(t){return null!==t.parentNode?Rt(t.parentNode):t}function Ft(t,e){if(!(t&&t.nodeType&&e&&e.nodeType))return document.documentElement;var n=t.compareDocumentPosition(e)&Node.DOCUMENT_POSITION_FOLLOWING,i=n?t:e,o=n?e:t,r=document.createRange();r.setStart(i,0),r.setEnd(o,0);var 
// NOTE(review): inlined Popper.js v1 positioning helpers (minified): common-ancestor resolution, scroll/border/window-size measurement, an ES5 class/defineProperty shim, Object.assign fallback, and a getBoundingClientRect wrapper with Ht(10) (old-IE) special-casing. Generated code — keep byte-identical.
s,a,l=r.commonAncestorContainer;if(t!==l&&e!==l||i.contains(o))return"BODY"===(a=(s=l).nodeName)||"HTML"!==a&&jt(s.firstElementChild)!==s?jt(l):l;var c=Rt(t);return c.host?Ft(c.host,e):Ft(t,Rt(e).host)}function Mt(t){var e="top"===(1<arguments.length&&void 0!==arguments[1]?arguments[1]:"top")?"scrollTop":"scrollLeft",n=t.nodeName;if("BODY"!==n&&"HTML"!==n)return t[e];var i=t.ownerDocument.documentElement;return(t.ownerDocument.scrollingElement||i)[e]}function Wt(t,e){var n="x"===e?"Left":"Top",i="Left"===n?"Right":"Bottom";return parseFloat(t["border"+n+"Width"],10)+parseFloat(t["border"+i+"Width"],10)}function Ut(t,e,n,i){return Math.max(e["offset"+t],e["scroll"+t],n["client"+t],n["offset"+t],n["scroll"+t],Ht(10)?parseInt(n["offset"+t])+parseInt(i["margin"+("Height"===t?"Top":"Left")])+parseInt(i["margin"+("Height"===t?"Bottom":"Right")]):0)}function Bt(t){var e=t.body,n=t.documentElement,i=Ht(10)&&getComputedStyle(n);return{height:Ut("Height",e,n,i),width:Ut("Width",e,n,i)}}var qt=function(){function i(t,e){for(var n=0;n<e.length;n++){var i=e[n];i.enumerable=i.enumerable||!1,i.configurable=!0,"value"in i&&(i.writable=!0),Object.defineProperty(t,i.key,i)}}return function(t,e,n){return e&&i(t.prototype,e),n&&i(t,n),t}}(),Kt=function(t,e,n){return e in t?Object.defineProperty(t,e,{value:n,enumerable:!0,configurable:!0,writable:!0}):t[e]=n,t},Qt=Object.assign||function(t){for(var e=1;e<arguments.length;e++){var n=arguments[e];for(var i in n)Object.prototype.hasOwnProperty.call(n,i)&&(t[i]=n[i])}return t};function Vt(t){return Qt({},t,{right:t.left+t.width,bottom:t.top+t.height})}function Yt(t){var e={};try{if(Ht(10)){e=t.getBoundingClientRect();var n=Mt(t,"top"),i=Mt(t,"left");e.top+=n,e.left+=i,e.bottom+=n,e.right+=i}else e=t.getBoundingClientRect()}catch(t){}var 
o={left:e.left,top:e.top,width:e.right-e.left,height:e.bottom-e.top},r="HTML"===t.nodeName?Bt(t.ownerDocument):{},s=r.width||t.clientWidth||o.right-o.left,a=r.height||t.clientHeight||o.bottom-o.top,l=t.offsetWidth-s,c=t.offsetHeight-a;if(l||c){var h=Nt(t);l-=Wt(h,"x"),c-=Wt(h,"y"),o.width-=l,o.height-=c}return Vt(o)}function zt(t,e){var n=2<arguments.length&&void 0!==arguments[2]&&arguments[2],i=Ht(10),o="HTML"===e.nodeName,r=Yt(t),s=Yt(e),a=Lt(t),l=Nt(e),c=parseFloat(l.borderTopWidth,10),h=parseFloat(l.borderLeftWidth,10);n&&o&&(s.top=Math.max(s.top,0),s.left=Math.max(s.left,0));var u=Vt({top:r.top-s.top-c,left:r.left-s.left-h,width:r.width,height:r.height});if(u.marginTop=0,u.marginLeft=0,!i&&o){var f=parseFloat(l.marginTop,10),d=parseFloat(l.marginLeft,10);u.top-=c-f,u.bottom-=c-f,u.left-=h-d,u.right-=h-d,u.marginTop=f,u.marginLeft=d}return(i&&!n?e.contains(a):e===a&&"BODY"!==a.nodeName)&&(u=function(t,e){var n=2<arguments.length&&void 0!==arguments[2]&&arguments[2],i=Mt(e,"top"),o=Mt(e,"left"),r=n?-1:1;return t.top+=i*r,t.bottom+=i*r,t.left+=o*r,t.right+=o*r,t}(u,e)),u}function Xt(t){if(!t||!t.parentElement||Ht())return document.documentElement;for(var e=t.parentElement;e&&"none"===Nt(e,"transform");)e=e.parentElement;return e||document.documentElement}function Gt(t,e,n,i){var o=4<arguments.length&&void 0!==arguments[4]&&arguments[4],r={top:0,left:0},s=o?Xt(t):Ft(t,e);if("viewport"===i)r=function(t){var e=1<arguments.length&&void 0!==arguments[1]&&arguments[1],n=t.ownerDocument.documentElement,i=zt(t,n),o=Math.max(n.clientWidth,window.innerWidth||0),r=Math.max(n.clientHeight,window.innerHeight||0),s=e?0:Mt(n),a=e?0:Mt(n,"left");return Vt({top:s-i.top+i.marginTop,left:a-i.left+i.marginLeft,width:o,height:r})}(s,o);else{var a=void 0;"scrollParent"===i?"BODY"===(a=Lt(kt(e))).nodeName&&(a=t.ownerDocument.documentElement):a="window"===i?t.ownerDocument.documentElement:i;var l=zt(a,s,o);if("HTML"!==a.nodeName||function t(e){var 
n=e.nodeName;if("BODY"===n||"HTML"===n)return!1;if("fixed"===Nt(e,"position"))return!0;var i=kt(e);return!!i&&t(i)}(s))r=l;else{var c=Bt(t.ownerDocument),h=c.height,u=c.width;r.top+=l.top-l.marginTop,r.bottom=h+l.top,r.left+=l.left-l.marginLeft,r.right=u+l.left}}var f="number"==typeof(n=n||0);return r.left+=f?n:n.left||0,r.top+=f?n:n.top||0,r.right-=f?n:n.right||0,r.bottom-=f?n:n.bottom||0,r}function $t(t,e,i,n,o){var r=5<arguments.length&&void 0!==arguments[5]?arguments[5]:0;if(-1===t.indexOf("auto"))return t;var s=Gt(i,n,r,o),a={top:{width:s.width,height:e.top-s.top},right:{width:s.right-e.right,height:s.height},bottom:{width:s.width,height:s.bottom-e.bottom},left:{width:e.left-s.left,height:s.height}},l=Object.keys(a).map(function(t){return Qt({key:t},a[t],{area:(e=a[t],e.width*e.height)});var e}).sort(function(t,e){return e.area-t.area}),c=l.filter(function(t){var e=t.width,n=t.height;return e>=i.clientWidth&&n>=i.clientHeight}),h=0<c.length?c[0].key:l[0].key,u=t.split("-")[1];return h+(u?"-"+u:"")}function Jt(t,e,n){var i=3<arguments.length&&void 0!==arguments[3]?arguments[3]:null;return zt(n,i?Xt(e):Ft(e,n),i)}function Zt(t){var e=t.ownerDocument.defaultView.getComputedStyle(t),n=parseFloat(e.marginTop||0)+parseFloat(e.marginBottom||0),i=parseFloat(e.marginLeft||0)+parseFloat(e.marginRight||0);return{width:t.offsetWidth+i,height:t.offsetHeight+n}}function te(t){var e={left:"right",right:"left",bottom:"top",top:"bottom"};return t.replace(/left|right|bottom|top/g,function(t){return e[t]})}function ee(t,e,n){n=n.split("-")[0];var i=Zt(t),o={width:i.width,height:i.height},r=-1!==["right","left"].indexOf(n),s=r?"top":"left",a=r?"left":"top",l=r?"height":"width",c=r?"width":"height";return o[s]=e[s]+e[l]/2-i[l]/2,o[a]=n===a?e[a]-i[c]:e[te(a)],o}function ne(t,e){return Array.prototype.find?t.find(e):t.filter(e)[0]}function ie(t,n,e){return(void 0===e?t:t.slice(0,function(t,e,n){if(Array.prototype.findIndex)return t.findIndex(function(t){return t[e]===n});var 
i=ne(t,function(t){return t[e]===n});return t.indexOf(i)}(t,"name",e))).forEach(function(t){t.function&&console.warn("`modifier.function` is deprecated, use `modifier.fn`!");var e=t.function||t.fn;t.enabled&&Ot(e)&&(n.offsets.popper=Vt(n.offsets.popper),n.offsets.reference=Vt(n.offsets.reference),n=e(n,t))}),n}function oe(t,n){return t.some(function(t){var e=t.name;return t.enabled&&e===n})}function re(t){for(var e=[!1,"ms","Webkit","Moz","O"],n=t.charAt(0).toUpperCase()+t.slice(1),i=0;i<e.length;i++){var o=e[i],r=o?""+o+n:t;if("undefined"!=typeof document.body.style[r])return r}return null}function se(t){var e=t.ownerDocument;return e?e.defaultView:window}function ae(t,e,n,i){n.updateBound=i,se(t).addEventListener("resize",n.updateBound,{passive:!0});var o=Lt(t);return function t(e,n,i,o){var r="BODY"===e.nodeName,s=r?e.ownerDocument.defaultView:e;s.addEventListener(n,i,{passive:!0}),r||t(Lt(s.parentNode),n,i,o),o.push(s)}(o,"scroll",n.updateBound,n.scrollParents),n.scrollElement=o,n.eventsEnabled=!0,n}function le(){var t,e;this.state.eventsEnabled&&(cancelAnimationFrame(this.scheduleUpdate),this.state=(t=this.reference,e=this.state,se(t).removeEventListener("resize",e.updateBound),e.scrollParents.forEach(function(t){t.removeEventListener("scroll",e.updateBound)}),e.updateBound=null,e.scrollParents=[],e.scrollElement=null,e.eventsEnabled=!1,e))}function ce(t){return""!==t&&!isNaN(parseFloat(t))&&isFinite(t)}function he(n,i){Object.keys(i).forEach(function(t){var e="";-1!==["width","height","top","right","bottom","left"].indexOf(t)&&ce(i[t])&&(e="px"),n.style[t]=i[t]+e})}var ue=Tt&&/Firefox/i.test(navigator.userAgent);function fe(t,e,n){var i=ne(t,function(t){return t.name===e}),o=!!i&&t.some(function(t){return t.name===n&&t.enabled&&t.order<i.order});if(!o){var r="`"+e+"`",s="`"+n+"`";console.warn(s+" modifier is required by "+r+" modifier in order to work, be sure to include it before "+r+"!")}return o}var 
de=["auto-start","auto","auto-end","top-start","top","top-end","right-start","right","right-end","bottom-end","bottom","bottom-start","left-end","left","left-start"],pe=de.slice(3);function me(t){var e=1<arguments.length&&void 0!==arguments[1]&&arguments[1],n=pe.indexOf(t),i=pe.slice(n+1).concat(pe.slice(0,n));return e?i.reverse():i}var ge="flip",_e="clockwise",ve="counterclockwise";function ye(t,o,r,e){var s=[0,0],a=-1!==["right","left"].indexOf(e),n=t.split(/(\+|\-)/).map(function(t){return t.trim()}),i=n.indexOf(ne(n,function(t){return-1!==t.search(/,|\s/)}));n[i]&&-1===n[i].indexOf(",")&&console.warn("Offsets separated by white space(s) are deprecated, use a comma (,) instead.");var l=/\s*,\s*|\s+/,c=-1!==i?[n.slice(0,i).concat([n[i].split(l)[0]]),[n[i].split(l)[1]].concat(n.slice(i+1))]:[n];return(c=c.map(function(t,e){var n=(1===e?!a:a)?"height":"width",i=!1;return t.reduce(function(t,e){return""===t[t.length-1]&&-1!==["+","-"].indexOf(e)?(t[t.length-1]=e,i=!0,t):i?(t[t.length-1]+=e,i=!1,t):t.concat(e)},[]).map(function(t){return function(t,e,n,i){var o=t.match(/((?:\-|\+)?\d*\.?\d*)(.*)/),r=+o[1],s=o[2];if(!r)return t;if(0!==s.indexOf("%"))return"vh"!==s&&"vw"!==s?r:("vh"===s?Math.max(document.documentElement.clientHeight,window.innerHeight||0):Math.max(document.documentElement.clientWidth,window.innerWidth||0))/100*r;var a=void 0;switch(s){case"%p":a=n;break;case"%":case"%r":default:a=i}return Vt(a)[e]/100*r}(t,n,o,r)})})).forEach(function(n,i){n.forEach(function(t,e){ce(t)&&(s[i]+=t*("-"===n[e-1]?-1:1))})}),s}var Ee={placement:"bottom",positionFixed:!1,eventsEnabled:!0,removeOnDestroy:!1,onCreate:function(){},onUpdate:function(){},modifiers:{shift:{order:100,enabled:!0,fn:function(t){var e=t.placement,n=e.split("-")[0],i=e.split("-")[1];if(i){var o=t.offsets,r=o.reference,s=o.popper,a=-1!==["bottom","top"].indexOf(n),l=a?"left":"top",c=a?"width":"height",h={start:Kt({},l,r[l]),end:Kt({},l,r[l]+r[c]-s[c])};t.offsets.popper=Qt({},s,h[i])}return 
t}},offset:{order:200,enabled:!0,fn:function(t,e){var n=e.offset,i=t.placement,o=t.offsets,r=o.popper,s=o.reference,a=i.split("-")[0],l=void 0;return l=ce(+n)?[+n,0]:ye(n,r,s,a),"left"===a?(r.top+=l[0],r.left-=l[1]):"right"===a?(r.top+=l[0],r.left+=l[1]):"top"===a?(r.left+=l[0],r.top-=l[1]):"bottom"===a&&(r.left+=l[0],r.top+=l[1]),t.popper=r,t},offset:0},preventOverflow:{order:300,enabled:!0,fn:function(t,i){var e=i.boundariesElement||jt(t.instance.popper);t.instance.reference===e&&(e=jt(e));var n=re("transform"),o=t.instance.popper.style,r=o.top,s=o.left,a=o[n];o.top="",o.left="",o[n]="";var l=Gt(t.instance.popper,t.instance.reference,i.padding,e,t.positionFixed);o.top=r,o.left=s,o[n]=a,i.boundaries=l;var c=i.priority,h=t.offsets.popper,u={primary:function(t){var e=h[t];return h[t]<l[t]&&!i.escapeWithReference&&(e=Math.max(h[t],l[t])),Kt({},t,e)},secondary:function(t){var e="right"===t?"left":"top",n=h[e];return h[t]>l[t]&&!i.escapeWithReference&&(n=Math.min(h[e],l[t]-("right"===t?h.width:h.height))),Kt({},e,n)}};return c.forEach(function(t){var e=-1!==["left","top"].indexOf(t)?"primary":"secondary";h=Qt({},h,u[e](t))}),t.offsets.popper=h,t},priority:["left","right","top","bottom"],padding:5,boundariesElement:"scrollParent"},keepTogether:{order:400,enabled:!0,fn:function(t){var e=t.offsets,n=e.popper,i=e.reference,o=t.placement.split("-")[0],r=Math.floor,s=-1!==["top","bottom"].indexOf(o),a=s?"right":"bottom",l=s?"left":"top",c=s?"width":"height";return n[a]<r(i[l])&&(t.offsets.popper[l]=r(i[l])-n[c]),n[l]>r(i[a])&&(t.offsets.popper[l]=r(i[a])),t}},arrow:{order:500,enabled:!0,fn:function(t,e){var n;if(!fe(t.instance.modifiers,"arrow","keepTogether"))return t;var i=e.element;if("string"==typeof i){if(!(i=t.instance.popper.querySelector(i)))return t}else if(!t.instance.popper.contains(i))return console.warn("WARNING: `arrow.element` must be child of its popper element!"),t;var 
o=t.placement.split("-")[0],r=t.offsets,s=r.popper,a=r.reference,l=-1!==["left","right"].indexOf(o),c=l?"height":"width",h=l?"Top":"Left",u=h.toLowerCase(),f=l?"left":"top",d=l?"bottom":"right",p=Zt(i)[c];a[d]-p<s[u]&&(t.offsets.popper[u]-=s[u]-(a[d]-p)),a[u]+p>s[d]&&(t.offsets.popper[u]+=a[u]+p-s[d]),t.offsets.popper=Vt(t.offsets.popper);var m=a[u]+a[c]/2-p/2,g=Nt(t.instance.popper),_=parseFloat(g["margin"+h],10),v=parseFloat(g["border"+h+"Width"],10),y=m-t.offsets.popper[u]-_-v;return y=Math.max(Math.min(s[c]-p,y),0),t.arrowElement=i,t.offsets.arrow=(Kt(n={},u,Math.round(y)),Kt(n,f,""),n),t},element:"[x-arrow]"},flip:{order:600,enabled:!0,fn:function(p,m){if(oe(p.instance.modifiers,"inner"))return p;if(p.flipped&&p.placement===p.originalPlacement)return p;var g=Gt(p.instance.popper,p.instance.reference,m.padding,m.boundariesElement,p.positionFixed),_=p.placement.split("-")[0],v=te(_),y=p.placement.split("-")[1]||"",E=[];switch(m.behavior){case ge:E=[_,v];break;case _e:E=me(_);break;case ve:E=me(_,!0);break;default:E=m.behavior}return E.forEach(function(t,e){if(_!==t||E.length===e+1)return p;_=p.placement.split("-")[0],v=te(_);var n,i=p.offsets.popper,o=p.offsets.reference,r=Math.floor,s="left"===_&&r(i.right)>r(o.left)||"right"===_&&r(i.left)<r(o.right)||"top"===_&&r(i.bottom)>r(o.top)||"bottom"===_&&r(i.top)<r(o.bottom),a=r(i.left)<r(g.left),l=r(i.right)>r(g.right),c=r(i.top)<r(g.top),h=r(i.bottom)>r(g.bottom),u="left"===_&&a||"right"===_&&l||"top"===_&&c||"bottom"===_&&h,f=-1!==["top","bottom"].indexOf(_),d=!!m.flipVariations&&(f&&"start"===y&&a||f&&"end"===y&&l||!f&&"start"===y&&c||!f&&"end"===y&&h);(s||u||d)&&(p.flipped=!0,(s||u)&&(_=E[e+1]),d&&(y="end"===(n=y)?"start":"start"===n?"end":n),p.placement=_+(y?"-"+y:""),p.offsets.popper=Qt({},p.offsets.popper,ee(p.instance.popper,p.offsets.reference,p.placement)),p=ie(p.instance.modifiers,p,"flip"))}),p},behavior:"flip",padding:5,boundariesElement:"viewport"},inner:{order:700,enabled:!1,fn:function(t){var 
e=t.placement,n=e.split("-")[0],i=t.offsets,o=i.popper,r=i.reference,s=-1!==["left","right"].indexOf(n),a=-1===["top","left"].indexOf(n);return o[s?"left":"top"]=r[n]-(a?o[s?"width":"height"]:0),t.placement=te(e),t.offsets.popper=Vt(o),t}},hide:{order:800,enabled:!0,fn:function(t){if(!fe(t.instance.modifiers,"hide","preventOverflow"))return t;var e=t.offsets.reference,n=ne(t.instance.modifiers,function(t){return"preventOverflow"===t.name}).boundaries;if(e.bottom<n.top||e.left>n.right||e.top>n.bottom||e.right<n.left){if(!0===t.hide)return t;t.hide=!0,t.attributes["x-out-of-boundaries"]=""}else{if(!1===t.hide)return t;t.hide=!1,t.attributes["x-out-of-boundaries"]=!1}return t}},computeStyle:{order:850,enabled:!0,fn:function(t,e){var n=e.x,i=e.y,o=t.offsets.popper,r=ne(t.instance.modifiers,function(t){return"applyStyle"===t.name}).gpuAcceleration;void 0!==r&&console.warn("WARNING: `gpuAcceleration` option moved to `computeStyle` modifier and will not be supported in future versions of Popper.js!");var s,a,l,c,h,u,f,d,p,m,g,_,v,y,E=void 0!==r?r:e.gpuAcceleration,b=jt(t.instance.popper),w=Yt(b),C={position:o.position},T=(s=t,a=window.devicePixelRatio<2||!ue,l=s.offsets,c=l.popper,h=l.reference,u=Math.round,f=Math.floor,d=function(t){return t},p=u(h.width),m=u(c.width),g=-1!==["left","right"].indexOf(s.placement),_=-1!==s.placement.indexOf("-"),y=a?u:d,{left:(v=a?g||_||p%2==m%2?u:f:d)(p%2==1&&m%2==1&&!_&&a?c.left-1:c.left),top:y(c.top),bottom:y(c.bottom),right:v(c.right)}),S="bottom"===n?"top":"bottom",D="right"===i?"left":"right",I=re("transform"),A=void 0,O=void 0;if(O="bottom"===S?"HTML"===b.nodeName?-b.clientHeight+T.bottom:-w.height+T.bottom:T.top,A="right"===D?"HTML"===b.nodeName?-b.clientWidth+T.right:-w.width+T.right:T.left,E&&I)C[I]="translate3d("+A+"px, "+O+"px, 0)",C[S]=0,C[D]=0,C.willChange="transform";else{var N="bottom"===S?-1:1,k="right"===D?-1:1;C[S]=O*N,C[D]=A*k,C.willChange=S+", "+D}var L={"x-placement":t.placement};return 
t.attributes=Qt({},L,t.attributes),t.styles=Qt({},C,t.styles),t.arrowStyles=Qt({},t.offsets.arrow,t.arrowStyles),t},gpuAcceleration:!0,x:"bottom",y:"right"},applyStyle:{order:900,enabled:!0,fn:function(t){var e,n;return he(t.instance.popper,t.styles),e=t.instance.popper,n=t.attributes,Object.keys(n).forEach(function(t){!1!==n[t]?e.setAttribute(t,n[t]):e.removeAttribute(t)}),t.arrowElement&&Object.keys(t.arrowStyles).length&&he(t.arrowElement,t.arrowStyles),t},onLoad:function(t,e,n,i,o){var r=Jt(o,e,t,n.positionFixed),s=$t(n.placement,r,e,t,n.modifiers.flip.boundariesElement,n.modifiers.flip.padding);return e.setAttribute("x-placement",s),he(e,{position:n.positionFixed?"fixed":"absolute"}),n},gpuAcceleration:void 0}}},be=function(){function r(t,e){var n=this,i=2<arguments.length&&void 0!==arguments[2]?arguments[2]:{};!function(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}(this,r),this.scheduleUpdate=function(){return requestAnimationFrame(n.update)},this.update=At(this.update.bind(this)),this.options=Qt({},r.Defaults,i),this.state={isDestroyed:!1,isCreated:!1,scrollParents:[]},this.reference=t&&t.jquery?t[0]:t,this.popper=e&&e.jquery?e[0]:e,this.options.modifiers={},Object.keys(Qt({},r.Defaults.modifiers,i.modifiers)).forEach(function(t){n.options.modifiers[t]=Qt({},r.Defaults.modifiers[t]||{},i.modifiers?i.modifiers[t]:{})}),this.modifiers=Object.keys(this.options.modifiers).map(function(t){return Qt({name:t},n.options.modifiers[t])}).sort(function(t,e){return t.order-e.order}),this.modifiers.forEach(function(t){t.enabled&&Ot(t.onLoad)&&t.onLoad(n.reference,n.popper,n.options,t,n.state)}),this.update();var o=this.options.eventsEnabled;o&&this.enableEventListeners(),this.state.eventsEnabled=o}return qt(r,[{key:"update",value:function(){return function(){if(!this.state.isDestroyed){var 
// NOTE(review): Popper.js v1 instance methods (minified): update() pipeline (compute reference/popper offsets, run modifiers, fire onCreate/onUpdate), destroy(), enable/disableEventListeners; then Bootstrap Dropdown constants begin. The trailing ".dropdown" below is a string literal split by line wrapping (it continues with "form" on the next line) — keep this segment byte-identical.
t={instance:this,styles:{},arrowStyles:{},attributes:{},flipped:!1,offsets:{}};t.offsets.reference=Jt(this.state,this.popper,this.reference,this.options.positionFixed),t.placement=$t(this.options.placement,t.offsets.reference,this.popper,this.reference,this.options.modifiers.flip.boundariesElement,this.options.modifiers.flip.padding),t.originalPlacement=t.placement,t.positionFixed=this.options.positionFixed,t.offsets.popper=ee(this.popper,t.offsets.reference,t.placement),t.offsets.popper.position=this.options.positionFixed?"fixed":"absolute",t=ie(this.modifiers,t),this.state.isCreated?this.options.onUpdate(t):(this.state.isCreated=!0,this.options.onCreate(t))}}.call(this)}},{key:"destroy",value:function(){return function(){return this.state.isDestroyed=!0,oe(this.modifiers,"applyStyle")&&(this.popper.removeAttribute("x-placement"),this.popper.style.position="",this.popper.style.top="",this.popper.style.left="",this.popper.style.right="",this.popper.style.bottom="",this.popper.style.willChange="",this.popper.style[re("transform")]=""),this.disableEventListeners(),this.options.removeOnDestroy&&this.popper.parentNode.removeChild(this.popper),this}.call(this)}},{key:"enableEventListeners",value:function(){return function(){this.state.eventsEnabled||(this.state=ae(this.reference,this.options,this.state,this.scheduleUpdate))}.call(this)}},{key:"disableEventListeners",value:function(){return le.call(this)}}]),r}();be.Utils=("undefined"!=typeof window?window:global).PopperUtils,be.placements=de,be.Defaults=Ee;var we="dropdown",Ce="bs.dropdown",Te="."+Ce,Se=".data-api",De=p.fn[we],Ie=new RegExp("38|40|27"),Ae={HIDE:"hide"+Te,HIDDEN:"hidden"+Te,SHOW:"show"+Te,SHOWN:"shown"+Te,CLICK:"click"+Te,CLICK_DATA_API:"click"+Te+Se,KEYDOWN_DATA_API:"keydown"+Te+Se,KEYUP_DATA_API:"keyup"+Te+Se},Oe="disabled",Ne="show",ke="dropup",Le="dropright",xe="dropleft",Pe="dropdown-menu-right",He="position-static",je='[data-toggle="dropdown"]',Re=".dropdown 
form",Fe=".dropdown-menu",Me=".navbar-nav",We=".dropdown-menu .dropdown-item:not(.disabled):not(:disabled)",Ue="top-start",Be="top-end",qe="bottom-start",Ke="bottom-end",Qe="right-start",Ve="left-start",Ye={offset:0,flip:!0,boundary:"scrollParent",reference:"toggle",display:"dynamic"},ze={offset:"(number|string|function)",flip:"boolean",boundary:"(string|element)",reference:"(string|element)",display:"string"},/* Dropdown class (minified as `c`/`Xe`): toggle/show/hide/dispose/update, Popper-backed positioning, static _clearMenus (closes all open menus) and _dataApiKeydownHandler (arrow-key/ESC navigation). Version string "4.3.1" is exposed below. */Xe=function(){function c(t,e){this._element=t,this._popper=null,this._config=this._getConfig(e),this._menu=this._getMenuElement(),this._inNavbar=this._detectNavbar(),this._addEventListeners()}var t=c.prototype;return t.toggle=function(){if(!this._element.disabled&&!p(this._element).hasClass(Oe)){var t=c._getParentFromElement(this._element),e=p(this._menu).hasClass(Ne);if(c._clearMenus(),!e){var n={relatedTarget:this._element},i=p.Event(Ae.SHOW,n);if(p(t).trigger(i),!i.isDefaultPrevented()){if(!this._inNavbar){if("undefined"==typeof be)throw new TypeError("Bootstrap's dropdowns require Popper.js (https://popper.js.org/)");var o=this._element;"parent"===this._config.reference?o=t:m.isElement(this._config.reference)&&(o=this._config.reference,"undefined"!=typeof this._config.reference.jquery&&(o=this._config.reference[0])),"scrollParent"!==this._config.boundary&&p(t).addClass(He),this._popper=new be(o,this._menu,this._getPopperConfig())}"ontouchstart"in document.documentElement&&0===p(t).closest(Me).length&&p(document.body).children().on("mouseover",null,p.noop),this._element.focus(),this._element.setAttribute("aria-expanded",!0),p(this._menu).toggleClass(Ne),p(t).toggleClass(Ne).trigger(p.Event(Ae.SHOWN,n))}}}},t.show=function(){if(!(this._element.disabled||p(this._element).hasClass(Oe)||p(this._menu).hasClass(Ne))){var 
t={relatedTarget:this._element},e=p.Event(Ae.SHOW,t),n=c._getParentFromElement(this._element);p(n).trigger(e),e.isDefaultPrevented()||(p(this._menu).toggleClass(Ne),p(n).toggleClass(Ne).trigger(p.Event(Ae.SHOWN,t)))}},t.hide=function(){if(!this._element.disabled&&!p(this._element).hasClass(Oe)&&p(this._menu).hasClass(Ne)){var t={relatedTarget:this._element},e=p.Event(Ae.HIDE,t),n=c._getParentFromElement(this._element);p(n).trigger(e),e.isDefaultPrevented()||(p(this._menu).toggleClass(Ne),p(n).toggleClass(Ne).trigger(p.Event(Ae.HIDDEN,t)))}},t.dispose=function(){p.removeData(this._element,Ce),p(this._element).off(Te),this._element=null,(this._menu=null)!==this._popper&&(this._popper.destroy(),this._popper=null)},t.update=function(){this._inNavbar=this._detectNavbar(),null!==this._popper&&this._popper.scheduleUpdate()},t._addEventListeners=function(){var e=this;p(this._element).on(Ae.CLICK,function(t){t.preventDefault(),t.stopPropagation(),e.toggle()})},t._getConfig=function(t){return t=l({},this.constructor.Default,p(this._element).data(),t),m.typeCheckConfig(we,t,this.constructor.DefaultType),t},t._getMenuElement=function(){if(!this._menu){var t=c._getParentFromElement(this._element);t&&(this._menu=t.querySelector(Fe))}return this._menu},t._getPlacement=function(){var t=p(this._element.parentNode),e=qe;return t.hasClass(ke)?(e=Ue,p(this._menu).hasClass(Pe)&&(e=Be)):t.hasClass(Le)?e=Qe:t.hasClass(xe)?e=Ve:p(this._menu).hasClass(Pe)&&(e=Ke),e},t._detectNavbar=function(){return 0<p(this._element).closest(".navbar").length},t._getOffset=function(){var e=this,t={};return"function"==typeof this._config.offset?t.fn=function(t){return t.offsets=l({},t.offsets,e._config.offset(t.offsets,e._element)||{}),t}:t.offset=this._config.offset,t},t._getPopperConfig=function(){var 
t={placement:this._getPlacement(),modifiers:{offset:this._getOffset(),flip:{enabled:this._config.flip},preventOverflow:{boundariesElement:this._config.boundary}}};return"static"===this._config.display&&(t.modifiers.applyStyle={enabled:!1}),t},c._jQueryInterface=function(e){return this.each(function(){var t=p(this).data(Ce);if(t||(t=new c(this,"object"==typeof e?e:null),p(this).data(Ce,t)),"string"==typeof e){if("undefined"==typeof t[e])throw new TypeError('No method named "'+e+'"');t[e]()}})},c._clearMenus=function(t){if(!t||3!==t.which&&("keyup"!==t.type||9===t.which))for(var e=[].slice.call(document.querySelectorAll(je)),n=0,i=e.length;n<i;n++){var o=c._getParentFromElement(e[n]),r=p(e[n]).data(Ce),s={relatedTarget:e[n]};if(t&&"click"===t.type&&(s.clickEvent=t),r){var a=r._menu;if(p(o).hasClass(Ne)&&!(t&&("click"===t.type&&/input|textarea/i.test(t.target.tagName)||"keyup"===t.type&&9===t.which)&&p.contains(o,t.target))){var l=p.Event(Ae.HIDE,s);p(o).trigger(l),l.isDefaultPrevented()||("ontouchstart"in document.documentElement&&p(document.body).children().off("mouseover",null,p.noop),e[n].setAttribute("aria-expanded","false"),p(a).removeClass(Ne),p(o).removeClass(Ne).trigger(p.Event(Ae.HIDDEN,s)))}}}},c._getParentFromElement=function(t){var e,n=m.getSelectorFromElement(t);return n&&(e=document.querySelector(n)),e||t.parentNode},c._dataApiKeydownHandler=function(t){if((/input|textarea/i.test(t.target.tagName)?!(32===t.which||27!==t.which&&(40!==t.which&&38!==t.which||p(t.target).closest(Fe).length)):Ie.test(t.which))&&(t.preventDefault(),t.stopPropagation(),!this.disabled&&!p(this).hasClass(Oe))){var e=c._getParentFromElement(this),n=p(e).hasClass(Ne);if(n&&(!n||27!==t.which&&32!==t.which)){var i=[].slice.call(e.querySelectorAll(We));if(0!==i.length){var o=i.indexOf(t.target);38===t.which&&0<o&&o--,40===t.which&&o<i.length-1&&o++,o<0&&(o=0),i[o].focus()}}else{if(27===t.which){var 
r=e.querySelector(je);p(r).trigger("focus")}p(this).trigger("click")}}},s(c,null,[{key:"VERSION",get:function(){return"4.3.1"}},{key:"Default",get:function(){return Ye}},{key:"DefaultType",get:function(){return ze}}]),c}();p(document).on(Ae.KEYDOWN_DATA_API,je,Xe._dataApiKeydownHandler).on(Ae.KEYDOWN_DATA_API,Fe,Xe._dataApiKeydownHandler).on(Ae.CLICK_DATA_API+" "+Ae.KEYUP_DATA_API,Xe._clearMenus).on(Ae.CLICK_DATA_API,je,function(t){t.preventDefault(),t.stopPropagation(),Xe._jQueryInterface.call(p(this),"toggle")}).on(Ae.CLICK_DATA_API,Re,function(t){t.stopPropagation()}),p.fn[we]=Xe._jQueryInterface,p.fn[we].Constructor=Xe,p.fn[we].noConflict=function(){return p.fn[we]=De,Xe._jQueryInterface};/* --- Bootstrap modal plugin: jQuery names, config defaults (tn) / types (en), event keys (nn), CSS class names and selectors. --- */var Ge="modal",$e="bs.modal",Je="."+$e,Ze=p.fn[Ge],tn={backdrop:!0,keyboard:!0,focus:!0,show:!0},en={backdrop:"(boolean|string)",keyboard:"boolean",focus:"boolean",show:"boolean"},nn={HIDE:"hide"+Je,HIDDEN:"hidden"+Je,SHOW:"show"+Je,SHOWN:"shown"+Je,FOCUSIN:"focusin"+Je,RESIZE:"resize"+Je,CLICK_DISMISS:"click.dismiss"+Je,KEYDOWN_DISMISS:"keydown.dismiss"+Je,MOUSEUP_DISMISS:"mouseup.dismiss"+Je,MOUSEDOWN_DISMISS:"mousedown.dismiss"+Je,CLICK_DATA_API:"click"+Je+".data-api"},on="modal-dialog-scrollable",rn="modal-scrollbar-measure",sn="modal-backdrop",an="modal-open",ln="fade",cn="show",hn=".modal-dialog",un=".modal-body",fn='[data-toggle="modal"]',dn='[data-dismiss="modal"]',pn=".fixed-top, .fixed-bottom, .is-fixed, .sticky-top",mn=".sticky-top",/* Modal class (minified as `o`/`gn`); constructor only records config/state, the show/hide machinery continues on the following lines. */gn=function(){function o(t,e){this._config=this._getConfig(e),this._element=t,this._dialog=t.querySelector(hn),this._backdrop=null,this._isShown=!1,this._isBodyOverflowing=!1,this._ignoreBackdropClick=!1,this._isTransitioning=!1,this._scrollbarWidth=0}var t=o.prototype;return t.toggle=function(t){return this._isShown?this.hide():this.show(t)},t.show=function(t){var e=this;if(!this._isShown&&!this._isTransitioning){p(this._element).hasClass(ln)&&(this._isTransitioning=!0);var 
/* Modal class methods: show/hide lifecycle with HIDE/SHOWN events, backdrop creation/teardown, focus enforcement, ESC/resize handlers, and body-scrollbar padding compensation (_checkScrollbar/_setScrollbar/_resetScrollbar). */n=p.Event(nn.SHOW,{relatedTarget:t});p(this._element).trigger(n),this._isShown||n.isDefaultPrevented()||(this._isShown=!0,this._checkScrollbar(),this._setScrollbar(),this._adjustDialog(),this._setEscapeEvent(),this._setResizeEvent(),p(this._element).on(nn.CLICK_DISMISS,dn,function(t){return e.hide(t)}),p(this._dialog).on(nn.MOUSEDOWN_DISMISS,function(){p(e._element).one(nn.MOUSEUP_DISMISS,function(t){p(t.target).is(e._element)&&(e._ignoreBackdropClick=!0)})}),this._showBackdrop(function(){return e._showElement(t)}))}},t.hide=function(t){var e=this;if(t&&t.preventDefault(),this._isShown&&!this._isTransitioning){var n=p.Event(nn.HIDE);if(p(this._element).trigger(n),this._isShown&&!n.isDefaultPrevented()){this._isShown=!1;var i=p(this._element).hasClass(ln);if(i&&(this._isTransitioning=!0),this._setEscapeEvent(),this._setResizeEvent(),p(document).off(nn.FOCUSIN),p(this._element).removeClass(cn),p(this._element).off(nn.CLICK_DISMISS),p(this._dialog).off(nn.MOUSEDOWN_DISMISS),i){var o=m.getTransitionDurationFromElement(this._element);p(this._element).one(m.TRANSITION_END,function(t){return e._hideModal(t)}).emulateTransitionEnd(o)}else this._hideModal()}}},t.dispose=function(){[window,this._element,this._dialog].forEach(function(t){return p(t).off(Je)}),p(document).off(nn.FOCUSIN),p.removeData(this._element,$e),this._config=null,this._element=null,this._dialog=null,this._backdrop=null,this._isShown=null,this._isBodyOverflowing=null,this._ignoreBackdropClick=null,this._isTransitioning=null,this._scrollbarWidth=null},t.handleUpdate=function(){this._adjustDialog()},t._getConfig=function(t){return t=l({},tn,t),m.typeCheckConfig(Ge,t,en),t},t._showElement=function(t){var 
e=this,n=p(this._element).hasClass(ln);this._element.parentNode&&this._element.parentNode.nodeType===Node.ELEMENT_NODE||document.body.appendChild(this._element),this._element.style.display="block",this._element.removeAttribute("aria-hidden"),this._element.setAttribute("aria-modal",!0),p(this._dialog).hasClass(on)?this._dialog.querySelector(un).scrollTop=0:this._element.scrollTop=0,n&&m.reflow(this._element),p(this._element).addClass(cn),this._config.focus&&this._enforceFocus();var i=p.Event(nn.SHOWN,{relatedTarget:t}),o=function(){e._config.focus&&e._element.focus(),e._isTransitioning=!1,p(e._element).trigger(i)};if(n){var r=m.getTransitionDurationFromElement(this._dialog);p(this._dialog).one(m.TRANSITION_END,o).emulateTransitionEnd(r)}else o()},t._enforceFocus=function(){var e=this;p(document).off(nn.FOCUSIN).on(nn.FOCUSIN,function(t){document!==t.target&&e._element!==t.target&&0===p(e._element).has(t.target).length&&e._element.focus()})},t._setEscapeEvent=function(){var e=this;this._isShown&&this._config.keyboard?p(this._element).on(nn.KEYDOWN_DISMISS,function(t){27===t.which&&(t.preventDefault(),e.hide())}):this._isShown||p(this._element).off(nn.KEYDOWN_DISMISS)},t._setResizeEvent=function(){var e=this;this._isShown?p(window).on(nn.RESIZE,function(t){return e.handleUpdate(t)}):p(window).off(nn.RESIZE)},t._hideModal=function(){var t=this;this._element.style.display="none",this._element.setAttribute("aria-hidden",!0),this._element.removeAttribute("aria-modal"),this._isTransitioning=!1,this._showBackdrop(function(){p(document.body).removeClass(an),t._resetAdjustments(),t._resetScrollbar(),p(t._element).trigger(nn.HIDDEN)})},t._removeBackdrop=function(){this._backdrop&&(p(this._backdrop).remove(),this._backdrop=null)},t._showBackdrop=function(t){var 
e=this,n=p(this._element).hasClass(ln)?ln:"";if(this._isShown&&this._config.backdrop){if(this._backdrop=document.createElement("div"),this._backdrop.className=sn,n&&this._backdrop.classList.add(n),p(this._backdrop).appendTo(document.body),p(this._element).on(nn.CLICK_DISMISS,function(t){e._ignoreBackdropClick?e._ignoreBackdropClick=!1:t.target===t.currentTarget&&("static"===e._config.backdrop?e._element.focus():e.hide())}),n&&m.reflow(this._backdrop),p(this._backdrop).addClass(cn),!t)return;if(!n)return void t();var i=m.getTransitionDurationFromElement(this._backdrop);p(this._backdrop).one(m.TRANSITION_END,t).emulateTransitionEnd(i)}else if(!this._isShown&&this._backdrop){p(this._backdrop).removeClass(cn);var o=function(){e._removeBackdrop(),t&&t()};if(p(this._element).hasClass(ln)){var r=m.getTransitionDurationFromElement(this._backdrop);p(this._backdrop).one(m.TRANSITION_END,o).emulateTransitionEnd(r)}else o()}else t&&t()},t._adjustDialog=function(){var t=this._element.scrollHeight>document.documentElement.clientHeight;!this._isBodyOverflowing&&t&&(this._element.style.paddingLeft=this._scrollbarWidth+"px"),this._isBodyOverflowing&&!t&&(this._element.style.paddingRight=this._scrollbarWidth+"px")},t._resetAdjustments=function(){this._element.style.paddingLeft="",this._element.style.paddingRight=""},t._checkScrollbar=function(){var t=document.body.getBoundingClientRect();this._isBodyOverflowing=t.left+t.right<window.innerWidth,this._scrollbarWidth=this._getScrollbarWidth()},t._setScrollbar=function(){var o=this;if(this._isBodyOverflowing){var t=[].slice.call(document.querySelectorAll(pn)),e=[].slice.call(document.querySelectorAll(mn));p(t).each(function(t,e){var n=e.style.paddingRight,i=p(e).css("padding-right");p(e).data("padding-right",n).css("padding-right",parseFloat(i)+o._scrollbarWidth+"px")}),p(e).each(function(t,e){var n=e.style.marginRight,i=p(e).css("margin-right");p(e).data("margin-right",n).css("margin-right",parseFloat(i)-o._scrollbarWidth+"px")});var 
n=document.body.style.paddingRight,i=p(document.body).css("padding-right");p(document.body).data("padding-right",n).css("padding-right",parseFloat(i)+this._scrollbarWidth+"px")}p(document.body).addClass(an)},t._resetScrollbar=function(){var t=[].slice.call(document.querySelectorAll(pn));p(t).each(function(t,e){var n=p(e).data("padding-right");p(e).removeData("padding-right"),e.style.paddingRight=n||""});var e=[].slice.call(document.querySelectorAll(""+mn));p(e).each(function(t,e){var n=p(e).data("margin-right");"undefined"!=typeof n&&p(e).css("margin-right",n).removeData("margin-right")});var n=p(document.body).data("padding-right");p(document.body).removeData("padding-right"),document.body.style.paddingRight=n||""},t._getScrollbarWidth=function(){var t=document.createElement("div");t.className=rn,document.body.appendChild(t);var e=t.getBoundingClientRect().width-t.clientWidth;return document.body.removeChild(t),e},o._jQueryInterface=function(n,i){return this.each(function(){var t=p(this).data($e),e=l({},tn,p(this).data(),"object"==typeof n&&n?n:{});if(t||(t=new o(this,e),p(this).data($e,t)),"string"==typeof n){if("undefined"==typeof t[n])throw new TypeError('No method named "'+n+'"');t[n](i)}else e.show&&t.show(i)})},s(o,null,[{key:"VERSION",get:function(){return"4.3.1"}},{key:"Default",get:function(){return tn}}]),o}();p(document).on(nn.CLICK_DATA_API,fn,function(t){var e,n=this,i=m.getSelectorFromElement(this);i&&(e=document.querySelector(i));var o=p(e).data($e)?"toggle":l({},p(e).data(),p(this).data());"A"!==this.tagName&&"AREA"!==this.tagName||t.preventDefault();var r=p(e).one(nn.SHOW,function(t){t.isDefaultPrevented()||r.one(nn.HIDDEN,function(){p(n).is(":visible")&&n.focus()})});gn._jQueryInterface.call(p(e),o,this)}),p.fn[Ge]=gn._jQueryInterface,p.fn[Ge].Constructor=gn,p.fn[Ge].noConflict=function(){return p.fn[Ge]=Ze,gn._jQueryInterface};var 
/* URI-bearing attribute list (_n), per-tag attribute whitelist (vn), and safe-URL / data-URL patterns (yn, En) used by the sanitizer below. */_n=["background","cite","href","itemtype","longdesc","poster","src","xlink:href"],vn={"*":["class","dir","id","lang","role",/^aria-[\w-]*$/i],a:["target","href","title","rel"],area:[],b:[],br:[],col:[],code:[],div:[],em:[],hr:[],h1:[],h2:[],h3:[],h4:[],h5:[],h6:[],i:[],img:["src","alt","title","width","height"],li:[],ol:[],p:[],pre:[],s:[],small:[],span:[],sub:[],sup:[],strong:[],u:[],ul:[]},yn=/^(?:(?:https?|mailto|ftp|tel|file):|[^&:/?#]*(?:[/?#]|$))/gi,En=/^data:(?:image\/(?:bmp|gif|jpeg|jpg|png|tiff|webp)|video\/(?:mpeg|mp4|ogg|webm)|audio\/(?:mp3|oga|ogg|opus));base64,[a-z0-9+/]+=*$/i;/* sanitizeHtml: parses `t` with DOMParser, removes elements not in whitelist `s` and attributes failing the allowed-attribute/URL checks, then returns the cleaned innerHTML; a custom `e` sanitize function, when provided, replaces the whole routine. */function bn(t,s,e){if(0===t.length)return t;if(e&&"function"==typeof e)return e(t);for(var n=(new window.DOMParser).parseFromString(t,"text/html"),a=Object.keys(s),l=[].slice.call(n.body.querySelectorAll("*")),i=function(t,e){var n=l[t],i=n.nodeName.toLowerCase();if(-1===a.indexOf(n.nodeName.toLowerCase()))return n.parentNode.removeChild(n),"continue";var o=[].slice.call(n.attributes),r=[].concat(s["*"]||[],s[i]||[]);o.forEach(function(t){(function(t,e){var n=t.nodeName.toLowerCase();if(-1!==e.indexOf(n))return-1===_n.indexOf(n)||Boolean(t.nodeValue.match(yn)||t.nodeValue.match(En));for(var i=e.filter(function(t){return t instanceof RegExp}),o=0,r=i.length;o<r;o++)if(n.match(i[o]))return!0;return!1})(t,r)||n.removeAttribute(t.nodeName)})},o=0,r=l.length;o<r;o++)i(o);return n.body.innerHTML}/* --- Bootstrap tooltip plugin: jQuery names and attachment-class regex (In, completed on the next line). --- */var wn="tooltip",Cn="bs.tooltip",Tn="."+Cn,Sn=p.fn[wn],Dn="bs-tooltip",In=new 
RegExp("(^|\\s)"+Dn+"\\S+","g"),An=["sanitize","whiteList","sanitizeFn"],On={animation:"boolean",template:"string",title:"(string|element|function)",trigger:"string",delay:"(number|object)",html:"boolean",selector:"(string|boolean)",placement:"(string|function)",offset:"(number|string|function)",container:"(string|element|boolean)",fallbackPlacement:"(string|array)",boundary:"(string|element)",sanitize:"boolean",sanitizeFn:"(null|function)",whiteList:"object"},Nn={AUTO:"auto",TOP:"top",RIGHT:"right",BOTTOM:"bottom",LEFT:"left"},kn={animation:!0,template:'<div class="tooltip" role="tooltip"><div class="arrow"></div><div class="tooltip-inner"></div></div>',trigger:"hover focus",title:"",delay:0,html:!1,selector:!1,placement:"top",offset:0,container:!1,fallbackPlacement:"flip",boundary:"scrollParent",sanitize:!0,sanitizeFn:null,whiteList:vn},Ln="show",xn="out",Pn={HIDE:"hide"+Tn,HIDDEN:"hidden"+Tn,SHOW:"show"+Tn,SHOWN:"shown"+Tn,INSERTED:"inserted"+Tn,CLICK:"click"+Tn,FOCUSIN:"focusin"+Tn,FOCUSOUT:"focusout"+Tn,MOUSEENTER:"mouseenter"+Tn,MOUSELEAVE:"mouseleave"+Tn},Hn="fade",jn="show",Rn=".tooltip-inner",Fn=".arrow",Mn="hover",Wn="focus",Un="click",Bn="manual",/* Tooltip class (minified as `i`/`qn`): requires Popper (`be`), manages enable/disable/toggle, delayed _enter/_leave hover state, Popper-positioned show/hide, and sanitized template content via bn. Popover (later in the file) extends it. */qn=function(){function i(t,e){if("undefined"==typeof be)throw new TypeError("Bootstrap's tooltips require Popper.js (https://popper.js.org/)");this._isEnabled=!0,this._timeout=0,this._hoverState="",this._activeTrigger={},this._popper=null,this.element=t,this.config=this._getConfig(e),this.tip=null,this._setListeners()}var t=i.prototype;return t.enable=function(){this._isEnabled=!0},t.disable=function(){this._isEnabled=!1},t.toggleEnabled=function(){this._isEnabled=!this._isEnabled},t.toggle=function(t){if(this._isEnabled)if(t){var e=this.constructor.DATA_KEY,n=p(t.currentTarget).data(e);n||(n=new 
this.constructor(t.currentTarget,this._getDelegateConfig()),p(t.currentTarget).data(e,n)),n._activeTrigger.click=!n._activeTrigger.click,n._isWithActiveTrigger()?n._enter(null,n):n._leave(null,n)}else{if(p(this.getTipElement()).hasClass(jn))return void this._leave(null,this);this._enter(null,this)}},t.dispose=function(){clearTimeout(this._timeout),p.removeData(this.element,this.constructor.DATA_KEY),p(this.element).off(this.constructor.EVENT_KEY),p(this.element).closest(".modal").off("hide.bs.modal"),this.tip&&p(this.tip).remove(),this._isEnabled=null,this._timeout=null,this._hoverState=null,(this._activeTrigger=null)!==this._popper&&this._popper.destroy(),this._popper=null,this.element=null,this.config=null,this.tip=null},t.show=function(){var e=this;if("none"===p(this.element).css("display"))throw new Error("Please use show on visible elements");var t=p.Event(this.constructor.Event.SHOW);if(this.isWithContent()&&this._isEnabled){p(this.element).trigger(t);var n=m.findShadowRoot(this.element),i=p.contains(null!==n?n:this.element.ownerDocument.documentElement,this.element);if(t.isDefaultPrevented()||!i)return;var o=this.getTipElement(),r=m.getUID(this.constructor.NAME);o.setAttribute("id",r),this.element.setAttribute("aria-describedby",r),this.setContent(),this.config.animation&&p(o).addClass(Hn);var s="function"==typeof this.config.placement?this.config.placement.call(this,o,this.element):this.config.placement,a=this._getAttachment(s);this.addAttachmentClass(a);var l=this._getContainer();p(o).data(this.constructor.DATA_KEY,this),p.contains(this.element.ownerDocument.documentElement,this.tip)||p(o).appendTo(l),p(this.element).trigger(this.constructor.Event.INSERTED),this._popper=new 
be(this.element,o,{placement:a,modifiers:{offset:this._getOffset(),flip:{behavior:this.config.fallbackPlacement},arrow:{element:Fn},preventOverflow:{boundariesElement:this.config.boundary}},onCreate:function(t){t.originalPlacement!==t.placement&&e._handlePopperPlacementChange(t)},onUpdate:function(t){return e._handlePopperPlacementChange(t)}}),p(o).addClass(jn),"ontouchstart"in document.documentElement&&p(document.body).children().on("mouseover",null,p.noop);var c=function(){e.config.animation&&e._fixTransition();var t=e._hoverState;e._hoverState=null,p(e.element).trigger(e.constructor.Event.SHOWN),t===xn&&e._leave(null,e)};if(p(this.tip).hasClass(Hn)){var h=m.getTransitionDurationFromElement(this.tip);p(this.tip).one(m.TRANSITION_END,c).emulateTransitionEnd(h)}else c()}},t.hide=function(t){var e=this,n=this.getTipElement(),i=p.Event(this.constructor.Event.HIDE),o=function(){e._hoverState!==Ln&&n.parentNode&&n.parentNode.removeChild(n),e._cleanTipClass(),e.element.removeAttribute("aria-describedby"),p(e.element).trigger(e.constructor.Event.HIDDEN),null!==e._popper&&e._popper.destroy(),t&&t()};if(p(this.element).trigger(i),!i.isDefaultPrevented()){if(p(n).removeClass(jn),"ontouchstart"in document.documentElement&&p(document.body).children().off("mouseover",null,p.noop),this._activeTrigger[Un]=!1,this._activeTrigger[Wn]=!1,this._activeTrigger[Mn]=!1,p(this.tip).hasClass(Hn)){var r=m.getTransitionDurationFromElement(n);p(n).one(m.TRANSITION_END,o).emulateTransitionEnd(r)}else o();this._hoverState=""}},t.update=function(){null!==this._popper&&this._popper.scheduleUpdate()},t.isWithContent=function(){return Boolean(this.getTitle())},t.addAttachmentClass=function(t){p(this.getTipElement()).addClass(Dn+"-"+t)},t.getTipElement=function(){return this.tip=this.tip||p(this.config.template)[0],this.tip},t.setContent=function(){var t=this.getTipElement();this.setElementContent(p(t.querySelectorAll(Rn)),this.getTitle()),p(t).removeClass(Hn+" 
"+jn)},t.setElementContent=function(t,e){"object"!=typeof e||!e.nodeType&&!e.jquery?this.config.html?(this.config.sanitize&&(e=bn(e,this.config.whiteList,this.config.sanitizeFn)),t.html(e)):t.text(e):this.config.html?p(e).parent().is(t)||t.empty().append(e):t.text(p(e).text())},t.getTitle=function(){var t=this.element.getAttribute("data-original-title");return t||(t="function"==typeof this.config.title?this.config.title.call(this.element):this.config.title),t},t._getOffset=function(){var e=this,t={};return"function"==typeof this.config.offset?t.fn=function(t){return t.offsets=l({},t.offsets,e.config.offset(t.offsets,e.element)||{}),t}:t.offset=this.config.offset,t},t._getContainer=function(){return!1===this.config.container?document.body:m.isElement(this.config.container)?p(this.config.container):p(document).find(this.config.container)},t._getAttachment=function(t){return Nn[t.toUpperCase()]},t._setListeners=function(){var i=this;this.config.trigger.split(" ").forEach(function(t){if("click"===t)p(i.element).on(i.constructor.Event.CLICK,i.config.selector,function(t){return i.toggle(t)});else if(t!==Bn){var e=t===Mn?i.constructor.Event.MOUSEENTER:i.constructor.Event.FOCUSIN,n=t===Mn?i.constructor.Event.MOUSELEAVE:i.constructor.Event.FOCUSOUT;p(i.element).on(e,i.config.selector,function(t){return i._enter(t)}).on(n,i.config.selector,function(t){return i._leave(t)})}}),p(this.element).closest(".modal").on("hide.bs.modal",function(){i.element&&i.hide()}),this.config.selector?this.config=l({},this.config,{trigger:"manual",selector:""}):this._fixTitle()},t._fixTitle=function(){var t=typeof this.element.getAttribute("data-original-title");(this.element.getAttribute("title")||"string"!==t)&&(this.element.setAttribute("data-original-title",this.element.getAttribute("title")||""),this.element.setAttribute("title",""))},t._enter=function(t,e){var n=this.constructor.DATA_KEY;(e=e||p(t.currentTarget).data(n))||(e=new 
this.constructor(t.currentTarget,this._getDelegateConfig()),p(t.currentTarget).data(n,e)),t&&(e._activeTrigger["focusin"===t.type?Wn:Mn]=!0),p(e.getTipElement()).hasClass(jn)||e._hoverState===Ln?e._hoverState=Ln:(clearTimeout(e._timeout),e._hoverState=Ln,e.config.delay&&e.config.delay.show?e._timeout=setTimeout(function(){e._hoverState===Ln&&e.show()},e.config.delay.show):e.show())},t._leave=function(t,e){var n=this.constructor.DATA_KEY;(e=e||p(t.currentTarget).data(n))||(e=new this.constructor(t.currentTarget,this._getDelegateConfig()),p(t.currentTarget).data(n,e)),t&&(e._activeTrigger["focusout"===t.type?Wn:Mn]=!1),e._isWithActiveTrigger()||(clearTimeout(e._timeout),e._hoverState=xn,e.config.delay&&e.config.delay.hide?e._timeout=setTimeout(function(){e._hoverState===xn&&e.hide()},e.config.delay.hide):e.hide())},t._isWithActiveTrigger=function(){for(var t in this._activeTrigger)if(this._activeTrigger[t])return!0;return!1},t._getConfig=function(t){var e=p(this.element).data();return Object.keys(e).forEach(function(t){-1!==An.indexOf(t)&&delete e[t]}),"number"==typeof(t=l({},this.constructor.Default,e,"object"==typeof t&&t?t:{})).delay&&(t.delay={show:t.delay,hide:t.delay}),"number"==typeof t.title&&(t.title=t.title.toString()),"number"==typeof t.content&&(t.content=t.content.toString()),m.typeCheckConfig(wn,t,this.constructor.DefaultType),t.sanitize&&(t.template=bn(t.template,t.whiteList,t.sanitizeFn)),t},t._getDelegateConfig=function(){var t={};if(this.config)for(var e in this.config)this.constructor.Default[e]!==this.config[e]&&(t[e]=this.config[e]);return t},t._cleanTipClass=function(){var t=p(this.getTipElement()),e=t.attr("class").match(In);null!==e&&e.length&&t.removeClass(e.join(""))},t._handlePopperPlacementChange=function(t){var e=t.instance;this.tip=e.popper,this._cleanTipClass(),this.addAttachmentClass(this._getAttachment(t.placement))},t._fixTransition=function(){var 
/* Tooltip wrap-up (_fixTransition, _jQueryInterface, static getters), then the popover plugin. */t=this.getTipElement(),e=this.config.animation;null===t.getAttribute("x-placement")&&(p(t).removeClass(Hn),this.config.animation=!1,this.hide(),this.show(),this.config.animation=e)},i._jQueryInterface=function(n){return this.each(function(){var t=p(this).data(Cn),e="object"==typeof n&&n;if((t||!/dispose|hide/.test(n))&&(t||(t=new i(this,e),p(this).data(Cn,t)),"string"==typeof n)){if("undefined"==typeof t[n])throw new TypeError('No method named "'+n+'"');t[n]()}})},s(i,null,[{key:"VERSION",get:function(){return"4.3.1"}},{key:"Default",get:function(){return kn}},{key:"NAME",get:function(){return wn}},{key:"DATA_KEY",get:function(){return Cn}},{key:"Event",get:function(){return Pn}},{key:"EVENT_KEY",get:function(){return Tn}},{key:"DefaultType",get:function(){return On}}]),i}();p.fn[wn]=qn._jQueryInterface,p.fn[wn].Constructor=qn,p.fn[wn].noConflict=function(){return p.fn[wn]=Sn,qn._jQueryInterface};/* --- Bootstrap popover plugin: constants, defaults extending tooltip's (Gn/$n), and the Popover class `ii` which subclasses the tooltip class qn, overriding content handling for header/body. --- */var Kn="popover",Qn="bs.popover",Vn="."+Qn,Yn=p.fn[Kn],zn="bs-popover",Xn=new RegExp("(^|\\s)"+zn+"\\S+","g"),Gn=l({},qn.Default,{placement:"right",trigger:"click",content:"",template:'<div class="popover" role="tooltip"><div class="arrow"></div><h3 class="popover-header"></h3><div class="popover-body"></div></div>'}),$n=l({},qn.DefaultType,{content:"(string|element|function)"}),Jn="fade",Zn="show",ti=".popover-header",ei=".popover-body",ni={HIDE:"hide"+Vn,HIDDEN:"hidden"+Vn,SHOW:"show"+Vn,SHOWN:"shown"+Vn,INSERTED:"inserted"+Vn,CLICK:"click"+Vn,FOCUSIN:"focusin"+Vn,FOCUSOUT:"focusout"+Vn,MOUSEENTER:"mouseenter"+Vn,MOUSELEAVE:"mouseleave"+Vn},ii=function(t){var e,n;function i(){return t.apply(this,arguments)||this}n=t,(e=i).prototype=Object.create(n.prototype),(e.prototype.constructor=e).__proto__=n;var o=i.prototype;return o.isWithContent=function(){return this.getTitle()||this._getContent()},o.addAttachmentClass=function(t){p(this.getTipElement()).addClass(zn+"-"+t)},o.getTipElement=function(){return 
this.tip=this.tip||p(this.config.template)[0],this.tip},o.setContent=function(){var t=p(this.getTipElement());this.setElementContent(t.find(ti),this.getTitle());var e=this._getContent();"function"==typeof e&&(e=e.call(this.element)),this.setElementContent(t.find(ei),e),t.removeClass(Jn+" "+Zn)},o._getContent=function(){return this.element.getAttribute("data-content")||this.config.content},o._cleanTipClass=function(){var t=p(this.getTipElement()),e=t.attr("class").match(Xn);null!==e&&0<e.length&&t.removeClass(e.join(""))},i._jQueryInterface=function(n){return this.each(function(){var t=p(this).data(Qn),e="object"==typeof n?n:null;if((t||!/dispose|hide/.test(n))&&(t||(t=new i(this,e),p(this).data(Qn,t)),"string"==typeof n)){if("undefined"==typeof t[n])throw new TypeError('No method named "'+n+'"');t[n]()}})},s(i,null,[{key:"VERSION",get:function(){return"4.3.1"}},{key:"Default",get:function(){return Gn}},{key:"NAME",get:function(){return Kn}},{key:"DATA_KEY",get:function(){return Qn}},{key:"Event",get:function(){return ni}},{key:"EVENT_KEY",get:function(){return Vn}},{key:"DefaultType",get:function(){return $n}}]),i}(qn);p.fn[Kn]=ii._jQueryInterface,p.fn[Kn].Constructor=ii,p.fn[Kn].noConflict=function(){return p.fn[Kn]=Yn,ii._jQueryInterface};/* --- Bootstrap scrollspy plugin: constants, defaults (li) / types (ci), and the ScrollSpy class `Ci` (constructor below; selector string is completed on the next line). --- */var oi="scrollspy",ri="bs.scrollspy",si="."+ri,ai=p.fn[oi],li={offset:10,method:"auto",target:""},ci={offset:"number",method:"string",target:"(string|element)"},hi={ACTIVATE:"activate"+si,SCROLL:"scroll"+si,LOAD_DATA_API:"load"+si+".data-api"},ui="dropdown-item",fi="active",di='[data-spy="scroll"]',pi=".nav, .list-group",mi=".nav-link",gi=".nav-item",_i=".list-group-item",vi=".dropdown",yi=".dropdown-item",Ei=".dropdown-toggle",bi="offset",wi="position",Ci=function(){function n(t,e){var n=this;this._element=t,this._scrollElement="BODY"===t.tagName?window:t,this._config=this._getConfig(e),this._selector=this._config.target+" "+mi+","+this._config.target+" "+_i+","+this._config.target+" 
"+yi,this._offsets=[],this._targets=[],this._activeTarget=null,this._scrollHeight=0,p(this._scrollElement).on(hi.SCROLL,function(t){return n._process(t)}),this.refresh(),this._process()}/* ScrollSpy methods: refresh() rebuilds sorted offset/target arrays, _process() activates the target whose offset bracket contains the current scroll position, _activate()/_clear() toggle the "active" class. */var t=n.prototype;return t.refresh=function(){var e=this,t=this._scrollElement===this._scrollElement.window?bi:wi,o="auto"===this._config.method?t:this._config.method,r=o===wi?this._getScrollTop():0;this._offsets=[],this._targets=[],this._scrollHeight=this._getScrollHeight(),[].slice.call(document.querySelectorAll(this._selector)).map(function(t){var e,n=m.getSelectorFromElement(t);if(n&&(e=document.querySelector(n)),e){var i=e.getBoundingClientRect();if(i.width||i.height)return[p(e)[o]().top+r,n]}return null}).filter(function(t){return t}).sort(function(t,e){return t[0]-e[0]}).forEach(function(t){e._offsets.push(t[0]),e._targets.push(t[1])})},t.dispose=function(){p.removeData(this._element,ri),p(this._scrollElement).off(si),this._element=null,this._scrollElement=null,this._config=null,this._selector=null,this._offsets=null,this._targets=null,this._activeTarget=null,this._scrollHeight=null},t._getConfig=function(t){if("string"!=typeof(t=l({},li,"object"==typeof t&&t?t:{})).target){var e=p(t.target).attr("id");e||(e=m.getUID(oi),p(t.target).attr("id",e)),t.target="#"+e}return m.typeCheckConfig(oi,t,ci),t},t._getScrollTop=function(){return this._scrollElement===window?this._scrollElement.pageYOffset:this._scrollElement.scrollTop},t._getScrollHeight=function(){return this._scrollElement.scrollHeight||Math.max(document.body.scrollHeight,document.documentElement.scrollHeight)},t._getOffsetHeight=function(){return this._scrollElement===window?window.innerHeight:this._scrollElement.getBoundingClientRect().height},t._process=function(){var t=this._getScrollTop()+this._config.offset,e=this._getScrollHeight(),n=this._config.offset+e-this._getOffsetHeight();if(this._scrollHeight!==e&&this.refresh(),n<=t){var 
i=this._targets[this._targets.length-1];this._activeTarget!==i&&this._activate(i)}else{if(this._activeTarget&&t<this._offsets[0]&&0<this._offsets[0])return this._activeTarget=null,void this._clear();for(var o=this._offsets.length;o--;){this._activeTarget!==this._targets[o]&&t>=this._offsets[o]&&("undefined"==typeof this._offsets[o+1]||t<this._offsets[o+1])&&this._activate(this._targets[o])}}},t._activate=function(e){this._activeTarget=e,this._clear();var t=this._selector.split(",").map(function(t){return t+'[data-target="'+e+'"],'+t+'[href="'+e+'"]'}),n=p([].slice.call(document.querySelectorAll(t.join(","))));n.hasClass(ui)?(n.closest(vi).find(Ei).addClass(fi),n.addClass(fi)):(n.addClass(fi),n.parents(pi).prev(mi+", "+_i).addClass(fi),n.parents(pi).prev(gi).children(mi).addClass(fi)),p(this._scrollElement).trigger(hi.ACTIVATE,{relatedTarget:e})},t._clear=function(){[].slice.call(document.querySelectorAll(this._selector)).filter(function(t){return t.classList.contains(fi)}).forEach(function(t){return t.classList.remove(fi)})},n._jQueryInterface=function(e){return this.each(function(){var t=p(this).data(ri);if(t||(t=new n(this,"object"==typeof e&&e),p(this).data(ri,t)),"string"==typeof e){if("undefined"==typeof t[e])throw new TypeError('No method named "'+e+'"');t[e]()}})},s(n,null,[{key:"VERSION",get:function(){return"4.3.1"}},{key:"Default",get:function(){return li}}]),n}();p(window).on(hi.LOAD_DATA_API,function(){for(var t=[].slice.call(document.querySelectorAll(di)),e=t.length;e--;){var n=p(t[e]);Ci._jQueryInterface.call(n,n.data())}}),p.fn[oi]=Ci._jQueryInterface,p.fn[oi].Constructor=Ci,p.fn[oi].noConflict=function(){return p.fn[oi]=ai,Ci._jQueryInterface};/* --- Bootstrap tab plugin: constants and selectors (Ri selector string is completed on the next line). --- */var Ti="bs.tab",Si="."+Ti,Di=p.fn.tab,Ii={HIDE:"hide"+Si,HIDDEN:"hidden"+Si,SHOW:"show"+Si,SHOWN:"shown"+Si,CLICK_DATA_API:"click"+Si+".data-api"},Ai="dropdown-menu",Oi="active",Ni="disabled",ki="fade",Li="show",xi=".dropdown",Pi=".nav, .list-group",Hi=".active",ji="> li > .active",Ri='[data-toggle="tab"], 
[data-toggle="pill"], [data-toggle="list"]',Fi=".dropdown-toggle",Mi="> .dropdown-menu .active",Wi=function(){function i(t){this._element=t}var t=i.prototype;return t.show=function(){var n=this;if(!(this._element.parentNode&&this._element.parentNode.nodeType===Node.ELEMENT_NODE&&p(this._element).hasClass(Oi)||p(this._element).hasClass(Ni))){var t,i,e=p(this._element).closest(Pi)[0],o=m.getSelectorFromElement(this._element);if(e){var r="UL"===e.nodeName||"OL"===e.nodeName?ji:Hi;i=(i=p.makeArray(p(e).find(r)))[i.length-1]}var s=p.Event(Ii.HIDE,{relatedTarget:this._element}),a=p.Event(Ii.SHOW,{relatedTarget:i});if(i&&p(i).trigger(s),p(this._element).trigger(a),!a.isDefaultPrevented()&&!s.isDefaultPrevented()){o&&(t=document.querySelector(o)),this._activate(this._element,e);var l=function(){var t=p.Event(Ii.HIDDEN,{relatedTarget:n._element}),e=p.Event(Ii.SHOWN,{relatedTarget:i});p(i).trigger(t),p(n._element).trigger(e)};t?this._activate(t,t.parentNode,l):l()}}},t.dispose=function(){p.removeData(this._element,Ti),this._element=null},t._activate=function(t,e,n){var i=this,o=(!e||"UL"!==e.nodeName&&"OL"!==e.nodeName?p(e).children(Hi):p(e).find(ji))[0],r=n&&o&&p(o).hasClass(ki),s=function(){return i._transitionComplete(t,o,n)};if(o&&r){var a=m.getTransitionDurationFromElement(o);p(o).removeClass(Li).one(m.TRANSITION_END,s).emulateTransitionEnd(a)}else s()},t._transitionComplete=function(t,e,n){if(e){p(e).removeClass(Oi);var i=p(e.parentNode).find(Mi)[0];i&&p(i).removeClass(Oi),"tab"===e.getAttribute("role")&&e.setAttribute("aria-selected",!1)}if(p(t).addClass(Oi),"tab"===t.getAttribute("role")&&t.setAttribute("aria-selected",!0),m.reflow(t),t.classList.contains(ki)&&t.classList.add(Li),t.parentNode&&p(t.parentNode).hasClass(Ai)){var o=p(t).closest(xi)[0];if(o){var r=[].slice.call(o.querySelectorAll(Fi));p(r).addClass(Oi)}t.setAttribute("aria-expanded",!0)}n&&n()},i._jQueryInterface=function(n){return this.each(function(){var t=p(this),e=t.data(Ti);if(e||(e=new 
i(this),t.data(Ti,e)),"string"==typeof n){if("undefined"==typeof e[n])throw new TypeError('No method named "'+n+'"');e[n]()}})},s(i,null,[{key:"VERSION",get:function(){return"4.3.1"}}]),i}();p(document).on(Ii.CLICK_DATA_API,Ri,function(t){t.preventDefault(),Wi._jQueryInterface.call(p(this),"show")}),p.fn.tab=Wi._jQueryInterface,p.fn.tab.Constructor=Wi,p.fn.tab.noConflict=function(){return p.fn.tab=Di,Wi._jQueryInterface};var Ui="toast",Bi="bs.toast",qi="."+Bi,Ki=p.fn[Ui],Qi={CLICK_DISMISS:"click.dismiss"+qi,HIDE:"hide"+qi,HIDDEN:"hidden"+qi,SHOW:"show"+qi,SHOWN:"shown"+qi},Vi="fade",Yi="hide",zi="show",Xi="showing",Gi={animation:"boolean",autohide:"boolean",delay:"number"},$i={animation:!0,autohide:!0,delay:500},Ji='[data-dismiss="toast"]',Zi=function(){function i(t,e){this._element=t,this._config=this._getConfig(e),this._timeout=null,this._setListeners()}var t=i.prototype;return t.show=function(){var t=this;p(this._element).trigger(Qi.SHOW),this._config.animation&&this._element.classList.add(Vi);var e=function(){t._element.classList.remove(Xi),t._element.classList.add(zi),p(t._element).trigger(Qi.SHOWN),t._config.autohide&&t.hide()};if(this._element.classList.remove(Yi),this._element.classList.add(Xi),this._config.animation){var n=m.getTransitionDurationFromElement(this._element);p(this._element).one(m.TRANSITION_END,e).emulateTransitionEnd(n)}else e()},t.hide=function(t){var e=this;this._element.classList.contains(zi)&&(p(this._element).trigger(Qi.HIDE),t?this._close():this._timeout=setTimeout(function(){e._close()},this._config.delay))},t.dispose=function(){clearTimeout(this._timeout),this._timeout=null,this._element.classList.contains(zi)&&this._element.classList.remove(zi),p(this._element).off(Qi.CLICK_DISMISS),p.removeData(this._element,Bi),this._element=null,this._config=null},t._getConfig=function(t){return t=l({},$i,p(this._element).data(),"object"==typeof t&&t?t:{}),m.typeCheckConfig(Ui,t,this.constructor.DefaultType),t},t._setListeners=function(){var 
t=this;p(this._element).on(Qi.CLICK_DISMISS,Ji,function(){return t.hide(!0)})},t._close=function(){var t=this,e=function(){t._element.classList.add(Yi),p(t._element).trigger(Qi.HIDDEN)};if(this._element.classList.remove(zi),this._config.animation){var n=m.getTransitionDurationFromElement(this._element);p(this._element).one(m.TRANSITION_END,e).emulateTransitionEnd(n)}else e()},i._jQueryInterface=function(n){return this.each(function(){var t=p(this),e=t.data(Bi);if(e||(e=new i(this,"object"==typeof n&&n),t.data(Bi,e)),"string"==typeof n){if("undefined"==typeof e[n])throw new TypeError('No method named "'+n+'"');e[n](this)}})},s(i,null,[{key:"VERSION",get:function(){return"4.3.1"}},{key:"DefaultType",get:function(){return Gi}},{key:"Default",get:function(){return $i}}]),i}();p.fn[Ui]=Zi._jQueryInterface,p.fn[Ui].Constructor=Zi,p.fn[Ui].noConflict=function(){return p.fn[Ui]=Ki,Zi._jQueryInterface},function(){if("undefined"==typeof p)throw new TypeError("Bootstrap's JavaScript requires jQuery. jQuery must be included before Bootstrap's JavaScript.");var t=p.fn.jquery.split(" ")[0].split(".");if(t[0]<2&&t[1]<9||1===t[0]&&9===t[1]&&t[2]<1||4<=t[0])throw new Error("Bootstrap's JavaScript requires at least jQuery v1.9.1 but less than v4.0.0")}(),t.Util=m,t.Alert=g,t.Button=k,t.Carousel=at,t.Collapse=Ct,t.Dropdown=Xe,t.Modal=gn,t.Popover=ii,t.Scrollspy=Ci,t.Tab=Wi,t.Toast=Zi,t.Tooltip=qn,Object.defineProperty(t,"__esModule",{value:!0})});
//# sourceMappingURL=bootstrap.bundle.min.js.map | PypiClean |
/Kate-plugins-0.2.3.tar.gz/Kate-plugins-0.2.3/README.rst | .. contents::
============
Kate Plugins
============
Information
===========
These are Pate plugins for `Kate <http://kate-editor.org/>`_ editor. Plugins to make coding easier in `Python <http://python.org/>`_, `Django <https://docs.djangoproject.com>`_ and JavaScript
.. note::
This repository is unmaintained, because these plugins have been added to the official repository: `Python utils <https://projects.kde.org/projects/kde/applications/kate/repository/revisions/master/show/addons/kate/pate/src/plugins/python_utils>`_, `Javascript utils <https://projects.kde.org/projects/kde/applications/kate/repository/revisions/master/show/addons/kate/pate/src/plugins/js_utils>`_, `Django utils <https://projects.kde.org/projects/kde/applications/kate/repository/revisions/master/show/addons/kate/pate/src/plugins/django_utils>`_ and `XML pretty <https://projects.kde.org/projects/kde/applications/kate/repository/revisions/master/entry/addons/kate/pate/src/plugins/xml_pretty.py>`_. The generic functions and generic classes have been added to the `libkatepate <https://projects.kde.org/projects/kde/applications/kate/repository/revisions/master/show/addons/kate/pate/src/plugins/libkatepate>`_
Requirements
============
* `Kate <http://kate-editor.org/>`_
* Extra dependencies for extra and super nice features. Optional, but **very recomended** :)
* `pysmell <http://pypi.python.org/pypi/pysmell>`_
* `pyplete <http://pypi.python.org/pypi/pyplete>`_
* `pyflakes <http://pypi.python.org/pypi/pyflakes>`_
* `simplejson <http://pypi.python.org/pypi/simplejson>`_
* `pyjslint <http://pypi.python.org/pypi/pyjslint>`_ (it requires `NodeJS <http://nodejs.org/>`_, read the pyjslint readme)
Installation
============
* `Install Kate <http://kate-editor.org/get-it/>`_ from sources
* Install optional requirements:
::
# Kate plugins has been tested with these versions but is very probably that works with later versions
pip install pysmell==0.7.3 pyplete==0.0.2 pep8==0.6.1 pyflakes==0.5.0 pyjslint==0.3.3 simplejson==2.6.1
* Install Kate-plugins:
::
pip install Kate-plugins
ln -s /PATH/OF/THE/EGG/kate_plugins/ $(kde4-config --localprefix)/share/apps/kate/pate
Or
::
cd ~/build
git clone https://github.com/goinnn/Kate-plugins
ln -s ~/build/Kate-plugins/kate_plugins/ $(kde4-config --localprefix)/share/apps/kate/pate
* Startup Kate and enable "Python Plugins" in: Settings > Configure Kate > Plugins
You should now see three additional menu items: "Python", "Javascript", and "XML". You can change the menu configuration of easy way change the `settings <https://github.com/goinnn/Kate-plugins/blob/master/kate_plugins/kate_settings_plugins.py>`_
Plugins
=======
Autocomplete (python)
---------------------
* Shortcut: It is automatical
* from and import instruction
* autocomplete into the code (beta) with `pysmell <http://pypi.python.org/pypi/pysmell>`_
* There was a hook if you want to add your own packages python in the autocomplete structure. You should be create a file called "autocomplete_path.py" next to the "autocomplete.py" with a function "def path(session, doc, view)", like this:
::
def path(session, doc, view):
if session == 'session1'
return ['/PATH/OF/THE/EGG1/name1.egg',
'/PATH/OF/THE/PACKAGE1/',
...
'/PATH/OF/THE/EGGN/namen.egg']
elif session == 'session2':
return ['/PATH/OF/THE/EGG2/name2.egg',
'/PATH/OF/THE/PACKAGE2/',
...
'/PATH/OF/THE/EGGN/namem.egg']
else:
return ['/PATH/OF/THE/EGG2/name3.egg',
'/PATH/OF/THE/PACKAGE3/',
...
'/PATH/OF/THE/EGGN/namel.egg']
insert IPDB (python)
--------------------
* Shortcut: Ctrl+I
* Insert the text "import ipdb; ipdb.set_trace()"
insert __init__ (python)
------------------------
* Shortcut: Ctrl+-
* Smart insert a function __init__
insert super (python)
---------------------
* Shortcut: Alt+-
* Smart insert a call to super of the function
insert call recursive (python)
------------------------------
* Shortcut: Ctrl+Alt+-
* Smart insert a call to the current function recursively
PEP8 (python)
-------------
* Shortcut: Alt+8
* Use PEP8 to look for ugly code, highlights lines with problems
* It uses `pep8 <http://pypi.python.org/pypi/pep8>`_ so it must be present in the system
PyFlakes (python)
-----------------
* Shortcut: Alt+7
* Use PyFlakes to look for bad code, highlights lines with problems
* It uses `pyflakes <http://pypi.python.org/pypi/pyflakes>`_ so it must be present in the system
Parse syntax (python)
---------------------
* Shortcut: Alt+6 or when you save the file
* Parse syntax this file and show a error list, or a dialog say "OK"
Check All (python/javascript)
-----------------------------
* Shortcut: Alt+5
* Check pep8, pyflakes, parse syntax and jslint
Template Django urls (django)
-----------------------------
* Shortcut: Ctrl+Alt+7
* Smart template of the file `urls.py <http://docs.djangoproject.com/en/dev/topics/http/urls/#example>`_
Template import views (django)
------------------------------
* Shortcut: Ctrl+Alt+V
* Insert the tipical imports in a view
Create Django form (django)
---------------------------
* Shortcut: Ctrl+Alt+F
* Template to form class
Create Django model (django)
----------------------------
* Shortcut: Ctrl+Alt+M
* Template to model class
Close Template tag (django)
----------------------------
* Shortcut: Ctrl+Alt+C
* Close the last open templatetag (block, if, for, etc)
Template block (django)
----------------------------
* Shortcut: Ctrl+Alt+B
* Insert a struncture like this: {% block content %}XXX{% endblock %} or {% if x > 3 %} {% endif %}
Autocomplete static to javascript (javascript)
----------------------------------------------
* Shortcut: It is automatical
Autocomplete static to jQuery (javascript)
----------------------------------------------
* Shortcut: It is automatical
jQuery ready (javascript)
-------------------------
* Shortcut: Ctrl+J
* Template jQuery ready
Pretty JSON (javascript)
------------------------
* Shortcut: Ctrl+Alt+J
* Convert a horrible json in a pretty JSON :-)
JSLint (javascript)
-------------------
* Shortcut: Alt+9
* Use JSLint to look for errors and bad code, highlights lines with problems
* It uses `pyjslint <http://pypi.python.org/pypi/pyjslint>`_ so it must be present in the system (and working!)
Pretty XML (xhtml)
------------------------
* Shortcut: Ctrl+Alt+X
* Convert a horrible xml in a pretty XML :-)
Future Plugins
==============
* Clean code (core)
* Improve autocompletes plugins (core)
* Template tags autocomplete (django)
* Integration with rope (python)
Other repositories of Plugins to Kate
=====================================
* http://github.com/mtorromeo/kate-plugin-zencoding (Very recomended)
* https://github.com/pag/pate/tree/master/src/plugins
* https://github.com/emyller/pate-plugins
* https://github.com/zaufi/kate-pate-plugins
| PypiClean |
/KayleeVC-0.1.1.tar.gz/KayleeVC-0.1.1/README.rst | Kaylee
======
Kaylee is a somewhat fancy speech recognizer that will run commands and
perform other functions when a user speaks loosely preset sentences. It
is based on `Blather <https://gitlab.com/jezra/blather>`__ by
`Jezra <http://www.jezra.net/>`__, but adds a lot of features that go
beyond the original purpose of Blather.
Requirements
------------
1. Python 3 (tested with 3.5, may work with older versions)
2. pocketsphinx 5prealpha
3. gstreamer-1.0 (and what ever plugin has pocketsphinx support)
4. gstreamer-1.0 base plugins (required for ALSA)
5. python-gobject (required for GStreamer and the GTK-based UI)
6. python-requests (required for automatic language updating)
**Note:** it may also be required to install
``pocketsphinx-hmm-en-hub4wsj``
Usage
-----
1. Copy options.json.tmp to ~/.config/kaylee/options.json and fill the
"commands" section of the file with sentences to speak and commands
to run.
2. Run kaylee.py. This will generate
~/.local/share/kaylee/sentences.corpus based on sentences in the
"commands" section of options.json, then use the `Sphinx Knowledge
Base Tool <http://www.speech.cs.cmu.edu/tools/lmtool.html>`__ to
create and save a new language model and dictionary.
- For GTK UI, run kaylee.py -i g
- To start a UI in 'continuous' listen mode, use the -c flag
- To use a microphone other than the system default, use the -m flag
3. Start talking!
**Note:** default values for command-line arguments may be specified in
the options.json file.
Examples
~~~~~~~~
- To run Kaylee with the GTK UI, starting in continuous listen mode:
``./kaylee.py -i g -c``
- To run Kaylee with no UI and using a USB microphone recognized as
device 2: ``./kaylee.py -m 2``
- To have Kaylee pass each word of the matched sentence as a separate
argument to the executed command: ``./kaylee.py -p``
- To run a command when a valid sentence has been detected:
``./kaylee.py --valid-sentence-command=/path/to/command``
- To run a command when a invalid sentence has been detected:
``./kaylee.py --invalid-sentence-command=/path/to/command``
Finding the Device Number of a USB microphone
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
There are a few ways to find the device number of a USB microphone.
- ``cat /proc/asound/cards``
- ``arecord -l``
| PypiClean |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.