id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7 values |
|---|---|---|
/DLTA-AI-1.1.tar.gz/DLTA-AI-1.1/DLTA_AI_app/mmdetection/mmdet/models/detectors/panoptic_two_stage_segmentor.py | import mmcv
import numpy as np
import torch
from mmdet.core import INSTANCE_OFFSET, bbox2roi, multiclass_nms
from mmdet.core.visualization import imshow_det_bboxes
from ..builder import DETECTORS, build_head
from ..roi_heads.mask_heads.fcn_mask_head import _do_paste_mask
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class TwoStagePanopticSegmentor(TwoStageDetector):
"""Base class of Two-stage Panoptic Segmentor.
As well as the components in TwoStageDetector, Panoptic Segmentor has extra
semantic_head and panoptic_fusion_head.
"""
def __init__(
        self,
        backbone,
        neck=None,
        rpn_head=None,
        roi_head=None,
        train_cfg=None,
        test_cfg=None,
        pretrained=None,
        init_cfg=None,
        # for panoptic segmentation
        semantic_head=None,
        panoptic_fusion_head=None):
    """Build the two-stage panoptic segmentor.

    The detection components (backbone, neck, rpn_head, roi_head, cfgs)
    are forwarded unchanged to ``TwoStageDetector``; only the two
    panoptic-specific heads are handled here.
    """
    super(TwoStagePanopticSegmentor,
          self).__init__(backbone, neck, rpn_head, roi_head, train_cfg,
                         test_cfg, pretrained, init_cfg)
    if semantic_head is not None:
        self.semantic_head = build_head(semantic_head)
    if panoptic_fusion_head is not None:
        # The fusion head consumes the panoptic sub-config of test_cfg;
        # test_cfg may legitimately be None (e.g. train-only construction).
        panoptic_cfg = test_cfg.panoptic if test_cfg is not None else None
        # Deep-copy before mutating so the caller's config dict is untouched.
        panoptic_fusion_head_ = panoptic_fusion_head.deepcopy()
        panoptic_fusion_head_.update(test_cfg=panoptic_cfg)
        self.panoptic_fusion_head = build_head(panoptic_fusion_head_)
        # Mirror the class counts on the detector for convenient access.
        self.num_things_classes = self.panoptic_fusion_head.\
            num_things_classes
        self.num_stuff_classes = self.panoptic_fusion_head.\
            num_stuff_classes
        self.num_classes = self.panoptic_fusion_head.num_classes
@property
def with_semantic_head(self):
    """bool: True when a semantic head is attached and non-None."""
    head = getattr(self, 'semantic_head', None)
    return head is not None
@property
def with_panoptic_fusion_head(self):
    """bool: True when a panoptic fusion head is attached and non-None.

    Bug fix: the original called ``hasattr(self, 'panoptic_fusion_heads')``
    (note the trailing ``s``) while the attribute set in ``__init__`` is
    ``panoptic_fusion_head``. The misspelled check always returned False,
    so the guard never short-circuited and the second operand could raise
    ``AttributeError`` on instances built without a fusion head.
    """
    return hasattr(self, 'panoptic_fusion_head') and \
        self.panoptic_fusion_head is not None
def forward_dummy(self, img):
    """Used for computing network flops.

    See `mmdetection/tools/get_flops.py`
    """
    # Flop counting is not supported for the panoptic two-stage pipeline.
    message = '`forward_dummy` is not implemented in {}'.format(
        self.__class__.__name__)
    raise NotImplementedError(message)
def forward_train(self,
                  img,
                  img_metas,
                  gt_bboxes,
                  gt_labels,
                  gt_bboxes_ignore=None,
                  gt_masks=None,
                  gt_semantic_seg=None,
                  proposals=None,
                  **kwargs):
    """Run one training forward pass and collect all losses.

    Combines three loss sources: RPN (if present), RoI head (bbox + mask),
    and the semantic head on ``gt_semantic_seg``.
    Returns a dict mapping loss names to loss tensors.
    """
    x = self.extract_feat(img)
    losses = dict()
    # RPN forward and loss
    if self.with_rpn:
        proposal_cfg = self.train_cfg.get('rpn_proposal',
                                          self.test_cfg.rpn)
        # RPN is class-agnostic, hence gt_labels=None.
        rpn_losses, proposal_list = self.rpn_head.forward_train(
            x,
            img_metas,
            gt_bboxes,
            gt_labels=None,
            gt_bboxes_ignore=gt_bboxes_ignore,
            proposal_cfg=proposal_cfg)
        losses.update(rpn_losses)
    else:
        # Precomputed proposals supplied by the caller.
        proposal_list = proposals
    roi_losses = self.roi_head.forward_train(x, img_metas, proposal_list,
                                             gt_bboxes, gt_labels,
                                             gt_bboxes_ignore, gt_masks,
                                             **kwargs)
    losses.update(roi_losses)
    # Semantic (stuff) branch; assumes gt_semantic_seg is provided during
    # training — no None guard here, so callers must supply it.
    semantic_loss = self.semantic_head.forward_train(x, gt_semantic_seg)
    losses.update(semantic_loss)
    return losses
def simple_test_mask(self,
                     x,
                     img_metas,
                     det_bboxes,
                     det_labels,
                     rescale=False):
    """Simple test for mask head without augmentation."""
    # When rescaling, paste masks onto the original image canvas;
    # otherwise onto the padded (network-input) canvas.
    img_shapes = tuple(meta['ori_shape']
                       for meta in img_metas) if rescale else tuple(
                           meta['pad_shape'] for meta in img_metas)
    scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
    if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
        # No detections in any image: return empty, correctly-shaped tensors.
        masks = []
        for img_shape in img_shapes:
            out_shape = (0, self.roi_head.bbox_head.num_classes) \
                + img_shape[:2]
            masks.append(det_bboxes[0].new_zeros(out_shape))
        # NOTE(review): (0, 80, 28, 28) hard-codes 80 classes and a 28x28
        # mask size — presumably COCO defaults; verify against the config.
        mask_pred = det_bboxes[0].new_zeros((0, 80, 28, 28))
        mask_results = dict(
            masks=masks, mask_pred=mask_pred, mask_feats=None)
        return mask_results
    # Strip the score column; keep only the box coordinates.
    _bboxes = [det_bboxes[i][:, :4] for i in range(len(det_bboxes))]
    if rescale:
        if not isinstance(scale_factors[0], float):
            scale_factors = [
                det_bboxes[0].new_tensor(scale_factor)
                for scale_factor in scale_factors
            ]
        # Boxes are in original-image coordinates here; scale them back up
        # to the resized-image frame expected by the mask head.
        _bboxes = [
            _bboxes[i] * scale_factors[i] for i in range(len(_bboxes))
        ]
    mask_rois = bbox2roi(_bboxes)
    mask_results = self.roi_head._mask_forward(x, mask_rois)
    mask_pred = mask_results['mask_pred']
    # split batch mask prediction back to each image
    num_mask_roi_per_img = [len(det_bbox) for det_bbox in det_bboxes]
    mask_preds = mask_pred.split(num_mask_roi_per_img, 0)
    # resize the mask_preds to (K, H, W)
    masks = []
    for i in range(len(_bboxes)):
        det_bbox = det_bboxes[i][:, :4]
        det_label = det_labels[i]
        mask_pred = mask_preds[i].sigmoid()
        # Pick, for each RoI, the mask channel of its predicted class.
        box_inds = torch.arange(mask_pred.shape[0])
        mask_pred = mask_pred[box_inds, det_label][:, None]
        img_h, img_w, _ = img_shapes[i]
        # Paste every RoI mask (no cropping) onto the full image canvas.
        mask_pred, _ = _do_paste_mask(
            mask_pred, det_bbox, img_h, img_w, skip_empty=False)
        masks.append(mask_pred)
    mask_results['masks'] = masks
    return mask_results
def simple_test(self, img, img_metas, proposals=None, rescale=False):
    """Test without Augmentation.

    Pipeline: features -> proposals -> per-class NMS -> instance masks ->
    semantic segmentation -> panoptic fusion. Returns one dict per image
    with key ``pan_results`` (int numpy panoptic map).
    """
    x = self.extract_feat(img)
    if proposals is None:
        proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)
    else:
        proposal_list = proposals
    bboxes, scores = self.roi_head.simple_test_bboxes(
        x, img_metas, proposal_list, None, rescale=rescale)
    pan_cfg = self.test_cfg.panoptic
    # class-wise predictions
    det_bboxes = []
    det_labels = []
    for bboxe, score in zip(bboxes, scores):
        det_bbox, det_label = multiclass_nms(bboxe, score,
                                             pan_cfg.score_thr,
                                             pan_cfg.nms,
                                             pan_cfg.max_per_img)
        det_bboxes.append(det_bbox)
        det_labels.append(det_label)
    mask_results = self.simple_test_mask(
        x, img_metas, det_bboxes, det_labels, rescale=rescale)
    masks = mask_results['masks']
    seg_preds = self.semantic_head.simple_test(x, img_metas, rescale)
    results = []
    for i in range(len(det_bboxes)):
        # Fuse instance (thing) and semantic (stuff) predictions per image.
        pan_results = self.panoptic_fusion_head.simple_test(
            det_bboxes[i], det_labels[i], masks[i], seg_preds[i])
        pan_results = pan_results.int().detach().cpu().numpy()
        result = dict(pan_results=pan_results)
        results.append(result)
    return results
def show_result(self,
img,
result,
score_thr=0.3,
bbox_color=(72, 101, 241),
text_color=(72, 101, 241),
mask_color=None,
thickness=2,
font_size=13,
win_name='',
show=False,
wait_time=0,
out_file=None):
"""Draw `result` over `img`.
Args:
img (str or Tensor): The image to be displayed.
result (dict): The results.
score_thr (float, optional): Minimum score of bboxes to be shown.
Default: 0.3.
bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines.
The tuple of color should be in BGR order. Default: 'green'.
text_color (str or tuple(int) or :obj:`Color`):Color of texts.
The tuple of color should be in BGR order. Default: 'green'.
mask_color (None or str or tuple(int) or :obj:`Color`):
Color of masks. The tuple of color should be in BGR order.
Default: None.
thickness (int): Thickness of lines. Default: 2.
font_size (int): Font size of texts. Default: 13.
win_name (str): The window name. Default: ''.
wait_time (float): Value of waitKey param.
Default: 0.
show (bool): Whether to show the image.
Default: False.
out_file (str or None): The filename to write the image.
Default: None.
Returns:
img (Tensor): Only if not `show` or `out_file`.
"""
img = mmcv.imread(img)
img = img.copy()
pan_results = result['pan_results']
# keep objects ahead
ids = np.unique(pan_results)[::-1]
legal_indices = ids != self.num_classes # for VOID label
ids = ids[legal_indices]
labels = np.array([id % INSTANCE_OFFSET for id in ids], dtype=np.int64)
segms = (pan_results[None] == ids[:, None, None])
# if out_file specified, do not show image in window
if out_file is not None:
show = False
# draw bounding boxes
img = imshow_det_bboxes(
img,
segms=segms,
labels=labels,
class_names=self.CLASSES,
bbox_color=bbox_color,
text_color=text_color,
mask_color=mask_color,
thickness=thickness,
font_size=font_size,
win_name=win_name,
show=show,
wait_time=wait_time,
out_file=out_file)
if not (show or out_file):
return img | PypiClean |
/Booktype-1.5.tar.gz/Booktype-1.5/lib/booki/site_static/js/jquery/ui/jquery.ui.core.js | (function( $, undefined ) {
// prevent duplicate loading
// this is only a problem because we proxy existing functions
// and we don't want to double proxy them
$.ui = $.ui || {};
if ( $.ui.version ) {
return;
}
$.extend( $.ui, {
version: "1.8.10",
keyCode: {
ALT: 18,
BACKSPACE: 8,
CAPS_LOCK: 20,
COMMA: 188,
COMMAND: 91,
COMMAND_LEFT: 91, // COMMAND
COMMAND_RIGHT: 93,
CONTROL: 17,
DELETE: 46,
DOWN: 40,
END: 35,
ENTER: 13,
ESCAPE: 27,
HOME: 36,
INSERT: 45,
LEFT: 37,
MENU: 93, // COMMAND_RIGHT
NUMPAD_ADD: 107,
NUMPAD_DECIMAL: 110,
NUMPAD_DIVIDE: 111,
NUMPAD_ENTER: 108,
NUMPAD_MULTIPLY: 106,
NUMPAD_SUBTRACT: 109,
PAGE_DOWN: 34,
PAGE_UP: 33,
PERIOD: 190,
RIGHT: 39,
SHIFT: 16,
SPACE: 32,
TAB: 9,
UP: 38,
WINDOWS: 91 // COMMAND
}
});
// plugins
$.fn.extend({
_focus: $.fn.focus,
focus: function( delay, fn ) {
return typeof delay === "number" ?
this.each(function() {
var elem = this;
setTimeout(function() {
$( elem ).focus();
if ( fn ) {
fn.call( elem );
}
}, delay );
}) :
this._focus.apply( this, arguments );
},
// Return the closest ancestor that scrolls this element, or $(document)
// when the element is position:fixed or no scrolling ancestor exists.
// NOTE(review): relies on $.browser and $.curCSS, both removed in later
// jQuery releases — only valid against the jQuery version bundled here.
scrollParent: function() {
    var scrollParent;
    // IE treats static/relative elements differently; elsewhere only
    // absolutely positioned elements skip the overflow test on ancestors.
    if (($.browser.msie && (/(static|relative)/).test(this.css('position'))) || (/absolute/).test(this.css('position'))) {
        scrollParent = this.parents().filter(function() {
            return (/(relative|absolute|fixed)/).test($.curCSS(this,'position',1)) && (/(auto|scroll)/).test($.curCSS(this,'overflow',1)+$.curCSS(this,'overflow-y',1)+$.curCSS(this,'overflow-x',1));
        }).eq(0);
    } else {
        scrollParent = this.parents().filter(function() {
            return (/(auto|scroll)/).test($.curCSS(this,'overflow',1)+$.curCSS(this,'overflow-y',1)+$.curCSS(this,'overflow-x',1));
        }).eq(0);
    }
    return (/fixed/).test(this.css('position')) || !scrollParent.length ? $(document) : scrollParent;
},
// Getter/setter for z-index. As a getter, walks up the DOM and returns the
// first explicit non-zero z-index on a positioned ancestor, else 0.
zIndex: function( zIndex ) {
    if ( zIndex !== undefined ) {
        // Setter branch: delegate to .css() (applies to all matched elements).
        return this.css( "zIndex", zIndex );
    }
    if ( this.length ) {
        var elem = $( this[ 0 ] ), position, value;
        while ( elem.length && elem[ 0 ] !== document ) {
            // Ignore z-index if position is set to a value where z-index is ignored by the browser
            // This makes behavior of this function consistent across browsers
            // WebKit always returns auto if the element is positioned
            position = elem.css( "position" );
            if ( position === "absolute" || position === "relative" || position === "fixed" ) {
                // IE returns 0 when zIndex is not specified
                // other browsers return a string
                // we ignore the case of nested elements with an explicit value of 0
                // <div style="z-index: -10;"><div style="z-index: 0;"></div></div>
                value = parseInt( elem.css( "zIndex" ), 10 );
                if ( !isNaN( value ) && value !== 0 ) {
                    return value;
                }
            }
            elem = elem.parent();
        }
    }
    // Empty set, or no positioned ancestor with an explicit z-index.
    return 0;
},
disableSelection: function() {
return this.bind( ( $.support.selectstart ? "selectstart" : "mousedown" ) +
".ui-disableSelection", function( event ) {
event.preventDefault();
});
},
enableSelection: function() {
return this.unbind( ".ui-disableSelection" );
}
});
$.each( [ "Width", "Height" ], function( i, name ) {
var side = name === "Width" ? [ "Left", "Right" ] : [ "Top", "Bottom" ],
type = name.toLowerCase(),
orig = {
innerWidth: $.fn.innerWidth,
innerHeight: $.fn.innerHeight,
outerWidth: $.fn.outerWidth,
outerHeight: $.fn.outerHeight
};
function reduce( elem, size, border, margin ) {
$.each( side, function() {
size -= parseFloat( $.curCSS( elem, "padding" + this, true) ) || 0;
if ( border ) {
size -= parseFloat( $.curCSS( elem, "border" + this + "Width", true) ) || 0;
}
if ( margin ) {
size -= parseFloat( $.curCSS( elem, "margin" + this, true) ) || 0;
}
});
return size;
}
$.fn[ "inner" + name ] = function( size ) {
if ( size === undefined ) {
return orig[ "inner" + name ].call( this );
}
return this.each(function() {
$( this ).css( type, reduce( this, size ) + "px" );
});
};
$.fn[ "outer" + name] = function( size, margin ) {
if ( typeof size !== "number" ) {
return orig[ "outer" + name ].call( this, size );
}
return this.each(function() {
$( this).css( type, reduce( this, size, true, margin ) + "px" );
});
};
});
// selectors
// True when the element and every ancestor is visible: none hidden via
// CSS visibility and none matched by jQuery's :hidden filter.
function visible( element ) {
    return !$( element ).parents().andSelf().filter(function() {
        return $.curCSS( this, "visibility" ) === "hidden" ||
            $.expr.filters.hidden( this );
    }).length;
}
$.extend( $.expr[ ":" ], {
data: function( elem, i, match ) {
return !!$.data( elem, match[ 3 ] );
},
focusable: function( element ) {
var nodeName = element.nodeName.toLowerCase(),
tabIndex = $.attr( element, "tabindex" );
if ( "area" === nodeName ) {
var map = element.parentNode,
mapName = map.name,
img;
if ( !element.href || !mapName || map.nodeName.toLowerCase() !== "map" ) {
return false;
}
img = $( "img[usemap=#" + mapName + "]" )[0];
return !!img && visible( img );
}
return ( /input|select|textarea|button|object/.test( nodeName )
? !element.disabled
: "a" == nodeName
? element.href || !isNaN( tabIndex )
: !isNaN( tabIndex ))
// the element and all of its ancestors must be visible
&& visible( element );
},
tabbable: function( element ) {
var tabIndex = $.attr( element, "tabindex" );
return ( isNaN( tabIndex ) || tabIndex >= 0 ) && $( element ).is( ":focusable" );
}
});
// support
$(function() {
var body = document.body,
div = body.appendChild( div = document.createElement( "div" ) );
$.extend( div.style, {
minHeight: "100px",
height: "auto",
padding: 0,
borderWidth: 0
});
$.support.minHeight = div.offsetHeight === 100;
$.support.selectstart = "onselectstart" in div;
// set display to none to avoid a layout bug in IE
// http://dev.jquery.com/ticket/4014
body.removeChild( div ).style.display = "none";
});
// deprecated
$.extend( $.ui, {
// $.ui.plugin is deprecated. Use the proxy pattern instead.
plugin: {
add: function( module, option, set ) {
var proto = $.ui[ module ].prototype;
for ( var i in set ) {
proto.plugins[ i ] = proto.plugins[ i ] || [];
proto.plugins[ i ].push( [ option, set[ i ] ] );
}
},
call: function( instance, name, args ) {
var set = instance.plugins[ name ];
if ( !set || !instance.element[ 0 ].parentNode ) {
return;
}
for ( var i = 0; i < set.length; i++ ) {
if ( instance.options[ set[ i ][ 0 ] ] ) {
set[ i ][ 1 ].apply( instance.element, args );
}
}
}
},
// will be deprecated when we switch to jQuery 1.4 - use jQuery.contains()
// will be deprecated when we switch to jQuery 1.4 - use jQuery.contains()
// True when node `a` contains node `b`. Uses the DOM Level 3
// compareDocumentPosition bitmask (16 = "contained by") when available,
// falling back to the proprietary contains() (which also matches a === b,
// hence the explicit inequality guard).
contains: function( a, b ) {
    return document.compareDocumentPosition ?
        a.compareDocumentPosition( b ) & 16 :
        a !== b && a.contains( b );
},
// only used by resizable
// only used by resizable
// Report whether `el` can scroll along axis `a` ("left" => horizontal,
// anything else => vertical). May temporarily mutate el.scrollLeft/Top.
hasScroll: function( el, a ) {
    //If overflow is hidden, the element might have extra content, but the user wants to hide it
    if ( $( el ).css( "overflow" ) === "hidden") {
        return false;
    }
    var scroll = ( a && a === "left" ) ? "scrollLeft" : "scrollTop",
        has = false;
    if ( el[ scroll ] > 0 ) {
        return true;
    }
    // TODO: determine which cases actually cause this to happen
    // if the element doesn't have the scroll set, see if it's possible to
    // set the scroll
    // Probe by writing 1 and reading back; browsers clamp to 0 when the
    // element is not scrollable. Restore to 0 afterwards.
    el[ scroll ] = 1;
    has = ( el[ scroll ] > 0 );
    el[ scroll ] = 0;
    return has;
},
// these are odd functions, fix the API or move into individual plugins
isOverAxis: function( x, reference, size ) {
//Determines when x coordinate is over "b" element axis
return ( x > reference ) && ( x < ( reference + size ) );
},
isOver: function( y, x, top, left, height, width ) {
//Determines when x, y coordinates is over "b" element
return $.ui.isOverAxis( y, top, height ) && $.ui.isOverAxis( x, left, width );
}
});
})( jQuery ); | PypiClean |
/ARGs_OAP-2.3.2.tar.gz/ARGs_OAP-2.3.2/ARGs_OAP/bin/bbmap/kmercoverage.sh |
usage(){
echo "
Written by Brian Bushnell
Last modified May 23, 2014
*** DEPRECATED: This should still work but is no longer maintained. ***
Description: Annotates reads with their kmer depth.
Usage: kmercoverage in=<input> out=<read output> hist=<histogram output>
Input parameters:
in2=null Second input file for paired reads
extra=null Additional files to use for input (generating hash table) but not for output
fastareadlen=2^31 Break up FASTA reads longer than this. Can be useful when processing scaffolded genomes
tablereads=-1 Use at most this many reads when building the hashtable (-1 means all)
kmersample=1 Process every nth kmer, and skip the rest
readsample=1 Process every nth read, and skip the rest
Output parameters:
hist=null Specify a file to output the depth histogram
histlen=10000 Max depth displayed on histogram
reads=-1 Only process this number of reads, then quit (-1 means all)
sampleoutput=t Use sampling on output as well as input (not used if sample rates are 1)
printcoverage=f Only print coverage information instead of reads
useheader=f Append coverage info to the read's header
minmedian=0 Don't output reads with median coverage below this
minaverage=0 Don't output reads with average coverage below this
zerobin=f Set to true if you want kmers with a count of 0 to go in the 0 bin instead of the 1 bin in histograms.
Default is false, to prevent confusion about how there can be 0-count kmers.
The reason is that based on the 'minq' and 'minprob' settings, some kmers may be excluded from the bloom filter.
Hashing parameters:
k=31 Kmer length (values under 32 are most efficient, but arbitrarily high values are supported)
cbits=8 Bits per cell in bloom filter; must be 2, 4, 8, 16, or 32. Maximum kmer depth recorded is 2^cbits.
Large values decrease accuracy for a fixed amount of memory.
hashes=4 Number of times a kmer is hashed. Higher is slower.
Higher is MORE accurate if there is enough memory, and LESS accurate if there is not enough memory.
prefilter=f True is slower, but generally more accurate; filters out low-depth kmers from the main hashtable.
prehashes=2 Number of hashes for prefilter.
passes=1 More passes can sometimes increase accuracy by iteratively removing low-depth kmers
minq=7 Ignore kmers containing bases with quality below this
minprob=0.5 Ignore kmers with overall probability of correctness below this
threads=X Spawn exactly X hashing threads (default is number of logical processors). Total active threads may exceed X by up to 4.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will specify 200 megs.
The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an
out-of-memory exception occurs. Requires Java 8u92+.
-da Disable assertions.
Please contact Brian Bushnell at bbushnell@lbl.gov if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
CP="$DIR""current/"
z="-Xmx1g"
z2="-Xms1g"
set=0
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
# Compute the JVM heap flags (-Xmx/-Xms) for this run.
# Sources calcmem.sh for helpers; if the user passed an explicit -Xmx
# (parseXmx sets $set to 1), keep it, otherwise size the heap from free
# RAM (3200m floor, 84% cap — see freeRam in calcmem.sh).
calcXmx () {
	source "$DIR""/calcmem.sh"
	setEnvironment
	parseXmx "$@"
	if [[ $set == 1 ]]; then
		# User supplied memory flags explicitly; do not override.
		return
	fi
	freeRam 3200m 84
	z="-Xmx${RAM}m"
	z2="-Xms${RAM}m"
}
calcXmx "$@"
# Launch the Java tool, echoing the exact command to stderr first so the
# invocation is visible in logs. User arguments come after the defaults,
# so they can override prefilter/bits/interleaved.
kmercoverage() {
	local CMD="java $EA $EOOM $z -cp $CP jgi.KmerCoverage prefilter=true bits=16 interleaved=false $@"
	echo $CMD >&2
	eval $CMD
}
kmercoverage "$@" | PypiClean |
/COMPAS-1.17.5.tar.gz/COMPAS-1.17.5/src/compas/datastructures/halfface/halfface.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from random import choice, sample
from compas.datastructures.datastructure import Datastructure
from compas.datastructures.attributes import VertexAttributeView
from compas.datastructures.attributes import EdgeAttributeView
from compas.datastructures.attributes import FaceAttributeView
from compas.datastructures.attributes import CellAttributeView
from compas.utilities import pairwise
class HalfFace(Datastructure):
"""Base half-face data structure for representing volumetric meshes.
Parameters
----------
name: str, optional
The name of the graph.
default_vertex_attributes: dict[str, Any], optional
Default values for vertex attributes.
default_edge_attributes: dict[str, Any], optional
Default values for edge attributes.
default_face_attributes: dict[str, Any], optional
Default values for face attributes.
default_cell_attributes: dict[str, Any], optional
Default values for cell attributes.
Attributes
----------
attributes : dict[str, Any]
General attributes of the data structure which will be included in the data representation.
default_vertex_attributes : dict[str, Any]
Default attributes of the vertices.
default_edge_attributes: dict[str, Any]
Default values for edge attributes.
default_face_attributes: dict[str, Any]
Default values for face attributes.
default_cell_attributes: dict[str, Any]
Default values for cell attributes.
"""
def __init__(
    self,
    name=None,
    default_vertex_attributes=None,
    default_edge_attributes=None,
    default_face_attributes=None,
    default_cell_attributes=None,
):
    """Initialize an empty half-face data structure."""
    super(HalfFace, self).__init__()
    # Running maxima for auto-generated integer keys; -1 means none used yet.
    self._max_vertex = -1
    self._max_face = -1
    self._max_cell = -1
    # vertex key -> attribute dict
    self._vertex = {}
    # halfface key -> ordered list of vertex keys
    self._halfface = {}
    # cell key -> u -> v -> halfface key (see add_cell)
    self._cell = {}
    # u -> v -> w -> cell key or None (per-vertex adjacency planes,
    # populated by add_halfface/add_cell)
    self._plane = {}
    # Sparse per-element attribute overrides.
    self._edge_data = {}
    self._face_data = {}
    self._cell_data = {}
    self.attributes = {"name": name or "HalfFace"}
    self.default_vertex_attributes = {"x": 0.0, "y": 0.0, "z": 0.0}
    self.default_edge_attributes = {}
    self.default_face_attributes = {}
    self.default_cell_attributes = {}
    # Caller-supplied defaults are merged on top of the built-in ones.
    if default_vertex_attributes:
        self.default_vertex_attributes.update(default_vertex_attributes)
    if default_edge_attributes:
        self.default_edge_attributes.update(default_edge_attributes)
    if default_face_attributes:
        self.default_face_attributes.update(default_face_attributes)
    if default_cell_attributes:
        self.default_cell_attributes.update(default_cell_attributes)
def __str__(self):
    """Return a one-line summary with the element counts of the volmesh."""
    counts = (
        self.number_of_vertices(),
        self.number_of_faces(),
        self.number_of_cells(),
        self.number_of_edges(),
    )
    return "<HalfFace with {} vertices, {} faces, {} cells, {} edges>".format(*counts)
# --------------------------------------------------------------------------
# descriptors
# --------------------------------------------------------------------------
@property
def name(self):
    """str: The name stored in the attribute dict, or the class name."""
    return self.attributes.get("name") or self.__class__.__name__

@name.setter
def name(self, value):
    # The name is persisted with the rest of the general attributes.
    self.attributes["name"] = value
@property
def DATASCHEMA(self):
    """:class:`schema.Schema`: Schema of the serialized data dict."""
    # Local import keeps `schema` an optional dependency: it is only
    # needed when the schema is actually requested (e.g. for validation).
    import schema

    return schema.Schema(
        {
            "attributes": dict,
            "dva": dict,
            "dea": dict,
            "dfa": dict,
            "dca": dict,
            "vertex": dict,
            "cell": dict,
            "edge_data": dict,
            "face_data": dict,
            "cell_data": dict,
            # -1 is the sentinel for "no key generated yet".
            "max_vertex": schema.And(int, lambda x: x >= -1),
            "max_face": schema.And(int, lambda x: x >= -1),
            "max_cell": schema.And(int, lambda x: x >= -1),
        }
    )
@property
def JSONSCHEMANAME(self):
    """str: Name of the JSON schema describing this data structure."""
    return "halfface"
@property
def data(self):
    """dict: A JSON-serializable representation of the volmesh.

    Halffaces are inlined into the cell dict (cell -> u -> v -> vertex
    list), so the halfface table itself does not need to be serialized.
    """
    cell = {}
    for c in self._cell:
        cell[c] = {}
        for u in self._cell[c]:
            cell[c].setdefault(u, {})
            for v in self._cell[c][u]:
                # Replace the halfface key by its vertex list so the
                # serialization is self-contained.
                cell[c][u][v] = self._halfface[self._cell[c][u][v]]
    data = {
        "attributes": self.attributes,
        "dva": self.default_vertex_attributes,
        "dea": self.default_edge_attributes,
        "dfa": self.default_face_attributes,
        "dca": self.default_cell_attributes,
        "vertex": self._vertex,
        "cell": cell,
        "edge_data": self._edge_data,
        "face_data": self._face_data,
        "cell_data": self._cell_data,
        "max_vertex": self._max_vertex,
        "max_face": self._max_face,
        "max_cell": self._max_cell,
    }
    return data
@data.setter
def data(self, data):
    """Rebuild the volmesh in place from a serialized data dict."""
    attributes = data.get("attributes") or {}
    dva = data.get("dva") or {}
    dea = data.get("dea") or {}
    dfa = data.get("dfa") or {}
    dca = data.get("dca") or {}
    vertex = data.get("vertex") or {}
    cell = data.get("cell") or {}
    edge_data = data.get("edge_data") or {}
    face_data = data.get("face_data") or {}
    cell_data = data.get("cell_data") or {}
    max_vertex = data.get("max_vertex", -1)
    max_face = data.get("max_face", -1)
    max_cell = data.get("max_cell", -1)
    if not vertex or not cell:
        # An empty serialization is a no-op; existing data is kept as-is.
        return
    self.attributes.update(attributes)
    self.default_vertex_attributes.update(dva)
    self.default_edge_attributes.update(dea)
    self.default_face_attributes.update(dfa)
    self.default_cell_attributes.update(dca)
    # Reset all internal tables before rebuilding.
    self._vertex = {}
    self._halfface = {}
    self._cell = {}
    self._plane = {}
    self._edge_data = {}
    self._face_data = {}
    self._cell_data = {}
    for v in vertex:
        attr = vertex[v] or {}
        # Keys are stored as strings in JSON; cast back to int.
        self.add_vertex(int(v), attr_dict=attr)
    for c in cell:
        attr = cell_data.get(c) or {}
        faces = []
        for u in cell[c]:
            for v in cell[c][u]:
                faces.append(cell[c][u][v])
        self.add_cell(faces, ckey=int(c), attr_dict=attr)
    for e in edge_data:
        # NOTE(review): edge keys are taken verbatim from the JSON dict
        # (strings), not converted back to tuples — verify consumers.
    self._edge_data[e] = edge_data[e] or {}
    for f in face_data:
        self._face_data[f] = face_data[f] or {}
    # Restore key counters last so add_* calls above do not clobber them.
    self._max_vertex = max_vertex
    self._max_face = max_face
    self._max_cell = max_cell
# --------------------------------------------------------------------------
# helpers
# --------------------------------------------------------------------------
def clear(self):
    """Clear all the volmesh data.

    Returns
    -------
    None

    """
    # Drop and re-create every internal table, then reset the key counters.
    for table in (
        "_vertex",
        "_halfface",
        "_cell",
        "_plane",
        "_edge_data",
        "_face_data",
        "_cell_data",
    ):
        delattr(self, table)
        setattr(self, table, {})
    self._max_vertex = -1
    self._max_face = -1
    self._max_cell = -1
def get_any_vertex(self):
    """Get the identifier of a random vertex.

    .. deprecated:: 1.13.3
        Use :meth:`vertex_sample` instead.

    Returns
    -------
    int
        The identifier of the vertex.

    """
    pool = list(self.vertices())
    return choice(pool)
def get_any_face(self):
    """Get the identifier of a random face.

    .. deprecated:: 1.13.3
        Use :meth:`face_sample` instead.

    Returns
    -------
    int
        The identifier of the face.

    """
    pool = list(self.faces())
    return choice(pool)
def vertex_sample(self, size=1):
    """Get the identifiers of a set of random vertices.

    Parameters
    ----------
    size : int, optional
        The size of the sample.

    Returns
    -------
    list[int]
        The identifiers of the vertices.

    """
    pool = list(self.vertices())
    return sample(pool, size)
def edge_sample(self, size=1):
    """Get the identifiers of a set of random edges.

    Parameters
    ----------
    size : int, optional
        The size of the sample.

    Returns
    -------
    list[tuple[int, int]]
        The identifiers of the edges.

    """
    pool = list(self.edges())
    return sample(pool, size)
def face_sample(self, size=1):
    """Get the identifiers of a set of random faces.

    Parameters
    ----------
    size : int, optional
        The size of the sample.

    Returns
    -------
    list[int]
        The identifiers of the faces.

    """
    pool = list(self.faces())
    return sample(pool, size)
def cell_sample(self, size=1):
    """Get the identifiers of a set of random cells.

    Parameters
    ----------
    size : int, optional
        The size of the sample.

    Returns
    -------
    list[int]
        The identifiers of the cells.

    """
    pool = list(self.cells())
    return sample(pool, size)
def vertex_index(self):
    """Map each vertex key to its position in the vertex iteration order.

    Returns
    -------
    dict[int, int]
        A dictionary of vertex-index pairs.

    """
    return {vertex: i for i, vertex in enumerate(self.vertices())}
def index_vertex(self):
    """Map each position in the vertex iteration order to its vertex key.

    Returns
    -------
    dict[int, int]
        A dictionary of index-vertex pairs.

    """
    return {i: vertex for i, vertex in enumerate(self.vertices())}
# --------------------------------------------------------------------------
# builders
# --------------------------------------------------------------------------
def add_vertex(self, key=None, attr_dict=None, **kwattr):
    """Add a vertex to the volmesh object.

    Parameters
    ----------
    key : int, optional
        The vertex identifier.
    attr_dict : dict[str, Any], optional
        dictionary of vertex attributes.
    **kwattr : dict[str, Any], optional
        A dictionary of additional attributes compiled of remaining named arguments.

    Returns
    -------
    int
        The identifier of the vertex.

    Notes
    -----
    If no key is provided for the vertex, one is generated
    automatically. An automatically generated key is an integer that increments
    the highest integer value of any key used so far by 1.
    If a key with an integer value is provided that is higher than the current
    highest integer key value, then the highest integer value is updated accordingly.
    """
    if key is None:
        key = self._max_vertex = self._max_vertex + 1
    # NOTE(review): this comparison happens before the int() cast below,
    # so non-int keys (e.g. "5") would raise here on Python 3 — confirm
    # whether non-int keys are ever passed.
    if key > self._max_vertex:
        self._max_vertex = key
    key = int(key)
    if key not in self._vertex:
        self._vertex[key] = {}
        self._plane[key] = {}
    # Attributes are merged onto any existing ones (re-adding a vertex
    # updates rather than replaces its attribute dict).
    attr = attr_dict or {}
    attr.update(kwattr)
    self._vertex[key].update(attr)
    return key
def add_halfface(self, vertices, fkey=None, attr_dict=None, **kwattr):
    """Add a face to the volmesh object.

    Parameters
    ----------
    vertices : list[int]
        A list of ordered vertex keys representing the face.
        For every vertex that does not yet exist, a new vertex is created.
    fkey : int, optional
        The face identifier.
    attr_dict : dict[str, Any], optional
        dictionary of halfface attributes.
    **kwattr : dict[str, Any], optional
        A dictionary of additional attributes compiled of remaining named arguments.

    Returns
    -------
    int
        The key of the face.

    Notes
    -----
    If no key is provided for the face, one is generated
    automatically. An automatically generated key is an integer that increments
    the highest integer value of any key used so far by 1.
    If a key with an integer value is provided that is higher than the current
    highest integer key value, then the highest integer value is updated accordingly.
    """
    # A face needs at least three vertices; silently returns None otherwise.
    if len(vertices) < 3:
        return
    # Accept closed polygons (first vertex repeated at the end).
    if vertices[-1] == vertices[0]:
        vertices = vertices[:-1]
    vertices = [int(key) for key in vertices]
    if fkey is None:
        fkey = self._max_face = self._max_face + 1
    if fkey > self._max_face:
        self._max_face = fkey
    fkey = int(fkey)
    attr = attr_dict or {}
    attr.update(kwattr)
    self._halfface[fkey] = vertices
    for name, value in attr.items():
        self.face_attribute(fkey, name, value)
    # Register every consecutive vertex triplet (u, v, w) of the face
    # boundary in the plane table; starting at -2 wraps around the end
    # so all cyclic triplets are covered.
    for i in range(-2, len(vertices) - 2):
        u = vertices[i]
        v = vertices[i + 1]
        w = vertices[i + 2]
        # Skip degenerate triplets caused by repeated vertices.
        if u == v or v == w:
            continue
        self.add_vertex(key=u)
        self.add_vertex(key=v)
        self.add_vertex(key=w)
        if v not in self._plane[u]:
            self._plane[u][v] = {}
        self._plane[u][v][w] = None
        # The reversed triplet is reserved (but not overwritten) for the
        # opposite halfface.
        if v not in self._plane[w]:
            self._plane[w][v] = {}
        if u not in self._plane[w][v]:
            self._plane[w][v][u] = None
    return fkey
def add_cell(self, faces, ckey=None, attr_dict=None, **kwattr):
    """Add a cell to the volmesh object.

    Parameters
    ----------
    faces : list[list[int]]
        The faces of the cell defined as lists of vertices.
    ckey : int, optional
        The cell identifier.
    attr_dict : dict[str, Any], optional
        A dictionary of cell attributes.
    **kwattr : dict[str, Any], optional
        A dictionary of additional attributes compiled of remaining named arguments.

    Returns
    -------
    int
        The key of the cell.

    Raises
    ------
    TypeError
        If the provided cell key is of an unhashable type.

    Notes
    -----
    If no key is provided for the cell, one is generated
    automatically. An automatically generated key is an integer that increments
    the highest integer value of any key used so far by 1.
    If a key with an integer value is provided that is higher than the current
    highest integer key value, then the highest integer value is updated accordingly.
    """
    if ckey is None:
        ckey = self._max_cell = self._max_cell + 1
    if ckey > self._max_cell:
        self._max_cell = ckey
    ckey = int(ckey)
    attr = attr_dict or {}
    attr.update(kwattr)
    self._cell[ckey] = {}
    for name, value in attr.items():
        self.cell_attribute(ckey, name, value)
    for vertices in faces:
        # Each face becomes a halfface owned by this cell.
        fkey = self.add_halfface(vertices)
        vertices = self.halfface_vertices(fkey)
        # Claim every cyclic (u, v, w) triplet for this cell and record
        # the halfface reachable from directed edge (u, v).
        for i in range(-2, len(vertices) - 2):
            u = vertices[i]
            v = vertices[i + 1]
            w = vertices[i + 2]
            if u not in self._cell[ckey]:
                self._cell[ckey][u] = {}
            self._plane[u][v][w] = ckey
            self._cell[ckey][u][v] = fkey
    return ckey
# --------------------------------------------------------------------------
# modifiers
# --------------------------------------------------------------------------
def delete_vertex(self, vertex):
    """Delete a vertex from the volmesh and everything that is attached to it.

    Parameters
    ----------
    vertex : int
        The identifier of the vertex.

    Returns
    -------
    None

    """
    # Deleting every incident cell removes the vertex itself as a side
    # effect (see delete_cell).
    # NOTE(review): assumes vertex_cells() returns a materialized list;
    # iterating a live view while deleting cells would be unsafe — confirm.
    for cell in self.vertex_cells(vertex):
        self.delete_cell(cell)
def delete_cell(self, cell):
    """Delete a cell from the volmesh.

    Parameters
    ----------
    cell : int
        The identifier of the cell.

    Returns
    -------
    None

    """
    cell_vertices = self.cell_vertices(cell)
    cell_faces = self.cell_faces(cell)
    # Drop per-edge attribute overrides for all edges of the cell's faces
    # (tuple keys, both directions).
    for face in cell_faces:
        for edge in self.halfface_halfedges(face):
            u, v = edge
            if (u, v) in self._edge_data:
                del self._edge_data[u, v]
            if (v, u) in self._edge_data:
                del self._edge_data[v, u]
    # Remove vertices that belong to no other cell.
    for vertex in cell_vertices:
        if len(self.vertex_cells(vertex)) == 1:
            del self._vertex[vertex]
    for face in cell_faces:
        vertices = self.halfface_vertices(face)
        # Release this cell's claim on every cyclic (u, v, w) triplet;
        # delete the plane entries entirely once neither direction is owned.
        for i in range(-2, len(vertices) - 2):
            u = vertices[i]
            v = vertices[i + 1]
            w = vertices[i + 2]
            self._plane[u][v][w] = None
            if self._plane[w][v][u] is None:
                del self._plane[u][v][w]
                del self._plane[w][v][u]
        del self._halfface[face]
        # Face data is keyed by the sorted-vertex string (geometric face,
        # shared by both halffaces).
        key = "-".join(map(str, sorted(vertices)))
        if key in self._face_data:
            del self._face_data[key]
    del self._cell[cell]
    if cell in self._cell_data:
        del self._cell_data[cell]
def remove_unused_vertices(self):
"""Remove all unused vertices from the volmesh object.
Returns
-------
None
"""
for vertex in list(self.vertices()):
if vertex not in self._plane:
del self._vertex[vertex]
else:
if not self._plane[vertex]:
del self._vertex[vertex]
del self._plane[vertex]
cull_vertices = remove_unused_vertices
# --------------------------------------------------------------------------
# accessors
# --------------------------------------------------------------------------
def vertices(self, data=False):
"""Iterate over the vertices of the volmesh.
Parameters
----------
data : bool, optional
If True, yield the vertex attributes in addition to the vertex identifiers.
Yields
------
int | tuple[int, dict[str, Any]]
If `data` is False, the next vertex identifier.
If `data` is True, the next vertex as a (vertex, attr) a tuple.
"""
for vertex in self._vertex:
if not data:
yield vertex
else:
yield vertex, self.vertex_attributes(vertex)
def edges(self, data=False):
"""Iterate over the edges of the volmesh.
Parameters
----------
data : bool, optional
If True, yield the edge attributes as well as the edge identifiers.
Yields
------
tuple[int, int] | tuple[tuple[int, int], dict[str, Any]]
If `data` is False, the next edge as a (u, v) tuple.
If `data` is True, the next edge as a ((u, v), attr) tuple.
"""
seen = set()
for face in self._halfface:
vertices = self._halfface[face]
for u, v in pairwise(vertices + vertices[:1]):
if (u, v) in seen or (v, u) in seen:
continue
seen.add((u, v))
seen.add((v, u))
if not data:
yield u, v
else:
yield (u, v), self.edge_attributes((u, v))
def halffaces(self, data=False):
"""Iterate over the halffaces of the volmesh.
Parameters
----------
data : bool, optional
If True, yield the half-face attributes in addition to half-face identifiers.
Yields
------
int | tuple[int, dict[str, Any]]
If `data` is False, the next halfface identifier.
If `data` is True, the next halfface as a (halfface, attr) tuple.
"""
for hface in self._halfface:
if not data:
yield hface
else:
yield hface, self.face_attributes(hface)
def faces(self, data=False):
""" "Iterate over the halffaces of the volmesh and yield faces.
Parameters
----------
data : bool, optional
If True, yield the face attributes in addition to the face identifiers.
Yields
------
int | tuple[int, dict[str, Any]]
If `data` is False, the next face identifier.
If `data` is True, the next face as a (face, attr) tuple.
Notes
-----
Volmesh faces have no topological meaning (analogous to an edge of a mesh).
They are typically used for geometric operations (i.e. planarisation).
Between the interface of two cells, there are two interior faces (one from each cell).
Only one of these two interior faces are returned as a "face".
The unique faces are found by comparing string versions of sorted vertex lists.
"""
seen = set()
faces = []
for face in self._halfface:
key = "-".join(map(str, sorted(self.halfface_vertices(face))))
if key not in seen:
seen.add(key)
faces.append(face)
for face in faces:
if not data:
yield face
else:
yield face, self.face_attributes(face)
def cells(self, data=False):
"""Iterate over the cells of the volmesh.
Parameters
----------
data : bool, optional
If True, yield the cell attributes in addition to the cell identifiers.
Yields
------
int | tuple[int, dict[str, Any]]
If `data` is False, the next cell identifier.
If `data` is True, the next cell as a (cell, attr) tuple.
"""
for cell in self._cell:
if not data:
yield cell
else:
yield cell, self.cell_attributes(cell)
def vertices_where(self, conditions=None, data=False, **kwargs):
"""Get vertices for which a certain condition or set of conditions is true.
Parameters
----------
conditions : dict, optional
A set of conditions in the form of key-value pairs.
The keys should be attribute names. The values can be attribute
values or ranges of attribute values in the form of min/max pairs.
data : bool, optional
If True, yield the vertex attributes in addition to the identifiers.
**kwargs : dict[str, Any], optional
Additional conditions provided as named function arguments.
Yields
------
int | tuple[int, dict[str, Any]]
If `data` is False, the next vertex that matches the condition.
If `data` is True, the next vertex and its attributes.
"""
conditions = conditions or {}
conditions.update(kwargs)
for key, attr in self.vertices(True):
is_match = True
for name, value in conditions.items():
method = getattr(self, name, None)
if callable(method):
val = method(key)
if isinstance(val, list):
if value not in val:
is_match = False
break
break
if isinstance(value, (tuple, list)):
minval, maxval = value
if val < minval or val > maxval:
is_match = False
break
else:
if value != val:
is_match = False
break
else:
if name not in attr:
is_match = False
break
if isinstance(attr[name], list):
if value not in attr[name]:
is_match = False
break
break
if isinstance(value, (tuple, list)):
minval, maxval = value
if attr[name] < minval or attr[name] > maxval:
is_match = False
break
else:
if value != attr[name]:
is_match = False
break
if is_match:
if data:
yield key, attr
else:
yield key
def vertices_where_predicate(self, predicate, data=False):
"""Get vertices for which a certain condition or set of conditions is true using a lambda function.
Parameters
----------
predicate : callable
The condition you want to evaluate.
The callable takes 2 parameters: the vertex identifier and the vertex attributes, and should return True or False.
data : bool, optional
If True, yield the vertex attributes in addition to the identifiers.
Yields
------
int | tuple[int, dict[str, Any]]
If `data` is False, the next vertex that matches the condition.
If `data` is True, the next vertex and its attributes.
"""
for key, attr in self.vertices(True):
if predicate(key, attr):
if data:
yield key, attr
else:
yield key
def edges_where(self, conditions=None, data=False, **kwargs):
"""Get edges for which a certain condition or set of conditions is true.
Parameters
----------
conditions : dict, optional
A set of conditions in the form of key-value pairs.
The keys should be attribute names. The values can be attribute
values or ranges of attribute values in the form of min/max pairs.
data : bool, optional
If True, yield the edge attributes in addition to the identifiers.
**kwargs : dict[str, Any], optional
Additional conditions provided as named function arguments.
Yields
------
tuple[int, int] | tuple[tuple[int, int], dict[str, Any]]
If `data` is False, the next edge as a (u, v) tuple.
If `data` is True, the next edge as a (u, v, data) tuple.
"""
conditions = conditions or {}
conditions.update(kwargs)
for key in self.edges():
is_match = True
attr = self.edge_attributes(key)
for name, value in conditions.items():
method = getattr(self, name, None)
if method and callable(method):
val = method(key)
elif name in attr:
val = attr[name]
else:
is_match = False
break
if isinstance(val, list):
if value not in val:
is_match = False
break
elif isinstance(value, (tuple, list)):
minval, maxval = value
if val < minval or val > maxval:
is_match = False
break
else:
if value != val:
is_match = False
break
if is_match:
if data:
yield key, attr
else:
yield key
def edges_where_predicate(self, predicate, data=False):
"""Get edges for which a certain condition or set of conditions is true using a lambda function.
Parameters
----------
predicate : callable
The condition you want to evaluate.
The callable takes 2 parameters: the edge identifier and the edge attributes, and should return True or False.
data : bool, optional
If True, yield the edge attributes in addition to the identifiers.
Yields
------
tuple[int, int] | tuple[tuple[int, int], dict[str, Any]]
If `data` is False, the next edge as a (u, v) tuple.
If `data` is True, the next edge as a (u, v, data) tuple.
"""
for key, attr in self.edges(True):
if predicate(key, attr):
if data:
yield key, attr
else:
yield key
def faces_where(self, conditions=None, data=False, **kwargs):
"""Get faces for which a certain condition or set of conditions is true.
Parameters
----------
conditions : dict, optional
A set of conditions in the form of key-value pairs.
The keys should be attribute names. The values can be attribute
values or ranges of attribute values in the form of min/max pairs.
data : bool, optional
If True, yield the face attributes in addition to the identifiers.
**kwargs : dict[str, Any], optional
Additional conditions provided as named function arguments.
Yields
------
int | tuple[int, dict[str, Any]]
If `data` is False, the next face that matches the condition.
If `data` is True, the next face and its attributes.
"""
conditions = conditions or {}
conditions.update(kwargs)
for fkey in self.faces():
is_match = True
attr = self.face_attributes(fkey)
for name, value in conditions.items():
method = getattr(self, name, None)
if method and callable(method):
val = method(fkey)
elif name in attr:
val = attr[name]
else:
is_match = False
break
if isinstance(val, list):
if value not in val:
is_match = False
break
elif isinstance(value, (tuple, list)):
minval, maxval = value
if val < minval or val > maxval:
is_match = False
break
else:
if value != val:
is_match = False
break
if is_match:
if data:
yield fkey, attr
else:
yield fkey
def faces_where_predicate(self, predicate, data=False):
"""Get faces for which a certain condition or set of conditions is true using a lambda function.
Parameters
----------
predicate : callable
The condition you want to evaluate.
The callable takes 2 parameters: the face identifier and the the face attributes, and should return True or False.
data : bool, optional
If True, yield the face attributes in addition to the identifiers.
Yields
------
int | tuple[int, dict[str, Any]]
If `data` is False, the next face that matches the condition.
If `data` is True, the next face and its attributes.
"""
for fkey, attr in self.faces(True):
if predicate(fkey, attr):
if data:
yield fkey, attr
else:
yield fkey
def cells_where(self, conditions=None, data=False, **kwargs):
"""Get cells for which a certain condition or set of conditions is true.
Parameters
----------
conditions : dict, optional
A set of conditions in the form of key-value pairs.
The keys should be attribute names. The values can be attribute
values or ranges of attribute values in the form of min/max pairs.
data : bool, optional
If True, yield the cell attributes in addition to the identifiers.
**kwargs : dict[str, Any], optional
Additional conditions provided as named function arguments.
Yields
------
int | tuple[int, dict[str, Any]]
If `data` is False, the next cell that matches the condition.
If `data` is True, the next cell and its attributes.
"""
conditions = conditions or {}
conditions.update(kwargs)
for ckey in self.cells():
is_match = True
attr = self.cell_attributes(ckey)
for name, value in conditions.items():
method = getattr(self, name, None)
if method and callable(method):
val = method(ckey)
elif name in attr:
val = attr[name]
else:
is_match = False
break
if isinstance(val, list):
if value not in val:
is_match = False
break
elif isinstance(value, (tuple, list)):
minval, maxval = value
if val < minval or val > maxval:
is_match = False
break
else:
if value != val:
is_match = False
break
if is_match:
if data:
yield ckey, attr
else:
yield ckey
def cells_where_predicate(self, predicate, data=False):
"""Get cells for which a certain condition or set of conditions is true using a lambda function.
Parameters
----------
predicate : callable
The condition you want to evaluate.
The callable takes 2 parameters: the cell identifier and the cell attributes, and should return True or False.
data : bool, optional
If True, yield the cell attributes in addition to the identifiers.
Yields
------
int | tuple[int, dict[str, Any]]
If `data` is False, the next cell that matches the condition.
If `data` is True, the next cell and its attributes.
"""
for ckey, attr in self.cells(True):
if predicate(ckey, attr):
if data:
yield ckey, attr
else:
yield ckey
# --------------------------------------------------------------------------
# attributes - vertices
# --------------------------------------------------------------------------
def update_default_vertex_attributes(self, attr_dict=None, **kwattr):
"""Update the default vertex attributes.
Parameters
----------
attr_dict : dict[str, Any], optional
A dictionary of attributes with their default values.
**kwattr : dict[str, Any], optional
A dictionary of additional attributes compiled of remaining named arguments.
Returns
-------
None
Notes
-----
Named arguments overwrite correpsonding name-value pairs in the attribute dictionary.
"""
if not attr_dict:
attr_dict = {}
attr_dict.update(kwattr)
self.default_vertex_attributes.update(attr_dict)
def vertex_attribute(self, vertex, name, value=None):
"""Get or set an attribute of a vertex.
Parameters
----------
vertex : int
The vertex identifier.
name : str
The name of the attribute
value : object, optional
The value of the attribute.
Returns
-------
object | None
The value of the attribute,
or None when the function is used as a "setter".
Raises
------
KeyError
If the vertex does not exist.
"""
if vertex not in self._vertex:
raise KeyError(vertex)
if value is not None:
self._vertex[vertex][name] = value
return None
if name in self._vertex[vertex]:
return self._vertex[vertex][name]
else:
if name in self.default_vertex_attributes:
return self.default_vertex_attributes[name]
def unset_vertex_attribute(self, vertex, name):
"""Unset the attribute of a vertex.
Parameters
----------
vertex : int
The vertex identifier.
name : str
The name of the attribute.
Returns
-------
None
Raises
------
KeyError
If the vertex does not exist.
Notes
-----
Unsetting the value of a vertex attribute implicitly sets it back to the value
stored in the default vertex attribute dict.
"""
if name in self._vertex[vertex]:
del self._vertex[vertex][name]
def vertex_attributes(self, vertex, names=None, values=None):
"""Get or set multiple attributes of a vertex.
Parameters
----------
vertex : int
The identifier of the vertex.
names : list[str], optional
A list of attribute names.
values : list[Any], optional
A list of attribute values.
Returns
-------
dict[str, Any] | list[Any] | None
If the parameter `names` is empty,
the function returns a dictionary of all attribute name-value pairs of the vertex.
If the parameter `names` is not empty,
the function returns a list of the values corresponding to the requested attribute names.
The function returns None if it is used as a "setter".
Raises
------
KeyError
If the vertex does not exist.
"""
if vertex not in self._vertex:
raise KeyError(vertex)
if values is not None:
# use it as a setter
for name, value in zip(names, values):
self._vertex[vertex][name] = value
return
# use it as a getter
if not names:
# return all vertex attributes as a dict
return VertexAttributeView(self.default_vertex_attributes, self._vertex[vertex])
values = []
for name in names:
if name in self._vertex[vertex]:
values.append(self._vertex[vertex][name])
elif name in self.default_vertex_attributes:
values.append(self.default_vertex_attributes[name])
else:
values.append(None)
return values
def vertices_attribute(self, name, value=None, keys=None):
"""Get or set an attribute of multiple vertices.
Parameters
----------
name : str
The name of the attribute.
value : object, optional
The value of the attribute.
Default is None.
keys : list[int], optional
A list of vertex identifiers.
Returns
-------
list[Any] | None
The value of the attribute for each vertex,
or None if the function is used as a "setter".
Raises
------
KeyError
If any of the vertices does not exist.
"""
vertices = keys or self.vertices()
if value is not None:
for vertex in vertices:
self.vertex_attribute(vertex, name, value)
return
return [self.vertex_attribute(vertex, name) for vertex in vertices]
def vertices_attributes(self, names=None, values=None, keys=None):
"""Get or set multiple attributes of multiple vertices.
Parameters
----------
names : list[str], optional
The names of the attribute.
Default is None.
values : list[Any], optional
The values of the attributes.
Default is None.
key : list[Any], optional
A list of vertex identifiers.
Returns
-------
list[dict[str, Any]] | list[list[Any]] | None
If the parameter `names` is empty,
the function returns a list containing an attribute dict per vertex.
If the parameter `names` is not empty,
the function returns a list containing a list of attribute values per vertex corresponding to the provided attribute names.
The function returns None if it is used as a "setter".
Raises
------
KeyError
If any of the vertices does not exist.
"""
vertices = keys or self.vertices()
if values:
for vertex in vertices:
self.vertex_attributes(vertex, names, values)
return
return [self.vertex_attributes(vertex, names) for vertex in vertices]
# --------------------------------------------------------------------------
# attributes - edges
# --------------------------------------------------------------------------
def update_default_edge_attributes(self, attr_dict=None, **kwattr):
"""Update the default edge attributes.
Parameters
----------
attr_dict : dict[str, Any], optional
A dictionary of attributes with their default values.
**kwattr : dict[str, Any], optional
A dictionary of additional attributes compiled of remaining named arguments.
Returns
-------
None
Notes
-----
Named arguments overwrite correpsonding key-value pairs in the attribute dictionary.
"""
if not attr_dict:
attr_dict = {}
attr_dict.update(kwattr)
self.default_edge_attributes.update(attr_dict)
def edge_attribute(self, edge, name, value=None):
"""Get or set an attribute of an edge.
Parameters
----------
edge : tuple[int, int]
The edge identifier.
name : str
The name of the attribute.
value : object, optional
The value of the attribute.
Returns
-------
object | None
The value of the attribute, or None when the function is used as a "setter".
Raises
------
KeyError
If the edge does not exist.
"""
u, v = edge
if u not in self._plane or v not in self._plane[u]:
raise KeyError(edge)
key = str(tuple(sorted(edge)))
if value is not None:
if key not in self._edge_data:
self._edge_data[key] = {}
self._edge_data[key][name] = value
return
if key in self._edge_data and name in self._edge_data[key]:
return self._edge_data[key][name]
if name in self.default_edge_attributes:
return self.default_edge_attributes[name]
def unset_edge_attribute(self, edge, name):
"""Unset the attribute of an edge.
Parameters
----------
edge : tuple[int, int]
The edge identifier.
name : str
The name of the attribute.
Raises
------
KeyError
If the edge does not exist.
Returns
-------
None
Notes
-----
Unsetting the value of an edge attribute implicitly sets it back to the value
stored in the default edge attribute dict.
"""
u, v = edge
if u not in self._plane or v not in self._plane[u]:
raise KeyError(edge)
key = str(tuple(sorted(edge)))
if key in self._edge_data and name in self._edge_data[key]:
del self._edge_data[key][name]
def edge_attributes(self, edge, names=None, values=None):
"""Get or set multiple attributes of an edge.
Parameters
----------
edge : tuple[int, int]
The identifier of the edge.
names : list[str], optional
A list of attribute names.
values : list[Any], optional
A list of attribute values.
Returns
-------
dict[str, Any] | list[Any] | None
If the parameter `names` is empty, a dictionary of all attribute name-value pairs of the edge.
If the parameter `names` is not empty, a list of the values corresponding to the provided names.
None if the function is used as a "setter".
Raises
------
KeyError
If the edge does not exist.
"""
u, v = edge
if u not in self._plane or v not in self._plane[u]:
raise KeyError(edge)
key = str(tuple(sorted(edge)))
if values:
for name, value in zip(names, values):
if key not in self._edge_data:
self._edge_data[key] = {}
self._edge_data[key][name] = value
return
if not names:
key = str(tuple(sorted(edge)))
return EdgeAttributeView(self.default_edge_attributes, self._edge_data.setdefault(key, {}))
values = []
for name in names:
value = self.edge_attribute(edge, name)
values.append(value)
return values
def edges_attribute(self, name, value=None, edges=None):
"""Get or set an attribute of multiple edges.
Parameters
----------
name : str
The name of the attribute.
value : object, optional
The value of the attribute.
Default is None.
edges : list[tuple[int, int]], optional
A list of edge identifiers.
Returns
-------
list[Any] | None
A list containing the value per edge of the requested attribute,
or None if the function is used as a "setter".
Raises
------
KeyError
If any of the edges does not exist.
"""
edges = edges or self.edges()
if value is not None:
for edge in edges:
self.edge_attribute(edge, name, value)
return
return [self.edge_attribute(edge, name) for edge in edges]
def edges_attributes(self, names=None, values=None, edges=None):
"""Get or set multiple attributes of multiple edges.
Parameters
----------
names : list[str], optional
The names of the attribute.
values : list[Any], optional
The values of the attributes.
edges : list[tuple[int, int]], optional
A list of edge identifiers.
Returns
-------
list[dict[str, Any]] | list[list[Any]] | None
If the parameter `names` is empty,
a list containing per edge an attribute dict with all attributes (default + custom) of the edge.
If the parameter `names` is not empty,
a list containing per edge a list of attribute values corresponding to the requested names.
None if the function is used as a "setter".
Raises
------
KeyError
If any of the edges does not exist.
"""
edges = edges or self.edges()
if values:
for edge in edges:
self.edge_attributes(edge, names, values)
return
return [self.edge_attributes(edge, names) for edge in edges]
# --------------------------------------------------------------------------
# face attributes
# --------------------------------------------------------------------------
def update_default_face_attributes(self, attr_dict=None, **kwattr):
"""Update the default face attributes.
Parameters
----------
attr_dict : dict[str, Any], optional
A dictionary of attributes with their default values.
**kwattr : dict[str, Any], optional
A dictionary of additional attributes compiled of remaining named arguments.
Returns
-------
None
Notes
-----
Named arguments overwrite correpsonding key-value pairs in the attribute dictionary.
"""
if not attr_dict:
attr_dict = {}
attr_dict.update(kwattr)
self.default_face_attributes.update(attr_dict)
def face_attribute(self, face, name, value=None):
"""Get or set an attribute of a face.
Parameters
----------
face : int
The face identifier.
name : str
The name of the attribute.
value : object, optional
The value of the attribute.
Returns
-------
object | None
The value of the attribute, or None when the function is used as a "setter".
Raises
------
KeyError
If the face does not exist.
"""
if face not in self._halfface:
raise KeyError(face)
key = str(tuple(sorted(self.halfface_vertices(face))))
if value is not None:
if key not in self._face_data:
self._face_data[key] = {}
self._face_data[key][name] = value
return
if key in self._face_data and name in self._face_data[key]:
return self._face_data[key][name]
if name in self.default_face_attributes:
return self.default_face_attributes[name]
def unset_face_attribute(self, face, name):
"""Unset the attribute of a face.
Parameters
----------
face : int
The face identifier.
name : str
The name of the attribute.
Raises
------
KeyError
If the face does not exist.
Returns
-------
None
Notes
-----
Unsetting the value of a face attribute implicitly sets it back to the value
stored in the default face attribute dict.
"""
if face not in self._halfface:
raise KeyError(face)
key = str(tuple(sorted(self.halfface_vertices(face))))
if key in self._face_data and name in self._face_data[key]:
del self._face_data[key][name]
def face_attributes(self, face, names=None, values=None):
"""Get or set multiple attributes of a face.
Parameters
----------
face : int
The identifier of the face.
names : list[str], optional
A list of attribute names.
values : list[Any], optional
A list of attribute values.
Returns
-------
dict[str, Any] | list[Any] | None
If the parameter `names` is empty, a dictionary of all attribute name-value pairs of the face.
If the parameter `names` is not empty, a list of the values corresponding to the provided names.
None if the function is used as a "setter".
Raises
------
KeyError
If the face does not exist.
"""
if face not in self._halfface:
raise KeyError(face)
key = str(tuple(sorted(self.halfface_vertices(face))))
if values:
for name, value in zip(names, values):
if key not in self._face_data:
self._face_data[key] = {}
self._face_data[key][name] = value
return
if not names:
return FaceAttributeView(self.default_face_attributes, self._face_data.setdefault(key, {}))
values = []
for name in names:
value = self.face_attribute(face, name)
values.append(value)
return values
def faces_attribute(self, name, value=None, faces=None):
"""Get or set an attribute of multiple faces.
Parameters
----------
name : str
The name of the attribute.
value : object, optional
The value of the attribute.
Default is None.
faces : list[int], optional
A list of face identifiers.
Returns
-------
list[Any] | None
A list containing the value per face of the requested attribute,
or None if the function is used as a "setter".
Raises
------
KeyError
If any of the faces does not exist.
"""
faces = faces or self.faces()
if value is not None:
for face in faces:
self.face_attribute(face, name, value)
return
return [self.face_attribute(face, name) for face in faces]
def faces_attributes(self, names=None, values=None, faces=None):
"""Get or set multiple attributes of multiple faces.
Parameters
----------
names : list[str], optional
The names of the attribute.
Default is None.
values : list[Any], optional
The values of the attributes.
Default is None.
faces : list[int], optional
A list of face identifiers.
Returns
-------
list[dict[str, Any]] | list[list[Any]] | None
If the parameter `names` is empty,
a list containing per face an attribute dict with all attributes (default + custom) of the face.
If the parameter `names` is not empty,
a list containing per face a list of attribute values corresponding to the requested names.
None if the function is used as a "setter".
Raises
------
KeyError
If any of the faces does not exist.
"""
faces = faces or self.faces()
if values:
for face in faces:
self.face_attributes(face, names, values)
return
return [self.face_attributes(face, names) for face in faces]
# --------------------------------------------------------------------------
# attributes - cell
# --------------------------------------------------------------------------
def update_default_cell_attributes(self, attr_dict=None, **kwattr):
"""Update the default cell attributes.
Parameters
----------
attr_dict : dict[str, Any], optional
A dictionary of attributes with their default values.
**kwattr : dict[str, Any], optional
A dictionary of additional attributes compiled of remaining named arguments.
Returns
-------
None
Notes
----
Named arguments overwrite corresponding cell-value pairs in the attribute dictionary.
"""
if not attr_dict:
attr_dict = {}
attr_dict.update(kwattr)
self.default_cell_attributes.update(attr_dict)
def cell_attribute(self, cell, name, value=None):
"""Get or set an attribute of a cell.
Parameters
----------
cell : int
The cell identifier.
name : str
The name of the attribute.
value : object, optional
The value of the attribute.
Returns
-------
object | None
The value of the attribute, or None when the function is used as a "setter".
Raises
------
KeyError
If the cell does not exist.
"""
if cell not in self._cell:
raise KeyError(cell)
if value is not None:
if cell not in self._cell_data:
self._cell_data[cell] = {}
self._cell_data[cell][name] = value
return
if cell in self._cell_data and name in self._cell_data[cell]:
return self._cell_data[cell][name]
if name in self.default_cell_attributes:
return self.default_cell_attributes[name]
def unset_cell_attribute(self, cell, name):
"""Unset the attribute of a cell.
Parameters
----------
cell : int
The cell identifier.
name : str
The name of the attribute.
Returns
-------
None
Raises
------
KeyError
If the cell does not exist.
Notes
-----
Unsetting the value of a cell attribute implicitly sets it back to the value
stored in the default cell attribute dict.
"""
if cell not in self._cell:
raise KeyError(cell)
if cell in self._cell_data:
if name in self._cell_data[cell]:
del self._cell_data[cell][name]
def cell_attributes(self, cell, names=None, values=None):
"""Get or set multiple attributes of a cell.
Parameters
----------
cell : int
The identifier of the cell.
names : list[str], optional
A list of attribute names.
values : list[Any], optional
A list of attribute values.
Returns
-------
dict[str, Any] | list[Any] | None
If the parameter `names` is empty, a dictionary of all attribute name-value pairs of the cell.
If the parameter `names` is not empty, a list of the values corresponding to the provided names.
None if the function is used as a "setter".
Raises
------
KeyError
If the cell does not exist.
"""
if cell not in self._cell:
raise KeyError(cell)
if values is not None:
for name, value in zip(names, values):
if cell not in self._cell_data:
self._cell_data[cell] = {}
self._cell_data[cell][name] = value
return
if not names:
return CellAttributeView(self.default_cell_attributes, self._cell_data.setdefault(cell, {}))
values = []
for name in names:
value = self.cell_attribute(cell, name)
values.append(value)
return values
def cells_attribute(self, name, value=None, cells=None):
"""Get or set an attribute of multiple cells.
Parameters
----------
name : str
The name of the attribute.
value : object, optional
The value of the attribute.
cells : list[int], optional
A list of cell identifiers.
Returns
-------
list[Any] | None
A list containing the value per face of the requested attribute,
or None if the function is used as a "setter".
Raises
------
KeyError
If any of the cells does not exist.
"""
if not cells:
cells = self.cells()
if value is not None:
for cell in cells:
self.cell_attribute(cell, name, value)
return
return [self.cell_attribute(cell, name) for cell in cells]
def cells_attributes(self, names=None, values=None, cells=None):
"""Get or set multiple attributes of multiple cells.
Parameters
----------
names : list[str], optional
The names of the attribute.
Default is None.
values : list[Any], optional
The values of the attributes.
Default is None.
cells : list[int], optional
A list of cell identifiers.
Returns
-------
list[dict[str, Any]] | list[list[Any]] | None
If the parameter `names` is empty,
a list containing per cell an attribute dict with all attributes (default + custom) of the cell.
If the parameter `names` is empty,
a list containing per cell a list of attribute values corresponding to the requested names.
None if the function is used as a "setter".
Raises
------
KeyError
If any of the faces does not exist.
"""
if not cells:
cells = self.cells()
if values is not None:
for cell in cells:
self.cell_attributes(cell, names, values)
return
return [self.cell_attributes(cell, names) for cell in cells]
# --------------------------------------------------------------------------
# volmesh info
# --------------------------------------------------------------------------
def number_of_vertices(self):
"""Count the number of vertices in the volmesh.
Returns
-------
int
The number of vertices.
"""
return len(list(self.vertices()))
def number_of_edges(self):
"""Count the number of edges in the volmesh.
Returns
-------
int
The number of edges.
"""
return len(list(self.edges()))
def number_of_faces(self):
"""Count the number of faces in the volmesh.
Returns
-------
int
The number of faces.
"""
return len(list(self.faces()))
def number_of_cells(self):
"""Count the number of faces in the volmesh.
Returns
-------
int
The number of cells.
"""
return len(list(self.cells()))
def is_valid(self):
NotImplementedError
# --------------------------------------------------------------------------
# vertex topology
# --------------------------------------------------------------------------
def has_vertex(self, vertex):
    """Verify that a vertex is in the volmesh.

    Parameters
    ----------
    vertex : int
        The identifier of the vertex.

    Returns
    -------
    bool
        True if the vertex is in the volmesh.
        False otherwise.
    """
    # Vertices are the keys of the internal vertex dict.
    exists = vertex in self._vertex
    return exists
def vertex_neighbors(self, vertex):
    """Return the vertex neighbors of a vertex.

    Parameters
    ----------
    vertex : int
        The identifier of the vertex.

    Returns
    -------
    list[int]
        The list of neighboring vertices.
    """
    # Materialize the keys view so callers get the documented list
    # (dict views are not indexable; `cell_vertex_neighbors` indexes the result).
    return list(self._plane[vertex].keys())
def vertex_neighborhood(self, vertex, ring=1):
    """Return the vertices in the neighborhood of a vertex.

    Parameters
    ----------
    vertex : int
        The identifier of the vertex.
    ring : int, optional
        The number of neighborhood rings to include.

    Returns
    -------
    list[int]
        The vertices in the neighborhood.

    Notes
    -----
    The vertices in the neighborhood are unordered.
    """
    nbrs = set(self.vertex_neighbors(vertex))
    # Expand one ring at a time. The original `while` loop compared a
    # counter starting at 1 against `ring` and never terminated for ring < 1.
    for _ in range(ring - 1):
        found = []
        for nbr in nbrs:
            found += self.vertex_neighbors(nbr)
        nbrs.update(found)
    return list(nbrs - {vertex})
def vertex_degree(self, vertex):
    """Count the neighbors of a vertex.

    Parameters
    ----------
    vertex : int
        The identifier of the vertex.

    Returns
    -------
    int
        The degree of the vertex.
    """
    return sum(1 for _ in self.vertex_neighbors(vertex))
def vertex_min_degree(self):
    """Compute the minimum degree of all vertices.

    Returns
    -------
    int
        The lowest degree of all vertices; 0 for an empty volmesh.
    """
    if not self._vertex:
        return 0
    return min(map(self.vertex_degree, self.vertices()))
def vertex_max_degree(self):
    """Compute the maximum degree of all vertices.

    Returns
    -------
    int
        The highest degree of all vertices; 0 for an empty volmesh.
    """
    if not self._vertex:
        return 0
    return max(map(self.vertex_degree, self.vertices()))
def vertex_halffaces(self, vertex):
    """Return all halffaces connected to a vertex.

    Parameters
    ----------
    vertex : int
        The identifier of the vertex.

    Returns
    -------
    list[int]
        The list of halffaces connected to a vertex.
    """
    cells = self.vertex_cells(vertex)
    nbrs = self.vertex_neighbors(vertex)
    halffaces = set()
    for cell in cells:
        for nbr in nbrs:
            if nbr in self._cell[cell][vertex]:
                # collect both orientations of the face between vertex and nbr
                halffaces.add(self._cell[cell][vertex][nbr])
                halffaces.add(self._cell[cell][nbr][vertex])
    return list(halffaces)
def vertex_cells(self, vertex):
    """Return all cells connected to a vertex.

    Parameters
    ----------
    vertex : int
        The identifier of the vertex.

    Returns
    -------
    list[int]
        The list of cells connected to a vertex.
    """
    # _plane[u][v][w] maps an oriented plane to a cell (or None on the boundary).
    found = {cell for planes in self._plane[vertex].values() for cell in planes.values() if cell is not None}
    return list(found)
def is_vertex_on_boundary(self, vertex):
    """Verify that a vertex is on a boundary.

    Parameters
    ----------
    vertex : int
        The identifier of the vertex.

    Returns
    -------
    bool
        True if the vertex is on the boundary.
        False otherwise.
    """
    # A vertex is on the boundary if any of its halffaces is.
    return any(self.is_halfface_on_boundary(halfface) for halfface in self.vertex_halffaces(vertex))
# --------------------------------------------------------------------------
# edge topology
# --------------------------------------------------------------------------
def has_edge(self, edge):
    """Verify that the volmesh contains a directed edge (u, v).

    Parameters
    ----------
    edge : tuple[int, int]
        The identifier of the edge.

    Returns
    -------
    bool
        True if the edge exists.
        False otherwise.
    """
    # Stream over the edges instead of materializing them all in a set
    # for a single membership test.
    return any(e == edge for e in self.edges())
def edge_halffaces(self, edge):
    """Ordered halffaces around edge (u, v).

    Parameters
    ----------
    edge : tuple[int, int]
        The identifier of the edge.

    Returns
    -------
    list[int]
        Ordered list of halfface identifiers.
    """
    u, v = edge
    # cells incident to the edge (boundary planes map to None and are skipped)
    cells = [cell for cell in self._plane[u][v].values() if cell is not None]
    cell = cells[0]
    halffaces = []
    if self.is_edge_on_boundary(edge):
        # start the walk at a boundary halfface so the ordered fan covers all cells
        for cell in cells:
            halfface = self._cell[cell][v][u]
            if self.is_halfface_on_boundary(halfface):
                break
    for i in range(len(cells)):
        halfface = self._cell[cell][u][v]
        # step to the next cell around the edge via the plane (w, v, u)
        w = self.halfface_vertex_descendent(halfface, v)
        cell = self._plane[w][v][u]
        halffaces.append(halfface)
    return halffaces
def edge_cells(self, edge):
    """Ordered cells around edge (u, v).

    Parameters
    ----------
    edge : tuple[int, int]
        The identifier of the edge.

    Returns
    -------
    list[int]
        Ordered list of keys identifying the ordered cells.
    """
    return [self.halfface_cell(halfface) for halfface in self.edge_halffaces(edge)]
def is_edge_on_boundary(self, edge):
    """Verify that an edge is on the boundary.

    Parameters
    ----------
    edge : tuple[int, int]
        The identifier of the edge.

    Returns
    -------
    bool
        True if the edge is on the boundary.
        False otherwise.

    Notes
    -----
    This simply checks whether one of the planes through the edge has no
    adjacent cell; the direction u-v does not matter.
    """
    start, end = edge
    return any(cell is None for cell in self._plane[start][end].values())
# --------------------------------------------------------------------------
# halfface topology
# --------------------------------------------------------------------------
def has_halfface(self, halfface):
    """Verify that a face is part of the volmesh.

    Parameters
    ----------
    halfface : int
        The identifier of the halfface.

    Returns
    -------
    bool
        True if the face exists.
        False otherwise.
    """
    # Halffaces are the keys of the internal halfface dict.
    exists = halfface in self._halfface
    return exists
def halfface_vertices(self, halfface):
    """The vertices of a halfface.

    Parameters
    ----------
    halfface : int
        The identifier of the halfface.

    Returns
    -------
    list[int]
        Ordered vertex identifiers.
    """
    vertices = self._halfface[halfface]
    return vertices
def halfface_halfedges(self, halfface):
    """The halfedges of a halfface.

    Parameters
    ----------
    halfface : int
        The identifier of the halfface.

    Returns
    -------
    list[tuple[int, int]]
        The halfedges of a halfface, in cycle order, closing back to the start.
    """
    vertices = self.halfface_vertices(halfface)
    # pair each vertex with its successor, wrapping around to the first
    return list(zip(vertices, vertices[1:] + vertices[:1]))
def halfface_cell(self, halfface):
    """The cell to which the halfface belongs to.

    Parameters
    ----------
    halfface : int
        The identifier of the halfface.

    Returns
    -------
    int
        Identifier of the cell.
    """
    # The first three vertices orient the plane that identifies the cell.
    first, second, third = self._halfface[halfface][:3]
    return self._plane[first][second][third]
def halfface_opposite_cell(self, halfface):
    """The cell to which the opposite halfface belongs to.

    Parameters
    ----------
    halfface : int
        The identifier of the halfface.

    Returns
    -------
    int
        Identifier of the cell.
    """
    # The reversed plane of the first three vertices identifies the opposite cell.
    first, second, third = self._halfface[halfface][:3]
    return self._plane[third][second][first]
def halfface_opposite_halfface(self, halfface):
    """The opposite face of a face.

    Parameters
    ----------
    halfface : int
        The identifier of the halfface.

    Returns
    -------
    int | None
        Identifier of the opposite face, or None for a boundary face.

    Notes
    -----
    A face and its opposite face share the same vertices, but in reverse order.
    """
    u, v, w = self._halfface[halfface][:3]
    nbr_cell = self._plane[w][v][u]
    # boundary face: no cell on the other side, hence no opposite face
    return None if nbr_cell is None else self._cell[nbr_cell][v][u]
def halfface_adjacent_halfface(self, halfface, halfedge):
    """Return the halfface adjacent to the halfface across the halfedge.

    Parameters
    ----------
    halfface : int
        The identifier of the halfface.
    halfedge : tuple[int, int]
        The identifier of the halfedge.

    Returns
    -------
    int | None
        The identifier of the adjacent half-face, or None if `halfedge` is on the boundary.

    Notes
    -----
    The adjacent face belongs to one of the cell neighbors over faces of the initial cell.
    A face and its adjacent face share two common vertices.
    """
    u, v = halfedge
    cell = self.halfface_cell(halfface)
    nbr_halfface = self._cell[cell][v][u]
    # Bug fix: the original called the nonexistent `face_vertex_ancestor`,
    # raising AttributeError; the method is `halfface_vertex_ancestor`.
    w = self.halfface_vertex_ancestor(nbr_halfface, v)
    nbr_cell = self._plane[u][v][w]
    if nbr_cell is None:
        return None
    return self._cell[nbr_cell][v][u]
def halfface_vertex_ancestor(self, halfface, vertex):
    """Return the vertex before the specified vertex in a specific face.

    Parameters
    ----------
    halfface : int
        The identifier of the halfface.
    vertex : int
        The identifier of the vertex.

    Returns
    -------
    int
        The identifier of the vertex before the given vertex in the face cycle.

    Raises
    ------
    ValueError
        If the vertex is not part of the face.
    """
    cycle = self._halfface[halfface]
    # index -1 wraps around to the last vertex of the cycle
    return cycle[cycle.index(vertex) - 1]
def halfface_vertex_descendent(self, halfface, vertex):
    """Return the vertex after the specified vertex in a specific face.

    Parameters
    ----------
    halfface : int
        The identifier of the halfface.
    vertex : int
        The identifier of the vertex.

    Returns
    -------
    int
        The identifier of the vertex after the given vertex in the face cycle.

    Raises
    ------
    ValueError
        If the vertex is not part of the face.
    """
    cycle = self._halfface[halfface]
    # modular successor wraps from the last vertex back to the first
    i = cycle.index(vertex)
    return cycle[(i + 1) % len(cycle)]
def halfface_manifold_neighbors(self, halfface):
    """Return the halffaces adjacent to a halfface across its halfedges.

    Parameters
    ----------
    halfface : int
        The identifier of the halfface.

    Returns
    -------
    list[int]
        The identifiers of the adjacent halffaces.

    Notes
    -----
    Only halffaces of neighboring cells are collected; halffaces of the
    same cell are excluded.
    """
    nbrs = []
    cell = self.halfface_cell(halfface)
    for u, v in self.halfface_halfedges(halfface):
        # halfface of the same cell on the other side of edge (u, v)
        nbr_halfface = self._cell[cell][v][u]
        w = self.halfface_vertex_ancestor(nbr_halfface, v)
        # cell across the oriented plane (u, v, w); None on the boundary
        nbr_cell = self._plane[u][v][w]
        if nbr_cell is not None:
            nbr = self._cell[nbr_cell][v][u]
            nbrs.append(nbr)
    return nbrs
def halfface_manifold_neighborhood(self, hfkey, ring=1):
    """Return the halfface neighborhood of a halfface across their edges.

    Parameters
    ----------
    hfkey : int
        The identifier of the halfface.
    ring : int, optional
        The number of neighborhood rings to include.

    Returns
    -------
    list[int]
        The list of neighboring halffaces.

    Notes
    -----
    Neighboring halffaces on the same cell are not included.
    """
    nbrs = set(self.halfface_manifold_neighbors(hfkey))
    # Expand one ring at a time. The original `while` loop compared a
    # counter starting at 1 against `ring` and never terminated for ring < 1.
    for _ in range(ring - 1):
        found = []
        for nbr_hfkey in nbrs:
            found += self.halfface_manifold_neighbors(nbr_hfkey)
        nbrs.update(found)
    return list(nbrs - {hfkey})
def is_halfface_on_boundary(self, halfface):
    """Verify that a face is on the boundary.

    Parameters
    ----------
    halfface : int
        The identifier of the halfface.

    Returns
    -------
    bool
        True if the face is on the boundary.
        False otherwise.
    """
    # A face is on the boundary when the reversed plane has no adjacent cell.
    first, second, third = self._halfface[halfface][:3]
    return self._plane[third][second][first] is None
# --------------------------------------------------------------------------
# cell topology
# --------------------------------------------------------------------------
def cell_vertices(self, cell):
    """The vertices of a cell.

    Parameters
    ----------
    cell : int
        Identifier of the cell.

    Returns
    -------
    list[int]
        The vertex identifiers of a cell.

    Notes
    -----
    This method is similar to :meth:`~compas.datastructures.HalfEdge.vertices`,
    but in the context of a cell of the `VolMesh`.
    """
    seen = set()
    for face in self.cell_faces(cell):
        seen.update(self.halfface_vertices(face))
    return list(seen)
def cell_halfedges(self, cell):
    """The halfedges of a cell.

    Parameters
    ----------
    cell : int
        Identifier of the cell.

    Returns
    -------
    list[tuple[int, int]]
        The halfedges of a cell.

    Notes
    -----
    This method is similar to :meth:`~compas.datastructures.HalfEdge.halfedges`,
    but in the context of a cell of the `VolMesh`.
    """
    return [halfedge for face in self.cell_faces(cell) for halfedge in self.halfface_halfedges(face)]
def cell_edges(self, cell):
    """Return all edges of a cell.

    Parameters
    ----------
    cell : int
        The cell identifier.

    Returns
    -------
    list[tuple[int, int]]
        The edges of the cell.

    Raises
    ------
    NotImplementedError
        Always; this method has not been implemented yet.

    Notes
    -----
    This method is similar to :meth:`~compas.datastructures.HalfEdge.edges`,
    but in the context of a cell of the `VolMesh`.
    """
    raise NotImplementedError
def cell_faces(self, cell):
    """The faces of a cell.

    Parameters
    ----------
    cell : int
        Identifier of the cell.

    Returns
    -------
    list[int]
        The faces of a cell.

    Notes
    -----
    This method is similar to :meth:`~compas.datastructures.HalfEdge.faces`,
    but in the context of a cell of the `VolMesh`.
    """
    # _cell[cell][u][v] maps each directed edge of the cell to a face.
    found = set()
    for halffaces in self._cell[cell].values():
        found.update(halffaces.values())
    return list(found)
def cell_vertex_neighbors(self, cell, vertex):
    """Ordered vertex neighbors of a vertex of a cell.

    Parameters
    ----------
    cell : int
        Identifier of the cell.
    vertex : int
        Identifier of the vertex.

    Returns
    -------
    list[int]
        The list of neighboring vertices.

    Raises
    ------
    KeyError
        If the vertex is not part of the cell.

    Notes
    -----
    All of the returned vertices are part of the cell.
    This method is similar to :meth:`~compas.datastructures.HalfEdge.vertex_neighbors`,
    but in the context of a cell of the `VolMesh`.
    """
    if vertex not in self.cell_vertices(cell):
        raise KeyError(vertex)
    # Materialize the keys so the first neighbor can be picked by index
    # (dict views are not indexable in Python 3).
    nbr_vertices = list(self._cell[cell][vertex].keys())
    v = nbr_vertices[0]
    ordered_vkeys = [v]
    for i in range(len(nbr_vertices) - 1):
        # walk the fan of faces around the vertex to order the neighbors
        face = self._cell[cell][vertex][v]
        v = self.halfface_vertex_ancestor(face, vertex)
        ordered_vkeys.append(v)
    return ordered_vkeys
def cell_vertex_faces(self, cell, vertex):
    """Ordered faces connected to a vertex of a cell.

    Parameters
    ----------
    cell : int
        Identifier of the cell.
    vertex : int
        Identifier of the vertex.

    Returns
    -------
    list[int]
        The ordered list of faces connected to a vertex of a cell.

    Notes
    -----
    All of the returned faces are part of the same cell.
    This method is similar to :meth:`~compas.datastructures.HalfEdge.vertex_faces`,
    but in the context of a cell of the `VolMesh`.
    """
    # Materialize the keys so the first neighbor can be picked by index
    # (dict views are not indexable in Python 3).
    nbr_vertices = list(self._cell[cell][vertex].keys())
    u = vertex
    v = nbr_vertices[0]
    ordered_faces = []
    for i in range(len(nbr_vertices)):
        # walk the fan of faces around the vertex, one face per neighbor
        face = self._cell[cell][u][v]
        v = self.halfface_vertex_ancestor(face, u)
        ordered_faces.append(face)
    return ordered_faces
def cell_halfedge_face(self, cell, halfedge):
    """Find the face corresponding to a specific halfedge of a cell.

    Parameters
    ----------
    cell : int
        The identifier of the cell.
    halfedge : tuple[int, int]
        The identifier of the halfedge.

    Returns
    -------
    int
        The identifier of the face.

    Notes
    -----
    This method is similar to :meth:`~compas.datastructures.HalfEdge.halfedge_face`,
    but in the context of a cell of the `VolMesh`.
    """
    start, end = halfedge
    return self._cell[cell][start][end]
def cell_halfedge_opposite_face(self, cell, halfedge):
    """Find the opposite face corresponding to a specific halfedge of a cell.

    Parameters
    ----------
    cell : int
        The identifier of the cell.
    halfedge : tuple[int, int]
        The identifier of the halfedge.

    Returns
    -------
    int
        The identifier of the face.
    """
    # The opposite face is the one attached to the reversed halfedge.
    start, end = halfedge
    return self._cell[cell][end][start]
def cell_face_neighbors(self, cell, face):
    """Find the faces adjacent to a given face of a cell.

    Parameters
    ----------
    cell : int
        The identifier of the cell.
    face : int
        The identifier of the face.

    Returns
    -------
    list[int]
        The identifiers of the adjacent faces.

    Notes
    -----
    This method is similar to :meth:`~compas.datastructures.HalfEdge.face_neighbors`,
    but in the context of a cell of the `VolMesh`.
    """
    # A neighbor exists across every halfedge that has an opposite face.
    candidates = (self.cell_halfedge_opposite_face(cell, halfedge) for halfedge in self.halfface_halfedges(face))
    return [nbr for nbr in candidates if nbr is not None]
def cell_neighbors(self, cell):
    """Find the neighbors of a given cell.

    Parameters
    ----------
    cell : int
        The identifier of the cell.

    Returns
    -------
    list[int]
        The identifiers of the adjacent cells.
    """
    # A neighbor exists across every face that has an opposite cell.
    candidates = (self.halfface_opposite_cell(face) for face in self.cell_faces(cell))
    return [nbr for nbr in candidates if nbr is not None]
def is_cell_on_boundary(self, cell):
    """Verify that a cell is on the boundary.

    Parameters
    ----------
    cell : int
        Identifier of the cell.

    Returns
    -------
    bool
        True if the cell is on the boundary.
        False otherwise.
    """
    # A cell is on the boundary if at least one of its faces is.
    return any(self.is_halfface_on_boundary(face) for face in self.cell_faces(cell))
# --------------------------------------------------------------------------
# boundary
# --------------------------------------------------------------------------
def vertices_on_boundaries(self):
    """Find the vertices on the boundary.

    Returns
    -------
    list[int]
        The vertices of the boundary.
    """
    # Collect the vertices of every boundary halfface, without duplicates.
    found = {
        vertex
        for face in self._halfface
        if self.is_halfface_on_boundary(face)
        for vertex in self.halfface_vertices(face)
    }
    return list(found)
def halffaces_on_boundaries(self):
    """Find the faces on the boundary.

    Returns
    -------
    list[int]
        The faces of the boundary.
    """
    return list({face for face in self._halfface if self.is_halfface_on_boundary(face)})
def cells_on_boundaries(self):
    """Find the cells on the boundary.

    Returns
    -------
    list[int]
        The cells of the boundary.
    """
    # Every boundary halfface identifies the (single) cell it belongs to.
    cells = set()
    for face in self.halffaces_on_boundaries():
        cells.add(self.halfface_cell(face))
return list(cells) | PypiClean |
/Clone-ChatGPT-1.3.0.tar.gz/Clone-ChatGPT-1.3.0/src/pandora/flask/static/_next/static/chunks/174-bd28069f281ef76f.js | (self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[174],{63561:function(e,t){"use strict";t.Z=function(e,t,i){return t in e?Object.defineProperty(e,t,{value:i,enumerable:!0,configurable:!0,writable:!0}):e[t]=i,e}},68561:function(e,t,i){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var n=i(63561).Z,o=i(95781).Z,a=i(89478).Z;Object.defineProperty(t,"__esModule",{value:!0}),t.default=function(e){var t,i,l=e.src,c=e.sizes,h=e.unoptimized,p=void 0!==h&&h,w=e.priority,k=void 0!==w&&w,E=e.loading,I=e.lazyRoot,R=e.lazyBoundary,_=e.className,L=e.quality,q=e.width,C=e.height,O=e.style,N=e.objectFit,P=e.objectPosition,W=e.onLoadingComplete,B=e.placeholder,M=void 0===B?"empty":B,Z=e.blurDataURL,D=s(e,["src","sizes","unoptimized","priority","loading","lazyRoot","lazyBoundary","className","quality","width","height","style","objectFit","objectPosition","onLoadingComplete","placeholder","blurDataURL"]),U=d.useContext(m.ImageConfigContext),V=d.useMemo(function(){var e=y||U||f.imageConfigDefault,t=a(e.deviceSizes).concat(a(e.imageSizes)).sort(function(e,t){return e-t}),i=e.deviceSizes.sort(function(e,t){return e-t});return r({},e,{allSizes:t,deviceSizes:i})},[U]),F=c?"responsive":"intrinsic";"layout"in D&&(D.layout&&(F=D.layout),delete D.layout);var H=x;if("loader"in D){if(D.loader){var G=D.loader;H=function(e){e.config;var t=s(e,["config"]);return G(t)}}delete D.loader}var T="";if(function(e){var t;return"object"==typeof e&&(z(e)||void 0!==e.src)}(l)){var J=z(l)?l.default:l;if(!J.src)throw Error("An object should only be passed to the image component src parameter if it comes from a static image import. It must include src. 
Received ".concat(JSON.stringify(J)));if(Z=Z||J.blurDataURL,T=J.src,(!F||"fill"!==F)&&(C=C||J.height,q=q||J.width,!J.height||!J.width))throw Error("An object should only be passed to the image component src parameter if it comes from a static image import. It must include height and width. Received ".concat(JSON.stringify(J)))}l="string"==typeof l?l:T;var Q=!k&&("lazy"===E||void 0===E);(l.startsWith("data:")||l.startsWith("blob:"))&&(p=!0,Q=!1),b.has(l)&&(Q=!1),V.unoptimized&&(p=!0);var K=o(d.useState(!1),2),X=K[0],Y=K[1],$=o(g.useIntersection({rootRef:void 0===I?null:I,rootMargin:R||"200px",disabled:!Q}),3),ee=$[0],et=$[1],ei=$[2],en=!Q||et,eo={boxSizing:"border-box",display:"block",overflow:"hidden",width:"initial",height:"initial",background:"none",opacity:1,border:0,margin:0,padding:0},ea={boxSizing:"border-box",display:"block",width:"initial",height:"initial",background:"none",opacity:1,border:0,margin:0,padding:0},er=!1,el=A(q),ec=A(C),es=A(L),ed=Object.assign({},O,{position:"absolute",top:0,left:0,bottom:0,right:0,boxSizing:"border-box",padding:0,border:"none",margin:"auto",display:"block",width:0,height:0,minWidth:"100%",maxWidth:"100%",minHeight:"100%",maxHeight:"100%",objectFit:N,objectPosition:P}),eu="blur"!==M||X?{}:{backgroundSize:N||"cover",backgroundPosition:P||"0% 0%",filter:"blur(20px)",backgroundImage:'url("'.concat(Z,'")')};if("fill"===F)eo.display="block",eo.position="absolute",eo.top=0,eo.left=0,eo.bottom=0,eo.right=0;else if(void 0!==el&&void 0!==ec){var ef=ec/el,eg=isNaN(ef)?"100%":"".concat(100*ef,"%");"responsive"===F?(eo.display="block",eo.position="relative",er=!0,ea.paddingTop=eg):"intrinsic"===F?(eo.display="inline-block",eo.position="relative",eo.maxWidth="100%",er=!0,ea.maxWidth="100%",t="data:image/svg+xml,%3csvg%20xmlns=%27http://www.w3.org/2000/svg%27%20version=%271.1%27%20width=%27".concat(el,"%27%20height=%27").concat(ec,"%27/%3e")):"fixed"===F&&(eo.display="inline-block",eo.position="relative",eo.width=el,eo.height=ec)}var 
em={src:v,srcSet:void 0,sizes:void 0};en&&(em=S({config:V,src:l,unoptimized:p,layout:F,width:el,quality:es,sizes:c,loader:H}));var eh=l,ep="imagesizes";ep="imageSizes";var ey=(n(i={},"imageSrcSet",em.srcSet),n(i,ep,em.sizes),n(i,"crossOrigin",D.crossOrigin),i),eb=d.default.useLayoutEffect,ev=d.useRef(W),ew=d.useRef(l);d.useEffect(function(){ev.current=W},[W]),eb(function(){ew.current!==l&&(ei(),ew.current=l)},[ei,l]);var ez=r({isLazy:Q,imgAttributes:em,heightInt:ec,widthInt:el,qualityInt:es,layout:F,className:_,imgStyle:ed,blurStyle:eu,loading:E,config:V,unoptimized:p,placeholder:M,loader:H,srcString:eh,onLoadingCompleteRef:ev,setBlurComplete:Y,setIntersection:ee,isVisible:en,noscriptSizes:c},D);return d.default.createElement(d.default.Fragment,null,d.default.createElement("span",{style:eo},er?d.default.createElement("span",{style:ea},t?d.default.createElement("img",{style:{display:"block",maxWidth:"100%",width:"initial",height:"initial",background:"none",opacity:1,border:0,margin:0,padding:0},alt:"","aria-hidden":!0,src:t}):null):null,d.default.createElement(j,Object.assign({},ez))),k?d.default.createElement(u.default,null,d.default.createElement("link",Object.assign({key:"__nimg-"+em.src+em.srcSet+em.sizes,rel:"preload",as:"image",href:em.srcSet?void 0:em.src},ey))):null)};var r=i(17858).Z,l=i(16922).Z,c=i(86905).Z,s=i(31080).Z,d=c(i(70079)),u=l(i(76109)),f=i(60239),g=i(26790),m=i(94136);i(13279);var h=i(5189);function p(e){return"/"===e[0]?e.slice(1):e}var y={deviceSizes:[640,750,828,1080,1200,1920,2048,3840],imageSizes:[16,32,48,64,96,128,256,384],path:"/_next/image",loader:"default",dangerouslyAllowSVG:!1,unoptimized:!1},b=new Set,v="data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7",w=new Map([["default",function(e){var t=e.config,i=e.src,n=e.width,o=e.quality;return 
i.endsWith(".svg")&&!t.dangerouslyAllowSVG?i:"".concat(h.normalizePathTrailingSlash(t.path),"?url=").concat(encodeURIComponent(i),"&w=").concat(n,"&q=").concat(o||75)}],["imgix",function(e){var t=e.config,i=e.src,n=e.width,o=e.quality,a=new URL("".concat(t.path).concat(p(i))),r=a.searchParams;return r.set("auto",r.getAll("auto").join(",")||"format"),r.set("fit",r.get("fit")||"max"),r.set("w",r.get("w")||n.toString()),o&&r.set("q",o.toString()),a.href}],["cloudinary",function(e){var t,i=e.config,n=e.src,o=e.width,a=["f_auto","c_limit","w_"+o,"q_"+(e.quality||"auto")].join(",")+"/";return"".concat(i.path).concat(a).concat(p(n))}],["akamai",function(e){var t=e.config,i=e.src,n=e.width;return"".concat(t.path).concat(p(i),"?imwidth=").concat(n)}],["custom",function(e){var t=e.src;throw Error('Image with src "'.concat(t,'" is missing "loader" prop.')+"\nRead more: https://nextjs.org/docs/messages/next-image-missing-loader")}],]);function z(e){return void 0!==e.default}function S(e){var t=e.config,i=e.src,n=e.unoptimized,o=e.layout,r=e.width,l=e.quality,c=e.sizes,s=e.loader;if(n)return{src:i,srcSet:void 0,sizes:void 0};var d=function(e,t,i,n){var o=e.deviceSizes,r=e.allSizes;if(n&&("fill"===i||"responsive"===i)){for(var l=/(^|\s)(1?\d?\d)vw/g,c=[];s=l.exec(n);s)c.push(parseInt(s[2]));if(c.length){var s,d,u=.01*(d=Math).min.apply(d,a(c));return{widths:r.filter(function(e){return e>=o[0]*u}),kind:"w"}}return{widths:r,kind:"w"}}return"number"!=typeof t||"fill"===i||"responsive"===i?{widths:o,kind:"w"}:{widths:a(new Set([t,2*t].map(function(e){return r.find(function(t){return t>=e})||r[r.length-1]}))),kind:"x"}}(t,r,o,c),u=d.widths,f=d.kind,g=u.length-1;return{sizes:c||"w"!==f?c:"100vw",srcSet:u.map(function(e,n){return"".concat(s({config:t,src:i,quality:l,width:e})," ").concat("w"===f?e:n+1).concat(f)}).join(", "),src:s({config:t,src:i,quality:l,width:u[g]})}}function A(e){return"number"==typeof e?e:"string"==typeof e?parseInt(e,10):void 0}function x(e){var 
t,i=(null==(t=e.config)?void 0:t.loader)||"default",n=w.get(i);if(n)return n(e);throw Error('Unknown "loader" found in "next.config.js". Expected: '.concat(f.VALID_LOADERS.join(", "),". Received: ").concat(i))}function k(e,t,i,n,o,a){e&&e.src!==v&&e["data-loaded-src"]!==t&&(e["data-loaded-src"]=t,("decode"in e?e.decode():Promise.resolve()).catch(function(){}).then(function(){if(e.parentNode&&(b.add(t),"blur"===n&&a(!0),null==o?void 0:o.current)){var i=e.naturalWidth,r=e.naturalHeight;o.current({naturalWidth:i,naturalHeight:r})}}))}var j=function(e){var t=e.imgAttributes,i=(e.heightInt,e.widthInt),n=e.qualityInt,o=e.layout,a=e.className,l=e.imgStyle,c=e.blurStyle,u=e.isLazy,f=e.placeholder,g=e.loading,m=e.srcString,h=e.config,p=e.unoptimized,y=e.loader,b=e.onLoadingCompleteRef,v=e.setBlurComplete,w=e.setIntersection,z=e.onLoad,A=e.onError,x=(e.isVisible,e.noscriptSizes),j=s(e,["imgAttributes","heightInt","widthInt","qualityInt","layout","className","imgStyle","blurStyle","isLazy","placeholder","loading","srcString","config","unoptimized","loader","onLoadingCompleteRef","setBlurComplete","setIntersection","onLoad","onError","isVisible","noscriptSizes"]);return g=u?"lazy":g,d.default.createElement(d.default.Fragment,null,d.default.createElement("img",Object.assign({},j,t,{decoding:"async","data-nimg":o,className:a,style:r({},l,c),ref:d.useCallback(function(e){w(e),(null==e?void 0:e.complete)&&k(e,m,o,f,b,v)},[w,m,o,f,b,v,]),onLoad:function(e){k(e.currentTarget,m,o,f,b,v),z&&z(e)},onError:function(e){"blur"===f&&v(!0),A&&A(e)}})),(u||"blur"===f)&&d.default.createElement("noscript",null,d.default.createElement("img",Object.assign({},j,S({config:h,src:m,unoptimized:p,layout:o,width:i,quality:n,sizes:x,loader:y}),{decoding:"async","data-nimg":o,style:l,className:a,loading:g}))))};("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 
0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},96424:function(e,t,i){e.exports=i(68561)}}]); | PypiClean |
/Faker-19.3.1.tar.gz/Faker-19.3.1/faker/providers/address/sk_SK/__init__.py | from .. import Provider as AddressProvider
class Provider(AddressProvider):
city_formats = ("{{city_name}}",)
street_name_formats = ("{{street_name}}",)
street_address_formats = ("{{street_name}} {{building_number}}",)
address_formats = ("{{street_address}}\n{{postcode}} {{city}}",)
building_number_formats = ("%", "%#", "%##")
street_suffixes_long = ("ulica", "trieda", "nábrežie", "námestie")
street_suffixes_short = ("ul.", "tr.", "nábr.", "nám.")
postcode_formats = (
"8## ##",
"9## ##",
"0## ##",
)
cities = (
"Ábelová",
"Abovce",
"Abrahám",
"Abrahámovce",
"Abrahámovce",
"Abramová",
"Abranovce",
"Adidovce",
"Alekšince",
"Andovce",
"Andrejová",
"Ardanovce",
"Ardovo",
"Arnutovce",
"Báb",
"Babie",
"Babín",
"Babiná",
"Babindol",
"Babinec",
"Bacúch",
"Bacúrov",
"Báč",
"Bačka",
"Bačkov",
"Bačkovík",
"Badín",
"Baďan",
"Báhoň",
"Bajany",
"Bajč",
"Bajerov",
"Bajerovce",
"Bajka",
"Bajtava",
"Baka",
"Baláže",
"Baldovce",
"Balog nad Ipľom",
"Baloň",
"Banka",
"Bánov",
"Bánovce nad Bebravou",
"Bánovce nad Ondavou",
"Banská Belá",
"Banská Štiavnica",
"Banská Bystrica",
"Banské",
"Banský Studenec",
"Baňa",
"Bara",
"Barca",
"Bartošovce",
"Bardoňovo",
"Bartošova Lehôtka",
"Bardejov",
"Baška",
"Baškovce",
"Baškovce",
"Bašovce",
"Batizovce",
"Bátorová",
"Bátka",
"Bátorove Kosihy",
"Bátovce",
"Beharovce",
"Beckov",
"Becherov",
"Belá",
"Belá",
"Belá - Dulice",
"Belá nad Cirochou",
"Beladice",
"Belejovce",
"Belín",
"Belina",
"Belince",
"Bellova Ves",
"Beloveža",
"Beluj",
"Beluša",
"Belža",
"Beniakovce",
"Benice",
"Benkovce",
"Beňadiková",
"Beňadikovce",
"Beňadovo",
"Beňatina",
"Beňuš",
"Bernolákovo",
"Bertotovce",
"Beša",
"Beša",
"Bešeňov",
"Bešeňová",
"Betlanovce",
"Betliar",
"Bežovce",
"Bidovce",
"Biel",
"Bielovce",
"Biely Kostol",
"Bijacovce",
"Bílkove Humence",
"Bíňa",
"Bíňovce",
"Biskupice",
"Biskupová",
"Bitarová",
"Blahová",
"Blatná na Ostrove",
"Blatná Polianka",
"Blatné",
"Blatné Remety",
"Blatné Revištia",
"Blatnica",
"Blažice",
"Blažovce",
"Blesovce",
"Blhovce",
"Bobot",
"Bobrov",
"Bobrovček",
"Bobrovec",
"Bobrovník",
"Bočiar",
"Bodíky",
"Bodiná",
"Bodorová",
"Bodovce",
"Bodružal",
"Bodza",
"Bodzianske Lúky",
"Bogliarka",
"Bohdanovce",
"Bohdanovce nad Trnavou",
"Boheľov",
"Bohunice",
"Bohunice",
"Bohúňovo",
"Bojná",
"Bojnice",
"Bojničky",
"Boldog",
"Boleráz",
"Bolešov",
"Boliarov",
"Boľ",
"Boľkovce",
"Borcová",
"Borčany",
"Borčice",
"Borinka",
"Borová",
"Borovce",
"Borský Mikuláš",
"Borský Svätý Jur",
"Borša",
"Bory",
"Bošáca",
"Bošany",
"Bottovo",
"Boťany",
"Bôrka",
"Bracovce",
"Branč",
"Branovo",
"Bratislava",
"Okres Bratislava II",
"Okres Bratislava III",
"Okres Bratislava IV",
"Okres Bratislava V",
"Braväcovo",
"Brdárka",
"Brehov",
"Brehy",
"Brekov",
"Brestov",
"Brestov",
"Brestov nad Laborcom",
"Brestovany",
"Brestovec",
"Brestovec",
"Bretejovce",
"Bretka",
"Breza",
"Brezany",
"Brezina",
"Breziny",
"Breznica",
"Breznička",
"Breznička",
"Brezno",
"Brezolupy",
"Brezov",
"Brezová pod Bradlom",
"Brezovec",
"Brezovica",
"Brezovica",
"Brezovička",
"Brezovka",
"Brežany",
"Brhlovce",
"Brieštie",
"Brodské",
"Brodzany",
"Brunovce",
"Brusnica",
"Brusník",
"Brusno",
"Brutovce",
"Bruty",
"Brvnište",
"Brzotín",
"Buclovany",
"Búč",
"Bučany",
"Budča",
"Budikovany",
"Budimír",
"Budiná",
"Budince",
"Budiš",
"Budkovce",
"Budmerice",
"Buglovce",
"Buková",
"Bukovce",
"Bukovec",
"Bukovec",
"Bukovina",
"Bulhary",
"Bunetice",
"Bunkovce",
"Bušince",
"Bušovce",
"Buzica",
"Buzitka",
"Bystrá",
"Bystrá",
"Bystrany",
"Bystré",
"Bystričany",
"Bystrička",
"Byšta",
"Bytča",
"Bzenica",
"Bzenov",
"Bzince pod Javorinou",
"Bziny",
"Bzovík",
"Bzovská Lehôtka",
"Bžany",
"Cabaj - Čápor",
"Cabov",
"Cakov",
"Cejkov",
"Cernina",
"Cerová",
"Cerovo",
"Cestice",
"Cífer",
"Cigeľ",
"Cigeľka",
"Cigla",
"Cimenná",
"Cinobaňa",
"Čabalovce",
"Čabiny",
"Čabradský Vrbovok",
"Čadca",
"Čachtice",
"Čajkov",
"Čaka",
"Čakajovce",
"Čakanovce",
"Čakanovce",
"Čakany",
"Čaklov",
"Čalovec",
"Čamovce",
"Čaňa",
"Čaradice",
"Čáry",
"Častá",
"Častkov",
"Častkovce",
"Čata",
"Čataj",
"Čavoj",
"Čebovce",
"Čečehov",
"Čečejovce",
"Čechy",
"Čechynce",
"Čekovce",
"Čeláre",
"Čelkova Lehota",
"Čelovce",
"Čelovce",
"Čeľadice",
"Čeľadince",
"Čeľovce",
"Čenkovce",
"Čerenčany",
"Čereňany",
"Čerhov",
"Čerín",
"Čermany",
"Černík",
"Černina",
"Černochov",
"Čertižné",
"Červená Voda",
"Červenica",
"Červenica pri Sabinove",
"Červeník",
"Červený Hrádok",
"Červený Kameň",
"Červený Kláštor",
"Červeňany",
"České Brezovo",
"Čičarovce",
"Čičava",
"Čičmany",
"Číčov",
"Čierna",
"Čierna Lehota",
"Čierna Lehota",
"Čierna nad Tisou",
"Čierna Voda",
"Čierne",
"Čierne Kľačany",
"Čierne nad Topľou",
"Čierne Pole",
"Čierny Balog",
"Čierny Brod",
"Čierny Potok",
"Čifáre",
"Čiližská Radvaň",
"Čimhová",
"Čirč",
"Číž",
"Čižatice",
"Čoltovo",
"Čremošné",
"Čučma",
"Čukalovce",
"Dačov Lom",
"Daletice",
"Danišovce",
"Dargov",
"Davidov",
"Debraď",
"Dedačov",
"Dedina Mládeže",
"Dedinka",
"Dedinky",
"Dechtice",
"Dekýš",
"Demandice",
"Demänovská Dolina",
"Demjata",
"Detrík",
"Detva",
"Detvianska Huta",
"Devičany",
"Devičie",
"Dežerice",
"Diaková",
"Diakovce",
"Diviacka Nová Ves",
"Diviaky nad Nitricou",
"Divín",
"Divina",
"Divinka",
"Dlhá",
"Dlhá nad Kysucou",
"Dlhá nad Oravou",
"Dlhá nad Váhom",
"Dlhá Ves",
"Dlhé Klčovo",
"Dlhé nad Cirochou",
"Dlhé Pole",
"Dlhé Stráže",
"Dlhoňa",
"Dlžín",
"Dobrá",
"Dobrá Niva",
"Dobrá Voda",
"Dobroč",
"Dobrohošť",
"Dobroslava",
"Dobšiná",
"Dohňany",
"Dojč",
"Dolinka",
"Dolná Breznica",
"Dolná Krupá",
"Dolná Lehota",
"Dolná Mariková",
"Dolná Mičiná",
"Dolná Poruba",
"Dolná Seč",
"Dolná Streda",
"Dolná Strehová",
"Dolná Súča",
"Dolná Tižina",
"Dolná Trnávka",
"Dolná Ves",
"Dolná Ždaňa",
"Dolné Dubové",
"Dolné Kočkovce",
"Dolné Lefantovce",
"Dolné Lovčice",
"Dolné Mladonice",
"Dolné Naštice",
"Dolné Obdokovce",
"Dolné Orešany",
"Dolné Otrokovce",
"Dolné Plachtince",
"Dolné Saliby",
"Dolné Semerovce",
"Dolné Srnie",
"Dolné Strháre",
"Dolné Trhovište",
"Dolné Vestenice",
"Dolné Zahorany",
"Dolné Zelenice",
"Dolný Badín",
"Dolný Bar",
"Dolný Harmanec",
"Dolný Hričov",
"Dolný Chotár",
"Dolný Kalník",
"Dolný Kubín",
"Dolný Lieskov",
"Dolný Lopašov",
"Dolný Ohaj",
"Dolný Pial",
"Dolný Štál",
"Dolný Vadičov",
"Doľany",
"Doľany",
"Domadice",
"Domaníky",
"Domaniža",
"Domaňovce",
"Donovaly",
"Drábsko",
"Drahňov",
"Drahovce",
"Dravce",
"Dražice",
"Dražkovce",
"Drážovce",
"Drienčany",
"Drienica",
"Drienov",
"Drienovec",
"Drienovo",
"Drienovská Nová Ves",
"Drietoma",
"Drnava",
"Drňa",
"Družstevná pri Hornáde",
"Drženice",
"Držkovce",
"Dubinné",
"Dubnica nad Váhom",
"Dubnička",
"Dubník",
"Dubno",
"Dubodiel",
"Dubová",
"Dubová",
"Dubovany",
"Dubovce",
"Dubové",
"Dubové",
"Dubovec",
"Dubovica",
"Dúbrava",
"Dúbrava",
"Dúbrava",
"Dúbravica",
"Dúbravka",
"Dúbravy",
"Ducové",
"Dudince",
"Dukovce",
"Dulov",
"Dulova Ves",
"Dulovce",
"Dulovo",
"Dunajská Lužná",
"Dunajov",
"Dunajská Streda",
"Dunajský Klátov",
"Duplín",
"Dvorany nad Nitrou",
"Dvorec",
"Dvorianky",
"Dvorníky",
"Dvorníky - Včeláre",
"Dvory nad Žitavou",
"Ďačov",
"Ďanová",
"Ďapalovce",
"Ďubákovo",
"Ďurčiná",
"Ďurďoš",
"Ďurďošík",
"Ďurďové",
"Ďurkov",
"Ďurková",
"Ďurkovce",
"Egreš",
"Fačkov",
"Falkušovce",
"Farná",
"Fekišovce",
"Figa",
"Fijaš",
"Fiľakovo",
"Fiľakovské Kováče",
"Fintice",
"Folkušová",
"Forbasy",
"Frička",
"Fričkovce",
"Fričovce",
"Fulianka",
"Gabčíkovo",
"Gaboltov",
"Gajary",
"Galanta",
"Galovany",
"Gánovce",
"Gáň",
"Gbelce",
"Gbely",
"Gbeľany",
"Geča",
"Gelnica",
"Gemer",
"Gemerček",
"Gemerská Hôrka",
"Gemerská Panica",
"Gemerská Poloma",
"Gemerská Ves",
"Gemerské Dechtáre",
"Gemerské Michalovce",
"Gemerské Teplice",
"Gemerský Jablonec",
"Gemerský Sad",
"Geraltov",
"Gerlachov",
"Gerlachov",
"Giglovce",
"Giraltovce",
"Girovce",
"Glabušovce",
"Gočaltovo",
"Gočovo",
"Golianovo",
"Gortva",
"Gôtovany",
"Granč - Petrovce",
"Gregorova Vieska",
"Gregorovce",
"Gribov",
"Gruzovce",
"Gyňov",
"Habovka",
"Habura",
"Hačava",
"Háj",
"Háj",
"Hajná Nová Ves",
"Hajnáčka",
"Hájske",
"Hajtovka",
"Haláčovce",
"Halič",
"Haligovce",
"Haluzice",
"Hamuliakovo",
"Handlová",
"Hanigovce",
"Haniska",
"Haniska",
"Hanková",
"Hankovce",
"Hankovce",
"Hanušovce nad Topľou",
"Harakovce",
"Harhaj",
"Harichovce",
"Harmanec",
"Hatalov",
"Hatné",
"Havaj",
"Havka",
"Havranec",
"Hažín",
"Hažín nad Cirochou",
"Hažlín",
"Helcmanovce",
"Heľpa",
"Henckovce",
"Henclová",
"Hencovce",
"Hendrichovce",
"Herľany",
"Hermanovce",
"Hermanovce nad Topľou",
"Hertník",
"Hervartov",
"Hiadeľ",
"Hincovce",
"Hladovka",
"Hlboké",
"Hliník nad Hronom",
"Hlinné",
"Hlivištia",
"Hlohovec",
"Hniezdne",
"Hnilčík",
"Hnilec",
"Hnojné",
"Hnúšťa",
"Hodejov",
"Hodejovec",
"Hodkovce",
"Hodruša - Hámre",
"Hokovce",
"Holčíkovce",
"Holiare",
"Holice",
"Holíč",
"Holiša",
"Holumnica",
"Honce",
"Hontianska Vrbica",
"Hontianske Moravce",
"Hontianske Nemce",
"Hontianske Tesáre",
"Hontianske Trsťany",
"Horná Breznica",
"Horná Kráľová",
"Horná Krupá",
"Horná Lehota",
"Horná Lehota",
"Horná Mariková",
"Horná Mičiná",
"Horná Poruba",
"Horná Potôň",
"Horná Seč",
"Horná Streda",
"Horná Strehová",
"Horná Súča",
"Horná Štubňa",
"Horná Ves",
"Horná Ves",
"Horná Ždaňa",
"Horné Dubové",
"Horné Hámre",
"Horné Chlebany",
"Horné Lefantovce",
"Horné Mladonice",
"Horné Mýto",
"Horné Naštice",
"Horné Obdokovce",
"Horné Orešany",
"Horné Otrokovce",
"Horné Plachtince",
"Horné Pršany",
"Horné Saliby",
"Horné Semerovce",
"Horné Srnie",
"Horné Strháre",
"Horné Štitáre",
"Horné Trhovište",
"Horné Turovce",
"Horné Vestenice",
"Horné Zahorany",
"Horné Zelenice",
"Horný Badín",
"Horný Bar",
"Horný Hričov",
"Horný Kalník",
"Horný Lieskov",
"Horný Pial",
"Horný Tisovník",
"Horný Vadičov",
"Horňa",
"Horňany",
"Horovce",
"Horovce",
"Hoste",
"Hostice",
"Hostie",
"Hostišovce",
"Hostovice",
"Hosťová",
"Hosťovce",
"Hosťovce",
"Hozelec",
"Hôrka",
"Hôrka nad Váhom",
"Hôrky",
"Hrabičov",
"Hrabkov",
"Hrabová Roztoka",
"Hrabovčík",
"Hrabovec",
"Hrabovec nad Laborcom",
"Hrabské",
"Hrabušice",
"Hradisko",
"Hradište",
"Hradište",
"Hradište pod Vrátnom",
"Hrádok",
"Hrachovište",
"Hrachovo",
"Hraničné",
"Hranovnica",
"Hraň",
"Hrašné",
"Hrašovík",
"Hrčeľ",
"Hrhov",
"Hriadky",
"Hričovské Podhradie",
"Hriňová",
"Hrišovce",
"Hrkovce",
"Hrlica",
"Hrnčiarovce nad Parnou",
"Hrnčiarska Ves",
"Hrnčiarske Zalužany",
"Hrochoť",
"Hromoš",
"Hronec",
"Hronovce",
"Hronsek",
"Hronská Breznica",
"Hronská Dúbrava",
"Hronské Kľačany",
"Hronské Kosihy",
"Hronský Beňadik",
"Hrubá Borša",
"Hruboňovo",
"Hrubov",
"Hrubý Šúr",
"Hrušov",
"Hrušov",
"Hrušovany",
"Hrušovo",
"Hruštín",
"Hubice",
"Hubina",
"Hubošovce",
"Hubová",
"Hubovo",
"Hucín",
"Hudcovce",
"Hul",
"Humenné",
"Huncovce",
"Hunkovce",
"Hurbanova Ves",
"Hurbanovo",
"Husák",
"Husiná",
"Hutka",
"Huty",
"Hviezdoslavov",
"Hvozdnica",
"Hybe",
"Hýľov",
"Chanava",
"Chlebnice",
"Chlmec",
"Chľaba",
"Chmeľnica",
"Chmeľov",
"Chmeľová",
"Chmeľovec",
"Chminianska Nová Ves",
"Chminianske Jakubovany",
"Chmiňany",
"Choča",
"Chocholná - Velčice",
"Choňkovce",
"Chorvátsky Grob",
"Chorváty",
"Chotča",
"Chotín",
"Chrabrany",
"Chrámec",
"Chrastince",
"Chrastné",
"Chrasť nad Hornádom",
"Chrenovec - Brusno",
"Chropov",
"Chrťany",
"Chtelnica",
"Chudá Lehota",
"Chvalová",
"Chvojnica",
"Chvojnica",
"Chynorany",
"Chyžné",
"Igram",
"Ihľany",
"Ihráč",
"Ilava",
"Iliašovce",
"Ilija",
"Imeľ",
"Inovce",
"Iňa",
"Iňačovce",
"Ipeľské Predmostie",
"Ipeľské Úľany",
"Ipeľský Sokolec",
"Istebné",
"Ivachnová",
"Ivančiná",
"Ivanice",
"Ivanka pri Dunaji",
"Ivanka pri Nitre",
"Ivanovce",
"Iža",
"Ižipovce",
"Ižkovce",
"Jablonec",
"Jablonica",
"Jablonka",
"Jablonov",
"Jablonov nad Turňou",
"Jablonové",
"Jablonové",
"Jabloň",
"Jabloňovce",
"Jacovce",
"Jahodná",
"Jaklovce",
"Jakovany",
"Jakubany",
"Jakubov",
"Jakubova Voľa",
"Jakubovany",
"Jakubovany",
"Jakušovce",
"Jalová",
"Jalovec",
"Jalovec",
"Jalšové",
"Jalšovík",
"Jamník",
"Jamník",
"Janice",
"Janík",
"Janíky",
"Jankovce",
"Janov",
"Janova Lehota",
"Janovce",
"Jánovce",
"Jánovce",
"Janovík",
"Jarabá",
"Jarabina",
"Jarok",
"Jarovnice",
"Jasenica",
"Jasenie",
"Jasenov",
"Jasenov",
"Jasenová",
"Jasenovce",
"Jasenové",
"Jasenovo",
"Jaslovské Bohunice",
"Jasov",
"Jasová",
"Jastrabá",
"Jastrabie nad Topľou",
"Jastrabie pri Michalovciach",
"Jatov",
"Javorina (vojenský obvod)",
"Jazernica",
"Jedlinka",
"Jedľové Kostoľany",
"Jelenec",
"Jelka",
"Jelšava",
"Jelšovce",
"Jelšovec",
"Jenkovce",
"Jesenské",
"Jesenské",
"Jestice",
"Ješkova Ves",
"Jezersko",
"Jovice",
"Jovsa",
"Jur nad Hronom",
"Jurkova Voľa",
"Jurová",
"Jurské",
"Juskova Voľa",
"Kačanov",
"Kajal",
"Kalameny",
"Kalinkovo",
"Kalinov",
"Kalinovo",
"Kalná nad Hronom",
"Kalná Roztoka",
"Kálnica",
"Kalnište",
"Kalonda",
"Kalša",
"Kaloša",
"Kaluža",
"Kaľamenová",
"Kaľava",
"Kamanová",
"Kamenec pod Vtáčnikom",
"Kamenica",
"Kamenica nad Cirochou",
"Kamenica nad Hronom",
"Kameničany",
"Kameničná",
"Kamenín",
"Kamenná Poruba",
"Kamenná Poruba",
"Kamenné Kosihy",
"Kamenný Most",
"Kameňany",
"Kamienka",
"Kamienka",
"Kanianka",
"Kapišová",
"Kaplna",
"Kapušany",
"Kapušianske Kľačany",
"Karlová",
"Karná",
"Kašov",
"Kátlovce",
"Kátov",
"Kazimír",
"Kecerovce",
"Kecerovský Lipovec",
"Kečkovce",
"Kečovo",
"Kechnec",
"Kendice",
"Kesovce",
"Keť",
"Kežmarok",
"Kiarov",
"Kladzany",
"Klasov",
"Kláštor pod Znievom",
"Klátova Nová Ves",
"Klčov",
"Klenov",
"Klenová",
"Klenovec",
"Kleňany",
"Klieština",
"Klin",
"Klin nad Bodrogom",
"Klížska Nemá",
"Klokoč",
"Klokočov",
"Klokočov",
"Klubina",
"Kluknava",
"Kľačany",
"Kľače",
"Kľačno",
"Kľak",
"Kľúčovec",
"Kľušov",
"Kmeťovo",
"Kobeliarovo",
"Kobylnice",
"Kobyly",
"Koceľovce",
"Kociha",
"Kocurany",
"Kočín - Lančár",
"Kočovce",
"Kochanovce",
"Kochanovce",
"Kojatice",
"Kojšov",
"Kokava nad Rimavicou",
"Kokošovce",
"Kokšov - Bakša",
"Kolačkov",
"Kolačno",
"Koláre",
"Kolárovice",
"Kolárovo",
"Kolbasov",
"Kolbovce",
"Kolibabovce",
"Kolinovce",
"Kolíňany",
"Kolonica",
"Kolta",
"Komárany",
"Komárno",
"Komárov",
"Komárovce",
"Komjatice",
"Komjatná",
"Komoča",
"Koniarovce",
"Konrádovce",
"Konská",
"Konská",
"Koňuš",
"Kopčany",
"Kopernica",
"Koplotovce",
"Koprivnica",
"Kordíky",
"Korejovce",
"Korňa",
"Koromľa",
"Korunková",
"Korytárky",
"Korytné",
"Kosihovce",
"Kosihy nad Ipľom",
"Kosorín",
"Kostolec",
"Kostolište",
"Kostolná pri Dunaji",
"Kostolná Ves",
"Kostolná - Záriečie",
"Kostolné",
"Kostolné Kračany",
"Kostoľany pod Tribečom",
"Koš",
"Košariská",
"Košarovce",
"Košeca",
"Košecké Podhradie",
"Košice",
"Okres Košice II",
"Okres Košice III",
"Okres Košice IV",
"Košická Belá",
"Košická Polianka",
"Košické Oľšany",
"Košický Klečenov",
"Koškovce",
"Košolná",
"Košúty",
"Košťany nad Turcom",
"Kotešová",
"Kotmanová",
"Kotrčiná Lúčka",
"Kováčová",
"Kováčová",
"Kováčovce",
"Koválov",
"Koválovec",
"Kovarce",
"Kozárovce",
"Kozelník",
"Kozí Vrbovok",
"Kožany",
"Kožuchov",
"Kožuchovce",
"Kračúnovce",
"Krahule",
"Krajná Bystrá",
"Krajná Poľana",
"Krajná Porúbka",
"Krajné",
"Krajné Čierno",
"Krakovany",
"Králiky",
"Kráľ",
"Kráľov Brod",
"Kráľova Lehota",
"Kráľová nad Váhom",
"Kráľová pri Senci",
"Kraľovany",
"Kráľovce",
"Kráľovce - Krnišov",
"Kráľovičove Kračany",
"Kráľovský Chlmec",
"Kraskovo",
"Krásna Lúka",
"Krásna Ves",
"Krásno",
"Krásno nad Kysucou",
"Krásnohorská Dlhá Lúka",
"Krásnohorské Podhradie",
"Krásnovce",
"Krásny Brod",
"Krasňany",
"Kravany",
"Kravany",
"Kravany nad Dunajom",
"Krčava",
"Kremná",
"Kremnica",
"Kremnické Bane",
"Kristy",
"Krišľovce",
"Krišovská Liesková",
"Krivá",
"Krivany",
"Kriváň",
"Krivé",
"Krivoklát",
"Krivosúd - Bodovka",
"Kríže",
"Krížová Ves",
"Krížovany",
"Križovany nad Dudváhom",
"Krná",
"Krnča",
"Krokava",
"Krompachy",
"Krpeľany",
"Krškany",
"Krtovce",
"Kručov",
"Krupina",
"Krušetnica",
"Krušinec",
"Krušovce",
"Kružlov",
"Kružlová",
"Kružná",
"Kružno",
"Kšinná",
"Kubáňovo",
"Kučín",
"Kučín",
"Kuchyňa",
"Kuklov",
"Kuková",
"Kukučínov",
"Kunerad",
"Kunešov",
"Kunova Teplica",
"Kuraľany",
"Kurima",
"Kurimany",
"Kurimka",
"Kurov",
"Kusín",
"Kútniky",
"Kúty",
"Kuzmice",
"Kuzmice",
"Kvačany",
"Kvačany",
"Kvakovce",
"Kvašov",
"Kvetoslavov",
"Kyjatice",
"Kyjov",
"Kynceľová",
"Kysak",
"Kyselica",
"Kysta",
"Kysucké Nové Mesto",
"Kysucký Lieskovec",
"Láb",
"Lackov",
"Lacková",
"Lada",
"Ladce",
"Ladice",
"Ladmovce",
"Ladomerská Vieska",
"Ladomirov",
"Ladomirová",
"Ladzany",
"Lakšárska Nová Ves",
"Lascov",
"Laskár",
"Lastomír",
"Lastovce",
"Laškovce",
"Látky",
"Lazany",
"Lazisko",
"Lazy pod Makytou",
"Lažany",
"Lednica",
"Lednické Rovne",
"Legnava",
"Lehnice",
"Lehota",
"Lehota nad Rimavicou",
"Lehota pod Vtáčnikom",
"Lehôtka",
"Lehôtka pod Brehmi",
"Lechnica",
"Lekárovce",
"Leles",
"Leľa",
"Lemešany",
"Lenartov",
"Lenartovce",
"Lendak",
"Lenka",
"Lentvora",
"Leopoldov",
"Lesenice",
"Lesíček",
"Lesné",
"Lesnica",
"Leštiny",
"Lešť (vojenský obvod)",
"Letanovce",
"Letničie",
"Leváre",
"Levice",
"Levkuška",
"Levoča",
"Ležiachov",
"Libichava",
"Licince",
"Ličartovce",
"Liesek",
"Lieskovany",
"Lieskovec",
"Lieskovec",
"Liešno",
"Liešťany",
"Lietava",
"Lietavská Lúčka",
"Lietavská Svinná - Babkov",
"Likavka",
"Limbach",
"Lipany",
"Lipník",
"Lipníky",
"Lipová",
"Lipová",
"Lipovany",
"Lipovce",
"Lipové",
"Lipovec",
"Lipovec",
"Lipovník",
"Lipovník",
"Liptovská Anna",
"Liptovská Kokava",
"Liptovská Lúžna",
"Liptovská Osada",
"Liptovská Porúbka",
"Liptovská Sielnica",
"Liptovská Štiavnica",
"Liptovská Teplá",
"Liptovská Teplička",
"Liptovské Beharovce",
"Liptovské Kľačany",
"Liptovské Matiašovce",
"Liptovské Revúce",
"Liptovské Sliače",
"Liptovský Hrádok",
"Liptovský Ján",
"Liptovský Michal",
"Liptovský Mikuláš",
"Liptovský Ondrej",
"Liptovský Peter",
"Liptovský Trnovec",
"Lisková",
"Lišov",
"Litava",
"Litmanová",
"Livina",
"Livinské Opatovce",
"Livov",
"Livovská Huta",
"Lodno",
"Lok",
"Lokca",
"Lom nad Rimavicou",
"Lomná",
"Lomné",
"Lomnička",
"Lontov",
"Lopašov",
"Lopúchov",
"Lopušné Pažite",
"Lošonec",
"Lovce",
"Lovča",
"Lovčica - Trubín",
"Lovinobaňa",
"Lozorno",
"Ložín",
"Lubeník",
"Lubina",
"Lúč na Ostrove",
"Lučatín",
"Lučenec",
"Lúčina",
"Lučivná",
"Lúčka",
"Lúčka",
"Lúčka",
"Lúčka",
"Lúčky",
"Lúčky",
"Lúčky",
"Lúčnica nad Žitavou",
"Ludanice",
"Ludrová",
"Luhyňa",
"Lúka",
"Lukačovce",
"Lukáčovce",
"Lukavica",
"Lukavica",
"Lukov",
"Lukovištia",
"Lúky",
"Lula",
"Lupoč",
"Lutila",
"Lutiše",
"Lužany",
"Lužany pri Topli",
"Lužianky",
"Lysá pod Makytou",
"Lysica",
"Ľubá",
"Ľubela",
"Ľubica",
"Ľubietová",
"Ľubiša",
"Ľubochňa",
"Ľuboreč",
"Ľuboriečka",
"Ľubotice",
"Ľubotín",
"Ľubovec",
"Ľudovítová",
"Ľutina",
"Ľutov",
"Macov",
"Mad",
"Madunice",
"Magnezitovce",
"Machulince",
"Majcichov",
"Majere",
"Majerovce",
"Makov",
"Makovce",
"Malacky",
"Malachov",
"Malá Čalomija",
"Malá Čausa",
"Malá Čierna",
"Malá Domaša",
"Malá Franková",
"Malá Hradná",
"Malá Ida",
"Malá Lehota",
"Malá Lodina",
"Malá nad Hronom",
"Malá Poľana",
"Malá Tŕňa",
"Málaš",
"Malatiná",
"Malatíny",
"Malcov",
"Malčice",
"Malé Borové",
"Malé Dvorníky",
"Malé Chyndice",
"Malé Hoste",
"Malé Kosihy",
"Malé Kozmálovce",
"Malé Kršteňany",
"Malé Lednice",
"Malé Leváre",
"Malé Ludince",
"Malé Ozorovce",
"Malé Raškovce",
"Malé Ripňany",
"Malé Straciny",
"Malé Trakany",
"Malé Uherce",
"Malé Vozokany",
"Malé Zálužie",
"Malé Zlievce",
"Málinec",
"Malinová",
"Malinovo",
"Malužiná",
"Malý Cetín",
"Malý Čepčín",
"Malý Horeš",
"Malý Kamenec",
"Malý Krtíš",
"Malý Lapáš",
"Malý Lipník",
"Malý Slavkov",
"Malý Slivník",
"Malý Šariš",
"Malženice",
"Mankovce",
"Maňa",
"Marcelová",
"Margecany",
"Marhaň",
"Marianka",
"Markovce",
"Markuška",
"Markušovce",
"Maršová - Rašov",
"Martin",
"Martin nad Žitavou",
"Martinček",
"Martinová",
"Martovce",
"Mašková",
"Maškovce",
"Matejovce nad Hornádom",
"Matiaška",
"Matiašovce",
"Matovce",
"Matúškovo",
"Matysová",
"Maťovské Vojkovce",
"Medovarce",
"Medvedie",
"Medveďov",
"Medzany",
"Medzev",
"Medzianky",
"Medzibrod",
"Medzibrodie nad Oravou",
"Medzilaborce",
"Melčice - Lieskové",
"Melek",
"Meliata",
"Mengusovce",
"Merašice",
"Merník",
"Mestečko",
"Mestisko",
"Mičakovce",
"Mierovo",
"Miezgovce",
"Michajlov",
"Michal na Ostrove",
"Michal nad Žitavou",
"Michalková",
"Michalok",
"Michalová",
"Michalovce",
"Michaľany",
"Miklušovce",
"Miková",
"Mikulášová",
"Mikušovce",
"Mikušovce",
"Milhosť",
"Miloslavov",
"Milpoš",
"Miňovce",
"Mirkovce",
"Miroľa",
"Mládzovo",
"Mlynárovce",
"Mlynčeky",
"Mlynica",
"Mlynky",
"Mníchova Lehota",
"Mníšek nad Hnilcom",
"Mníšek nad Popradom",
"Moča",
"Močenok",
"Močiar",
"Modra",
"Modra nad Cirochou",
"Modrany",
"Modrová",
"Modrovka",
"Modrý Kameň",
"Mojmírovce",
"Mojš",
"Mojtín",
"Mojzesovo",
"Mokrá Lúka",
"Mokrance",
"Mokroluh",
"Mokrý Háj",
"Moldava nad Bodvou",
"Moravany",
"Moravany nad Váhom",
"Moravské Lieskové",
"Moravský Svätý Ján",
"Most pri Bratislave",
"Mostová",
"Moškovec",
"Mošovce",
"Moštenica",
"Mošurov",
"Motešice",
"Motyčky",
"Môlča",
"Mrázovce",
"Mučín",
"Mudroňovo",
"Mudrovce",
"Muľa",
"Muráň",
"Muránska Dlhá Lúka",
"Muránska Huta",
"Muránska Lehota",
"Muránska Zdychava",
"Mútne",
"Mužla",
"Myjava",
"Myslina",
"Mýtna",
"Mýtne Ludany",
"Mýto pod Ďumbierom",
"Nacina Ves",
"Nadlice",
"Naháč",
"Nálepkovo",
"Námestovo",
"Nána",
"Nandraž",
"Necpaly",
"Nedanovce",
"Nedašovce",
"Neded",
"Nededza",
"Nedožery - Brezany",
"Nechválova Polianka",
"Nemce",
"Nemcovce",
"Nemcovce",
"Nemčice",
"Nemčiňany",
"Nemecká",
"Nemečky",
"Nemešany",
"Nemšová",
"Nenince",
"Neporadza",
"Neporadza",
"Nesvady",
"Nesluša",
"Neverice",
"Nevidzany",
"Nevidzany",
"Nevoľné",
"Nezbudská Lúčka",
"Nimnica",
"Nitra",
"Nitra nad Ipľom",
"Nitrianska Blatnica",
"Nitrianska Streda",
"Nitrianske Hrnčiarovce",
"Nitrianske Pravno",
"Nitrianske Rudno",
"Nitrianske Sučany",
"Nitrica",
"Nižná",
"Nižná",
"Nižná Boca",
"Nižná Hutka",
"Nižná Jablonka",
"Nižná Jedľová",
"Nižná Kamenica",
"Nižná Myšľa",
"Nižná Olšava",
"Nižná Pisaná",
"Nižná Polianka",
"Nižná Rybnica",
"Nižná Sitnica",
"Nižná Slaná",
"Nižná Voľa",
"Nižné Ladičkovce",
"Nižné Nemecké",
"Nižné Repaše",
"Nižné Ružbachy",
"Nižný Čaj",
"Nižný Hrabovec",
"Nižný Hrušov",
"Nižný Klátov",
"Nižný Komárnik",
"Nižný Kručov",
"Nižný Lánec",
"Nižný Mirošov",
"Nižný Orlík",
"Nižný Skálnik",
"Nižný Slavkov",
"Nižný Tvarožec",
"Nižný Žipov",
"Nolčovo",
"Norovce",
"Nová Baňa",
"Nová Bašta",
"Nová Bošáca",
"Nová Bystrica",
"Nová Dedina",
"Nová Dedinka",
"Nová Dubnica",
"Nová Kelča",
"Nová Lehota",
"Nová Lesná",
"Nová Ľubovňa",
"Nová Polhora",
"Nová Polianka",
"Nová Sedlica",
"Nová Ves",
"Nová Ves nad Váhom",
"Nová Ves nad Žitavou",
"Nová Vieska",
"Nováčany",
"Nováky",
"Nové Hony",
"Nové Mesto nad Váhom",
"Nové Sady",
"Nové Zámky",
"Novosad",
"Novoť",
"Nový Ruskov",
"Nový Salaš",
"Nový Tekov",
"Nový Život",
"Nýrovce",
"Ňagov",
"Ňárad",
"Obeckov",
"Obišovce",
"Oborín",
"Obručné",
"Obyce",
"Očkov",
"Očová",
"Odorín",
"Ohrady",
"Ohradzany",
"Ochodnica",
"Ochtiná",
"Okoč",
"Okoličná na Ostrove",
"Okrúhle",
"Okružná",
"Olcnava",
"Olejníkov",
"Olešná",
"Olováry",
"Olšovany",
"Oľdza",
"Oľka",
"Oľšavce",
"Oľšavica",
"Oľšavka",
"Oľšavka",
"Oľšinkov",
"Oľšov",
"Omastiná",
"Omšenie",
"Ondavka",
"Ondavské Matiašovce",
"Ondrašovce",
"Ondrašová",
"Ondrejovce",
"Opátka",
"Opatovce",
"Opatovce nad Nitrou",
"Opatovská Nová Ves",
"Opava",
"Opiná",
"Opoj",
"Oponice",
"Oravce",
"Orávka",
"Oravská Jasenica",
"Oravská Lesná",
"Oravská Polhora",
"Oravská Poruba",
"Oravský Biely Potok",
"Oravský Podzámok",
"Ordzovany",
"Orechová",
"Orechová Potôň",
"Oravské Veselé",
"Oreské",
"Oreské",
"Orešany",
"Orlov",
"Orovnica",
"Ortuťová",
"Osádka",
"Osadné",
"Osikov",
"Oslany",
"Osrblie",
"Ostrá Lúka",
"Ostratice",
"Ostrov",
"Ostrov",
"Ostrovany",
"Ostrý Grúň",
"Osturňa",
"Osuské",
"Oščadnica",
"Otrhánky",
"Otročok",
"Ovčiarsko",
"Ovčie",
"Ozdín",
"Ožďany",
"Pača",
"Padáň",
"Padarovce",
"Pakostov",
"Palárikovo",
"Palín",
"Palota",
"Panické Dravce",
"Paňa",
"Paňovce",
"Papín",
"Papradno",
"Parchovany",
"Parihuzovce",
"Párnica",
"Partizánska Ľupča",
"Partizánske",
"Pastovce",
"Pastuchov",
"Pašková",
"Paština Závada",
"Pata",
"Pataš",
"Pavčina Lehota",
"Pavlice",
"Pavlová",
"Pavlova Ves",
"Pavlovce",
"Pavlovce",
"Pavlovce nad Uhom",
"Pavľany",
"Pažiť",
"Pčoliné",
"Pečenice",
"Pečeňady",
"Pečeňany",
"Pečovská Nová Ves",
"Peder",
"Perín - Chym",
"Pernek",
"Petkovce",
"Petrikovce",
"Petrová",
"Petrova Lehota",
"Petrova Ves",
"Petrovany",
"Petrovce",
"Petrovce",
"Petrovce",
"Petrovce nad Laborcom",
"Petrovice",
"Petrovo",
"Pezinok",
"Piešťany",
"Pichne",
"Píla",
"Píla",
"Píla",
"Pinciná",
"Pinkovce",
"Piskorovce",
"Pitelová",
"Plášťovce",
"Plavé Vozokany",
"Plavecké Podhradie",
"Plavecký Mikuláš",
"Plavecký Peter",
"Plavecký Štvrtok",
"Plaveč",
"Plavnica",
"Plechotice",
"Pleš",
"Plešivec",
"Plevník - Drienové",
"Pliešovce",
"Ploské",
"Ploské",
"Pobedim",
"Počarová",
"Počúvadlo",
"Podbiel",
"Podbranč",
"Podbrezová",
"Podhájska",
"Podhorany",
"Podhorany",
"Podhorany",
"Podhorie",
"Podhorie",
"Podhoroď",
"Podhradie",
"Podhradie",
"Podhradie",
"Podhradík",
"Podkonice",
"Podkriváň",
"Podkylava",
"Podlužany",
"Podlužany",
"Podolie",
"Podolínec",
"Podrečany",
"Podskalie",
"Podtureň",
"Podvysoká",
"Podzámčok",
"Pohorelá",
"Pohranice",
"Pohronská Polhora",
"Pohronský Bukovec",
"Pohronský Ruskov",
"Pochabany",
"Pokryváč",
"Poliakovce",
"Polianka",
"Polichno",
"Polina",
"Poloma",
"Polomka",
"Poltár",
"Poluvsie",
"Poľanovce",
"Poľany",
"Poľný Kesov",
"Pongrácovce",
"Poniky",
"Poprad",
"Poproč",
"Poproč",
"Popudinské Močidľany",
"Poráč",
"Poriadie",
"Porostov",
"Poruba",
"Poruba pod Vihorlatom",
"Porúbka",
"Porúbka",
"Porúbka",
"Porúbka",
"Poša",
"Potok",
"Potok",
"Potoky",
"Potôčky",
"Potvorice",
"Považany",
"Považská Bystrica",
"Povina",
"Povoda",
"Povrazník",
"Pozba",
"Pozdišovce",
"Pôtor",
"Praha",
"Prakovce",
"Prašice",
"Prašník",
"Pravenec",
"Pravica",
"Pravotice",
"Práznovce",
"Prečín",
"Predajná",
"Predmier",
"Prenčov",
"Preseľany",
"Prestavlky",
"Prešov",
"Príbelce",
"Pribeník",
"Pribeta",
"Pribiš",
"Príbovce",
"Pribylina",
"Priechod",
"Priekopa",
"Priepasné",
"Prietrž",
"Prietržka",
"Prievaly",
"Prievidza",
"Prihradzany",
"Príkra",
"Príslop",
"Prituľany",
"Proč",
"Prochot",
"Prosačov",
"Prosiek",
"Prša",
"Pruské",
"Prusy",
"Pružina",
"Pstriná",
"Ptičie",
"Ptrukša",
"Pucov",
"Púchov",
"Pukanec",
"Pusté Čemerné",
"Pusté Pole",
"Pusté Sady",
"Pusté Úľany",
"Pušovce",
"Rabča",
"Rabčice",
"Rad",
"Radatice",
"Radava",
"Radimov",
"Radnovce",
"Radobica",
"Radoľa",
"Radoma",
"Radošina",
"Radošovce",
"Radošovce",
"Radôstka",
"Radvanovce",
"Radvaň nad Dunajom",
"Radvaň nad Laborcom",
"Radzovce",
"Rafajovce",
"Rajčany",
"Rajec",
"Rajecká Lesná",
"Rajecké Teplice",
"Rákoš",
"Rákoš",
"Raková",
"Rakovčík",
"Rakovec nad Ondavou",
"Rakovice",
"Rakovnica",
"Rakovo",
"Rakša",
"Rakúsy",
"Rakytník",
"Rankovce",
"Rapovce",
"Raslavice",
"Rastislavice",
"Rašice",
"Ratka",
"Ratková",
"Ratkovce",
"Ratkovo",
"Ratkovská Lehota",
"Ratkovská Suchá",
"Ratkovské Bystré",
"Ratnovce",
"Ratvaj",
"Ráztočno",
"Ráztoka",
"Ražňany",
"Reca",
"Regetovka",
"Rejdová",
"Reľov",
"Remeniny",
"Remetské Hámre",
"Renčišov",
"Repejov",
"Repište",
"Rešica",
"Rešov",
"Revúca",
"Revúcka Lehota",
"Riečka",
"Riečka",
"Richnava",
"Richvald",
"Rimavská Baňa",
"Rimavská Seč",
"Rimavská Sobota",
"Rimavské Brezovo",
"Rimavské Janovce",
"Rimavské Zalužany",
"Rohov",
"Rohovce",
"Rohožník",
"Rohožník",
"Rochovce",
"Rokycany",
"Rokytov",
"Rokytov pri Humennom",
"Rokytovce",
"Rosina",
"Roškovce",
"Roštár",
"Rovensko",
"Rovinka",
"Rovné",
"Rovné",
"Rovné",
"Rovňany",
"Rozhanovce",
"Rozložná",
"Roztoky",
"Rožkovany",
"Rožňava",
"Rožňavské Bystré",
"Rúbaň",
"Rudina",
"Rudinka",
"Rudinská",
"Rudlov",
"Rudná",
"Rudnianska Lehota",
"Rudník",
"Rudník",
"Rudno",
"Rudno nad Hronom",
"Rudňany",
"Rumanová",
"Rumince",
"Runina",
"Ruská",
"Ruská Bystrá",
"Ruská Kajňa",
"Ruská Nová Ves",
"Ruská Poruba",
"Ruská Volová",
"Ruská Voľa",
"Ruská Voľa nad Popradom",
"Ruskov",
"Ruskovce",
"Ruskovce",
"Ruský Hrabovec",
"Ruský Potok",
"Ružiná",
"Ružindol",
"Ružomberok",
"Rybany",
"Rybky",
"Rybník",
"Rybník",
"Rykynčice",
"Sabinov",
"Sačurov",
"Sádočné",
"Sady nad Torysou",
"Salka",
"Santovka",
"Sap",
"Sása",
"Sása",
"Sasinkovo",
"Sazdice",
"Sebedín - Bečov",
"Sebedražie",
"Sebechleby",
"Seč",
"Sečianky",
"Sečovce",
"Sečovská Polianka",
"Sedliacka Dubová",
"Sedliská",
"Sedmerovec",
"Sejkov",
"Sekule",
"Selce",
"Selce",
"Selce",
"Selec",
"Selice",
"Seľany",
"Semerovo",
"Senec",
"Seniakovce",
"Senica",
"Senné",
"Senné",
"Senohrad",
"Seňa",
"Sereď",
"Sielnica",
"Sihelné",
"Sihla",
"Sikenica",
"Sikenička",
"Siladice",
"Silica",
"Silická Brezová",
"Silická Jablonica",
"Sirk",
"Sirník",
"Skačany",
"Skalica",
"Skalité",
"Skalka nad Váhom",
"Skároš",
"Skerešovo",
"Sklabiná",
"Sklabinský Podzámok",
"Sklabiňa",
"Sklené",
"Sklené Teplice",
"Skrabské",
"Skýcov",
"Sládkovičovo",
"Slančík",
"Slanec",
"Slanská Huta",
"Slanské Nové Mesto",
"Slaská",
"Slatina",
"Slatina nad Bebravou",
"Slatinka nad Bebravou",
"Slatinské Lazy",
"Slatvina",
"Slavec",
"Slavkovce",
"Slavnica",
"Slavoška",
"Slavošovce",
"Slepčany",
"Sliač",
"Sliepkovce",
"Slizké",
"Slivník",
"Slopná",
"Slovany",
"Slovenská Kajňa",
"Slovenská Ľupča",
"Slovenská Nová Ves",
"Slovenská Ves",
"Slovenská Volová",
"Slovenské Ďarmoty",
"Slovenské Kľačany",
"Slovenské Krivé",
"Slovenské Nové Mesto",
"Slovenské Pravno",
"Slovenský Grob",
"Slovinky",
"Sľažany",
"Smilno",
"Smižany",
"Smolenice",
"Smolinské",
"Smolnícka Huta",
"Smolník",
"Smrdáky",
"Smrečany",
"Snakov",
"Snežnica",
"Snina",
"Socovce",
"Soblahov",
"Soboš",
"Sobotište",
"Sobrance",
"Sokolce",
"Sokolovce",
"Sokoľ",
"Sokoľany",
"Solčany",
"Solčianky",
"Sološnica",
"Soľ",
"Soľnička",
"Soľník",
"Somotor",
"Sopkovce",
"Spišská Belá",
"Spišská Nová Ves",
"Spišská Stará Ves",
"Spišská Teplica",
"Spišské Bystré",
"Spišské Hanušovce",
"Spišské Podhradie",
"Spišské Tomášovce",
"Spišské Vlachy",
"Spišský Hrhov",
"Spišský Hrušov",
"Spišský Štiavnik",
"Spišský Štvrtok",
"Stakčín",
"Stakčínska Roztoka",
"Stanča",
"Stankovany",
"Stankovce",
"Stará Bašta",
"Stará Bystrica",
"Stará Halič",
"Stará Huta",
"Stará Kremnička",
"Stará Lehota",
"Stará Lesná",
"Stará Ľubovňa",
"Stará Myjava",
"Stará Turá",
"Stará Voda",
"Staré",
"Staré Hory",
"Starina",
"Starý Hrádok",
"Starý Tekov",
"Staškov",
"Staškovce",
"Stebnícka Huta",
"Stebník",
"Stožok",
"Stráne pod Tatrami",
"Stránska",
"Stránske",
"Stráňany",
"Stráňavy",
"Stratená",
"Stráža",
"Strážne",
"Strážske",
"Strečno",
"Streda nad Bodrogom",
"Stredné Plachtince",
"Strekov",
"Strelníky",
"Stretava",
"Stretavka",
"Streženice",
"Strihovce",
"Stročín",
"Stropkov",
"Studená",
"Studenec",
"Studienka",
"Stuľany",
"Stupava",
"Stupné",
"Sučany",
"Sudince",
"Súdovce",
"Suchá Dolina",
"Suchá Hora",
"Suchá nad Parnou",
"Sucháň",
"Suché",
"Suché Brezovo",
"Suchohrad",
"Sukov",
"Sulín",
"Súlovce",
"Súľov - Hradná",
"Sušany",
"Sútor",
"Svätá Mária",
"Svätoplukovo",
"Svätuš",
"Svätuše",
"Svätý Anton",
"Svätý Jur",
"Svätý Kríž",
"Svätý Peter",
"Svederník",
"Sverepec",
"Sveržov",
"Svetlice",
"Svidnička",
"Svidník",
"Svinia",
"Svinica",
"Svinice",
"Svinná",
"Svit",
"Svodín",
"Svrbice",
"Svrčinovec",
"Šahy",
"Šajdíkove Humence",
"Šalgovce",
"Šalgočka",
"Šalov",
"Šaľa",
"Šambron",
"Šamorín",
"Šamudovce",
"Šandal",
"Šarbov",
"Šarišská Poruba",
"Šarišská Trstená",
"Šarišské Bohdanovce",
"Šarišské Čierne",
"Šarišské Dravce",
"Šarišské Jastrabie",
"Šarišské Michaľany",
"Šarišské Sokolovce",
"Šarišský Štiavnik",
"Šarkan",
"Šarovce",
"Šašová",
"Šaštín - Stráže",
"Šávoľ",
"Šelpice",
"Šemetkovce",
"Šemša",
"Šenkvice",
"Šiatorská Bukovinka",
"Šiba",
"Šíd",
"Šimonovce",
"Šindliar",
"Šintava",
"Šípkov",
"Šípkové",
"Širákov",
"Širkovce",
"Široké",
"Šišov",
"Šivetice",
"Šmigovec",
"Šoltýska",
"Šoporňa",
"Špačince",
"Špania Dolina",
"Španie Pole",
"Šrobárová",
"Štefanov",
"Štefanov nad Oravou",
"Štefanová",
"Štefanovce",
"Štefanovce",
"Štefanovičová",
"Štefurov",
"Šterusy",
"Štiavnické Bane",
"Štiavnička",
"Štiavnik",
"Štítnik",
"Štós",
"Štôla",
"Štrba",
"Štrkovec",
"Štúrovo",
"Štvrtok",
"Štvrtok na Ostrove",
"Šuľa",
"Šumiac",
"Šuňava",
"Šurany",
"Šurianky",
"Šurice",
"Šúrovce",
"Šútovo",
"Šútovce",
"Švábovce",
"Švedlár",
"Švošov",
"Tachty",
"Tajná",
"Tajov",
"Tarnov",
"Tatranská Javorina",
"Tašuľa",
"Tehla",
"Tekolďany",
"Tekovská Breznica",
"Tekovské Lužany",
"Tekovské Nemce",
"Tekovský Hrádok",
"Telgárt",
"Telince",
"Temeš",
"Teplička",
"Teplička nad Váhom",
"Tepličky",
"Teplý Vrch",
"Terany",
"Terchová",
"Teriakovce",
"Terňa",
"Tesáre",
"Tesárske Mlyňany",
"Tešedíkovo",
"Tibava",
"Tichý Potok",
"Timoradza",
"Tisinec",
"Tisovec",
"Tlmače",
"Točnica",
"Tokajík",
"Tomášikovo",
"Tomášov",
"Tomášovce",
"Tomášovce",
"Topoľa",
"Topoľčany",
"Topoľčianky",
"Topoľnica",
"Topoľníky",
"Topoľovka",
"Toporec",
"Tornaľa",
"Torysa",
"Torysky",
"Tovarné",
"Tovarnianska Polianka",
"Tovarníky",
"Tôň",
"Trakovice",
"Trávnica",
"Trávnik",
"Trebatice",
"Trebejov",
"Trebeľovce",
"Trebichava",
"Trebišov",
"Trebostovo",
"Trebušovce",
"Trenč",
"Trenčianska Teplá",
"Trenčianska Turná",
"Trenčianske Bohuslavice",
"Trenčianske Jastrabie",
"Trenčianske Mitice",
"Trenčianske Stankovce",
"Trenčianske Teplice",
"Trenčín",
"Trhová Hradská",
"Trhovište",
"Trnava",
"Trnavá Hora",
"Trnava pri Laborci",
"Trnávka",
"Trnávka",
"Trnkov",
"Trnovec",
"Trnovec nad Váhom",
"Trnovo",
"Tročany",
"Trpín",
"Trstená",
"Trstená na Ostrove",
"Trstené",
"Trstené pri Hornáde",
"Trstice",
"Trstín",
"Trsťany",
"Tŕnie",
"Tuhár",
"Tuhrina",
"Tuchyňa",
"Tulčík",
"Tupá",
"Turá",
"Turany",
"Turany nad Ondavou",
"Turcovce",
"Turček",
"Turčianky",
"Turčianska Štiavnička",
"Turčianske Jaseno",
"Turčianske Kľačany",
"Turčianske Teplice",
"Turčiansky Ďur",
"Turčiansky Peter",
"Turčok",
"Turecká",
"Tureň",
"Turie",
"Turík",
"Turnianska Nová Ves",
"Turňa nad Bodvou",
"Turová",
"Turzovka",
"Tušice",
"Tušická Nová Ves",
"Tužina",
"Tvarožná",
"Tvrdomestice",
"Tvrdošín",
"Tvrdošovce",
"Ťapešovo",
"Ubľa",
"Úbrež",
"Udavské",
"Udiča",
"Údol",
"Uhliská",
"Úhorná",
"Uhorská Ves",
"Uhorské",
"Uhrovec",
"Uhrovské Podhradie",
"Ulič",
"Uličské Krivé",
"Uloža",
"Úľany nad Žitavou",
"Unín",
"Uňatín",
"Urmince",
"Utekáč",
"Uzovce",
"Uzovská Panica",
"Uzovské Pekľany",
"Uzovský Šalgov",
"Vaďovce",
"Vagrinec",
"Váhovce",
"Vajkovce",
"Valaliky",
"Valaská",
"Valaská Belá",
"Valaská Dubová",
"Valaškovce (vojenský obvod)",
"Valča",
"Valentovce",
"Valice",
"Valkovce",
"Vaľkovňa",
"Vaniškovce",
"Vápeník",
"Varadka",
"Varechovce",
"Varhaňovce",
"Varín",
"Vasiľov",
"Vavrečka",
"Vavrinec",
"Vavrišovo",
"Važec",
"Vechec",
"Velčice",
"Veličná",
"Velušovce",
"Veľaty",
"Veľká Čausa",
"Veľká Čierna",
"Veľká Dolina",
"Veľká Franková",
"Veľká Hradná",
"Veľká Ida",
"Veľká Lesná",
"Veľká Lodina",
"Veľká Lomnica",
"Veľká Mača",
"Veľká Paka",
"Veľká Tŕňa",
"Veľké Bierovce",
"Veľké Blahovo",
"Veľké Borové",
"Veľké Držkovce",
"Veľké Dvorany",
"Veľké Dvorníky",
"Veľké Hoste",
"Veľké Chlievany",
"Veľké Chyndice",
"Veľké Kapušany",
"Veľké Kosihy",
"Veľké Kostoľany",
"Veľké Kozmálovce",
"Veľké Kršteňany",
"Veľké Leváre",
"Veľké Lovce",
"Veľké Ludince",
"Veľké Orvište",
"Veľké Ozorovce",
"Veľké Raškovce",
"Veľké Revištia",
"Veľké Ripňany",
"Veľké Rovné",
"Veľké Slemence",
"Veľké Trakany",
"Veľké Turovce",
"Veľké Uherce",
"Veľké Úľany",
"Veľké Vozokany",
"Veľké Zálužie",
"Veľkrop",
"Veľký Biel",
"Veľký Cetín",
"Veľký Čepčín",
"Veľký Ďur",
"Veľký Folkmar",
"Veľký Grob",
"Veľký Horeš",
"Veľký Kamenec",
"Veľký Klíž",
"Veľký Krtíš",
"Veľký Kýr",
"Veľký Lapáš",
"Veľký Lipník",
"Veľký Meder",
"Veľký Slavkov",
"Veľký Slivník",
"Veľký Šariš",
"Veľopolie",
"Vernár",
"Veselé",
"Veterná Poruba",
"Vieska",
"Vieska",
"Vieska nad Žitavou",
"Vikartovce",
"Vinica",
"Viničky",
"Viničné",
"Vinné",
"Vinodol",
"Vinohrady nad Váhom",
"Vinosady",
"Virt",
"Vislanka",
"Vislava",
"Visolaje",
"Višňov",
"Višňové",
"Višňové",
"Vištuk",
"Vitanová",
"Vítkovce",
"Víťaz",
"Víťazovce",
"Vlača",
"Vladiča",
"Vlachovo",
"Vlachy",
"Vlčany",
"Vlčkovce",
"Vlkas",
"Vlková",
"Vlkovce",
"Vlky",
"Voderady",
"Vojany",
"Vojčice",
"Vojka",
"Vojka nad Dunajom",
"Vojkovce",
"Vojnatina",
"Vojňany",
"Vojtovce",
"Volica",
"Volkovce",
"Voľa",
"Vozokany",
"Vozokany",
"Vráble",
"Vrádište",
"Vrakúň",
"Vranov nad Topľou",
"Vrbnica",
"Vrbov",
"Vrbovce",
"Vrbová nad Váhom",
"Vrbové",
"Vrchteplá",
"Vrícko",
"Vršatské Podhradie",
"Vrútky",
"Vtáčkovce",
"Výborná",
"Výčapy - Opatovce",
"Vydrany",
"Vydrná",
"Vydrník",
"Východná",
"Výrava",
"Vysočany",
"Vysoká",
"Vysoká",
"Vysoká nad Kysucou",
"Vysoká nad Uhom",
"Vysoká pri Morave",
"Vysoké Tatry",
"Vyškovce",
"Vyškovce nad Ipľom",
"Vyšná Boca",
"Vyšná Hutka",
"Vyšná Jablonka",
"Vyšná Jedľová",
"Vyšná Kamenica",
"Vyšná Myšľa",
"Vyšná Olšava",
"Vyšná Pisaná",
"Vyšná Polianka",
"Vyšná Rybnica",
"Vyšná Sitnica",
"Vyšná Slaná",
"Vyšná Šebastová",
"Vyšná Voľa",
"Vyšné Ladičkovce",
"Vyšné nad Hronom",
"Vyšné Nemecké",
"Vyšné Remety",
"Vyšné Repaše",
"Vyšné Ružbachy",
"Vyšný Čaj",
"Vyšný Hrabovec",
"Vyšný Hrušov",
"Vyšný Kazimír",
"Vyšný Klátov",
"Vyšný Komárnik",
"Vyšný Kručov",
"Vyšný Kubín",
"Vyšný Mirošov",
"Vyšný Orlík",
"Vyšný Slavkov",
"Vyšný Tvarožec",
"Vyšný Žipov",
"Zábiedovo",
"Záborie",
"Záborské",
"Zádiel",
"Záhor",
"Záhorie (vojenský obvod)",
"Záhorská Ves",
"Záhradné",
"Zákamenné",
"Zákopčie",
"Zalaba",
"Zálesie",
"Zálesie",
"Zalužice",
"Zamarovce",
"Zámutov",
"Záriečie",
"Záskalie",
"Zatín",
"Závada",
"Závada",
"Závadka",
"Závadka",
"Závadka",
"Zavar",
"Závažná Poruba",
"Závod",
"Zázrivá",
"Zbehňov",
"Zbehy",
"Zboj",
"Zbojné",
"Zborov",
"Zborov nad Bystricou",
"Zbrojníky",
"Zbudská Belá",
"Zbudské Dlhé",
"Zbudza",
"Zbyňov",
"Zeleneč",
"Zemianska Olča",
"Zemianske Kostoľany",
"Zemianske Podhradie",
"Zemianske Sady",
"Zemné",
"Zemplín",
"Zemplínska Nová Ves",
"Zemplínska Široká",
"Zemplínska Teplica",
"Zemplínske Hámre",
"Zemplínske Hradište",
"Zemplínske Jastrabie",
"Zemplínske Kopčany",
"Zemplínsky Branč",
"Zlatá Baňa",
"Zlatá Idka",
"Zlaté",
"Zlaté Klasy",
"Zlaté Moravce",
"Zlatná na Ostrove",
"Zlatník",
"Zlatníky",
"Zlatno",
"Zlatno",
"Zliechov",
"Zohor",
"Zubák",
"Zuberec",
"Zubné",
"Zubrohlava",
"Zvolen",
"Zvončín",
"Žabokreky",
"Žabokreky nad Nitrou",
"Žakarovce",
"Žakovce",
"Žalobín",
"Žarnov",
"Žarnovica",
"Žaškov",
"Žbince",
"Ždaňa",
"Ždiar",
"Žehňa",
"Žehra",
"Železník",
"Želiezovce",
"Želmanovce",
"Žemberovce",
"Žemliare",
"Žiar",
"Žiar",
"Žiar nad Hronom",
"Žihárec",
"Žikava",
"Žilina",
"Žipov",
"Žirany",
"Žitavany",
"Žitavce",
"Žitná - Radiša",
"Žlkovce",
"Župčany",
)
streets = (
"Adámiho",
"Agátová",
"Ahoj",
"Albánska",
"Albrechtova",
"Alejová",
"Alešova",
"Alstrova",
"Alžbetínska",
"Alžbety Gwerkovej",
"Amarelková",
"Ambroseho",
"Ambrova",
"Ambrušova",
"Americká",
"Americké námestie",
"Americké námestie",
"Amurská",
"Andreja Mráza",
"Andreja Plávku",
"Andrusovova",
"Anenská",
"Anenská",
"Anízová",
"Antická",
"Antolská",
"Arménska",
"Astronomická",
"Astrová",
"Avarská",
"Azalková",
"Azovská",
"Babuškova",
"Bagarova",
"Báger",
"Bahniatková",
"Bachova",
"Bajkalská",
"Bajkalská",
"Bajkalská",
"Bajkalská",
"Bajkalská",
"Bajkalská",
"Bajzova",
"Bakošova",
"Balkánska",
"Baltská",
"Bancíkovej",
"Banícka",
"Baničova",
"Baníkova",
"Banskobystrická",
"Banšelova",
"Bardejovská",
"Bárdošova",
"Barónka",
"Bartókova",
"Bartoňova",
"Bartoškova",
"Baštová",
"Batkova",
"Bazalková",
"Bazová",
"Bazovského",
"Bažantia",
"Beblavého",
"Bebravská",
"Beckovská",
"Bedľová",
"Begóniová",
"Belániková",
"Belehradská",
"Belianska",
"Belinského",
"Bellova",
"Belopotockého",
"Beňadická",
"Bencúrova",
"Benediktiho",
"Beniakova",
"Beňovského",
"Bernolákova",
"Beskydská",
"Betliarska",
"Bezekova",
"Bezručova",
"Biela",
"Bielkova",
"Bieloruská",
"Bilíkova",
"Biskupická",
"Björnsonova",
"Blagoevova",
"Blatnická",
"Blatúchová",
"Bleduľová",
"Blumentálska",
"Blyskáčová",
"Bočná",
"Bodliaková",
"Bodrocká",
"Bodvianska",
"Bohrova",
"Bohúňova",
"Bojnická",
"Boragová",
"Borekova",
"Borievková",
"Borinská",
"Borodáčova",
"Borovicová",
"Borská",
"Bosákova",
"Boskovičova",
"Bošániho",
"Botanická",
"Bottova",
"Boženy Němcovej",
"Bôrik",
"Bradáčova",
"Bradlianska",
"Brančská",
"Bratislava-Vinohrady",
"Bratislavská",
"Bratská",
"Brečtanová",
"Brestová",
"Brezová",
"Brezovská",
"Brežná",
"Bridlicová",
"Briežky",
"Brigádnická",
"Brižitská",
"Brnianska",
"Brodná",
"Brodská",
"Brokolicová",
"Bronzová",
"Broskyňová",
"Bršlenová",
"Brumovická",
"Brusnicová",
"Břeclavská",
"Bučinová",
"Budatínska",
"Budatínska",
"Budatínska",
"Búdkova cesta",
"Budovateľská",
"Budyšínska",
"Budyšínska",
"Bujnáková",
"Buková",
"Bukovinská",
"Bukureštská",
"Bulharská",
"Bulíkova",
"Bullova",
"Burgundská",
"Buzalkova",
"Bystrého",
"Bystrická",
"BzovIcka",
"Cabanova",
"Cablkova",
"Cádrova",
"Cesta mládeže",
"Cesta mládeže",
"Cesta na Červený most",
"Cesta na Červený most",
"Cesta na Kamzík",
"Cesta na Klanec",
"Cesta na Senec",
"Cígeľská",
"Cikkerova",
"Cintorínska",
"Cintulova",
"Colnícka",
"Cukrová",
"Cyklámenová",
"Cyprichova",
"Cyprichova",
"Cyrilova",
"Čachtická",
"Čajakova",
"Čajakova",
"Čajkovského",
"Čakanková",
"Čaklovská",
"Čalovská",
"Čapajevova",
"Čapkova",
"Čárskeho",
"Čavojského",
"Čečinová",
"Čelakovského",
"Čerešňová",
"Černicová",
"Černockého",
"Černockého",
"Černyševského",
"Červená",
"Červeňákova",
"Červeňova",
"Česká",
"Československých par",
"Československých tan",
"Čiernohorská",
"Čiernovodská",
"Čierny chodník",
"Čiližská",
"Čipkárska",
"Čmelíkova",
"Čmeľovec",
"Čremchová",
"Čučoriedková",
"Čulenova",
"Daliborovo námestie",
"Damborského",
"Dankovského",
"Dargovská",
"Ďatelinová",
"Daxnerovo námestie",
"Delená",
"Delená cesta",
"Demänovská",
"Desiata",
"Detvianska",
"Devätinová",
"Deviata",
"Devínska cesta",
"Devínska cesta - kam",
"Devínske jazero",
"Dlhá",
"Dlhé diely I.",
"Dlhé diely II.",
"Dlhé diely III.",
"Dneperská",
"Dobrovičova",
"Dobrovičova",
"Dobrovského",
"Dobšinského",
"Dohnalova",
"Dohnányho",
"Doležalova",
"Dolná",
"Dolné Koruny",
"Dolnokorunská",
"Dolnozemská cesta",
"Domašská",
"Domkárska",
"Domové role",
"Donnerova",
"Donovalova",
"Donská",
"Dopravná",
"Dorastenecká",
"Dostojevského rad",
"Dr. Vladimíra Clemen",
"Dražická",
"Drevená",
"Drieňová",
"Drieňová",
"Drieňová",
"Drobného",
"Drotárska cesta",
"Drotárska cesta",
"Drotárska cesta",
"Druhá",
"Druidská",
"Družicová",
"Družobná",
"Družstevná",
"Dubnická",
"Dubová",
"Dúbravčická",
"Dúbravská cesta",
"Dudova",
"Dudvážska",
"Dulovo námestie",
"Dulovo námestie",
"Ďumbierska",
"Dunajská",
"Ďurgalova",
"Dvanásta",
"Dvojkrížna",
"Dvojkrížna",
"Dvořákovo nábrežie",
"Edisonova",
"Egrešová",
"Einsteinova",
"Eisnerova",
"Elektrárenská",
"Estónska",
"Estónska",
"Exnárova",
"F. Kostku",
"Fadruszova",
"Fajnorovo nábrežie",
"Fándlyho",
"Farebná",
"Farská",
"Farského",
"Fazuľová",
"Fedákova",
"Fedinova",
"Ferienčíkova",
"Fialkové údolie",
"Fibichova",
"Fikusová",
"Filiálne nádražie",
"Fláviovská",
"Flöglova",
"Floriánske námestie",
"Fraňa Kráľa",
"Francisciho",
"Francúzskych partizá",
"Frankovská",
"Františkánska",
"Františkánske námest",
"Františka Schmuckera",
"Furdekova",
"Furdekova",
"Furmanská",
"Furmintská",
"Gabčíkova",
"Gagarinova",
"Gagarinova",
"Gagarinova",
"Gajarská",
"Gajc",
"Gajova",
"Galaktická",
"Galandova",
"Galbavého",
"Gallayova",
"Gallova",
"Galvaniho",
"Gašparíkova",
"Gaštanová",
"Gavlovičova",
"Gbelská",
"Gelnická",
"Gemerská",
"Geologická",
"Georgínová",
"Gercenova",
"Gerulatská",
"Gessayova",
"Gettingová",
"Glavica",
"Godrova",
"Gogoľova",
"Goláňova",
"Gondova",
"Goralská",
"Gorazdova",
"Gorkého",
"Gregorovej",
"Gronárska",
"Grösslingova",
"Gruzínska",
"Gunduličova",
"Guothova",
"Gusevova",
"Haanova",
"Haburská",
"Hadia cesta",
"Hadriánová",
"Hagarova",
"Hagarova",
"Hájová",
"Halašova",
"Hálkova",
"Hálova",
"Hamuliakova",
"Hanácka",
"Handlovská",
"Hanulova",
"Hanulova",
"Hany Meličkovej",
"Hargašova",
"Harmanecká",
"Harmincova",
"Hasičská",
"Hattalova",
"Havelkova",
"Havlíčkova",
"Havrania",
"Haydnova",
"Hečkova",
"Herlianska",
"Herlianska",
"Heydukova",
"Heyrovského",
"Hlaváčikova",
"Hlavatého",
"Hlavná",
"Hlavné námestie",
"Hlbinná",
"Hlboká cesta",
"Hlboká cesta",
"Hlinická",
"Hlinická",
"Hlivová",
"Hlohová",
"Hlučínska",
"Hnilecká",
"Hodálova",
"Hodonínska",
"Hodonínska",
"Hodonínska",
"Hodžovo námestie",
"Holekova",
"Holíčska",
"Hollého",
"Holubyho",
"Homolova",
"Hontianska",
"Horárska",
"Horcová",
"Horčičná",
"Horná",
"Horná Vančurová",
"Hornádska",
"Horné Židiny",
"Horská",
"Horská",
"Horská",
"Hospodárska",
"Hrabový chodník",
"Hrad",
"Hradištná",
"Hradná",
"Hradné údolie",
"Hradská",
"Hrachová",
"Hraničiarska",
"Hraničná",
"Hraničný priechod-Ču",
"Hrdličkova",
"Hrebendova",
"Hríbová",
"Hriňovská",
"Hrobákova",
"Hrobárska",
"Hroboňova",
"Hronska",
"Hroznová",
"Hrušková",
"Hrušovská",
"Hubeného",
"Hubeného",
"Hudecova",
"Humenské námestie",
"Hummelova",
"Hurbanovo námestie",
"Hurbanovo námestie",
"Husova",
"Húščavova",
"Hutnícka",
"Hviezdna",
"Hviezdicová",
"Hviezdoslavova",
"Hviezdoslavovo námes",
"Hyacintová",
"Hybešova",
"Hydinárska",
"Hýrošova",
"Chalupkova",
"Charkovská",
"Chemická",
"Chladná",
"Chlumeckého",
"Chmeľová",
"Chorvátska",
"Chorvátska",
"Chotárna",
"Chrasťová",
"Chrenová",
"Chrobákova",
"Ihličnatá",
"Ihrisková",
"Iľjušinova",
"Ilkovičova",
"Ílová",
"Ilýrska",
"Imelová",
"Inovecká",
"Inovecká",
"Ipeľská",
"Irisová",
"Irkutská",
"Iršajská",
"Iskerníková",
"Istrijská",
"Ivana Blazeviča",
"Ivana Bukovčana",
"Ivana Horvátha",
"Ivánska cesta",
"J.C.Hronského",
"Jabloňová",
"Jačmenná",
"Jadranská",
"Jadrová",
"Jahodová",
"Jakabova",
"Jakubíkova",
"Jakubovo námestie",
"Jakubská",
"Jalovcová",
"Jamnického",
"Jána Jonáša",
"Jána Poničana",
"Jána Raka",
"Jána Smreka",
"Jána Stanislava",
"Janáčkova",
"Jančova",
"Janíkove role",
"Janka Kráľa",
"Jankolova",
"Jánošíkova",
"Jánoškova",
"Janotova",
"Janšákova",
"Jantárová",
"Jantárová",
"Jantárová cesta",
"Jarabinková",
"Jarná",
"Jaroslavova",
"Jarošova",
"Jasencová",
"Jaseňová",
"Jaskový rad",
"Jasná",
"Jasovská",
"Jastrabia",
"Jašíkova",
"Javorinská",
"Javorová",
"Jazdecká",
"Jazerná",
"Jazmínová",
"Jedenásta",
"Jedlíkova",
"Jedľová",
"Jégého",
"Jegeneš",
"Jelačičova",
"Jelenia",
"Jelšová",
"Jeséniova",
"Jesenná",
"Jesenského",
"Jesienková",
"Jiráskova",
"Jiskrova",
"Jókaiho",
"Jozefa Mikisitsa",
"Jozefa Vachovského",
"Jozefská",
"Júlová",
"Junácka",
"Jungmannova",
"Júnová",
"Jurigovo námestie",
"Jurkovičova",
"Jurovského",
"Jurská",
"Justičná",
"K horárskej studni",
"K lomu",
"K pasienkom",
"K Železnej studienke",
"Kadnárova",
"Kadnárova",
"Kadnárova",
"Kadnárova",
"Kadnárova",
"Kafendova",
"Kalinčiakova",
"Kalinová",
"Kalištná",
"Kaméliová",
"Kamenárska",
"Kamenné námestie",
"Kamilková",
"Kamilková",
"Kamzík",
"Kapicova",
"Kapitulská",
"Kapitulský dvor",
"Kaplinská",
"Kapucínska",
"Kapušianska",
"Karadžičova",
"Karadžičova",
"Karadžičova",
"Karadžičova",
"Karloveská",
"Karloveské rameno",
"Karpatská",
"Karpatské námestie",
"Kašmírska",
"Kaštielska",
"Kataríny Brúderovej",
"Kaukazská",
"Kazanská",
"Kazanská",
"Kazanská",
"Keltská",
"Kempelenova",
"Ketelec",
"Kežmarské námestie",
"Kladnianska",
"Klariská",
"Klásková",
"Kláštorská",
"Klatovská",
"Klatovská",
"Klemensova",
"Klenová",
"Klimkovičova",
"Klincová",
"Klobučnícka",
"Klokočova",
"Kľukatá",
"Kĺzavá",
"Kmeťovo námestie",
"Knižková dolina",
"Koceľova",
"Kočánkova",
"Kohútova",
"Koľajná",
"Kolárska",
"Kolískova",
"Kollárova",
"Kollárovo námestie",
"Kollárovo námestie",
"Kolmá",
"Komárňanská",
"Komárnická",
"Komárnická",
"Komárovská",
"Komenského námestie",
"Kominárska",
"Komonicová",
"Koncová",
"Koniarkova",
"Konopná",
"Konvalinková",
"Konventná",
"Kopanice",
"Kopčianska",
"Koperníkova",
"Koprivnická",
"Koprivnická",
"Koprivnická",
"Korabinského",
"Kórejská",
"Koreničova",
"Koreňová",
"Korunská",
"Korytnická",
"Kosatcová",
"Kosodrevinová",
"Kostlivého",
"Kostolná",
"Košická",
"Košická",
"Košická",
"Kovácsova",
"Kováčska",
"Kovorobotnícka",
"Kovová",
"Kozia",
"Koziarka",
"Kozičova",
"Kozmonautická",
"Kožušnícka",
"Kôprová",
"Kôstková",
"Krahulčia",
"Krajinská",
"Krajinská cesta",
"Krajná",
"Krakovská",
"Kráľovské údolie",
"Krasinského",
"Kraskova",
"Krásna",
"Krásnohorská",
"Krasovského",
"Kratiny",
"Krátka",
"Krčméryho",
"Kremeľská",
"Kremencová",
"Kremnická",
"Kresánkova",
"Kríková",
"Krivá",
"Križkova",
"Krížna",
"Krížna",
"Krížna",
"Krížna",
"Krmanova",
"Krokusová",
"Krompašská",
"Krupinská",
"Kubačova",
"Kubániho",
"Kubínska",
"Kudlákova",
"Kuklovská",
"Kúkoľová",
"Kukučínova",
"Kukuričná",
"Kulíškova",
"Kultúrna",
"Kuneradská",
"Kupeckého",
"Kúpeľná",
"Kurucova",
"Kutlíkova",
"Kútska",
"Kutuzovova",
"Kuzmányho",
"Kvačalova",
"Kvetinárska",
"Kvetná",
"Kýčerského",
"Kyjevská",
"Kysucká",
"Laborecká",
"Lackova",
"Ladislava Batthyányh",
"Ladislava Dérera",
"Ladislava Sáru",
"Ľadová",
"Ladzianskeho",
"Lachova",
"Ľaliová",
"Lamačská cesta",
"Lamačská cesta",
"Lamačská cesta",
"Lamanského",
"Landauova",
"Landererova",
"Langsfeldova",
"Ľanová",
"Laskomerského",
"Laténská",
"Latorická",
"Laučekova",
"Laurinská",
"Lazaretská",
"Lazaretská",
"Leánska",
"Lediny",
"Legerského",
"Legionárska",
"Legionárska",
"Lehotského",
"Lehotského",
"Leknová",
"Lenardova",
"Lermontovova",
"Lesná",
"Lesnícka",
"Leškova",
"Letecká",
"Letisko M.R.Štefánik",
"Letná",
"Levanduľová",
"Levárska",
"Levická",
"Levočská",
"Lidická",
"Lieskovec",
"Lieskovcová",
"Lieskovská cesta",
"Lietavská",
"Lichardova",
"Likavská",
"Limbová",
"Linzbothova",
"Lipnicová",
"Lipová",
"Lipského",
"Liptovská",
"Lisovňa",
"Listová",
"Líščie nivy",
"Líščie údolie",
"Litovská",
"Lodná",
"Lombardiniho",
"Lomnická",
"Lomonosovova",
"Longobardská",
"Lónyaiová",
"Lopenícka",
"Lotyšská",
"Lovinského",
"Lozornianská",
"Ľubietovská",
"Ľubinská",
"Ľubľanská",
"Ľubochnianska",
"Ľubovnianska",
"Ľubovníková",
"Ľudové námestie",
"Ľudovíta Fullu",
"Luhačovická",
"Lužická",
"Lúčna",
"Lužná",
"Lýcejná",
"Lykovcová",
"Lysákova",
"M. Hella",
"Madáchova",
"Maďarská",
"Magnetová",
"Magnezitová",
"Magnóliová",
"Magurská",
"Macharova",
"Máchova",
"Majakovského",
"Majerníkova",
"Majerská",
"Májkova",
"Majoránová",
"Májová",
"Maková",
"Makovického",
"Malá",
"Malagová",
"Malé pálenisko",
"Malinová",
"Malodunajská",
"Malokarpatské námest",
"Malý Draždiak",
"Malý trh",
"Mamateyova",
"Mamateyova",
"Mandľová",
"Mandľovníková",
"Mánesovo námestie",
"Margarétková",
"Marhuľová",
"Mariánska",
"Marie Curie-Sklodows",
"Márie Medveďovej",
"Markova",
"Marótyho",
"Martákovej",
"Martinčekova",
"Martinčekova",
"Martinengova",
"Martinská",
"Mateja Bela",
"Matejkova",
"Matičná",
"Mätová",
"Matúškova",
"Matúšova",
"Mečíkova",
"Medená",
"Medová",
"Medovková",
"Medzierka",
"Medzilaborecká",
"Mesačná",
"Mestská",
"Meteorová",
"Metodova",
"Mickiewiczova",
"Mierová",
"Michalská",
"Mikovíniho",
"Mikulášska",
"Milana Marečka",
"Milana Pišúta",
"Miletičova",
"Miletičova",
"Mišíkova",
"Mišíkova",
"Mišíkova",
"Mládežnícka",
"Mliekárenská",
"Mlynarovičova",
"Mlynská",
"Mlynská dolina",
"Mlynská dolina",
"Mlynská dolina",
"Mlynské luhy",
"Mlynské nivy",
"Mlynské nivy",
"Mlynské nivy",
"Mlynské nivy",
"Mlynské nivy",
"Modranská",
"Modricová",
"Modrý chodník",
"Mojmírova",
"Mokráň záhon",
"Mokrohájska cesta",
"Moldavská",
"Molecova",
"Monardová",
"Morava",
"Moravská",
"Morušova",
"Moskovská",
"Most SNP",
"Mostná",
"Mostová",
"Mošovského",
"Motýlia",
"Moyšova",
"Moyzesova",
"Mozartova",
"Mramorová",
"Mraziarenská",
"Mrázova",
"Mudrochova",
"Mudroňova",
"Mudroňova",
"Mudroňova",
"Muchovo námestie",
"Muránska",
"Murgašova",
"Murnice",
"Muškátová",
"Muštová",
"Múzejná",
"Myjavská",
"Mýtna",
"Mýtna",
"Na Baránku",
"Na barine",
"Na Brezinách",
"Na doline",
"Na grbe",
"Na Grunte",
"Na Holom vrchu",
"Na hrádzi",
"Na Hrebienku",
"Na hriadkach",
"Na Kalvárii",
"Na kaštieli",
"Na kopci",
"Na križovatkách",
"Na lánoch",
"Na medzi",
"Na mýte",
"Na pántoch",
"Na pasekách",
"Na paši",
"Na pažiti",
"Na piesku",
"Na Revíne",
"Na Riviére",
"Na rozhliadke",
"Na Sitine",
"Na skale",
"Na Slanci",
"Na Slavíne",
"Na spojke",
"Na stráni",
"Na Štyridsiatku",
"Na úvrati",
"Na varte",
"Na Vlkovkách",
"Na vrátkach",
"Na vŕšku",
"Na vyhliadke",
"Na výslní",
"Na Zlatej nohe",
"Nábělkova",
"Nábrežie arm. gen. L",
"Nábrežná",
"Nad Dunajom",
"Nad Gronárom",
"Nad jazierkom",
"Nad kúriou",
"Nad lomom",
"Nad lúčkami",
"Nad lúčkami",
"Nad ostrovom",
"Nad Sihoťou",
"Nákovná",
"Nákupná",
"Námestie 1. mája",
"Námestie 6. apríla",
"Námestie Alexandra D",
"Námestie Andreja Hli",
"Námestie Biely kríž",
"Námestie Hraničiarov",
"Námestie Jána Kostru",
"Námestie Jána Pavla",
"Námestie Ľudovíta Št",
"Námestie Martina Ben",
"Námestie Rodiny",
"Námestie slobody",
"Námestie slobody",
"Námestie SNP",
"Námestie SNP",
"Námestie sv. Františ",
"Námestie sv. Petra a",
"Narcisová",
"Nedbalova",
"Nechtíková",
"Nejedlého",
"Nekrasovova",
"Nemčíkova",
"Nerudova",
"Nevädzová",
"Nevská",
"Nezábudková",
"Nezvalova",
"Niťová",
"Nitrianska",
"Nížinná",
"Nobelova",
"Nobelovo námestie",
"Nová",
"Nová Bellova",
"Nová hora",
"Novackého",
"Nové pálenisko",
"Nové záhrady I",
"Nové záhrady II",
"Nové záhrady III",
"Nové záhrady IV",
"Nové záhrady V",
"Nové záhrady VI",
"Nové záhrady VII",
"Novinárska",
"Novobanská",
"Novodvorská",
"Novohorská",
"Novohradská",
"Novosadná",
"Novosvetská",
"Novosvetská",
"Novosvetská",
"Novoveská",
"Nový záhon",
"Obežná",
"Obchodná",
"Oblačná",
"Oblúková",
"Očovská",
"Odbojárov",
"Odborárska",
"Odborárske námestie",
"Odborárske námestie",
"Odeská",
"Ohnicová",
"Okánikova",
"Okružná",
"Olbrachtova",
"Oleandrová",
"Olejkárska",
"Olivová",
"Olšová",
"Ondavská",
"Ondrejovova",
"Ondrejská",
"Opavská",
"Opletalova",
"Oráčska",
"Oravská",
"Orechová",
"Orechová cesta",
"Orechový rad",
"Orenburská",
"Orgovánová",
"Orchideová",
"Oriešková",
"Ormisova",
"Osadná",
"Osiková",
"Oskorušová",
"Osloboditeľská",
"Ostravská",
"Ostredková",
"Ostružinová",
"Osuského",
"Osvetová",
"Otonelská",
"Ovčiarska",
"Ovocná",
"Ovručská",
"Ovsená",
"Ovsištské námestie",
"Ožvoldíkova",
"Ôsma",
"Pajštúnska",
"Palackého",
"Palárikova",
"Palárikova",
"Palinová",
"Palisády",
"Palisády",
"Palisády",
"Palkovičova",
"Palmová",
"Panenská",
"Pankúchova",
"Panónska cesta",
"Panská",
"Papánkovo námestie",
"Papraďová",
"Parcelná",
"Páričkova",
"Parková",
"Partizánska",
"Pasienková",
"Pasienky",
"Pastierska",
"Paulínyho",
"Pave Vukoviča",
"Pavla Blaha",
"Pavla Horova",
"Pavlovičova",
"Pavlovova",
"Pavlovská",
"Pažického",
"Pažítková",
"Pečnianska",
"Pekná cesta",
"Pekná cesta",
"Pekná cesta",
"Pekná vyhliadka",
"Pekníkova",
"Pernecká",
"Perličková",
"Pestovateľská",
"Petara Pasicha",
"Peterská",
"Petöfiho",
"Petržalská",
"Petúniová",
"Pezinská",
"Piata",
"Pieskovcová",
"Piesočná",
"Piešťanská",
"Pifflova",
"Pilárikova",
"Pílová",
"Píniová",
"Pionierska",
"Pionierska",
"Pivoňková",
"Plachého",
"Plachého",
"Planckova",
"Planét",
"Plánky",
"Platanová",
"Plátenícka",
"Plavecká",
"Plickova",
"Pluhová",
"Plynárenská",
"Plzenská",
"Pobrežná",
"Pod agátmi",
"Pod Bôrikom",
"Pod brehmi",
"Pod gaštanmi",
"Pod Kalváriou",
"Pod Klepáčom",
"Pod Kobylou",
"Pod Krásnou hôrkou",
"Pod lesom",
"Pod lipami",
"Pod Lipovým",
"Pod násypom",
"Pod Rovnicami",
"Pod skalou",
"Pod srdcom",
"Pod Strážami",
"Pod Vachmajstrom",
"Pod Válkom",
"Pod vinicami",
"Pod záhradami",
"Pod záhradami",
"Pod Zečákom",
"Podbeľová",
"Podbrezovská",
"Podháj",
"Podhorská",
"Podhorského",
"Podjavorinskej",
"Podkarpatská",
"Podkerepušky",
"Podkolibská",
"Podkorunská",
"Podlesná",
"Podlučinského",
"Podniková",
"Podpriehradná",
"Podtatranského",
"Podunajská",
"Podunajská",
"Podzáhradná",
"Pohánková",
"Pohraničníkov",
"Pohronská",
"Polárna",
"Polianky",
"Poľná",
"Poľnohospodárska",
"Poľný mlyn",
"Poloreckého",
"Poľská",
"Poludníková",
"Poniklecová",
"Popolná",
"Popovova",
"Popradská",
"Porubského",
"Poštová",
"Potočná",
"Považanova",
"Považská",
"Povoznícka",
"Povraznícka",
"Povraznícka",
"Požiarnická",
"Pračanská",
"Prasličková",
"Pražská",
"Pražská",
"Predstaničné námesti",
"Prepoštská",
"Prešernova",
"Prešovská",
"Prešovská",
"Prešovská",
"Pri Bielom kríži",
"Pri dvore",
"Pri Dynamitke",
"Pri Habánskom mlyne",
"Pri hradnej studni",
"Pri hrádzi",
"Pri kolíske",
"Pri kríži",
"Pri mlyne",
"Pri Rochu",
"Pri seči",
"Pri Starej Prachárni",
"Pri Starom háji",
"Pri starom letisku",
"Pri Starom Mýte",
"Pri strelnici",
"Pri Struhe",
"Pri Suchom mlyne",
"Pri Šajbách",
"Pri tehelni",
"Pri trati",
"Pri vinohradoch",
"Pri zvonici",
"Priama cesta",
"Pribylinská",
"Pribinova",
"Pribinova",
"Pribinova",
"Pribišova",
"Prídanky",
"Prídavková",
"Priečna",
"Priehradná",
"Priekopnícka",
"Priekopy",
"Priemyselná",
"Priemyselná",
"Prievozská",
"Prievozská",
"Prievozská",
"Príjazdná",
"Príkopova",
"Primaciálne námestie",
"Prímoravská",
"Prípojná",
"Prístav",
"Prístavná",
"Prokofievova",
"Prokopa Veľkého",
"Prokopova",
"Prúdová",
"Prvá",
"Prvosienková",
"Pšeničná",
"Púchovská",
"Púpavová",
"Pustá",
"Puškinova",
"Pútnická",
"Pyrenejská",
"Rácova",
"Račianska",
"Račianska",
"Račianska",
"Račianska",
"Račianska",
"Račianska",
"Račianske mýto",
"Radarová",
"Rádiová",
"Radlinského",
"Radničná",
"Radničné námestie",
"Radvanská",
"Rajčianska",
"Rajecká",
"Rajská",
"Rajtákova",
"Raketová",
"Rákosová",
"Rascová",
"Rascová",
"Rastislavova",
"Rastlinná",
"Rašelinová",
"Ráztočná",
"Rázusovo nábrežie",
"Ražná",
"Rebarborová",
"Regrútska",
"Remeselnícka",
"Repašského",
"Repíková",
"Repná",
"Rešetkova",
"Revolučná",
"Révová",
"Revúcka",
"Rezedová",
"Riazanská",
"Riazanská",
"Ribayová",
"Ríbezľová",
"Riečna",
"Rigeleho",
"Rímska",
"Rízlingová",
"Riznerova",
"Robotnícka",
"Roľnícka",
"Romanova",
"Röntgenova",
"Rosná",
"Rostovská",
"Rošického",
"Rovná",
"Rovniankova",
"Rovníková",
"Royova",
"Rozálska",
"Rozmarínová",
"Rozvodná",
"Rožňavská",
"Rožňavská",
"Rožňavská",
"Rubínová",
"Rubinsteinova",
"Rudnayovo námestie",
"Rudnícka",
"Rulandská",
"Rumančeková",
"Rumunská",
"Rusovce",
"Rusovská cesta",
"Rustaveliho",
"Ružičková",
"Ružinovská",
"Ružinovská",
"Ružinovská",
"Ružomberská",
"Ružová dolina",
"Ružová dolina",
"Rybárska brána",
"Rybné námestie",
"Rybničná",
"Rybničná",
"Rybničná",
"Rýdziková",
"Rytierska",
"Sabinovská",
"Sabinovská",
"Sad Janka Kráľa",
"Sadmelijská",
"Sadová",
"Samova",
"Saratovská",
"Sartorisova",
"Sasanková",
"Sasinkova",
"Savignonská",
"Seberíniho",
"Sečovská",
"Sedlárska",
"Sedmokrásková",
"Segnáre",
"Segnerova",
"Sekulská",
"Sekurisova",
"Sekýľska",
"Semenárska",
"Semianova",
"Semilonská",
"Senická",
"Senná",
"Septimiova",
"Schengenská",
"Schillerova",
"Schneidera -Trnavské",
"Schody pri starej vo",
"Sibírska",
"Siedma",
"Sienkiewiczova",
"Silvánska",
"Sinokvetná",
"Skalická cesta",
"Skalná",
"Skerličova",
"Sklabinská",
"Sklenárova",
"Sklenárska",
"Skoroceľová",
"Skuteckého",
"Skýcovská",
"Sládkovičova",
"Sladová",
"Slatinská",
"Slávičie údolie",
"Slavín",
"Slepá",
"Sliačska",
"Sliezska",
"Slivková",
"Sĺňavská",
"Slnečná",
"Slnečnicová",
"Slovanské nábrežie",
"Slovienska",
"Slovinec",
"Slovinská",
"Slovnaftská",
"Slovnaftská",
"Slowackého",
"Smetanova",
"Smikova",
"Smolenická",
"Smolnícka",
"Smrečianska",
"Smrečianska",
"Snežienková",
"Soferove schody",
"Socháňova",
"Sochorova",
"Sokolíkova",
"Sokolská",
"Solivarská",
"Sološnická",
"Somolického",
"Somolického",
"Sosnová",
"Sovia",
"Spádová",
"Spätná cesta",
"Spišská",
"Spojná",
"Spoločenská",
"Sputniková",
"Sreznevského",
"Srnčia",
"Stachanovská",
"Stálicová",
"Stanekova",
"Staničná",
"Stará Černicová",
"Stará Ivánska cesta",
"Stará Klenová",
"Stará Prievozská",
"Stará Stupavská",
"Stará Vajnorská",
"Stará vinárska",
"Staré Grunty",
"Staré ihrisko",
"Staré záhrady",
"Starhradská",
"Starohájska",
"Staromestská",
"Staromlynská",
"Starorímska",
"Staroturský chodník",
"Stavbárska",
"Staviteľská",
"Stepná cesta",
"Stodolova",
"Stoklasová",
"Stolárska",
"Strakova",
"Stratená",
"Strážna",
"Strážnická",
"Strážny dom",
"Strečnianska",
"Stredná",
"Strelecká",
"Strelkova",
"Strmá cesta",
"Strmé sady",
"Strmý bok",
"Strmý vŕšok",
"Strojnícka",
"Stromová",
"Stropkovská",
"Struková",
"Studená",
"Studenohorská",
"Stuhová",
"Stupavská",
"Súbežná",
"Sudová",
"Súhvezdná",
"Suchá",
"Suché mýto",
"Suchohradská",
"Súkennícka",
"Súľovská",
"Sumbalova",
"Súmračná",
"Súťažná",
"Svätého Vincenta",
"Svätoplukova",
"Svätoplukova",
"Svätovojtešská",
"Svébska",
"Svetlá",
"Svíbová",
"Svidnícka",
"Svoradova",
"Svrčia",
"Syslia",
"Šafárikovo námestie",
"Šafárikovo námestie",
"Šafránová",
"Šagátova",
"Šachorová",
"Šalátová",
"Šaldova",
"Šalviová",
"Šamorínska",
"Šancová",
"Šancová",
"Šancová",
"Šancová",
"Šándorova",
"Šarišská",
"Šášovská",
"Šaštínska",
"Ševčenkova",
"Šiesta",
"Šikmá",
"Šinkovské",
"Šintavská",
"Šípková",
"Šípová",
"Šíravská",
"Široká",
"Škarniclova",
"Školská",
"Škovránčia",
"Škultétyho",
"Šoltésovej",
"Šošovicová",
"Špieszova",
"Špitálska",
"Športová",
"Šrobárovo námestie",
"Šťastná",
"Štedrá",
"Štefana Králika",
"Štefana Králika",
"Štefana Majera",
"Štefánikova",
"Štefánikova",
"Štefánikova",
"Štefanovičova",
"Štefunkova",
"Štepná",
"Štetinova",
"Štiavnická",
"Štítová",
"Štrbská",
"Štúrova",
"Štvrtá",
"Štyndlova",
"Šulekova",
"Šulekova",
"Šulekova",
"Šumavská",
"Šuňavcova",
"Šúrska",
"Šustekova",
"Šuty",
"Švabinského",
"Švantnerova",
"Tabaková",
"Tablicova",
"Táborská",
"Tajovského",
"Talichova",
"Tallerova",
"Tatranská",
"Tavaríkova osada",
"Tbiliská",
"Tehelná",
"Tehelňa",
"Tehliarska",
"Technická",
"Tekovská",
"Tekvicová",
"Telocvičná",
"Tematínska",
"Teplická",
"Terchovská",
"Teslova",
"Tešedíkova",
"Tetmayerova",
"Thurzova",
"Tibenského",
"Tibériová",
"Tichá",
"Tilgnerova",
"Timravina",
"Tobrucká",
"Tokajícka",
"Tolstého",
"Tománkova",
"Tomanova",
"Tomášikova",
"Tomášikova",
"Tomášikova",
"Tomášikova",
"Tomášikova",
"Toplianska",
"Topoľčianska",
"Topoľová",
"Toryská",
"Továrenská",
"Trajánova",
"Tramínová",
"Tranovského",
"Trávna",
"Trebišovská",
"Trebišovská",
"Trebišovská",
"Trenčianska",
"Treskoňova",
"Tretia",
"Trhová",
"Trinásta",
"Trnavská cesta",
"Trnavská cesta",
"Trnavská cesta",
"Trnavská cesta",
"Trnavská cesta",
"Trnavské mýto",
"Trnková",
"Tŕňová",
"Trojdomy",
"Trojičné námestie",
"Trstínska",
"Tučkova",
"Tuhovská",
"Tulipánová",
"Tupého",
"Tupolevova",
"Turbínova",
"Turčianska",
"Turistická",
"Turnianska",
"Tvarožkova",
"Tylova",
"Tymiánová",
"Tyršovo nábrežie",
"Učiteľská",
"Údernícka",
"Údolná",
"Uhliská",
"Uhorková",
"Uhrova",
"Uhrovecká",
"Ukrajinská",
"Ulica 1. mája",
"Ulica 29. augusta",
"Ulica 29. augusta",
"Ulica 29. augusta",
"Ulica 29. augusta",
"Ulica 8. mája",
"Ulica Alviano",
"Ulica Imricha Karvaš",
"Ulica J. Valašťana D",
"Ulica Janka Alexyho",
"Ulica Jozefa Krónera",
"Ulica Juraja Hronca",
"Ulica Karola Adlera",
"Ulica kpt. Rašu",
"Ulica Leopoldov maje",
"Ulica Ľuda Zúbka",
"Ulica Nad Válkom",
"Ulica padlých hrdino",
"Ulica Pri gaštanovej",
"Ulica Pri pastierni",
"Ulica Pri Vápeníckom",
"Ulica Pri vodnej nád",
"Ulica svornosti",
"Ulica Viktora Tegelh",
"Úprkova",
"Úradnícka",
"Uránová",
"Urbánkova",
"Urbárska",
"Ursínyho",
"Uršulínska",
"Ušiakova",
"Úvozná",
"Uzbecká",
"Úzka",
"Úžiny",
"V záhradách",
"Vajanského nábrežie",
"Vajnorská",
"Vajnorská",
"Vajnorská",
"Vajnorská",
"Vajnorská",
"Vajnorská",
"Vajnorská",
"Vajnorská",
"Vajnorská",
"Valachovej",
"Valašská",
"Valchárska",
"Vančurova",
"Vansovej",
"Vápencová",
"Vápenka",
"Vápenná",
"Varínska",
"Varšavská",
"Varšavská",
"Vavilovova",
"Vavrinecká",
"Vavrínova",
"Vazovova",
"Vážska",
"Včelárska",
"Velehradská",
"Veľké Štepnice",
"Veltlínska",
"Vendelínska",
"Ventúrska",
"Veterná",
"Veternicová",
"Vetvárska",
"Vetvová",
"Vidlicová",
"Viedenská cesta",
"Viedenská cesta",
"Viedenská cesta",
"Vietnamská",
"Vígľašská",
"Vihorlatská",
"Viktorínova",
"Vilová",
"Viničná",
"Vínna",
"Vinohradnícka",
"Višňová",
"Víťazná",
"Vlárska",
"Vlastenecké námestie",
"Vlčie hrdlo",
"Vlčkova",
"Vlčkova",
"Vlčkova",
"Vodné elektrárne",
"Vodný vrch",
"Vosková",
"Votrubova",
"Vrábeľská",
"Vrakunská",
"Vrakunská cesta",
"Vrakunská cesta",
"Vrančovičova",
"Vranovská",
"Vrbánska",
"Vrbenského",
"Vŕbová",
"Vresová",
"Vretenová",
"Vrchná",
"Vrútocká",
"Vtáčikova",
"Vtáčnik",
"Vyhliadka",
"Vyhnianska cesta",
"Výhonská",
"Východná",
"Vysoká",
"Vysokohorská",
"Vyšehradská",
"Vyšná",
"Výtvarná",
"Vývojová",
"Wattova",
"Wilsonova",
"Wolkrova",
"Za bránou",
"Za farou",
"Za Kasárňou",
"Za mlynom",
"Za sokolovňou",
"Za Stanicou",
"Za tehelňou",
"Záborského",
"Zadunajská cesta",
"Záhorácka",
"Záhorská",
"Záhradkárska",
"Záhradná",
"Záhradnícka",
"Záhradnícka",
"Záhradnícka",
"Záhradnícka",
"Záhrady",
"Záhrebská",
"Záhrebská",
"Záhumenná",
"Záhumenská",
"Zákutie",
"Zálužická",
"Zámocká",
"Zámocké schody",
"Zámočnícka",
"Západná",
"Západný rad",
"Záporožská",
"Záruby",
"Zátišie",
"Zátureckého",
"Zavadilová",
"Závadská",
"Záveterná",
"Závodná",
"Závodníkova",
"Zbrody",
"Zdravotnícka",
"Zelená",
"Zeleninová",
"Zelenohorská",
"Zelinárska",
"Zhorínska",
"Zidiny",
"Zimná",
"Zlatá",
"Zlaté piesky",
"Zlaté schody",
"Zlatohorská",
"Znievska",
"Zohorská",
"Zochova",
"Zrinského",
"Zvolenská",
"Zvončeková",
"Žabí majer",
"Žabotova",
"Žarnovická",
"Žatevná",
"Žehrianska",
"Železná",
"Železničiarska",
"Železničná",
"Želiarska",
"Žellova",
"Žiacka",
"Žiarska",
"Židovská",
"Žihľavová",
"Žilinská",
"Žilinská",
"Žitavská",
"Žitná",
"Živnostenská",
"Žižkova",
"Žulová",
"Župné námestie",
"Borágova",
"Parenicová",
"Loparová",
"Jegnešská",
"Jonatanová",
"Monardová",
"Perličková",
)
states = (
"Bratislavský kraj",
"Trnavský kraj",
"Trenčiansky kraj",
"Nitriansky kraj",
"Žilinský kraj",
"Banskobystrický kraj",
"Prešovský kraj",
"Košický kraj",
)
countries = (
"Afganistan",
"Afghanistanská islamská republika",
"Ålandy",
"Albánsko",
"Albánska republika",
"Alžírsko",
"Alžírska demokratická ľudová republika",
"Americká Samoa",
"Andorra",
"Andorrské kniežatstvo",
"Angola",
"Angolská republika",
"Anguilla",
"Antarktída",
"Antigua a Barbuda",
"Argentína",
"Argentínska republika",
"Arménsko",
"Arménska republika",
"Aruba",
"Austrália",
"Rakúsko",
"Rakúska republika",
"Azerbajdžan",
"Azerbajdžanská republika",
"Bahamy",
"Bahamské spoločenstvo",
"Bahrajn",
"Bahrajnské kráľovstvo",
"Bangladéš",
"Bangladéšska ľudová republika",
"Barbados",
"Bielorusko",
"Bieloruská republika",
"Belgicko",
"Belgické kráľovstvo",
"Belize",
"Benin",
"Beninská republika",
"Bermudy",
"Bhután",
"Bhutánske kráľovstvo",
"Bolívijská republika",
"Bolívijská republika",
"Bolívia",
"Bosna a Hercegovina",
"Republika Bosny a Hercegoviny",
"Botswana",
"Botswanská republika",
"Bouvetov ostrov",
"Brazília",
"Brazílska federatívna republika",
"Britské indickooceánske územie",
"Brunejsko-darussalamský štát",
"Bulharsko",
"Bulharská republika",
"Burkina Faso",
"Burundi",
"Burundská republika",
"Kambodža",
"Kambodžské kráľovstvo",
"Kamerun",
"Kamerunská republika",
"Kanada",
"Kapverdy",
"Kapverdská republika",
"Kajmanie ostrovy",
"Stredoafrická republika",
"Čad",
"Čadská republika",
"Čile",
"Čilská republika",
"Čína",
"Čínska ľudová republika",
"Vianočný ostrov",
"Kokosové ostrovy",
"Kolumbia",
"Kolumbijská republika",
"Komory",
"Komorský zväz",
"Kongo",
"Konžská republika",
"Konžská demokratická republika",
"Cookove ostrovy",
"Kostarika",
"Kostarická republika",
"Pobrežie Slonoviny",
"Republika Pobrežia Slonoviny",
"Chorvátsko",
"Chorvátska republika",
"Kuba",
"Kubánska republika",
"Cyprus",
"Cyperská republika",
"Česká republika",
"Dánsko",
"Dánske kráľovstvo",
"Džibutsko",
"Džibutská republika",
"Dominika",
"Dominické spoločenstvo",
"Dominikánska republika",
"Ekvádor",
"Ekvádorská republika",
"Egypt",
"Egyptská arabská republika",
"Salvádor",
"Salvádorská republika",
"Rovníková Guinea",
"Republika Rovníkovej Guiney",
"Eritrea",
"Estónsko",
"Estónska republika",
"Etiópia",
"Etiópska federatívna demokratická republika",
"Falklandy (Malvíny)",
"Faerské ostrovy",
"Fidži",
"Fínsko",
"Fínska republika",
"Francúzsko",
"Francúzska republika",
"Francúzska Guyana",
"Francúzska Polynézia",
"Francúzske južné a antarktické územia",
"Gabon",
"Gabonská republika",
"Gambia",
"Gambijská republika",
"Gruzínsko",
"Nemecko",
"Nemecká spolková republika",
"Ghana",
"Ghanská republika",
"Gibraltár",
"Grécko",
"Grécka republika",
"Grónsko",
"Grenada",
"Guadeloupe",
"Guam",
"Guatemala",
"Guatemalská republika",
"Guernsey",
"Guinea",
"Guinejská republika",
"Guinea-Bissau",
"Guinejsko-bissauská republika",
"Guyana",
"Guyanská kooperatívna republika",
"Haiti",
"Haitská republika",
"Heardov ostrov",
"Svätá stolica (Vatikánsky mestský štát)",
"Honduras",
"Honduraská republika",
"Hongkong",
"Osobitná administratívna oblasť Číny Hongkong",
"Maďarsko",
"Maďarská republika",
"Island",
"Islandská republika",
"India",
"Indická republika",
"Indonézia",
"Indonézska republika",
"Iránska islamská republika",
"Iránska islamská republika",
"Irak",
"Iracká republika",
"Írsko",
"Man",
"Izrael",
"Izraelský štát",
"Taliansko",
"Talianska republika",
"Jamajka",
"Japonsko",
"Jersey",
"Jordánsko",
"Jordánske hášimovské kráľovstvo",
"Kazachstan",
"Kazašská republika",
"Keňa",
"Kenská republika",
"Kiribati",
"Kiribatská republika",
"Kórejská ľudovodemokratická republika",
"Kórejská ľudovodemokratická republika",
"Kórejská republika",
"Kuvajt",
"Kuvajtský štát",
"Kirgizsko",
"Kirgizská republika",
"Laoská ľudovodemokratická republika",
"Lotyšsko",
"Lotyšská republika",
"Libanon",
"Libanonská republika",
"Lesotho",
"Lesothské kráľovstvo",
"Libéria",
"Libérijská republika",
"Líbya",
"Lichtenštajnsko",
"Lichtenštajnské kniežatstvo",
"Litva",
"Litovská republika",
"Luxembursko",
"Luxemburské veľkovojvodstvo",
"Macao",
"Osobitná administratívna oblasť Číny Macao",
"Madagaskar",
"Madagaskarská republika",
"Malawi",
"Malawijská republika",
"Malajzia",
"Maldivy",
"Maldivská republika",
"Mali",
"Malijská republika",
"Malta",
"Maltská republika",
"Marshallove ostrovy",
"Republika Marshallových ostrovov",
"Martinik",
"Mauritánia",
"Mauritánska islamská republika",
"Maurícius",
"Maurícijská republika",
"Mayotte",
"Mexiko",
"Spojené štáty mexické",
"Mikronézske federatívne štáty",
"Mikronézske federatívne štáty",
"Moldavská republika",
"Moldavská republika",
"Moldavsko",
"Monako",
"Monacké kniežatstvo",
"Mongolsko",
"Čierna Hora",
"Montserrat",
"Maroko",
"Marocké kráľovstvo",
"Mozambik",
"Mozambická republika",
"Mjanmarsko",
"Namíbia",
"Namíbijská republika",
"Nauru",
"Nauruská republika",
"Nepál",
"Nepálska federatívna demokratická republika",
"Holandsko",
"Holandské kráľovstvo",
"Nová Kaledónia",
"Nový Zéland",
"Nikaragua",
"Nikaragujská republika",
"Niger",
"Nigerská republika",
"Nigéria",
"Nigérijská federatívna republika",
"Niue",
"Norfolk",
"Severné Macedónsko",
"Severné Mariány",
"Severomacedónska republika",
"Spoločenstvo Severných Marián",
"Nórsko",
"Nórske kráľovstvo",
"Omán",
"Ománsky sultanát",
"Pakistan",
"Pakistanská islamská republika",
"Palau",
"Palauská republika",
"palestínske územie, Okupované",
"Okupované palestínske územie",
"Panama",
"Panamská republika",
"Papua - Nová Guinea",
"Paraguaj",
"Paraguajská republika",
"Peru",
"Peruánska republika",
"Filipíny",
"Filipínska republika",
"Pitcairnove ostrovy",
"Poľsko",
"Poľská republika",
"Portugalsko",
"Portugalská republika",
"Portoriko",
"Katar",
"Katarský štát",
"Réunion",
"Rumunsko",
"Ruská federácia",
"Rwanda",
"Rwandská republika",
"Svätý Bartolomej",
"Svätá Helena, Ascension a Tristan da Cunha",
"Svätý Krištof a Nevis",
"Svätá Lucia",
"Saint Martin",
"Saint Pierre a Miquelon",
"Svätý Vincent a Grenadíny",
"Samoa",
"Samojský nezávislý štát",
"San Maríno",
"Sanmarínska republika",
"Svätý Tomáš a Princov ostrov",
"Demokratická republika Svätého Tomáša a Princovho ostrova",
"Saudská Arábia",
"Saudskoarabské kráľovstvo",
"Senegal",
"Senegalská republika",
"Srbsko",
"Srbská republika",
"Seychely",
"Seychelská republika",
"Sierra Leone",
"Sierraleonská republika",
"Singapur",
"Singapurská republika",
"Slovensko",
"Slovenská republika",
"Slovinsko",
"Slovinská republika",
"Šalamúnove ostrovy",
"Somálsko",
"Somálska republika",
"Južná Afrika",
"Juhoafrická republika",
"Južná Georgia a Južné Sandwichove ostrovy",
"Španielsko",
"Španielske kráľovstvo",
"Srí Lanka",
"Srílanská demokratická socialistická republika",
"Sudán",
"Sudánska republika",
"Surinam",
"Surinamská republika",
"Svalbard a Jan Mayen",
"Svazijsko",
"Svazijské kráľovstvo",
"Švédsko",
"Švédske kráľovstvo",
"Švajčiarsko",
"Švajčiarska konfederácia",
"Sýrska arabská republika",
"Taiwan, provincia Číny",
"Taiwan",
"Tadžikistan",
"Tadžická republika",
"Tanzánijská zjednotená republika",
"Tanzánijská zjednotená republika",
"Thajsko",
"Thajské kráľovstvo",
"Východný Timor",
"Východotimorská demokratická republika",
"Togo",
"Togská republika",
"Tokelau",
"Tonga",
"Tongské kráľovstvo",
"Trinidad a Tobago",
"Republika Trinidadu a Tobaga",
"Tunisko",
"Tuniská republika",
"Turecko",
"Turecká republika",
"Turkménsko",
"Ostrovy Turks a Caicos",
"Tuvalu",
"Uganda",
"Ugandská republika",
"Ukrajina",
"Spojené arabské emiráty",
"Spojené kráľovstvo",
"Spojené kráľovstvo Veľkej Británie a Severného Írska",
"Spojené štáty",
"Spojené štáty americké",
"Menšie odľahlé ostrovy Spojených štátov",
"Uruguaj",
"Uruguajská východná republika",
"Uzbekistan",
"Uzbecká republika",
"Vanuatu",
"Vanuatská republika",
"Venezuelská bolívarovská republika",
"Venezuela",
"Vietnam",
"Vietnamská socialistická republika",
"Panenské ostrovy, Britské",
"Britské Panenské ostrovy",
"Panenské ostrovy, Americké",
"Panenské ostrovy Spojených štátov",
"Wallis a Futuna",
"Západná Sahara",
"Jemen",
"Jemenská republika",
"Zambia",
"Zambijská republika",
"Zimbabwe",
"Zimbabwianska republika",
"Britské antarktické územie",
"Socialistická republika Barmský zväz",
"Bieloruská sovietska socialistická republika",
"ostrovy Canton a Enderbury",
"Československo, Československá socialistická republika",
"Dahome",
"Zem kráľovnej Maud",
"Východný Timor",
"Metropolitné Francúzsko",
"Francúzske pobrežie Afarov a Isasov",
"Francúzske južné a antarktické územia",
"Nemecká demokratická republika",
"Nemecká spolková republika",
"Gilbertove a lagúnové ostrovy",
"Johnston",
"Midwajské ostrovy",
"Holandské Antily",
"neutrálne pôdy",
"Nové Hebridy",
"Poručnícke územie tichomorských ostrovov",
"Panamská republika",
"Panamské prieplavové pásmo",
"Rumunská socialistická republika",
"Svätý Krištof",
"Srbsko a Čierna Hora",
"Sikkim",
"Rodézia",
"Španielska Sahara",
"Tichomorské ostrovy pod správou USA",
"ZSSR, Zväz sovietskych socialistických republík",
"Republika Horná Volta",
"Vatikánsky mestský štát (Svätá stolica)",
"Vietnamská demokratická republika",
"Wake",
"Jemenská ľudovodemokratická republika",
"Jemenská arabská republika",
"Socialistická federatívna republika Juhoslávia",
"Zairská republika",
)
def street_suffix_short(self) -> str:
return self.random_element(self.street_suffixes_short)
def street_suffix_long(self) -> str:
return self.random_element(self.street_suffixes_long)
def city_name(self) -> str:
return self.random_element(self.cities)
def street_name(self) -> str:
return self.random_element(self.streets)
def administrative_unit(self) -> str:
return self.random_element(self.states)
state = administrative_unit
def city_with_postcode(self) -> str:
return self.postcode() + " " + self.random_element(self.cities) | PypiClean |
/MAnorm-1.3.0.tar.gz/MAnorm-1.3.0/manorm/region/__init__.py | import logging
import os
from manorm.region.parsers import get_region_parser
from manorm.stats import xy_to_ma, ma_to_xy, manorm_p
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
# Names of the supported region/peak file formats; presumably consumed by
# the CLI / `get_region_parser` for validation — confirm at call sites.
REGION_FORMATS = ['bed', 'bed3-summit', 'macs', 'macs2', 'narrowpeak',
                  'broadpeak']
class GenomicRegion:
    """A genomic region on a chromosome.

    Coordinates are 0-based and half-open: the region covers
    [start, end), and ``start <= summit < end`` is enforced.

    Parameters
    ----------
    chrom : str
        Chromosome name of the region.
    start : int
        Start coordinate (inclusive); coerced with ``int()``.
    end : int
        End coordinate (exclusive); coerced with ``int()``.
    summit : int, optional
        Summit coordinate; defaults to the midpoint of the region.

    Attributes
    ----------
    chrom : str
    start : int
    end : int
    summit : int
    """

    def __init__(self, chrom, start, end, summit=None):
        self.chrom = chrom
        self.start = int(start)
        self.end = int(end)
        if not self.start < self.end:
            raise ValueError(
                f"expect start < end, got: start={start} end={end}")
        # Fall back to the midpoint when no summit is supplied.
        if summit is None:
            self.summit = (self.start + self.end) // 2
        else:
            self.summit = int(summit)
        if self.summit < self.start or self.summit >= self.end:
            raise ValueError(
                f"expect start <= summit < end, got start={start} "
                f"summit={summit} end={end}")

    def __repr__(self):
        return f"GenomicRegion({self.chrom}:{self.start}-{self.end})"
class ManormPeak(GenomicRegion):
    """A genomic peak annotated with MAnorm statistics.

    On top of the plain region the peak goes through two stages:

    * `count_reads` fills ``read_count1/2``, ``read_density1/2`` and the
      raw ``m_raw``/``a_raw`` values, then flips ``counted`` to True.
    * `normalize` fills ``read_density1_normed``/``read_density2_normed``,
      ``m_normed``/``a_normed`` and ``p_value``, then flips ``normalized``
      to True.

    ``iscommon`` marks common peaks; ``summit_dis`` stores the minimal
    summit distance between merged common peaks.

    Parameters
    ----------
    chrom : str
        Chromosome name of the peak.
    start : int
        Start coordinate of the peak.
    end : int
        End coordinate of the peak.
    summit : int, optional
        Summit coordinate; defaults to the middle point.
    """

    def __init__(self, chrom, start, end, summit=None):
        super().__init__(chrom, start, end, summit)
        # Read-counting state (filled in by `count_reads`).
        self.counted = False
        self.read_count1 = self.read_count2 = None
        self.read_density1 = self.read_density2 = None
        self.m_raw = self.a_raw = None
        # Normalization state (filled in by `normalize`).
        self.normalized = False
        self.read_density1_normed = self.read_density2_normed = None
        self.m_normed = self.a_normed = None
        self.p_value = None
        # Common-peak bookkeeping.
        self.iscommon = False
        self.summit_dis = None

    def count_reads(self, reads1, reads2, window=2000):
        """Count reads around the summit and derive raw (M, A) values.

        Parameters
        ----------
        reads1 : `manorm.read.Reads`
            MAnorm reads object of sample 1.
        reads2 : `manorm.read.Reads`
            MAnorm reads object of sample 2.
        window : int, optional
            Size of the counting window centered on the summit,
            default=2000.
        """
        if window <= 0:
            raise ValueError(f"expect window size > 0, got {window}")
        half = window // 2
        lo = self.summit - half
        hi = self.summit + half
        # A pseudo-count of 1 keeps counts/densities strictly positive.
        self.read_count1 = reads1.count(self.chrom, lo, hi) + 1
        self.read_count2 = reads2.count(self.chrom, lo, hi) + 1
        # Densities are in reads per kilobase of the counting window.
        self.read_density1 = self.read_count1 * 1000 / (half * 2)
        self.read_density2 = self.read_count2 * 1000 / (half * 2)
        self.m_raw, self.a_raw = xy_to_ma(self.read_density1,
                                          self.read_density2)
        self.counted = True

    def normalize(self, slope, intercept):
        """Normalize M to remove its global dependence on A.

        The fitted robust linear model is ``M = slope * A + intercept``;
        the normalized M is the residual of the raw M against that line.

        Parameters
        ----------
        slope : float
            Slope of the fitted M-A linear model.
        intercept : float
            Intercept of the fitted M-A linear model.
        """
        fitted = slope * self.a_raw + intercept
        self.m_normed = self.m_raw - fitted
        self.a_normed = self.a_raw
        self.read_density1_normed, self.read_density2_normed = ma_to_xy(
            self.m_normed, self.a_normed)
        self.p_value = manorm_p(self.read_density1_normed,
                                self.read_density2_normed)
        self.normalized = True
class GenomicRegions:
    """A chromosome-keyed collection of genomic regions.

    Parameters
    ----------
    name : str, optional
        Name of the collection.

    Attributes
    ----------
    name : str or None
        Name of the collection.
    """

    def __init__(self, name=None):
        self.name = name
        # Maps chromosome name -> list of regions on that chromosome.
        self._data = {}

    @property
    def chroms(self):
        """Sorted chromosome names present in the collection.

        Returns
        -------
        list of str
            Chromosome names, sorted.
        """
        return sorted(self._data)

    @property
    def size(self):
        """Total number of regions across all chromosomes."""
        total = 0
        for regions in self._data.values():
            total += len(regions)
        return total

    def add(self, region):
        """Add a single region to the collection.

        Parameters
        ----------
        region : GenomicRegion
            The genomic region to add; anything else raises ValueError.
        """
        if not isinstance(region, GenomicRegion):
            raise ValueError("requires a `GenomicRegion` object to be added")
        self._data.setdefault(region.chrom, []).append(region)

    def sort(self, by='start', ascending=True):
        """Sort the regions of every chromosome in place.

        Parameters
        ----------
        by : str, optional
            Attribute name to sort by, default='start'.
        ascending : bool, optional
            Sort in ascending order, default=True.
        """
        for regions in self._data.values():
            regions.sort(key=lambda region: getattr(region, by),
                         reverse=not ascending)

    def fetch(self, chrom):
        """Return the regions on the given chromosome.

        Parameters
        ----------
        chrom : str
            Chromosome name to fetch regions from.

        Returns
        -------
        list
            Regions on that chromosome; empty list when absent.
        """
        return self._data.get(chrom, [])
def load_genomic_regions(path, format='bed', name=None):
    """Load genomic regions from a file.

    Parameters
    ----------
    path : str
        Path of the input file.
    format : str, optional
        Input file format, default='bed'.
    name : str, optional
        Name of the resulting collection; defaults to the file's basename
        without extension.

    Returns
    -------
    GenomicRegions
        The loaded (and sorted) genomic regions.
    """
    logger.info(f"Loading genomic regions from {path} [{format}]")
    basename = os.path.splitext(os.path.basename(path))[0]
    regions = GenomicRegions(name if name is not None else basename)
    region_parser = get_region_parser(format)()
    for chrom, start, end, summit in region_parser.parse(path):
        regions.add(GenomicRegion(chrom, start, end, summit))
    regions.sort()
    logger.info(f"Loaded {regions.size} genomic regions")
    return regions
def load_manorm_peaks(path, format='bed', name=None):
    """Load MAnorm peaks from a file.

    Parameters
    ----------
    path : str
        Path of the input peak file.
    format : str, optional
        Input file format, default='bed'.
    name : str, optional
        Name of the resulting collection; defaults to the file's basename
        without extension.

    Returns
    -------
    GenomicRegions
        The loaded (and sorted) peaks, stored as `ManormPeak` objects.
    """
    logger.info(f"Loading peaks from {path} [{format}]")
    basename = os.path.splitext(os.path.basename(path))[0]
    peaks = GenomicRegions(name if name is not None else basename)
    region_parser = get_region_parser(format)()
    for chrom, start, end, summit in region_parser.parse(path):
        peaks.add(ManormPeak(chrom, start, end, summit))
    peaks.sort()
    logger.info(f"Loaded {peaks.size} peaks")
    return peaks
/BESST-2.2.8.tar.gz/BESST-2.2.8/scripts/reads_to_ctg_map.py | import argparse
import os
import shutil
import subprocess
import sys
import tempfile
from datetime import datetime
from sys import stdout
import pysam
import re
def sam_to_bam(sam_path, bam_path):
    """Convert a SAM file to BAM format using pysam.

    Parameters
    ----------
    sam_path : str
        Path of the input .sam file.
    bam_path : str
        Path of the resulting .bam file.
    """
    sam_file = pysam.Samfile(sam_path, "r")
    try:
        bam_file = pysam.Samfile(bam_path, "wb", template=sam_file)
        try:
            for alignment in sam_file:
                bam_file.write(alignment)
        finally:
            # The original never closed either handle; an unflushed BAM
            # stream can be left truncated/corrupt.
            bam_file.close()
    finally:
        sam_file.close()
def bwa_sampe(pe1_path, pe2_path, genome_path, output_path, tmp_dir, bwa_path, clear_tmp, bNoRebuildIndex, threads):
    """Map paired-end reads with ``bwa aln`` + ``bwa sampe`` and produce a
    sorted, indexed BAM file.

    Parameters
    ----------
    pe1_path, pe2_path : str
        Paths of the forward and reverse read files.
    genome_path : str
        Path of the reference genome/contigs FASTA.
    output_path : str
        Output prefix; the sorted BAM ends up at ``output_path + '.bam'``
        and bwa's stderr log at ``output_path + '.bwa.1'``.
    tmp_dir : str or None
        Working directory; a fresh temporary directory is created when None.
    bwa_path : str
        Path of (or command name for) the bwa executable.
    clear_tmp : bool
        Remove each intermediate SAM/BAM file as soon as it is consumed.
    bNoRebuildIndex : bool
        Reuse an existing bwa index in the working directory and keep the
        index files afterwards.
    threads : str
        Thread count, passed verbatim to ``bwa aln -t``.
    """
    print()
    print('Aligning with bwa aln/sampe.')
    stdout.flush()
    start = datetime.now()
    work_dir = tempfile.mkdtemp() if tmp_dir is None else tmp_dir
    genome_db = os.path.join(work_dir, "genome")
    pe1_output = os.path.join(work_dir, "pe1.sai")
    pe2_output = os.path.join(work_dir, "pe2.sai")
    bwa_output = os.path.join(work_dir, "output.sam")
    stderr_file = open(output_path + '.bwa.1', 'w')
    try:  # the original leaked this handle; ensure it is closed
        print('Temp directory:', work_dir)
        print('Output path: ', output_path)
        print('Stderr file: ', output_path + '.bwa.1')
        stdout.flush()
        if bNoRebuildIndex and os.path.isfile(genome_db + ".sa"):
            print('bwa index exists, no need to remake (--norebuildbwaindex specified)', end=' ')
        else:
            print('Make bwa index...', end=' ')
            stdout.flush()
            subprocess.check_call([bwa_path, "index", "-p", genome_db, genome_path],
                                  stderr=stderr_file)
            print('Done.')
            stdout.flush()
        with open(pe1_output, "w") as pe1_file:
            print('Align forward reads with bwa aln...', end=' ')
            stdout.flush()
            subprocess.check_call([bwa_path, "aln", "-t", threads, genome_db, pe1_path],
                                  stdout=pe1_file, stderr=stderr_file)
            print('Done.')
            stdout.flush()
        with open(pe2_output, "w") as pe2_file:
            print('Align reverse reads with bwa aln...', end=' ')
            stdout.flush()
            subprocess.check_call([bwa_path, "aln", "-t", threads, genome_db, pe2_path],
                                  stdout=pe2_file, stderr=stderr_file)
            print('Done.')
            stdout.flush()
        with open(bwa_output, "w") as bwa_file:
            print('Start bwa sampe...', end=' ')
            stdout.flush()
            subprocess.check_call([bwa_path, "sampe",
                                   genome_db,
                                   pe1_output, pe2_output,
                                   pe1_path, pe2_path], stdout=bwa_file, stderr=stderr_file)
            print('Done.')
            stdout.flush()
        elapsed = datetime.now() - start
        print('Time elapsed for bwa index and aln/sampe:', elapsed)
        print()
        print('Convert SAM to BAM...', end=' ')
        stdout.flush()
        start = datetime.now()
        sam_to_bam(bwa_output, bwa_output + ".bam")
        if clear_tmp:
            os.remove(bwa_output)
        elapsed = datetime.now() - start
        print('Done.')
        print('Time elapsed for SAM to BAM conversion:', elapsed)
        print()
        print('Sort BAM...', end=' ')
        stdout.flush()
        start = datetime.now()
        # Legacy pysam API: sort(input, prefix) — presumably writes
        # output_path + '.bam', which is what gets indexed below.
        pysam.sort(bwa_output + ".bam", output_path)
        if clear_tmp:
            os.remove(bwa_output + ".bam")
        elapsed = datetime.now() - start
        print('Done.')
        print('Time elapsed for BAM sorting:', elapsed)
        print()
        print('Index BAM...', end=' ')
        stdout.flush()
        start = datetime.now()
        pysam.index(output_path + '.bam')
        elapsed = datetime.now() - start
        print('Done.')
        print('Time elapsed for BAM indexing:', elapsed)
        print()
        print('Remove temp files...', end=' ')
        stdout.flush()
        start = datetime.now()
        if bNoRebuildIndex:
            print('Keeping genome index files for future use in ' + work_dir, end=' ')
            # Drop everything except the "genome*" index files.
            for f in os.listdir(work_dir):
                if not re.search("genome", f):
                    os.remove(os.path.join(work_dir, f))
        else:
            shutil.rmtree(work_dir)
        elapsed = datetime.now() - start
        print('Done.')
        print('Time elapsed for temp files removing:', elapsed)
        stdout.flush()
    finally:
        stderr_file.close()
def bwa_mem(pe1_path, pe2_path, genome_path, threads, output_path, tmp_dir, bwa_path, clear_tmp, bNoRebuildIndex):
    """Map paired-end reads with ``bwa mem`` and produce a sorted,
    indexed BAM file.

    Note the parameter order differs from `bwa_sampe` (``threads`` comes
    fourth here) — kept as-is to match the existing call in ``__main__``.

    Parameters
    ----------
    pe1_path, pe2_path : str
        Paths of the forward and reverse read files.
    genome_path : str
        Path of the reference genome/contigs FASTA.
    threads : str
        Thread count, passed verbatim to ``bwa mem -t``.
    output_path : str
        Output prefix; the sorted BAM ends up at ``output_path + '.bam'``
        and bwa's stderr log at ``output_path + '.bwa.1'``.
    tmp_dir : str or None
        Working directory; a fresh temporary directory is created when None.
    bwa_path : str
        Path of (or command name for) the bwa executable.
    clear_tmp : bool
        Remove each intermediate SAM/BAM file as soon as it is consumed.
    bNoRebuildIndex : bool
        Reuse an existing bwa index in the working directory and keep the
        index files afterwards.
    """
    print()
    print('Aligning with bwa mem.')
    stdout.flush()
    start = datetime.now()
    work_dir = tempfile.mkdtemp() if tmp_dir is None else tmp_dir
    genome_db = os.path.join(work_dir, "genome")
    bwa_output = os.path.join(work_dir, "output.sam")
    stderr_file = open(output_path + '.bwa.1', 'w')
    try:  # the original leaked this handle; ensure it is closed
        print('Temp directory:', work_dir)
        print('Output path: ', output_path)
        print('Stderr file: ', output_path + '.bwa.1')
        stdout.flush()
        if bNoRebuildIndex and os.path.isfile(genome_db + ".sa"):
            print('bwa index exists, no need to remake (--norebuildbwaindex specified)', end=' ')
        else:
            print('Make bwa index...', end=' ')
            stdout.flush()
            subprocess.check_call([bwa_path, "index", "-p", genome_db, genome_path],
                                  stderr=stderr_file)
            print('Done.')
            stdout.flush()
        with open(bwa_output, "w") as bwa_file:
            print('Align with bwa mem...', end=' ')
            stdout.flush()
            subprocess.check_call([bwa_path, "mem", "-t", threads, "-w", "0", "-O", "99",  # new params found by sebastien/guillaume
                                   genome_db, pe1_path, pe2_path],
                                  stdout=bwa_file,
                                  stderr=stderr_file)
            print('Done.')
            stdout.flush()
        elapsed = datetime.now() - start
        print('Time elapsed for bwa index and mem: ', elapsed)
        print()
        print('Convert SAM to BAM...', end=' ')
        stdout.flush()
        start = datetime.now()
        sam_to_bam(bwa_output, bwa_output + ".bam")
        if clear_tmp:
            os.remove(bwa_output)
        elapsed = datetime.now() - start
        print('Done.')
        print('Time elapsed for SAM to BAM conversion:', elapsed)
        print()
        print('Sort BAM...', end=' ')
        stdout.flush()
        start = datetime.now()
        # Legacy pysam API: sort(input, prefix) — presumably writes
        # output_path + '.bam', which is what gets indexed below.
        pysam.sort(bwa_output + ".bam", output_path)
        if clear_tmp:
            os.remove(bwa_output + ".bam")
        elapsed = datetime.now() - start
        print('Done.')
        print('Time elapsed for BAM sorting:', elapsed)
        print()
        print('Index BAM...', end=' ')
        stdout.flush()
        start = datetime.now()
        pysam.index(output_path + '.bam')
        elapsed = datetime.now() - start
        print('Done.')
        print('Time elapsed for BAM indexing:', elapsed)
        print()
        print('Remove temp files...', end=' ')
        stdout.flush()
        start = datetime.now()
        if bNoRebuildIndex:
            print('Keeping genome index files for future use in ' + work_dir, end=' ')
            # Drop everything except the "genome*" index files.
            for f in os.listdir(work_dir):
                if not re.search("genome", f):
                    os.remove(os.path.join(work_dir, f))
        else:
            shutil.rmtree(work_dir)
        elapsed = datetime.now() - start
        print('Done.')
        print('Time elapsed for temp files removing:', elapsed)
        stdout.flush()
    finally:
        stderr_file.close()
def map_single_reads(pe_path, genome_path, output_path, bwa_path, bNoRebuildIndex=False, threads='8'):
    """Map single-end reads with ``bwa aln`` + ``bwa samse`` and move the
    resulting SAM file to ``output_path``.

    Bug fixes relative to the original:

    * ``bNoRebuildIndex`` and ``threads`` are now parameters — the original
      referenced ``bNoRebuildIndex`` and ``stderr_file`` without defining
      them (NameError), and was called from ``__main__`` with six
      positional arguments against a five-parameter signature (TypeError).
      The new signature matches that call order.
    * ``/dev/null`` is opened for writing — the original opened it
      read-only yet passed it as the children's stderr — and is closed.

    Parameters
    ----------
    pe_path : str
        Path of the reads file.
    genome_path : str
        Path of the reference genome/contigs FASTA.
    output_path : str
        Destination path of the produced SAM file.
    bwa_path : str
        Path of (or command name for) the bwa executable.
    bNoRebuildIndex : bool, optional
        Reuse an existing bwa index in the work dir and keep the index
        files afterwards, default=False.
    threads : str, optional
        Thread count, passed verbatim to ``bwa aln -t``, default='8'.
    """
    print()
    print('Aligning with bwa aln/samse.')
    stdout.flush()
    start = datetime.now()
    work_dir = tempfile.mkdtemp()
    genome_db = os.path.join(work_dir, "genome")
    pe_output = os.path.join(work_dir, "pe.sai")
    bwa_output = os.path.join(work_dir, "output.sam")
    print('Temp directory:', work_dir)
    print('Output path: ', output_path)
    print('Stderr file: /dev/null')
    stdout.flush()
    null = open("/dev/null", "w")
    try:
        if bNoRebuildIndex and os.path.isfile(genome_db + ".sa"):
            print('bwa index exists, no need to remake (--norebuildbwaindex specified)', end=' ')
        else:
            print('Make bwa index...', end=' ')
            stdout.flush()
            subprocess.check_call([bwa_path, "index", "-p", genome_db, genome_path],
                                  stderr=null)
            print('Done.')
            stdout.flush()
        with open(pe_output, "w") as pe_file:
            print('Align with bwa aln...', end=' ')
            stdout.flush()
            subprocess.check_call([bwa_path, "aln", "-t", threads, genome_db, pe_path],
                                  stdout=pe_file, stderr=null)
            print('Done.')
            stdout.flush()
        with open(bwa_output, "w") as bwa_file:
            print('Start bwa samse...', end=' ')
            stdout.flush()
            subprocess.check_call([bwa_path, "samse",
                                   "-r", "@RG\tID:ILLUMINA\tSM:48_2\tPL:ILLUMINA\tLB:LIB1",
                                   genome_db,
                                   pe_output,
                                   pe_path], stdout=bwa_file, stderr=null)
            print('Done.')
            stdout.flush()
    finally:
        null.close()
    elapsed = datetime.now() - start
    print('Time elapsed for bwa index and aln/samse:', elapsed)
    print()
    print('Copy the result to the output directory and remove temp files...', end=' ')
    stdout.flush()
    start = datetime.now()
    shutil.move(bwa_output, output_path)
    if bNoRebuildIndex:
        print('Keeping genome index files for future use in ' + work_dir, end=' ')
        # Drop everything except the "genome*" index files.
        for f in os.listdir(work_dir):
            if not re.search("genome", f):
                os.remove(os.path.join(work_dir, f))
    else:
        shutil.rmtree(work_dir)
    elapsed = datetime.now() - start
    print('Done.')
    print('Time elapsed for copying result to the output directory and removing temp files:', elapsed)
    stdout.flush()
if __name__ == '__main__':
    # Command-line entry point: parse arguments, echo the configuration,
    # then dispatch to paired-end (aln/sampe or mem) or single-end (samse)
    # mapping.
    parser = argparse.ArgumentParser(description="Maps the given reads with bwa.")
    parser.add_argument('pe1_path', type=str, help='Path to the first reads in a read pair (if paired reads). Just the reads if single reads')
    # pe2_path is optional; omitting it selects the single-end code path.
    parser.add_argument('pe2_path', type=str, nargs='?', default=False, help='Path to the second pairs. Leave unspecified if single reads.')
    parser.add_argument('genome_path', type=str, help='Path to the reference genome/contigs.')
    parser.add_argument('output_path', type=str, help='Output path of resulting .bam and .bai file.')
    parser.add_argument('--tmp_path', type=str, required=False, help='Output path of temporary files.')
    parser.add_argument('--bwa_path', type=str, default='bwa', required=False, help='Path to bwa binary with bwa binary name at the end.')
    parser.add_argument('--threads', type=str, default='8', required=False, help='Number of threads for bwa mem.')
    parser.add_argument('--clear', action="store_true", required=False,
                        help='Remove SAM file when BAM is already created, and remove BAM file when sorted BAM is already created.')
    parser.add_argument('--nomem', action="store_true", required=False,
                        help='bwa mem default, If flag specified the script uses old bwa algorithm with "aln" and "sampe". ')
    parser.add_argument('--norebuildindex', action="store_true", required=False,
                        help='Do not rebuild bwa index if it already exists in the tmp_dir.')
    args = parser.parse_args()
    tmp_path = args.tmp_path
    if tmp_path != None and not os.path.exists(tmp_path):
        os.makedirs(tmp_path)
    output_path = args.output_path
    # NOTE(review): this creates output_path as a *directory*, yet the
    # mapping functions use output_path as a file prefix for pysam.sort /
    # pysam.index — confirm which behavior is intended.
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    # Echo the effective configuration before starting.
    print()
    print('pe1_path:', args.pe1_path)
    print('pe2_path:', args.pe2_path)
    print('genome_path:', args.genome_path)
    print('output_path:', output_path)
    if tmp_path != None:
        print("tmp_path:", tmp_path)
    print('bwa path:', args.bwa_path)
    print('number of threads:', args.threads)
    print('Remove temp SAM and BAM files:', end=' ')
    if args.clear:
        print('Yes')
    else:
        print('No')
    print('Use bwa aln and sampe instead of bwa mem:', end=' ')
    if args.nomem:
        print('Yes')
    else:
        print('No')
    print('Do not rebuild bwa index if already exists in tmp dir:', end=' ')
    if args.norebuildindex:
        print('Yes')
    else:
        print('No')
    stdout.flush()
    print()
    print('Start processing.')
    stdout.flush()
    if args.pe2_path and args.nomem:
        bwa_sampe(args.pe1_path, args.pe2_path, args.genome_path, output_path, tmp_path,
                  args.bwa_path, args.clear, args.norebuildindex, args.threads)
    elif args.pe2_path:
        bwa_mem(args.pe1_path, args.pe2_path, args.genome_path, args.threads, output_path,
                tmp_path, args.bwa_path, args.clear, args.norebuildindex)
    else:
        # NOTE(review): passes 6 positional args, but map_single_reads as
        # originally written takes 5 (pe_path, genome_path, output_path,
        # bwa_path, threads) — this call raises TypeError; confirm the
        # intended signature.
        map_single_reads(args.pe1_path, args.genome_path, args.output_path, args.bwa_path, args.norebuildindex, args.threads)
    print()
    print('Processing is finished.')
    stdout.flush()
/KPyGithub-1.32a1.tar.gz/KPyGithub-1.32a1/github/Hook.py |
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. #
# http://pygithub.github.io/PyGithub/v1/index.html #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
import github.HookResponse
class Hook(github.GithubObject.CompletableGithubObject):
    """
    This class represents Hooks as returned for example by http://developer.github.com/v3/repos/hooks

    Attributes are completed lazily: every property first calls
    `_completeIfNotSet`, which (per the `CompletableGithubObject` base
    class, not shown in this file) fetches the full object from the API
    when the attribute was absent from the original payload.

    NOTE: this module targets Python 2 (it uses the `unicode` builtin).
    """

    def __repr__(self):
        # Debug representation built from the two identifying attributes.
        return self.get__repr__({"id": self._id.value, "url": self._url.value})

    # --- Lazily-completed attributes ------------------------------------

    @property
    def active(self):
        """
        :type: bool
        """
        self._completeIfNotSet(self._active)
        return self._active.value

    @property
    def config(self):
        """
        :type: dict
        """
        self._completeIfNotSet(self._config)
        return self._config.value

    @property
    def created_at(self):
        """
        :type: datetime.datetime
        """
        self._completeIfNotSet(self._created_at)
        return self._created_at.value

    @property
    def events(self):
        """
        :type: list of string
        """
        self._completeIfNotSet(self._events)
        return self._events.value

    @property
    def id(self):
        """
        :type: integer
        """
        self._completeIfNotSet(self._id)
        return self._id.value

    @property
    def last_response(self):
        """
        :type: :class:`github.HookResponse.HookResponse`
        """
        self._completeIfNotSet(self._last_response)
        return self._last_response.value

    @property
    def name(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._name)
        return self._name.value

    @property
    def test_url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._test_url)
        return self._test_url.value

    @property
    def updated_at(self):
        """
        :type: datetime.datetime
        """
        self._completeIfNotSet(self._updated_at)
        return self._updated_at.value

    @property
    def url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._url)
        return self._url.value

    # --- REST operations -------------------------------------------------

    def delete(self):
        """
        Delete this hook on the server.

        :calls: `DELETE /repos/:owner/:repo/hooks/:id <http://developer.github.com/v3/repos/hooks>`_
        :rtype: None
        """
        # Response headers/body are intentionally ignored.
        headers, data = self._requester.requestJsonAndCheck(
            "DELETE",
            self.url
        )

    def edit(self, name, config, events=github.GithubObject.NotSet, add_events=github.GithubObject.NotSet, remove_events=github.GithubObject.NotSet, active=github.GithubObject.NotSet):
        """
        Update this hook on the server; optional arguments left as
        `NotSet` are omitted from the PATCH body.

        :calls: `PATCH /repos/:owner/:repo/hooks/:id <http://developer.github.com/v3/repos/hooks>`_
        :param name: string
        :param config: dict
        :param events: list of string
        :param add_events: list of string
        :param remove_events: list of string
        :param active: bool
        :rtype: None
        """
        # Validate arguments before issuing the request (`unicode`: Python 2).
        assert isinstance(name, (str, unicode)), name
        assert isinstance(config, dict), config
        assert events is github.GithubObject.NotSet or all(isinstance(element, (str, unicode)) for element in events), events
        assert add_events is github.GithubObject.NotSet or all(isinstance(element, (str, unicode)) for element in add_events), add_events
        assert remove_events is github.GithubObject.NotSet or all(isinstance(element, (str, unicode)) for element in remove_events), remove_events
        assert active is github.GithubObject.NotSet or isinstance(active, bool), active
        post_parameters = {
            "name": name,
            "config": config,
        }
        if events is not github.GithubObject.NotSet:
            post_parameters["events"] = events
        if add_events is not github.GithubObject.NotSet:
            post_parameters["add_events"] = add_events
        if remove_events is not github.GithubObject.NotSet:
            post_parameters["remove_events"] = remove_events
        if active is not github.GithubObject.NotSet:
            post_parameters["active"] = active
        headers, data = self._requester.requestJsonAndCheck(
            "PATCH",
            self.url,
            input=post_parameters
        )
        # Refresh local attributes from the server's response.
        self._useAttributes(data)

    def test(self):
        """
        Trigger a test delivery of this hook.

        :calls: `POST /repos/:owner/:repo/hooks/:id/tests <http://developer.github.com/v3/repos/hooks>`_
        :rtype: None
        """
        headers, data = self._requester.requestJsonAndCheck(
            "POST",
            self.url + "/tests"
        )

    # --- Attribute plumbing ----------------------------------------------

    def _initAttributes(self):
        # Reset every attribute to the NotSet sentinel; invoked by the
        # GithubObject machinery (base class, not shown here).
        self._active = github.GithubObject.NotSet
        self._config = github.GithubObject.NotSet
        self._created_at = github.GithubObject.NotSet
        self._events = github.GithubObject.NotSet
        self._id = github.GithubObject.NotSet
        self._last_response = github.GithubObject.NotSet
        self._name = github.GithubObject.NotSet
        self._test_url = github.GithubObject.NotSet
        self._updated_at = github.GithubObject.NotSet
        self._url = github.GithubObject.NotSet

    def _useAttributes(self, attributes):
        # Copy raw API payload fields into typed attribute wrappers.
        # "# pragma no branch" appears to be a coverage.py pragma excluding
        # the always-taken branch from branch-coverage reporting.
        if "active" in attributes:  # pragma no branch
            self._active = self._makeBoolAttribute(attributes["active"])
        if "config" in attributes:  # pragma no branch
            self._config = self._makeDictAttribute(attributes["config"])
        if "created_at" in attributes:  # pragma no branch
            self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
        if "events" in attributes:  # pragma no branch
            self._events = self._makeListOfStringsAttribute(attributes["events"])
        if "id" in attributes:  # pragma no branch
            self._id = self._makeIntAttribute(attributes["id"])
        if "last_response" in attributes:  # pragma no branch
            self._last_response = self._makeClassAttribute(github.HookResponse.HookResponse, attributes["last_response"])
        if "name" in attributes:  # pragma no branch
            self._name = self._makeStringAttribute(attributes["name"])
        if "test_url" in attributes:  # pragma no branch
            self._test_url = self._makeStringAttribute(attributes["test_url"])
        if "updated_at" in attributes:  # pragma no branch
            self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
        if "url" in attributes:  # pragma no branch
            self._url = self._makeStringAttribute(attributes["url"])
/ClueDojo-1.4.3-1.tar.gz/ClueDojo-1.4.3-1/src/cluedojo/static/dojox/drawing/manager/Stencil.js | if(!dojo._hasResource["dojox.drawing.manager.Stencil"]){
dojo._hasResource["dojox.drawing.manager.Stencil"]=true;
dojo.provide("dojox.drawing.manager.Stencil");
(function(){
var _1,_2;
dojox.drawing.manager.Stencil=dojox.drawing.util.oo.declare(function(_3){
_1=_3.surface;
this.canvas=_3.canvas;
this.defaults=dojox.drawing.defaults.copy();
this.undo=_3.undo;
this.mouse=_3.mouse;
this.keys=_3.keys;
this.anchors=_3.anchors;
this.stencils={};
this.selectedStencils={};
this._mouseHandle=this.mouse.register(this);
dojo.connect(this.keys,"onArrow",this,"onArrow");
dojo.connect(this.keys,"onEsc",this,"deselect");
dojo.connect(this.keys,"onDelete",this,"onDelete");
},{_dragBegun:false,_wasDragged:false,_secondClick:false,_isBusy:false,register:function(_4){
if(_4.isText&&!_4.editMode&&_4.deleteEmptyCreate&&!_4.getText()){
console.warn("EMPTY CREATE DELETE",_4);
_4.destroy();
return false;
}
this.stencils[_4.id]=_4;
if(_4.execText){
if(_4._text&&!_4.editMode){
this.selectItem(_4);
}
_4.connect("execText",this,function(){
if(_4.isText&&_4.deleteEmptyModify&&!_4.getText()){
console.warn("EMPTY MOD DELETE",_4);
this.deleteItem(_4);
}else{
if(_4.selectOnExec){
this.selectItem(_4);
}
}
});
}
_4.connect("deselect",this,function(){
if(!this._isBusy&&this.isSelected(_4)){
this.deselectItem(_4);
}
});
_4.connect("select",this,function(){
if(!this._isBusy&&!this.isSelected(_4)){
this.selectItem(_4);
}
});
return _4;
},unregister:function(_5){
if(_5){
_5.selected&&this.onDeselect(_5);
delete this.stencils[_5.id];
}
},onArrow:function(_6){
if(this.hasSelected()){
this.saveThrottledState();
this.group.applyTransform({dx:_6.x,dy:_6.y});
}
},_throttleVrl:null,_throttle:false,throttleTime:400,_lastmxx:-1,_lastmxy:-1,saveMoveState:function(){
var mx=this.group.getTransform();
if(mx.dx==this._lastmxx&&mx.dy==this._lastmxy){
return;
}
this._lastmxx=mx.dx;
this._lastmxy=mx.dy;
this.undo.add({before:dojo.hitch(this.group,"setTransform",mx)});
},saveThrottledState:function(){
clearTimeout(this._throttleVrl);
clearInterval(this._throttleVrl);
this._throttleVrl=setTimeout(dojo.hitch(this,function(){
this._throttle=false;
this.saveMoveState();
}),this.throttleTime);
if(this._throttle){
return;
}
this._throttle=true;
this.saveMoveState();
},unDelete:function(_7){
for(var s in _7){
_7[s].render();
this.onSelect(_7[s]);
}
},onDelete:function(_8){
if(_8!==true){
this.undo.add({before:dojo.hitch(this,"unDelete",this.selectedStencils),after:dojo.hitch(this,"onDelete",true)});
}
this.withSelected(function(m){
this.anchors.remove(m);
var id=m.id;
m.destroy();
delete this.stencils[id];
});
this.selectedStencils={};
},deleteItem:function(_9){
if(this.hasSelected()){
var _a=[];
for(var m in this.selectedStencils){
if(this.selectedStencils.id==_9.id){
if(this.hasSelected()==1){
this.onDelete();
return;
}
}else{
_a.push(this.selectedStencils.id);
}
}
this.deselect();
this.selectItem(_9);
this.onDelete();
dojo.forEach(_a,function(id){
this.selectItem(id);
},this);
}else{
this.selectItem(_9);
this.onDelete();
}
},removeAll:function(){
this.selectAll();
this._isBusy=true;
this.onDelete();
this.stencils={};
this._isBusy=false;
},setSelectionGroup:function(){
this.withSelected(function(m){
this.onDeselect(m,true);
});
if(this.group){
_1.remove(this.group);
this.group.removeShape();
}
this.group=_1.createGroup();
this.group.setTransform({dx:0,dy:0});
this.withSelected(function(m){
this.group.add(m.container);
m.select();
});
},setConstraint:function(){
var t=Infinity;
l=Infinity;
this.withSelected(function(m){
var o=m.getBounds();
t=Math.min(o.y1,t);
l=Math.min(o.x1,l);
});
this.constrain={l:-l,t:-t};
},onDeselect:function(_b,_c){
if(!_c){
delete this.selectedStencils[_b.id];
}
this.anchors.remove(_b);
_1.add(_b.container);
_b.selected&&_b.deselect();
_b.applyTransform(this.group.getTransform());
},deselectItem:function(_d){
this.onDeselect(_d);
},deselect:function(){
this.withSelected(function(m){
this.onDeselect(m);
});
this._dragBegun=false;
this._wasDragged=false;
},onSelect:function(_e){
if(!_e){
console.error("null stencil is not selected:",this.stencils);
}
if(this.selectedStencils[_e.id]){
return;
}
this.selectedStencils[_e.id]=_e;
this.group.add(_e.container);
_e.select();
if(this.hasSelected()==1){
this.anchors.add(_e,this.group);
}
},selectAll:function(){
this._isBusy=true;
for(var m in this.stencils){
this.selectItem(m);
}
this._isBusy=false;
},selectItem:function(_f){
var id=typeof (_f)=="string"?_f:_f.id;
var _10=this.stencils[id];
this.setSelectionGroup();
this.onSelect(_10);
this.group.moveToFront();
this.setConstraint();
},onStencilDoubleClick:function(obj){
if(this.selectedStencils[obj.id]){
if(this.selectedStencils[obj.id].edit){
var m=this.selectedStencils[obj.id];
m.editMode=true;
this.deselect();
m.edit();
}
}
},onAnchorUp:function(){
this.setConstraint();
},onStencilDown:function(obj,evt){
if(!this.stencils[obj.id]){
return;
}
this._isBusy=true;
if(this.selectedStencils[obj.id]&&this.keys.meta){
if(dojo.isMac&&this.keys.cmmd){
}
this.onDeselect(this.selectedStencils[obj.id]);
if(this.hasSelected()==1){
this.withSelected(function(m){
this.anchors.add(m,this.group);
});
}
this.group.moveToFront();
this.setConstraint();
return;
}else{
if(this.selectedStencils[obj.id]){
var mx=this.group.getTransform();
this._offx=obj.x-mx.dx;
this._offy=obj.y-mx.dy;
return;
}else{
if(!this.keys.meta){
this.deselect();
}else{
}
}
}
this.selectItem(obj.id);
var mx=this.group.getTransform();
this._offx=obj.x-mx.dx;
this._offy=obj.y-mx.dx;
this.orgx=obj.x;
this.orgy=obj.y;
this._isBusy=false;
this.undo.add({before:function(){
},after:function(){
}});
},onStencilUp:function(obj){
// Intentionally empty: selection/drag state is resolved in onStencilDown/Drag.
},onStencilDrag:function(obj){
if(!this._dragBegun){
this.onBeginDrag(obj);
this._dragBegun=true;
}else{
this.saveThrottledState();
var x=obj.x-obj.last.x,y=obj.y-obj.last.y,mx=this.group.getTransform(),c=this.constrain,mz=this.defaults.anchors.marginZero;
x=obj.x-this._offx;
y=obj.y-this._offy;
if(x<c.l+mz){
x=c.l+mz;
}
if(y<c.t+mz){
y=c.t+mz;
}
this.group.setTransform({dx:x,dy:y});
}
},onDragEnd:function(obj){
// Mouse drag finished; allow the next drag to re-run onBeginDrag.
this._dragBegun=false;
},onBeginDrag:function(obj){
// First drag move: remember a real drag (not just a click) happened.
this._wasDragged=true;
},onDown:function(obj){
// Mouse-down on empty canvas clears the current selection.
this.deselect();
},exporter:function(){
var _11=[];
for(var m in this.stencils){
this.stencils[m].enabled&&_11.push(this.stencils[m].exporter());
}
return _11;
},toSelected:function(_12){
// Invoke the method named _12, with any remaining arguments, on every
// selected stencil.
var _13=Array.prototype.slice.call(arguments).splice(1);
for(var m in this.selectedStencils){
var _14=this.selectedStencils[m];
_14[_12].apply(_14,_13);
}
},withSelected:function(_15){
// Run callback _15 (bound to this manager) for each selected stencil.
var f=dojo.hitch(this,_15);
for(var m in this.selectedStencils){
f(this.selectedStencils[m]);
}
},withUnselected:function(_16){
// Run callback _16 (bound to this manager) for each stencil that is not
// currently selected.
var f=dojo.hitch(this,_16);
for(var m in this.stencils){
!this.stencils[m].selected&&f(this.stencils[m]);
}
},withStencils:function(_17){
var f=dojo.hitch(this,_17);
for(var m in this.stencils){
f(this.stencils[m]);
}
},hasSelected:function(){
var ln=0;
for(var m in this.selectedStencils){
ln++;
}
return ln;
},isSelected:function(_18){
return !!this.selectedStencils[_18.id];
}});
})();
} | PypiClean |
/CSG_test-0.1.2.tar.gz/CSG_test-0.1.2/clientAPI/mixin.py | from types import ModuleType
from typing import (Any, Dict, Optional, Type, Union)
import clientAPI.base as base
from .client import CSGList, CSGenome
import clientAPI.utils as exc
# Public API of this module.
# FIX: __all__ previously listed many mixins whose definitions are commented
# out (or absent entirely), which makes ``from clientAPI.mixin import *``
# raise AttributeError. Only the mixins actually defined below are exported.
__all__ = [
    "GetMixin",
    "ListMixin",
    "CreateMixin",
    "UpdateMixin",
    "DeleteMixin",
    "SaveMixin",
    "ObjectDeleteMixin",
]
# Short aliases for the project's REST base classes used by the mixins below.
_RestManagerBase = base.RESTManager
_RestObjectBase = base.RESTObject
class GetMixin(_RestManagerBase):
    """Manager mixin adding single-object retrieval by uid."""

    def get(self, uid=None, params=None):
        """Retrieve a single object.

        Args:
            uid: integer uid of the object to retrieve (required).
            params: additional query parameters (e.g. columns). A 'filter'
                key is rejected; filtering is not supported on this endpoint.

        Returns:
            The generated RESTObject on success, the server's error payload
            when the response has no 'data', the raw response when it has
            neither 'data' nor 'errors', or None for invalid input.
        """
        # FIX: input validation previously used ``assert`` statements, which
        # are stripped when Python runs with -O; validate explicitly while
        # keeping the original print-and-return-None behavior and messages.
        try:
            if uid is None:
                raise ValueError("uid cannot be empty")
            if not isinstance(uid, int):
                raise ValueError("uid has to be an integer")
            if params and "filter" in params.keys():
                raise ValueError("filters are not allowed")
        except ValueError as e:
            print(f"Input error: {e}")
            return None
        incoming_filter = params or {}
        server_data = self.csgenome.http_get(path=f'{self._path}/{uid}', params=incoming_filter)
        try:
            if 'data' in server_data.keys():
                # TODO: very rigid implementation
                return self._obj_cls(self, server_data['data'])
            # No data: surface the server-reported errors.
            return server_data['errors']
        except KeyError:
            # Response carried neither 'data' nor 'errors'; return verbatim.
            return server_data
class ListMixin(_RestManagerBase):
    """Manager mixin adding list / first-match / generator retrieval."""

    _obj_cls: Optional[Type[base.RESTObject]]
    _path: Optional[str]
    csgenome: CSGenome

    def list(self, columns=None, exact=None, order_by=None, desc=False, **kwargs):
        """Retrieve a list of objects.

        Args:
            columns: column name or list of column names to return.
            exact: when truthy, string filters are matched exactly.
            order_by: column to sort the results by.
            desc: sort descending when True.
            **kwargs: column=value pairs used as filters.

        Returns:
            A list of RESTObject instances on success, or the server's
            error payload on failure.
        """
        query_param = self._parse_params(kwargs, columns=columns, exact=exact, order_by=order_by, desc=desc)
        # FIX: _parse_params can emit non-string values (e.g. desc -> True),
        # and bool has no .strip(); only strip real strings, matching the
        # guard already used in list_first().
        for k, v in query_param.items():
            if isinstance(v, str):
                query_param[k] = v.strip()
        obj, error = self.csgenome.http_list(self._path, query_param)
        if error:
            return obj
        return [self._obj_cls(self, item, created_from_list=True) for item in obj]

    def list_first(self, dictionary=None, columns=None, exact=False, report_count=False, order_by=None, desc=False, **kwargs):
        """Return a single object that matches the provided properties.

        Pass a dictionary of key/value pairs to filter records by, or use
        keyword parameters instead (kwargs are ignored when *dictionary*
        is given).

        Args:
            dictionary: optional dict of column=value filters.
            columns: column name or list of column names to return.
            exact: when True, string filters are case sensitive.
            report_count: when True, return ``(obj, count)`` where count is
                the number of records matching the filters.
            order_by: column to sort the results by.
            desc: sort descending when True.
            **kwargs: column=value filters (used when *dictionary* is None).
        """
        query_param = self._parse_params(dictionary or kwargs, columns=columns,
                                         exact=exact, order_by=order_by, desc=desc)
        for k, v in query_param.items():
            if isinstance(v, str):
                query_param[k] = v.strip()
        obj, error = self.csgenome.http_list(self._path, query_param)
        if error or not obj:
            return obj
        if report_count:
            return self._obj_cls(self, obj[0]), len(obj)
        return self._obj_cls(self, obj[0])

    def _parse_params(self, params: Dict, columns: Union[list, str], exact: bool, order_by: str, desc: bool) -> Dict:
        """Translate keyword filters into the API's query-string parameters.

        Builds a combined ``filter`` entry of the form ``col1:v1;col2:v2``
        plus optional ``columns``, ``filter_mode``, ``order_by`` and
        ``desc`` entries.
        """
        result = {}
        if columns:
            # 'columns' accepts either a single name or a list of names.
            result['columns'] = ','.join(columns) if isinstance(columns, list) else str(columns)
        if exact:
            result['filter_mode'] = 'exact'
        for k, v in params.items():
            k = k.replace('__', '.')  # double underscore addresses foreign-key fields
            if v is None:
                continue
            value = ','.join(v) if isinstance(v, list) else v
            if 'filter' in result:  # append to the combined filter field
                result['filter'] += f';{k}:{value}'
            else:  # first filter
                result['filter'] = f'{k}:{value}'
        if order_by:
            result['order_by'] = order_by
        if desc:
            result['desc'] = True
        return result

    def gen(self, columns=None, exact=None, order_by=None, desc=False, **kwargs):
        """Return a generator that lazily pages through matching objects."""
        query_param = self._parse_params(kwargs, columns=columns, exact=exact, order_by=order_by, desc=desc)
        # Same guard as list(): never call .strip() on non-string values.
        for k, v in query_param.items():
            if isinstance(v, str):
                query_param[k] = v.strip()
        if "page" not in query_param.keys():
            query_param['page'] = 1
        if "limit" not in query_param.keys():
            query_param['limit'] = 50
        csg_list = CSGList(self.csgenome, self._path, query_data=query_param)
        return csg_list.gen(lambda item: self._obj_cls(self, item, created_from_list=True))
class CreateMixin(_RestManagerBase):
    """Manager mixin adding object creation."""

    _path: Optional[str]
    csgenome: CSGenome

    def _check_missing_create_attrs(self, data):
        """Raise AttributeError if any required creation attribute is absent."""
        missing = [attr for attr in self._create_attrs.required if attr not in data]
        if missing:
            raise AttributeError(f"Missing attributes: {', '.join(missing)}")

    def create(self, data=None, return_response=False):
        """Create a new object on the server.

        Args:
            data: field values for the new resource.
            return_response: when True, return the entire response body
                instead of just its 'data' section on success.

        Returns:
            Tuple ``(resp, is_error)``: server data on success / error
            information on failure, plus a boolean error flag.

        Raises:
            AttributeError: when a required creation attribute is missing.
        """
        data = data or {}
        self._check_missing_create_attrs(data)
        # Drop None-valued fields before sending.
        payload = {k: v for k, v in data.items() if v is not None}
        server_data = self.csgenome.http_post(self._path, post_data=payload)
        if 'errors' in server_data:
            return server_data['errors'], True
        if 'status' in server_data and not str(server_data['status']).startswith('2'):
            # Non-2xx status without an 'errors' key: hand back everything.
            return server_data, True
        if return_response:
            return server_data, False
        return server_data['data'], False
class UpdateMixin(_RestManagerBase):
    """Manager mixin adding object update by uid."""

    # Class-level declarations populated by the RESTManager machinery.
    _computed_path: Optional[str]
    _from_parent_attrs: Dict[str, Any]
    _obj_cls: Optional[Type[base.RESTObject]]
    _parent: Optional[base.RESTObject]
    _parent_attrs: Dict[str, Any]
    _path: Optional[str]
    _update_uses_post: bool = False
    csgenome: CSGenome
    def _check_missing_update_attrs(self, data):
        """Raise AttributeError when a required update attribute is missing."""
        # Remove the id field from the required list as it was previously moved
        # to the http path.
        required = tuple(
            [k for k in self._update_attrs.required if k != self._obj_cls._id_attr]
        )
        missing = []
        for attr in required:
            if attr not in data:
                missing.append(attr)
        if missing:
            raise AttributeError(f"Missing attributes: {', '.join(missing)}")
    def update(self, uid, new_data=None):
        """Update an object on the server.

        Args:
            uid: uid of the object to update (required).
            new_data: dict with the fields to change.

        Returns:
            The updated object wrapped in ``self._obj_cls`` on success, or
            the server's error payload when the response has no 'data'.

        Raises:
            AttributeError: if uid is None or required attributes are missing.
        """
        new_data = new_data or {}
        if uid is None:
            raise AttributeError(f"Missing uid.")
        else:
            path = f"{self._path}/{uid}"
        self._check_missing_update_attrs(new_data)
        server_data = self.csgenome.http_put(path, put_data=new_data)
        if 'data' in server_data.keys():
            res = server_data['data']  # TODO: very rigid implementation
            return self._obj_cls(self, res)
        else:
            # NOTE(review): assumes failure responses always carry 'errors';
            # a KeyError escapes here otherwise -- confirm server contract.
            return server_data['errors']
# class SetMixin(_RestManagerBase):
# _computed_path: Optional[str]
# _from_parent_attrs: Dict[str, Any]
# _obj_cls: Optional[Type[base.RESTObject]]
# _parent: Optional[base.RESTObject]
# _parent_attrs: Dict[str, Any]
# _path: Optional[str]
# gitlab: gitlab.Gitlab
# @exc.on_http_error(exc.GitlabSetError)
# def set(self, key: str, value: str, **kwargs: Any) -> base.RESTObject:
# """Create or update the object.
# Args:
# key: The key of the object to create/update
# value: The value to set for the object
# **kwargs: Extra options to send to the server (e.g. sudo)
# Raises:
# GitlabAuthenticationError: If authentication is not correct
# GitlabSetError: If an error occurred
# Returns:
# The created/updated attribute
# """
# path = f"{self.path}/{utils.EncodedId(key)}"
# data = {"value": value}
# server_data = self.gitlab.http_put(path, post_data=data, **kwargs)
# if TYPE_CHECKING:
# assert not isinstance(server_data, requests.Response)
# assert self._obj_cls is not None
# return self._obj_cls(self, server_data)
class DeleteMixin(_RestManagerBase):
    """Manager mixin adding object deletion by uid."""

    _path: Optional[str]
    csgenome: CSGenome

    def delete(self, uid):
        """Delete the object identified by *uid* on the server.

        Args:
            uid: uid of the object to delete (required).

        Returns:
            The server response wrapped in ``self._obj_cls``.

        Raises:
            AttributeError: if uid is None.
        """
        if uid is None:
            raise AttributeError(f"Missing uid.")
        server_data = self.csgenome.http_delete(f"{self._path}/{uid}")
        return self._obj_cls(self, server_data)
# class CRUDMixin(GetMixin, ListMixin, CreateMixin, UpdateMixin, DeleteMixin):
# _computed_path: Optional[str]
# _from_parent_attrs: Dict[str, Any]
# _obj_cls: Optional[Type[base.RESTObject]]
# _parent: Optional[base.RESTObject]
# _parent_attrs: Dict[str, Any]
# _path: Optional[str]
# gitlab: gitlab.Gitlab
# pass
# class NoUpdateMixin(GetMixin, ListMixin, CreateMixin, DeleteMixin):
# _computed_path: Optional[str]
# _from_parent_attrs: Dict[str, Any]
# _obj_cls: Optional[Type[base.RESTObject]]
# _parent: Optional[base.RESTObject]
# _parent_attrs: Dict[str, Any]
# _path: Optional[str]
# gitlab: gitlab.Gitlab
# pass
class SaveMixin(_RestObjectBase):
    """Mixin for RESTObject's that can be updated."""

    _id_attr: Optional[str]
    _attrs: Dict[str, Any]
    _module: ModuleType
    _parent_attrs: Dict[str, Any]
    _updated_attrs: Dict[str, Any]
    manager: base.RESTManager

    def _get_updated_data(self):
        """Collect every required attribute plus any locally modified ones."""
        payload = {
            attr: getattr(self, attr)
            for attr in self.manager._update_attrs.required
        }
        payload.update(self._updated_attrs)
        return payload

    def save(self):
        """Persist local changes to the server and refresh this object.

        Returns:
            The new object data (*not* a RESTObject), or None when there is
            nothing to update.
        """
        changes = self._get_updated_data()
        if not changes:
            # Nothing to update; the server fails on an empty payload.
            return None
        server_data = self.manager.update(uid=self.encoded_id, new_data=changes)
        self._update_attrs(server_data)
        return server_data
class ObjectDeleteMixin(_RestObjectBase):
    """Mixin for RESTObject's that can be deleted."""
    # Attributes populated by the RESTObject machinery.
    _id_attr: Optional[str]
    _attrs: Dict[str, Any]
    _module: ModuleType
    _parent_attrs: Dict[str, Any]
    _updated_attrs: Dict[str, Any]
    manager: base.RESTManager
    def delete(self, **kwargs: Any) -> None:
        """Delete the object from the server via its manager.

        Args:
            **kwargs: Extra options forwarded to the manager's delete().
        """
        self.manager.delete(self.encoded_id, **kwargs)
# class UserAgentDetailMixin(_RestObjectBase):
# _id_attr: Optional[str]
# _attrs: Dict[str, Any]
# _module: ModuleType
# _parent_attrs: Dict[str, Any]
# _updated_attrs: Dict[str, Any]
# manager: base.RESTManager
# @cli.register_custom_action(("Snippet", "ProjectSnippet", "ProjectIssue"))
# @exc.on_http_error(exc.GitlabGetError)
# def user_agent_detail(self, **kwargs: Any) -> Dict[str, Any]:
# """Get the user agent detail.
# Args:
# **kwargs: Extra options to send to the server (e.g. sudo)
# Raises:
# GitlabAuthenticationError: If authentication is not correct
# GitlabGetError: If the server cannot perform the request
# """
# path = f"{self.manager.path}/{self.encoded_id}/user_agent_detail"
# result = self.manager.gitlab.http_get(path, **kwargs)
# if TYPE_CHECKING:
# assert not isinstance(result, requests.Response)
# return result
# class AccessRequestMixin(_RestObjectBase):
# _id_attr: Optional[str]
# _attrs: Dict[str, Any]
# _module: ModuleType
# _parent_attrs: Dict[str, Any]
# _updated_attrs: Dict[str, Any]
# manager: base.RESTManager
# @cli.register_custom_action(
# ("ProjectAccessRequest", "GroupAccessRequest"), tuple(), ("access_level",)
# )
# @exc.on_http_error(exc.GitlabUpdateError)
# def approve(
# self, access_level: int = gitlab.const.DEVELOPER_ACCESS, **kwargs: Any
# ) -> None:
# """Approve an access request.
# Args:
# access_level: The access level for the user
# **kwargs: Extra options to send to the server (e.g. sudo)
# Raises:
# GitlabAuthenticationError: If authentication is not correct
# GitlabUpdateError: If the server fails to perform the request
# """
# path = f"{self.manager.path}/{self.encoded_id}/approve"
# data = {"access_level": access_level}
# server_data = self.manager.gitlab.http_put(path, post_data=data, **kwargs)
# if TYPE_CHECKING:
# assert not isinstance(server_data, requests.Response)
# self._update_attrs(server_data)
# class DownloadMixin(_RestObjectBase):
# _id_attr: Optional[str]
# _attrs: Dict[str, Any]
# _module: ModuleType
# _parent_attrs: Dict[str, Any]
# _updated_attrs: Dict[str, Any]
# manager: base.RESTManager
# @cli.register_custom_action(("GroupExport", "ProjectExport"))
# @exc.on_http_error(exc.GitlabGetError)
# def download(
# self,
# streamed: bool = False,
# action: Optional[Callable] = None,
# chunk_size: int = 1024,
# **kwargs: Any,
# ) -> Optional[bytes]:
# """Download the archive of a resource export.
# Args:
# streamed: If True the data will be processed by chunks of
# `chunk_size` and each chunk is passed to `action` for
# treatment
# action: Callable responsible of dealing with chunk of
# data
# chunk_size: Size of each chunk
# **kwargs: Extra options to send to the server (e.g. sudo)
# Raises:
# GitlabAuthenticationError: If authentication is not correct
# GitlabGetError: If the server failed to perform the request
# Returns:
# The blob content if streamed is False, None otherwise
# """
# path = f"{self.manager.path}/download"
# result = self.manager.gitlab.http_get(
# path, streamed=streamed, raw=True, **kwargs
# )
# if TYPE_CHECKING:
# assert isinstance(result, requests.Response)
# return utils.response_content(result, streamed, action, chunk_size)
# class SubscribableMixin(_RestObjectBase):
# _id_attr: Optional[str]
# _attrs: Dict[str, Any]
# _module: ModuleType
# _parent_attrs: Dict[str, Any]
# _updated_attrs: Dict[str, Any]
# manager: base.RESTManager
# @cli.register_custom_action(
# ("ProjectIssue", "ProjectMergeRequest", "ProjectLabel", "GroupLabel")
# )
# @exc.on_http_error(exc.GitlabSubscribeError)
# def subscribe(self, **kwargs: Any) -> None:
# """Subscribe to the object notifications.
# Args:
# **kwargs: Extra options to send to the server (e.g. sudo)
# raises:
# GitlabAuthenticationError: If authentication is not correct
# GitlabSubscribeError: If the subscription cannot be done
# """
# path = f"{self.manager.path}/{self.encoded_id}/subscribe"
# server_data = self.manager.gitlab.http_post(path, **kwargs)
# if TYPE_CHECKING:
# assert not isinstance(server_data, requests.Response)
# self._update_attrs(server_data)
# @cli.register_custom_action(
# ("ProjectIssue", "ProjectMergeRequest", "ProjectLabel", "GroupLabel")
# )
# @exc.on_http_error(exc.GitlabUnsubscribeError)
# def unsubscribe(self, **kwargs: Any) -> None:
# """Unsubscribe from the object notifications.
# Args:
# **kwargs: Extra options to send to the server (e.g. sudo)
# raises:
# GitlabAuthenticationError: If authentication is not correct
# GitlabUnsubscribeError: If the unsubscription cannot be done
# """
# path = f"{self.manager.path}/{self.encoded_id}/unsubscribe"
# server_data = self.manager.gitlab.http_post(path, **kwargs)
# if TYPE_CHECKING:
# assert not isinstance(server_data, requests.Response)
# self._update_attrs(server_data)
# class TodoMixin(_RestObjectBase):
# _id_attr: Optional[str]
# _attrs: Dict[str, Any]
# _module: ModuleType
# _parent_attrs: Dict[str, Any]
# _updated_attrs: Dict[str, Any]
# manager: base.RESTManager
# @cli.register_custom_action(("ProjectIssue", "ProjectMergeRequest"))
# @exc.on_http_error(exc.GitlabTodoError)
# def todo(self, **kwargs: Any) -> None:
# """Create a todo associated to the object.
# Args:
# **kwargs: Extra options to send to the server (e.g. sudo)
# Raises:
# GitlabAuthenticationError: If authentication is not correct
# GitlabTodoError: If the todo cannot be set
# """
# path = f"{self.manager.path}/{self.encoded_id}/todo"
# self.manager.gitlab.http_post(path, **kwargs)
# class TimeTrackingMixin(_RestObjectBase):
# _id_attr: Optional[str]
# _attrs: Dict[str, Any]
# _module: ModuleType
# _parent_attrs: Dict[str, Any]
# _updated_attrs: Dict[str, Any]
# manager: base.RESTManager
# @cli.register_custom_action(("ProjectIssue", "ProjectMergeRequest"))
# @exc.on_http_error(exc.GitlabTimeTrackingError)
# def time_stats(self, **kwargs: Any) -> Dict[str, Any]:
# """Get time stats for the object.
# Args:
# **kwargs: Extra options to send to the server (e.g. sudo)
# Raises:
# GitlabAuthenticationError: If authentication is not correct
# GitlabTimeTrackingError: If the time tracking update cannot be done
# """
# # Use the existing time_stats attribute if it exist, otherwise make an
# # API call
# if "time_stats" in self.attributes:
# return self.attributes["time_stats"]
# path = f"{self.manager.path}/{self.encoded_id}/time_stats"
# result = self.manager.gitlab.http_get(path, **kwargs)
# if TYPE_CHECKING:
# assert not isinstance(result, requests.Response)
# return result
# @cli.register_custom_action(("ProjectIssue", "ProjectMergeRequest"), ("duration",))
# @exc.on_http_error(exc.GitlabTimeTrackingError)
# def time_estimate(self, duration: str, **kwargs: Any) -> Dict[str, Any]:
# """Set an estimated time of work for the object.
# Args:
# duration: Duration in human format (e.g. 3h30)
# **kwargs: Extra options to send to the server (e.g. sudo)
# Raises:
# GitlabAuthenticationError: If authentication is not correct
# GitlabTimeTrackingError: If the time tracking update cannot be done
# """
# path = f"{self.manager.path}/{self.encoded_id}/time_estimate"
# data = {"duration": duration}
# result = self.manager.gitlab.http_post(path, post_data=data, **kwargs)
# if TYPE_CHECKING:
# assert not isinstance(result, requests.Response)
# return result
# @cli.register_custom_action(("ProjectIssue", "ProjectMergeRequest"))
# @exc.on_http_error(exc.GitlabTimeTrackingError)
# def reset_time_estimate(self, **kwargs: Any) -> Dict[str, Any]:
# """Resets estimated time for the object to 0 seconds.
# Args:
# **kwargs: Extra options to send to the server (e.g. sudo)
# Raises:
# GitlabAuthenticationError: If authentication is not correct
# GitlabTimeTrackingError: If the time tracking update cannot be done
# """
# path = f"{self.manager.path}/{self.encoded_id}/reset_time_estimate"
# result = self.manager.gitlab.http_post(path, **kwargs)
# if TYPE_CHECKING:
# assert not isinstance(result, requests.Response)
# return result
# @cli.register_custom_action(("ProjectIssue", "ProjectMergeRequest"), ("duration",))
# @exc.on_http_error(exc.GitlabTimeTrackingError)
# def add_spent_time(self, duration: str, **kwargs: Any) -> Dict[str, Any]:
# """Add time spent working on the object.
# Args:
# duration: Duration in human format (e.g. 3h30)
# **kwargs: Extra options to send to the server (e.g. sudo)
# Raises:
# GitlabAuthenticationError: If authentication is not correct
# GitlabTimeTrackingError: If the time tracking update cannot be done
# """
# path = f"{self.manager.path}/{self.encoded_id}/add_spent_time"
# data = {"duration": duration}
# result = self.manager.gitlab.http_post(path, post_data=data, **kwargs)
# if TYPE_CHECKING:
# assert not isinstance(result, requests.Response)
# return result
# @cli.register_custom_action(("ProjectIssue", "ProjectMergeRequest"))
# @exc.on_http_error(exc.GitlabTimeTrackingError)
# def reset_spent_time(self, **kwargs: Any) -> Dict[str, Any]:
# """Resets the time spent working on the object.
# Args:
# **kwargs: Extra options to send to the server (e.g. sudo)
# Raises:
# GitlabAuthenticationError: If authentication is not correct
# GitlabTimeTrackingError: If the time tracking update cannot be done
# """
# path = f"{self.manager.path}/{self.encoded_id}/reset_spent_time"
# result = self.manager.gitlab.http_post(path, **kwargs)
# if TYPE_CHECKING:
# assert not isinstance(result, requests.Response)
# return result
# class ParticipantsMixin(_RestObjectBase):
# _id_attr: Optional[str]
# _attrs: Dict[str, Any]
# _module: ModuleType
# _parent_attrs: Dict[str, Any]
# _updated_attrs: Dict[str, Any]
# manager: base.RESTManager
# @cli.register_custom_action(("ProjectMergeRequest", "ProjectIssue"))
# @exc.on_http_error(exc.GitlabListError)
# def participants(self, **kwargs: Any) -> Dict[str, Any]:
# """List the participants.
# Args:
# all: If True, return all the items, without pagination
# per_page: Number of items to retrieve per request
# page: ID of the page to return (starts with page 1)
# as_list: If set to False and no pagination option is
# defined, return a generator instead of a list
# **kwargs: Extra options to send to the server (e.g. sudo)
# Raises:
# GitlabAuthenticationError: If authentication is not correct
# GitlabListError: If the list could not be retrieved
# Returns:
# The list of participants
# """
# path = f"{self.manager.path}/{self.encoded_id}/participants"
# result = self.manager.gitlab.http_get(path, **kwargs)
# if TYPE_CHECKING:
# assert not isinstance(result, requests.Response)
# return result
# class BadgeRenderMixin(_RestManagerBase):
# @cli.register_custom_action(
# ("GroupBadgeManager", "ProjectBadgeManager"), ("link_url", "image_url")
# )
# @exc.on_http_error(exc.GitlabRenderError)
# def render(self, link_url: str, image_url: str, **kwargs: Any) -> Dict[str, Any]:
# """Preview link_url and image_url after interpolation.
# Args:
# link_url: URL of the badge link
# image_url: URL of the badge image
# **kwargs: Extra options to send to the server (e.g. sudo)
# Raises:
# GitlabAuthenticationError: If authentication is not correct
# GitlabRenderError: If the rendering failed
# Returns:
# The rendering properties
# """
# path = f"{self.path}/render"
# data = {"link_url": link_url, "image_url": image_url}
# result = self.gitlab.http_get(path, data, **kwargs)
# if TYPE_CHECKING:
# assert not isinstance(result, requests.Response)
# return result
# class PromoteMixin(_RestObjectBase):
# _id_attr: Optional[str]
# _attrs: Dict[str, Any]
# _module: ModuleType
# _parent_attrs: Dict[str, Any]
# _updated_attrs: Dict[str, Any]
# _update_uses_post: bool = False
# manager: base.RESTManager
# def _get_update_method(
# self,
# ) -> Callable[..., Union[Dict[str, Any], requests.Response]]:
# """Return the HTTP method to use.
# Returns:
# http_put (default) or http_post
# """
# if self._update_uses_post:
# http_method = self.manager.gitlab.http_post
# else:
# http_method = self.manager.gitlab.http_put
# return http_method
# @exc.on_http_error(exc.GitlabPromoteError)
# def promote(self, **kwargs: Any) -> Dict[str, Any]:
# """Promote the item.
# Args:
# **kwargs: Extra options to send to the server (e.g. sudo)
# Raises:
# GitlabAuthenticationError: If authentication is not correct
# GitlabPromoteError: If the item could not be promoted
# GitlabParsingError: If the json data could not be parsed
# Returns:
# The updated object data (*not* a RESTObject)
# """
# path = f"{self.manager.path}/{self.encoded_id}/promote"
# http_method = self._get_update_method()
# result = http_method(path, **kwargs)
# if TYPE_CHECKING:
# assert not isinstance(result, requests.Response)
# return result | PypiClean |
/MindsDB-23.8.3.0.tar.gz/MindsDB-23.8.3.0/mindsdb/interfaces/jobs/scheduler.py | import random
import time
import datetime as dt
import threading
import queue
from mindsdb.utilities.config import Config
from mindsdb.utilities.log import initialize_log
from mindsdb.utilities import log
from mindsdb.interfaces.storage import db
from mindsdb.interfaces.jobs.jobs_controller import JobsExecutor
logger = log.get_log('jobs')
def execute_async(q_in, q_out):
    """Worker-thread loop: execute job tasks received on *q_in*.

    Each message is a dict; any message whose 'type' is not 'task' stops
    the loop. After every task attempt a token is put on *q_out* so the
    scheduler thread blocked in ``Scheduler.execute_task`` can proceed.
    """
    while True:
        task = q_in.get()
        if task['type'] != 'task':
            return
        record_id = task['record_id']
        history_id = task['history_id']
        executor = JobsExecutor()
        try:
            executor.execute_task_local(record_id, history_id)
        except (KeyboardInterrupt, SystemExit):
            q_out.put(True)
            raise
        except Exception:
            # Keep the DB session usable for the next task.
            db.session.rollback()
        # FIX: always signal completion. Previously only the exception paths
        # put a token on q_out, so after a *successful* task the scheduler
        # thread waiting in Scheduler.execute_task's q_out.get() retry loop
        # never woke up (deadlock, endlessly refreshing the history record).
        q_out.put(True)
class Scheduler:
    """Polls the jobs timetable and executes due jobs on a worker thread.

    A single background thread runs ``execute_async``; tasks are handed to
    it via ``q_in`` and completion is signalled back via ``q_out``.
    """
    def __init__(self, config=None):
        # config may be None here; start() loads the real Config.
        self.config = config
        self.q_in = queue.Queue()
        self.q_out = queue.Queue()
        self.work_thread = threading.Thread(target=execute_async, args=(self.q_in, self.q_out))
        self.work_thread.start()
    def __del__(self):
        # Best-effort shutdown of the worker thread on garbage collection.
        self.stop_thread()
    def stop_thread(self):
        """Ask the worker loop to exit (any non-'task' message stops it)."""
        self.q_in.put({
            'type': 'exit'
        })
    def scheduler_monitor(self):
        """Main loop: check the timetable forever, logging (not raising) errors."""
        check_interval = self.config.get('jobs', {}).get('check_interval', 30)
        while True:
            logger.debug('Scheduler check timetable')
            try:
                self.check_timetable()
            except (SystemExit, KeyboardInterrupt):
                raise
            except Exception as e:
                logger.error(e)
            # Random jitter so multiple scheduler instances don't poll in lockstep.
            time.sleep(check_interval + random.randint(1, 10))
    def check_timetable(self):
        """Run every due job returned by the executor, then drop the DB session."""
        executor = JobsExecutor()
        exec_method = self.config.get('jobs', {}).get('executor', 'local')
        for record in executor.get_next_tasks():
            logger.info(f'Job execute: {record.name}({record.id})')
            self.execute_task(record.id, exec_method)
        db.session.remove()
    def execute_task(self, record_id, exec_method):
        """Lock one job record and hand it to the worker thread, then wait.

        While waiting, the matching history record's updated_at is refreshed
        every 3s so other instances can see the job is still alive.
        """
        executor = JobsExecutor()
        if exec_method == 'local':
            history_id = executor.lock_record(record_id)
            if history_id is None:
                # Another instance holds the lock for this record.
                # db.session.remove()
                logger.info(f'Unable create history record for {record_id}, is locked?')
                return
            # run in thread
            self.q_in.put({
                'type': 'task',
                'record_id': record_id,
                'history_id': history_id,
            })
            while True:
                try:
                    self.q_out.get(timeout=3)
                    break
                except queue.Empty:
                    # Still running: refresh the heartbeat timestamp.
                    history_record = db.JobsHistory.query.get(history_id)
                    history_record.updated_at = dt.datetime.now()
                    db.session.commit()
        else:
            # TODO add microservice mode
            raise NotImplementedError()
    def start(self):
        """Initialize config/DB/logging and run the monitor loop (blocks)."""
        config = Config()
        db.init()
        initialize_log(config, 'jobs', wrap_print=True)
        self.config = config
        logger.info('Scheduler starts')
        try:
            self.scheduler_monitor()
        except (KeyboardInterrupt, SystemExit):
            self.stop_thread()
            pass
def start(verbose=False):
    """Module entry point: run the job scheduler loop (blocks forever).

    The *verbose* flag is accepted for interface compatibility but is not
    used by the scheduler itself.
    """
    Scheduler().start()


if __name__ == '__main__':
    start()
/EasyMenusBar-0.3.5.tar.gz/EasyMenusBar-0.3.5/menusbar/core/MenusBar.py |
from PySide2.QtWidgets import QWidget, QFileSystemModel, QTreeView, QAction, QMenu, QVBoxLayout, QAbstractItemView, \
QShortcut
from PySide2 import QtCore, QtGui
from PySide2.QtCore import QMargins, QFileInfo, QUrl, Qt
from PySide2.QtGui import QMouseEvent, QDesktopServices, QKeySequence
from menusbar.core.FileUtils import FileUtils
from .Dialog import Dialog as MKDialog
import json
import os
class MemusBar(QWidget):
handlerAfterSignal = QtCore.Signal(int, str)
handlerBeforeSignal = QtCore.Signal(int, str)
openFileSignal = QtCore.Signal(QFileInfo)
HANLDER_CREATE_DIR = 0
HANLDER_DELETE_DIR = 1
HANLDER_CREATE_FILE = 2
HANLDER_DELETE_FILE = 3
def __init__(self, parent=None, settingFileName: str = "setting.json"):
QWidget.__init__(self, parent)
self.settingFileName = settingFileName
self.init()
self._initUI()
self.initEvent()
    def init(self):
        """Initialize default (non-UI) state for the widget."""
        self.filtersList = list()  # name filters (consumers not visible in this chunk)
        self.isLive = False  # presumably True once a working dir is active -- TODO confirm
        self.workDir = None  # absolute path of the current working directory
        self.defualtSuffix = 'md'  # (sic) default suffix appended to new file names
        self.defualtSuffixList = None  # optional whitelist of allowed suffixes
        self.settingFileSavePath = None  # lazily resolved in getSettingSavePath()
        self.fileUtils = FileUtils()
        self.SP_KEY_WORKDIR = "lastWorkDirRecord"  # settings key for the last work dir
        self.defaultContent: list = None  # default content passed to createFile()
def checkWorkDir(self) -> bool:
if self.workDir and os.path.exists(self.workDir) and os.path.isdir(self.workDir):
return True
else:
return False
def createDir(self, value):
if self.checkWorkDir():
self.fileUtils.createDir(self.workDir, value)
else:
raise RuntimeError('This workd dir is not init')
def createFile(self, value):
if self.checkWorkDir():
self.fileUtils.createFile(self.workDir, value, self.defaultContent)
else:
raise RuntimeError('This workd dir is not init')
    def initEvent(self):
        """Register keyboard shortcuts: Ctrl+D delete, Ctrl+K new file,
        Shift+Ctrl+K new directory."""
        shortcut = QShortcut(QKeySequence(Qt.CTRL + Qt.Key_D), self)
        shortcut.activated.connect(self.deleteShortcutEvent)
        shortcut = QShortcut(QKeySequence(Qt.CTRL + Qt.Key_K), self)
        shortcut.activated.connect(self.createFileShortcutEvent)
        shortcut = QShortcut(QKeySequence(Qt.SHIFT + Qt.CTRL + Qt.Key_K), self)
        shortcut.activated.connect(self.createDirShortcutEvent)
    def createDirShortcutEvent(self):
        """Shift+Ctrl+K handler: prompt for a name and create a directory at
        the tree's current selection, firing before/after handler hooks."""
        if self.isLive == False:
            return
        fileInfo = self.treeModel.fileInfo(self.treeView.currentIndex())
        text = MKDialog().inputDirNameDialog()
        if text:
            # _handlerBeforeEvent may veto the operation by returning falsy.
            if self._handlerBeforeEvent(self.HANLDER_CREATE_DIR, os.path.join(self.getDir(fileInfo), text)):
                self.fileUtils.createDir(self.getDir(fileInfo), text)
                self._handlerAfterEvent(self.HANLDER_CREATE_DIR, os.path.join(self.getDir(fileInfo), text))
def createFileShortcutEvent(self):
if self.isLive == False:
return
fileInfo = self.treeModel.fileInfo(self.treeView.currentIndex())
text = MKDialog().inputFileNameDialog()
temp = str(text).split(".")
if len(temp) < 2:
text = text + "." + self.defualtSuffix
else:
fileSuffix = temp[len(temp) - 1]
if self.defualtSuffixList:
if fileSuffix not in self.defualtSuffixList:
text = text + "." + self.defualtSuffix
elif self.defualtSuffix and self.defualtSuffix != fileSuffix:
text = text + "." + self.defualtSuffix
if text:
if self._handlerBeforeEvent(self.HANLDER_CREATE_FILE, os.path.join(self.getDir(fileInfo), text)):
self.fileUtils.createFile(self.getDir(fileInfo), text)
self._handlerAfterEvent(self.HANLDER_CREATE_FILE, os.path.join(self.getDir(fileInfo), text))
def deleteShortcutEvent(self):
if self.isLive == False:
return
fileInfo = self.treeModel.fileInfo(self.treeView.currentIndex())
if fileInfo.isFile():
if self._handlerBeforeEvent(self.HANLDER_DELETE_FILE, fileInfo.filePath()):
self.fileUtils.deleteFile(fileInfo.filePath())
self._handlerAfterEvent(self.HANLDER_DELETE_FILE, fileInfo.filePath())
else:
if self._handlerBeforeEvent(self.HANLDER_DELETE_DIR, fileInfo.filePath()):
self.fileUtils.deleteDir(fileInfo.filePath())
self._handlerAfterEvent(self.HANLDER_DELETE_DIR, fileInfo.filePath())
def getSettingSavePath(self):
if not self.settingFileSavePath:
self.settingFileSavePath = self.fileUtils.getProjectPath()
return self.settingFileSavePath
def getSettingFile(self):
if not os.path.exists(os.path.join(self.getSettingSavePath(), self.settingFileName)):
with open(os.path.join(self.getSettingSavePath(), self.settingFileName), mode='w') as _:
pass
with open(os.path.join(self.getSettingSavePath(), self.settingFileName), mode='r') as jsonFile:
try:
data = json.load(jsonFile)
return data
except Exception as e:
print(e)
return {}
def saveSettingFile(self, setting):
with open(os.path.join(self.getSettingSavePath(), self.settingFileName), mode='w') as jsonFile:
json.dump(setting, jsonFile)
def getLastWorkDirRecord(self):
setting: dict = self.getSettingFile()
if self.SP_KEY_WORKDIR in setting.keys():
return setting[self.SP_KEY_WORKDIR]
else:
return os.environ['HOME']
def updateWorkDirRecord(self, value) -> str:
setting = self.getSettingFile()
setting[self.SP_KEY_WORKDIR] = value
self.saveSettingFile(setting)
return value
def initWorkDir(self, workDir: str = None, settingFilePath: str = None):
if settingFilePath:
self.settingFileSavePath = settingFilePath
if not workDir:
workDir = self.getLastWorkDirRecord()
self.updateWorkDir(workDir=workDir)
def updateWorkDir(self, workDir: str):
self.updateWorkDirRecord(workDir)
self.workDir = workDir
self.treeModel.setRootPath(self.workDir)
self.treeView.setRootIndex(self.treeModel.index(self.workDir))
self.filtersList.clear()
if self.defualtSuffixList:
for s in self.defualtSuffixList:
self.filtersList.append("*.%s" % s)
else:
self.filtersList.append("*.%s" % self.defualtSuffix)
self.treeModel.setNameFilters(self.filtersList)
def _initUI(self):
self.mainLayout = QVBoxLayout(self)
self.mainLayout.setContentsMargins(QMargins(0, 0, 0, 0))
self.treeModel = QFileSystemModel(self)
self.treeModel.setReadOnly(False)
self.treeView = QTreeView(self)
self.treeView.setDragDropMode(QAbstractItemView.InternalMove)
self.treeView.setDragEnabled(True)
self.treeView.setAcceptDrops(True)
self.treeView.setDropIndicatorShown(True)
self.setMinimumHeight(100)
self.treeView.setModel(self.treeModel)
self.treeView.setSortingEnabled(True)
self.treeView.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.treeView.customContextMenuRequested.connect(self.showContextMenu)
self.treeView.mouseDoubleClickEvent = self.mouseDoubleClickEvent
self.treeView.setColumnHidden(1, True)
self.treeView.setColumnHidden(2, True)
self.treeView.setColumnHidden(3, True)
self.mainLayout.addWidget(self.treeView, 1)
self.setLayout(self.mainLayout)
def mouseDoubleClickEvent(self, event: QMouseEvent):
if event.button() == QtCore.Qt.LeftButton:
# 是否按下左键
index = self.treeView.indexAt(event.pos())
if index.isValid():
fileInfo = self.treeModel.fileInfo(index)
self.openFileSignal.emit(fileInfo)
else:
event.accept()
def showContextMenu(self, pos):
index = self.treeView.indexAt(pos)
if index.isValid():
menu = QMenu(self)
action = QAction(menu)
action.setObjectName("create_dir")
action.setProperty("pos", pos)
action.setText("创建目录")
action.triggered.connect(self.actionHandler)
menu.addAction(action)
action = QAction(menu)
action.setObjectName("delete_dir")
action.setProperty("pos", pos)
action.setText("删除目录")
action.triggered.connect(self.actionHandler)
menu.addAction(action)
action = QAction(menu)
action.setObjectName("create_file")
action.setProperty("pos", pos)
action.setText("创建文件")
action.triggered.connect(self.actionHandler)
menu.addAction(action)
action = QAction(menu)
action.setObjectName("delete_file")
action.setProperty("pos", pos)
action.setText("删除文件")
menu.addAction(action)
action.triggered.connect(self.actionHandler)
action = QAction(menu)
action.setObjectName("reveal_resources")
action.setProperty("pos", pos)
action.setText("打开资源")
menu.addAction(action)
action.triggered.connect(self.actionHandler)
menu.exec_(self.treeView.mapToGlobal(pos))
def handlerBeforeEvent(self, type: int, filePath: str) -> bool:
return True
def _handlerBeforeEvent(self, type: int, filePath: str) -> bool:
self.handlerBeforeSignal.emit(type, filePath)
return self.handlerBeforeEvent(type, filePath)
def _handlerAfterEvent(self, type: int, filePath: str):
self.handlerAfterSignal.emit(type, filePath)
self.handlerAfterEvent(type, filePath)
def handlerAfterEvent(self, type: int, filePath: str):
pass
def actionHandler(self):
if not self.checkWorkDir():
raise RuntimeError('This workd dir is not init')
type = self.sender().objectName()
fileInfo = self.treeModel.fileInfo(self.treeView.currentIndex())
# print(fileInfo.fileName())
# print(fileInfo.filePath())
# print(fileInfo.path())
# print(fileInfo.isRoot())
file = fileInfo.filePath()
if type == "create_dir":
text = MKDialog().inputDirNameDialog()
if text:
if self._handlerBeforeEvent(self.HANLDER_CREATE_DIR, os.path.join(self.getDir(fileInfo), text)):
self.fileUtils.createDir(self.getDir(fileInfo), text)
self._handlerAfterEvent(self.HANLDER_CREATE_DIR, os.path.join(self.getDir(fileInfo), text))
elif type == "delete_dir":
if self._handlerBeforeEvent(self.HANLDER_DELETE_DIR, fileInfo.filePath()):
self.fileUtils.deleteDir(fileInfo.filePath())
self._handlerAfterEvent(self.HANLDER_DELETE_DIR, fileInfo.filePath())
elif type == "create_file":
text = MKDialog().inputFileNameDialog()
temp = str(text).split(".")
if len(temp) < 2:
text = text + "." + self.defualtSuffix
else:
fileSuffix = temp[len(temp) - 1]
if self.defualtSuffixList:
if fileSuffix not in self.defualtSuffixList:
text = text + "." + self.defualtSuffix
elif self.defualtSuffix and self.defualtSuffix != fileSuffix:
text = text + "." + self.defualtSuffix
if text:
if self._handlerBeforeEvent(self.HANLDER_CREATE_FILE, os.path.join(self.getDir(fileInfo), text)):
self.fileUtils.createFile(self.getDir(fileInfo), text)
self._handlerAfterEvent(self.HANLDER_CREATE_FILE, os.path.join(self.getDir(fileInfo), text))
elif type == "delete_file":
if self._handlerBeforeEvent(self.HANLDER_DELETE_FILE, file):
self.fileUtils.deleteFile(file)
self._handlerAfterEvent(self.HANLDER_DELETE_FILE, file)
elif type == "reveal_resources":
url = QUrl("file:%s" % self.getDir(fileInfo), QUrl.TolerantMode)
QDesktopServices.openUrl(url)
def getDir(self, fileInfo: QFileInfo):
if fileInfo.isFile():
return fileInfo.path()
else:
return fileInfo.filePath()
def setSupportFileSuffix(self, suffix: list, defualtSuffix: str):
self.defualtSuffixList = suffix
self.defualtSuffix = defualtSuffix
def enterEvent(self, event: QtCore.QEvent):
self.isLive = True
def leaveEvent(self, event: QtCore.QEvent):
self.isLive = False | PypiClean |
/Mathics3-6.0.2.tar.gz/Mathics3-6.0.2/mathics/doc/common_doc.py | import importlib
import os.path as osp
import pkgutil
import re
from os import getenv, listdir
from types import ModuleType
from typing import Callable
from mathics import builtin, settings
from mathics.builtin.base import check_requires_list
from mathics.core.evaluation import Message, Print
from mathics.core.util import IS_PYPY
from mathics.doc.utils import slugify
from mathics.eval.pymathics import pymathics_builtins_by_module, pymathics_modules
# These are all the XML/HTML-like tags that documentation supports.
ALLOWED_TAGS = (
"dl",
"dd",
"dt",
"em",
"url",
"ul",
"i",
"ol",
"li",
"con",
"console",
"img",
"imgpng",
"ref",
"subsection",
)
ALLOWED_TAGS_RE = dict(
(allowed, re.compile("<(%s.*?)>" % allowed)) for allowed in ALLOWED_TAGS
)
# This string is used, so we can indicate a trailing blank at the end of a line by
# adding this string to the end of the line which gets stripped off.
# Some editors and formatters like to strip off trailing blanks at the ends of lines.
END_LINE_SENTINAL = "#<--#"
# The regular expressions below (strings ending with _RE
# pull out information from docstring or text in a file. Ghetto parsing.
CHAPTER_RE = re.compile('(?s)<chapter title="(.*?)">(.*?)</chapter>')
CONSOLE_RE = re.compile(r"(?s)<(?P<tag>con|console)>(?P<content>.*?)</(?P=tag)>")
DL_ITEM_RE = re.compile(
r"(?s)<(?P<tag>d[td])>(?P<content>.*?)(?:</(?P=tag)>|)\s*(?:(?=<d[td]>)|$)"
)
DL_RE = re.compile(r"(?s)<dl>(.*?)</dl>")
HYPERTEXT_RE = re.compile(
r"(?s)<(?P<tag>em|url)>(\s*:(?P<text>.*?):\s*)?(?P<content>.*?)</(?P=tag)>"
)
IMG_PNG_RE = re.compile(
r'<imgpng src="(?P<src>.*?)" title="(?P<title>.*?)" label="(?P<label>.*?)">'
)
IMG_RE = re.compile(
r'<img src="(?P<src>.*?)" title="(?P<title>.*?)" label="(?P<label>.*?)">'
)
# Preserve space before and after in-line code variables.
LATEX_RE = re.compile(r"(\s?)\$(\w+?)\$(\s?)")
LIST_ITEM_RE = re.compile(r"(?s)<li>(.*?)(?:</li>|(?=<li>)|$)")
LIST_RE = re.compile(r"(?s)<(?P<tag>ul|ol)>(?P<content>.*?)</(?P=tag)>")
MATHICS_RE = re.compile(r"(?<!\\)\'(.*?)(?<!\\)\'")
PYTHON_RE = re.compile(r"(?s)<python>(.*?)</python>")
QUOTATIONS_RE = re.compile(r"\"([\w\s,]*?)\"")
REF_RE = re.compile(r'<ref label="(?P<label>.*?)">')
SECTION_RE = re.compile('(?s)(.*?)<section title="(.*?)">(.*?)</section>')
SPECIAL_COMMANDS = {
"LaTeX": (r"<em>LaTeX</em>", r"\LaTeX{}"),
"Mathematica": (
r"<em>Mathematica</em>®",
r"\emph{Mathematica}\textregistered{}",
),
"Mathics": (r"<em>Mathics3</em>", r"\emph{Mathics3}"),
"Mathics3": (r"<em>Mathics3</em>", r"\emph{Mathics3}"),
"Sage": (r"<em>Sage</em>", r"\emph{Sage}"),
"Wolfram": (r"<em>Wolfram</em>", r"\emph{Wolfram}"),
"skip": (r"<br /><br />", r"\bigskip"),
}
SUBSECTION_END_RE = re.compile("</subsection>")
SUBSECTION_RE = re.compile('(?s)<subsection title="(.*?)">')
TESTCASE_RE = re.compile(
r"""(?mx)^ # re.MULTILINE (multi-line match) and re.VERBOSE (readable regular expressions
((?:.|\n)*?)
^\s+([>#SX])>[ ](.*) # test-code indicator
((?:\n\s*(?:[:|=.][ ]|\.).*)*) # test-code results"""
)
TESTCASE_OUT_RE = re.compile(r"^\s*([:|=])(.*)$")
# Used for getting test results by test expresson and chapter/section information.
test_result_map = {}
def _replace_all(text, pairs):
for (i, j) in pairs:
text = text.replace(i, j)
return text
def get_module_doc(module: ModuleType) -> tuple:
    """Extract a (title, text) pair from *module*'s docstring.

    The first docstring line becomes the title and the remaining lines the
    text.  When the docstring is missing or blank, fall back to the
    capitalized module name (with any "mathics.builtin." /
    "mathics.optional." prefix stripped) and an empty text.
    """
    doc = module.__doc__
    doc = doc.strip() if doc is not None else None
    if doc:
        lines = doc.splitlines()
        return lines[0], "\n".join(lines[1:])
    # No usable docstring: derive a title from the module name.
    title = module.__name__
    for prefix in ("mathics.builtin.", "mathics.optional."):
        if title.startswith(prefix):
            title = title[len(prefix) :]
    return title.capitalize(), ""
def get_results_by_test(test_expr: str, full_test_key: list, doc_data: dict) -> dict:
    """
    Sometimes test numbering is off, either due to bugs or changes since the
    data was read.

    Here, we compensate for this by looking up the test by its chapter and
    section name portion stored in `full_test_key` along with the test
    expression data stored in `test_expr`.

    This new key is looked up in the module-level `test_result_map` and its
    value is returned.

    `doc_data` is used only the first time this is called, to populate
    `test_result_map`.
    """
    # Strip off the test index to form a new key from the chapter/section
    # portion only.  This is what we use as a key into test_result_map.
    test_section = list(full_test_key)[:-1]
    search_key = tuple(test_section)

    if not test_result_map:
        # Populate test_result_map from doc_data (first call only; it is a
        # module-level cache shared across calls).
        for key, result in doc_data.items():
            test_section = list(key)[:-1]
            new_test_key = tuple(test_section)
            next_result = test_result_map.get(new_test_key, None)
            if next_result:
                next_result.append(result)
            else:
                next_result = [result]
            test_result_map[new_test_key] = next_result

    results = test_result_map.get(search_key, None)
    result = {}
    if results:
        # Disambiguate among the section's results by the test expression.
        for result_candidate in results:
            if result_candidate["query"] == test_expr:
                if result:
                    # Already found something: ambiguous, so give up.
                    print(f"Warning, multiple results appear under {search_key}.")
                    return {}
                else:
                    result = result_candidate
    return result
def get_submodule_names(object) -> list:
    """Return the sorted submodule names of the package *object*.

    Many builtins are organized into "Guide Doc" modules (for example
    ``mathics.builtin.colors``) whose submodules (such as
    ``mathics.builtin.colors.named_colors``) hold the builtin functions of
    one group.  This returns the names of those submodules; a plain
    (non-package) module, which has no ``__path__``, yields an empty list.
    """
    if not hasattr(object, "__path__"):
        return []
    names = [module_info[1] for module_info in pkgutil.iter_modules(object.__path__)]
    names.sort()
    return names
def filter_comments(doc: str) -> str:
    """Strip documentation comment lines — lines whose first non-blank
    characters are "##" — from *doc*."""
    kept = []
    for line in doc.splitlines():
        if line.lstrip().startswith("##"):
            continue
        kept.append(line)
    return "\n".join(kept)
def get_doc_name_from_module(module):
    """Return the display title for *module*: the first line of its
    stripped docstring, the module name when the docstring is blank, or
    "???" when there is no docstring at all."""
    doc = module.__doc__
    if not doc:
        return "???"
    stripped = doc.strip()
    return stripped.split("\n")[0] if stripped else module.__name__
# Placeholder template written into text by pre_sub() and expanded back
# by post_sub().
POST_SUBSTITUTION_TAG = "_POST_SUBSTITUTION%d_"


def pre_sub(regexp, text: str, repl_func):
    """Replace every *regexp* match in *text* with a numbered placeholder.

    The replacement produced by *repl_func* for each match is stashed in a
    list, and the pair ``(new_text, stashed_replacements)`` is returned so
    post_sub() can splice the stashed text back in later.
    """
    stash = []

    def _stash_match(match):
        stash.append(repl_func(match))
        return POST_SUBSTITUTION_TAG % (len(stash) - 1)

    return regexp.sub(_stash_match, text), stash


def post_sub(text: str, post_substitutions) -> str:
    """Expand the numbered placeholders written by pre_sub() back into *text*."""
    for index, replacement in enumerate(post_substitutions):
        text = text.replace(POST_SUBSTITUTION_TAG % index, replacement)
    return text
def skip_doc(cls) -> bool:
    """Return True if *cls* should be skipped in docstring extraction:
    Box classes, and classes explicitly marked with a truthy ``no_doc``."""
    if cls.__name__.endswith("Box"):
        return True
    return getattr(cls, "no_doc", False)
class Tests:
    """A bundle of doctests plus the part/chapter/section they belong to."""

    # FIXME: add optional guide section
    def __init__(self, part: str, chapter: str, section: str, doctests):
        self.part = part
        self.chapter = chapter
        self.section = section
        self.tests = doctests
def skip_module_doc(module, modules_seen) -> bool:
    """Return True when *module* should not be documented: it has no
    docstring, was already processed, lives outside the mathics/pymathics
    namespaces, or is explicitly flagged with a truthy ``no_doc``."""
    if module.__doc__ is None:
        return True
    if module in modules_seen:
        return True
    top_level_package = module.__name__.split(".")[0]
    if top_level_package not in ("mathics", "pymathics"):
        return True
    return hasattr(module, "no_doc") and module.no_doc
def sorted_chapters(chapters: list) -> list:
    """Return a new list of *chapters* ordered alphabetically by title."""

    def chapter_title(chapter):
        return chapter.title

    return sorted(chapters, key=chapter_title)
def gather_tests(
    doc: str,
    test_collection_constructor: Callable,
    test_case_constructor: Callable,
    text_constructor: Callable,
    key_part=None,
) -> list:
    """
    Parse string `doc` (using regular expressions) into a list of Python
    objects: alternating text objects and test-collection objects.

    test_collection_constructor() is the class constructor called to create
    an object for each run of consecutive tests.  Each individual test is
    created via test_case_constructor(index, testcase, key_part).  Text
    between tests is wrapped via text_constructor().
    """
    # Remove commented lines.
    # NOTE(review): strip(r"\s") strips the literal characters "\" and "s"
    # from the ends, not whitespace — confirm whether .strip() was intended.
    doc = filter_comments(doc).strip(r"\s")
    # Remove leading <dl>...</dl>
    # doc = DL_RE.sub("", doc)
    # pre-substitute Python code because it might contain tests
    doc, post_substitutions = pre_sub(
        PYTHON_RE, doc, lambda m: "<python>%s</python>" % m.group(1)
    )
    # HACK: Artificially construct a last testcase to get the "intertext"
    # after the last (real) testcase. Ignore the test, of course.
    doc += "\n >> test\n = test"
    testcases = TESTCASE_RE.findall(doc)
    tests = None
    items = []
    for index in range(len(testcases)):
        testcase = list(testcases[index])
        # First match group is the plain text preceding this test.
        text = testcase.pop(0).strip()
        if text:
            # Flush any test collection accumulated so far, then store
            # the intervening text (with Python code spliced back in).
            if tests is not None:
                items.append(tests)
                tests = None
            text = post_sub(text, post_substitutions)
            items.append(text_constructor(text))
            tests = None
        # Skip the artificial trailing test appended above.
        if index < len(testcases) - 1:
            test = test_case_constructor(index, testcase, key_part)
            if tests is None:
                tests = test_collection_constructor()
            tests.tests.append(test)
    if tests is not None:
        items.append(tests)
        tests = None
    return items
class Documentation:
    """Container and builder for a documentation tree of parts, chapters,
    sections and subsections.

    NOTE(review): subclasses are expected to provide the factory
    attributes used below (doc_part_fn, doc_chapter_fn, doc_section_fn,
    doc_guide_section_fn, doc_subsection_fn, doc_fn) as well as ``parts``,
    ``parts_by_slug`` and ``doc_dir`` — confirm against the subclasses;
    none of them are set here.
    """

    def __init__(self, part, title: str, doc=None):
        # NOTE(review): this constructor mirrors DocChapter.__init__ and
        # registers *self* in part.chapters_by_slug — confirm whether
        # Documentation is really meant to act as a chapter here.
        self.doc = doc
        self.guide_sections = []
        self.part = part
        self.sections = []
        self.sections_by_slug = {}
        self.slug = slugify(title)
        self.title = title
        part.chapters_by_slug[self.slug] = self

    def add_section(
        self,
        chapter,
        section_name: str,
        section_object,
        operator,
        is_guide: bool = False,
        in_guide: bool = False,
    ):
        """
        Add a DocSection or DocGuideSection object to *chapter* (a
        DocChapter object) and return it.

        ``section_object`` is either a Python module or a class instance;
        its docstring provides the section text.  Returns None when the
        object has no docstring.
        """
        # Whether all external requirements of the object are importable.
        installed = check_requires_list(getattr(section_object, "requires", []))
        # FIXME add an additional mechanism in the module
        # to allow a docstring and indicate it is not to go in the
        # user manual
        if not section_object.__doc__:
            return
        if is_guide:
            section = self.doc_guide_section_fn(
                chapter,
                section_name,
                section_object.__doc__,
                section_object,
                installed=installed,
            )
            chapter.guide_sections.append(section)
        else:
            section = self.doc_section_fn(
                chapter,
                section_name,
                section_object.__doc__,
                operator=operator,
                installed=installed,
                in_guide=in_guide,
            )
            chapter.sections.append(section)
        return section

    def add_subsection(
        self,
        chapter,
        section,
        subsection_name: str,
        instance,
        operator=None,
        in_guide=False,
    ):
        # NOTE(review): this `installed` value is unconditionally
        # overwritten by the import-check loop below, so this call is
        # effectively dead code — confirm which check is intended.
        installed = check_requires_list(getattr(instance, "requires", []))
        # FIXME add an additional mechanism in the module
        # to allow a docstring and indicate it is not to go in the
        # user manual
        """
        Append a subsection for ``instance`` into ``section.subsections``
        """
        installed = True
        for package in getattr(instance, "requires", []):
            try:
                importlib.import_module(package)
            except ImportError:
                installed = False
                break
        # FIXME add an additional mechanism in the module
        # to allow a docstring and indicate it is not to go in the
        # user manual
        if not instance.__doc__:
            return
        # Prefer the builtin's own summary_text when it defines one.
        summary_text = (
            instance.summary_text if hasattr(instance, "summary_text") else ""
        )
        subsection = self.doc_subsection_fn(
            chapter,
            section,
            subsection_name,
            instance.__doc__,
            operator=operator,
            installed=installed,
            in_guide=in_guide,
            summary_text=summary_text,
        )
        section.subsections.append(subsection)

    def doc_part(self, title, modules, builtins_by_module, start):
        """
        Produce documentation for a "Part" - reference section or
        possibly Pymathics modules
        """
        builtin_part = self.doc_part_fn(self, title, is_reference=start)
        modules_seen = set([])
        # Modules are processed in sort_order (falling back to module
        # name) when want_sorting is on.
        want_sorting = True
        if want_sorting:
            module_collection_fn = lambda x: sorted(
                modules,
                key=lambda module: module.sort_order
                if hasattr(module, "sort_order")
                else module.__name__,
            )
        else:
            module_collection_fn = lambda x: x
        for module in module_collection_fn(modules):
            if skip_module_doc(module, modules_seen):
                continue
            title, text = get_module_doc(module)
            chapter = self.doc_chapter_fn(
                builtin_part, title, self.doc_fn(text, title, None)
            )
            builtins = builtins_by_module[module.__name__]
            sections = [
                builtin for builtin in builtins if not skip_doc(builtin.__class__)
            ]
            if module.__file__.endswith("__init__.py"):
                # We have a Guide Section: a package whose submodules each
                # become a section inside the guide.
                name = get_doc_name_from_module(module)
                guide_section = self.add_section(
                    chapter, name, module, operator=None, is_guide=True
                )
                submodules = [
                    value
                    for value in module.__dict__.values()
                    if isinstance(value, ModuleType)
                ]
                sorted_submodule = lambda x: sorted(
                    submodules,
                    key=lambda submodule: submodule.sort_order
                    if hasattr(submodule, "sort_order")
                    else submodule.__name__,
                )
                # Add sections in the guide section...
                for submodule in sorted_submodule(submodules):
                    if skip_module_doc(submodule, modules_seen):
                        continue
                    elif IS_PYPY and submodule.__name__ == "builtins":
                        # PyPy seems to add this module on its own,
                        # but it is not something that can be importable
                        continue
                    submodule_name = get_doc_name_from_module(submodule)
                    section = self.add_section(
                        chapter,
                        submodule_name,
                        submodule,
                        operator=None,
                        is_guide=False,
                        in_guide=True,
                    )
                    modules_seen.add(submodule)
                    guide_section.subsections.append(section)
                    builtins = builtins_by_module.get(submodule.__name__, [])
                    subsections = [builtin for builtin in builtins]
                    for instance in subsections:
                        if hasattr(instance, "no_doc") and instance.no_doc:
                            continue
                        modules_seen.add(instance)
                        # NOTE(review): this local `name` is unused; the
                        # call below re-invokes get_name().
                        name = instance.get_name(short=True)
                        self.add_subsection(
                            chapter,
                            section,
                            instance.get_name(short=True),
                            instance,
                            instance.get_operator(),
                            in_guide=True,
                        )
            else:
                # Plain module: its builtins become sections directly.
                self.doc_sections(sections, modules_seen, chapter)
            builtin_part.chapters.append(chapter)
        self.parts.append(builtin_part)

    def doc_sections(self, sections, modules_seen, chapter):
        """Add one DocSection per not-yet-seen, documented builtin in
        *sections* to *chapter*."""
        for instance in sections:
            if instance not in modules_seen and (
                not hasattr(instance, "no_doc") or not instance.no_doc
            ):
                name = instance.get_name(short=True)
                self.add_section(
                    chapter,
                    name,
                    instance,
                    instance.get_operator(),
                    is_guide=False,
                    in_guide=False,
                )
                modules_seen.add(instance)

    def gather_doctest_data(self):
        """
        Extract doctest data from various static XML-like doc files, Mathics3 Built-in functions
        (inside mathics.builtin), and external Mathics3 Modules.

        The extracted structure is stored in ``self``.
        """
        # First gather data from static XML-like files. This constitutes "Part 1" of the
        # documentation.
        files = listdir(self.doc_dir)
        files.sort()
        appendix = []
        for file in files:
            # File names look like "NN-Title.mdoc"; the leading two
            # characters are dropped to get the part title.
            part_title = file[2:]
            if part_title.endswith(".mdoc"):
                part_title = part_title[: -len(".mdoc")]
            part = self.doc_part_fn(self, part_title)
            text = open(osp.join(self.doc_dir, file), "rb").read().decode("utf8")
            text = filter_comments(text)
            chapters = CHAPTER_RE.findall(text)
            for title, text in chapters:
                chapter = self.doc_chapter_fn(part, title)
                # Sentinel empty section so SECTION_RE also captures the
                # trailing text of the chapter.
                text += '<section title=""></section>'
                sections = SECTION_RE.findall(text)
                for pre_text, title, text in sections:
                    if title:
                        section = self.doc_section_fn(
                            chapter, title, text, operator=None, installed=True
                        )
                        chapter.sections.append(section)
                        subsections = SUBSECTION_RE.findall(text)
                        for subsection_title in subsections:
                            subsection = self.doc_subsection_fn(
                                chapter,
                                section,
                                subsection_title,
                                text,
                            )
                            section.subsections.append(subsection)
                            pass
                        pass
                    else:
                        section = None
                    # The text before the first titled section becomes the
                    # chapter's own doc.
                    if not chapter.doc:
                        chapter.doc = self.doc_fn(pre_text, title, section)
                    pass
                part.chapters.append(chapter)
            # Files whose names start with a digit are regular parts;
            # the rest are appendices appended at the very end.
            if file[0].isdigit():
                self.parts.append(part)
            else:
                part.is_appendix = True
                appendix.append(part)
        # Next extract data that has been loaded into Mathics3 when it runs.
        # This is information from `mathics.builtin`.
        # This is Part 2 of the documentation.
        for title, modules, builtins_by_module, start in [
            (
                "Reference of Built-in Symbols",
                builtin.modules,
                builtin.builtins_by_module,
                True,
            )
        ]:
            self.doc_part(title, modules, builtins_by_module, start)
        # Now extract external Mathics3 Modules that have been loaded via
        # LoadModule, or eval_LoadModule.
        # This is Part 3 of the documentation.
        for title, modules, builtins_by_module, start in [
            (
                "Mathics3 Modules",
                pymathics_modules,
                pymathics_builtins_by_module,
                True,
            )
        ]:
            self.doc_part(title, modules, builtins_by_module, start)
        # Now extract Appendix information. This includes License text.
        # This is the final Part of the documentation.
        for part in appendix:
            self.parts.append(part)
        # Via the wanderings above, collect all tests that have been
        # seen.
        #
        # Each test is accessible by its part + chapter + section and test number
        # in that section.
        for tests in self.get_tests():
            for test in tests.tests:
                test.key = (tests.part, tests.chapter, tests.section, test.index)
        return

    def get_part(self, part_slug):
        """Look up a part by its slug, or None."""
        return self.parts_by_slug.get(part_slug)

    def get_chapter(self, part_slug, chapter_slug):
        """Look up a chapter by part and chapter slugs, or None."""
        part = self.parts_by_slug.get(part_slug)
        if part:
            return part.chapters_by_slug.get(chapter_slug)
        return None

    def get_section(self, part_slug, chapter_slug, section_slug):
        """Look up a section by part/chapter/section slugs, or None."""
        part = self.parts_by_slug.get(part_slug)
        if part:
            chapter = part.chapters_by_slug.get(chapter_slug)
            if chapter:
                return chapter.sections_by_slug.get(section_slug)
        return None

    def get_subsection(self, part_slug, chapter_slug, section_slug, subsection_slug):
        """Look up a subsection by its four slugs, or None."""
        part = self.parts_by_slug.get(part_slug)
        if part:
            chapter = part.chapters_by_slug.get(chapter_slug)
            if chapter:
                section = chapter.sections_by_slug.get(section_slug)
                if section:
                    return section.subsections_by_slug.get(subsection_slug)
        return None

    def get_tests(self, want_sorting=False):
        """Yield Tests bundles for every chapter, section and (for guide
        sections) subsection in the documentation tree."""
        for part in self.parts:
            if want_sorting:
                chapter_collection_fn = lambda x: sorted_chapters(x)
            else:
                chapter_collection_fn = lambda x: x
            for chapter in chapter_collection_fn(part.chapters):
                tests = chapter.doc.get_tests()
                if tests:
                    yield Tests(part.title, chapter.title, "", tests)
                for section in chapter.all_sections:
                    if section.installed:
                        if isinstance(section, DocGuideSection):
                            for docsection in section.subsections:
                                for docsubsection in docsection.subsections:
                                    # FIXME: Something is weird here where tests for subsection items
                                    # appear not as a collection but individually and need to be
                                    # iterated below. Probably some other code is faulty and
                                    # when fixed the below loop and collection into doctest_list[]
                                    # will be removed.
                                    if not docsubsection.installed:
                                        continue
                                    doctest_list = []
                                    index = 1
                                    for doctests in docsubsection.items:
                                        doctest_list += list(doctests.get_tests())
                                    # Renumber the collected tests sequentially.
                                    for test in doctest_list:
                                        test.index = index
                                        index += 1
                                    if doctest_list:
                                        yield Tests(
                                            section.chapter.part.title,
                                            section.chapter.title,
                                            docsubsection.title,
                                            doctest_list,
                                        )
                        else:
                            tests = section.doc.get_tests()
                            if tests:
                                yield Tests(
                                    part.title, chapter.title, section.title, tests
                                )
                                pass
                            pass
                        pass
                    pass
                pass
            pass
        return
class DocChapter:
    """A documentation chapter: an ordered collection of sections and
    guide sections belonging to a part."""

    def __init__(self, part, title, doc=None):
        self.part = part
        self.title = title
        self.doc = doc
        self.slug = slugify(title)
        self.sections = []
        self.sections_by_slug = {}
        self.guide_sections = []
        # Register this chapter with its parent part, keyed by slug.
        part.chapters_by_slug[self.slug] = self

    def __str__(self):
        body = "\n".join(str(section) for section in self.sections)
        return f"= {self.title} =\n\n{body}"

    @property
    def all_sections(self):
        """Plain and guide sections together, sorted (DocSection defines
        ordering by title)."""
        return sorted(self.sections + self.guide_sections)
class DocSection:
    """A documentation section inside a chapter.

    Sections compare and sort by title.
    """

    def __init__(
        self,
        chapter,
        title: str,
        text: str,
        operator,
        installed=True,
        in_guide=False,
        summary_text="",
    ):
        self.chapter = chapter
        self.title = title
        self.slug = slugify(title)
        self.operator = operator
        self.installed = installed
        self.in_guide = in_guide
        self.summary_text = summary_text
        # Tests land here when this section sits under a guide section.
        self.items = []
        self.subsections = []
        self.subsections_by_slug = {}
        if text.count("<dl>") != text.count("</dl>"):
            raise ValueError(
                "Missing opening or closing <dl> tag in "
                "{} documentation".format(title)
            )
        # XMLDoc reads self.chapter, so it must already be assigned here.
        self.doc = XMLDoc(text, title, self)
        chapter.sections_by_slug[self.slug] = self

    # Ordering by title lets chapters sort their sections.
    def __eq__(self, other):
        return self.title == other.title

    def __lt__(self, other):
        return self.title < other.title

    def __str__(self):
        return f"== {self.title} ==\n{self.doc}"
class DocGuideSection(DocSection):
    """An object for a Documented Guide Section.

    A Guide Section is part of a Chapter ("Colors" or "Special Functions"
    are examples) and groups a number of related Sections such as
    NamedColors or Orthogonal Polynomials.
    """

    def __init__(
        self, chapter: str, title: str, text: str, submodule, installed: bool = True
    ):
        self.chapter = chapter
        self.title = title
        self.slug = slugify(title)
        self.doc = XMLDoc(text, title, None)
        self.in_guide = False
        self.installed = installed
        self.section = submodule
        self.subsections = []
        self.subsections_by_slug = {}
        # FIXME: Sections never are operators. Subsections can have
        # operators though. Fix up the view and searching code not to
        # look for the operator field of a section.
        self.operator = False
        if text.count("<dl>") != text.count("</dl>"):
            raise ValueError(
                "Missing opening or closing <dl> tag in "
                "{} documentation".format(title)
            )
        chapter.sections_by_slug[self.slug] = self

    def get_tests(self):
        """Yield the test collections found two levels down: in the
        subsections of this guide's (non-guide) sections."""
        # FIXME: The below is a little weird for Guide Sections.
        # Figure out how to make this clearer.
        # A guide section's subsections are Sections without the Guide;
        # it is *their* subsections where we generally find tests.
        for inner_section in self.subsections:
            if not inner_section.installed:
                continue
            for inner_subsection in inner_section.subsections:
                # FIXME we are omitting the section title here...
                if not inner_subsection.installed:
                    continue
                for doctests in inner_subsection.items:
                    yield doctests.get_tests()
class DocSubsection:
    """An object for a Documented Subsection.
    A Subsection is part of a Section.
    """

    def __init__(
        self,
        chapter,
        section,
        title,
        text,
        operator=None,
        installed=True,
        in_guide=False,
        summary_text="",
    ):
        """
        Information that goes into a subsection object. This can be a written text, or
        text extracted from the docstring of a builtin module or class.

        About some of the parameters...

        Some subsections are contained in a grouping module and need special work to
        get the grouping module name correct.

        For example the Chapter "Colors" is a module so the docstring text for it is in
        mathics/builtin/colors/__init__.py . In mathics/builtin/colors/named-colors.py we have
        the "section" name for the class Read (the subsection) inside it.
        """
        # Titles may carry an inline summary after " -- ".
        title_summary_text = re.split(" -- ", title)
        n = len(title_summary_text)
        # NOTE(review): self.title is reassigned to the full *title*
        # further below, so only the summary_text half of this split
        # survives — confirm whether the later reassignment is intended.
        self.title = title_summary_text[0] if n > 0 else ""
        self.summary_text = title_summary_text[1] if n > 1 else summary_text
        self.doc = XMLDoc(text, title, section)
        self.chapter = chapter
        self.in_guide = in_guide
        self.installed = installed
        self.operator = operator
        self.section = section
        self.slug = slugify(title)
        self.subsections = []
        self.title = title
        if section:
            chapter = section.chapter
            part = chapter.part
            # Note: we elide section.title
            key_prefix = (part.title, chapter.title, title)
        else:
            key_prefix = None
        if in_guide:
            # Tests haven't been picked out yet from the doc string yet.
            # Gather them here.
            self.items = gather_tests(text, DocTests, DocTest, DocText, key_prefix)
        else:
            self.items = []
        if text.count("<dl>") != text.count("</dl>"):
            raise ValueError(
                "Missing opening or closing <dl> tag in "
                "{} documentation".format(title)
            )
        self.section.subsections_by_slug[self.slug] = self

    def __str__(self):
        return f"=== {self.title} ===\n{self.doc}"
class DocTest:
    """
    DocTest formatting rules:

    * `>>` Marks test case; it will also appear as part of
           the documentation.
    * `#>` Marks test private or one that does not appear as part of
           the documentation.
    * `X>` Shows the example in the docs, but disables testing the example.
    * `S>` Shows the example in the docs, but disables testing if environment
           variable SANDBOX is set.
    * `=`  Compares the result text.
    * `:`  Compares an (error) message.
      `|`  Prints output.

    A ``testcase`` is the tuple of regex match groups from TESTCASE_RE:
    (marker character, test expression, expected-output lines).
    """

    def __init__(self, index, testcase, key_prefix=None):
        def strip_sentinal(line):
            """Remove END_LINE_SENTINAL from the end of a line if it appears.

            Some editors like to strip blanks at the end of a line.
            Since the line ends in END_LINE_SENTINAL which isn't blank,
            any blanks that appear before will be preserved.

            Some tests require some lines to be blank or entry because
            Mathics3 output can be that way
            """
            if line.endswith(END_LINE_SENTINAL):
                line = line[: -len(END_LINE_SENTINAL)]
            # Also remove any remaining trailing blanks since that
            # seems *also* what we want to do.
            return line.strip()

        self.index = index
        self.result = None  # expected "=" result text, if any
        self.outs = []  # expected Message/Print outputs, in order
        # Private test cases are executed, but NOT shown as part of the docs
        self.private = testcase[0] == "#"
        # Ignored test cases are NOT executed, but shown as part of the docs
        # Sandboxed test cases are NOT executed if environment SANDBOX is set
        if testcase[0] == "X" or (testcase[0] == "S" and getenv("SANDBOX", False)):
            self.ignore = True
            # substitute '>' again so we get the correct formatting
            testcase[0] = ">"
        else:
            self.ignore = False
        self.test = strip_sentinal(testcase[1])
        self.key = None
        if key_prefix:
            self.key = tuple(key_prefix + (index,))
        # Parse the expected-output lines: a leading "." continues the
        # previous result/output line; "=" is a result, ":" a message and
        # "|" printed output (matched by TESTCASE_OUT_RE).
        outs = testcase[2].splitlines()
        for line in outs:
            line = strip_sentinal(line)
            if line:
                if line.startswith("."):
                    # Continuation line: append (with a newline, and one
                    # leading blank dropped) to the result or last output.
                    text = line[1:]
                    if text.startswith(" "):
                        text = text[1:]
                    text = "\n" + text
                    if self.result is not None:
                        self.result += text
                    elif self.outs:
                        self.outs[-1].text += text
                    continue
                match = TESTCASE_OUT_RE.match(line)
                if not match:
                    continue
                symbol, text = match.group(1), match.group(2)
                text = text.strip()
                if symbol == "=":
                    self.result = text
                elif symbol == ":":
                    out = Message("", "", text)
                    self.outs.append(out)
                elif symbol == "|":
                    out = Print(text)
                    self.outs.append(out)

    def __str__(self):
        return self.test
# FIXME: think about - do we need this? Or can we use DjangoMathicsDocumentation and
# LaTeXMathicsDocumentation only?
class MathicsMainDocumentation(Documentation):
    """
    This module is used for creating test data and saving it to a Python Pickle file
    and running tests that appear in the documentation (doctests).

    There are other classes DjangoMathicsDocumentation and LaTeXMathicsDocumentation
    that format the data accumulated here. In fact I think those can sort of serve
    instead of this.
    """

    def __init__(self, want_sorting=False):
        # NOTE(review): `want_sorting` is unused in this initializer and
        # Documentation.__init__ is never invoked here — confirm the base
        # class requires no initialization.

        # Factory callables used when assembling the documentation tree.
        self.doc_chapter_fn = DocChapter
        self.doc_dir = settings.DOC_DIR
        self.doc_fn = XMLDoc
        self.doc_guide_section_fn = DocGuideSection
        self.doc_part_fn = DocPart
        self.doc_section_fn = DocSection
        self.doc_subsection_fn = DocSubsection
        # Where pickled LaTeX doctest data is written.
        self.doctest_latex_pcl_path = settings.DOCTEST_LATEX_DATA_PCL
        self.parts = []
        self.parts_by_slug = {}
        self.pymathics_doc_loaded = False
        # Path to an existing (readable) pickled doctest data file.
        self.doc_data_file = settings.get_doctest_latex_data_path(
            should_be_readable=True
        )
        self.title = "Overview"
class XMLDoc:
    """A class to hold our internal XML-like format data.

    Specialized classes like LaTeXDoc or and DjangoDoc provide methods for
    getting formatted output. For LaTeXDoc ``latex()`` is added while for
    DjangoDoc ``html()`` is added

    Mathics core also uses this in getting usage strings (`??`).
    """

    def __init__(self, doc, title, section=None):
        self.title = title
        if section:
            # Note: we elide section.title
            key_prefix = (section.chapter.part.title, section.chapter.title, title)
        else:
            key_prefix = None
        self.rawdoc = doc
        self.items = gather_tests(self.rawdoc, DocTests, DocTest, DocText, key_prefix)

    def __str__(self):
        return "\n".join(str(item) for item in self.items)

    def text(self, detail_level):
        """Return a rough plain-text rendering of the first doc item.

        Used for introspection; ``detail_level`` is currently ignored.
        """
        # TODO parse XML and pretty print
        # HACK
        rendered = str(self.items[0])
        rendered = "\n".join(line.strip() for line in rendered.split("\n"))
        # Strip or substitute the simple definition-list markup.
        for tag, replacement in (
            ("<dl>", ""),
            ("</dl>", ""),
            ("<dt>", " "),
            ("</dt>", ""),
            ("<dd>", " "),
            ("</dd>", ""),
        ):
            rendered = rendered.replace(tag, replacement)
        # Drop whitespace-only lines (but keep genuinely empty ones).
        return "\n".join(
            line for line in rendered.split("\n") if not line.isspace()
        )

    def get_tests(self):
        """Collect the tests of every contained item, in order."""
        return [test for item in self.items for test in item.get_tests()]
class DocPart:
    """One top-level part of the documentation: an ordered collection of
    chapters, registered on its owning document by slug.
    """

    def __init__(self, doc, title, is_reference=False):
        self.doc = doc
        self.title = title
        self.slug = slugify(title)
        self.chapters = []
        self.chapters_by_slug = {}
        self.is_reference = is_reference
        self.is_appendix = False
        # Make this part discoverable from the owning document.
        doc.parts_by_slug[self.slug] = self

    def __str__(self):
        rendered_chapters = "\n".join(
            str(ch) for ch in sorted_chapters(self.chapters)
        )
        return "%s\n\n%s" % (self.title, rendered_chapters)
class DocText:
    """A chunk of documentation text carrying no doctests.

    Implements the same small protocol as ``DocTests`` so both kinds of
    item can be walked uniformly when traversing a documentation tree.
    """

    def __init__(self, text):
        self.text = text

    def __str__(self):
        return self.text

    def get_tests(self):
        """Plain text contains no tests."""
        return []

    def is_private(self):
        """Plain text is never private."""
        return False

    def test_indices(self):
        """Plain text has no test indices."""
        return []
class DocTests:
    """A group of consecutive ``DocTest`` objects from one docstring."""

    def __init__(self):
        self.tests = []
        self.text = ""

    def __str__(self):
        return "\n".join(str(test) for test in self.tests)

    def get_tests(self):
        return self.tests

    def is_private(self):
        """True when every contained test is private (vacuously true if empty)."""
        return all(t.private for t in self.tests)

    def test_indices(self):
        """Return the index of each contained test, in order."""
        return [t.index for t in self.tests]
/BlueWhale3-Recommendation-0.1.3.tar.gz/BlueWhale3-Recommendation-0.1.3/doc/source/widgets/baselines.rst | Baselines
=========
.. figure:: ../resources/icons/user-item-baseline.svg
:width: 64pt
This widget includes four basic baseline models: Global average, User average,
Item average and User-Item baseline.
Signals
-------
**Inputs**:
- **Data**
Data set.
- **Preprocessor**
Preprocessed data.
**Outputs**:
- **Learner**
The selected learner in the widget.
- **Predictor**
Trained recommender. Signal *Predictor* sends the output signal only if
input *Data* is present.
Description
-----------
- **Global average:**
Computes the average of all ratings and use it to make predictions.
- **User average:**
Takes the average rating value of a user to make predictions.
- **Item average:**
Takes the average rating value of an item to make predictions.
- **User-Item baseline:**
Takes the bias of users and items plus the global average to make predictions.
Example
-------
Below is a simple workflow showing how to use both the *Predictor* and
the *Learner* outputs. For the *Predictor* we input the prediction model
into `Predictions <http://docs.orange.biolab.si/3/visual-programming/widgets/evaluation/predictions.html>`_
widget and view the results in `Data Table <http://docs.orange.biolab.si/3/visual-programming/widgets/data/datatable.html>`_.
For *Learner* we can compare different learners in `Test&Score <http://docs.orange.biolab.si/3/visual-programming/widgets/evaluation/testlearners.html>`_ widget.
.. figure:: ../resources/images/example_baselines.png
| PypiClean |
/IPFX-1.0.8.tar.gz/IPFX-1.0.8/ipfx/attach_metadata/sink/dandi_yaml_sink.py | from typing import (
List, Dict, Any, Set, Optional
)
import yaml
from ipfx.attach_metadata.sink.metadata_sink import (
MetadataSink, OneOrMany
)
class DandiYamlSink(MetadataSink):
    """ Sink specialized for writing data to a DANDI-compatible YAML file.
    """

    def __init__(self):
        # Output targets registered so far (see base-class register_target).
        self._targets: List[Dict] = []
        # Accumulated metadata: field name -> value.
        self._data: Dict[str, Any] = {}

    @property
    def targets(self) -> List[Dict[str, Any]]:
        """Targets that serialize() writes to when none are given."""
        return self._targets

    @property
    def supported_cell_fields(self) -> Set[str]:
        """Cell-level metadata fields this sink knows how to store."""
        return {
            "species",
            "age",
            "sex",
            "gender",
            "date_of_birth",
            "genotype",
            "cre_line",
        }

    @property
    def supported_sweep_fields(self) -> Set[str]:
        """This format supports no sweep-level metadata."""
        return set()

    def serialize(self, targets: Optional[OneOrMany[Dict[str, Any]]] = None):
        """ Writes this sink's data to an external target or targets. Does not
        modify this sink.

        Parameters
        ----------
        targets : If provided, these targets will be written to. Otherwise,
            write to targets previously defined by register_target.
        """
        for target in self._ensure_plural_targets(targets):
            with open(target["output_path"], "w") as file_:
                yaml.dump(self._data, stream=file_)

    def register(
            self,
            name: str,
            value: Any,
            sweep_id: Optional[int] = None
    ):
        """ Attaches a named piece of metadata to this sink's internal store.

        Parameters
        ----------
        name : the well-known name of the metadata
        value : the value of the metadata (before any required transformations)
        sweep_id : If provided, this will be interpreted as sweep-level
            metadata and sweep_id will be used to identify the sweep to which
            value ought to be attached. If None, this will be interpreted as
            cell-level metadata

        Raises
        ------
        ValueError : An argued piece of metadata is not supported by this sink
        """
        if name not in self.supported_cell_fields:
            raise ValueError(
                f"don't know how to attach metadata field: {name}\n"
            )
        # this format is just a straightforward mapping
        self._data[name] = value
/Alpha-Mind-0.3.1.tar.gz/Alpha-Mind-0.3.1/alphamind/analysis/quantileanalysis.py | from typing import Optional
import numpy as np
import pandas as pd
from alphamind.data.processing import factor_processing
from alphamind.data.quantile import quantile
from alphamind.data.standardize import standardize
from alphamind.data.winsorize import winsorize_normal
from alphamind.utilities import agg_mean
def quantile_analysis(factors: pd.DataFrame,
                      factor_weights: np.ndarray,
                      dx_return: np.ndarray,
                      n_bins: int = 5,
                      risk_exp: Optional[np.ndarray] = None,
                      **kwargs):
    """Run a quantile (bucket) return analysis on a weighted factor composite.

    Parameters
    ----------
    factors : factor values, one row per security, one column per factor.
    factor_weights : weights combining the processed factors into a single
        expected-return score.
    dx_return : realized returns used to score each quantile bucket.
    n_bins : number of quantile buckets.
    risk_exp : optional risk-factor exposures neutralized during processing.
    kwargs : may contain ``pre_process`` / ``post_process`` pipelines for
        :func:`factor_processing`; any remaining keywords (e.g. ``de_trend``)
        are forwarded to :func:`er_quantile_analysis`.

    Returns
    -------
    np.ndarray of per-quantile aggregated returns.
    """
    # ``pop`` with a default replaces the membership-test / read / ``del``
    # triple and still removes the keys before ``**kwargs`` is forwarded.
    pre_process = kwargs.pop('pre_process', [winsorize_normal, standardize])
    post_process = kwargs.pop('post_process', [standardize])

    er = factor_processing(factors.values, pre_process, risk_exp, post_process) @ factor_weights
    return er_quantile_analysis(er, n_bins, dx_return, **kwargs)
def er_quantile_analysis(er: np.ndarray,
                         n_bins: int,
                         dx_return: np.ndarray,
                         de_trend=False) -> np.ndarray:
    """Bucket securities into ``n_bins`` quantiles of the expected-return
    score ``er`` and aggregate the realized returns per bucket.

    Parameters
    ----------
    er : expected-return scores; flattened to 1-D before bucketing.
    n_bins : number of quantile buckets (must be > 1 when ``de_trend``).
    dx_return : realized returns; promoted to a column vector when 1-D.
    de_trend : when True, remove the cross-bucket average so the result
        reflects each bucket's return relative to the overall trend.

    Returns
    -------
    np.ndarray with one aggregated return per quantile bucket.
    """
    er = er.flatten()
    q_groups = quantile(er, n_bins)

    if dx_return.ndim < 2:
        # Reshape into a local array instead of assigning ``dx_return.shape``
        # in place, which silently mutated the *caller's* array.
        dx_return = dx_return.reshape(-1, 1)

    group_return = agg_mean(q_groups, dx_return).flatten()
    if not de_trend:
        return group_return.copy()

    total_return = group_return.sum()
    res_weight = 1. / (n_bins - 1)
    # Vectorized form of the original per-element loop:
    #   ret[i] = (1 + w) * ret[i] - w * total_return
    return (1. + res_weight) * group_return - res_weight * total_return
if __name__ == '__main__':
    # Ad-hoc smoke test: with empty pre-/post-processing pipelines, the
    # quantile analysis of the weighted factor frame should match running
    # er_quantile_analysis directly on the raw composite score.
    n = 5000
    n_f = 5
    n_bins = 5
    # NOTE(review): uses literal 5 columns rather than n_f — confirm intentional.
    x = np.random.randn(n, 5)
    risk_exp = np.random.randn(n, 3)  # built but not passed (risk_exp=None below)
    x_w = np.random.randn(n_f)
    r = np.random.randn(n)
    f_df = pd.DataFrame(x)
    calculated = quantile_analysis(f_df,
                                   x_w,
                                   r,
                                   risk_exp=None,
                                   n_bins=n_bins,
                                   pre_process=[],  # [winsorize_normal, standardize],
                                   post_process=[])  # [standardize])
    # Composite expected-return score computed by hand for comparison.
    er = x_w @ f_df.values.T
    expected = er_quantile_analysis(er, n_bins, r)
    print(calculated)
    print(expected)
/GxSphinx-1.0.0.tar.gz/GxSphinx-1.0.0/sphinx/search/non-minified-js/porter-stemmer.js | var JSX = {};
(function (JSX) {
/**
 * extends the class
 *
 * Gives every constructor in `derivations` a prototype object that
 * inherits from `base.prototype` without running the base constructor
 * (classic intermediate-constructor inheritance). Note all derivations
 * share the same prototype object.
 */
function $__jsx_extend(derivations, base) {
    var ctor = function () {};
    ctor.prototype = base.prototype;
    var proto = new ctor();
    for (var i in derivations) {
        derivations[i].prototype = proto;
    }
}
/**
 * copies the implementations from source interface to target
 *
 * Mixin-style: only the interface prototype's own members are copied.
 */
function $__jsx_merge_interface(target, source) {
    for (var k in source.prototype)
        if (source.prototype.hasOwnProperty(k))
            target.prototype[k] = source.prototype[k];
}
/**
 * defers the initialization of the property
 *
 * Installs a getter/setter pair on obj[prop]; the first read invokes
 * `func`, then the accessor is replaced by a plain data property holding
 * the computed value, so subsequent reads pay no overhead.
 */
function $__jsx_lazy_init(obj, prop, func) {
    // Tear down the accessor and store `value` as a plain property.
    function reset(obj, prop, value) {
        delete obj[prop];
        obj[prop] = value;
        return value;
    }
    Object.defineProperty(obj, prop, {
        get: function () {
            return reset(obj, prop, func());
        },
        set: function (v) {
            reset(obj, prop, v);
        },
        enumerable: true,
        configurable: true
    });
}
var $__jsx_imul = Math.imul;
if (typeof $__jsx_imul === "undefined") {
$__jsx_imul = function (a, b) {
var ah = (a >>> 16) & 0xffff;
var al = a & 0xffff;
var bh = (b >>> 16) & 0xffff;
var bl = b & 0xffff;
return ((al * bl) + (((ah * bl + al * bh) << 16) >>> 0)|0);
};
}
/**
* fused int-ops with side-effects
*/
function $__jsx_ipadd(o, p, r) {
return o[p] = (o[p] + r) | 0;
}
function $__jsx_ipsub(o, p, r) {
return o[p] = (o[p] - r) | 0;
}
function $__jsx_ipmul(o, p, r) {
return o[p] = $__jsx_imul(o[p], r);
}
function $__jsx_ipdiv(o, p, r) {
return o[p] = (o[p] / r) | 0;
}
function $__jsx_ipmod(o, p, r) {
return o[p] = (o[p] % r) | 0;
}
function $__jsx_ippostinc(o, p) {
var v = o[p];
o[p] = (v + 1) | 0;
return v;
}
function $__jsx_ippostdec(o, p) {
var v = o[p];
o[p] = (v - 1) | 0;
return v;
}
/**
* non-inlined version of Array#each
*/
function $__jsx_forEach(o, f) {
var l = o.length;
for (var i = 0; i < l; ++i)
f(o[i]);
}
/*
* global functions, renamed to avoid conflict with local variable names
*/
var $__jsx_parseInt = parseInt;
var $__jsx_parseFloat = parseFloat;
function $__jsx_isNaN(n) { return n !== n; }
var $__jsx_isFinite = isFinite;
var $__jsx_encodeURIComponent = encodeURIComponent;
var $__jsx_decodeURIComponent = decodeURIComponent;
var $__jsx_encodeURI = encodeURI;
var $__jsx_decodeURI = decodeURI;
var $__jsx_ObjectToString = Object.prototype.toString;
var $__jsx_ObjectHasOwnProperty = Object.prototype.hasOwnProperty;
/*
* profiler object, initialized afterwards
*/
function $__jsx_profiler() {
}
/*
* public interface to JSX code
*/
JSX.require = function (path) {
var m = $__jsx_classMap[path];
return m !== undefined ? m : null;
};
JSX.profilerIsRunning = function () {
return $__jsx_profiler.getResults != null;
};
JSX.getProfileResults = function () {
return ($__jsx_profiler.getResults || function () { return {}; })();
};
JSX.postProfileResults = function (url, cb) {
if ($__jsx_profiler.postResults == null)
throw new Error("profiler has not been turned on");
return $__jsx_profiler.postResults(url, cb);
};
JSX.resetProfileResults = function () {
if ($__jsx_profiler.resetResults == null)
throw new Error("profiler has not been turned on");
return $__jsx_profiler.resetResults();
};
JSX.DEBUG = false;
var GeneratorFunction$0 =
(function () {
try {
return Function('import {GeneratorFunction} from "std:iteration"; return GeneratorFunction')();
} catch (e) {
return function GeneratorFunction () {};
}
})();
var __jsx_generator_object$0 =
(function () {
function __jsx_generator_object() {
this.__next = 0;
this.__loop = null;
this.__seed = null;
this.__value = undefined;
this.__status = 0; // SUSPENDED: 0, ACTIVE: 1, DEAD: 2
}
__jsx_generator_object.prototype.next = function (seed) {
switch (this.__status) {
case 0:
this.__status = 1;
this.__seed = seed;
// go next!
this.__loop(this.__next);
var done = false;
if (this.__next != -1) {
this.__status = 0;
} else {
this.__status = 2;
done = true;
}
return { value: this.__value, done: done };
case 1:
throw new Error("Generator is already running");
case 2:
throw new Error("Generator is already finished");
default:
throw new Error("Unexpected generator internal state");
}
};
return __jsx_generator_object;
}());
/**
 * An entry in a Snowball "among" table: candidate string `s` (with its
 * cached length), a link `substring_i` to the next candidate to try, and
 * the `result` code returned on a match. This variant carries no
 * condition method.
 */
function Among(s, substring_i, result) {
    this.s_size = s.length;
    this.s = s;
    this.substring_i = substring_i;
    this.result = result;
    this.method = null;
    this.instance = null;
};

/**
 * Among variant carrying a condition `method` (and the `instance` it is
 * invoked on) that must also succeed for the entry to match.
 */
function Among$0(s, substring_i, result, method, instance) {
    this.s_size = s.length;
    this.s = s;
    this.substring_i = substring_i;
    this.result = result;
    this.method = method;
    this.instance = instance;
};

// Both constructors share one Object-derived prototype.
$__jsx_extend([Among, Among$0], Object);
/** Abstract marker base for all Snowball stemmers. */
function Stemmer() {
};

$__jsx_extend([Stemmer], Object);

/**
 * Snowball string-machine state shared by every concrete stemmer: the
 * string being stemmed (`current`), a movable `cursor`, the active
 * window [`limit_backward`, `limit`], and the [`bra`, `ket`] marks used
 * by the slice_* operations. `cache` memoizes stemmed words.
 */
function BaseStemmer() {
    var current$0;
    var cursor$0;
    var limit$0;
    this.cache = ({ });
    // Start on the empty string: cursor at 0, limit at end, marks spanning all.
    current$0 = this.current = "";
    cursor$0 = this.cursor = 0;
    limit$0 = this.limit = current$0.length;
    this.limit_backward = 0;
    this.bra = cursor$0;
    this.ket = limit$0;
};

$__jsx_extend([BaseStemmer], Stemmer);
BaseStemmer.prototype.setCurrent$S = function (value) {
var current$0;
var cursor$0;
var limit$0;
current$0 = this.current = value;
cursor$0 = this.cursor = 0;
limit$0 = this.limit = current$0.length;
this.limit_backward = 0;
this.bra = cursor$0;
this.ket = limit$0;
};
function BaseStemmer$setCurrent$LBaseStemmer$S($this, value) {
var current$0;
var cursor$0;
var limit$0;
current$0 = $this.current = value;
cursor$0 = $this.cursor = 0;
limit$0 = $this.limit = current$0.length;
$this.limit_backward = 0;
$this.bra = cursor$0;
$this.ket = limit$0;
};
BaseStemmer.setCurrent$LBaseStemmer$S = BaseStemmer$setCurrent$LBaseStemmer$S;
BaseStemmer.prototype.getCurrent$ = function () {
return this.current;
};
function BaseStemmer$getCurrent$LBaseStemmer$($this) {
return $this.current;
};
BaseStemmer.getCurrent$LBaseStemmer$ = BaseStemmer$getCurrent$LBaseStemmer$;
BaseStemmer.prototype.copy_from$LBaseStemmer$ = function (other) {
this.current = other.current;
this.cursor = other.cursor;
this.limit = other.limit;
this.limit_backward = other.limit_backward;
this.bra = other.bra;
this.ket = other.ket;
};
function BaseStemmer$copy_from$LBaseStemmer$LBaseStemmer$($this, other) {
$this.current = other.current;
$this.cursor = other.cursor;
$this.limit = other.limit;
$this.limit_backward = other.limit_backward;
$this.bra = other.bra;
$this.ket = other.ket;
};
BaseStemmer.copy_from$LBaseStemmer$LBaseStemmer$ = BaseStemmer$copy_from$LBaseStemmer$LBaseStemmer$;
BaseStemmer.prototype.in_grouping$AIII = function (s, min, max) {
var ch;
var $__jsx_postinc_t;
if (this.cursor >= this.limit) {
return false;
}
ch = this.current.charCodeAt(this.cursor);
if (ch > max || ch < min) {
return false;
}
ch -= min;
if ((s[ch >>> 3] & 0x1 << (ch & 0x7)) === 0) {
return false;
}
($__jsx_postinc_t = this.cursor, this.cursor = ($__jsx_postinc_t + 1) | 0, $__jsx_postinc_t);
return true;
};
function BaseStemmer$in_grouping$LBaseStemmer$AIII($this, s, min, max) {
var ch;
var $__jsx_postinc_t;
if ($this.cursor >= $this.limit) {
return false;
}
ch = $this.current.charCodeAt($this.cursor);
if (ch > max || ch < min) {
return false;
}
ch -= min;
if ((s[ch >>> 3] & 0x1 << (ch & 0x7)) === 0) {
return false;
}
($__jsx_postinc_t = $this.cursor, $this.cursor = ($__jsx_postinc_t + 1) | 0, $__jsx_postinc_t);
return true;
};
BaseStemmer.in_grouping$LBaseStemmer$AIII = BaseStemmer$in_grouping$LBaseStemmer$AIII;
BaseStemmer.prototype.in_grouping_b$AIII = function (s, min, max) {
var ch;
var $__jsx_postinc_t;
if (this.cursor <= this.limit_backward) {
return false;
}
ch = this.current.charCodeAt(this.cursor - 1);
if (ch > max || ch < min) {
return false;
}
ch -= min;
if ((s[ch >>> 3] & 0x1 << (ch & 0x7)) === 0) {
return false;
}
($__jsx_postinc_t = this.cursor, this.cursor = ($__jsx_postinc_t - 1) | 0, $__jsx_postinc_t);
return true;
};
function BaseStemmer$in_grouping_b$LBaseStemmer$AIII($this, s, min, max) {
var ch;
var $__jsx_postinc_t;
if ($this.cursor <= $this.limit_backward) {
return false;
}
ch = $this.current.charCodeAt($this.cursor - 1);
if (ch > max || ch < min) {
return false;
}
ch -= min;
if ((s[ch >>> 3] & 0x1 << (ch & 0x7)) === 0) {
return false;
}
($__jsx_postinc_t = $this.cursor, $this.cursor = ($__jsx_postinc_t - 1) | 0, $__jsx_postinc_t);
return true;
};
BaseStemmer.in_grouping_b$LBaseStemmer$AIII = BaseStemmer$in_grouping_b$LBaseStemmer$AIII;
BaseStemmer.prototype.out_grouping$AIII = function (s, min, max) {
var ch;
var $__jsx_postinc_t;
if (this.cursor >= this.limit) {
return false;
}
ch = this.current.charCodeAt(this.cursor);
if (ch > max || ch < min) {
($__jsx_postinc_t = this.cursor, this.cursor = ($__jsx_postinc_t + 1) | 0, $__jsx_postinc_t);
return true;
}
ch -= min;
if ((s[ch >>> 3] & 0X1 << (ch & 0x7)) === 0) {
($__jsx_postinc_t = this.cursor, this.cursor = ($__jsx_postinc_t + 1) | 0, $__jsx_postinc_t);
return true;
}
return false;
};
function BaseStemmer$out_grouping$LBaseStemmer$AIII($this, s, min, max) {
var ch;
var $__jsx_postinc_t;
if ($this.cursor >= $this.limit) {
return false;
}
ch = $this.current.charCodeAt($this.cursor);
if (ch > max || ch < min) {
($__jsx_postinc_t = $this.cursor, $this.cursor = ($__jsx_postinc_t + 1) | 0, $__jsx_postinc_t);
return true;
}
ch -= min;
if ((s[ch >>> 3] & 0X1 << (ch & 0x7)) === 0) {
($__jsx_postinc_t = $this.cursor, $this.cursor = ($__jsx_postinc_t + 1) | 0, $__jsx_postinc_t);
return true;
}
return false;
};
BaseStemmer.out_grouping$LBaseStemmer$AIII = BaseStemmer$out_grouping$LBaseStemmer$AIII;
BaseStemmer.prototype.out_grouping_b$AIII = function (s, min, max) {
var ch;
var $__jsx_postinc_t;
if (this.cursor <= this.limit_backward) {
return false;
}
ch = this.current.charCodeAt(this.cursor - 1);
if (ch > max || ch < min) {
($__jsx_postinc_t = this.cursor, this.cursor = ($__jsx_postinc_t - 1) | 0, $__jsx_postinc_t);
return true;
}
ch -= min;
if ((s[ch >>> 3] & 0x1 << (ch & 0x7)) === 0) {
($__jsx_postinc_t = this.cursor, this.cursor = ($__jsx_postinc_t - 1) | 0, $__jsx_postinc_t);
return true;
}
return false;
};
function BaseStemmer$out_grouping_b$LBaseStemmer$AIII($this, s, min, max) {
var ch;
var $__jsx_postinc_t;
if ($this.cursor <= $this.limit_backward) {
return false;
}
ch = $this.current.charCodeAt($this.cursor - 1);
if (ch > max || ch < min) {
($__jsx_postinc_t = $this.cursor, $this.cursor = ($__jsx_postinc_t - 1) | 0, $__jsx_postinc_t);
return true;
}
ch -= min;
if ((s[ch >>> 3] & 0x1 << (ch & 0x7)) === 0) {
($__jsx_postinc_t = $this.cursor, $this.cursor = ($__jsx_postinc_t - 1) | 0, $__jsx_postinc_t);
return true;
}
return false;
};
BaseStemmer.out_grouping_b$LBaseStemmer$AIII = BaseStemmer$out_grouping_b$LBaseStemmer$AIII;
BaseStemmer.prototype.in_range$II = function (min, max) {
var ch;
var $__jsx_postinc_t;
if (this.cursor >= this.limit) {
return false;
}
ch = this.current.charCodeAt(this.cursor);
if (ch > max || ch < min) {
return false;
}
($__jsx_postinc_t = this.cursor, this.cursor = ($__jsx_postinc_t + 1) | 0, $__jsx_postinc_t);
return true;
};
function BaseStemmer$in_range$LBaseStemmer$II($this, min, max) {
var ch;
var $__jsx_postinc_t;
if ($this.cursor >= $this.limit) {
return false;
}
ch = $this.current.charCodeAt($this.cursor);
if (ch > max || ch < min) {
return false;
}
($__jsx_postinc_t = $this.cursor, $this.cursor = ($__jsx_postinc_t + 1) | 0, $__jsx_postinc_t);
return true;
};
BaseStemmer.in_range$LBaseStemmer$II = BaseStemmer$in_range$LBaseStemmer$II;
BaseStemmer.prototype.in_range_b$II = function (min, max) {
var ch;
var $__jsx_postinc_t;
if (this.cursor <= this.limit_backward) {
return false;
}
ch = this.current.charCodeAt(this.cursor - 1);
if (ch > max || ch < min) {
return false;
}
($__jsx_postinc_t = this.cursor, this.cursor = ($__jsx_postinc_t - 1) | 0, $__jsx_postinc_t);
return true;
};
function BaseStemmer$in_range_b$LBaseStemmer$II($this, min, max) {
var ch;
var $__jsx_postinc_t;
if ($this.cursor <= $this.limit_backward) {
return false;
}
ch = $this.current.charCodeAt($this.cursor - 1);
if (ch > max || ch < min) {
return false;
}
($__jsx_postinc_t = $this.cursor, $this.cursor = ($__jsx_postinc_t - 1) | 0, $__jsx_postinc_t);
return true;
};
BaseStemmer.in_range_b$LBaseStemmer$II = BaseStemmer$in_range_b$LBaseStemmer$II;
BaseStemmer.prototype.out_range$II = function (min, max) {
var ch;
var $__jsx_postinc_t;
if (this.cursor >= this.limit) {
return false;
}
ch = this.current.charCodeAt(this.cursor);
if (! (ch > max || ch < min)) {
return false;
}
($__jsx_postinc_t = this.cursor, this.cursor = ($__jsx_postinc_t + 1) | 0, $__jsx_postinc_t);
return true;
};
function BaseStemmer$out_range$LBaseStemmer$II($this, min, max) {
var ch;
var $__jsx_postinc_t;
if ($this.cursor >= $this.limit) {
return false;
}
ch = $this.current.charCodeAt($this.cursor);
if (! (ch > max || ch < min)) {
return false;
}
($__jsx_postinc_t = $this.cursor, $this.cursor = ($__jsx_postinc_t + 1) | 0, $__jsx_postinc_t);
return true;
};
BaseStemmer.out_range$LBaseStemmer$II = BaseStemmer$out_range$LBaseStemmer$II;
BaseStemmer.prototype.out_range_b$II = function (min, max) {
var ch;
var $__jsx_postinc_t;
if (this.cursor <= this.limit_backward) {
return false;
}
ch = this.current.charCodeAt(this.cursor - 1);
if (! (ch > max || ch < min)) {
return false;
}
($__jsx_postinc_t = this.cursor, this.cursor = ($__jsx_postinc_t - 1) | 0, $__jsx_postinc_t);
return true;
};
function BaseStemmer$out_range_b$LBaseStemmer$II($this, min, max) {
var ch;
var $__jsx_postinc_t;
if ($this.cursor <= $this.limit_backward) {
return false;
}
ch = $this.current.charCodeAt($this.cursor - 1);
if (! (ch > max || ch < min)) {
return false;
}
($__jsx_postinc_t = $this.cursor, $this.cursor = ($__jsx_postinc_t - 1) | 0, $__jsx_postinc_t);
return true;
};
BaseStemmer.out_range_b$LBaseStemmer$II = BaseStemmer$out_range_b$LBaseStemmer$II;
BaseStemmer.prototype.eq_s$IS = function (s_size, s) {
var cursor$0;
if (((this.limit - this.cursor) | 0) < s_size) {
return false;
}
if (this.current.slice(cursor$0 = this.cursor, ((cursor$0 + s_size) | 0)) !== s) {
return false;
}
this.cursor = (this.cursor + s_size) | 0;
return true;
};
function BaseStemmer$eq_s$LBaseStemmer$IS($this, s_size, s) {
var cursor$0;
if ((($this.limit - $this.cursor) | 0) < s_size) {
return false;
}
if ($this.current.slice(cursor$0 = $this.cursor, ((cursor$0 + s_size) | 0)) !== s) {
return false;
}
$this.cursor = ($this.cursor + s_size) | 0;
return true;
};
BaseStemmer.eq_s$LBaseStemmer$IS = BaseStemmer$eq_s$LBaseStemmer$IS;
BaseStemmer.prototype.eq_s_b$IS = function (s_size, s) {
var cursor$0;
if (((this.cursor - this.limit_backward) | 0) < s_size) {
return false;
}
if (this.current.slice((((cursor$0 = this.cursor) - s_size) | 0), cursor$0) !== s) {
return false;
}
this.cursor = (this.cursor - s_size) | 0;
return true;
};
function BaseStemmer$eq_s_b$LBaseStemmer$IS($this, s_size, s) {
var cursor$0;
if ((($this.cursor - $this.limit_backward) | 0) < s_size) {
return false;
}
if ($this.current.slice((((cursor$0 = $this.cursor) - s_size) | 0), cursor$0) !== s) {
return false;
}
$this.cursor = ($this.cursor - s_size) | 0;
return true;
};
BaseStemmer.eq_s_b$LBaseStemmer$IS = BaseStemmer$eq_s_b$LBaseStemmer$IS;
BaseStemmer.prototype.eq_v$S = function (s) {
return BaseStemmer$eq_s$LBaseStemmer$IS(this, s.length, s);
};
function BaseStemmer$eq_v$LBaseStemmer$S($this, s) {
return BaseStemmer$eq_s$LBaseStemmer$IS($this, s.length, s);
};
BaseStemmer.eq_v$LBaseStemmer$S = BaseStemmer$eq_v$LBaseStemmer$S;
BaseStemmer.prototype.eq_v_b$S = function (s) {
return BaseStemmer$eq_s_b$LBaseStemmer$IS(this, s.length, s);
};
function BaseStemmer$eq_v_b$LBaseStemmer$S($this, s) {
return BaseStemmer$eq_s_b$LBaseStemmer$IS($this, s.length, s);
};
BaseStemmer.eq_v_b$LBaseStemmer$S = BaseStemmer$eq_v_b$LBaseStemmer$S;
BaseStemmer.prototype.find_among$ALAmong$I = function (v, v_size) {
var i;
var j;
var c;
var l;
var common_i;
var common_j;
var first_key_inspected;
var k;
var diff;
var common;
var w;
var i2;
var res;
i = 0;
j = v_size;
c = this.cursor;
l = this.limit;
common_i = 0;
common_j = 0;
first_key_inspected = false;
while (true) {
k = i + (j - i >>> 1);
diff = 0;
common = (common_i < common_j ? common_i : common_j);
w = v[k];
for (i2 = common; i2 < w.s_size; i2++) {
if (c + common === l) {
diff = -1;
break;
}
diff = this.current.charCodeAt(c + common) - w.s.charCodeAt(i2);
if (diff !== 0) {
break;
}
common++;
}
if (diff < 0) {
j = k;
common_j = common;
} else {
i = k;
common_i = common;
}
if (j - i <= 1) {
if (i > 0) {
break;
}
if (j === i) {
break;
}
if (first_key_inspected) {
break;
}
first_key_inspected = true;
}
}
while (true) {
w = v[i];
if (common_i >= w.s_size) {
this.cursor = (c + w.s_size | 0);
if (w.method == null) {
return w.result;
}
res = w.method(w.instance);
this.cursor = (c + w.s_size | 0);
if (res) {
return w.result;
}
}
i = w.substring_i;
if (i < 0) {
return 0;
}
}
return -1;
};
function BaseStemmer$find_among$LBaseStemmer$ALAmong$I($this, v, v_size) {
var i;
var j;
var c;
var l;
var common_i;
var common_j;
var first_key_inspected;
var k;
var diff;
var common;
var w;
var i2;
var res;
i = 0;
j = v_size;
c = $this.cursor;
l = $this.limit;
common_i = 0;
common_j = 0;
first_key_inspected = false;
while (true) {
k = i + (j - i >>> 1);
diff = 0;
common = (common_i < common_j ? common_i : common_j);
w = v[k];
for (i2 = common; i2 < w.s_size; i2++) {
if (c + common === l) {
diff = -1;
break;
}
diff = $this.current.charCodeAt(c + common) - w.s.charCodeAt(i2);
if (diff !== 0) {
break;
}
common++;
}
if (diff < 0) {
j = k;
common_j = common;
} else {
i = k;
common_i = common;
}
if (j - i <= 1) {
if (i > 0) {
break;
}
if (j === i) {
break;
}
if (first_key_inspected) {
break;
}
first_key_inspected = true;
}
}
while (true) {
w = v[i];
if (common_i >= w.s_size) {
$this.cursor = (c + w.s_size | 0);
if (w.method == null) {
return w.result;
}
res = w.method(w.instance);
$this.cursor = (c + w.s_size | 0);
if (res) {
return w.result;
}
}
i = w.substring_i;
if (i < 0) {
return 0;
}
}
return -1;
};
BaseStemmer.find_among$LBaseStemmer$ALAmong$I = BaseStemmer$find_among$LBaseStemmer$ALAmong$I;
BaseStemmer.prototype.find_among_b$ALAmong$I = function (v, v_size) {
var i;
var j;
var c;
var lb;
var common_i;
var common_j;
var first_key_inspected;
var k;
var diff;
var common;
var w;
var i2;
var res;
i = 0;
j = v_size;
c = this.cursor;
lb = this.limit_backward;
common_i = 0;
common_j = 0;
first_key_inspected = false;
while (true) {
k = i + (j - i >> 1);
diff = 0;
common = (common_i < common_j ? common_i : common_j);
w = v[k];
for (i2 = w.s_size - 1 - common; i2 >= 0; i2--) {
if (c - common === lb) {
diff = -1;
break;
}
diff = this.current.charCodeAt(c - 1 - common) - w.s.charCodeAt(i2);
if (diff !== 0) {
break;
}
common++;
}
if (diff < 0) {
j = k;
common_j = common;
} else {
i = k;
common_i = common;
}
if (j - i <= 1) {
if (i > 0) {
break;
}
if (j === i) {
break;
}
if (first_key_inspected) {
break;
}
first_key_inspected = true;
}
}
while (true) {
w = v[i];
if (common_i >= w.s_size) {
this.cursor = (c - w.s_size | 0);
if (w.method == null) {
return w.result;
}
res = w.method(this);
this.cursor = (c - w.s_size | 0);
if (res) {
return w.result;
}
}
i = w.substring_i;
if (i < 0) {
return 0;
}
}
return -1;
};
function BaseStemmer$find_among_b$LBaseStemmer$ALAmong$I($this, v, v_size) {
var i;
var j;
var c;
var lb;
var common_i;
var common_j;
var first_key_inspected;
var k;
var diff;
var common;
var w;
var i2;
var res;
i = 0;
j = v_size;
c = $this.cursor;
lb = $this.limit_backward;
common_i = 0;
common_j = 0;
first_key_inspected = false;
while (true) {
k = i + (j - i >> 1);
diff = 0;
common = (common_i < common_j ? common_i : common_j);
w = v[k];
for (i2 = w.s_size - 1 - common; i2 >= 0; i2--) {
if (c - common === lb) {
diff = -1;
break;
}
diff = $this.current.charCodeAt(c - 1 - common) - w.s.charCodeAt(i2);
if (diff !== 0) {
break;
}
common++;
}
if (diff < 0) {
j = k;
common_j = common;
} else {
i = k;
common_i = common;
}
if (j - i <= 1) {
if (i > 0) {
break;
}
if (j === i) {
break;
}
if (first_key_inspected) {
break;
}
first_key_inspected = true;
}
}
while (true) {
w = v[i];
if (common_i >= w.s_size) {
$this.cursor = (c - w.s_size | 0);
if (w.method == null) {
return w.result;
}
res = w.method($this);
$this.cursor = (c - w.s_size | 0);
if (res) {
return w.result;
}
}
i = w.substring_i;
if (i < 0) {
return 0;
}
}
return -1;
};
BaseStemmer.find_among_b$LBaseStemmer$ALAmong$I = BaseStemmer$find_among_b$LBaseStemmer$ALAmong$I;
BaseStemmer.prototype.replace_s$IIS = function (c_bra, c_ket, s) {
var adjustment;
adjustment = ((s.length - (((c_ket - c_bra) | 0))) | 0);
this.current = this.current.slice(0, c_bra) + s + this.current.slice(c_ket);
this.limit = (this.limit + adjustment) | 0;
if (this.cursor >= c_ket) {
this.cursor = (this.cursor + adjustment) | 0;
} else if (this.cursor > c_bra) {
this.cursor = c_bra;
}
return (adjustment | 0);
};
function BaseStemmer$replace_s$LBaseStemmer$IIS($this, c_bra, c_ket, s) {
var adjustment;
adjustment = ((s.length - (((c_ket - c_bra) | 0))) | 0);
$this.current = $this.current.slice(0, c_bra) + s + $this.current.slice(c_ket);
$this.limit = ($this.limit + adjustment) | 0;
if ($this.cursor >= c_ket) {
$this.cursor = ($this.cursor + adjustment) | 0;
} else if ($this.cursor > c_bra) {
$this.cursor = c_bra;
}
return (adjustment | 0);
};
BaseStemmer.replace_s$LBaseStemmer$IIS = BaseStemmer$replace_s$LBaseStemmer$IIS;
BaseStemmer.prototype.slice_check$ = function () {
var bra$0;
var ket$0;
var limit$0;
return ((bra$0 = this.bra) < 0 || bra$0 > (ket$0 = this.ket) || ket$0 > (limit$0 = this.limit) || limit$0 > this.current.length ? false : true);
};
function BaseStemmer$slice_check$LBaseStemmer$($this) {
    // Static twin of slice_check$: 0 <= bra <= ket <= limit <= |current|.
    var ok = $this.bra >= 0 &&
        $this.bra <= $this.ket &&
        $this.ket <= $this.limit &&
        $this.limit <= $this.current.length;
    return ok;
}
BaseStemmer.slice_check$LBaseStemmer$ = BaseStemmer$slice_check$LBaseStemmer$;
BaseStemmer.prototype.slice_from$S = function (s) {
    // Replace the marked [bra, ket) region with s.
    // Returns false (doing nothing) when the slice bounds are invalid.
    var bounds_ok = this.bra >= 0 &&
        this.bra <= this.ket &&
        this.ket <= this.limit &&
        this.limit <= this.current.length;
    if (!bounds_ok) {
        return false;
    }
    BaseStemmer$replace_s$LBaseStemmer$IIS(this, this.bra, this.ket, s);
    return true;
};
function BaseStemmer$slice_from$LBaseStemmer$S($this, s) {
    // Static twin of slice_from$S: replace [bra, ket) with s when the
    // slice bounds are valid; report success.
    if ($this.bra < 0 || $this.bra > $this.ket || $this.ket > $this.limit || $this.limit > $this.current.length) {
        return false;
    }
    BaseStemmer$replace_s$LBaseStemmer$IIS($this, $this.bra, $this.ket, s);
    return true;
}
BaseStemmer.slice_from$LBaseStemmer$S = BaseStemmer$slice_from$LBaseStemmer$S;
// Delete the marked [bra, ket) region: slicing in the empty string.
BaseStemmer.prototype.slice_del$ = function () {
return BaseStemmer$slice_from$LBaseStemmer$S(this, "");
};
// Static twin of slice_del$.
function BaseStemmer$slice_del$LBaseStemmer$($this) {
return BaseStemmer$slice_from$LBaseStemmer$S($this, "");
};
BaseStemmer.slice_del$LBaseStemmer$ = BaseStemmer$slice_del$LBaseStemmer$;
BaseStemmer.prototype.insert$IIS = function (c_bra, c_ket, s) {
    // Splice s over [c_bra, c_ket), then shift the bra/ket marks that sit
    // at or after the insertion point by the length delta.
    var delta = BaseStemmer$replace_s$LBaseStemmer$IIS(this, c_bra, c_ket, s);
    if (c_bra <= this.bra) {
        this.bra = (this.bra + delta) | 0;
    }
    if (c_bra <= this.ket) {
        this.ket = (this.ket + delta) | 0;
    }
};
function BaseStemmer$insert$LBaseStemmer$IIS($this, c_bra, c_ket, s) {
    // Static twin of insert$IIS: splice, then keep bra/ket marks at or
    // after the insertion point in step with the length change.
    var delta = BaseStemmer$replace_s$LBaseStemmer$IIS($this, c_bra, c_ket, s);
    if (c_bra <= $this.bra) {
        $this.bra = ($this.bra + delta) | 0;
    }
    if (c_bra <= $this.ket) {
        $this.ket = ($this.ket + delta) | 0;
    }
}
BaseStemmer.insert$LBaseStemmer$IIS = BaseStemmer$insert$LBaseStemmer$IIS;
BaseStemmer.prototype.slice_to$S = function (s) {
    // Return the text of the marked [bra, ket) region, or "" when the
    // slice bounds are invalid. The argument s is never read.
    var valid = this.bra >= 0 &&
        this.bra <= this.ket &&
        this.ket <= this.limit &&
        this.limit <= this.current.length;
    if (valid) {
        return this.current.slice(this.bra, this.ket);
    }
    return '';
};
function BaseStemmer$slice_to$LBaseStemmer$S($this, s) {
    // Static twin of slice_to$S: text of [bra, ket) when the slice bounds
    // are valid, "" otherwise. The argument s is never read.
    if ($this.bra < 0 || $this.bra > $this.ket || $this.ket > $this.limit || $this.limit > $this.current.length) {
        return '';
    }
    return $this.current.slice($this.bra, $this.ket);
}
BaseStemmer.slice_to$LBaseStemmer$S = BaseStemmer$slice_to$LBaseStemmer$S;
// Return the processed text (current up to limit); the argument s is unused.
BaseStemmer.prototype.assign_to$S = function (s) {
return this.current.slice(0, this.limit);
};
// Static twin of assign_to$S.
function BaseStemmer$assign_to$LBaseStemmer$S($this, s) {
return $this.current.slice(0, $this.limit);
};
BaseStemmer.assign_to$LBaseStemmer$S = BaseStemmer$assign_to$LBaseStemmer$S;
// Base stem$ is a stub; concrete stemmers (e.g. PorterStemmer below) override it.
BaseStemmer.prototype.stem$ = function () {
return false;
};
BaseStemmer.prototype.stemWord$S = function (word) {
    // Stem a single word, memoising the result in this.cache.
    // Keys are prefixed with '.' — presumably to keep arbitrary words from
    // colliding with Object.prototype names; confirm before relying on it.
    var cache_key = '.' + word;
    var result = this.cache[cache_key];
    if (result == null) {
        // Reset the whole buffer state before running the algorithm.
        this.current = word;
        this.cursor = 0;
        this.limit = this.current.length;
        this.limit_backward = 0;
        this.bra = this.cursor;
        this.ket = this.limit;
        this.stem$();
        result = this.current;
        this.cache[cache_key] = result;
    }
    return result;
};
BaseStemmer.prototype.stemWord = BaseStemmer.prototype.stemWord$S;
BaseStemmer.prototype.stemWords$AS = function (words) {
    // Stem every word in order and return the results as a new array.
    // Delegates to stemWord$S so the cache lookup/reset/stem/store logic
    // lives in exactly one place (it was previously duplicated inline,
    // byte-for-byte, in this method).
    var results = [];
    for (var i = 0; i < words.length; i++) {
        results.push(this.stemWord$S(words[i]));
    }
    return results;
};
BaseStemmer.prototype.stemWords = BaseStemmer.prototype.stemWords$AS;
// PorterStemmer: the Porter English stemming algorithm on top of BaseStemmer.
function PorterStemmer() {
BaseStemmer.call(this);
this.B_Y_found = false;
this.I_p2 = 0;
this.I_p1 = 0;
};
// JSX-generated prototype wiring: PorterStemmer extends BaseStemmer.
// B_Y_found: set by stem$ when a "y" was rewritten to the marker "Y".
// I_p1 / I_p2: positions computed in stem$ and tested by r_R1$/r_R2$.
$__jsx_extend([PorterStemmer], BaseStemmer);
PorterStemmer.prototype.copy_from$LPorterStemmer$ = function (other) {
    // Clone the Porter-specific fields, then the shared BaseStemmer state.
    this.B_Y_found = other.B_Y_found;
    this.I_p1 = other.I_p1;
    this.I_p2 = other.I_p2;
    BaseStemmer$copy_from$LBaseStemmer$LBaseStemmer$(this, other);
};
PorterStemmer.prototype.copy_from = PorterStemmer.prototype.copy_from$LPorterStemmer$;
PorterStemmer.prototype.r_shortv$ = function () {
    // Backward scan from the cursor: non-vowel (g_v_WXY), then vowel,
    // then non-vowel (g_v). All three tests must succeed, in this order.
    if (! BaseStemmer$out_grouping_b$LBaseStemmer$AIII(this, PorterStemmer.g_v_WXY, 89, 121)) {
        return false;
    }
    if (! BaseStemmer$in_grouping_b$LBaseStemmer$AIII(this, PorterStemmer.g_v, 97, 121)) {
        return false;
    }
    if (! BaseStemmer$out_grouping_b$LBaseStemmer$AIII(this, PorterStemmer.g_v, 97, 121)) {
        return false;
    }
    return true;
};
PorterStemmer.prototype.r_shortv = PorterStemmer.prototype.r_shortv$;
function PorterStemmer$r_shortv$LPorterStemmer$($this) {
    // Static twin of r_shortv$; identical test order and short-circuiting.
    if (! BaseStemmer$out_grouping_b$LBaseStemmer$AIII($this, PorterStemmer.g_v_WXY, 89, 121)) {
        return false;
    }
    if (! BaseStemmer$in_grouping_b$LBaseStemmer$AIII($this, PorterStemmer.g_v, 97, 121)) {
        return false;
    }
    if (! BaseStemmer$out_grouping_b$LBaseStemmer$AIII($this, PorterStemmer.g_v, 97, 121)) {
        return false;
    }
    return true;
}
PorterStemmer.r_shortv$LPorterStemmer$ = PorterStemmer$r_shortv$LPorterStemmer$;
PorterStemmer.prototype.r_R1$ = function () {
    // True when the cursor sits at or beyond I_p1.
    return this.I_p1 <= this.cursor;
};
PorterStemmer.prototype.r_R1 = PorterStemmer.prototype.r_R1$;
function PorterStemmer$r_R1$LPorterStemmer$($this) {
    // Static twin of r_R1$.
    return $this.I_p1 <= $this.cursor;
}
PorterStemmer.r_R1$LPorterStemmer$ = PorterStemmer$r_R1$LPorterStemmer$;
PorterStemmer.prototype.r_R2$ = function () {
    // True when the cursor sits at or beyond I_p2.
    return this.I_p2 <= this.cursor;
};
PorterStemmer.prototype.r_R2 = PorterStemmer.prototype.r_R2$;
function PorterStemmer$r_R2$LPorterStemmer$($this) {
    // Static twin of r_R2$.
    return $this.I_p2 <= $this.cursor;
}
PorterStemmer.r_R2$LPorterStemmer$ = PorterStemmer$r_R2$LPorterStemmer$;
PorterStemmer.prototype.r_Step_1a$ = function () {
    // Porter step 1a: plural endings via the a_0 table
    // (sses -> ss, ies -> i, s -> deleted; "ss" maps to -1 = leave alone).
    this.ket = this.cursor;
    var among_var = BaseStemmer$find_among_b$LBaseStemmer$ALAmong$I(this, PorterStemmer.a_0, 4);
    if (among_var === 0) {
        return false;
    }
    this.bra = this.cursor;
    if (among_var === 1) {
        return BaseStemmer$slice_from$LBaseStemmer$S(this, "ss") ? true : false;
    }
    if (among_var === 2) {
        return BaseStemmer$slice_from$LBaseStemmer$S(this, "i") ? true : false;
    }
    if (among_var === 3) {
        return BaseStemmer$slice_from$LBaseStemmer$S(this, "") ? true : false;
    }
    // Any other result (e.g. -1 for "ss") matches without rewriting.
    return true;
};
PorterStemmer.prototype.r_Step_1a = PorterStemmer.prototype.r_Step_1a$;
function PorterStemmer$r_Step_1a$LPorterStemmer$($this) {
    // Static twin of r_Step_1a$: plural endings via the a_0 table.
    $this.ket = $this.cursor;
    var among_var = BaseStemmer$find_among_b$LBaseStemmer$ALAmong$I($this, PorterStemmer.a_0, 4);
    if (among_var === 0) {
        return false;
    }
    $this.bra = $this.cursor;
    if (among_var === 1) {
        return BaseStemmer$slice_from$LBaseStemmer$S($this, "ss") ? true : false;
    }
    if (among_var === 2) {
        return BaseStemmer$slice_from$LBaseStemmer$S($this, "i") ? true : false;
    }
    if (among_var === 3) {
        return BaseStemmer$slice_from$LBaseStemmer$S($this, "") ? true : false;
    }
    // Any other result (e.g. -1 for "ss") matches without rewriting.
    return true;
}
PorterStemmer.r_Step_1a$LPorterStemmer$ = PorterStemmer$r_Step_1a$LPorterStemmer$;
PorterStemmer.prototype.r_Step_1b$ = function () {
    // Porter step 1b: handle "-eed"/"-ed"/"-ing" (table a_2), then clean
    // up the remaining stem via table a_1 (at/bl/iz -> add "e"; doubled
    // consonants -> drop one letter; short stem at I_p1 -> add "e").
    var self = this;
    // Insert "e" at pos, shifting bra/ket marks and restoring the cursor.
    function insert_e(pos) {
        var delta = BaseStemmer$replace_s$LBaseStemmer$IIS(self, pos, pos, "e");
        if (pos <= self.bra) {
            self.bra = (self.bra + delta) | 0;
        }
        if (pos <= self.ket) {
            self.ket = (self.ket + delta) | 0;
        }
        self.cursor = pos;
    }
    this.ket = this.cursor;
    var among_var = BaseStemmer$find_among_b$LBaseStemmer$ALAmong$I(this, PorterStemmer.a_2, 3);
    if (among_var === 0) {
        return false;
    }
    this.bra = this.cursor;
    if (among_var === 1) {
        // "eed" -> "ee", but only inside R1.
        if (!(this.I_p1 <= this.cursor)) {
            return false;
        }
        return BaseStemmer$slice_from$LBaseStemmer$S(this, "ee") ? true : false;
    }
    if (among_var === 2) {
        // "ed"/"ing": the remainder must still contain a vowel.
        var v_1 = (this.limit - this.cursor) | 0;
        while (!BaseStemmer$in_grouping_b$LBaseStemmer$AIII(this, PorterStemmer.g_v, 97, 121)) {
            if (this.cursor <= this.limit_backward) {
                return false;
            }
            this.cursor = (this.cursor - 1) | 0;
        }
        this.cursor = (this.limit - v_1) | 0;
        if (!BaseStemmer$slice_from$LBaseStemmer$S(this, "")) {
            return false;
        }
        var v_3 = (this.limit - this.cursor) | 0;
        among_var = BaseStemmer$find_among_b$LBaseStemmer$ALAmong$I(this, PorterStemmer.a_1, 13);
        if (among_var === 0) {
            return false;
        }
        this.cursor = (this.limit - v_3) | 0;
        if (among_var === 1) {
            // "at"/"bl"/"iz": append "e".
            insert_e(this.cursor);
        } else if (among_var === 2) {
            // Doubled consonant: delete the final letter.
            this.ket = this.cursor;
            if (this.cursor <= this.limit_backward) {
                return false;
            }
            this.cursor = (this.cursor - 1) | 0;
            this.bra = this.cursor;
            if (!BaseStemmer$slice_from$LBaseStemmer$S(this, "")) {
                return false;
            }
        } else if (among_var === 3) {
            // Stem ends exactly at I_p1 and passes the short-vowel test.
            if (this.cursor !== this.I_p1) {
                return false;
            }
            var v_4 = (this.limit - this.cursor) | 0;
            if (!PorterStemmer$r_shortv$LPorterStemmer$(this)) {
                return false;
            }
            this.cursor = (this.limit - v_4) | 0;
            insert_e(this.cursor);
        }
    }
    return true;
};
PorterStemmer.prototype.r_Step_1b = PorterStemmer.prototype.r_Step_1b$;
function PorterStemmer$r_Step_1b$LPorterStemmer$($this) {
    // Static twin of r_Step_1b$: "-eed"/"-ed"/"-ing" handling (table a_2)
    // followed by stem cleanup via table a_1.
    // Insert "e" at pos, shifting bra/ket marks and restoring the cursor.
    function insert_e(pos) {
        var delta = BaseStemmer$replace_s$LBaseStemmer$IIS($this, pos, pos, "e");
        if (pos <= $this.bra) {
            $this.bra = ($this.bra + delta) | 0;
        }
        if (pos <= $this.ket) {
            $this.ket = ($this.ket + delta) | 0;
        }
        $this.cursor = pos;
    }
    $this.ket = $this.cursor;
    var among_var = BaseStemmer$find_among_b$LBaseStemmer$ALAmong$I($this, PorterStemmer.a_2, 3);
    if (among_var === 0) {
        return false;
    }
    $this.bra = $this.cursor;
    if (among_var === 1) {
        // "eed" -> "ee", but only inside R1.
        if (!($this.I_p1 <= $this.cursor)) {
            return false;
        }
        return BaseStemmer$slice_from$LBaseStemmer$S($this, "ee") ? true : false;
    }
    if (among_var === 2) {
        // "ed"/"ing": the remainder must still contain a vowel.
        var v_1 = ($this.limit - $this.cursor) | 0;
        while (!BaseStemmer$in_grouping_b$LBaseStemmer$AIII($this, PorterStemmer.g_v, 97, 121)) {
            if ($this.cursor <= $this.limit_backward) {
                return false;
            }
            $this.cursor = ($this.cursor - 1) | 0;
        }
        $this.cursor = ($this.limit - v_1) | 0;
        if (!BaseStemmer$slice_from$LBaseStemmer$S($this, "")) {
            return false;
        }
        var v_3 = ($this.limit - $this.cursor) | 0;
        among_var = BaseStemmer$find_among_b$LBaseStemmer$ALAmong$I($this, PorterStemmer.a_1, 13);
        if (among_var === 0) {
            return false;
        }
        $this.cursor = ($this.limit - v_3) | 0;
        if (among_var === 1) {
            // "at"/"bl"/"iz": append "e".
            insert_e($this.cursor);
        } else if (among_var === 2) {
            // Doubled consonant: delete the final letter.
            $this.ket = $this.cursor;
            if ($this.cursor <= $this.limit_backward) {
                return false;
            }
            $this.cursor = ($this.cursor - 1) | 0;
            $this.bra = $this.cursor;
            if (!BaseStemmer$slice_from$LBaseStemmer$S($this, "")) {
                return false;
            }
        } else if (among_var === 3) {
            // Stem ends exactly at I_p1 and passes the short-vowel test.
            if ($this.cursor !== $this.I_p1) {
                return false;
            }
            var v_4 = ($this.limit - $this.cursor) | 0;
            if (!PorterStemmer$r_shortv$LPorterStemmer$($this)) {
                return false;
            }
            $this.cursor = ($this.limit - v_4) | 0;
            insert_e($this.cursor);
        }
    }
    return true;
}
PorterStemmer.r_Step_1b$LPorterStemmer$ = PorterStemmer$r_Step_1b$LPorterStemmer$;
PorterStemmer.prototype.r_Step_1c$ = function () {
    // Porter step 1c: terminal "y" (or the "Y" marker) becomes "i" when a
    // vowel occurs somewhere earlier in the word.
    this.ket = this.cursor;
    var v_1 = (this.limit - this.cursor) | 0;
    if (!BaseStemmer$eq_s_b$LBaseStemmer$IS(this, 1, "y")) {
        this.cursor = (this.limit - v_1) | 0;
        if (!BaseStemmer$eq_s_b$LBaseStemmer$IS(this, 1, "Y")) {
            return false;
        }
    }
    this.bra = this.cursor;
    // Walk backwards until a vowel is found; fail at the backward limit.
    while (!BaseStemmer$in_grouping_b$LBaseStemmer$AIII(this, PorterStemmer.g_v, 97, 121)) {
        if (this.cursor <= this.limit_backward) {
            return false;
        }
        this.cursor = (this.cursor - 1) | 0;
    }
    return BaseStemmer$slice_from$LBaseStemmer$S(this, "i") ? true : false;
};
PorterStemmer.prototype.r_Step_1c = PorterStemmer.prototype.r_Step_1c$;
function PorterStemmer$r_Step_1c$LPorterStemmer$($this) {
    // Static twin of r_Step_1c$: terminal "y"/"Y" -> "i" when a vowel
    // occurs somewhere earlier in the word.
    $this.ket = $this.cursor;
    var v_1 = ($this.limit - $this.cursor) | 0;
    if (!BaseStemmer$eq_s_b$LBaseStemmer$IS($this, 1, "y")) {
        $this.cursor = ($this.limit - v_1) | 0;
        if (!BaseStemmer$eq_s_b$LBaseStemmer$IS($this, 1, "Y")) {
            return false;
        }
    }
    $this.bra = $this.cursor;
    // Walk backwards until a vowel is found; fail at the backward limit.
    while (!BaseStemmer$in_grouping_b$LBaseStemmer$AIII($this, PorterStemmer.g_v, 97, 121)) {
        if ($this.cursor <= $this.limit_backward) {
            return false;
        }
        $this.cursor = ($this.cursor - 1) | 0;
    }
    return BaseStemmer$slice_from$LBaseStemmer$S($this, "i") ? true : false;
}
PorterStemmer.r_Step_1c$LPorterStemmer$ = PorterStemmer$r_Step_1c$LPorterStemmer$;
PorterStemmer.prototype.r_Step_2$ = function () {
    // Porter step 2: rewrite the double suffixes of table a_3 (results
    // 1..14) to their short forms, only when the match lies inside R1.
    this.ket = this.cursor;
    var among_var = BaseStemmer$find_among_b$LBaseStemmer$ALAmong$I(this, PorterStemmer.a_3, 20);
    if (among_var === 0) {
        return false;
    }
    this.bra = this.cursor;
    if (!(this.I_p1 <= this.cursor)) {
        return false;
    }
    // Index = among result; slot 0 is unused (handled above).
    var replacements = ["", "tion", "ence", "ance", "able", "ent", "e",
        "ize", "ate", "al", "al", "ful", "ous", "ive", "ble"];
    var repl = replacements[among_var];
    if (repl === undefined) {
        return true;
    }
    return BaseStemmer$slice_from$LBaseStemmer$S(this, repl) ? true : false;
};
PorterStemmer.prototype.r_Step_2 = PorterStemmer.prototype.r_Step_2$;
function PorterStemmer$r_Step_2$LPorterStemmer$($this) {
    // Static twin of r_Step_2$: table a_3 double-suffix rewrites in R1.
    $this.ket = $this.cursor;
    var among_var = BaseStemmer$find_among_b$LBaseStemmer$ALAmong$I($this, PorterStemmer.a_3, 20);
    if (among_var === 0) {
        return false;
    }
    $this.bra = $this.cursor;
    if (!($this.I_p1 <= $this.cursor)) {
        return false;
    }
    // Index = among result; slot 0 is unused (handled above).
    var replacements = ["", "tion", "ence", "ance", "able", "ent", "e",
        "ize", "ate", "al", "al", "ful", "ous", "ive", "ble"];
    var repl = replacements[among_var];
    if (repl === undefined) {
        return true;
    }
    return BaseStemmer$slice_from$LBaseStemmer$S($this, repl) ? true : false;
}
PorterStemmer.r_Step_2$LPorterStemmer$ = PorterStemmer$r_Step_2$LPorterStemmer$;
PorterStemmer.prototype.r_Step_3$ = function () {
    // Porter step 3 (table a_4): alize -> al; icate/iciti/ical -> ic;
    // ative/ful/ness -> deleted. Only inside R1.
    this.ket = this.cursor;
    var among_var = BaseStemmer$find_among_b$LBaseStemmer$ALAmong$I(this, PorterStemmer.a_4, 7);
    if (among_var === 0) {
        return false;
    }
    this.bra = this.cursor;
    if (!(this.I_p1 <= this.cursor)) {
        return false;
    }
    // Index = among result; slot 0 is unused (handled above).
    var repl = ["", "al", "ic", ""][among_var];
    if (repl === undefined) {
        return true;
    }
    return BaseStemmer$slice_from$LBaseStemmer$S(this, repl) ? true : false;
};
PorterStemmer.prototype.r_Step_3 = PorterStemmer.prototype.r_Step_3$;
function PorterStemmer$r_Step_3$LPorterStemmer$($this) {
    // Static twin of r_Step_3$: table a_4 rewrites inside R1.
    $this.ket = $this.cursor;
    var among_var = BaseStemmer$find_among_b$LBaseStemmer$ALAmong$I($this, PorterStemmer.a_4, 7);
    if (among_var === 0) {
        return false;
    }
    $this.bra = $this.cursor;
    if (!($this.I_p1 <= $this.cursor)) {
        return false;
    }
    // Index = among result; slot 0 is unused (handled above).
    var repl = ["", "al", "ic", ""][among_var];
    if (repl === undefined) {
        return true;
    }
    return BaseStemmer$slice_from$LBaseStemmer$S($this, repl) ? true : false;
}
PorterStemmer.r_Step_3$LPorterStemmer$ = PorterStemmer$r_Step_3$LPorterStemmer$;
PorterStemmer.prototype.r_Step_4$ = function () {
    // Porter step 4: delete the residual suffixes of table a_5 inside R2;
    // "ion" (result 2) is removed only when preceded by "s" or "t".
    this.ket = this.cursor;
    var among_var = BaseStemmer$find_among_b$LBaseStemmer$ALAmong$I(this, PorterStemmer.a_5, 19);
    if (among_var === 0) {
        return false;
    }
    this.bra = this.cursor;
    if (!(this.I_p2 <= this.cursor)) {
        return false;
    }
    if (among_var === 1) {
        return BaseStemmer$slice_from$LBaseStemmer$S(this, "") ? true : false;
    }
    if (among_var === 2) {
        var v_1 = (this.limit - this.cursor) | 0;
        if (!BaseStemmer$eq_s_b$LBaseStemmer$IS(this, 1, "s")) {
            this.cursor = (this.limit - v_1) | 0;
            if (!BaseStemmer$eq_s_b$LBaseStemmer$IS(this, 1, "t")) {
                return false;
            }
        }
        return BaseStemmer$slice_from$LBaseStemmer$S(this, "") ? true : false;
    }
    return true;
};
PorterStemmer.prototype.r_Step_4 = PorterStemmer.prototype.r_Step_4$;
function PorterStemmer$r_Step_4$LPorterStemmer$($this) {
    // Static twin of r_Step_4$: delete table-a_5 suffixes inside R2;
    // "ion" only after "s" or "t".
    $this.ket = $this.cursor;
    var among_var = BaseStemmer$find_among_b$LBaseStemmer$ALAmong$I($this, PorterStemmer.a_5, 19);
    if (among_var === 0) {
        return false;
    }
    $this.bra = $this.cursor;
    if (!($this.I_p2 <= $this.cursor)) {
        return false;
    }
    if (among_var === 1) {
        return BaseStemmer$slice_from$LBaseStemmer$S($this, "") ? true : false;
    }
    if (among_var === 2) {
        var v_1 = ($this.limit - $this.cursor) | 0;
        if (!BaseStemmer$eq_s_b$LBaseStemmer$IS($this, 1, "s")) {
            $this.cursor = ($this.limit - v_1) | 0;
            if (!BaseStemmer$eq_s_b$LBaseStemmer$IS($this, 1, "t")) {
                return false;
            }
        }
        return BaseStemmer$slice_from$LBaseStemmer$S($this, "") ? true : false;
    }
    return true;
}
PorterStemmer.r_Step_4$LPorterStemmer$ = PorterStemmer$r_Step_4$LPorterStemmer$;
PorterStemmer.prototype.r_Step_5a$ = function () {
    // Porter step 5a: drop a final "e" when in R2, or when in R1 and the
    // stem does not end with a short vowel (r_shortv must fail).
    this.ket = this.cursor;
    if (!BaseStemmer$eq_s_b$LBaseStemmer$IS(this, 1, "e")) {
        return false;
    }
    this.bra = this.cursor;
    var v_1 = (this.limit - this.cursor) | 0;
    if (!(this.I_p2 <= this.cursor)) {
        this.cursor = (this.limit - v_1) | 0;
        if (!(this.I_p1 <= this.cursor)) {
            return false;
        }
        var v_2 = (this.limit - this.cursor) | 0;
        if (PorterStemmer$r_shortv$LPorterStemmer$(this)) {
            return false;
        }
        this.cursor = (this.limit - v_2) | 0;
    }
    return BaseStemmer$slice_from$LBaseStemmer$S(this, "") ? true : false;
};
PorterStemmer.prototype.r_Step_5a = PorterStemmer.prototype.r_Step_5a$;
function PorterStemmer$r_Step_5a$LPorterStemmer$($this) {
    // Static twin of r_Step_5a$: drop a final "e" in R2, or in R1 when
    // the short-vowel test fails.
    $this.ket = $this.cursor;
    if (!BaseStemmer$eq_s_b$LBaseStemmer$IS($this, 1, "e")) {
        return false;
    }
    $this.bra = $this.cursor;
    var v_1 = ($this.limit - $this.cursor) | 0;
    if (!($this.I_p2 <= $this.cursor)) {
        $this.cursor = ($this.limit - v_1) | 0;
        if (!($this.I_p1 <= $this.cursor)) {
            return false;
        }
        var v_2 = ($this.limit - $this.cursor) | 0;
        if (PorterStemmer$r_shortv$LPorterStemmer$($this)) {
            return false;
        }
        $this.cursor = ($this.limit - v_2) | 0;
    }
    return BaseStemmer$slice_from$LBaseStemmer$S($this, "") ? true : false;
}
PorterStemmer.r_Step_5a$LPorterStemmer$ = PorterStemmer$r_Step_5a$LPorterStemmer$;
PorterStemmer.prototype.r_Step_5b$ = function () {
    // Porter step 5b: "ll" -> "l" inside R2 (delete one trailing "l").
    this.ket = this.cursor;
    if (!BaseStemmer$eq_s_b$LBaseStemmer$IS(this, 1, "l")) {
        return false;
    }
    this.bra = this.cursor;
    if (!(this.I_p2 <= this.cursor)) {
        return false;
    }
    if (!BaseStemmer$eq_s_b$LBaseStemmer$IS(this, 1, "l")) {
        return false;
    }
    return BaseStemmer$slice_from$LBaseStemmer$S(this, "") ? true : false;
};
PorterStemmer.prototype.r_Step_5b = PorterStemmer.prototype.r_Step_5b$;
function PorterStemmer$r_Step_5b$LPorterStemmer$($this) {
    // Static twin of r_Step_5b$: "ll" -> "l" inside R2.
    $this.ket = $this.cursor;
    if (!BaseStemmer$eq_s_b$LBaseStemmer$IS($this, 1, "l")) {
        return false;
    }
    $this.bra = $this.cursor;
    if (!($this.I_p2 <= $this.cursor)) {
        return false;
    }
    if (!BaseStemmer$eq_s_b$LBaseStemmer$IS($this, 1, "l")) {
        return false;
    }
    return BaseStemmer$slice_from$LBaseStemmer$S($this, "") ? true : false;
}
PorterStemmer.r_Step_5b$LPorterStemmer$ = PorterStemmer$r_Step_5b$LPorterStemmer$;
PorterStemmer.prototype.stem$ = function () {
    // Porter stemming driver: mark y -> Y, compute I_p1/I_p2, run steps
    // 1a..5b scanning from the end of the word, then restore Y -> y.
    var v_1;
    var v_2;
    var v_3;
    var v_4;
    var saved;
    var found;
    this.B_Y_found = false;
    // --- phase 1: an initial "y" becomes the marker "Y" -----------------
    v_1 = this.cursor;
    this.bra = this.cursor;
    if (BaseStemmer$eq_s$LBaseStemmer$IS(this, 1, "y")) {
        this.ket = this.cursor;
        if (! BaseStemmer$slice_from$LBaseStemmer$S(this, "Y")) {
            return false;
        }
        this.B_Y_found = true;
    }
    this.cursor = v_1;
    // --- phase 2: every vowel-preceded "y" becomes "Y" ------------------
    v_2 = this.cursor;
    replab: while (true) {
        v_3 = this.cursor;
        found = false;
        while (true) {
            v_4 = this.cursor;
            if (BaseStemmer$in_grouping$LBaseStemmer$AIII(this, PorterStemmer.g_v, 97, 121)) {
                this.bra = this.cursor;
                if (BaseStemmer$eq_s$LBaseStemmer$IS(this, 1, "y")) {
                    this.ket = this.cursor;
                    this.cursor = v_4;
                    found = true;
                    break;
                }
            }
            this.cursor = v_4;
            if (this.cursor >= this.limit) {
                break;
            }
            this.cursor = (this.cursor + 1) | 0;
        }
        if (! found) {
            this.cursor = v_3;
            break replab;
        }
        if (! BaseStemmer$slice_from$LBaseStemmer$S(this, "Y")) {
            return false;
        }
        this.B_Y_found = true;
    }
    this.cursor = v_2;
    // --- phase 3: locate I_p1 and I_p2 (vowel/non-vowel boundaries) -----
    this.I_p1 = this.limit;
    this.I_p2 = this.limit;
    saved = this.cursor;
    marks: while (true) {
        while (! BaseStemmer$in_grouping$LBaseStemmer$AIII(this, PorterStemmer.g_v, 97, 121)) {
            if (this.cursor >= this.limit) {
                break marks;
            }
            this.cursor = (this.cursor + 1) | 0;
        }
        while (! BaseStemmer$out_grouping$LBaseStemmer$AIII(this, PorterStemmer.g_v, 97, 121)) {
            if (this.cursor >= this.limit) {
                break marks;
            }
            this.cursor = (this.cursor + 1) | 0;
        }
        this.I_p1 = this.cursor;
        while (! BaseStemmer$in_grouping$LBaseStemmer$AIII(this, PorterStemmer.g_v, 97, 121)) {
            if (this.cursor >= this.limit) {
                break marks;
            }
            this.cursor = (this.cursor + 1) | 0;
        }
        while (! BaseStemmer$out_grouping$LBaseStemmer$AIII(this, PorterStemmer.g_v, 97, 121)) {
            if (this.cursor >= this.limit) {
                break marks;
            }
            this.cursor = (this.cursor + 1) | 0;
        }
        this.I_p2 = this.cursor;
        break marks;
    }
    this.cursor = saved;
    // --- phase 4: run the suffix steps from the end of the word ---------
    this.limit_backward = this.cursor;
    this.cursor = this.limit;
    // Steps 1a..5a: each runs with the cursor reset to the (possibly
    // shortened) end of the word afterwards; results are ignored.
    var steps = [
        PorterStemmer$r_Step_1a$LPorterStemmer$,
        PorterStemmer$r_Step_1b$LPorterStemmer$,
        PorterStemmer$r_Step_1c$LPorterStemmer$,
        PorterStemmer$r_Step_2$LPorterStemmer$,
        PorterStemmer$r_Step_3$LPorterStemmer$,
        PorterStemmer$r_Step_4$LPorterStemmer$,
        PorterStemmer$r_Step_5a$LPorterStemmer$
    ];
    for (var s = 0; s < steps.length; s++) {
        var mark = (this.limit - this.cursor) | 0;
        steps[s](this);
        this.cursor = (this.limit - mark) | 0;
    }
    PorterStemmer$r_Step_5b$LPorterStemmer$(this);
    this.cursor = this.limit_backward;
    // --- phase 5: turn the "Y" markers back into "y" --------------------
    saved = this.cursor;
    if (this.B_Y_found) {
        replab2: while (true) {
            v_3 = this.cursor;
            found = false;
            while (true) {
                v_4 = this.cursor;
                this.bra = this.cursor;
                if (BaseStemmer$eq_s$LBaseStemmer$IS(this, 1, "Y")) {
                    this.ket = this.cursor;
                    this.cursor = v_4;
                    found = true;
                    break;
                }
                this.cursor = v_4;
                if (this.cursor >= this.limit) {
                    break;
                }
                this.cursor = (this.cursor + 1) | 0;
            }
            if (! found) {
                this.cursor = v_3;
                break replab2;
            }
            if (! BaseStemmer$slice_from$LBaseStemmer$S(this, "y")) {
                return false;
            }
        }
    }
    this.cursor = saved;
    return true;
};
PorterStemmer.prototype.stem = PorterStemmer.prototype.stem$;
PorterStemmer.prototype.equals$X = function (o) {
    // Equality is purely type identity for stemmer instances.
    if (o instanceof PorterStemmer) {
        return true;
    }
    return false;
};
PorterStemmer.prototype.equals = PorterStemmer.prototype.equals$X;
function PorterStemmer$equals$LPorterStemmer$X($this, o) {
    // Static twin of equals$X; $this is never read.
    return (o instanceof PorterStemmer) ? true : false;
}
PorterStemmer.equals$LPorterStemmer$X = PorterStemmer$equals$LPorterStemmer$X;
PorterStemmer.prototype.hashCode$ = function () {
    // 31x string hash of the class name ((h << 5) - h === 31 * h),
    // truncated to a signed 32-bit integer at every step.
    var classname = "PorterStemmer";
    var hash = 0;
    var i = 0;
    while (i < classname.length) {
        hash = (hash << 5) - hash + classname.charCodeAt(i);
        hash = hash & hash;  // coerce to signed 32-bit
        i = (i + 1) | 0;
    }
    return (hash | 0);
};
PorterStemmer.prototype.hashCode = PorterStemmer.prototype.hashCode$;
function PorterStemmer$hashCode$LPorterStemmer$($this) {
    // Static twin of hashCode$; $this is never read. Computes the 31x
    // string hash of "PorterStemmer" in signed 32-bit arithmetic.
    var name = "PorterStemmer";
    var h = 0;
    for (var i = 0; i < name.length; i++) {
        // (h << 5) - h === 31 * h; "| 0" coerces to signed 32-bit exactly
        // like the original's "hash & hash".
        h = ((h << 5) - h + name.charCodeAt(i)) | 0;
    }
    return (h | 0);
}
PorterStemmer.hashCode$LPorterStemmer$ = PorterStemmer$hashCode$LPorterStemmer$;
/* Serialization marker kept for parity with the Java implementation. */
PorterStemmer.serialVersionUID = 1;
/* Shared singleton used by the generated static entry points. */
$__jsx_lazy_init(PorterStemmer, "methodObject", function () {
  return new PorterStemmer();
});
/* Snowball "among" tables: (suffix, link-to-shorter-entry index, action)
 * triples consumed by the stemming steps. Entry order and the numeric
 * index links between entries are significant — do not reorder. */
$__jsx_lazy_init(PorterStemmer, "a_0", function () {
  return [
    new Among("s", -1, 3),
    new Among("ies", 0, 2),
    new Among("sses", 0, 1),
    new Among("ss", 0, -1)
  ];
});
$__jsx_lazy_init(PorterStemmer, "a_1", function () {
  return [
    new Among("", -1, 3),
    new Among("bb", 0, 2),
    new Among("dd", 0, 2),
    new Among("ff", 0, 2),
    new Among("gg", 0, 2),
    new Among("bl", 0, 1),
    new Among("mm", 0, 2),
    new Among("nn", 0, 2),
    new Among("pp", 0, 2),
    new Among("rr", 0, 2),
    new Among("at", 0, 1),
    new Among("tt", 0, 2),
    new Among("iz", 0, 1)
  ];
});
$__jsx_lazy_init(PorterStemmer, "a_2", function () {
  return [
    new Among("ed", -1, 2),
    new Among("eed", 0, 1),
    new Among("ing", -1, 2)
  ];
});
$__jsx_lazy_init(PorterStemmer, "a_3", function () {
  return [
    new Among("anci", -1, 3),
    new Among("enci", -1, 2),
    new Among("abli", -1, 4),
    new Among("eli", -1, 6),
    new Among("alli", -1, 9),
    new Among("ousli", -1, 12),
    new Among("entli", -1, 5),
    new Among("aliti", -1, 10),
    new Among("biliti", -1, 14),
    new Among("iviti", -1, 13),
    new Among("tional", -1, 1),
    new Among("ational", 10, 8),
    new Among("alism", -1, 10),
    new Among("ation", -1, 8),
    new Among("ization", 13, 7),
    new Among("izer", -1, 7),
    new Among("ator", -1, 8),
    new Among("iveness", -1, 13),
    new Among("fulness", -1, 11),
    new Among("ousness", -1, 12)
  ];
});
$__jsx_lazy_init(PorterStemmer, "a_4", function () {
  return [
    new Among("icate", -1, 2),
    new Among("ative", -1, 3),
    new Among("alize", -1, 1),
    new Among("iciti", -1, 2),
    new Among("ical", -1, 2),
    new Among("ful", -1, 3),
    new Among("ness", -1, 3)
  ];
});
$__jsx_lazy_init(PorterStemmer, "a_5", function () {
  return [
    new Among("ic", -1, 1),
    new Among("ance", -1, 1),
    new Among("ence", -1, 1),
    new Among("able", -1, 1),
    new Among("ible", -1, 1),
    new Among("ate", -1, 1),
    new Among("ive", -1, 1),
    new Among("ize", -1, 1),
    new Among("iti", -1, 1),
    new Among("al", -1, 1),
    new Among("ism", -1, 1),
    new Among("ion", -1, 2),
    new Among("er", -1, 1),
    new Among("ous", -1, 1),
    new Among("ant", -1, 1),
    new Among("ent", -1, 1),
    new Among("ment", 15, 1),
    new Among("ement", 16, 1),
    new Among("ou", -1, 1)
  ];
});
/* Snowball grouping bitmaps: vowels, and vowels plus w/x/Y. */
PorterStemmer.g_v = [ 17, 65, 16, 1 ];
PorterStemmer.g_v_WXY = [ 1, 17, 65, 208, 1 ];
/* Module export table consumed by JSX.require below: maps each original
 * source file to the constructors (and generated constructor-overload
 * aliases) it defines. */
var $__jsx_classMap = {
  "src/among.jsx": {
    Among: Among,
    Among$SII: Among,
    Among$SIIF$LBaseStemmer$B$LBaseStemmer$: Among$0
  },
  "src/stemmer.jsx": {
    Stemmer: Stemmer,
    Stemmer$: Stemmer
  },
  "src/base-stemmer.jsx": {
    BaseStemmer: BaseStemmer,
    BaseStemmer$: BaseStemmer
  },
  "src/porter-stemmer.jsx": {
    PorterStemmer: PorterStemmer,
    PorterStemmer$: PorterStemmer
  }
};
})(JSX);
var Among = JSX.require("src/among.jsx").Among;
var Among$SII = JSX.require("src/among.jsx").Among$SII;
var Stemmer = JSX.require("src/stemmer.jsx").Stemmer;
var BaseStemmer = JSX.require("src/base-stemmer.jsx").BaseStemmer;
var PorterStemmer = JSX.require("src/porter-stemmer.jsx").PorterStemmer; | PypiClean |
/B9gemyaeix-4.14.1.tar.gz/B9gemyaeix-4.14.1/weblate/static/loader-bootstrap.js | var loading = [];
// Remove some weird things from location hash
// Quotes or "=" can never appear in Weblate's own anchors (they are
// typically leftovers of auth redirects), so drop the hash entirely
// before the tab-restoring code below acts on it.
if (
  window.location.hash &&
  (window.location.hash.indexOf('"') > -1 ||
    window.location.hash.indexOf("=") > -1)
) {
  window.location.hash = "";
}
// Loading indicator handler
/**
 * Show the "#loading-<sel>" indicator and bump its reference count.
 *
 * Several concurrent requests may share one indicator: it becomes visible
 * when the count rises from zero and is hidden again by decreaseLoading()
 * once the count drops back to zero.
 */
function increaseLoading(sel) {
  var count = loading[sel];
  if (count === undefined) {
    count = 0;
  }
  if (count === 0) {
    $("#loading-" + sel).show();
  }
  loading[sel] = count + 1;
}
/**
 * Drop one reference to the "#loading-<sel>" indicator, hiding it when
 * the last pending operation (counted by increaseLoading) finishes.
 */
function decreaseLoading(sel) {
  loading[sel] -= 1;
  if (loading[sel] !== 0) {
    return;
  }
  $("#loading-" + sel).hide();
}
/**
 * Display a dismissible Bootstrap alert in the floating #popup-alerts area.
 *
 * @param {string} message  Text shown to the user (inserted as a text node,
 *                          so it is never interpreted as HTML).
 * @param {string} kind     Bootstrap contextual suffix (danger, info, …).
 * @param {number} delay    Auto-close timeout in ms; falsy keeps it open.
 */
function addAlert(message, kind = "danger", delay = 3000) {
  var $container = $("#popup-alerts");
  var $alert = $(
    '<div class="alert alert-dismissible" role="alert"><button type="button" class="close" data-dismiss="alert" aria-label="Close"><span aria-hidden="true">×</span></button></div>'
  )
    .addClass("alert-" + kind)
    .append(new Text(message))
    .hide();
  $container.show().append($alert);
  $alert.slideDown(200);
  // Collapse the container once its last alert has been dismissed.
  $alert.on("closed.bs.alert", function () {
    if ($container.find(".alert").length == 0) {
      $container.hide();
    }
  });
  if (delay) {
    $alert.delay(delay).slideUp(200, function () {
      $(this).alert("close");
    });
  }
}
jQuery.fn.extend({
  /**
   * Insert `myValue` at the caret position of each matched input/textarea,
   * replacing the current selection if any, then restore focus, caret and
   * scroll position and fire bubbling input/change events.
   */
  insertAtCaret: function (myValue) {
    return this.each(function () {
      if (document.selection) {
        // For browsers like Internet Explorer
        this.focus();
        let sel = document.selection.createRange();
        sel.text = myValue;
        this.focus();
      } else if (this.selectionStart || this.selectionStart === 0) {
        //For browsers like Firefox and Webkit based
        let startPos = this.selectionStart;
        let endPos = this.selectionEnd;
        let scrollTop = this.scrollTop;
        this.value =
          this.value.substring(0, startPos) +
          myValue +
          this.value.substring(endPos, this.value.length);
        this.focus();
        // Place the caret right after the inserted text.
        this.selectionStart = startPos + myValue.length;
        this.selectionEnd = startPos + myValue.length;
        this.scrollTop = scrollTop;
      } else {
        // No selection API at all: append at the end.
        this.value += myValue;
        this.focus();
      }
      // Need `bubbles` because some event listeners (like this
      // https://github.com/WeblateOrg/weblate/blob/86d4fb308c9941f32b48f007e16e8c153b0f3fd7/weblate/static/editor/base.js#L50
      // ) are attached to the parent elements.
      this.dispatchEvent(new Event("input", { bubbles: true }));
      this.dispatchEvent(new Event("change", { bubbles: true }));
    });
  },
  /**
   * Replace the whole value of each matched element and fire bubbling
   * input/change events, unlike a plain .val() call which fires neither.
   */
  replaceValue: function (myValue) {
    return this.each(function () {
      this.value = myValue;
      // Need `bubbles` because some event listeners (like this
      // https://github.com/WeblateOrg/weblate/blob/86d4fb308c9941f32b48f007e16e8c153b0f3fd7/weblate/static/editor/base.js#L50
      // ) are attached to the parent elements.
      this.dispatchEvent(new Event("input", { bubbles: true }));
      this.dispatchEvent(new Event("change", { bubbles: true }));
    });
  },
});
/**
 * Keyboard shortcut handler that submits the form around the event target.
 *
 * Falls back to the main translation form when the target sits outside any
 * form. The first submit control is clicked (rather than calling submit())
 * so that button values and attached click handlers still take effect.
 * Always returns false so Mousetrap cancels the key event.
 */
function submitForm(evt) {
  var $form = $(evt.target).closest("form");
  if (!$form.length) {
    $form = $(".translation-form");
  }
  if (!$form.length) {
    return false;
  }
  var $submits = $form.find('input[type="submit"]');
  if (!$submits.length) {
    $submits = $form.find('button[type="submit"]');
  }
  if ($submits.length) {
    $submits[0].click();
  }
  return false;
}
Mousetrap.bindGlobal(["alt+enter", "mod+enter"], submitForm);
/* Clear previous screenshot search results and show the loading indicator. */
function screenshotStart() {
  $("#search-results tbody.unit-listing-body").empty();
  increaseLoading("screenshots");
}
/* AJAX error fallback: reuse the result handler with a non-200 status so it
 * renders the generic failure row. */
function screenshotFailure() {
  screenshotLoaded({ responseCode: 500 });
}
/**
 * Click handler for "add string" buttons in screenshot search results.
 *
 * Stores the clicked unit's primary key in the hidden #add-source field,
 * POSTs #screenshot-add-form, and on success re-fetches the source string
 * listing so the new association shows up; failures surface as alerts.
 */
function screenshotAddString() {
  var $form = $("#screenshot-add-form");
  $("#add-source").val($(this).data("pk"));
  $.ajax({
    type: "POST",
    url: $form.attr("action"),
    data: $form.serialize(),
    dataType: "json",
    success: function () {
      var $listing = $("#sources-listing");
      $.get($listing.data("href"), function (payload) {
        $listing.find("table").replaceWith(payload);
      });
    },
    error: function (jqXHR, textStatus, errorThrown) {
      addAlert(errorThrown);
    },
  });
}
/* Replace the screenshot search results with a single full-width status row
 * of the given Bootstrap severity class.
 * NOTE(review): the name is missing an "e" (screnshot…); kept as-is because
 * screenshotLoaded() below calls it under this exact name. */
function screnshotResultError(severity, message) {
  $("#search-results tbody.unit-listing-body").html(
    $("<tr/>").addClass(severity).html($('<td colspan="4"></td>').text(message))
  );
}
/**
 * Render the outcome of a screenshot source-string search.
 *
 * @param data  Server payload with {responseCode, results}, or the
 *              synthetic {responseCode: 500} object from screenshotFailure.
 */
function screenshotLoaded(data) {
  decreaseLoading("screenshots");
  if (data.responseCode !== 200) {
    screnshotResultError("danger", gettext("Error loading search results!"));
    return;
  }
  if (data.results.length === 0) {
    screnshotResultError(
      "warning",
      gettext("No new matching source strings found.")
    );
    return;
  }
  $("#search-results table").replaceWith(data.results);
  $("#search-results").find(".add-string").click(screenshotAddString);
}
/* Loose numeric test: true when the value parses as a finite number,
 * accepting numeric strings such as "3.5" or "1e3". */
function isNumber(n) {
  var parsed = parseFloat(n);
  return !isNaN(parsed) && isFinite(n);
}
/* Sort key for a table cell: prefer an explicit data-value attribute,
 * falling back to the cell's text content. */
function extractText(cell) {
  var explicit = $(cell).data("value");
  return typeof explicit === "undefined" ? $.text(cell) : explicit;
}
/**
 * Three-way comparison used by the client-side table sorter.
 *
 * Comparison rules, in order:
 *  - two numbers compare as-is;
 *  - two percentage strings ("42,5%") compare by numeric value, with
 *    decimal commas normalized to dots;
 *  - two numeric-looking strings compare numerically the same way;
 *  - two other strings compare case-insensitively;
 *  - mixed number/string pairs compare by their lowercased string form.
 *    The previous implementation threw a TypeError here, because it
 *    called indexOf()/replace() on the numeric operand.
 *
 * @returns {number} 0 when equal, 1 when a > b, -1 otherwise.
 */
function compareCells(a, b) {
  if (typeof a === "number" && typeof b === "number") {
    // Compare numerically without any conversion.
  } else if (typeof a === "string" && typeof b === "string") {
    if (a.indexOf("%") !== -1 && b.indexOf("%") !== -1) {
      a = parseFloat(a.replace(",", "."));
      b = parseFloat(b.replace(",", "."));
    } else if (isNumber(a) && isNumber(b)) {
      a = parseFloat(a.replace(",", "."));
      b = parseFloat(b.replace(",", "."));
    } else {
      a = a.toLowerCase();
      b = b.toLowerCase();
    }
  } else {
    // Mixed types: fall back to case-insensitive string comparison.
    a = String(a).toLowerCase();
    b = String(b).toLowerCase();
  }
  if (a == b) {
    return 0;
  }
  if (a > b) {
    return 1;
  }
  return -1;
}
/**
 * Attach client-side sorting to every <table class="sort">.
 *
 * Each non-empty header cell gets a click handler that reorders the tbody
 * rows by the matching column, toggling ascending/descending on repeated
 * clicks. Rows carrying a data-parent attribute sort by their parent row's
 * cell, so detail rows stay grouped with their parent. Safe to call
 * repeatedly (e.g. after AJAX tab loads): processed headers are marked
 * with the sort-init class and skipped next time.
 */
function loadTableSorting() {
  $("table.sort").each(function () {
    var table = $(this),
      tbody = table.find("tbody"),
      thead = table.find("thead"),
      thIndex = 0;
    $(this)
      .find("thead th")
      .each(function () {
        var th = $(this),
          inverse = 1;
        // handle colspan
        if (th.attr("colspan")) {
          thIndex += parseInt(th.attr("colspan"), 10) - 1;
        }
        // skip empty cells and cells with icon (probably already processed)
        if (
          th.text() !== "" &&
          !th.hasClass("sort-init") &&
          !th.hasClass("sort-skip")
        ) {
          // Store index copy
          let myIndex = thIndex;
          // Add icon, title and class
          th.addClass("sort-init");
          if (!th.hasClass("sort-cell")) {
            // Skip statically initialized parts (when server side ordering is supported)
            th.attr("title", gettext("Sort this column"))
              .addClass("sort-cell")
              .append('<span class="sort-icon" />');
          }
          // Click handler
          th.click(function () {
            tbody
              .find("tr")
              .sort(function (a, b) {
                var $a = $(a),
                  $b = $(b);
                var a_parent = $a.data("parent"),
                  b_parent = $b.data("parent");
                // Detail rows sort by their parent row's cells.
                if (a_parent) {
                  $a = tbody.find("#" + a_parent);
                }
                if (b_parent) {
                  $b = tbody.find("#" + b_parent);
                }
                return (
                  inverse *
                  compareCells(
                    extractText($a.find("td,th")[myIndex]),
                    extractText($b.find("td,th")[myIndex])
                  )
                );
              })
              .appendTo(tbody);
            thead.find(".sort-icon").removeClass("sort-down sort-up");
            if (inverse === 1) {
              $(this).find(".sort-icon").addClass("sort-down");
            } else {
              $(this).find(".sort-icon").addClass("sort-up");
            }
            // Flip direction for the next click on this header.
            inverse = inverse * -1;
          });
        }
        // Increase index
        thIndex += 1;
      });
  });
}
/* Thin wrappers for django to avoid problems when i18n js can not be loaded */
/** Translate msgid via django's JS catalog, or return it untranslated. */
function gettext(msgid) {
  return typeof django === "undefined" ? msgid : django.gettext(msgid);
}
/** Context-aware translation wrapper; falls back to the raw msgid. */
function pgettext(context, msgid) {
  return typeof django === "undefined"
    ? msgid
    : django.pgettext(context, msgid);
}
/**
 * Wrapper around django.interpolate with a minimal fallback.
 *
 * The fallback ignores `named` and consumes `obj` left to right, shifting
 * one value off the array for every %s placeholder (mutating `obj`, just
 * like before).
 */
function interpolate(fmt, obj, named) {
  if (typeof django === "undefined") {
    return fmt.replace(/%s/g, function () {
      return String(obj.shift());
    });
  }
  return django.interpolate(fmt, obj, named);
}
/**
 * Infinite-scroll loader for the translation matrix view.
 *
 * Fetches the next batch of rows using the offset stored on #matrix-load
 * (advancing it by 20 per request) unless the final section marker is
 * already rendered or a load is still in flight (indicator visible).
 */
function load_matrix() {
  var $indicator = $("#loading-next");
  var $loader = $("#matrix-load");
  var offset = parseInt($loader.data("offset"));
  var busy = $indicator.css("display") !== "none";
  if ($("#last-section").length > 0 || busy) {
    return;
  }
  $indicator.show();
  $loader.data("offset", 20 + offset);
  $.get($loader.attr("href") + "&offset=" + offset, function (rows) {
    $indicator.hide();
    $(".matrix tbody").append(rows);
  });
}
/* Keep td.autocolspan cells spanning all visible header columns minus one,
 * defaulting to 3 columns when the whole header is hidden. */
function adjustColspan() {
  $("table.autocolspan").each(function () {
    var $table = $(this);
    var visible = $table.find("thead th:visible").length;
    if (visible === 0) {
      visible = 3;
    }
    $table.find("td.autocolspan").attr("colspan", visible - 1);
  });
}
/**
 * Quote a value for use in the search query language.
 *
 * Values without spaces need no quoting. Otherwise double quotes are
 * preferred, then single quotes; a value containing spaces plus both
 * quote characters is returned unchanged.
 */
function quoteSearch(value) {
  if (value.indexOf(" ") === -1) {
    return value;
  }
  var wrapper = "";
  if (value.indexOf('"') === -1) {
    wrapper = '"';
  } else if (value.indexOf("'") === -1) {
    wrapper = "'";
  }
  /* We should do some escaping here */
  return wrapper + value + wrapper;
}
/**
 * Wrap every .highlight-editor textarea under `root` in a div.editor-wrap
 * with a Prism-highlighted mirror element behind it, keeping the mirror's
 * content, scroll position and size in sync with the textarea.
 *
 * No-op on browsers without ResizeObserver; idempotent (already wrapped
 * editors are skipped).
 */
function initHighlight(root) {
  if (typeof ResizeObserver === "undefined") {
    return;
  }
  root.querySelectorAll(".highlight-editor").forEach(function (editor) {
    var parent = editor.parentElement;
    var hasFocus = editor == document.activeElement;
    if (parent.classList.contains("editor-wrap")) {
      return;
    }
    // Prism language key for this editor.
    var mode = editor.getAttribute("data-mode");

    /* Create wrapper element */
    var wrapper = document.createElement("div");
    wrapper.setAttribute("class", "editor-wrap");

    /* Inject wrapper */
    parent.replaceChild(wrapper, editor);

    /* Create highlighter */
    var highlight = document.createElement("div");
    highlight.setAttribute("class", "highlighted-output");
    if (editor.readOnly) {
      highlight.classList.add("readonly");
    }
    highlight.setAttribute("role", "status");
    // Mirror text direction and language so shaping/bidi match the editor.
    if (editor.hasAttribute("dir")) {
      highlight.setAttribute("dir", editor.getAttribute("dir"));
    }
    if (editor.hasAttribute("lang")) {
      highlight.setAttribute("lang", editor.getAttribute("lang"));
    }
    wrapper.appendChild(highlight);

    /* Add editor to wrapper */
    wrapper.appendChild(editor);
    // replaceChild/appendChild drop focus; restore it if the editor had it.
    if (hasFocus) {
      editor.focus();
    }

    /* Content synchronisation and highlighting */
    var languageMode = Prism.languages[mode];
    if (editor.classList.contains("translation-editor")) {
      let placeables = editor.getAttribute("data-placeables");
      /* This should match WHITESPACE_REGEX in weblate/trans/templatetags/translations.py */
      let whitespace_regex = new RegExp(
        [
          " +|(^) +| +(?=$)| +\n|\n +|\t|",
          "\u00A0|\u1680|\u2000|\u2001|",
          "\u2002|\u2003|\u2004|\u2005|",
          "\u2006|\u2007|\u2008|\u2009|",
          "\u200A|\u202F|\u205F|\u3000",
        ].join("")
      );
      let extension = {
        hlspace: {
          pattern: whitespace_regex,
          lookbehind: true,
        },
      };
      if (placeables) {
        extension.placeable = RegExp(placeables);
      }
      /*
       * We can not use Prism.extend here as we want whitespace highlighting
       * to apply first. The code is borrowed from Prism.util.clone.
       */
      for (var key in languageMode) {
        if (languageMode.hasOwnProperty(key)) {
          extension[key] = Prism.util.clone(languageMode[key]);
        }
      }
      languageMode = extension;
    }
    var syncContent = function () {
      highlight.innerHTML = Prism.highlight(editor.value, languageMode, mode);
      autosize.update(editor);
    };
    syncContent();
    editor.addEventListener("input", syncContent);

    /* Handle scrolling */
    editor.addEventListener("scroll", (event) => {
      highlight.scrollTop = editor.scrollTop;
      highlight.scrollLeft = editor.scrollLeft;
    });

    /* Handle resizing */
    const resizeObserver = new ResizeObserver((entries) => {
      for (let entry of entries) {
        if (entry.target === editor) {
          // match the height and width of the output area to the input area
          highlight.style.height = editor.offsetHeight + "px";
          highlight.style.width = editor.offsetWidth + "px";
        }
      }
    });
    resizeObserver.observe(editor);

    /* Autosizing */
    autosize(editor);
  });
}
$(function () {
var $window = $(window),
$document = $(document);
adjustColspan();
$window.resize(adjustColspan);
$document.on("shown.bs.tab", adjustColspan);
/* AJAX loading of tabs/pills */
$document.on(
"show.bs.tab",
'[data-toggle="tab"][data-href], [data-toggle="pill"][data-href]',
function (e) {
var $target = $(e.target);
var $content = $($target.attr("href"));
if ($target.data("loaded")) {
return;
}
if ($content.find(".panel-body").length > 0) {
$content = $content.find(".panel-body");
}
$content.load($target.data("href"), function (responseText, status, xhr) {
if (status !== "success") {
var msg = gettext("Error while loading page:");
$content.text(
msg +
" " +
xhr.statusText +
" (" +
xhr.status +
"): " +
responseText
);
}
$target.data("loaded", 1);
loadTableSorting();
});
}
);
if ($("#form-activetab").length > 0) {
$document.on("show.bs.tab", '[data-toggle="tab"]', function (e) {
var $target = $(e.target);
$("#form-activetab").attr("value", $target.attr("href"));
});
}
/* Form automatic submission */
$("form.autosubmit select").change(function () {
$("form.autosubmit").submit();
});
var activeTab;
/* Load correct tab */
if (location.hash !== "") {
/* From URL hash */
var separator = location.hash.indexOf("__");
if (separator != -1) {
activeTab = $(
'.nav [data-toggle=tab][href="' +
location.hash.substr(0, separator) +
'"]'
);
if (activeTab.length) {
activeTab.tab("show");
}
}
activeTab = $('.nav [data-toggle=tab][href="' + location.hash + '"]');
if (activeTab.length) {
activeTab.tab("show");
window.scrollTo(0, 0);
} else {
document.getElementById(location.hash.substr(1)).scrollIntoView();
}
} else if (
$(".translation-tabs").length > 0 &&
Cookies.get("translate-tab")
) {
/* From cookie */
activeTab = $(
'[data-toggle=tab][href="' + Cookies.get("translate-tab") + '"]'
);
if (activeTab.length) {
activeTab.tab("show");
}
}
/* Add a hash to the URL when the user clicks on a tab */
$('a[data-toggle="tab"]').on("shown.bs.tab", function (e) {
history.pushState(null, null, $(this).attr("href"));
/* Remove focus on rows */
$(".selectable-row").removeClass("active");
});
/* Navigate to a tab when the history changes */
window.addEventListener("popstate", function (e) {
if (location.hash !== "") {
activeTab = $('[data-toggle=tab][href="' + location.hash + '"]');
} else {
activeTab = Array();
}
if (activeTab.length) {
activeTab.tab("show");
} else {
$(".nav-tabs a:first").tab("show");
}
});
/* Activate tab with error */
var formErrors = $("div.has-error");
if (formErrors.length > 0) {
var tab = formErrors.closest("div.tab-pane");
if (tab.length > 0) {
$('[data-toggle=tab][href="#' + tab.attr("id") + '"]').tab("show");
}
}
/* Announcement discard */
$(".alert").on("close.bs.alert", function () {
var $this = $(this);
var $form = $("#link-post");
if ($this.data("action")) {
$.ajax({
type: "POST",
url: $this.data("action"),
data: {
csrfmiddlewaretoken: $form.find("input").val(),
id: $this.data("id"),
},
error: function (jqXHR, textStatus, errorThrown) {
addAlert(errorThrown);
},
});
}
});
/* Widgets selector */
$(".select-tab").on("change", function (e) {
$(this).parent().find(".tab-pane").removeClass("active");
$("#" + $(this).val()).addClass("active");
});
/* Code samples (on widgets page) */
$(".code-example").focus(function () {
$(this).select();
});
/* Table sorting */
loadTableSorting();
/* Matrix mode handling */
if ($(".matrix").length > 0) {
load_matrix();
$window.scroll(function () {
if ($window.scrollTop() >= $document.height() - 2 * $window.height()) {
load_matrix();
}
});
}
/* Social auth disconnect */
$("a.disconnect").click(function (e) {
e.preventDefault();
$("form#disconnect-form").attr("action", $(this).attr("href")).submit();
});
/* Check if browser provides native datepicker */
if (Modernizr.inputtypes.date) {
$(document).off(".datepicker.data-api");
}
/* Datepicker localization */
var week_start = "1";
if (typeof django !== "undefined") {
week_start = django.formats.FIRST_DAY_OF_WEEK;
}
$.fn.datepicker.dates.en = {
days: [
gettext("Sunday"),
gettext("Monday"),
gettext("Tuesday"),
gettext("Wednesday"),
gettext("Thursday"),
gettext("Friday"),
gettext("Saturday"),
gettext("Sunday"),
],
daysShort: [
pgettext("Short (for example three letter) name of day in week", "Sun"),
pgettext("Short (for example three letter) name of day in week", "Mon"),
pgettext("Short (for example three letter) name of day in week", "Tue"),
pgettext("Short (for example three letter) name of day in week", "Wed"),
pgettext("Short (for example three letter) name of day in week", "Thu"),
pgettext("Short (for example three letter) name of day in week", "Fri"),
pgettext("Short (for example three letter) name of day in week", "Sat"),
pgettext("Short (for example three letter) name of day in week", "Sun"),
],
daysMin: [
pgettext("Minimal (for example two letter) name of day in week", "Su"),
pgettext("Minimal (for example two letter) name of day in week", "Mo"),
pgettext("Minimal (for example two letter) name of day in week", "Tu"),
pgettext("Minimal (for example two letter) name of day in week", "We"),
pgettext("Minimal (for example two letter) name of day in week", "Th"),
pgettext("Minimal (for example two letter) name of day in week", "Fr"),
pgettext("Minimal (for example two letter) name of day in week", "Sa"),
pgettext("Minimal (for example two letter) name of day in week", "Su"),
],
months: [
gettext("January"),
gettext("February"),
gettext("March"),
gettext("April"),
gettext("May"),
gettext("June"),
gettext("July"),
gettext("August"),
gettext("September"),
gettext("October"),
gettext("November"),
gettext("December"),
],
monthsShort: [
pgettext("Short name of month", "Jan"),
pgettext("Short name of month", "Feb"),
pgettext("Short name of month", "Mar"),
pgettext("Short name of month", "Apr"),
pgettext("Short name of month", "May"),
pgettext("Short name of month", "Jun"),
pgettext("Short name of month", "Jul"),
pgettext("Short name of month", "Aug"),
pgettext("Short name of month", "Sep"),
pgettext("Short name of month", "Oct"),
pgettext("Short name of month", "Nov"),
pgettext("Short name of month", "Dec"),
],
today: gettext("Today"),
clear: gettext("Clear"),
weekStart: week_start,
titleFormat: "MM yyyy",
};
$(".dropdown-menu")
.find("form")
.click(function (e) {
e.stopPropagation();
});
$document.on("click", ".link-post", function () {
var $form = $("#link-post");
var $this = $(this);
$form.attr("action", $this.attr("data-href"));
$.each($this.data("params"), function (name, value) {
var elm = $("<input>")
.attr("type", "hidden")
.attr("name", name)
.attr("value", value);
$form.append(elm);
});
$form.submit();
return false;
});
$(".link-auto").click();
$document.on("click", ".thumbnail", function () {
var $this = $(this);
$("#imagepreview").attr("src", $this.attr("href"));
$("#screenshotModal").text($this.attr("title"));
$("#modalEditLink").attr("href", $this.data("edit"));
$("#imagemodal").modal("show");
return false;
});
/* Screenshot management */
$("#screenshots-search,#screenshots-auto").click(function () {
var $this = $(this);
screenshotStart();
$.ajax({
type: "POST",
url: $this.data("href"),
data: $this.parent().serialize(),
dataType: "json",
success: screenshotLoaded,
error: screenshotFailure,
});
return false;
});
/* Avoid double submission of non AJAX forms */
$("form:not(.double-submission)").on("submit", function (e) {
var $form = $(this);
if ($form.data("submitted") === true) {
// Previously submitted - don't submit again
e.preventDefault();
} else {
// Mark it so that the next submit can be ignored
$form.data("submitted", true);
}
});
/* Reset submitted flag when leaving the page, so that it is not set when going back in history */
$window.on("pagehide", function () {
$("form:not(.double-submission)").data("submitted", false);
});
/* Client side form persistence */
var $forms = $("[data-persist]");
if ($forms.length > 0 && window.localStorage) {
/* Load from local storage */
$forms.each(function () {
var $this = $(this);
var storedValue = window.localStorage[$this.data("persist")];
if (storedValue) {
storedValue = JSON.parse(storedValue);
$.each(storedValue, function (key, value) {
var target = $this.find("[name=" + key + "]");
if (target.is(":checkbox")) {
target.prop("checked", value);
} else {
target.val(value);
}
});
}
});
/* Save on submit */
$forms.submit(function (e) {
var data = {};
var $this = $(this);
$this.find(":checkbox").each(function () {
var $this = $(this);
data[$this.attr("name")] = $this.prop("checked");
});
$this.find("select").each(function () {
var $this = $(this);
data[$this.attr("name")] = $this.val();
});
window.localStorage[$this.data("persist")] = JSON.stringify(data);
});
}
/*
* Disable modal enforce focus to fix compatibility
* issues with ClipboardJS, see https://stackoverflow.com/a/40862005/225718
*/
$.fn.modal.Constructor.prototype.enforceFocus = function () {};
/* Focus first input in modal */
$(document).on("shown.bs.modal", function (event) {
var button = $(event.relatedTarget); // Button that triggered the modal
var target = button.data("focus");
if (target) {
/* Modal context focusing */
$(target).focus();
} else {
$("input:visible:enabled:first", event.target).focus();
}
});
/* Copy to clipboard */
var clipboard = new ClipboardJS("[data-clipboard-text]");
clipboard.on("success", function (e) {
var text =
e.trigger.getAttribute("data-clipboard-message") ||
gettext("Text copied to clipboard.");
addAlert(text, (kind = "info"));
});
clipboard.on("error", function (e) {
addAlert(gettext("Please press Ctrl+C to copy."), (kind = "danger"));
});
$("[data-clipboard-text]").on("click", function (e) {
e.preventDefault();
});
/* Auto translate source select */
var select_auto_source = $('input[name="auto_source"]');
if (select_auto_source.length > 0) {
select_auto_source.on("change", function () {
if ($('input[name="auto_source"]:checked').val() == "others") {
$("#auto_source_others").show();
$("#auto_source_mt").hide();
} else {
$("#auto_source_others").hide();
$("#auto_source_mt").show();
}
});
select_auto_source.trigger("change");
}
/* Override all multiple selects */
$("select[multiple]").multi({
enable_search: true,
search_placeholder: gettext("Search…"),
non_selected_header: gettext("Available:"),
selected_header: gettext("Chosen:"),
});
/* Slugify name */
slugify.extend({ ".": "-" });
$('input[name="slug"]').each(function () {
var $slug = $(this);
var $form = $slug.closest("form");
$form
.find('input[name="name"]')
.on("change keypress keydown keyup paste", function () {
$slug.val(
slugify($(this).val(), { remove: /[^\w\s-]+/g }).toLowerCase()
);
});
});
/* Component update progress */
$("[data-progress-url]").each(function () {
var $progress = $(this);
var $pre = $progress.find("pre"),
$bar = $progress.find(".progress-bar"),
url = $progress.data("progress-url");
var $form = $("#link-post");
$pre.animate({ scrollTop: $pre.get(0).scrollHeight });
var progress_completed = function () {
$bar.width("100%");
if ($("#progress-redirect").prop("checked")) {
window.location = $("#progress-return").attr("href");
}
};
var progress_interval = setInterval(function () {
$.ajax({
url: url,
type: "get",
error: function (XMLHttpRequest, textStatus, errorThrown) {
if (XMLHttpRequest.status == 404) {
clearInterval(progress_interval);
progress_completed();
}
},
success: function (data) {
$bar.width(data.progress + "%");
$pre.text(data.log);
$pre.animate({ scrollTop: $pre.get(0).scrollHeight });
if (data.completed) {
clearInterval(progress_interval);
progress_completed();
}
},
});
}, 1000);
$("#terminate-task-button").click((e) => {
fetch(url, {
method: "DELETE",
headers: {
Accept: "application/json",
"X-CSRFToken": $form.find("input").val(),
},
}).then((data) => {
window.location = $("#progress-return").attr("href");
});
e.preventDefault();
});
});
/* Generic messages progress */
$("[data-task]").each(function () {
var $message = $(this);
var $bar = $message.find(".progress-bar");
var task_interval = setInterval(function () {
$.get($message.data("task"), function (data) {
$bar.width(data.progress + "%");
if (data.completed) {
clearInterval(task_interval);
$message.text(data.result.message);
}
});
}, 1000);
});
/* Disable invalid file format choices */
$(".invalid-format").each(function () {
$(this).parent().find("input").attr("disabled", "1");
});
// Show the correct toggle button
if ($(".sort-field").length) {
var sort_name = $("#query-sort-dropdown span.search-label").text();
var sort_dropdown_value = $(".sort-field li a")
.filter(function () {
return $(this).text() == sort_name;
})
.data("sort");
var sort_value = $("#id_sort_by").val();
var $label = $(this).find("span.search-icon");
if (sort_dropdown_value) {
if (
sort_value.replace("-", "") === sort_dropdown_value.replace("-", "") &&
sort_value !== sort_dropdown_value
) {
$label.toggle();
}
}
}
/* Branch loading */
$(".branch-loader select[name=component]").change(function () {
var $this = $(this);
var $form = $this.closest("form");
var branches = $form.data("branches");
var $select = $form.find("select[name=branch]");
$select.empty();
$.each(branches[$this.val()], function (key, value) {
$select.append($("<option></option>").attr("value", value).text(value));
});
});
/* Click to edit position inline. Disable when clicked outside or pressed ESC */
$("#position-input").on("click", function () {
$("#position-input").hide();
$("#position-input-editable").show();
$("#position-input-editable input").focus();
document.addEventListener("click", clickedOutsideEditableInput);
document.addEventListener("keyup", pressedEscape);
});
var clickedOutsideEditableInput = function (event) {
if (
!$.contains($("#position-input-editable")[0], event.target) &&
event.target != $("#position-input")[0]
) {
$("#position-input").show();
$("#position-input-editable").hide();
document.removeEventListener("click", clickedOutsideEditableInput);
document.removeEventListener("keyup", pressedEscape);
}
};
var pressedEscape = function (event) {
if (event.key == "Escape" && event.target != $("#position-input")[0]) {
$("#position-input").show();
$("#position-input-editable").hide();
document.removeEventListener("click", clickedOutsideEditableInput);
document.removeEventListener("keyup", pressedEscape);
}
};
/* Advanced search */
$(".search-group li a").click(function () {
var $this = $(this);
var $group = $this.closest(".search-group");
var $button = $group.find("button.search-field");
$button.attr("data-field", $this.data("field"));
var $title = $this.find("span.title");
var text = $this.text();
if ($title.length) {
text = $title.text();
}
$group.find("span.search-label").text(text);
if ($group.hasClass("sort-field")) {
$group.find("input[name=sort_by]").val($this.data("sort"));
if ($this.closest(".result-page-form").length) {
$this.closest("form").submit();
}
}
if ($group.hasClass("query-field")) {
$group.find("input[name=q]").val($this.data("field"));
if ($this.closest(".result-page-form").length) {
var $form = $this.closest("form");
$form.find("input[name=offset]").val("1");
$form.submit();
}
}
$this.closest("ul").dropdown("toggle");
return false;
});
$(".query-sort-toggle").click(function () {
var $this = $(this);
var $input = $this.closest(".search-group").find("input[name=sort_by]");
var sort_params = $input.val().split(",");
sort_params.forEach(function (param, index) {
if (param.indexOf("-") !== -1) {
sort_params[index] = param.replace("-", "");
} else {
sort_params[index] = `-${param}`;
}
});
$input.val(sort_params.join(","));
if ($this.closest(".result-page-form").length) {
$this.closest("form").submit();
}
});
$(".search-group input")
.not("#id_q,#id_position,#id_term")
.on("keydown", function (event) {
if (event.key === "Enter") {
$(this).closest(".input-group").find(".search-add").click();
event.preventDefault();
return false;
}
});
$("#id_q").on("change", function (event) {
var $form = $(this).closest("form");
$form.find("input[name=offset]").val("1");
});
$(".search-add").click(function () {
var group = $(this).closest(".search-group");
var button = group.find("button.search-field");
var input = group.find("input");
if (input.length === 0) {
$("#id_q").insertAtCaret(" " + button.attr("data-field") + " ");
} else if (input.val() !== "") {
var prefix = "";
if (group.find("#is-exact input[type=checkbox]").is(":checked")) {
prefix = "=";
}
$("#id_q").insertAtCaret(
" " +
button.attr("data-field") +
prefix +
quoteSearch(input.val()) +
" "
);
}
});
$(".search-insert").click(function () {
$("#id_q").insertAtCaret(
" " + $(this).closest("tr").find("code").text() + " "
);
});
/* Clickable rows */
$("tr[data-href]").click(function () {
window.location = $(this).data("href");
});
/* ZIP import - autofill name and slug */
$("#id_zipcreate_zipfile,#id_doccreate_docfile,#id_image").change(
function () {
var $form = $(this).closest("form");
var target = $form.find("input[name=name]");
if (this.files.length > 0 && target.val() === "") {
var name = this.files[0].name;
target.val(name.substring(0, name.lastIndexOf(".")));
target.change();
}
}
);
/* Alert when creating a component */
$("#form-create-component-branch,#form-create-component-vcs").submit(
function () {
addAlert(
gettext("Weblate is now scanning the repository, please be patient."),
(kind = "info"),
(delay = 0)
);
}
);
/* Username autocompletion */
var tribute = new Tribute({
trigger: "@",
requireLeadingSpace: true,
menuShowMinLength: 2,
searchOpts: {
pre: "",
post: "",
},
noMatchTemplate: function () {
return "";
},
menuItemTemplate: function (item) {
let link = document.createElement("a");
link.innerText = item.string;
return link.outerHTML;
},
values: (text, callback) => {
$.ajax({
type: "GET",
url: `/api/users/?username=${text}`,
dataType: "json",
success: function (data) {
var userMentionList = data.results.map(function (user) {
return {
value: user.username,
key: `${user.full_name} (${user.username})`,
};
});
callback(userMentionList);
},
error: function (jqXHR, textStatus, errorThrown) {
console.error(errorThrown);
},
});
},
});
tribute.attach(document.querySelectorAll(".markdown-editor"));
document.querySelectorAll(".markdown-editor").forEach((editor) => {
editor.addEventListener("tribute-active-true", function (e) {
$(".tribute-container").addClass("open");
$(".tribute-container ul").addClass("dropdown-menu");
});
});
/* Formset fields adding */
$(".add-multifield").on("click", function () {
  // Rewrite the for/id/name attributes of a cloned row so they use the
  // next free formset index instead of Django's __prefix__ placeholder.
  const updateElementIndex = function (el, prefix, ndx) {
    const id_regex = new RegExp("(" + prefix + "-(\\d+|__prefix__))");
    const replacement = prefix + "-" + ndx;
    if ($(el).prop("for")) {
      $(el).prop("for", $(el).prop("for").replace(id_regex, replacement));
    }
    if (el.id) {
      el.id = el.id.replace(id_regex, replacement);
    }
    if (el.name) {
      el.name = el.name.replace(id_regex, replacement);
    }
  };
  var $this = $(this);
  var $form = $this.parents("form");
  var prefix = $this.data("prefix");
  var blank = $form.find(".multiFieldEmpty");
  var row = blank.clone();
  var totalForms = $("#id_" + prefix + "-TOTAL_FORMS");
  row.removeClass(["multiFieldEmpty", "hidden"]).addClass("multiField");
  row.find("*").each(function () {
    updateElementIndex(this, prefix, totalForms.val());
  });
  row.insertBefore(blank);
  // Keep Django's management form in sync with the new row count.
  totalForms.val(parseInt(totalForms.val(), 10) + 1);
  return false;
});
/* Textarea highlighting */
// Register an empty grammar so "none" is a valid Prism language.
Prism.languages.none = {};
initHighlight(document);
$(".replace-preview input[type='checkbox']").on("change", function () {
  // Visually mark rows selected for replacement.
  $(this).closest("tr").toggleClass("warning", this.checked);
});
/* Warn users that they do not want to use developer console in most cases */
// Standard self-XSS warning, styled large and red so it is noticed.
console.log(
  "%c" +
    pgettext("Alert to user when opening browser developer console", "Stop!"),
  "color: red; font-weight: bold; font-size: 50px; font-family: sans-serif; -webkit-text-stroke: 1px black;"
);
console.log(
  "%c" +
    gettext(
      "This is a browser feature intended for developers. If someone told you to copy-paste something here, they are likely trying to compromise your Weblate account."
    ),
  "font-size: 20px; font-family: sans-serif"
);
console.log(
  "%c" +
    gettext(
      "See https://en.wikipedia.org/wiki/Self-XSS for more information."
    ),
  "font-size: 20px; font-family: sans-serif"
);
}); | PypiClean |
/Hikka_TL-1.24.14-py3-none-any.whl/telethon/client/users.py | import asyncio
import datetime
import itertools
import time
import typing
from .. import errors, helpers, utils, hints
from ..errors import MultiError, RPCError
from ..helpers import retry_range
from ..tl import TLRequest, types, functions
_NOT_A_REQUEST = lambda: TypeError("You can only invoke requests, not types!")
if typing.TYPE_CHECKING:
    # Imported only for static type checkers; avoids a circular import
    # at runtime (telegramclient imports this module's mixin).
    from .telegramclient import TelegramClient
def _fmt_flood(delay, request, *, early=False, td=datetime.timedelta):
return (
"Sleeping%s for %ds (%s) on %s flood wait",
" early" if early else "",
delay,
td(seconds=delay),
request.__class__.__name__,
)
class UserMethods:
    async def __call__(
        self: "TelegramClient", request, ordered=False, flood_sleep_threshold=None
    ):
        """Invoke one or more raw API requests (see `_call`).

        Bug fix: ``flood_sleep_threshold`` was accepted but silently
        dropped; it is now forwarded to `_call` so per-call overrides of
        the client-wide threshold actually take effect.
        """
        return await self._call(
            self._sender,
            request,
            ordered=ordered,
            flood_sleep_threshold=flood_sleep_threshold,
        )
    async def _call(
        self: "TelegramClient",
        sender,
        request,
        ordered=False,
        flood_sleep_threshold=None,
    ):
        """Send one request (or a list of them) with retry, flood-wait and
        DC-migration handling. Returns the result(s) of the request(s).

        Raises `MultiError` for partially-failed batches, `FloodWaitError`
        when a wait exceeds ``flood_sleep_threshold``, and `ValueError`
        when all retries are exhausted.
        """
        if flood_sleep_threshold is None:
            flood_sleep_threshold = self.flood_sleep_threshold
        requests = request if utils.is_list_like(request) else (request,)
        for r in requests:
            if not isinstance(r, TLRequest):
                raise _NOT_A_REQUEST()
            await r.resolve(self, utils)
            # Avoid making the request if it's already in a flood wait
            if r.CONSTRUCTOR_ID in self._flood_waited_requests:
                due = self._flood_waited_requests[r.CONSTRUCTOR_ID]
                diff = round(due - time.time())
                if diff <= 3:  # Flood waits below 3 seconds are "ignored"
                    self._flood_waited_requests.pop(r.CONSTRUCTOR_ID, None)
                elif diff <= flood_sleep_threshold:
                    # Sleep the remaining wait off instead of raising.
                    self._log[__name__].info(*_fmt_flood(diff, r, early=True))
                    await asyncio.sleep(diff)
                    self._flood_waited_requests.pop(r.CONSTRUCTOR_ID, None)
                else:
                    raise errors.FloodWaitError(request=r, capture=diff)
            if self._no_updates:
                # NOTE(review): this rebinds the loop variable only; the
                # wrapped request is not written back into ``requests`` —
                # confirm the no-updates wrapper actually takes effect.
                r = functions.InvokeWithoutUpdatesRequest(r)
        request_index = 0
        last_error = None
        self._last_request = time.time()
        for attempt in retry_range(self._request_retries):
            try:
                future = sender.send(request, ordered=ordered)
                if isinstance(future, list):
                    # A list of requests: await each, collecting results and
                    # per-request errors side by side.
                    results = []
                    exceptions = []
                    for f in future:
                        try:
                            result = await f
                        except RPCError as e:
                            exceptions.append(e)
                            results.append(None)
                            continue
                        self.session.process_entities(result)
                        self._entity_cache.add(result)
                        exceptions.append(None)
                        results.append(result)
                        request_index += 1
                    if any(x is not None for x in exceptions):
                        raise MultiError(exceptions, results, requests)
                    else:
                        return results
                else:
                    result = await future
                    self.session.process_entities(result)
                    self._entity_cache.add(result)
                    return result
            except (
                errors.ServerError,
                errors.RpcCallFailError,
                errors.RpcMcgetFailError,
                errors.InterdcCallErrorError,
                errors.InterdcCallRichErrorError,
            ) as e:
                # Transient server-side failures: log and retry shortly.
                last_error = e
                self._log[__name__].warning(
                    "Telegram is having internal issues %s: %s",
                    last_error.__class__.__name__,
                    last_error,
                )
                await asyncio.sleep(2)
            except (
                errors.FloodWaitError,
                errors.SlowModeWaitError,
                errors.FloodTestPhoneWaitError,
            ) as e:
                last_error = e
                if utils.is_list_like(request):
                    # Blame the request that was being awaited when we failed.
                    request = request[request_index]
                # SLOW_MODE_WAIT is chat-specific, not request-specific
                if not isinstance(e, errors.SlowModeWaitError):
                    self._flood_waited_requests[request.CONSTRUCTOR_ID] = (
                        time.time() + e.seconds
                    )
                # In test servers, FLOOD_WAIT_0 has been observed, and sleeping for
                # such a short amount will cause retries very fast leading to issues.
                if e.seconds == 0:
                    e.seconds = 1
                if e.seconds > self.flood_sleep_threshold:
                    raise
                self._log[__name__].info(*_fmt_flood(e.seconds, request))
                await asyncio.sleep(e.seconds)
            except (
                errors.PhoneMigrateError,
                errors.NetworkMigrateError,
                errors.UserMigrateError,
            ) as e:
                last_error = e
                self._log[__name__].info("Phone migrated to %d", e.new_dc)
                should_raise = isinstance(
                    e, (errors.PhoneMigrateError, errors.NetworkMigrateError)
                )
                if should_raise and await self.is_user_authorized():
                    raise
                # Reconnect to the new data center and retry.
                await self._switch_dc(e.new_dc)
        if self._raise_last_call_error and last_error is not None:
            raise last_error
        raise ValueError("Request was unsuccessful {} time(s)".format(attempt))
# region Public methods
async def reorder_usernames(
self: "TelegramClient",
order: typing.List[str],
) -> bool:
"""
Reorders the usernames of user account
Args:
order (List[str]): List of usernames in the order you want them to be
Returns:
bool: True if successful
Example:
.. code-block:: python
client.reorder_usernames(['username1', 'username2'])
"""
return await self(functions.account.ReorderUsernamesRequest(order=order))
async def toggle_username(
self: "TelegramClient",
username: str,
active: bool,
) -> bool:
"""
Toggles the given username's active status.
Args:
username (``str``):
The username to toggle.
active (``bool``):
Whether the username should be active or not.
Returns:
``bool``: Whether the operation was successful.
Example:
.. code-block:: python
await client.toggle_username('username', False)
"""
return await self(
functions.account.ToggleUsernameRequest(username=username, active=active)
)
    async def get_me(
        self: "TelegramClient",
        input_peer: bool = False,
    ) -> "typing.Union[types.User, types.InputPeerUser]":
        """
        Gets "me", the current :tl:`User` who is logged in.

        If the user has not logged in yet, this method returns `None`.

        Arguments
            input_peer (`bool`, optional):
                Whether to return the :tl:`InputPeerUser` version or the normal
                :tl:`User`. This can be useful if you just need to know the ID
                of yourself.

        Returns
            Your own :tl:`User`, or `None` when not authorized.

        Example
            .. code-block:: python

                me = await client.get_me()
                print(me.username)
        """
        if input_peer and self._self_input_peer:
            # Fast path: already cached, no network round-trip needed.
            return self._self_input_peer
        try:
            me = (await self(functions.users.GetUsersRequest([types.InputUserSelf()])))[
                0
            ]
            self._bot = me.bot
            if not self._self_input_peer:
                # Cache a concrete InputPeerUser; allow_self=False avoids
                # storing the opaque InputPeerSelf marker instead.
                self._self_input_peer = utils.get_input_peer(me, allow_self=False)
            return self._self_input_peer if input_peer else me
        except errors.UnauthorizedError:
            return None
@property
def _self_id(self: "TelegramClient") -> typing.Optional[int]:
"""
Returns the ID of the logged-in user, if known.
This property is used in every update, and some like `updateLoginToken`
occur prior to login, so it gracefully handles when no ID is known yet.
"""
return self._self_input_peer.user_id if self._self_input_peer else None
    async def is_bot(self: "TelegramClient") -> bool:
        """
        Return `True` if the signed-in user is a bot, `False` otherwise.

        Example
            .. code-block:: python

                if await client.is_bot():
                    print('Beep')
                else:
                    print('Hello')
        """
        if self._bot is None:
            # Flag not known yet; fetching ourselves also caches it.
            self._bot = (await self.get_me()).bot
        return self._bot
    async def is_user_authorized(self: "TelegramClient") -> bool:
        """
        Returns `True` if the user is authorized (logged in).

        The result is cached on first use; subsequent calls are free.

        Example
            .. code-block:: python

                if not await client.is_user_authorized():
                    await client.send_code_request(phone)
                    code = input('enter code: ')
                    await client.sign_in(phone, code)
        """
        if self._authorized is None:
            try:
                # Any request that requires authorization will work
                await self(functions.updates.GetStateRequest())
                self._authorized = True
            except errors.RPCError:
                self._authorized = False
        return self._authorized
    async def get_entity(
        self: "TelegramClient", entity: "hints.EntitiesLike"
    ) -> "hints.Entity":
        """
        Turns the given entity into a valid Telegram :tl:`User`, :tl:`Chat`
        or :tl:`Channel`. You can also pass a list or iterable of entities,
        and they will be efficiently fetched from the network.

        Arguments
            entity (`str` | `int` | :tl:`Peer` | :tl:`InputPeer`):
                If a username is given, **the username will be resolved** making
                an API call every time. Resolving usernames is an expensive
                operation and will start hitting flood waits around 50 usernames
                in a short period of time.

                If you want to get the entity for a *cached* username, you should
                first `get_input_entity(username) <get_input_entity>` which will
                use the cache), and then use `get_entity` with the result of the
                previous call.

                Similar limits apply to invite links, and you should use their
                ID instead.

                Using phone numbers (from people in your contact list), exact
                names, integer IDs or :tl:`Peer` rely on a `get_input_entity`
                first, which in turn needs the entity to be in cache, unless
                a :tl:`InputPeer` was passed.

                Unsupported types will raise ``TypeError``.

                If the entity can't be found, ``ValueError`` will be raised.

        Returns
            :tl:`User`, :tl:`Chat` or :tl:`Channel` corresponding to the
            input entity. A list will be returned if more than one was given.

        Example
            .. code-block:: python

                from telethon import utils

                me = await client.get_entity('me')
                print(utils.get_display_name(me))

                chat = await client.get_input_entity('username')
                async for message in client.iter_messages(chat):
                    ...

                # Note that you could have used the username directly, but it's
                # good to use get_input_entity if you will reuse it a lot.
                async for message in client.iter_messages('username'):
                    ...

                # Note that for this to work the phone number must be in your contacts
                some_id = await client.get_peer_id('+34123456789')
        """
        single = not utils.is_list_like(entity)
        if single:
            entity = (entity,)
        # Group input entities by string (resolve username),
        # input users (get users), input chat (get chats) and
        # input channels (get channels) to get the most entities
        # in the less amount of calls possible.
        inputs = []
        for x in entity:
            if isinstance(x, str):
                inputs.append(x)
            else:
                inputs.append(await self.get_input_entity(x))
        lists = {
            helpers._EntityType.USER: [],
            helpers._EntityType.CHAT: [],
            helpers._EntityType.CHANNEL: [],
        }
        for x in inputs:
            try:
                lists[helpers._entity_type(x)].append(x)
            except TypeError:
                # Strings (usernames) have no entity type; resolved later.
                pass
        users = lists[helpers._EntityType.USER]
        chats = lists[helpers._EntityType.CHAT]
        channels = lists[helpers._EntityType.CHANNEL]
        if users:
            # GetUsersRequest has a limit of 200 per call
            tmp = []
            while users:
                curr, users = users[:200], users[200:]
                tmp.extend(await self(functions.users.GetUsersRequest(curr)))
            users = tmp
        if chats:  # TODO Handle chats slice?
            chats = (
                await self(
                    functions.messages.GetChatsRequest([x.chat_id for x in chats])
                )
            ).chats
        if channels:
            channels = (
                await self(functions.channels.GetChannelsRequest(channels))
            ).chats
        # Merge users, chats and channels into a single dictionary
        id_entity = {
            utils.get_peer_id(x): x for x in itertools.chain(users, chats, channels)
        }
        # We could check saved usernames and put them into the users,
        # chats and channels list from before. While this would reduce
        # the amount of ResolveUsername calls, it would fail to catch
        # username changes.
        result = []
        for x in inputs:
            if isinstance(x, str):
                result.append(await self._get_entity_from_string(x))
            elif not isinstance(x, types.InputPeerSelf):
                result.append(id_entity[utils.get_peer_id(x)])
            else:
                # InputPeerSelf: find our own User among the fetched ones.
                result.append(
                    next(
                        u
                        for u in id_entity.values()
                        if isinstance(u, types.User) and u.is_self
                    )
                )
        return result[0] if single else result
    async def get_input_entity(
        self: "TelegramClient", peer: "hints.EntityLike"
    ) -> "types.TypeInputPeer":
        """
        Turns the given entity into its input entity version.

        Most requests use this kind of :tl:`InputPeer`, so this is the most
        suitable call to make for those cases. **Generally you should let the
        library do its job** and don't worry about getting the input entity
        first, but if you're going to use an entity often, consider making the
        call:

        Arguments
            entity (`str` | `int` | :tl:`Peer` | :tl:`InputPeer`):
                If a username or invite link is given, **the library will
                use the cache**. This means that it's possible to be using
                a username that *changed* or an old invite link (this only
                happens if an invite link for a small group chat is used
                after it was upgraded to a mega-group).

                If the username or ID from the invite link is not found in
                the cache, it will be fetched. The same rules apply to phone
                numbers (``'+34 123456789'``) from people in your contact list.

                If an exact name is given, it must be in the cache too. This
                is not reliable as different people can share the same name
                and which entity is returned is arbitrary, and should be used
                only for quick tests.

                If a positive integer ID is given, the entity will be searched
                in cached users, chats or channels, without making any call.

                If a negative integer ID is given, the entity will be searched
                exactly as either a chat (prefixed with ``-``) or as a channel
                (prefixed with ``-100``).

                If a :tl:`Peer` is given, it will be searched exactly in the
                cache as either a user, chat or channel.

                If the given object can be turned into an input entity directly,
                said operation will be done.

                Unsupported types will raise ``TypeError``.

                If the entity can't be found, ``ValueError`` will be raised.

        Returns
            :tl:`InputPeerUser`, :tl:`InputPeerChat` or :tl:`InputPeerChannel`
            or :tl:`InputPeerSelf` if the parameter is ``'me'`` or ``'self'``.

            If you need to get the ID of yourself, you should use
            `get_me` with ``input_peer=True``) instead.

        Example
            .. code-block:: python

                # If you're going to use "username" often in your code
                # (make a lot of calls), consider getting its input entity
                # once, and then using the "user" everywhere instead.
                user = await client.get_input_entity('username')

                # The same applies to IDs, chats or channels.
                chat = await client.get_input_entity(-123456789)
        """
        # Short-circuit if the input parameter directly maps to an InputPeer
        try:
            return utils.get_input_peer(peer)
        except TypeError:
            pass
        # Next in priority is having a peer (or its ID) cached in-memory
        try:
            # 0x2d45687 == crc32(b'Peer')
            if isinstance(peer, int) or peer.SUBCLASS_OF_ID == 0x2D45687:
                return self._entity_cache[peer]
        except (AttributeError, KeyError):
            pass
        # Then come known strings that take precedence
        if peer in ("me", "self"):
            return types.InputPeerSelf()
        # No InputPeer, cached peer, or known string. Fetch from disk cache
        try:
            return self.session.get_input_entity(peer)
        except ValueError:
            pass
        # Only network left to try
        if isinstance(peer, str):
            return utils.get_input_peer(await self._get_entity_from_string(peer))
        # If we're a bot and the user has messaged us privately users.getUsers
        # will work with access_hash = 0. Similar for channels.getChannels.
        # If we're not a bot but the user is in our contacts, it seems to work
        # regardless. These are the only two special-cased requests.
        peer = utils.get_peer(peer)
        if isinstance(peer, types.PeerUser):
            users = await self(
                functions.users.GetUsersRequest(
                    [types.InputUser(peer.user_id, access_hash=0)]
                )
            )
            if users and not isinstance(users[0], types.UserEmpty):
                # If the user passed a valid ID they expect to work for
                # channels but would be valid for users, we get UserEmpty.
                # Avoid returning the invalid empty input peer for that.
                #
                # We *could* try to guess if it's a channel first, and if
                # it's not, work as a chat and try to validate it through
                # another request, but that becomes too much work.
                return utils.get_input_peer(users[0])
        elif isinstance(peer, types.PeerChat):
            return types.InputPeerChat(peer.chat_id)
        elif isinstance(peer, types.PeerChannel):
            try:
                channels = await self(
                    functions.channels.GetChannelsRequest(
                        [types.InputChannel(peer.channel_id, access_hash=0)]
                    )
                )
                return utils.get_input_peer(channels.chats[0])
            except errors.ChannelInvalidError:
                pass
        raise ValueError(
            "Could not find the input entity for {} ({}). Please read https://"
            "docs.telethon.dev/en/latest/concepts/entities.html to"
            " find out more details.".format(peer, type(peer).__name__)
        )
async def _get_peer(self: "TelegramClient", peer: "hints.EntityLike"):
i, cls = utils.resolve_id(await self.get_peer_id(peer))
return cls(i)
    async def get_peer_id(
        self: "TelegramClient", peer: "hints.EntityLike", add_mark: bool = True
    ) -> int:
        """
        Gets the ID for the given entity.

        This method needs to be ``async`` because `peer` supports usernames,
        invite-links, phone numbers (from people in your contact list), etc.

        If ``add_mark is False``, then a positive ID will be returned
        instead. By default, bot-API style IDs (signed) are returned.

        Example
            .. code-block:: python

                print(await client.get_peer_id('me'))
        """
        if isinstance(peer, int):
            return utils.get_peer_id(peer, add_mark=add_mark)
        try:
            if peer.SUBCLASS_OF_ID not in (0x2D45687, 0xC91C90B6):
                # 0x2d45687, 0xc91c90b6 == crc32(b'Peer') and b'InputPeer'
                peer = await self.get_input_entity(peer)
        except AttributeError:
            # No SUBCLASS_OF_ID (e.g. a plain string): resolve it fully.
            peer = await self.get_input_entity(peer)
        if isinstance(peer, types.InputPeerSelf):
            peer = await self.get_me(input_peer=True)
        return utils.get_peer_id(peer, add_mark=add_mark)
# endregion
# region Private methods
    async def _get_entity_from_string(self: "TelegramClient", string):
        """
        Gets a full entity from the given string, which may be a phone or
        a username, and processes all the found entities on the session.
        The string may also be a user link, or a channel/chat invite link.

        This method has the side effect of adding the found users to the
        session database, so it can be queried later without API calls,
        if this option is enabled on the session.

        Returns the found entity, or raises ValueError if not found.
        """
        phone = utils.parse_phone(string)
        if phone:
            # Phone numbers can only be resolved through the contact list.
            try:
                for user in (
                    await self(functions.contacts.GetContactsRequest(0))
                ).users:
                    if user.phone == phone:
                        return user
            except errors.BotMethodInvalidError:
                raise ValueError(
                    "Cannot get entity by phone number as a "
                    "bot (try using integer IDs, not strings)"
                )
        elif string.lower() in ("me", "self"):
            return await self.get_me()
        else:
            username, is_join_chat = utils.parse_username(string)
            if is_join_chat:
                # Invite links only resolve for chats we are already in.
                invite = await self(functions.messages.CheckChatInviteRequest(username))
                if isinstance(invite, types.ChatInvite):
                    raise ValueError(
                        "Cannot get entity from a channel (or group) "
                        "that you are not part of. Join the group and retry"
                    )
                elif isinstance(invite, types.ChatInviteAlready):
                    return invite.chat
            elif username:
                try:
                    result = await self(
                        functions.contacts.ResolveUsernameRequest(username)
                    )
                except errors.UsernameNotOccupiedError as e:
                    raise ValueError(
                        'No user has "{}" as username'.format(username)
                    ) from e
                try:
                    pid = utils.get_peer_id(result.peer, add_mark=False)
                    if isinstance(result.peer, types.PeerUser):
                        return next(x for x in result.users if x.id == pid)
                    else:
                        return next(x for x in result.chats if x.id == pid)
                except StopIteration:
                    pass
            try:
                # Nobody with this username, maybe it's an exact name/title
                return await self.get_entity(self.session.get_input_entity(string))
            except ValueError:
                pass
        raise ValueError('Cannot find any entity corresponding to "{}"'.format(string))
    async def _get_input_dialog(self: "TelegramClient", dialog):
        """
        Returns a :tl:`InputDialogPeer`. This is a bit tricky because
        it may or not need access to the client to convert what's given
        into an input entity.
        """
        try:
            if dialog.SUBCLASS_OF_ID == 0xA21C9795:  # crc32(b'InputDialogPeer')
                # Already a dialog peer; just normalise its inner peer.
                dialog.peer = await self.get_input_entity(dialog.peer)
                return dialog
            elif dialog.SUBCLASS_OF_ID == 0xC91C90B6:  # crc32(b'InputPeer')
                return types.InputDialogPeer(dialog)
        except AttributeError:
            pass
        # Anything else (username, ID, ...): resolve then wrap.
        return types.InputDialogPeer(await self.get_input_entity(dialog))
    async def _get_input_notify(self: "TelegramClient", notify):
        """
        Returns a :tl:`InputNotifyPeer`. This is a bit tricky because
        it may or not need access to the client to convert what's given
        into an input entity.
        """
        try:
            if notify.SUBCLASS_OF_ID == 0x58981615:  # crc32(b'InputNotifyPeer')
                if isinstance(notify, types.InputNotifyPeer):
                    # Normalise the inner peer before returning it.
                    notify.peer = await self.get_input_entity(notify.peer)
                return notify
        except AttributeError:
            pass
        return types.InputNotifyPeer(await self.get_input_entity(notify))
async def set_status(
self: "TelegramClient",
document_id: int,
until: typing.Optional[int] = None,
) -> bool:
return await self(
functions.account.UpdateEmojiStatusRequest(
types.EmojiStatusUntil(document_id, until)
if until
else types.EmojiStatus(document_id)
)
)
# endregion | PypiClean |
/ECCArithmetic-1.0.0.tar.gz/ECCArithmetic-1.0.0/README.md | # ECCArithmetic
## Installation
```
pip install ECCArithmetic
```
## Generate the Curve
```
from ECCArithmetic.ec import *
Curve = EC(0, 5, 2, 23981)
```
## PickGenerator
```python
from ECCArithmetic.ec import *
Curve = EC(0, 5, 2, 23981)
G = Curve.pickGenerator()
```
## PickPoint
```python
from ECCArithmetic.ec import *
Curve = EC(0, 5, 2, 23981)
P = Curve.pickPoint()
```
## isPointOnEC
Checks whether the given point lies on the curve. (Point multiplication elsewhere in the library is realised with the double-and-add algorithm.)
```python
from ECCArithmetic.ec import *
Curve = EC(0, 5, 2, 23981)
G = Curve.isPointOnEC([14967, 14215])
```
## Identity Element
```python
from ECCArithmetic.ec import *
O = ECPt.identity()
```
## Find All Points
```python
from ECCArithmetic.ec import *
Curve = EC(0, 5, 2, 23981)
all = Curve.enumerateAllPoints()
```
## Addition
```python
from ECCArithmetic.ec import *
Curve = EC(0, 5, 2, 23981)
P = Curve.pickPoint()
Q = Curve.pickPoint()
S = P + Q
```
## Subtraction
```python
from ECCArithmetic.ec import *
Curve = EC(0, 5, 2, 23981)
P = Curve.pickPoint()
Q = Curve.pickPoint()
S = P - Q
```
## Multiplication
```python
from ECCArithmetic.ec import *
Curve = EC(0, 5, 2, 23981)
P = Curve.pickPoint()
Q = Curve.pickPoint()
S = P * Q
```
| PypiClean |
/Box2D-2.3.2.tar.gz/Box2D-2.3.2/examples/liquid.py |
from math import sqrt
from .framework import (Framework, Keys, main)
from Box2D import (b2CircleShape, b2FixtureDef, b2PolygonShape, b2Random,
b2Vec2, b2_epsilon)
# ***** NOTE *****
# ***** NOTE *****
# This example does not appear to be working currently...
# It was ported from the JBox2D (Java) version
# ***** NOTE *****
# ***** NOTE *****
class Liquid (Framework):
    """Particle-based liquid demo on top of the testbed Framework."""
    name = "Liquid Test"
    description = ''
    bullet = None
    # Fluid parameters: particle count and total fluid mass.
    num_particles = 1000
    total_mass = 10.0
    # World-space bounds of the fluid region used for spatial hashing.
    fluid_minx = -11.0
    fluid_maxx = 5.0
    fluid_miny = -10.0
    fluid_maxy = 10.0
    # Spatial-hash grid resolution (cells per axis).
    hash_width = 40
    hash_height = 40
    rad = 0.6  # particle interaction radius
    visc = 0.004  # viscosity coefficient

    def __init__(self):
        super(Liquid, self).__init__()
        self.per_particle_mass = self.total_mass / self.num_particles
        # Static scenery: floor, ramps, walls and a round obstacle.
        ground = self.world.CreateStaticBody(
            shapes=[
                b2PolygonShape(box=[5.0, 0.5]),
                b2PolygonShape(box=[1.0, 0.2, (0, 4), -0.2]),
                b2PolygonShape(box=[1.5, 0.2, (-1.2, 5.2), -1.5]),
                b2PolygonShape(box=[0.5, 50.0, (5, 0), 0.0]),
                b2PolygonShape(box=[0.5, 3.0, (-8, 0), 0.0]),
                b2PolygonShape(box=[2.0, 0.1, (-6, -2.8), 0.1]),
                b2CircleShape(radius=0.5, pos=(-.5, -4)),
            ]
        )
        cx = 0
        cy = 25
        box_width = 2.0
        box_height = 20.0
        self.liquid = []
        # Fill a box-shaped region above the scene with droplets.
        for i in range(self.num_particles):
            self.createDroplet((b2Random(cx - box_width * 0.5,
                                         cx + box_width * 0.5),
                                b2Random(cy - box_height * 0.5,
                                         cy + box_height * 0.5)))
        self.createBoxSurfer()
        if hasattr(self, 'settings'):
            self.settings.enableSubStepping = False
    def createBoxSurfer(self):
        """Create a small dynamic box that rides on top of the liquid."""
        self.surfer = self.world.CreateDynamicBody(position=(0, 25))
        self.surfer.CreatePolygonFixture(
            density=1,
            # Random half-extents so each surfer looks slightly different.
            box=(b2Random(0.3, 0.7), b2Random(0.3, 0.7)),
        )
    def createDroplet(self, position):
        """Create one liquid particle at ``position`` and register it."""
        body = self.world.CreateDynamicBody(
            position=position,
            fixedRotation=True,
            allowSleep=False,
        )
        body.CreateCircleFixture(
            # Negative group index: droplets never collide with each other.
            groupIndex=-10,
            radius=0.05,
            restitution=0.4,
            friction=0,
        )
        body.mass = self.per_particle_mass
        self.liquid.append(body)
    def applyLiquidConstraint(self, dt):
        """Apply SPH-style pressure and viscosity forces between droplets.

        Uses the spatial hash built by ``hashLocations`` to find neighbors.
        """
        # (original comments left untouched)
        # Unfortunately, this simulation method is not actually scale
        # invariant, and it breaks down for rad < ~3 or so. So we need
        # to scale everything to an ideal rad and then scale it back after.
        idealRad = 50
        idealRad2 = idealRad ** 2
        multiplier = idealRad / self.rad
        # info: drop -> (world position, scaled position, scaled velocity)
        info = dict([(drop, (drop.position, multiplier * drop.position,
                             multiplier * drop.linearVelocity))
                     for drop in self.liquid])
        change = dict([(drop, b2Vec2(0, 0)) for drop in self.liquid])
        dx = self.fluid_maxx - self.fluid_minx
        dy = self.fluid_maxy - self.fluid_miny
        range_ = (-1, 0, 1)
        hash_width = self.hash_width
        hash_height = self.hash_height
        max_len = 9.9e9
        visc = self.visc
        hash = self.hash
        neighbors = set()
        # Populate the neighbor list from the 9 nearest cells
        for drop, ((worldx, worldy), (mx, my), (mvx, mvy)) in list(info.items()):
            hx = int((worldx / dx) * hash_width)
            hy = int((worldy / dy) * hash_height)
            neighbors.clear()
            for nx in range_:
                xc = hx + nx
                if not (0 <= xc < hash_width):
                    continue
                for ny in range_:
                    yc = hy + ny
                    if yc in hash[xc]:
                        for neighbor in hash[xc][yc]:
                            neighbors.add(neighbor)
            if drop in neighbors:
                neighbors.remove(drop)
            # Particle pressure calculated by particle proximity
            # Pressures = 0 iff all particles within range are idealRad
            # distance away
            lengths = []
            p = 0
            pnear = 0
            # NOTE(review): neighbors outside the bounding-box check get no
            # entry in `lengths`, which can desync zip(lengths, neighbors)
            # below — confirm against upstream pybox2d.
            for neighbor in neighbors:
                nx, ny = info[neighbor][1]
                vx, vy = nx - mx, ny - my
                if -idealRad < vx < idealRad and -idealRad < vy < idealRad:
                    len_sqr = vx ** 2 + vy ** 2
                    if len_sqr < idealRad2:
                        len_ = sqrt(len_sqr)
                        if len_ < b2_epsilon:
                            len_ = idealRad - 0.01
                        lengths.append(len_)
                        oneminusq = 1.0 - (len_ / idealRad)
                        sq = oneminusq ** 2
                        p += sq
                        pnear += sq * oneminusq
                    else:
                        lengths.append(max_len)
            # Now actually apply the forces
            pressure = (p - 5) / 2.0  # normal pressure term
            presnear = pnear / 2.0  # near particles term
            changex, changey = 0, 0
            for len_, neighbor in zip(lengths, neighbors):
                (nx, ny), (nvx, nvy) = info[neighbor][1:3]
                vx, vy = nx - mx, ny - my
                if -idealRad < vx < idealRad and -idealRad < vy < idealRad:
                    if len_ < idealRad:
                        oneminusq = 1.0 - (len_ / idealRad)
                        # Pressure force along the separation vector.
                        factor = oneminusq * \
                            (pressure + presnear * oneminusq) / (2.0 * len_)
                        dx_, dy_ = vx * factor, vy * factor
                        relvx, relvy = nvx - mvx, nvy - mvy
                        # Viscosity force from relative velocity.
                        factor = visc * oneminusq * dt
                        dx_ -= relvx * factor
                        dy_ -= relvy * factor
                        change[neighbor] += (dx_, dy_)
                        changex -= dx_
                        changey -= dy_
            change[drop] += (changex, changey)
        # Scale the accumulated changes back to world units and apply them.
        for drop, (dx_, dy_) in list(change.items()):
            if dx_ != 0 or dy_ != 0:
                drop.position += (dx_ / multiplier, dy_ / multiplier)
                drop.linearVelocity += (dx_ / (multiplier * dt),
                                        dy_ / (multiplier * dt))
def hashLocations(self):
hash_width = self.hash_width
hash_height = self.hash_height
self.hash = hash = dict([(i, {}) for i in range(hash_width)])
info = [(drop, drop.position) for drop in self.liquid]
dx = self.fluid_maxx - self.fluid_minx
dy = self.fluid_maxy - self.fluid_miny
xs, ys = set(), set()
for drop, (worldx, worldy) in info:
hx = int((worldx / dx) * hash_width)
hy = int((worldy / dy) * hash_height)
xs.add(hx)
ys.add(hy)
if 0 <= hx < hash_width and 0 <= hy < hash_height:
x = hash[hx]
if hy not in x:
x[hy] = [drop]
else:
x[hy].append(drop)
def dampenLiquid(self):
for drop in self.liquid:
drop.linearVelocity *= 0.995
    def checkBounds(self):
        """Recycle droplets (and the surfer) that fell out of the world."""
        # Invalidate the spatial hash; it is rebuilt each Step.
        self.hash = None
        to_remove = [
            drop for drop in self.liquid if drop.position.y < self.fluid_miny]
        for drop in to_remove:
            self.liquid.remove(drop)
            self.world.DestroyBody(drop)
            # Respawn the droplet near the top with some jitter.
            self.createDroplet(
                (0.0 + b2Random(-0.6, 0.6), 15.0 + b2Random(-2.3, 2.0)))
        if self.surfer.position.y < -15:
            self.world.DestroyBody(self.surfer)
            self.createBoxSurfer()
    def Step(self, settings):
        """Advance the physics world, then run one liquid-solver pass."""
        super(Liquid, self).Step(settings)
        dt = 1.0 / settings.hz
        self.hashLocations()
        self.applyLiquidConstraint(dt)
        self.dampenLiquid()
        self.checkBounds()
    def Keyboard(self, key):
        """'b' fires a fast, heavy bullet through the fluid (replacing any
        previous one)."""
        if key == Keys.K_b:
            if self.bullet:
                self.world.DestroyBody(self.bullet)
                self.bullet = None
            circle = b2FixtureDef(
                shape=b2CircleShape(radius=0.25),
                density=20,
                restitution=0.05)
            self.bullet = self.world.CreateDynamicBody(
                position=(-31, 5),
                bullet=True,
                fixtures=circle,
                linearVelocity=(400, 0),
            )
if __name__ == "__main__":
    # Run this example in the pybox2d testbed.
    main(Liquid)
/MappedAPI-1.0.0.tar.gz/MappedAPI-1.0.0/README.md | ## Mapped API ##
[](https://travis-ci.com/venuebook/mappedapi)
A python library for an easily mapped RESTful API.
**Installation:**
(unless performing a system wide install, it's recommended to install inside of a virtualenv)
```bash
# Install dependencies:
pip install -r requirements.txt # Install core & tests
pip install -r requirements/core.txt # Just install core dependencies
pip install -r requirements/tests.txt # Install test dependencies
# Install mappedapi
python setup.py install
```
**Usage:**
```python
## example/__init__.py
## example/mapping.py
RESOURCE_MAPPING = {
'dogs': {
'shibes': {
'get': {
# https://www.example.com/api/3/dogs/{dog_id}/shibes
'endpoint_base': ['dogs', 'shibes'],
'endpoint_ids': ['dog_id'],
'verb': 'get',
},
'post': {
# https://www.example.com/api/3/dogs/{dog_id}/shibes
'endpoint_base': ['dogs', 'shibes'],
'endpoint_ids': ['dog_id'],
'required_args': ['name'],
'verb': 'post',
},
},
},
}
## example/client.py
import mappedapi.base
from example import mapping
from example.api import APIResource
class Client(object):
RESOURCE_CLASS = APIResource
RESOURCE_MAPPING = mapping.RESOURCE_MAPPING
def __init__(self, access_token):
super(Client, self).__init__()
self.auth = {'token': access_token}
## example/settings.py
API_BASE_URL = 'https://www.example.com/api/'
API_VERSION = '3'
## example/api.py
import mappedapi.base
from example import settings
class APIResource(mappedapi.base.APIResourceItem):
"""Item in a APIResource - Either a nested resource or an action."""
def _get_base_url(self):
return '%s/%s' % (settings.API_BASE_URL, settings.API_VERSION)
def _get_headers(self):
return {
'Authorization': 'Bearer %s' % self.auth['token'],
}
def _process_call_arguments(self, kwargs):
if 'operations' in kwargs:
data = []
for operation in kwargs['operations']:
data.append({'operation': operation[0], 'property': operation[1], 'value': operation[2]})
kwargs['data'] = data
return kwargs
## run.py
import uuid
from example.client import Client
# Initialize the client.
client = Client(YOUR_ACCESS_TOKEN)
print(client.dogs.shibes.get(dog_id='10203').json())
client.dogs.shibes.post(dog_id='10203', data={'name':'Doctor Wow'})
```
**Tests:**
```
py.test mappedapi --cov=mappedapi
```
**Dependencies:**
Core library depends on ``requests``.
Tests depend on ``pytest, pytest-cov, responses``.
| PypiClean |
/Flask-Monitoring-1.1.2.tar.gz/Flask-Monitoring-1.1.2/flask_monitoringdashboard/core/config/parser.py | import ast
import os
from flask_monitoringdashboard.core.logger import log
def parse_version(parser, header, version):
    """
    Parse the version given in the config-file.

    If both GIT and APP_VERSION are specified, the GIT value takes
    precedence.

    :param parser: the parser to be used for parsing
    :param header: name of the header in the configuration file
    :param version: the default version
    """
    version = parse_string(parser, header, 'APP_VERSION', version)
    if parser.has_option(header, 'GIT'):
        git = parser.get(header, 'GIT')
        try:
            # current hash can be found in the link in HEAD-file in git-folder
            # The file is specified by: 'ref: <location>'
            git_file = (open(os.path.join(git, 'HEAD')).read().rsplit(': ', 1)[1]).rstrip()
            # read the git-version
            version = open(git + '/' + git_file).read()
            # cut version to at most 6 chars
            return version[:6]
        except IOError:
            log("Error reading one of the files to retrieve the current git-version.")
            raise
    return version
def parse_string(parser, header, arg_name, arg_value):
    """
    Parse a string argument from the given parser. If the argument is not specified, return the
    default value.

    Precedence: configuration file > environment variable > default value.

    :param parser: the parser to be used for parsing
    :param header: name of the header in the configuration file
    :param arg_name: name in the configuration file
    :param arg_value: default value, used if the value is not found
    """
    # A non-empty environment variable overrides the supplied default.
    env = get_environment_var(arg_name)
    arg_value = env if env else arg_value
    # The configuration file has the highest precedence.
    if parser.has_option(header, arg_name):
        return parser.get(header, arg_name)
    return arg_value
def parse_bool(parser, header, arg_name, arg_value):
    """
    Parse a boolean argument from the given parser. If the argument is not specified, return the
    default value.

    Precedence: configuration file > environment variable > default value.

    :param parser: the parser to be used for parsing
    :param header: name of the header in the configuration file
    :param arg_name: name in the configuration file
    :param arg_value: default value, used if the value is not found
    """
    env = get_environment_var(arg_name)
    # BUGFIX: convert the environment value to a real bool. Previously the raw
    # string was returned, so an environment value of 'False' (a truthy string)
    # was effectively treated as True by callers.
    if env:
        arg_value = env == 'True'
    if parser.has_option(header, arg_name):
        return parser.get(header, arg_name) == 'True'
    return arg_value
def parse_literal(parser, header, arg_name, arg_value):
    """
    Parse a Python-literal argument from the given parser. If the argument is not specified,
    return the default value.

    Precedence: configuration file > environment variable > default value.

    :param parser: the parser to be used for parsing
    :param header: name of the header in the configuration file
    :param arg_name: name in the configuration file
    :param arg_value: default value, used if the value is not found
    """
    env = get_environment_var(arg_name)
    # Values are parsed with ast.literal_eval, so lists/dicts/numbers can be configured safely.
    arg_value = ast.literal_eval(env) if env else arg_value
    if parser.has_option(header, arg_name):
        return ast.literal_eval(parser.get(header, arg_name))
    return arg_value
def get_environment_var(environment_var):
    """
    Retrieve the value of the given environment variable.

    :param environment_var: name of the environment variable
    :return: either the value of the environment_var or None
    """
    # dict.get already returns None for missing keys.
    return os.environ.get(environment_var)
/Crack-O-Matic-0.2.1.tar.gz/Crack-O-Matic-0.2.1/crackomatic/models.py | from contextlib import contextmanager
from sqlalchemy import func, create_engine, Column, Integer, String, Boolean, \
DateTime, Text, ForeignKey, Float
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship, scoped_session
# Declarative base class shared by all ORM models in this module.
Base = declarative_base()
# Global database engine; set by init_db() before any session is opened.
engine = None
class AttrDict(dict):
    """A dict whose items can also be read and written as attributes."""
    __slots__ = ()

    def __getattr__(self, name):
        # Missing keys raise KeyError, just like a plain dict lookup.
        return self[name]

    def __setattr__(self, name, value):
        self[name] = value
def init_db(path):
    """Create the global engine for the given database URL and create all tables."""
    global engine
    engine = create_engine(path, echo=False)
    # Creates any missing tables declared on Base; existing tables are untouched.
    Base.metadata.create_all(engine)
@contextmanager
def session_scope(**kwargs):
    """Provide a transactional scope around a series of operations."""
    # Extra keyword arguments are forwarded to sessionmaker().
    session = scoped_session(sessionmaker(bind=engine, **kwargs))
    try:
        yield session
        session.commit()
    except Exception:
        # Roll back on any error and re-raise so callers can handle it.
        session.rollback()
        raise
    finally:
        session.close()
def freeze(model):
    """Create an object that contains the same data as the database object
    but is not attached to any session"""
    # Snapshot every public attribute into a detached AttrDict.
    public = {
        attr: getattr(model, attr)
        for attr in dir(model)
        if not attr.startswith('_')
    }
    return AttrDict(public)
class Audit(Base):
    """A password audit (one-off or recurring) and its connection/notification settings."""
    __tablename__ = 'audits'
    id = Column(Integer, primary_key=True)
    uuid = Column(String(32), nullable=False)
    # Directory connection settings and credentials.
    user = Column(String(64), nullable=False)
    domain = Column(String(64), nullable=False)
    dc_ip = Column(String(64), nullable=True)
    password = Column(String(256), nullable=True)
    ldap_url = Column(String(64), nullable=False)
    ca_file = Column(String(512), nullable=False)
    # LDAP attribute/filters used to select users and admins.
    email_field = Column(String(64), nullable=False)
    user_filter = Column(String(1024), nullable=False)
    admin_filter = Column(String(1024), nullable=False)
    # Notification e-mail content.
    subject = Column(String(256), nullable=False)
    message = Column(String(2048), nullable=False)
    include_cracked = Column(Boolean, nullable=False, default=False)
    start = Column(DateTime, nullable=True)
    end = Column(DateTime, nullable=True)
    state = Column(Integer, nullable=False)
    # NOTE(review): frequency units/semantics are not visible here -- confirm with scheduler code.
    frequency = Column(Integer, nullable=True)
    # One-to-one link to the resulting report.
    report = relationship(
        'Report',
        backref='audits',
        uselist=False,
        lazy=True,
    )

    def __repr__(self):
        return '<Audit {}@{}>'.format(self.id, self.start)
class Report(Base):
    """Statistics produced by one audit; linked one-to-one from Audit."""
    __tablename__ = 'reports'
    id = Column(Integer, primary_key=True)
    audit_id = Column(Integer, ForeignKey('audits.id'), nullable=False)
    total_hashes = Column(Integer, nullable=False)
    cracked = Column(Float, nullable=False)
    mean_pw_len = Column(Float, nullable=True)
    # NOTE(review): the String columns below presumably hold serialized result
    # structures -- confirm against the report generator before relying on this.
    lengths = Column(String)
    cliques = Column(String)
    largest_clique = Column(Integer)
    cliquiness = Column(Float, nullable=True)
    char_classes = Column(String)
    top_basewords = Column(String)
    top_patterns = Column(String)
class Event(Base):
    """An application event record."""
    __tablename__ = 'events'
    id = Column(Integer, primary_key=True)
    # NOTE(review): presumably flags events intended for end-user display -- confirm.
    user_facing = Column(Boolean)
    level = Column(String(16))
    timestamp = Column(DateTime)
    user_id = Column(String(256))
    message = Column(Text)

    def __repr__(self):
        return '<Event {}@{}>'.format(self.id, self.timestamp)
class Log(Base):
    """A captured log record."""
    __tablename__ = 'logs'
    id = Column(Integer, primary_key=True)
    logger = Column(String)
    level = Column(String)
    trace = Column(String)
    msg = Column(String)
    # Timestamp assigned by the database at insert time.
    created_at = Column(DateTime, default=func.now())
class Meta(Base):
    """Singleton table holding application metadata."""
    __tablename__ = 'meta'
    # Boolean primary key defaulting to True keeps this table to a single row.
    id = Column(Boolean, primary_key=True, default=True, nullable=False)
    # This column stores the app's version, so we know when to migrate
    version = Column(String(16))
class Config(Base):
    """Singleton table holding the application configuration as JSON text."""
    __tablename__ = 'config'
    # Boolean primary key defaulting to True keeps this table to a single row.
    id = Column(Boolean, primary_key=True, default=True, nullable=False)
    config_json = Column(Text)
class LocalUser(Base):
    """A locally managed user account."""
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    username = Column(String(256), nullable=False, unique=True)
    # NOTE(review): presumably a password hash, not plaintext -- confirm at the write site.
    password = Column(String(256), nullable=False)
    # Reserved for the future
    active = Column(Boolean(), nullable=False, default=True)
    role = Column(Integer(), nullable=True, default=0)
/KratosRomApplication-9.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/KratosMultiphysics/RomApplication/empirical_cubature_method.py | import numpy as np
try:
from matplotlib import pyplot as plt
missing_matplotlib = False
except ImportError as e:
missing_matplotlib = True
class EmpiricalCubatureMethod():
    """
    This class selects a subset of elements and corresponding positive weights necessary for
    the construction of a hyper-reduced order model.
    Reference: Hernandez 2020. "A multiscale method for periodic structures using domain
    decomposition and ECM-hyperreduction"
    """

    def __init__(self, ECM_tolerance = 0, Filter_tolerance = 1e-16, Plotting = False):
        """
        Constructor setting up the parameters for the Element Selection Strategy.

        :param ECM_tolerance: approximation tolerance for the element selection algorithm
        :param Filter_tolerance: parameter limiting the number of candidate points (elements)
            to those above this tolerance
        :param Plotting: whether to plot the error evolution of the element selection algorithm
        """
        self.ECM_tolerance = ECM_tolerance
        self.Filter_tolerance = Filter_tolerance
        self.Name = "EmpiricalCubature"
        self.Plotting = Plotting

    def SetUp(self, ResidualsBasis, constrain_sum_of_weights=True, constrain_conditions = False, number_of_conditions = 0):
        """
        Method for setting up the element selection.

        :param ResidualsBasis: numpy array containing a basis to the projected residuals
        :param constrain_sum_of_weights: constrain the weights to sum to the number of entities
        :param constrain_conditions: enforce weights to consider conditions (for specific
            boundary conditions)
        :param number_of_conditions: number of condition entities (trailing columns of self.G)
        """
        self.W = np.ones(np.shape(ResidualsBasis)[0])
        self.G = ResidualsBasis.T
        self.add_constrain_count = None
        total_number_of_entities = np.shape(self.G)[1]
        # Indicator vector: 1 for element entities, 0 for condition entities.
        elements_constraint = np.ones(total_number_of_entities)
        conditions_begin = total_number_of_entities - number_of_conditions
        elements_constraint[conditions_begin:] = 0
        if constrain_sum_of_weights and not constrain_conditions:
            # This is necessary in case the sum of the columns of self.G equals the 0 vector,
            # to avoid the trivial solution. It enforces that the sum of the weights equals
            # the number of columns in self.G (total number of elements).
            projection_of_constant_vector_elements = elements_constraint - self.G.T@( self.G @ elements_constraint)
            projection_of_constant_vector_elements/= np.linalg.norm(projection_of_constant_vector_elements)
            self.G = np.vstack([ self.G , projection_of_constant_vector_elements] )
            self.add_constrain_count = -1
        elif constrain_sum_of_weights and constrain_conditions: # Only for models which contain conditions
            projection_of_constant_vector_elements = elements_constraint - self.G.T@( self.G @ elements_constraint)
            projection_of_constant_vector_elements/= np.linalg.norm(projection_of_constant_vector_elements)
            self.G = np.vstack([ self.G , projection_of_constant_vector_elements] )
            # Additional constraint row covering the condition entities only.
            conditions_constraint = np.ones(total_number_of_entities)
            conditions_constraint[:conditions_begin] = 0
            projection_of_constant_vector_conditions = conditions_constraint - self.G.T@( self.G @ conditions_constraint)
            projection_of_constant_vector_conditions/= np.linalg.norm(projection_of_constant_vector_conditions)
            self.G = np.vstack([ self.G , projection_of_constant_vector_conditions ] )
            self.add_constrain_count = -2
        self.b = self.G @ self.W

    def Initialize(self):
        """
        Method performing calculations required before launching the Calculate method.
        """
        self.Gnorm = np.sqrt(sum(np.multiply(self.G, self.G), 0))
        M = np.shape(self.G)[1]
        normB = np.linalg.norm(self.b)
        self.y = np.arange(0,M,1) # Set of candidate points (those whose associated column has low norm are removed)
        # Column norms excluding the appended constraint row(s).
        GnormNOONE = np.sqrt(sum(np.multiply(self.G[:self.add_constrain_count,:], self.G[:self.add_constrain_count,:]), 0))
        if self.Filter_tolerance > 0:
            TOL_REMOVE = self.Filter_tolerance * normB
            rmvpin = np.where(GnormNOONE[self.y] < TOL_REMOVE)
            self.y = np.delete(self.y,rmvpin)
        self.z = {} # Set of integration points
        self.mPOS = 0 # Number of nonzero weights
        self.r = self.b # residual vector
        self.m = len(self.b) # Default number of points
        self.nerror = np.linalg.norm(self.r)/normB
        self.nerrorACTUAL = self.nerror

    def Run(self):
        """Convenience method: initialize and run the element selection."""
        self.Initialize()
        self.Calculate()

    def Calculate(self):
        """
        Method launching the element selection algorithm to find a set of elements (self.z)
        and weights (self.w).
        """
        k = 1 # number of iterations
        while self.nerrorACTUAL > self.ECM_tolerance and self.mPOS < self.m and len(self.y) != 0:
            # Step 1. Compute new point
            ObjFun = self.G[:,self.y].T @ self.r.T
            ObjFun = ObjFun.T / self.Gnorm[self.y]
            indSORT = np.argmax(ObjFun)
            i = self.y[indSORT]
            # Step 2. Update the inverse and the weights
            if k==1:
                alpha = np.linalg.lstsq(self.G[:, [i]], self.b)[0]
                H = 1/(self.G[:,i] @ self.G[:,i].T)
            else:
                H, alpha = self._UpdateWeightsInverse(self.G[:,self.z],H,self.G[:,i],alpha)
            # Step 3. Move i from set y to set z
            if k == 1:
                self.z = i
            else:
                self.z = np.r_[self.z,i]
            self.y = np.delete(self.y,indSORT)
            # Step 4. Find possible negative weights
            if any(alpha < 0):
                print("WARNING: NEGATIVE weight found")
                indexes_neg_weight = np.where(alpha <= 0.)[0]
                self.y = np.append(self.y, (self.z[indexes_neg_weight]).T)
                self.z = np.delete(self.z, indexes_neg_weight)
                H = self._MultiUpdateInverseHermitian(H, indexes_neg_weight)
                alpha = H @ (self.G[:, self.z].T @ self.b)
                alpha = alpha.reshape(len(alpha),1)
            # Step 6. Update the residual
            if len(alpha)==1:
                self.r = self.b - (self.G[:,self.z] * alpha)
            else:
                Aux = self.G[:,self.z] @ alpha
                self.r = np.squeeze(self.b - Aux.T)
            self.nerror = np.linalg.norm(self.r) / np.linalg.norm(self.b) # Relative error (using r and b)
            self.nerrorACTUAL = self.nerror
            # Step 7. Bookkeeping and error history
            self.mPOS = np.size(self.z)
            print(f'k = {k}, m = {np.size(self.z)}, error n(res)/n(b) (%) = {self.nerror*100}, Actual error % = {self.nerrorACTUAL*100} ')
            if k == 1:
                ERROR_GLO = np.array([self.nerrorACTUAL])
                NPOINTS = np.array([np.size(self.z)])
            else:
                ERROR_GLO = np.c_[ ERROR_GLO , self.nerrorACTUAL]
                NPOINTS = np.c_[ NPOINTS , np.size(self.z)]
            k = k+1
        self.w = alpha.T * np.sqrt(self.W[self.z]) #TODO FIXME cope with weights vectors different from 1
        print(f'Total number of iterations = {k}')
        if missing_matplotlib == False and self.Plotting == True:
            plt.plot(NPOINTS[0], ERROR_GLO[0])
            plt.title('Element Selection Error Evolution')
            plt.xlabel('Number of elements')
            plt.ylabel('Error %')
            plt.show()

    def _UpdateWeightsInverse(self, A,Aast,a,xold):
        """
        Quick update of the inverse (Aast) and weights (xold) when a new
        column a is appended to A.
        """
        c = np.dot(A.T, a)
        d = np.dot(Aast, c).reshape(-1, 1)
        s = np.dot(a.T, a) - np.dot(c.T, d)
        aux1 = np.hstack([Aast + np.outer(d, d) / s, -d / s])
        if np.shape(-d.T / s)[1]==1:
            aux2 = np.squeeze(np.hstack([-d.T / s, 1 / s]))
        else:
            aux2 = np.hstack([np.squeeze(-d.T / s), 1 / s])
        Bast = np.vstack([aux1, aux2])
        v = np.dot(a.T, self.r) / s
        x = np.vstack([(xold - d * v), v])
        return Bast, x

    def _MultiUpdateInverseHermitian(self, invH, neg_indexes):
        """
        Remove several columns/rows (neg_indexes) from the inverse Hermitian
        matrix, one at a time.
        """
        neg_indexes = np.sort(neg_indexes)
        for i in range(np.size(neg_indexes)):
            # Shift by i because earlier removals re-index the matrix.
            neg_index = neg_indexes[i] - i
            invH = self._UpdateInverseHermitian(invH, neg_index)
        return invH

    def _UpdateInverseHermitian(self, invH, neg_index):
        """
        Remove the column/row neg_index from the inverse Hermitian matrix invH
        via a Schur-complement downdate.
        """
        if neg_index == np.shape(invH)[1] - 1:
            # Removing the last entry: drop the trailing row/column directly.
            # BUGFIX: the comparison used np.shape(invH)[1] (never true for a valid
            # 0-based index) and the body called invH(-1, -1) -- a TypeError -- and
            # multiplied two vectors element-wise where an outer product is required.
            invH_new = invH[0:-1, 0:-1] - np.outer(invH[0:-1, -1], invH[-1, 0:-1]) / invH[-1, -1]
        else:
            # Permute the targeted column/row to the end, then apply the same downdate.
            aux1 = np.hstack([invH[:, 0:neg_index], invH[:, neg_index + 1:], invH[:, neg_index].reshape(-1, 1)])
            aux2 = np.vstack([aux1[0:neg_index, :], aux1[neg_index + 1:, :], aux1[neg_index, :]])
            invH_new = aux2[0:-1, 0:-1] - np.outer(aux2[0:-1, -1], aux2[-1, 0:-1]) / aux2[-1, -1]
        return invH_new
/MyoSuite-2.0.1-py3-none-any.whl/myosuite/utils/paths_utils.py | import numpy as np
import os
import glob
import pickle
import h5py
import skvideo.io
from PIL import Image
import click
from myosuite.utils.dict_utils import flatten_dict, dict_numpify
import json
# Useful to check the horizon for teleOp / Hardware experiments
def plot_horizon(paths, env, fileName_prefix=None):
    """
    Plot the timestep profile and horizon distribution of the given rollouts as PDFs.

    :param paths: list of rollout dicts, each carrying an 'env_infos' entry
    :param env: environment the rollouts were collected in (provides frame_skip/timestep)
    :param fileName_prefix: prefix for the saved '_timesteps.pdf' and '_horizon.pdf' files
    """
    # Import lazily so matplotlib is only required when plotting.
    import matplotlib as mpl
    mpl.use('TkAgg')
    import matplotlib.pyplot as plt
    plt.rcParams.update({'font.size': 5})
    if "time" in paths[0]['env_infos']:
        horizon = np.zeros(len(paths))
        # plot timesteps
        plt.clf()
        # Ideal control timestep of the simulation.
        rl_dt_ideal = env.env.frame_skip * env.env.model.opt.timestep
        for i, path in enumerate(paths):
            # Observed per-step durations and total rollout duration.
            dt = path['env_infos']['time'][1:] - path['env_infos']['time'][:-1]
            horizon[i] = path['env_infos']['time'][-1] - path['env_infos'][
                'time'][0]
            h1 = plt.plot(
                path['env_infos']['time'][1:],
                dt,
                '-',
                label=('time=%1.2f' % horizon[i]))
        h1 = plt.plot(
            np.array([0, max(horizon)]),
            rl_dt_ideal * np.ones(2),
            'g', alpha=.5,
            linewidth=2.0)
        plt.legend([h1[0]], ['ideal'], loc='upper right')
        plt.ylabel('time step (sec)')
        plt.xlabel('time (sec)')
        plt.ylim(rl_dt_ideal - 0.005, rl_dt_ideal + .005)
        plt.suptitle('Timestep profile for %d rollouts' % len(paths))
        file_name = fileName_prefix + '_timesteps.pdf'
        plt.savefig(file_name)
        print("Saved:", file_name)
    # plot horizon
    # NOTE(review): `horizon` and `rl_dt_ideal` are only bound inside the branch above,
    # so this section raises NameError when 'time' is absent -- confirm intended usage.
    plt.clf()
    h1 = plt.plot(
        np.array([0, len(paths)]),
        env.horizon * rl_dt_ideal * np.ones(2),
        'g',
        linewidth=5.0,
        label='ideal')
    plt.bar(np.arange(0, len(paths)), horizon, label='observed')
    plt.ylabel('rollout duration (sec)')
    plt.xlabel('rollout id')
    plt.legend()
    plt.suptitle('Horizon distribution for %d rollouts' % len(paths))
    file_name = fileName_prefix + '_horizon.pdf'
    plt.savefig(file_name)
    print("Saved:", file_name)
# Plot paths to a pdf file
def plot(paths, env=None, fileName_prefix=''):
    """
    Plot observations, actions and rewards of each rollout to '<fileName_prefix><path_name>.pdf'.

    :param paths: dict mapping path names to rollout dicts
    :param env: optional environment; if it exposes rwd_keys_wt, weighted rewards are plotted too
    :param fileName_prefix: prefix for the generated pdf files
    """
    # Import lazily with the non-interactive Agg backend (no display required).
    import matplotlib as mpl
    mpl.use('Agg')
    import matplotlib.pyplot as plt
    plt.rcParams.update({'font.size': 5})
    for path_name, path in paths.items():
        plt.clf()
        # observations: one subplot per key in the left-hand column
        nplt1 = len(path['env_infos']['obs_dict'].keys())
        for iplt1, key in enumerate(
                sorted(path['env_infos']['obs_dict'].keys())):
            ax = plt.subplot(nplt1, 2, iplt1 * 2 + 1)
            if iplt1 != (nplt1 - 1):
                ax.axes.xaxis.set_ticklabels([])
            if iplt1 == 0:
                plt.title('Observations')
            ax.yaxis.tick_right()
            # Skip image-like observations (3+ dims cannot be line-plotted).
            if path['env_infos']['obs_dict'][key].ndim<3:
                plt.plot(
                    path['env_infos']['time'],
                    path['env_infos']['obs_dict'][key],
                    label=key)
            # plt.ylabel(key)
            plt.text(0.01, .01, key, transform=ax.transAxes)
        plt.xlabel('time (sec)')
        # actions
        nplt2 = 3
        ax = plt.subplot(nplt2, 2, 2)
        ax.set_prop_cycle(None)
        # h4 = plt.plot(path['env_infos']['time'], env.env.act_mid + path['actions']*env.env.act_rng, '-', label='act') # plot scaled actions
        h4 = plt.plot(
            path['env_infos']['time'], path['actions'], '-',
            label='act') # plot normalized actions
        plt.ylabel('actions')
        ax.axes.xaxis.set_ticklabels([])
        ax.yaxis.tick_right()
        # rewards/ scores
        if "score" in path['env_infos']:
            ax = plt.subplot(nplt2, 2, 6)
            plt.plot(
                path['env_infos']['time'],
                path['env_infos']['score'],
                label='score')
            plt.xlabel('time')
            plt.ylabel('score')
            ax.yaxis.tick_right()
        if "rwd_dict" in path['env_infos']:
            ax = plt.subplot(nplt2, 2, 4)
            ax.set_prop_cycle(None)
            for key in sorted(path['env_infos']['rwd_dict'].keys()):
                plt.plot(
                    path['env_infos']['time'],
                    path['env_infos']['rwd_dict'][key],
                    label=key)
            plt.legend(
                loc='upper left',
                fontsize='x-small',
                bbox_to_anchor=(.75, 0.25),
                borderaxespad=0.)
            ax.axes.xaxis.set_ticklabels([])
            plt.ylabel('rewards')
            ax.yaxis.tick_right()
        # Weighted rewards (overwrites the score subplot slot if both exist).
        if env and hasattr(env.env, "rwd_keys_wt"):
            ax = plt.subplot(nplt2, 2, 6)
            ax.set_prop_cycle(None)
            for key in sorted(env.env.rwd_keys_wt.keys()):
                plt.plot(
                    path['env_infos']['time'],
                    path['env_infos']['rwd_dict'][key]*env.env.rwd_keys_wt[key],
                    label=key)
            plt.legend(
                loc='upper left',
                fontsize='x-small',
                bbox_to_anchor=(.75, 0.25),
                borderaxespad=0.)
            ax.axes.xaxis.set_ticklabels([])
            plt.ylabel('wt*rewards')
            ax.yaxis.tick_right()
        file_name = fileName_prefix + path_name + '.pdf'
        plt.savefig(file_name)
        print("saved ", file_name)
# Render frames/videos
def render(rollout_path, render_format:str="mp4", cam_names:list=["left"]):
    """
    Render frames or videos from a saved rollout.

    :param rollout_path: absolute path of the rollout (h5/pickle)
    :param render_format: format to save; one of ['rgb', 'mp4']
    :param cam_names: cameras to render, e.g. ['left', 'right', 'top', 'Franka_wrist']
    """
    output_dir = os.path.dirname(rollout_path)
    rollout_name = os.path.split(rollout_path)[-1]
    output_name, output_type = os.path.splitext(rollout_name)
    file_name = os.path.join(output_dir, output_name+"_"+"-".join(cam_names))
    # resolve data format
    if output_type=='.h5':
        paths = h5py.File(rollout_path, 'r')
    elif output_type=='.pickle':
        # BUGFIX: close the pickle file handle once loaded.
        with open(rollout_path, 'rb') as pickle_file:
            paths = pickle.load(pickle_file)
    else:
        raise TypeError("Unknown path format. Check file")
    # Run through all trajs in the paths
    for i_path, path in enumerate(paths):
        if output_type=='.h5':
            data = paths[path]['data']
            path_horizon = data['time'].shape[0]
        else:
            data = path['env_infos']['obs_dict']
            path_horizon = path['env_infos']['time'].shape[0]
        # find the full key name for every requested camera
        data_keys = data.keys()
        cam_keys = []
        for cam_name in cam_names:
            cam_key = None
            for key in data_keys:
                if cam_name in key and 'rgb' in key:
                    cam_key = key
                    break
            assert cam_key is not None, "Cam: {} not found in data. Available keys: [{}]".format(cam_name, data_keys)
            # BUGFIX: append the matched key; the old code appended the leaked
            # loop variable `key`, which only matched by accident.
            cam_keys.append(cam_key)
        # pre allocate buffers
        if i_path==0:
            height, width, _ = data[cam_keys[0]][0].shape
            frame_tile = np.zeros((height, width*len(cam_keys), 3), dtype=np.uint8)
        if render_format == "mp4":
            # BUGFIX: allocate per path -- horizons can differ between rollouts; a
            # buffer sized for the first path overflowed (or kept stale frames) for others.
            frames = np.zeros((path_horizon, height, width*len(cam_keys), 3), dtype=np.uint8)
        # Render
        print("Recovering {} frames:".format(render_format), end="")
        for t in range(path_horizon):
            # render single frame: tile all requested cameras horizontally
            for i_cam, cam_key in enumerate(cam_keys):
                frame_tile[:,i_cam*width:(i_cam+1)*width, :] = data[cam_key][t]
            # process single frame
            if render_format == "mp4":
                frames[t,:,:,:] = frame_tile
            elif render_format == "rgb":
                image = Image.fromarray(frame_tile)
                image.save(file_name+"_{}-{}.png".format(i_path, t))
            else:
                raise TypeError("Unknown format")
            print(t, end=",", flush=True)
        # Save video
        if render_format == "mp4":
            file_name_mp4 = file_name+"_{}.mp4".format(i_path)
            skvideo.io.vwrite(file_name_mp4, np.asarray(frames))
            print("\nSaving: " + file_name_mp4)
# parse path from robohive format into robopen dataset format
def path2dataset(path:dict, config_path=None)->dict:
    """
    Convert a rollout from Robohive format into roboset format.

    :param path: rollout dict with 'env_infos'/'obs_dict' (and optionally 'actions', 'user_cmt')
    :param config_path: optional json file whose contents are attached under 'config'
    :return: flat dict with 'data/...', 'derived/...' and 'config/...' keys
    """
    obs_keys = path['env_infos']['obs_dict'].keys()
    dataset = {}
    # Data =====
    dataset['data/time'] = path['env_infos']['obs_dict']['time']
    # actions: split into arm and end-effector controls
    if 'actions' in path.keys():
        dataset['data/ctrl_arm'] = path['actions'][:,:7]
        dataset['data/ctrl_ee'] = path['actions'][:,7:]
    # states
    for key in ['qp_arm', 'qv_arm', 'tau_arm', 'qp_ee', 'qv_ee']:
        if key in obs_keys:
            dataset['data/'+key] = path['env_infos']['obs_dict'][key]
    # cams: pick up rgb and depth streams for every known camera
    for cam in ['left', 'right', 'top', 'wrist']:
        for key in obs_keys:
            if cam in key:
                if 'rgb:' in key:
                    dataset['data/rgb_'+cam] = path['env_infos']['obs_dict'][key]
                elif 'd:' in key:
                    dataset['data/d_'+cam] = path['env_infos']['obs_dict'][key]
    # user
    if 'user' in obs_keys:
        dataset['data/user'] = path['env_infos']['obs_dict']['user']
    # Derived =====
    # (removed a dead `pose_ee = []` local that was never used)
    if 'pos_ee' in obs_keys or 'rot_ee' in obs_keys:
        assert ('pos_ee' in obs_keys and 'rot_ee' in obs_keys), "Both pose_ee and rot_ee are required"
        dataset['derived/pose_ee'] = np.hstack([path['env_infos']['obs_dict']['pos_ee'], path['env_infos']['obs_dict']['rot_ee']])
    # Config =====
    if config_path:
        config = json.load(open(config_path, 'rb'))
        dataset['config'] = config
    if 'user_cmt' in path.keys():
        dataset['config/solved'] = float(path['user_cmt'])
    return dataset
# Print h5 schema
def print_h5_schema(obj):
    "Recursively find all keys in an h5py.Group."
    keys = (obj.name,)
    if not isinstance(obj, h5py.Group):
        return keys
    for value in obj.values():
        if isinstance(value, h5py.Group):
            # Descend into sub-groups and accumulate their keys.
            keys += print_h5_schema(value)
        else:
            print("\t", "{0:35}".format(value.name), value)
            keys += (value.name,)
    return keys
# convert paths from pickle to h5 format
def pickle2h5(rollout_path, output_dir=None, verify_output=False, h5_format:str='robohive', compress_path=False, config_path=None, max_paths=1e6):
    """
    Convert pickled rollouts into h5 files.

    :param rollout_path: single pickle file, or a folder containing '*.pickle' files
    :param output_dir: directory to save the outputs; defaults to the input's location
    :param verify_output: read the saved file back and print its schema
    :param h5_format: 'robohive' path format / 'roboset' dataset format
    :param compress_path: produce smaller outputs by removing duplicate data
    :param config_path: optional json file with extra configs (roboset format only)
    :param max_paths: maximum number of rollouts to process
    """
    # resolve output dir
    if output_dir == None: # override the default
        output_dir = os.path.dirname(rollout_path)
    # resolve rollout_paths
    if os.path.isfile(rollout_path):
        rollout_paths = [rollout_path]
    else:
        rollout_paths = glob.glob(os.path.join(rollout_path, '*.pickle'))
    # Parse all rollouts
    n_rollouts = 0
    for rollout_path in rollout_paths:
        # parse all paths
        print('Parsing: ', rollout_path)
        if n_rollouts>=max_paths:
            break
        # BUGFIX: close the pickle file handle (was pickle.load(open(...)))
        with open(rollout_path, 'rb') as pickle_file:
            paths = pickle.load(pickle_file)
        rollout_name = os.path.split(rollout_path)[-1]
        output_name = os.path.splitext(rollout_name)[0]
        output_path = os.path.join(output_dir, output_name + '.h5')
        paths_h5 = h5py.File(output_path, "w")
        # Robohive path format
        if h5_format == "robohive":
            for i_path, path in enumerate(paths):
                print("parsing rollout", i_path)
                trial = paths_h5.create_group('Trial'+str(i_path))
                # remove duplicate infos
                if compress_path:
                    if 'observations' in path.keys():
                        del path['observations']
                    if 'state' in path['env_infos'].keys():
                        del path['env_infos']['state']
                # flatten dict and fix resolutions
                path = flatten_dict(data=path)
                path = dict_numpify(path, u_res=None, i_res=np.int8, f_res=np.float16)
                # add trial
                for k, v in path.items():
                    trial.create_dataset(k, data=v, compression='gzip', compression_opts=4)
                n_rollouts+=1
                if n_rollouts>=max_paths:
                    break
        # RoboPen dataset format
        elif h5_format == 'roboset':
            for i_path, path in enumerate(paths):
                print("parsing rollout", i_path)
                trial = paths_h5.create_group('Trial'+str(i_path))
                dataset = path2dataset(path, config_path) # convert to robopen dataset format
                dataset = flatten_dict(data=dataset)
                dataset = dict_numpify(dataset, u_res=None, i_res=np.int8, f_res=np.float16) # numpify + data resolutions
                for k, v in dataset.items():
                    trial.create_dataset(k, data=v, compression='gzip', compression_opts=4)
                n_rollouts+=1
                if n_rollouts>=max_paths:
                    break
        else:
            raise TypeError('Unsupported h5_format')
        # close the h5 writer for this path
        # BUGFIX: the comment promised a close but none happened; close before re-reading.
        paths_h5.close()
        print('Saving: ', output_path)
        # Read back and verify a few keys
        if verify_output:
            with h5py.File(output_path, "r") as h5file:
                print("Printing schema read from output: ", output_path)
                keys = print_h5_schema(h5file)
    print("Finished Processing")
# CLI help text (rendered by click for --help); usage examples included verbatim.
DESC="""
Script to recover images and videos from the saved pickle files
- python utils/paths_utils.py -u render -p paths.pickle -rf mp4 -cn right
- python utils/paths_utils.py -u pickle2h5 -p paths.pickle -vo True -cp True -hf robohive
"""
@click.command(help=DESC)
@click.option('-u', '--util', type=click.Choice(['plot_horizon', 'plot', 'render', 'pickle2h5', 'h5schema']), help='pick utility', required=True)
@click.option('-p', '--path', type=click.Path(exists=True), help='absolute path of the rollout (h5/pickle)', default=None)
@click.option('-e', '--env', type=str, help='Env name', default=None)
@click.option('-on', '--output_name', type=str, default=None, help=('Output name'))
@click.option('-od', '--output_dir', type=str, default=None, help=('Directory to save the outputs'))
@click.option('-vo', '--verify_output', type=bool, default=False, help=('Verify the saved file'))
@click.option('-hf', '--h5_format', type=click.Choice(['robohive', 'roboset']), help='format to save', default='roboset')
@click.option('-cp', '--compress_path', help='compress paths. Remove obs and env_info/state keys', default=False)
@click.option('-rf', '--render_format', type=click.Choice(['rgb', 'mp4']), help='format to save', default="mp4")
@click.option('-cn', '--cam_names', multiple=True, help='camera to render. Eg: left, right, top, Franka_wrist', default=["left", "top", "right", "wrist"])
@click.option('-ac', '--add_config', help='Add extra infos to config using as json', default=None)
@click.option('-mp', '--max_paths', type=int, help='maximum number of paths to process', default=1e6)
def util_path_cli(util, path, env, output_name, output_dir, verify_output, render_format, cam_names, h5_format, compress_path, add_config, max_paths):
    """Dispatch the requested path utility from the command line."""
    if util=='plot_horizon':
        # BUGFIX: os.path.join, not os.join (os has no `join`; this raised AttributeError)
        fileName_prefix = os.path.join(output_dir, output_name)
        plot_horizon(path, env, fileName_prefix)
    elif util=='plot':
        # BUGFIX: os.path.join, not os.join
        fileName_prefix = os.path.join(output_dir, output_name)
        plot(path, env, fileName_prefix)
    elif util=='render':
        render(rollout_path=path, render_format=render_format, cam_names=cam_names)
    elif util=='pickle2h5':
        pickle2h5(rollout_path=path, output_dir=output_dir, verify_output=verify_output, h5_format=h5_format, compress_path=compress_path, config_path=add_config, max_paths=max_paths)
    elif util=='h5schema':
        with h5py.File(path, "r") as h5file:
            print("Printing schema read from output: ", path)
            keys = print_h5_schema(h5file)
    else:
        raise TypeError("Unknown utility requested")


if __name__ == '__main__':
    util_path_cli()
/Flask-WebTest-0.1.3.tar.gz/Flask-WebTest-0.1.3/README.rst | Flask-WebTest
=============
.. image:: https://dl.circleci.com/status-badge/img/gh/level12/flask-webtest/tree/master.svg?style=svg
:target: https://dl.circleci.com/status-badge/redirect/gh/level12/flask-webtest/tree/master
Flask-WebTest provides a set of utilities to ease testing Flask applications with WebTest.
Flask-WebTest is small in scope and relatively mature/stable, so a lack of recent commits does not
mean the project is abandoned. Occasionally, Flask or another dependency deprecates imports; most
updates here are made to resolve those breakages when they occur.
Installation
------------
``pip install flask-webtest``
Documentation
-------------
Documentation is available on `Read the Docs`_.
.. _Read the Docs: https://flask-webtest.readthedocs.org/en/latest/
| PypiClean |
/InformixDB-2.5.tar.gz/InformixDB-2.5/README | INTRODUCTION
============
This is informixdb 2.5, an Informix implementation of the Python
Database API. This release implements version 2.0 of the DB-API:
http://www.python.org/topics/database/DatabaseAPI-2.0.html
The module consists of two components:
- `_informixdb', a Python C extension that interfaces with INFORMIX-ESQL/C
and implements the DB-API.
- `informixdb', a Python module that wraps around _informixdb and implements
various helper classes.
It is released under a BSD-like license. See the COPYRIGHT file
included with this distribution for more details.
INSTALLATION (UNIX)
===================
This distribution uses Python distutils to build and install the informixdb
module. It requires Python 2.2 or later.
In a hurry?
-----------
Extract the source distribution, and `cd' to the top-level directory.
Type `python setup.py build_ext' and, as root, `python setup.py install'.
Or want the details?
--------------------
First, extract the source distribution on a machine with Python 2.2 or
later and INFORMIX-ESQL/C, and `cd' to the top-level directory.
Next, run the build script `python setup.py build_ext', giving it any of the
following build options:
--esql-informixdir=DIR
Look for INFORMIX-ESQL/C in `DIR'. If omitted, it defaults to the value
of the INFORMIXDIR environment variable, or else /usr/informix.
--esql-static
Disable the use of Informix shared libraries. The default is to use
shared libraries if available. With this option, you can require
the Informix static libraries to instead be used.
--esql-threadlib=LIB
Enable the use of thread-safe Informix libraries, so that multiple Python
threads can use informixdb concurrently.
If the build command completes without errors, type `python setup.py install',
which will install the package into the appropriate site-specific library path.
INSTALLATION (Win32)
====================
Windows installers for Python 2.4 and 2.5 are available for download at
http://informixdb.sourceforge.net/. Download the appropriate installer
and run it to install the InformixDB module. Note that you'll need the
Informix Client SDK or Informix Connect to use the InformixDB module.
If you're using a different version of Python, you'll have to compile
InformixDB from the sources, following the same steps as for a Unix
installation. Distutils should do the right thing as long as you have the same
compiler that produced the Python interpreter you're using.
NEWS
====
From 2.4 to 2.5:
- Compatibility with CSDK 3.00
- Ability to manually interrupt or automatically time out SQL execution
- Proper binding of boolean parameters in WHERE clauses
- Make version information about server and client available
- Various bug fixes
From 2.3 to 2.4:
- Implement 'named' parameter style to optionally bind query parameters
by name
- Implement option to retrieve opaque types in their binary representation
From 2.2 to 2.3:
- Allow parameter list for executemany() to be arbitrary iterable objects.
- .prepare() method and .command attribute for explicitly prepared statements
- Python 2.5 compatibility
- Bug fixes
* Rare crashes caused by missing error check in DESCRIBE step.
* Inconsistent UDT input binding caused SQLCODE -1820 in bulk insert
(executemany) if UDT contents went back and forth across 32K size
boundary or between null and non-null.
* EXECUTE PROCEDURE erroneously attempted to open a results cursor for
procedures that don't return results.
* Date columns were read incorrectly on 64 bit platforms due to mixup
of int4 versus long.
From 2.1 to 2.2:
- Support for BOOLEAN columns
- DECIMAL and MONEY columns can be fetched as decimal.Decimal instances
if the decimal module is available
- autocommit mode for connections
- Bug fixes:
* Output buffer allocation used pointer/int casts that don't work on
most 64 bit platforms.
* Selecting TEXT/BYTE column from an empty set of rows caused segmentation
fault under certain circumstances.
* datetime values with trailing double-zeroes were fetched incorrectly.
From 2.0 to 2.1:
- INTERVAL support
- Scroll cursor and cursor with hold
- Support for Smart Large Objects and User Defined Types
From 1.5 to 2.0:
- Full compliance with DB-API version 2
- Implement LVARCHAR support
- Use insert cursor, if possible, in executemany for improved performance.
From 1.4 to 1.5:
- Further steps towards DB-API 2 compliance:
* added recommended keyword arguments to connect() method
* implemented cursor methods/attributes .next(), .executemany(),
.rowcount, and .arraysize
- informixdb.Error now makes details about the error (such as sqlcode)
available as attributes.
- sqlerrd wasn't initialized properly, and under many circumstances it
didn't correspond to the most recent operation. (Specifically,
it didn't get updated after PREPARE, OPEN, or FETCH.)
From 1.3 to 1.4:
- Ported from "Makefile.pre.in" mechanism to distutils.
- Allow passing username and password for connecting to remote databases.
- Expose sqlerrd.
- Trivial baby steps towards DB-API version 2 compliance.
From 1.2 to 1.3:
- Fixed the `fetch{one|many|all}' method output types and/or values
for several SQL types. See ChangeLog for more info.
From 1.01 to 1.2:
- `informixdb' now exports `error' as a module global. It previously
was only available as a connection attribute.
- The `execute' method for connections now returns a value. It
previously returned `None'.
- The `execute' method no longer limits operation strings to 2000
characters or less. Strings longer than this used to cause stack
corruption.
- `None' is mapped to NULL for `execute' method inputs.
Previously, it was mapped to the literal string "None".
- NULLS are mapped to `None' for `fetch{one|many|all}' method outputs.
Previously, they were mapped to either zero or an empty string.
(This fix can be disabled by setting the IFXDB_MASK_NULLS environment
variable to a non-zero value. See ChangeLog for more info.)
- Fixed error message formatting.
- Potentially blocking SQL statements are now executed with the Python
interpreter lock released. This permits other threads to execute
while one blocks on a database request.
- Increased the maximum number of rows returned by the `fetchall'
method from 65536 to `sys.maxint'.
- Fixed a bug that caused raw (i.e., BYTE or TEXT) inputs to
occasionally be treated as NULLs.
- Fixed a bug that caused datetime outputs with YEAR TO MINUTE
precision to be wildly inaccurate.
- Reorganized the source files as follows:
Release 1.0x Release 1.2
--------------- -----------------------
README
COPYRIGHT
ChangeLog
configure
configure.in
Makefile.in
install-sh
informixdb.py informixdb.py
ext/
Makefile.pre.in-1.4
Setup.in
ifxdbmodule.ec _informixdb.ec
dbi.c dbi.c
dbi.h dbi.h
old/
README README
Makefile Makefile
_informixdb.mak _informixdb.mak
_informixdb.def _informixdb.def
- Fixed numerous memory leaks.
- Added support for thread-safe Informix, so that each thread can open its
own connections.
- Implemented the "operation caching" feature of the `execute' method,
as described in the DB-API spec. If the same operation string is
passed consecutively to `execute' (perhaps with different inputs
each time), informixdb now recognizes this condition and uses the
previously parsed/prepared/described SQL statement.
For complete details, refer to the ChangeLog file included with this
distribution.
See also `old/README' for other NEWS.
NOTES
=====
- If informixdb is built to use Informix shared libraries, you may
need to set LD_LIBRARY_PATH (or similar environment variable) so that
the runtime linker can locate these shared libraries. Otherwise, you
may get the following exception when importing the informixdb module:
ImportError: Cannot locate C core module for informix db interface.
- In the operation string argument of the `execute' method, informixdb
recognizes not only positional parameters (i.e., identified by '?'
characters) but also ordinal parameters (i.e., identified by ":N"
substrings, where N is a number from 1 to M corresponding to the Nth
item in the parameter M-tuple).
See also `old/README' for other NOTES.
FUTURE
======
- Some kind of Type Mapping API
- Some kind of Unicode support
- More unit tests to cover correct functionality in addition to API compliance
MAINTAINER
==========
Got questions and/or problems with informixdb? Contact me at:
Carsten Haese <chaese@users.sourceforge.net>
I'd recommend that you also consider CC'ing the Python DB-SIG at:
db-sig@python.org
ACKNOWLEDGMENTS
===============
Many thanks to Greg Stein and Michael Lorton (late of the eShop) as
the initial authors of informixdb, and to Bertil Reinhammar and Stephen
Turner as its prior maintainers.
Special thanks to Daniel Smertnig for cleaning up the code, implementing
API 2 compliance, and for integrating esql with distutils without the
autoconf crutch.
RELEASE HISTORY
===============
Release Date By
------- ---------- -----------------
1.0 1997-02-19 Bertil Reinhammar
1.01 1997-02-19 Bertil Reinhammar
1.2 1999-05-21 Stephen J. Turner
1.3 1999-11-30 Stephen J. Turner
1.4 2005-04-11 Carsten Haese
1.5 2005-09-18 Carsten Haese
2.0 2005-10-22 Carsten Haese
2.1 2005-11-21 Carsten Haese
2.2 2006-03-26 Carsten Haese
2.3 2006-10-01 Carsten Haese
2.4 2006-12-02 Carsten Haese
2.5 2007-10-16 Carsten Haese
--
Carsten Haese <chaese@users.sourceforge.net>
http://informixdb.sourceforge.net/ | PypiClean |
/Newsroom-1.0-py3-none-any.whl/newsroom/push.py | import io
import hmac
import flask
import logging
import superdesk
from copy import copy
from PIL import Image, ImageEnhance
from flask import current_app as app
from superdesk.utc import utcnow
from superdesk.text_utils import get_word_count
from newsroom.notifications import push_notification
from newsroom.topics.topics import get_notification_topics
from newsroom.utils import query_resource, parse_dates
from newsroom.email import send_new_item_notification_email, \
send_history_match_notification_email, send_item_killed_notification_email
from newsroom.history import get_history_users
from newsroom.wire.views import HOME_ITEMS_CACHE_KEY
from newsroom.upload import ASSETS_RESOURCE
logger = logging.getLogger(__name__)
blueprint = flask.Blueprint('push', __name__)
KEY = 'PUSH_KEY'
THUMBNAIL_SIZE = (640, 640)
THUMBNAIL_QUALITY = 80
def test_signature(request):
    """Verify the HMAC-SHA1 signature of *request* against app PUSH_KEY.

    Returns True unconditionally when PUSH_KEY is not configured (with a
    warning), since there is nothing to verify against.
    """
    secret = app.config.get(KEY)
    if not secret:
        logger.warning('PUSH_KEY is not configured, can not verify incoming data.')
        return True
    computed = hmac.new(secret, request.get_data(), 'sha1')
    expected = 'sha1=%s' % computed.hexdigest()
    provided = request.headers.get('x-superdesk-signature', '')
    return hmac.compare_digest(provided, expected)
def assert_test_signature(request):
    """Abort the request with 403 when its push signature does not verify."""
    if test_signature(request):
        return
    source = request.referrer or request.remote_addr
    logger.warning('signature invalid on push from %s', source)
    flask.abort(403)
def fix_hrefs(doc):
    """Rewrite rendition hrefs on *doc* (and, recursively, on its
    associations) to point at this app's upload URLs."""
    renditions = doc.get('renditions') or {}
    for rendition in renditions.values():
        media = rendition.get('media')
        if media:
            rendition['href'] = app.upload_url(media)
    for assoc in doc.get('associations', {}).values():
        fix_hrefs(assoc)
def publish_item(doc):
    """Duplicating the logic from content_api.publish service.

    Normalises timestamps/version/wordcount on *doc*, links it to its
    ancestor chain (``evolvedfrom``), fixes media hrefs, generates feature
    media thumbnails and stores the item via the ``content_api`` service.

    :param doc: item dict pushed by the production system
    :return: the ``_id`` of the stored item
    """
    now = utcnow()
    parse_dates(doc)
    doc.setdefault('firstcreated', now)
    doc.setdefault('versioncreated', now)
    doc.setdefault(app.config['VERSION'], 1)
    doc.setdefault('wordcount', get_word_count(doc.get('body_html', '')))
    service = superdesk.get_resource_service('content_api')
    if 'evolvedfrom' in doc:
        parent_item = service.find_one(req=None, _id=doc['evolvedfrom'])
        if parent_item:
            # inherit ancestry and bookmarks so updated versions stay
            # linked to (and bookmarked like) their previous versions
            doc['ancestors'] = copy(parent_item.get('ancestors', []))
            doc['ancestors'].append(doc['evolvedfrom'])
            doc['bookmarks'] = parent_item.get('bookmarks', [])
        else:
            logger.warning("Failed to find evolvedfrom item %s for %s", doc['evolvedfrom'], doc['guid'])
    fix_hrefs(doc)
    logger.info('publishing %s', doc['guid'])
    # associations may contain explicit None entries; skip those
    for assoc in doc.get('associations', {}).values():
        if assoc:
            assoc.setdefault('subscribers', [])
    if doc.get('associations', {}).get('featuremedia'):
        generate_thumbnails(doc)
    _id = service.create([doc])[0]
    # back-link the parent to this new version after a successful create
    if 'evolvedfrom' in doc and parent_item:
        service.system_update(parent_item['_id'], {'nextversion': _id}, parent_item)
    return _id
@blueprint.route('/push', methods=['POST'])
def push():
    """Receive a pushed item, store it and fan out notifications."""
    assert_test_signature(flask.request)
    item = flask.json.loads(flask.request.get_data())
    # NOTE(review): asserts are stripped under `python -O`; confirm the
    # framework relies on AssertionError for validation before changing.
    assert 'guid' in item, {'guid': 1}
    assert 'type' in item, {'type': 1}
    # only notify topic subscribers when no earlier version of this guid exists
    orig = app.data.find_one('wire_search', req=None, _id=item['guid'])
    item['_id'] = publish_item(item)
    notify_new_item(item, check_topics=orig is None)
    # home page listing is cached; invalidate it after every push
    app.cache.delete(HOME_ITEMS_CACHE_KEY)
    return flask.jsonify({})
def notify_new_item(item, check_topics=True):
    """Fan out notifications for a freshly published item.

    Composite items are skipped. Only enabled users/companies are
    considered for topic and history matching.
    """
    if item.get('type') == 'composite':
        return

    enabled = {'is_enabled': True}
    users = list(query_resource('users', lookup=enabled))
    companies = list(query_resource('companies', lookup=enabled))

    user_ids = [user['_id'] for user in users]
    users_dict = {str(user['_id']): user for user in users}
    company_ids = [company['_id'] for company in companies]
    companies_dict = {str(company['_id']): company for company in companies}

    push_notification('new_item', item=item['_id'])

    if check_topics:
        notify_topic_matches(item, users_dict, companies_dict)
    notify_user_matches(item, users_dict, companies_dict, user_ids, company_ids)
def notify_user_matches(item, users_dict, companies_dict, user_ids, company_ids):
    """Notify users who previously viewed or bookmarked this item or one
    of its ancestors.

    Creates a ``notifications`` record per matched user, pushes a
    ``history_matches`` websocket notification and sends emails.
    """
    related_items = item.get('ancestors', [])
    # NOTE(review): this appends to item['ancestors'] in place when present
    related_items.append(item['_id'])
    history_users = get_history_users(related_items, user_ids, company_ids)
    bookmark_users = superdesk.get_resource_service('wire_search'). \
        get_matching_bookmarks(related_items, users_dict, companies_dict)
    history_users.extend(bookmark_users)
    # de-duplicate users matched by both the history and the bookmark path
    history_users = list(set(history_users))
    if history_users:
        for user in history_users:
            app.data.insert('notifications', [{
                'item': item['_id'],
                'user': user
            }])
        push_notification('history_matches',
                          item=item,
                          users=history_users)
        send_user_notification_emails(item, history_users, users_dict)
def send_user_notification_emails(item, user_matches, users):
    """Email each matched user about *item*.

    Killed items (``pubstatus == 'canceled'``) are always announced;
    other items only go to users who opted in via ``receive_email``.

    :param item: the published item dict
    :param user_matches: iterable of matched user ids
    :param users: mapping of str(user_id) -> user dict
    """
    killed = item.get('pubstatus') == 'canceled'
    for user_id in user_matches:
        user = users.get(str(user_id))
        if user is None:
            # robustness fix: a matched id may no longer resolve to an
            # enabled user; the original code would crash on user.get()
            continue
        if killed:
            send_item_killed_notification_email(user, item=item)
        elif user.get('receive_email'):
            send_history_match_notification_email(user, item=item)
def notify_topic_matches(item, users_dict, companies_dict):
    """Notify users whose saved notification topics match the new item."""
    topics = get_notification_topics()
    search_service = superdesk.get_resource_service('wire_search')
    matches = search_service.get_matching_topics(
        item['_id'], topics, users_dict, companies_dict)
    if not matches:
        return
    push_notification('topic_matches', item=item, topics=matches)
    send_topic_notification_emails(item, topics, matches, users_dict)
def send_topic_notification_emails(item, topics, topic_matches, users):
    """Email each matched topic's owner, provided they opted in."""
    for topic in topics:
        if topic['_id'] not in topic_matches:
            continue
        user = users.get(str(topic['user']))
        if user and user.get('receive_email'):
            send_new_item_notification_email(user, topic['label'], item=item)
# keeping this for testing
@blueprint.route('/notify', methods=['POST'])
def notify():
    """Manually trigger notifications for the item in the request body."""
    payload = flask.json.loads(flask.request.get_data())
    notify_new_item(payload['item'])
    return flask.jsonify({'status': 'OK'}), 200
@blueprint.route('/push_binary', methods=['POST'])
def push_binary():
    """Store a pushed binary asset under the id supplied in the form data."""
    assert_test_signature(flask.request)
    media = flask.request.files['media']
    media_id = flask.request.form['media_id']
    app.media.put(media, resource=ASSETS_RESOURCE, _id=media_id, content_type=media.content_type)
    return flask.jsonify({'status': 'OK'}), 201
@blueprint.route('/push_binary/<media_id>')
def push_binary_get(media_id):
    """Return an empty JSON body when the asset exists, otherwise 404."""
    if not app.media.get(media_id, resource=ASSETS_RESOURCE):
        flask.abort(404)
    return flask.jsonify({})
def generate_thumbnails(item):
    """Generate thumbnail and watermarked renditions for the item's
    feature media and register them under ``_newsroom_*`` rendition keys.

    No-op when there is no feature media or no usable source rendition.
    """
    picture = item.get('associations', {}).get('featuremedia', {})
    if not picture:
        return

    # use 4-3 rendition for generated thumbs; fall back to viewImage
    renditions = picture.get('renditions', {})
    rendition = renditions.get('4-3', renditions.get('viewImage'))
    if not rendition:
        return

    # generate thumbnails
    binary = app.media.get(rendition['media'], resource=ASSETS_RESOURCE)
    im = Image.open(binary)
    thumbnail = _get_thumbnail(im)  # 4-3 rendition resized
    watermark = _get_watermark(im)  # 4-3 rendition with watermark
    picture['renditions'].update({
        '_newsroom_thumbnail': _store_image(thumbnail,
                                            _id='%s%s' % (rendition['media'], '_newsroom_thumbnail')),
        '_newsroom_thumbnail_large': _store_image(watermark,
                                                  _id='%s%s' % (rendition['media'], '_newsroom_thumbnail_large')),
    })

    # add watermark to base/view images
    for key in ['base', 'view']:
        rendition = picture.get('renditions', {}).get('%sImage' % key)
        if rendition:
            binary = app.media.get(rendition['media'], resource=ASSETS_RESOURCE)
            im = Image.open(binary)
            watermark = _get_watermark(im)
            picture['renditions'].update({
                '_newsroom_%s' % key: _store_image(watermark,
                                                   _id='%s%s' % (rendition['media'], '_newsroom_%s' % key))
            })
def _store_image(image, filename=None, _id=None):
    """Save *image* as JPEG into the assets media storage.

    :param image: a PIL image
    :param filename: optional filename to store with the asset
    :param _id: optional explicit media id (reused if it already exists)
    :return: rendition metadata dict (media, href, width, height, mimetype)
    """
    binary = io.BytesIO()
    image.save(binary, 'jpeg', quality=THUMBNAIL_QUALITY)
    binary.seek(0)
    media_id = app.media.put(binary, filename=filename, _id=_id, resource=ASSETS_RESOURCE, content_type='image/jpeg')
    if not media_id:
        # media with the same id exists
        media_id = _id
        binary.seek(0)
    return {
        'media': str(media_id),
        'href': app.upload_url(media_id),
        'width': image.width,
        'height': image.height,
        'mimetype': 'image/jpeg'
    }
def _get_thumbnail(image):
    """Return a copy of *image* downscaled in place to fit THUMBNAIL_SIZE."""
    thumb = image.copy()
    thumb.thumbnail(THUMBNAIL_SIZE)
    return thumb
def _get_watermark(image):
    """Return a copy of *image* with the configured watermark composited in.

    When WATERMARK_IMAGE is not configured, a plain copy is returned.
    The result is always RGB (alpha flattened away).
    """
    image = image.copy()
    if not app.config.get('WATERMARK_IMAGE'):
        return image
    # alpha_composite requires both layers to be RGBA
    if image.mode != 'RGBA':
        image = image.convert('RGBA')
    with open(app.config['WATERMARK_IMAGE'], mode='rb') as watermark_binary:
        watermark_image = Image.open(watermark_binary)
        set_opacity(watermark_image, 0.3)
        watermark_layer = Image.new('RGBA', image.size)
        # anchor watermark at the right edge, about two thirds down the image
        watermark_layer.paste(watermark_image, (
            image.size[0] - watermark_image.size[0],
            int((image.size[1] - watermark_image.size[1]) * 0.66),
        ))
        watermark = Image.alpha_composite(image, watermark_layer)
    return watermark.convert('RGB')
def set_opacity(image, opacity=1):
    """Scale the alpha channel of an RGBA *image* in place by *opacity*."""
    alpha_channel = image.split()[3]
    faded = ImageEnhance.Brightness(alpha_channel).enhance(opacity)
    image.putalpha(faded)
/LinOTP-2.11.1.tar.gz/LinOTP-2.11.1/linotp/lib/migrate.py | """ contains the hsm migration handler"""
from Cryptodome.Protocol.KDF import PBKDF2
import hmac
import binascii
import random # for test id genretator using random.choice
import os
from hashlib import sha256
from Cryptodome.Cipher import AES
from linotp.model import Token as model_token
from linotp.model import Config as model_config
from linotp.lib.config import getFromConfig
from linotp.lib.config.db_api import _storeConfigDB
from linotp.lib.crypto import SecretObj
from linotp.lib.context import request_context as context
import linotp.model
Session = linotp.model.Session
class DecryptionError(Exception):
    """Raised when integrity verification of encrypted data fails."""
class MigrationHandler(object):
    """
    The migration handler supports the migration of encrypted data,
    like the token seed or pin, and of encrypted config entries that
    contain sensitive data like passwords.
    """

    def __init__(self):
        """
        The migration handler relies on a crypto handler, which
        encrypts or decrypts data.
        The setup of the crypto handler is delayed, as at startup
        not all data might be available.
        """
        # salt and crypter are established later via setup()
        self.salt = None
        self.crypter = None
        # hardware security module handle taken from the request context
        self.hsm = context.get('hsm')

    def setup(self, passphrase, salt=None):
        """
        setup the MigrationHandler - or more precisely the crypto handler,
        which is a MigrationHandler member.

        :param passphrase: enc + decryption key is derived from the passphrase
        :param salt: optional - if not given, a new one is generated
        :return: the salt, as binary
        """
        if salt:
            self.salt = salt

        if not self.salt:
            self.salt = os.urandom(AES.block_size)

        self.crypter = Crypter(passphrase, self.salt)
        return self.salt

    def calculate_mac(self, data):
        """
        helper method - to return a mac from given data

        :param data: the input data for the mac calculation
        :return: the mac as binary
        """
        return self.crypter.mac(data)

    def get_config_items(self):
        """
        iterator function, to return a config entry in the migration format

        it reads all config entries from the config table, which have the type
        password. The decrypted value is taken from the linotp config

        :return: dictionary with the config entry: key, type, description
                 and the value, which is a dict with the encryption relevant
                 data like: encrypted_data, iv, mac
        """
        config_entries = Session.query(model_config).\
            filter(model_config.Type == 'password').all()

        for entry in config_entries:
            key = 'enc%s' % entry.Key
            value = getFromConfig(key)

            # calculate encryption and add mac from mac_data
            enc_value = self.crypter.encrypt(input_data=value,
                                             just_mac=key + entry.Value)

            config_item = {
                "Key": entry.Key,
                "Value": enc_value,
                "Type": entry.Type,
                "Description": entry.Description
            }

            yield config_item

    def set_config_entry(self, config_entry):
        """
        set the config entry - using the standard way, so that the new value
        will be encrypted using the new encryption key and potentially as well
        a new iv.

        before storing the new entry, the old value in its encrypted form is
        read and used to verify the mac of the migrated value.

        :param config_entry: the config entry, as a dict
        :return: - nothing -
        """
        key = config_entry['Key']
        typ = config_entry['Type']
        desc = config_entry['Description']
        # 'None' as string marks a missing description
        if desc == 'None':
            desc = None

        config_entries = Session.query(model_config).\
            filter(model_config.Key == key).all()
        entry = config_entries[0]

        # decrypt the real value
        enc_value = config_entry['Value']
        value = self.crypter.decrypt(enc_value,
                                     just_mac='enc%s' % key + entry.Value)

        _storeConfigDB(key, value, typ=typ, desc=desc)

    def get_token_data(self):
        """
        iterator function over all tokens - yields, per token, a dict with
        the serial and the re-encrypted pin, user pin and seed, each with
        a mac binding the value to its current database representation.
        """
        tokens = Session.query(model_token).all()

        for token in tokens:

            token_data = {}
            serial = token.LinOtpTokenSerialnumber
            token_data['Serial'] = serial

            if token.isPinEncrypted():
                iv, enc_pin = token.get_encrypted_pin()
                pin = SecretObj.decrypt_pin(enc_pin, hsm=self.hsm)
                just_mac = serial + token.LinOtpPinHash
                enc_value = self.crypter.encrypt(input_data=pin,
                                                 just_mac=just_mac)
                token_data['TokenPin'] = enc_value

            # the userpin is used in motp and ocra/ocra2 token
            if token.LinOtpTokenPinUser:
                key, iv = token.getUserPin()
                user_pin = SecretObj.decrypt(key, iv, hsm=self.hsm)
                just_mac = serial + token.LinOtpTokenPinUser
                enc_value = self.crypter.encrypt(input_data=user_pin,
                                                 just_mac=just_mac)
                token_data['TokenUserPin'] = enc_value

            # then we retrieve as well the original value,
            # to identify changes
            encKey = token.LinOtpKeyEnc

            key, iv = token.get_encrypted_seed()
            secObj = SecretObj(key, iv, hsm=self.hsm)
            seed = secObj.getKey()
            enc_value = self.crypter.encrypt(input_data=seed,
                                             just_mac=serial + encKey)
            token_data['TokenSeed'] = enc_value

            yield token_data

    def set_token_data(self, token_data):
        """
        write the migrated pin, user pin and seed back to the token,
        re-encrypting each value with the current security module.

        The mac of each value includes the token's current encrypted
        representation, so a concurrent change is detected on decrypt.

        :param token_data: dict as produced by get_token_data()
        :return: - nothing -
        """
        serial = token_data["Serial"]
        tokens = Session.query(model_token).\
            filter(model_token.LinOtpTokenSerialnumber == serial).all()
        token = tokens[0]

        if 'TokenPin' in token_data:
            enc_pin = token_data['TokenPin']
            token_pin = self.crypter.decrypt(
                enc_pin,
                just_mac=serial + token.LinOtpPinHash)
            # prove, we can write
            enc_pin = SecretObj.encrypt_pin(token_pin)
            iv = enc_pin.split(':')[0]
            token.set_encrypted_pin(enc_pin, binascii.unhexlify(iv))

        if 'TokenUserPin' in token_data:
            token_enc_user_pin = token_data['TokenUserPin']
            user_pin = self.crypter.decrypt(
                token_enc_user_pin,
                just_mac=serial + token.LinOtpTokenPinUser)
            # prove, we can write
            iv, enc_user_pin = SecretObj.encrypt(user_pin, hsm=self.hsm)
            token.setUserPin(enc_user_pin, iv)

        # we put the current crypted seed in the mac to check if
        # something changed in meantime
        encKey = token.LinOtpKeyEnc
        enc_seed = token_data['TokenSeed']
        token_seed = self.crypter.decrypt(enc_seed,
                                          just_mac=serial + encKey)

        # the encryption of the token seed is not part of the model anymore
        iv, enc_token_seed = SecretObj.encrypt(token_seed)

        token.set_encrypted_seed(enc_token_seed, iv,
                                 reset_failcount=False,
                                 reset_counter=False)
class Crypter(object):
    """AES-CBC encryption/decryption helper with HMAC-SHA256 integrity.

    Encryption and mac keys are derived from a passphrase + salt via
    PBKDF2. NOTE: this is Python 2 era code (uses ``unicode``).
    """

    @staticmethod
    def hmac_sha256(secret, msg):
        """Return the raw HMAC-SHA256 digest of *msg* under *secret*."""
        hmac_obj = hmac.new(secret, msg=msg, digestmod=sha256)
        val = hmac_obj.digest()
        return val

    def mac(self, *messages):
        """
        calculate the mac independent of the type

        str parts are used as-is; unicode parts are utf-8 encoded.
        NOTE(review): parts of any other type are silently skipped -
        confirm this is intended before relying on it.
        """
        mac_message = ""
        for message in messages:
            if type(message) == str:
                mac_message += message
            elif type(message) == unicode:
                mac_message += message.encode('utf-8')

        return Crypter.hmac_sha256(self.mac_key, mac_message)

    def __init__(self, password, salt):
        """
        derive the encryption key and the mac signing key
        from the passphrase and salt

        :param password: the initial passphrase
        :param salt: the salt, defending against rainbow table attacks
        :return: - nothing -
        """
        master_key = PBKDF2(password=password, salt=salt, dkLen=32,
                            count=65432, prf=Crypter.hmac_sha256)

        # two independent keys derived by hash chaining the master key
        U1 = sha256(master_key).digest()
        U2 = sha256(U1).digest()

        self.enc_key = U1[:16]
        self.mac_key = U2[:16]

    def encrypt(self, input_data, just_mac=""):
        """
        encrypt data

        :param input_data: any data as input
        :param just_mac: extra data that is bound into the mac (but not
                         encrypted), to detect context changes on decrypt
        :return: dictionary with hexlified iv, crypted_data and mac
        """
        # generate new iv
        iv = os.urandom(AES.block_size)

        # init cipher
        cipher = AES.new(self.enc_key, AES.MODE_CBC, iv)

        # encrypt data
        crypted_data = cipher.encrypt(Crypter.pad(input_data))

        # mac encrypted data plus additional 'just_mac' data
        mac = self.mac(iv, crypted_data, just_mac)

        return {"iv": binascii.hexlify(iv),
                "crypted_data": binascii.hexlify(crypted_data),
                'mac': binascii.hexlify(mac)
                }

    def decrypt(self, encrypted_data, just_mac=""):
        """
        decrypt the stored data

        :param encrypted_data: dict with hexlified iv, crypted_data and mac
        :param just_mac: the same extra mac data as used for encrypt()
        :raises DecryptionError: when the mac does not verify
        :return: decrypted data
        """
        iv = binascii.unhexlify(encrypted_data["iv"])
        crypted_data = binascii.unhexlify(encrypted_data["crypted_data"])

        # compare the original mac with the new calculated one
        v_mac = self.mac(iv, crypted_data, just_mac)
        # NOTE(review): non-constant-time comparison; hmac.compare_digest
        # would avoid a potential timing side channel
        if encrypted_data["mac"] != binascii.hexlify(v_mac):
            raise DecryptionError("Data mismatch detected!")

        cipher = AES.new(self.enc_key, AES.MODE_CBC, iv)

        # return decrypted, unpadded data
        return Crypter.unpad(cipher.decrypt(crypted_data))

    @staticmethod
    def unpad(output_data):
        """
        pkcs7 unpadding:
        the last byte value is the number of bytes to subtract
        """
        padlen = ord(output_data[-1:])
        return output_data[:-padlen]

    @staticmethod
    def pad(input_data):
        """
        pkcs7 padding:
        the value of the last byte is the pad length
        !and zero is not allowed! we take a full block instead
        """
        padLength = AES.block_size - (len(input_data) % AES.block_size)
        return input_data + chr(padLength) * padLength
# eof # | PypiClean |
/ESMValTool-2.9.0-py3-none-any.whl/esmvaltool/cmorizers/data/formatters/datasets/woa.py | import logging
import os
from warnings import catch_warnings, filterwarnings
import iris
from cf_units import Unit
from esmvaltool.cmorizers.data.utilities import (
constant_metadata,
fix_coords,
fix_var_metadata,
save_variable,
set_global_atts,
)
logger = logging.getLogger(__name__)
def _fix_data(cube, var, version):
    """Apply variable- and version-specific unit conversions to *cube*.

    Conversions use in-place cube arithmetic so the caller's cube is
    modified; the cube is also returned for convenience.
    """
    logger.info("Fixing data ...")
    if version == '2018':
        with constant_metadata(cube):
            if var in ('o2', 'po4', 'si', 'no3'):
                cube /= 1000.  # Convert from umol/kg to mol/m^3
    if version == '2013v2':
        with constant_metadata(cube):
            if var in ('po4', 'si', 'no3'):
                cube /= 1000.  # Convert from ml/l to mol/m^3
            elif var == 'thetao':
                cube += 273.15  # Convert to Kelvin
            elif var == 'o2':
                cube *= 44.661 / 1000.  # Convert from ml/l to mol/m^3
    return cube
def collect_files(in_dir, var, cfg):
    """Return the (single-element) list of input file paths for *var*."""
    var_info = cfg['variables'][var]
    short_name = cfg['attributes']['short_name'].lower()
    fname = '{}_{}00_01.nc'.format(short_name, var_info['file'])
    return [os.path.join(in_dir, var_info['name'], fname)]
def extract_variable(in_files, out_dir, attrs, raw_info, cmor_table):
    """Extract variables and create OBS dataset.

    Loads the raw WOA variable, normalises its time coordinate, applies
    metadata/coordinate/data fixes and saves the CMORized cube. When a
    surface variable (``srf_var``) is configured, the depth-0 slice is
    additionally saved under that name.
    """
    var = raw_info['var']
    var_info = cmor_table.get_variable(raw_info['mip'], var)
    rawvar = raw_info['raw_var']
    with catch_warnings():
        # WOA files carry units iris cannot parse; silence those warnings
        filterwarnings(
            action='ignore',
            message='Ignoring netCDF variable .* invalid units .*',
            category=UserWarning,
            module='iris',
        )
        cubes = iris.load(in_files, rawvar)
        iris.util.equalise_attributes(cubes)
        cube = cubes.concatenate_cube()

    # set reference time
    year = raw_info['reference_year']
    cube.coord('time').climatological = False
    # mid-year point, expressed in months since Jan 1 of the reference year
    cube.coord('time').points = 6.5
    cube.coord('time').units = Unit('months since ' + str(year) +
                                    '-01-01 00:00:00',
                                    calendar='gregorian')

    fix_var_metadata(cube, var_info)
    fix_coords(cube)
    _fix_data(cube, var, attrs['version'])
    set_global_atts(cube, attrs)
    save_variable(cube, var, out_dir, attrs, unlimited_dimensions=['time'])

    # derive ocean surface
    if 'srf_var' in raw_info:
        var_info = cmor_table.get_variable(raw_info['mip'],
                                           raw_info['srf_var'])
        logger.info("Extract surface OBS for %s", raw_info['srf_var'])
        level_constraint = iris.Constraint(cube.var_name, depth=0)
        cube_os = cube.extract(level_constraint)
        fix_var_metadata(cube_os, var_info)
        save_variable(cube_os,
                      raw_info['srf_var'],
                      out_dir,
                      attrs,
                      unlimited_dimensions=['time'])
def cmorization(in_dir, out_dir, cfg, cfg_user, start_date, end_date):
    """Cmorization func call.

    Iterates the configured variables and CMORizes each one.
    ``cfg_user``, ``start_date`` and ``end_date`` are part of the common
    formatter interface but unused by this dataset.
    """
    cmor_table = cfg['cmor_table']
    glob_attrs = cfg['attributes']

    # run the cmorization
    for var, vals in cfg['variables'].items():
        in_files = collect_files(in_dir, var, cfg)
        logger.info("CMORizing var %s from input set %s", var, vals['name'])
        # NOTE(review): raw_info is the same dict object as vals, so the
        # update() below mutates cfg['variables'][var] in place
        raw_info = cfg['variables'][var]
        raw_info.update({
            'var': var,
            'reference_year': cfg['custom']['reference_year'],
        })
        glob_attrs['mip'] = vals['mip']
        extract_variable(in_files, out_dir, glob_attrs, raw_info, cmor_table)
/BIA_OBS-1.0.3.tar.gz/BIA_OBS-1.0.3/BIA/static/dist/node_modules/tailwindcss/lib/cli/init/index.js | "use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
Object.defineProperty(exports, "init", {
enumerable: true,
get: ()=>init
});
const _fs = /*#__PURE__*/ _interopRequireDefault(require("fs"));
const _path = /*#__PURE__*/ _interopRequireDefault(require("path"));
// Transpiler helper: wrap a CommonJS export so it can be consumed
// like an ES module with a `default` property.
function _interopRequireDefault(obj) {
    return obj && obj.__esModule ? obj : {
        default: obj
    };
}
// Scaffold a Tailwind CSS config file (and optionally a PostCSS config)
// for the CLI `init` command. Existing files are never overwritten.
function init(args, configs) {
    let messages = [];
    var ref;
    // Target path: explicit second positional CLI argument, otherwise the default config name.
    let tailwindConfigLocation = _path.default.resolve((ref = args["_"][1]) !== null && ref !== void 0 ? ref : `./${configs.tailwind}`);
    if (_fs.default.existsSync(tailwindConfigLocation)) {
        messages.push(`${_path.default.basename(tailwindConfigLocation)} already exists.`);
    } else {
        // `--full` writes the complete default config, otherwise the minimal stub.
        let stubFile = _fs.default.readFileSync(args["--full"] ? _path.default.resolve(__dirname, "../../../stubs/defaultConfig.stub.js") : _path.default.resolve(__dirname, "../../../stubs/simpleConfig.stub.js"), "utf8");
        // Change colors import
        stubFile = stubFile.replace("../colors", "tailwindcss/colors");
        _fs.default.writeFileSync(tailwindConfigLocation, stubFile, "utf8");
        messages.push(`Created Tailwind CSS config file: ${_path.default.basename(tailwindConfigLocation)}`);
    }
    if (args["--postcss"]) {
        let postcssConfigLocation = _path.default.resolve(`./${configs.postcss}`);
        if (_fs.default.existsSync(postcssConfigLocation)) {
            messages.push(`${_path.default.basename(postcssConfigLocation)} already exists.`);
        } else {
            let stubFile1 = _fs.default.readFileSync(_path.default.resolve(__dirname, "../../../stubs/defaultPostCssConfig.stub.js"), "utf8");
            _fs.default.writeFileSync(postcssConfigLocation, stubFile1, "utf8");
            messages.push(`Created PostCSS config file: ${_path.default.basename(postcssConfigLocation)}`);
        }
    }
    // Print a summary of what was created or skipped.
    if (messages.length > 0) {
        console.log();
        for (let message of messages){
            console.log(message);
        }
    }
}
/FFC-2017.1.0.tar.gz/FFC-2017.1.0/ffc/quadrature/product.py | "This file implements a class to represent a product."
# Copyright (C) 2009-2010 Kristian B. Oelgaard
#
# This file is part of FFC.
#
# FFC is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FFC is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with FFC. If not, see <http://www.gnu.org/licenses/>.
#
# First added: 2009-07-12
# Last changed: 2010-03-11
from functools import reduce
# FFC modules
from ffc.log import error
from ffc.cpp import format
# FFC quadrature modules
from .symbolics import create_float
from .symbolics import create_product
from .symbolics import create_sum
from .symbolics import create_fraction
from .expr import Expr
# FFC quadrature modules
from .floatvalue import FloatValue
class Product(Expr):
    # A product of sub-expressions (self._prec == 2).  `vrs` holds the sorted
    # factors, with at most one leading FloatValue collecting all numeric
    # coefficients; `_expanded` caches the expanded form (or is False until
    # expand() is called when the factors contain sums/fractions).
    __slots__ = ("vrs", "_expanded")
    def __init__(self, variables):
        """Initialise a Product object, it derives from Expr and contains
        the additional variables:

        vrs       - a list of variables (the factors of the product)
        _expanded - object, an expanded object of self, e.g.,
                    self = x*(2+y) -> self._expanded = (2*x + x*y) (a sum), or
                    self = 2*x -> self._expanded = 2*x (self).
        NOTE: self._prec = 2."""
        # Initialise value, list of variables, class.
        self.val = 1.0
        self.vrs = []
        self._prec = 2
        # Initially set _expanded to True.
        self._expanded = True
        # Process variables if we have any.
        if variables:
            # Remove nested Products and test for expansion.
            float_val = 1.0
            for var in variables:
                # If any value is zero the entire product is zero.
                if var.val == 0.0:
                    self.val = 0.0
                    self.vrs = [create_float(0.0)]
                    float_val = 0.0
                    break
                # Collect floats into one variable
                if var._prec == 0: # float
                    float_val *= var.val
                    continue
                # Take care of product such that we don't create
                # nested products.
                elif var._prec == 2: # prod
                    # If expanded product is a float, just add it.
                    if var._expanded and var._expanded._prec == 0:
                        float_val *= var._expanded.val
                    # If expanded product is symbol, this product is
                    # still expanded and add symbol.
                    elif var._expanded and var._expanded._prec == 1:
                        self.vrs.append(var._expanded)
                    # If expanded product is still a product, add the
                    # variables.
                    elif var._expanded and var._expanded._prec == 2:
                        # Add copies of the variables of other product
                        # (collect floats).
                        if var._expanded.vrs[0]._prec == 0:
                            float_val *= var._expanded.vrs[0].val
                            self.vrs += var._expanded.vrs[1:]
                            continue
                        self.vrs += var._expanded.vrs
                    # If expanded product is a sum or fraction, we
                    # must expand this product later.
                    elif var._expanded and var._expanded._prec in (3, 4):
                        self._expanded = False
                        self.vrs.append(var._expanded)
                    # Else the product is not expanded, and we must
                    # expand this one later
                    else:
                        self._expanded = False
                        # Add copies of the variables of other product
                        # (collect floats).
                        if var.vrs[0]._prec == 0:
                            float_val *= var.vrs[0].val
                            self.vrs += var.vrs[1:]
                            continue
                        self.vrs += var.vrs
                    continue
                # If we have sums or fractions in the variables the
                # product is not expanded.
                elif var._prec in (3, 4): # sum or frac
                    self._expanded = False
                # Just add any variable at this point to list of new
                # vars.
                self.vrs.append(var)
            # If value is 1 there is no need to include it, unless it
            # is the only parameter left i.e., 2*0.5 = 1.
            if float_val and float_val != 1.0:
                self.val = float_val
                self.vrs.append(create_float(float_val))
            # If we no longer have any variables add the float.
            elif not self.vrs:
                self.val = float_val
                self.vrs = [create_float(float_val)]
            # If 1.0 is the only value left, add it.
            # NOTE(review): this branch looks unreachable -- the preceding
            # "elif not self.vrs" already catches every empty-vrs case.
            elif abs(float_val - 1.0) < format["epsilon"] and not self.vrs:
                self.val = 1.0
                self.vrs = [create_float(1)]
        # If we don't have any variables the product is zero.
        else:
            self.val = 0.0
            self.vrs = [create_float(0)]
        # The type is equal to the lowest variable type.
        self.t = min([v.t for v in self.vrs])
        # Sort the variables such that comparisons work.
        self.vrs.sort()
        # Compute the representation now, such that we can use it
        # directly in the __eq__ and __ne__ methods (improves
        # performance a bit, but only when objects are cached).
        self._repr = "Product([%s])" % ", ".join([v._repr for v in self.vrs])
        # Use repr as hash value.
        self._hash = hash(self._repr)
        # Store self as expanded value, if we did not encounter any
        # sums or fractions.
        if self._expanded:
            self._expanded = self
    # Print functions.
    def __str__(self):
        "Simple string representation which will appear in the generated code."
        # If we have more than one variable and the first float is -1
        # exclude the 1.
        if len(self.vrs) > 1 and self.vrs[0]._prec == 0 and self.vrs[0].val == -1.0:
            # Join string representation of members by multiplication
            return format["sub"](["", format["mul"]([str(v) for v in self.vrs[1:]])])
        return format["mul"]([str(v) for v in self.vrs])
    # Binary operators.
    def __add__(self, other):
        "Addition by other objects."
        # NOTE: Assuming expanded variables.
        # If two products are equal, add their float values.
        if other._prec == 2 and self.get_vrs() == other.get_vrs():
            # Return expanded product, to get rid of 3*x + -2*x -> x, not 1*x.
            return create_product([create_float(self.val + other.val)] + list(self.get_vrs())).expand()
        elif other._prec == 1: # sym
            if self.get_vrs() == (other,):
                # Return expanded product, to get rid of -x + x -> 0,
                # not product(0).
                return create_product([create_float(self.val + 1.0),
                                       other]).expand()
        # Return sum
        return create_sum([self, other])
    def __sub__(self, other):
        "Subtract other objects."
        if other._prec == 2 and self.get_vrs() == other.get_vrs():
            # Return expanded product, to get rid of 3*x + -2*x -> x,
            # not 1*x.
            return create_product([create_float(self.val - other.val)] + list(self.get_vrs())).expand()
        elif other._prec == 1: # sym
            if self.get_vrs() == (other,):
                # Return expanded product, to get rid of -x + x -> 0,
                # not product(0).
                return create_product([create_float(self.val - 1.0),
                                       other]).expand()
        # Return sum
        return create_sum([self, create_product([FloatValue(-1), other])])
    def __mul__(self, other):
        "Multiplication by other objects."
        # If product will be zero.
        if self.val == 0.0 or other.val == 0.0:
            return create_float(0)
        # If other is a Sum or Fraction let them handle it.
        if other._prec in (3, 4): # sum or frac
            return other.__mul__(self)
        # NOTE: We expect expanded sub-expressions with no nested
        # operators. Create new product adding float or symbol.
        if other._prec in (0, 1): # float or sym
            return create_product(self.vrs + [other])
        # Create new product adding all variables from other Product.
        return create_product(self.vrs + other.vrs)
    def __truediv__(self, other):
        "Division by other objects."
        # If division is illegal (this should definitely not happen).
        if other.val == 0.0:
            error("Division by zero.")
        # If fraction will be zero.
        if self.val == 0.0:
            # self.vrs[0] is the cached FloatValue(0) set in __init__.
            return self.vrs[0]
        # If other is a Sum we can only return a fraction.
        # NOTE: Expect that other is expanded i.e., x + x -> 2*x which
        # can be handled
        # TODO: Fix x / (x + x*y) -> 1 / (1 + y).
        # Or should this be handled when reducing a fraction?
        if other._prec == 3: # sum
            return create_fraction(self, other)
        # Handle division by FloatValue, Symbol, Product and Fraction.
        # NOTE: assuming that we get expanded variables.
        # Copy numerator, and create list for denominator.
        num = self.vrs[:]
        denom = []
        # Add floatvalue, symbol and products to the list of denominators.
        if other._prec in (0, 1): # float or sym
            denom = [other]
        elif other._prec == 2: # prod
            # Get copy.
            denom = other.vrs[:]
        # fraction.
        else:
            error("Did not expected to divide by fraction.")
        # Loop entries in denominator and remove from numerator (and
        # denominator).  Iterate over a copy since we mutate denom.
        for d in denom[:]:
            # Add the inverse of a float to the numerator and continue.
            if d._prec == 0: # float
                num.append(create_float(1.0 / d.val))
                denom.remove(d)
                continue
            if d in num:
                num.remove(d)
                denom.remove(d)
        # Create appropriate return value depending on remaining data.
        if len(num) > 1:
            # TODO: Make this more efficient?
            # Create product and expand to reduce
            # Product([5, 0.2]) == Product([1]) -> Float(1).
            num = create_product(num).expand()
        elif num:
            num = num[0]
        # If all variables in the numerator has been eliminated we
        # need to add '1'.
        else:
            num = create_float(1)
        if len(denom) > 1:
            return create_fraction(num, create_product(denom))
        elif denom:
            return create_fraction(num, denom[0])
        # If we no longer have a denominater, just return the
        # numerator.
        return num
    # Python 2 compatibility alias for the division operator.
    __div__ = __truediv__
    # Public functions.
    def expand(self):
        "Expand all members of the product."
        # If we just have one variable, compute the expansion of it
        # (it is not a Product, so it should be safe). We need this to
        # get rid of Product([Symbol]) type expressions.
        if len(self.vrs) == 1:
            self._expanded = self.vrs[0].expand()
            return self._expanded
        # If product is already expanded, simply return the expansion.
        if self._expanded:
            return self._expanded
        # Sort variables such that we don't call the '*' operator more
        # than we have to.
        float_syms = []
        sum_fracs = []
        for v in self.vrs:
            if v._prec in (0, 1): # float or sym
                float_syms.append(v)
                continue
            exp = v.expand()
            # If the expanded expression is a float, sym or product,
            # we can add the variables.
            if exp._prec in (0, 1): # float or sym
                float_syms.append(exp)
            elif exp._prec == 2: # prod
                float_syms += exp.vrs
            else:
                sum_fracs.append(exp)
        # If we have floats or symbols add the symbols to the rest as a single
        # product (for speed).
        if len(float_syms) > 1:
            sum_fracs.append(create_product(float_syms))
        elif float_syms:
            sum_fracs.append(float_syms[0])
        # Use __mult__ to reduce list to one single variable.
        # TODO: Can this be done more efficiently without creating all
        # the intermediate variables?
        self._expanded = reduce(lambda x, y: x * y, sum_fracs)
        return self._expanded
    def get_unique_vars(self, var_type):
        "Get unique variables (Symbols) as a set."
        # Loop all members and update the set.
        var = set()
        for v in self.vrs:
            var.update(v.get_unique_vars(var_type))
        return var
    def get_var_occurrences(self):
        """Determine the number of times all variables occurs in the
        expression. Returns a dictionary of variables and the number
        of times they occur.

        """
        # TODO: The product should be expanded at this stage, should
        # we check this?
        # Create dictionary and count number of occurrences of each
        # variable.
        d = {}
        for v in self.vrs:
            if v in d:
                d[v] += 1
                continue
            d[v] = 1
        return d
    def get_vrs(self):
        "Return all 'real' variables."
        # A product should only have one float value after
        # initialisation.
        # TODO: Use this knowledge directly in other classes?
        if self.vrs[0]._prec == 0: # float
            return tuple(self.vrs[1:])
        return tuple(self.vrs)
    def ops(self):
        "Get the number of operations to compute product."
        # It takes n-1 operations ('*') for a product of n members.
        op = len(self.vrs) - 1
        # Loop members and add their count.
        for v in self.vrs:
            op += v.ops()
        # Subtract 1, if the first member is -1 i.e., -1*x*y -> x*y is
        # only 1 op.
        if self.vrs[0]._prec == 0 and self.vrs[0].val == -1.0:
            op -= 1
        return op
    def reduce_ops(self):
        "Reduce the number of operations to evaluate the product."
        # It's not possible to reduce a product if it is already
        # expanded and it should be at this stage.
        # TODO: Is it safe to return self.expand().reduce_ops() if
        # product is not expanded? And do we want to?
        # TODO: This should crash if it goes wrong
        return self._expanded
    def reduce_vartype(self, var_type):
        """Reduce expression with given var_type. It returns a tuple (found,
        remain), where 'found' is an expression that only has
        variables of type == var_type. If no variables are found,
        found=(). The 'remain' part contains the leftover after
        division by 'found' such that: self = found*remain.

        """
        # Sort variables according to type.
        found = []
        remains = []
        for v in self.vrs:
            if v.t == var_type:
                found.append(v)
                continue
            remains.append(v)
        # Create appropriate object for found.
        if len(found) > 1:
            found = create_product(found)
        elif found:
            found = found.pop()
        # We did not find any variables.
        else:
            return [((), self)]
        # Create appropriate object for remains.
        if len(remains) > 1:
            remains = create_product(remains)
        elif remains:
            remains = remains.pop()
        # We don't have anything left.
        else:
            return [(self, create_float(1))]
        # Return whatever we found.
        return [(found, remains)]
/HADeploy-0.6.1.tar.gz/HADeploy-0.6.1/lib/hadeploy/plugins/kafka/code.py |
import logging
import hadeploy.core.misc as misc
import os
import glob
from hadeploy.core.templator import Templator
from hadeploy.core.plugin import Plugin
from hadeploy.core.const import SRC,DATA,DEFAULT_TOOLS_FOLDER,SCOPE_KAFKA,ACTION_DEPLOY,ACTION_REMOVE
logger = logging.getLogger("hadeploy.plugins.kafka")
# Dictionary keys used throughout this plugin, kept as named constants to
# avoid typo-prone string literals.
# Keys of the helper description (see buildHelper()).
HELPER="helper"
KAFKA="kafka"
DIR="dir"
JDCTOPIC_JAR="jdctopic_jar"
# Keys into the grooming data model / inventory.
HOST_GROUP_BY_NAME="hostGroupByName"
INVENTORY="inventory"
# Keys of the 'kafka_relay' section of the source model.
KAFKA_RELAY="kafka_relay"
ZK_HOST_GROUP="zk_host_group"
ZK_PORT="zk_port"
BROKER_ID_MAP="broker_id_map"
KAFKA_VERSION="kafka_version"
TOOLS_FOLDER="tools_folder"
ZK_PATH="zk_path"
# Keys of the 'kafka_topics' section of the source model.
KAFKA_TOPICS="kafka_topics"
ASSIGNMENTS="assignments"
PROPERTIES="properties"
NO_REMOVE="no_remove"
BECOME_USER="become_user"
LOGS_USER="logsUser"
class KafkaPlugin(Plugin):
    """HADeploy plugin managing Kafka topics through a relay host.

    Grooms the 'kafka_relay' and 'kafka_topics' sections of the source model
    and generates the Ansible templates that create/remove the topics.
    """
    def __init__(self, name, path, context):
        Plugin.__init__(self, name, path, context)
    def getGroomingPriority(self):
        # Ordering of this plugin among all plugins during grooming.
        return 5000
    def getSupportedScopes(self):
        return [SCOPE_KAFKA]
    def getSupportedActions(self):
        # No actions at all when the kafka scope is excluded by the user.
        if self.context.toExclude(SCOPE_KAFKA):
            return []
        else:
            return [ACTION_DEPLOY, ACTION_REMOVE]
    def getPriority(self, action):
        # Deploy late (5000), remove early (2000); misc.ERROR aborts on any
        # other action.
        return 5000 if action == ACTION_DEPLOY else 2000 if action == ACTION_REMOVE else misc.ERROR("Plugin 'kafka' called with invalid action: '{0}'".format(action))
    def onGrooming(self):
        """Normalize the model: apply 'when' clauses, then validate/default
        the kafka_relay and kafka_topics sections."""
        misc.applyWhenOnSingle(self.context.model[SRC], KAFKA_RELAY)
        misc.applyWhenOnList(self.context.model[SRC], KAFKA_TOPICS)
        if self.context.toExclude(SCOPE_KAFKA):
            return
        self.buildHelper()
        misc.ensureObjectInMaps(self.context.model[DATA], [KAFKA], {})
        groomKafkaRelay(self.context.model)
        groomKafkaTopics(self.context.model)
    def buildAuxTemplates(self, action, priority):
        """Generate the topic description playbook template for the action,
        but only when there is at least one kafka_topic defined."""
        if self.context.toExclude(SCOPE_KAFKA):
            return
        if KAFKA_TOPICS in self.context.model[SRC] and len(self.context.model[SRC][KAFKA_TOPICS]) > 0 :
            templator = Templator([os.path.join(self.path, './helpers/jdctopic')], self.context.model)
            if action == ACTION_DEPLOY:
                templator.generate("desc_topics.yml.jj2", os.path.join(self.context.workingFolder, "desc_topics.yml.j2"))
            elif action == ACTION_REMOVE:
                templator.generate("desc_untopics.yml.jj2", os.path.join(self.context.workingFolder, "desc_untopics.yml.j2"))
            else:
                pass
    def getTemplateAsFile(self, action, priority):
        # Playbook templates contributed by this plugin for the given action.
        if self.context.toExclude(SCOPE_KAFKA):
            return []
        else:
            return [os.path.join(self.path, "install_kafka_relay.yml.jj2"), os.path.join(self.path, "{0}.yml.jj2".format(action))]
    def buildHelper(self):
        """Locate the jdctopic uber-jar matching the configured kafka_version
        and register it under model[HELPER][KAFKA].  Errors out when zero or
        several candidate jars are found."""
        if KAFKA_RELAY in self.context.model[SRC]:
            helper = {}
            helper[DIR] = os.path.normpath(os.path.join(self.path, "helpers"))
            jarPattern = "jdctopic/jdctopic.{}-*-uber.jar".format(self.context.model[SRC][KAFKA_RELAY][KAFKA_VERSION])
            jdctopicjars = glob.glob(os.path.join(helper[DIR], jarPattern))
            if len(jdctopicjars) < 1:
                misc.ERROR("Unable to find helper for Kafka.Please, refer to the documentation about Installation")
            if len(jdctopicjars) > 1:
                misc.ERROR("Several version of kafka helper jar in {}. Please, cleanup.".format(helper[DIR]))
            helper[JDCTOPIC_JAR] = os.path.basename(jdctopicjars[0])
            misc.ensureObjectInMaps(self.context.model, [HELPER, KAFKA], helper)
# ------------------------------------------- Static function
def groomKafkaRelay(model):
    """Validate and normalize the optional 'kafka_relay' model section.

    - Drops the relay when no kafka_topics are defined (nothing to relay).
    - Checks the zookeeper host_group exists in the inventory.
    - Validates broker_id_map values are integers.
    - Fills defaults (zk_port=2181, zk_path='/', tools_folder) and derives
      the logsUser from become_user when present.
    Invalid input is reported through misc.ERROR().
    """
    if KAFKA_RELAY not in model[SRC]:
        return
    if KAFKA_TOPICS not in model[SRC] or len(model[SRC][KAFKA_TOPICS]) == 0:
        # A relay without any topic to manage is useless: remove it so the
        # rest of the plugin skips relay handling entirely.
        del(model[SRC][KAFKA_RELAY])
        return
    relay = model[SRC][KAFKA_RELAY]
    hg = relay[ZK_HOST_GROUP]
    if hg not in model[DATA][INVENTORY][HOST_GROUP_BY_NAME]:
        misc.ERROR("kafka_relay: host_group '{0}' does not exists!".format(hg))
    misc.setDefaultInMap(relay, ZK_PORT, 2181)
    if BROKER_ID_MAP in relay:
        # BUG FIX: dict.itervalues() is Python 2 only and raises
        # AttributeError on Python 3; .values() iterates on both.
        for brokerId in relay[BROKER_ID_MAP].values():
            if not isinstance(brokerId, int):
                misc.ERROR("kafka_relay: BrokerId ({0}) must be integer".format(brokerId))
    misc.setDefaultInMap(relay, ZK_PATH, '/')
    if BECOME_USER in relay:
        relay[LOGS_USER] = relay[BECOME_USER]
        misc.setDefaultInMap(relay, TOOLS_FOLDER, "/tmp/hadeploy_{}".format(relay[BECOME_USER]))
    else:
        relay[LOGS_USER] = "{{ansible_user}}"
        misc.setDefaultInMap(relay, TOOLS_FOLDER, DEFAULT_TOOLS_FOLDER)
def groomKafkaTopics(model):
    """Validate and normalize the 'kafka_topics' model section.

    Each topic either carries an explicit 'assignments' map (partition id ->
    list of broker ids), which is fully validated (integer ids, uniform
    replica count, consecutive partition ids starting at 0, broker ids
    translated through kafka_relay.broker_id_map when present), or it must
    define both replication_factor and partition_factor.  All problems are
    reported through misc.ERROR().
    """
    if KAFKA_TOPICS in model[SRC] and len(model[SRC][KAFKA_TOPICS]) > 0 :
        if not KAFKA_RELAY in model[SRC]:
            misc.ERROR("A kafka_relay must be defined if at least one kafka_topic is defined")
        for topic in model[SRC][KAFKA_TOPICS]:
            if ASSIGNMENTS in topic:
                if len(topic[ASSIGNMENTS]) == 0:
                    misc.ERROR("Topic '{0}': At least one partition must be defined".format(topic['name']))
                listPart = []
                nbrRep = None
                for part in topic[ASSIGNMENTS]:
                    # NOTE(review): assumes the assignment keys are strings
                    # (isdigit()); YAML may also parse bare keys as ints --
                    # confirm against the loader used upstream.
                    if not part.isdigit():
                        misc.ERROR("Topic '{0}': Partition ID must be integer".format(topic['name']))
                    listPart.append(int(part))
                    rep = topic[ASSIGNMENTS][part]
                    if not isinstance(rep, list):
                        misc.ERROR("Topic '{0}: Each partition must be defined by an array of brokerId': ".format(topic['name']))
                    # All partitions must share the same replica count.
                    if nbrRep == None:
                        nbrRep = len(rep)
                    else:
                        if nbrRep != len(rep):
                            misc.ERROR("Topic '{0}' All partition must have the same number of replicas: ".format(topic['name']))
                    if BROKER_ID_MAP in model[SRC][KAFKA_RELAY]:
                        # Must translate broker_id
                        rep2 = []
                        for brokerId in rep:
                            if not str(brokerId) in model[SRC][KAFKA_RELAY][BROKER_ID_MAP]:
                                misc.ERROR("Topic '{0}': BrokerId {1} must be defined in kafka_relay.broker_id_map".format(topic['name'], brokerId))
                            rep2.append(model[SRC][KAFKA_RELAY][BROKER_ID_MAP][str(brokerId)])
                        topic[ASSIGNMENTS][part] = rep2
                    else :
                        for brokerId in rep:
                            if not isinstance(brokerId, int):
                                misc.ERROR("Topic '{0}': BrokerId must be digit! Or may be you forget to define a 'broker_id_map' in your 'kafka_relay'?".format(topic['name']))
                # Partition ids must be unique and form 0..n-1.
                x = set(listPart)
                if len(x) != len(listPart):
                    misc.ERROR("Topic '{0}': There is duplicated partition ID".format(topic['name']))
                listPart = sorted(listPart)
                if listPart[0] != 0 or listPart[len(listPart) - 1] != len(listPart) -1:
                    misc.ERROR("Topic '{0}': Partition ID must be consecutive numbers, from 0 to number of partition".format(topic['name']))
            else:
                if not 'replication_factor' in topic or not 'partition_factor' in topic:
                    misc.ERROR("Topic '{0}': If partitions layout is not explicit (using assignment), both replication_factor and partition_factor must be defined".format(topic['name']))
            if PROPERTIES in topic:
                if not isinstance(topic[PROPERTIES], dict):
                    misc.ERROR("Topic '{0}': properties: must be a map!".format(topic['name']))
                # Drop an empty properties map so templates can test presence.
                if len(topic[PROPERTIES]) == 0:
                    del(topic[PROPERTIES])
            misc.setDefaultInMap(topic, NO_REMOVE, False)
/BLECryptracer_BLEMAP-0.0.7.tar.gz/BLECryptracer_BLEMAP-0.0.7/README.md | # BLECryptracer #
These scripts require Python v3+ and have been tested with Androguard v3.3.5. These (and all dependencies) should be installed on your system.
In order to install dependencies you can
```
pip install -r requirements.txt
```
In order to analyse an APK you will need to type the following
```
python BLECryptracer.py -i APK_FILE_TO_ANALYSE [-o OUTPUT_FILE_IN_JSON]
```
If no output file is provided, the result will be saved in a file named output.json.
During execution several files and folders may be created. Those will be deleted after execution.
The output JSON file contains the following:
```
FILENAME - The name of the APK file
PACKAGE - The package name (e.g., com.test.app)
XETVALUE_CALL - True if the APK makes calls to one of the android.BluetoothGattCharacteristic setValue or getValue methods. The scripts stop processing an APK if this is False.
CRYPTO_USE - True if the APK contains *any* calls to the javax.crypto or java.security methods. The scripts stop processing an APK if this is False.
CRYPTO_IN_XETVALUE - True if cryptographically-processed BLE data was identified. False otherwise
CONFIDENCE_LEVEL_XETVALUE - One of High, Medium or Low, depending on how certain we are of the result. Only relevant when CRYPTO_IN_XETVALUE is True
NET_USE - True if the APK contains any calls to java.net.URLConnection, java.net.HttpURLConnection or javax.net.ssl.HttpsURLConnection. Only present in the output of the setvalue script.
LOCATION_XETVALUE - The last processed method (that calls setValue or getValue)
LOCATION_CRYPTO_XETVALUE - The method that calls the crypto-library (linked to the BLE data)
NUM_XETVALUE_METHODS - The total number of calls to setValue/getValue. Note that the scripts stop processing at the first instance where crypto is identified.
ALL_XETVALUE_METHODS - A list of all methods that call setValue/getValue
TIME_TAKEN_XETVALUE - The time taken to process an APK
BLE_UUIDS - UUIDs that have been extracted with BLE functionality. These can be of several kinds depending on how they were extracted.
CLASSIC_UUIDS - UUIDs that belong to Classic Bluetooth. These are not relevant at the moment.
``` | PypiClean |
/dyson-1.1.2.tar.gz/dyson-1.1.2/docs/variables.md | Variables
====================
There are four possible ways to specify variables within your tests
and are overriden in this order:
1. `apps/default.yml`
2. `apps/<application>.yml`
3. `tests/<test>/vars/main.yml`
4. `-e "my_variable=something another_variable=something_else"`
## Accessing Variables
From the [examples below](#examples), you can see that we are able to access
variables by using `{{ ... }}` notation. This is [Jinja](http://jinja.pocoo.org/).
You are able to reference these variables anywhere in your tests, or your other variable
files.
All variables can be overridden following the order of precedence [noted above](#variables).
**In order to access variables within `{{ }}`, you need to ensure that they
are surrounded by double quotes.**
```yaml
- goto: url={{ url }}
```
will not work, while this will:
```yaml
- goto: "url={{ url }}"
```
## Examples
Consider the following:
**apps/default.yml**
```yaml
---
application_url: http://localhost:3000
login_page:
txt_username: css=#username
```
**apps/production.yml**
```yaml
---
application_url: https://productionurl.com
login_page:
txt_username: "{{ login_page.txt_username }}-production"
```
**tests/<test>/vars/main.yml**
```yaml
---
test_url: "{{ application_url }}/some/path"
```
**`-e "test_url=http://anotherurl.com/some/path"`**
At the time of execution:
- `application_url` will be `https://productionurl.com` since `production.yml`
will override what is set in `default.yml`
- `test_url` will be `http://anotherurl.com/some/path` since the value passed
with `-e` on the command line has the highest precedence
- `login_page.txt_username` will be `css=#username-production`
| PypiClean |
/ClueDojo-1.4.3-1.tar.gz/ClueDojo-1.4.3-1/src/cluedojo/static/dojox/layout/ScrollPane.js | if(!dojo._hasResource["dojox.layout.ScrollPane"]){
dojo._hasResource["dojox.layout.ScrollPane"]=true;
dojo.provide("dojox.layout.ScrollPane");
dojo.experimental("dojox.layout.ScrollPane");
dojo.require("dijit.layout.ContentPane");
dojo.require("dijit._Templated");
dojo.declare("dojox.layout.ScrollPane",[dijit.layout.ContentPane,dijit._Templated],{_line:null,_lo:null,_offset:15,orientation:"vertical",autoHide:true,templateString:dojo.cache("dojox.layout","resources/ScrollPane.html","<div class=\"dojoxScrollWindow\" dojoAttachEvent=\"onmouseenter: _enter, onmouseleave: _leave\">\n <div class=\"dojoxScrollWrapper\" style=\"${style}\" dojoAttachPoint=\"wrapper\" dojoAttachEvent=\"onmousemove: _calc\">\n\t<div class=\"dojoxScrollPane\" dojoAttachPoint=\"containerNode\"></div>\n </div>\n <div dojoAttachPoint=\"helper\" class=\"dojoxScrollHelper\"><span class=\"helperInner\">|</span></div>\n</div>\n"),resize:function(_1){
if(_1){
if(_1.h){
dojo.style(this.domNode,"height",_1.h+"px");
}
if(_1.w){
dojo.style(this.domNode,"width",_1.w+"px");
}
}
var _2=this._dir,_3=this._vertical,_4=this.containerNode[(_3?"scrollHeight":"scrollWidth")];
dojo.style(this.wrapper,this._dir,this.domNode.style[this._dir]);
this._lo=dojo.coords(this.wrapper,true);
this._size=Math.max(0,_4-this._lo[(_3?"h":"w")]);
if(!this._size){
this.helper.style.display="none";
this.wrapper[this._scroll]=0;
return;
}else{
this.helper.style.display="";
}
this._line=new dojo._Line(0-this._offset,this._size+(this._offset*2));
var u=this._lo[(_3?"h":"w")],r=Math.min(1,u/_4),s=u*r,c=Math.floor(u-(u*r));
this._helpLine=new dojo._Line(0,c);
dojo.style(this.helper,_2,Math.floor(s)+"px");
},postCreate:function(){
this.inherited(arguments);
if(this.autoHide){
this._showAnim=dojo._fade({node:this.helper,end:0.5,duration:350});
this._hideAnim=dojo.fadeOut({node:this.helper,duration:750});
}
this._vertical=(this.orientation=="vertical");
if(!this._vertical){
dojo.addClass(this.containerNode,"dijitInline");
this._dir="width";
this._edge="left";
this._scroll="scrollLeft";
}else{
this._dir="height";
this._edge="top";
this._scroll="scrollTop";
}
if(this._hideAnim){
this._hideAnim.play();
}
dojo.style(this.wrapper,"overflow","hidden");
},_set:function(n){
if(!this._size){
return;
}
this.wrapper[this._scroll]=Math.floor(this._line.getValue(n));
dojo.style(this.helper,this._edge,Math.floor(this._helpLine.getValue(n))+"px");
},_calc:function(e){
if(!this._lo){
this.resize();
}
this._set(this._vertical?((e.pageY-this._lo.y)/this._lo.h):((e.pageX-this._lo.x)/this._lo.w));
},_enter:function(e){
if(this._hideAnim){
if(this._hideAnim.status()=="playing"){
this._hideAnim.stop();
}
this._showAnim.play();
}
},_leave:function(e){
if(this._hideAnim){
this._hideAnim.play();
}
}});
} | PypiClean |
/Abhilash1_optimizers-0.1.tar.gz/Abhilash1_optimizers-0.1/Abhilash1_optimizers/RMSprop.py | #RMSProp
import math
import numpy as np
import Abhilash1_optimizers.Activation as Activation
import Abhilash1_optimizers.hyperparameters as hyperparameters
import Abhilash1_optimizers.Moment_Initializer as Moment_Initializer
#RMSprop algorithm with momentum on rescaled gradient
class RMSPROP():
    """RMSprop optimizer: rescales each gradient by a running average of its
    squared magnitude before applying the learning rate.

    The methods are used as namespaced functions, called through the class
    (e.g. ``RMSPROP.initialize(data, max_itr)``); no instance is ever
    created, which is why ``__init__`` has no ``self`` and returns a value.
    """
    def __init__(alpha, b_1, b_2, epsilon, noise_g):
        # Delegate to the shared hyperparameter initialiser; returns the
        # (alpha, b_1, b_2, epsilon, noise_g) tuple actually used.
        return hyperparameters.hyperparameter.initialise(alpha, b_1, b_2, epsilon, noise_g)
    def init(m_t, v_t, t, theta):
        # Initial moment estimates: m_t (unused by plain RMSprop), second
        # moment v_t, timestep t and starting parameter theta.
        return Moment_Initializer.Moment_Initializer.initialize(m_t, v_t, t, theta)
    def RMSprop_optimizer(data, len_data, max_itr, alpha, b_1, b_2, epsilon, noise_g, act_func, scale):
        """Run ``max_itr`` RMSprop updates for each entry of ``data``.

        ``act_func`` selects the Activation used as the gradient source;
        ``scale`` is only used by 'selu'.  Returns the list of optimized
        weights, one per data item.
        """
        alpha, b_1, b_2, epsilon, noise_g = RMSPROP.__init__(alpha, b_1, b_2, epsilon, noise_g)
        m_t, v_t, t, theta_0 = RMSPROP.init(0, 0, 0, 0)
        final_weight_vector = []
        for i in range(len_data):
            theta_0 = data[i]
            # Inner loop variable renamed from the original shadowed 'i'.
            for _ in range(max_itr):
                t += 1
                # Gradient surrogate: activation of the current parameter.
                if act_func == "softPlus":
                    g_t = Activation.Activation.softplus(theta_0)
                elif act_func == "relu":
                    g_t = Activation.Activation.relu(theta_0)
                elif act_func == "elu":
                    g_t = Activation.Activation.elu(theta_0, alpha)
                elif act_func == "selu":
                    # NOTE(review): `theta` is undefined here (bug kept from
                    # the original): act_func == "selu" raises NameError.
                    # The intended third argument must be confirmed against
                    # Activation.selu's signature before fixing.
                    g_t = Activation.Activation.selu(scale, theta_0, theta)
                elif act_func == "tanh":
                    g_t = Activation.Activation.tanh(theta_0)
                elif act_func == "hardSigmoid":
                    g_t = Activation.Activation.hard_sigmoid(theta_0)
                elif act_func == "softSign":
                    g_t = Activation.Activation.softsign(theta_0)
                elif act_func == "linear":
                    g_t = Activation.Activation.linear(theta_0)
                elif act_func == "exponential":
                    g_t = Activation.Activation.exponential(theta_0)
                # Decayed average of squared gradients, then a step scaled
                # by 1/(sqrt(v_t) + epsilon).
                v_t = b_2 * v_t + (1 - b_2) * g_t * g_t
                theta_prev = theta_0
                alpha_t = (alpha * (g_t / (math.sqrt(v_t) + epsilon)))
                theta_0 = theta_prev - (alpha_t)
                print("Intrermediate gradients")
                print("==========================================")
                print("Previous gradient", theta_prev)
                print("Present gradient", theta_0)
                print("==========================================")
            final_weight_vector.append(theta_0)
        return final_weight_vector
    def initialize(data, max_itr, alpha=0.001, b_1=0.9, b_2=0.9, epsilon=1e-8, noise_g=1.0, act_func="linear", scale=1.0):
        """Optimize ``data`` for ``max_itr`` iterations and print the result.

        BUG FIX: the original body referenced module-level names (alpha,
        b_1, b_2, epsilon, noise_g, act_func, scale) that were never
        defined, so every call raised NameError.  They are now keyword
        parameters with standard RMSprop defaults (learning rate 0.001,
        decay 0.9, epsilon 1e-8), keeping the historical
        ``initialize(data, max_itr)`` call form working.
        """
        len_data = len(data)
        optimized_weights = RMSPROP.RMSprop_optimizer(data, len_data, max_itr, alpha, b_1, b_2, epsilon, noise_g, act_func, scale)
        print("Optimized Weight Vector")
        print("=====================================")
        for i in range(len(optimized_weights)):
            print("=====", optimized_weights[i])
if __name__ == '__main__':
    # Manual smoke test; the actual optimizer invocation is left disabled.
    print("Verbose")
    sample_data = [1, 0.5, 0.7, 0.1]
    max_itr = 100
    # Example run (disabled):
    #     RMSPROP.initialize(sample_data, max_itr)
/LibRPG-0.4.zip/LibRPG-0.4/librpg/map.py | import csv
import operator
import pygame
from librpg.mapobject import *
from librpg.mapview import MapView
from librpg.sound import MapMusic
from librpg.util import *
from librpg.image import *
from librpg.tile import *
from librpg.config import *
from librpg.locals import *
from librpg.movement import Step
from librpg.context import Context, get_context_stack
from librpg.dialog import MessageQueue
class MapController(Context):
# Read-Only Attributes:
# map_view - MapView (View component of MVC)
# map_model - MapModel (Model component of MVC)
    def __init__(self, map_model, local_state=None, global_state=None):
        """Wire a MapModel into the MVC triad (model, view, music, dialogs).

        local_state and global_state are forwarded to MapModel.initialize().
        """
        Context.__init__(self)
        self.map_model = map_model
        # Back-reference so the model can reach its controller.
        self.map_model.controller = self
        self.map_model.initialize(local_state, global_state)
        self.map_view = MapView(self.map_model)
        self.map_music = MapMusic(self.map_model)
        # presumably True while a synchronized/scripted movement is running
        # (checked every frame in step()) -- TODO confirm where it is set.
        self.moving_sync = False
        self.message_queue = MessageQueue(self)
    def initialize(self):
        """Cache hot attributes and push this map's contexts on the stack.

        The aliases (map_view_draw, party_movement_append/remove, ...) avoid
        repeated attribute lookups in the per-frame step()/draw() path.
        """
        map_model = self.map_model
        self.map_view_draw = self.map_view.draw
        self.party_avatar = map_model.party_avatar
        self.party_movement = map_model.party_movement
        self.party_movement_append = self.party_movement.append
        self.party_movement_remove = self.party_movement.remove
        # Initialize contexts: the dialog queue first, then any contexts
        # the map model itself contributes.
        context_stack = get_context_stack()
        context_stack.stack_context(self.message_queue)
        for context in map_model.contexts:
            context_stack.stack_context(context)
    def step(self):
        """Advance the map simulation by one frame."""
        # A scripted pause freezes the whole map until the delay elapses.
        if self.map_model.pause_delay > 0:
            self.map_model.pause_delay -= 1
            return
        # During a synchronized movement, only proceed once it reports it
        # has stopped (sync_movement_step is defined later in this class).
        if self.moving_sync:
            sync_stopped = self.sync_movement_step()
            if not sync_stopped:
                return
        # Object movement is suspended while a dialog is being displayed.
        if not self.message_queue.is_busy():
            self.flow_object_movement()
        self.update_objects()
        # Consume queued player input only when the avatar is idle and no
        # dialog is busy; ACTIVATE triggers the map action, any direction
        # schedules one Step of movement.
        if self.party_movement and not self.party_avatar.scheduled_movement \
           and not self.party_avatar.movement_phase \
           and not self.message_queue.is_busy():
            action = self.party_movement[0]
            if action == ACTIVATE:
                del self.party_movement[0]
                self.map_model.party_action()
            else:
                self.party_avatar.schedule_movement(Step(action))
    def draw(self):
        """Render the current frame and keep the map music up to date."""
        # map_view_draw is the bound MapView.draw cached in initialize().
        self.map_view_draw()
        self.map_music.update()
def process_event(self, event):
if event.type == QUIT:
get_context_stack().stop()
return True
elif event.type == KEYDOWN:
direction = self.check_direction(event.key)
if direction is not None and\
not direction in self.map_model.party_movement:
self.party_movement_append(direction)
return True
elif event.key in game_config.key_action:
if not ACTIVATE in self.party_movement:
self.party_movement.insert(0, ACTIVATE)
return True
elif event.key in game_config.key_cancel:
get_context_stack().stop()
return True
elif event.type == KEYUP:
direction = self.check_direction(event.key)
if direction is not None and\
direction in self.map_model.party_movement:
self.party_movement_remove(direction)
return True
elif event.key in game_config.key_action \
and ACTIVATE in self.party_movement:
self.party_movement_remove(ACTIVATE)
return True
return False
def check_direction(self, key):
if key in game_config.key_up:
return UP
elif key in game_config.key_down:
return DOWN
elif key in game_config.key_left:
return LEFT
elif key in game_config.key_right:
return RIGHT
else:
return None
def flow_object_movement(self):
party_avatar = self.map_model.party_avatar
for o in self.map_model.objects:
if o is not party_avatar:
o.flow()
party_avatar.flow()
self.trigger_collisions()
def trigger_collisions(self):
party_avatar = self.map_model.party_avatar
if party_avatar.just_completed_movement:
party_avatar.just_completed_movement = False
coming_from_direction = determine_facing(party_avatar.position,
party_avatar.prev_position)
# Trigger below objects' collide_with_party()
for obj in self.map_model.object_layer.\
get_pos(party_avatar.position).below:
obj.collide_with_party(party_avatar,
coming_from_direction)
# Trigger above objects' collide_with_party()
for obj in self.map_model.object_layer.\
get_pos(party_avatar.position).above:
obj.collide_with_party(party_avatar,
coming_from_direction)
# Trigger areas' party_entered and party_moved()
for area in self.map_model.area_layer.get_pos(party_avatar.position):
if area not in party_avatar.prev_areas:
coming_from_outside = True
area.party_entered(party_avatar, party_avatar.position)
else:
coming_from_outside = False
area.party_moved(party_avatar, party_avatar.prev_position,
party_avatar.position, coming_from_outside)
def sync_movement(self, objects):
self.sync_objects = objects
self.moving_sync = True
def sync_movement_step(self):
if all([not o.scheduled_movement for o in self.sync_objects]):
self.moving_sync = False
return True
for o in self.sync_objects:
o.flow()
return False
def update_objects(self):
for o in self.map_model.updatable_objects:
o.update()
def gameover(self):
get_context_stack().stop()
class MapModel(object):
    """
    The MapModel is the class that models a map's data and behaviour. It
    is the Model component of the MVC pattern. MapModel is a class made to
    be inherited, so that specific behavior (objects, areas, parallel
    processes) may be added.
    """

    def __init__(self, map_file, terrain_tileset_files,
                 scenario_tileset_files_list):
        """
        *Constructor:*

        Initialize the MapModel with a layout defined by *map_file* (a .map
        file).

        The terrain tileset is specified by *terrain_tileset_files*, which
        is a tuple (tileset image filename, tileset boundaries filename).
        Tileset image filename should be a bitmap file (.png typically)
        and tileset boundaries filename should be a .bnd file.

        The scenario tilesets are specified by
        *scenario_tileset_files_list*, a list of tuples like the one passed
        as *terrain_tileset_files*. Each will correspond to a scenario
        layer.
        """
        self.world = None
        self.id = None
        self.music = None

        # Set up party
        self.party = None
        self.party_avatar = None
        self.party_movement = []

        # Load file data
        self.map_file = map_file
        self.terrain_tileset_files = terrain_tileset_files
        self.scenario_tileset_files_list = scenario_tileset_files_list
        self.terrain_tileset = Tileset(self.terrain_tileset_files[0],
                                       self.terrain_tileset_files[1])
        self.scenario_tileset = [Tileset(i, j) for i, j in
                                 self.scenario_tileset_files_list]
        self.load_from_map_file()

        # Set up local state
        self.local_state = None

        # Set up objects
        self.objects = []
        self.below_objects = []
        self.obstacle_objects = []
        self.above_objects = []
        self.updatable_objects = []
        self.object_layer = Matrix(self.width, self.height)
        # Bind the setter locally: this loop touches every cell of the map.
        object_layer_set = self.object_layer.set
        for x in range(self.width):
            for y in range(self.height):
                object_layer_set(x, y, ObjectCell())

        # Set up areas
        self.areas = []
        self.area_layer = Matrix(self.width, self.height)
        for x in range(self.width):
            for y in range(self.height):
                self.area_layer.set(x, y, [])

        # Set up context system
        self.pause_delay = 0
        self.contexts = []

    def load_from_map_file(self):
        """Parse the .map CSV file into the terrain and scenario layers.

        The first row holds (width, height, scenario layer count); it is
        followed by one width*height grid of tile ids for the terrain
        layer and one grid per scenario layer.
        """
        # NOTE(review): `csv` is presumably re-exported by one of the
        # star imports (librpg.util) -- confirm.
        layout_file = open(self.map_file)
        r = csv.reader(layout_file, delimiter=',')

        # Header row: dimensions and number of scenario layers.
        # (was r.next(), which is Python 2 only)
        first_line = next(r)
        self.width = int(first_line[0])
        self.height = int(first_line[1])
        self.scenario_number = int(first_line[2])

        self.terrain_layer = Matrix(self.width, self.height)
        self.scenario_layer = [Matrix(self.width, self.height) for i in
                               range(self.scenario_number)]

        # Terrain layer: rows that don't match the map width are skipped.
        y = 0
        for line in r:
            if len(line) == self.width:
                for x, value in enumerate(line):
                    tile = self.terrain_tileset.tiles[int(value)]
                    self.terrain_layer.set(x, y, tile)
                y += 1
                if y >= self.height:
                    break

        # One grid per scenario layer, read from the same csv stream.
        # (was xrange, which is Python 2 only)
        for i in range(self.scenario_number):
            y = 0
            for line in r:
                if len(line) == self.width:
                    for x, value in enumerate(line):
                        tile = self.scenario_tileset[i].tiles[int(value)]
                        self.scenario_layer[i].set(x, y, tile)
                    y += 1
                    if y >= self.height:
                        break
        layout_file.close()

    # Virtual, should be implemented.
    def initialize(self, local_state, global_state):
        """
        *Virtual*

        Put the map in an initial, virgin state if the *local_state*
        specified is None. Puts the map in a state loaded from the
        *local_state*, and the *global_state* otherwise.

        *local_state* is the serializable object returned by
        MapModel.save_state() when this map was saved. *global_state*
        is a dict mapping all feature strings to their local states.
        """
        pass

    # Virtual, should be implemented.
    def save_state(self):
        """
        *Virtual*

        Save the map's state to a local state and return it.
        """
        return None

    def add_party(self, party, position, facing=DOWN, speed=NORMAL_SPEED):
        """
        Add a *party* (Party instance) to the Map at the given *position*.

        Optionally, the starting *facing* and *speed* may be specified. The
        defaults are *facing* down and normal *speed*.
        """
        assert self.party is None, 'Map already has a party'
        self.party = party
        self.party_avatar = PartyAvatar(party, facing, speed)
        self.add_object(self.party_avatar, position)

    def remove_party(self):
        """
        Remove the party from the Map, returning a 2-tuple with it as first
        element and its position as second element. Return (None, None) if
        there is no party in the map.
        """
        if self.party is None:
            return None, None
        result = self.party, self.party_avatar.position
        self.remove_object(self.party_avatar)
        self.party = None
        self.party_avatar = None
        return result

    def add_object(self, obj, position):
        """
        Add an object to the map at the specified position. Returns whether
        the operation was successful.

        NOTE(review): as written this always returns True; the docstring's
        "can fail when the position is occupied by an obstacle" case is
        not actually checked here -- confirm against callers.
        """
        self.object_layer.get_pos(position).add_object(obj)
        self.objects.append(obj)
        if obj.is_below():
            self.below_objects.append(obj)
        elif obj.is_obstacle():
            self.obstacle_objects.append(obj)
        elif obj.is_above():
            self.above_objects.append(obj)
        else:
            raise Exception('Object is neither below, obstacle or above')
        if hasattr(obj, 'update'):
            self.updatable_objects.append(obj)
        obj.position = position
        obj.areas = self.area_layer.get_pos(position)
        obj.map = self
        return True

    def remove_object(self, obj):
        """
        Remove an object from the map and returns the Position where it was.

        Return None if the object was not in the map.
        """
        self.objects.remove(obj)
        if obj.is_below():
            self.below_objects.remove(obj)
        elif obj.is_obstacle():
            self.obstacle_objects.remove(obj)
        elif obj.is_above():
            self.above_objects.remove(obj)
        else:
            raise Exception('Object is neither below, obstacle or above')
        if hasattr(obj, 'update'):
            self.updatable_objects.remove(obj)
        self.object_layer.get_pos(obj.position).remove_object(obj)
        result = obj.position
        obj.position, obj.map = None, None
        return result

    def add_area(self, area, positions):
        """
        Add a MapArea to the map at the specified positions. *Positions*
        should be an iterable that returns the Positions over which the
        MapArea extends.
        """
        self.areas.append(area)
        for pos in positions:
            self.area_layer.get_pos(pos).append(area)
        area.area = positions

    def remove_area(self, area, positions):
        """
        Remove a MapArea from the map at the specified positions.
        *Positions* should be an iterable that returns the Positions from
        which the area should be removed.
        """
        self.areas.remove(area)
        # NOTE(review): the removal loop iterates area.area (every cell the
        # area covers), not *positions*; only the bookkeeping afterwards
        # uses *positions* -- confirm this asymmetry is intended.
        for pos in area.area:
            self.area_layer.get_pos(pos).remove(area)
        area.area = list(set(area.area) - set(positions))

    def try_to_move_object(self, obj, direction, slide=False, back=False):
        """
        Try to move an object to the specified direction (UP, DOWN, LEFT or
        RIGHT). Return whether the object could be moved.

        If *slide* is True, the movement will use only the static frame of
        the object. If *back* is True, the movement will be backwards.
        """
        if obj.movement_phase > 0:
            # A step is already in progress.
            return False

        if back:
            obj.facing = inverse(direction)
        else:
            obj.facing = direction

        old_pos = obj.position
        desired = obj.position.step(direction)
        if not self.terrain_layer.valid_pos(desired):
            return False

        # Gather the tiles/objects involved on both sides of the step.
        old_terrain = self.terrain_layer.get_pos(old_pos)
        new_terrain = self.terrain_layer.get_pos(desired)
        old_scenario = [self.scenario_layer[i].get_pos(old_pos) for i in
                        range(self.scenario_number)]
        new_scenario = [self.scenario_layer[i].get_pos(desired) for i in
                        range(self.scenario_number)]
        old_object = self.object_layer.get_pos(old_pos)
        new_object = self.object_layer.get_pos(desired)

        if not (obj.is_obstacle() and
                self.is_obstructed(old_terrain, old_scenario, new_terrain,
                                   new_scenario, new_object, direction)):
            # Move
            self.move_object(obj, old_object, new_object, desired, slide, back)
            if obj is self.party_avatar:
                # Fire party_left for areas the party just stepped out of.
                for area in self.area_layer.get_pos(old_pos):
                    if area not in self.area_layer.get_pos(desired):
                        area.party_left(self.party_avatar, old_pos)
            return True
        else:
            # Do not move, something is on the way
            if obj is self.party_avatar and new_object.obstacle is not None:
                new_object.obstacle.collide_with_party(self.party_avatar,
                                                       direction)
            return False

    def is_obstructed(self, old_terrain, old_scenario_list, new_terrain,
                      new_scenario_list, new_object, direction):
        """Return True if a step from the old tile to the new one is blocked.

        A step is blocked by an obstacle object on the target tile, by the
        origin tile forbidding exit in *direction*, or by the target tile
        forbidding entry from the opposite direction.
        """
        if new_object.obstacle is not None:
            return True
        if self.direction_obstructed(old_terrain, old_scenario_list,
                                     direction):
            return True
        inv = inverse(direction)
        if self.direction_obstructed(new_terrain, new_scenario_list, inv):
            return True
        return False

    def direction_obstructed(self, terrain, scenario_list, direction):
        """Return True if the tile blocks crossing in *direction*.

        Scenario layers are checked topmost first: the first layer that is
        an obstacle (or forbids the direction) blocks, while the first
        below-layer tile shadows the terrain entirely.
        """
        for scenario in reversed(scenario_list):
            if scenario.cannot_be_entered(direction)\
                    or scenario.is_obstacle():
                return True
            elif scenario.is_below():
                return False
        # No scenario tile decided; fall back to the terrain.
        return terrain.is_obstacle() or terrain.cannot_be_entered(direction)

    def move_object(self, obj, old_object, new_object, new_pos, slide, back):
        """Commit a step: start the walk animation and update bookkeeping."""
        obj.movement_phase = obj.speed - 1
        obj.sliding = slide
        obj.going_back = back
        old_object.remove_object(obj)
        new_object.add_object(obj)
        obj.prev_position = obj.position
        obj.position = new_pos
        obj.prev_areas = obj.areas
        obj.areas = self.area_layer.get_pos(new_pos)

    def teleport_object(self, obj, new_pos):
        """Move *obj* to *new_pos* instantly, without a walk animation."""
        old_pos = obj.position
        old_object = self.object_layer.get_pos(old_pos)
        new_object = self.object_layer.get_pos(new_pos)
        old_object.remove_object(obj)
        new_object.add_object(obj)
        obj.prev_position = old_pos
        obj.position = new_pos
        obj.prev_areas = obj.areas
        obj.areas = self.area_layer.get_pos(new_pos)

    def party_action(self):
        """Run the party's ACTIVATE action.

        Activates the obstacle the party faces (honoring the "counter"
        attribute, which lets the activation reach one tile further), then
        any objects the party is standing on or under.
        """
        old_pos = self.party_avatar.position
        desired = old_pos.step(self.party_avatar.facing)

        # Activate object that the party is looking at
        if self.terrain_layer.valid_pos(desired):
            obj_in_front = self.object_layer.get_pos(desired).obstacle
            if obj_in_front is not None:
                obj_in_front.activate(self.party_avatar,
                                      self.party_avatar.facing)
            across_pos = desired.step(self.party_avatar.facing)
            if (self.terrain_layer.valid_pos(across_pos) and
                    ((obj_in_front is not None and obj_in_front.is_counter()) or
                     any([layer.get_pos(desired).is_counter() for layer in
                          self.scenario_layer]))):
                # Counter attribute
                obj_across = self.object_layer.get_pos(across_pos).obstacle
                if obj_across is not None:
                    obj_across.activate(self.party_avatar,
                                        self.party_avatar.facing)

        # Activate objects that the party is standing on or under
        old_object = self.object_layer.get_pos(old_pos)
        for obj in old_object.below:
            obj.activate(self.party_avatar, self.party_avatar.facing)
        for obj in old_object.above:
            obj.activate(self.party_avatar, self.party_avatar.facing)

    def schedule_message(self, message):
        """
        Add a Dialog to the message queue, displaying it as soon as the
        messages that were previously there are done.
        """
        self.controller.message_queue.push(message)

    def pause(self, length):
        """
        Stop movement and acting in the map for *length* frames.
        """
        self.pause_delay = length

    def __repr__(self):
        return '(Map width=%s height=%s file=%s)' % (str(self.width),
                                                     str(self.height),
                                                     self.map_file)

    def __str__(self):
        # ASCII sketch of the map frame, marking only the party's cell.
        result = ''
        result += '+' + '-' * self.width + '+\n'
        for y in range(self.height):
            result += '|'
            for x in range(self.width):
                if self.party_avatar is not None and\
                        self.party_avatar.position == Position(x, y):
                    result += 'P'
                else:
                    result += ' '
            result += '|\n'
        result += '+' + '-' * self.width + '+\n'
        return result

    def sync_movement(self, objects):
        """
        Stop movement and acting in the map, except for the movement
        already scheduled or in progress in the objects specified.

        *objects* should be a list of those MapObjects.
        """
        self.controller.sync_movement(objects)

    def save_world(self, filename):
        """
        Save the game to the given file.
        """
        self.world.state.save_local(self.id, self.save_state())
        party_local_state = (self.id, self.party_avatar.position,
                             self.party_avatar.facing)
        self.world.state.save_local(PARTY_POSITION_LOCAL_STATE,
                                    party_local_state)
        self.world.save(filename)

    def add_context(self, context):
        """
        Add a context to be run over this map and the message queue
        context.
        """
        self.contexts.append(context)

    def set_music(self, music_file):
        """
        Set the background music for the map.
        """
        self.music = music_file

    def gameover(self):
        """
        End the game.
        """
        self.custom_gameover()
        self.controller.gameover()
        self.world.custom_gameover()

    def custom_gameover(self):
        """
        *Virtual.*

        Overload to perform any reaction necessary to a MapModel.gameover()
        call.
        """
        pass
class ObjectCell(object):
    """Holds the map objects occupying one tile.

    A tile keeps at most one obstacle object plus any number of
    non-obstacle objects drawn below or above the party layer.
    """

    def __init__(self):
        self.below = []
        self.obstacle = None
        self.above = []
        # Cache the bound list methods so the hot add/remove paths skip
        # repeated attribute lookups.
        self.below_append = self.below.append
        self.below_remove = self.below.remove
        self.above_append = self.above.append
        self.above_remove = self.above.remove

    def add_object(self, obj):
        """Place *obj* in the slot that matches its layering."""
        if obj.is_obstacle():
            self.obstacle = obj
            return
        adder = self.below_append if obj.is_below() else self.above_append
        adder(obj)

    def remove_object(self, obj):
        """Take *obj* out of whichever slot it occupies."""
        if obj.is_obstacle():
            self.obstacle = None
            return
        remover = self.below_remove if obj.is_below() else self.above_remove
        remover(obj)
/3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/fixes/fix_metaclass.py | from lib2to3 import fixer_base
from ..fixer_util import Name, syms, Node, Leaf, Newline, find_root, indentation, suitify
from lib2to3.pygram import token
def has_metaclass(parent):
    """Scan a classdef node for a ``metaclass=X`` keyword argument.

    Returns the matched nodes/leaves when a metaclass keyword is present
    in the class header (a list for the single-argument form, a tuple
    ``(comma, meta, equal, name)`` for the arglist form), or None when
    the class declares no metaclass.
    """
    results = None
    for node in parent.children:
        kids = node.children
        if node.type == syms.argument:
            # Single-argument form: "class X(metaclass=Y)".
            if kids[0] == Leaf(token.NAME, "metaclass") and \
                    kids[1] == Leaf(token.EQUAL, "=") and \
                    kids[2]:
                #Hack to avoid "class X(=):" with this case.
                results = [node] + kids
                break
        elif node.type == syms.arglist:
            # Argument list... loop through it looking for:
            # Node(*, [*, Leaf(token.NAME, u"metaclass"), Leaf(token.EQUAL, u"="), Leaf(*, *)]
            for child in node.children:
                if results: break
                if child.type == token.COMMA:
                    #Store the last comma, which precedes the metaclass
                    comma = child
                elif type(child) == Node:
                    meta = equal = name = None
                    for arg in child.children:
                        if arg == Leaf(token.NAME, "metaclass"):
                            #We have the (metaclass) part
                            meta = arg
                        elif meta and arg == Leaf(token.EQUAL, "="):
                            #We have the (metaclass=) part
                            equal = arg
                        elif meta and equal:
                            #Here we go, we have (metaclass=X)
                            name = arg
                            # NOTE(review): `comma` is only bound if a COMMA
                            # leaf preceded this argument -- presumably always
                            # the case inside an arglist (which only exists
                            # with multiple arguments); confirm, otherwise
                            # this line would raise NameError.
                            results = (comma, meta, equal, name)
                            break
    return results
class FixMetaclass(fixer_base.BaseFix):
    """3to2 fixer: rewrite the Python 3 ``class X(metaclass=Y)`` header
    into the Python 2 ``__metaclass__ = Y`` class-body assignment."""

    PATTERN = """
    classdef<any*>
    """

    def transform(self, node, results):
        meta_results = has_metaclass(node)
        if not meta_results: return
        # Strip the metaclass keyword (and its preceding comma) from the
        # class header.
        for meta in meta_results:
            meta.remove()
        target = Leaf(token.NAME, "__metaclass__")
        equal = Leaf(token.EQUAL, "=", prefix=" ")
        # meta is the last item in what was returned by has_metaclass(): name
        name = meta
        name.prefix = " "
        # Build the "__metaclass__ = Y" statement to insert into the body.
        stmt_node = Node(syms.atom, [target, equal, name])

        suitify(node)
        for item in node.children:
            if item.type == syms.suite:
                for stmt in item.children:
                    if stmt.type == token.INDENT:
                        # Insert, in reverse order, the statement, a newline,
                        # and an indent right after the first indented line
                        loc = item.children.index(stmt) + 1
                        # Keep consistent indentation form
                        ident = Leaf(token.INDENT, stmt.value)
                        item.insert_child(loc, ident)
                        item.insert_child(loc, Newline())
                        item.insert_child(loc, stmt_node)
                        break
/Canto-0.9.8.tar.gz/Canto-0.9.8/canto_next/plugins.py |
#Canto - RSS reader backend
# Copyright (C) 2016 Jack Miller <jack@codezen.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
import traceback
import logging
import sys
import os
log = logging.getLogger("PLUGINS")
class CantoWrongProgramException(Exception):
    """Raised by check_program() when a plugin is imported under a canto
    program it does not target; try_plugins() silently skips it."""
    pass
# Name of the currently running canto program; set via set_program().
PROGRAM="unset"
def set_program(program_name):
    """Record which canto program (e.g. daemon or client) is running."""
    global PROGRAM
    PROGRAM=program_name
def check_program(*args):
    """Abort plugin loading unless the running program is one of *args*.

    Plugins call this at import time; the raised exception is caught by
    try_plugins(), which then skips the plugin without logging an error.
    """
    global PROGRAM
    if PROGRAM not in args:
        raise CantoWrongProgramException
def try_plugins(topdir, plugin_default=True, disabled_plugins=None, enabled_plugins=None):
    """Discover and import plugins from ``topdir``/plugins.

    Creates the plugin directory and its ``__init__.py`` on first run,
    prepends *topdir* to sys.path, then imports every other ``.py`` file
    found there, honoring the enable/disable lists:

    - if *plugin_default* is True, everything is imported except names in
      *disabled_plugins*;
    - otherwise only names in *enabled_plugins* are imported.

    Returns a string of concatenated tracebacks if any plugin failed to
    import, otherwise None.
    """
    # Avoid mutable default arguments; effective defaults are unchanged.
    if disabled_plugins is None:
        disabled_plugins = []
    if enabled_plugins is None:
        enabled_plugins = []

    p = topdir + "/plugins"
    pinit = p + "/__init__.py"

    if not os.path.exists(p):
        log.info("Creating plugins directory.")
        try:
            os.mkdir(p)
        except Exception:
            tb = traceback.format_exc()
            log.error("Exception creating plugin directory")
            log.error("\n" + "".join(tb))
            return
    elif not os.path.isdir(p):
        # log.warn is a deprecated alias of log.warning.
        log.warning("Plugins file is not directory.")
        return

    if not os.path.exists(pinit):
        log.info("Creating plugin __init__.py")
        try:
            # Package marker so "plugins.<name>" is importable.
            f = open(pinit, "w")
            f.close()
        except Exception:
            tb = traceback.format_exc()
            log.error("Exception creating plugin __init__.py")
            log.error("\n" + "".join(tb))
            return

    # Add plugin path to front of Python path.
    sys.path.insert(0, topdir)

    all_errors = ""

    # Go ahead and import all .py
    for fname in sorted(os.listdir(p)):
        if fname.endswith(".py") and fname != "__init__.py":
            try:
                proper = fname[:-3]
                if plugin_default:
                    if proper in disabled_plugins:
                        log.info("[plugin] %s - DISABLED" % proper)
                    else:
                        __import__("plugins." + proper)
                        log.info("[plugin] %s" % proper)
                else:
                    if proper in enabled_plugins:
                        __import__("plugins." + proper)
                        log.info("[plugin] %s - ENABLED" % proper)
                    else:
                        log.info("[plugin] %s - DISABLED" % proper)
            except CantoWrongProgramException:
                # Plugin opted out for this program; not an error.
                pass
            except Exception:
                tb = traceback.format_exc()
                log.error("Exception importing file %s" % fname)
                nice = "".join(tb)
                all_errors += nice
                log.error(nice)

    if all_errors != "":
        return all_errors
class PluginHandler(object):
    """Mixin that lets Plugin subclasses override attributes of a host
    object.

    The host sets ``self.plugin_class`` to its Plugin marker class and
    calls update_plugin_lookups(); after that, any name present in
    ``plugin_attrs`` shadows the host's own attribute of the same name
    via __getattribute__.
    """

    def __init__(self):
        self.plugin_attrs = {}

    def update_plugin_lookups(self):
        """Instantiate all registered plugins and collect their attrs."""
        # Populate a dict of overridden attributes
        self.plugin_attrs = {}
        self.plugin_class_instances =\
                [ c(self) for c in self.plugin_class.__subclasses__() ]

        # Iterate over a copy so malformed plugins can be dropped mid-loop.
        for iclass in self.plugin_class_instances[:]:
            try:
                # Warn if we're overriding a previously defined plugin attr
                for iclass_attr in list(iclass.plugin_attrs.keys()):
                    if iclass_attr in self.plugin_attrs:
                        # log.warn is a deprecated alias of log.warning.
                        log.warning("Multiply defined plugin attribute!: %s" %
                                    iclass_attr)
                self.plugin_attrs.update(iclass.plugin_attrs)
            except Exception:
                log.error("Error initializing plugins:")
                log.error(traceback.format_exc())

                # Malformed plugins removed from instances
                self.plugin_class_instances.remove(iclass)
                continue

    def __getattribute__(self, name):
        # plugin_attrs itself, and any non-overridden name, resolve
        # normally; overridden names come from the plugin dict.
        if name == "plugin_attrs" or name not in self.plugin_attrs:
            return object.__getattribute__(self, name)
        return self.plugin_attrs[name]
# Plugin is the base class for all of the separate plugin classes for each Gui
# object. There are two reasons to pin plugins to an empty class:
#
# - 'object' in the hierarchy via PluginHandler means we can use
# __subclasses__, the cornerstone of the plugins system
#
# - This allows the plugins to have a hard distinction between self (the
# instantiated class object) and obj (the instantiated main object that's
# being overridden). This means that plugins don't have to worry about
# clobbering anything.
#
# As a side effect, using the separate plugin architecture, we also can
# enable/disable pluggability on a class basis. For example, if TagList
# didn't specify a plugin_class, then it could not be overridden or hooked.
class Plugin(object):
    """Deliberately empty base class for all per-object plugin classes;
    overriding works through its __subclasses__() in PluginHandler."""
    pass
/DeepCell-CPU-0.12.9.tar.gz/DeepCell-CPU-0.12.9/deepcell/image_generators/semantic.py | import os
import numpy as np
from skimage.transform import rescale, resize
from tensorflow.keras import backend as K
from tensorflow.keras.preprocessing.image import array_to_img
from tensorflow.keras.preprocessing.image import Iterator
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.platform import tf_logging as logging
try:
import scipy
# scipy.linalg cannot be accessed until explicitly imported
from scipy import linalg
except ImportError:
scipy = None
from deepcell.image_generators import _transform_masks
class SemanticIterator(Iterator):
    """Iterator yielding data from Numpy arrays (``X`` and ``y``).

    Args:
        train_dict (dict): Consists of numpy arrays for ``X`` and ``y``.
        image_data_generator (ImageDataGenerator): For random transformations
            and normalization.
        batch_size (int): Size of a batch.
        shuffle (bool): Whether to shuffle the data between epochs.
        transforms (list): Names of mask transforms used to build the
            semantic targets. Defaults to ``['outer-distance']``.
        transforms_kwargs (dict): Map of transform name to keyword
            arguments for that transform. Defaults to ``{}``.
        seed (int): Random seed for data shuffling.
        min_objects (int): Images with fewer than ``min_objects`` are ignored.
        data_format (str): A string, one of ``channels_last`` (default)
            or ``channels_first``. The ordering of the dimensions in the
            inputs. ``channels_last`` corresponds to inputs with shape
            ``(batch, height, width, channels)`` while ``channels_first``
            corresponds to inputs with shape
            ``(batch, channels, height, width)``.
        save_to_dir (str): Optional directory where to save the pictures
            being yielded, in a viewable format. This is useful
            for visualizing the random transformations being
            applied, for debugging purposes.
        save_prefix (str): Prefix to use for saving sample
            images (if ``save_to_dir`` is set).
        save_format (str): Format to use for saving sample images
            (if ``save_to_dir`` is set).
    """
    def __init__(self,
                 train_dict,
                 image_data_generator,
                 batch_size=1,
                 shuffle=False,
                 transforms=None,
                 transforms_kwargs=None,
                 seed=None,
                 min_objects=3,
                 data_format='channels_last',
                 save_to_dir=None,
                 save_prefix='',
                 save_format='png'):
        # Avoid mutable default arguments; the effective defaults are
        # unchanged: ['outer-distance'] and {}.
        if transforms is None:
            transforms = ['outer-distance']
        if transforms_kwargs is None:
            transforms_kwargs = {}

        # Load data
        if 'X' not in train_dict:
            raise ValueError('No training data found in train_dict')

        if 'y' not in train_dict:
            raise ValueError('Instance masks are required for the '
                             'SemanticIterator')

        X, y = train_dict['X'], train_dict['y']

        if X.shape[0] != y.shape[0]:
            raise ValueError('Training batches and labels should have the same '
                             f'length. Found X.shape: {X.shape} y.shape: {y.shape}')

        if X.ndim != 4:
            raise ValueError('Input data in `SemanticIterator` '
                             'should have rank 4. You passed an array '
                             'with shape', X.shape)

        self.x = np.asarray(X, dtype=K.floatx())
        self.y = np.asarray(y, dtype='int32')

        self.transforms = transforms
        self.transforms_kwargs = transforms_kwargs
        self.channel_axis = 3 if data_format == 'channels_last' else 1
        self.image_data_generator = image_data_generator
        self.data_format = data_format
        self.save_to_dir = save_to_dir
        self.save_prefix = save_prefix
        self.save_format = save_format
        self.min_objects = min_objects

        # Remove images with small numbers of cells
        invalid_batches = []
        for b in range(self.x.shape[0]):
            # Background (0) does not count as an object.
            if len(np.unique(self.y[b])) - 1 < self.min_objects:
                invalid_batches.append(b)

        invalid_batches = np.array(invalid_batches, dtype='int')

        if invalid_batches.size > 0:
            logging.warning('Removing %s of %s images with fewer than %s '
                            'objects.', invalid_batches.size, self.x.shape[0],
                            self.min_objects)

        self.x = np.delete(self.x, invalid_batches, axis=0)
        self.y = np.delete(self.y, invalid_batches, axis=0)

        super().__init__(
            self.x.shape[0], batch_size, shuffle, seed)

    def _transform_labels(self, y):
        """Apply every configured transform to each label channel of *y*.

        Returns a list of transformed label arrays (one per label channel
        per transform), each with a batch dimension.
        """
        y_semantic_list = []
        # loop over channels axis of labels in case there are multiple label types
        for label_num in range(y.shape[self.channel_axis]):

            if self.channel_axis == 1:
                y_current = y[:, label_num:label_num + 1, ...]
            else:
                y_current = y[..., label_num:label_num + 1]

            for transform in self.transforms:
                transform_kwargs = self.transforms_kwargs.get(transform, dict())
                y_transform = _transform_masks(y_current, transform,
                                               data_format=self.data_format,
                                               **transform_kwargs)
                y_semantic_list.append(y_transform)

        return y_semantic_list

    def _get_batches_of_transformed_samples(self, index_array):
        """Assemble one augmented batch for the indices in *index_array*."""
        batch_x = np.zeros(tuple([len(index_array)] + list(self.x.shape)[1:]),
                           dtype=self.x.dtype)
        batch_y = []

        for i, j in enumerate(index_array):
            x = self.x[j]

            # _transform_labels expects batch dimension
            y_semantic_list = self._transform_labels(self.y[j:j + 1])

            # initialize batch_y
            if len(batch_y) == 0:
                for ys in y_semantic_list:
                    shape = tuple([len(index_array)] + list(ys.shape[1:]))
                    batch_y.append(np.zeros(shape, dtype=ys.dtype))

            # random_transform does not expect batch dimension
            y_semantic_list = [ys[0] for ys in y_semantic_list]

            # Apply transformation
            x, y_semantic_list = self.image_data_generator.random_transform(
                x, y_semantic_list)

            x = self.image_data_generator.standardize(x)

            batch_x[i] = x

            for k, ys in enumerate(y_semantic_list):
                batch_y[k][i] = ys

        if self.save_to_dir:
            for i, j in enumerate(index_array):
                if self.data_format == 'channels_first':
                    img_x = np.expand_dims(batch_x[i, 0, ...], 0)
                else:
                    img_x = np.expand_dims(batch_x[i, ..., 0], -1)
                img = array_to_img(img_x, self.data_format, scale=True)
                fname = f'{self.save_prefix}_{j}_{np.random.randint(1e4)}.{self.save_format}'
                img.save(os.path.join(self.save_to_dir, fname))

                if self.y is not None:
                    # Save argmax of y batch
                    for k, y_sem in enumerate(batch_y):
                        if y_sem[i].shape[self.channel_axis - 1] == 1:
                            img_y = y_sem[i]
                        else:
                            img_y = np.argmax(y_sem[i],
                                              axis=self.channel_axis - 1)
                            img_y = np.expand_dims(img_y,
                                                   axis=self.channel_axis - 1)
                        img = array_to_img(img_y, self.data_format, scale=True)
                        fname = 'y_{sem}_{prefix}_{index}_{hash}.{format}'.format(
                            sem=k,
                            prefix=self.save_prefix,
                            index=j,
                            hash=np.random.randint(1e4),
                            format=self.save_format)
                        img.save(os.path.join(self.save_to_dir, fname))

        return batch_x, batch_y

    def next(self):
        """For python 2.x. Returns the next batch.
        """
        # Keeps under lock only the mechanism which advances
        # the indexing of each batch.
        with self.lock:
            index_array = next(self.index_generator)
        # The transformation of images is not under thread lock
        # so it can be done in parallel
        return self._get_batches_of_transformed_samples(index_array)
class SemanticDataGenerator(ImageDataGenerator):
    """Generates batches of tensor image data with real-time data augmentation.
    The data will be looped over (in batches).

    Args:
        featurewise_center (bool): Set input mean to 0 over the dataset,
            feature-wise.
        samplewise_center (bool): Set each sample mean to 0.
        featurewise_std_normalization (bool): Divide inputs by std
            of the dataset, feature-wise.
        samplewise_std_normalization (bool): Divide each input by its std.
        zca_epsilon (float): Epsilon for ZCA whitening. Default is 1e-6.
        zca_whitening (bool): Apply ZCA whitening.
        rotation_range (int): Degree range for random rotations.
        width_shift_range (float): 1-D array-like or int

            - float: fraction of total width, if < 1, or pixels if >= 1.
            - 1-D array-like: random elements from the array.
            - int: integer number of pixels from interval
              ``(-width_shift_range, +width_shift_range)``
            - With ``width_shift_range=2`` possible values are integers
              ``[-1, 0, +1]``, same as with ``width_shift_range=[-1, 0, +1]``,
              while with ``width_shift_range=1.0`` possible values are floats
              in the interval [-1.0, +1.0).

        height_shift_range: Float, 1-D array-like or int

            - float: fraction of total height, if < 1, or pixels if >= 1.
            - 1-D array-like: random elements from the array.
            - int: integer number of pixels from interval
              ``(-height_shift_range, +height_shift_range)``
            - With ``height_shift_range=2`` possible values
              are integers ``[-1, 0, +1]``,
              same as with ``height_shift_range=[-1, 0, +1]``,
              while with ``height_shift_range=1.0`` possible values are floats
              in the interval [-1.0, +1.0).

        shear_range (float): Shear Intensity
            (Shear angle in counter-clockwise direction in degrees)
        zoom_range (float): float or [lower, upper], Range for random zoom.
            If a float, ``[lower, upper] = [1-zoom_range, 1+zoom_range]``.
        channel_shift_range (float): range for random channel shifts.
        fill_mode (str): One of {"constant", "nearest", "reflect" or "wrap"}.
            Default is 'nearest'. Points outside the boundaries of the input
            are filled according to the given mode:

                - 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
                - 'nearest':  aaaaaaaa|abcd|dddddddd
                - 'reflect':  abcddcba|abcd|dcbaabcd
                - 'wrap':  abcdabcd|abcd|abcdabcd

        cval (float): Value used for points outside the boundaries
            when ``fill_mode = "constant"``.
        horizontal_flip (bool): Randomly flip inputs horizontally.
        vertical_flip (bool): Randomly flip inputs vertically.
        rescale: rescaling factor. Defaults to None. If None or 0, no rescaling
            is applied, otherwise we multiply the data by the value provided
            (before applying any other transformation).
        preprocessing_function: function that will be implied on each input.
            The function will run after the image is resized and augmented.
            The function should take one argument:
            one image (Numpy tensor with rank 3),
            and should output a Numpy tensor with the same shape.
        data_format (str): A string, one of ``channels_last`` (default)
            or ``channels_first``. The ordering of the dimensions in the
            inputs. ``channels_last`` corresponds to inputs with shape
            ``(batch, height, width, channels)`` while ``channels_first``
            corresponds to inputs with shape
            ``(batch, channels, height, width)``.
        validation_split (float): Fraction of images reserved for validation
            (strictly between 0 and 1).
    """
    def flow(self,
             train_dict,
             batch_size=1,
             transforms=None,
             transforms_kwargs=None,
             min_objects=3,
             shuffle=True,
             seed=None,
             save_to_dir=None,
             save_prefix='',
             save_format='png'):
        """Generates batches of augmented/normalized data with given arrays.

        Args:
            train_dict (dict): Consists of numpy arrays for ``X`` and ``y``.
            batch_size (int): Size of a batch. Defaults to 1.
            transforms (list): Names of mask transforms used to build the
                semantic targets. Defaults to ``['outer-distance']``.
            transforms_kwargs (dict): Map of transform name to keyword
                arguments for that transform. Defaults to ``{}``.
            min_objects (int): Minumum number of objects allowed per image
            shuffle (bool): Whether to shuffle the data between epochs.
                Defaults to ``True``.
            seed (int): Random seed for data shuffling.
            save_to_dir (str): Optional directory where to save the pictures
                being yielded, in a viewable format. This is useful
                for visualizing the random transformations being
                applied, for debugging purposes.
            save_prefix (str): Prefix to use for saving sample
                images (if ``save_to_dir`` is set).
            save_format (str): Format to use for saving sample images
                (if ``save_to_dir`` is set).

        Returns:
            SemanticIterator: An ``Iterator`` yielding tuples of ``(x, y)``,
            where ``x`` is a numpy array of image data and ``y`` is list of
            numpy arrays of transformed masks of the same shape.
        """
        # Avoid mutable default arguments; the effective defaults are
        # unchanged: ['outer-distance'] and {}.
        if transforms is None:
            transforms = ['outer-distance']
        if transforms_kwargs is None:
            transforms_kwargs = {}
        return SemanticIterator(
            train_dict,
            self,
            batch_size=batch_size,
            transforms=transforms,
            transforms_kwargs=transforms_kwargs,
            shuffle=shuffle,
            min_objects=min_objects,
            seed=seed,
            data_format=self.data_format,
            save_to_dir=save_to_dir,
            save_prefix=save_prefix,
            save_format=save_format)

    def random_transform(self, x, y=None, seed=None):
        """Applies a random transformation to an image.

        Args:
            x (numpy.array): 3D tensor or list of 3D tensors,
                single image.
            y (numpy.array): 3D tensor or list of 3D tensors,
                label mask(s) for ``x``, optional.
            seed (int): Random seed.

        Returns:
            numpy.array: A randomly transformed copy of the input (same shape).
            If ``y`` is passed, it is transformed if necessary and returned.
        """
        params = self.get_random_transform(x.shape, seed)

        if isinstance(x, list):
            x = [self.apply_transform(x_i, params) for x_i in x]
        else:
            x = self.apply_transform(x, params)

        if y is None:
            return x

        # Nullify the transforms that don't affect `y`
        params['brightness'] = None
        params['channel_shift_intensity'] = None

        # Label masks must be transformed with nearest-neighbor
        # (order 0) interpolation to keep integer labels intact.
        _interpolation_order = self.interpolation_order
        self.interpolation_order = 0

        if isinstance(y, list):
            y_new = []
            for y_i in y:
                if y_i.shape[self.channel_axis - 1] > 1:
                    y_t = self.apply_transform(y_i, params)

                # Keep original interpolation order if it is a
                # regression task
                elif y_i.shape[self.channel_axis - 1] == 1:
                    self.interpolation_order = _interpolation_order
                    y_t = self.apply_transform(y_i, params)
                    self.interpolation_order = 0
                y_new.append(y_t)
            y = y_new
        else:
            y = self.apply_transform(y, params)

        # Restore the caller-configured interpolation order.
        self.interpolation_order = _interpolation_order
        return x, y
class SemanticMovieIterator(Iterator):
    """Iterator yielding data from Numpy arrays (``X`` and ``y``).
    Args:
        train_dict (dict): Dictionary consisting of numpy arrays
            for ``X`` and ``y``.
        movie_data_generator (SemanticMovieGenerator): ``SemanticMovieGenerator``
            to use for random transformations and normalization.
        batch_size (int): Size of a batch.
        frames_per_batch (int): Size of z-axis in generated batches.
        shuffle (boolean): Whether to shuffle the data between epochs.
        transforms (list): List of transform names applied to the label
            masks. Defaults to ``['outer-distance']``.
        transforms_kwargs (dict): Keyword arguments for each transform,
            keyed by transform name. Defaults to ``{}``.
        seed (int): Random seed for data shuffling.
        min_objects (int): Minumum number of objects allowed per image.
        data_format (str): A string, one of ``channels_last`` (default)
            or ``channels_first``. The ordering of the dimensions in the
            inputs. ``channels_last`` corresponds to inputs with shape
            ``(batch, height, width, channels)`` while ``channels_first``
            corresponds to inputs with shape
            ``(batch, channels, height, width)``.
        save_to_dir (str): Optional directory where to save the pictures
            being yielded, in a viewable format. This is useful
            for visualizing the random transformations being
            applied, for debugging purposes.
        save_prefix (str): Prefix to use for saving sample
            images (if ``save_to_dir`` is set).
        save_format (str): Format to use for saving sample images
            (if ``save_to_dir`` is set).
    """
    def __init__(self,
                 train_dict,
                 movie_data_generator,
                 batch_size=1,
                 frames_per_batch=5,
                 shuffle=False,
                 transforms=None,
                 transforms_kwargs=None,
                 seed=None,
                 min_objects=3,
                 data_format='channels_last',
                 save_to_dir=None,
                 save_prefix='',
                 save_format='png'):
        # Use None sentinels instead of mutable default arguments so the
        # list/dict defaults are not shared across instances.
        if transforms is None:
            transforms = ['outer-distance']
        if transforms_kwargs is None:
            transforms_kwargs = {}
        # Load data
        if 'X' not in train_dict:
            raise ValueError('No training data found in train_dict')
        if 'y' not in train_dict:
            raise ValueError('Instance masks are required for the '
                             'SemanticMovieIterator')
        X, y = train_dict['X'], train_dict['y']
        if X.shape[0] != y.shape[0]:
            raise ValueError('Training batches and labels should have the same'
                             f'length. Found X.shape: {X.shape} y.shape: {y.shape}')
        if X.ndim != 5:
            raise ValueError('Input data in `SemanticMovieIterator` '
                             'should have rank 5. You passed an array '
                             'with shape', X.shape)
        self.x = np.asarray(X, dtype=K.floatx())
        self.y = np.asarray(y, dtype='int32')
        self.frames_per_batch = frames_per_batch
        self.transforms = transforms
        self.transforms_kwargs = transforms_kwargs
        # Axis indices within 5D batches: (batch, time, row, col, channel)
        # for channels_last, (batch, channel, time, row, col) otherwise.
        self.channel_axis = 4 if data_format == 'channels_last' else 1
        self.time_axis = 1 if data_format == 'channels_last' else 2
        self.row_axis = 2 if data_format == 'channels_last' else 3
        self.col_axis = 3 if data_format == 'channels_last' else 4
        self.movie_data_generator = movie_data_generator
        self.data_format = data_format
        self.min_objects = min_objects
        self.save_to_dir = save_to_dir
        self.save_prefix = save_prefix
        self.save_format = save_format
        if X.shape[self.time_axis] - frames_per_batch < 0:
            raise ValueError(
                'The number of frames used in each training batch should '
                'be less than the number of frames in the training data!')
        # Remove images with small numbers of cells
        invalid_batches = []
        for b in range(self.x.shape[0]):
            if len(np.unique(self.y[b])) - 1 < self.min_objects:
                invalid_batches.append(b)
        invalid_batches = np.array(invalid_batches, dtype='int')
        if invalid_batches.size > 0:
            logging.warning('Removing %s of %s images with fewer than %s '
                            'objects.', invalid_batches.size, self.x.shape[0],
                            self.min_objects)
        self.x = np.delete(self.x, invalid_batches, axis=0)
        self.y = np.delete(self.y, invalid_batches, axis=0)
        super().__init__(
            self.x.shape[0], batch_size, shuffle, seed)
    def _transform_labels(self, y):
        """Apply each configured transform to every label channel of ``y``.
        Returns a list of transformed masks, one per (channel, transform)
        pair, each keeping the batch dimension of ``y``.
        """
        y_semantic_list = []
        # loop over channels axis of labels in case there are multiple label types
        for label_num in range(y.shape[self.channel_axis]):
            if self.channel_axis == 1:
                y_current = y[:, label_num:label_num + 1, ...]
            else:
                y_current = y[..., label_num:label_num + 1]
            for transform in self.transforms:
                transform_kwargs = self.transforms_kwargs.get(transform, dict())
                y_transform = _transform_masks(y_current, transform,
                                               data_format=self.data_format,
                                               **transform_kwargs)
                y_semantic_list.append(y_transform)
        return y_semantic_list
    def _get_batches_of_transformed_samples(self, index_array):
        """Build one batch: crop a random time window per sample, transform
        labels, augment, and standardize."""
        if self.data_format == 'channels_first':
            shape = (len(index_array), self.x.shape[1], self.frames_per_batch,
                     self.x.shape[3], self.x.shape[4])
        else:
            shape = tuple([len(index_array), self.frames_per_batch] +
                          list(self.x.shape)[2:])
        batch_x = np.zeros(shape, dtype=self.x.dtype)
        batch_y = []
        for i, j in enumerate(index_array):
            last_frame = self.x.shape[self.time_axis] - self.frames_per_batch
            # Bug fix: randint(0, high=0) raises ValueError, but the
            # constructor explicitly allows movies with exactly
            # `frames_per_batch` frames, so guard the zero case.
            if last_frame == 0:
                time_start = 0
            else:
                time_start = np.random.randint(0, high=last_frame)
            time_end = time_start + self.frames_per_batch
            if self.time_axis == 1:
                x = self.x[j, time_start:time_end, ...]
                y = self.y[j:j + 1, time_start:time_end, ...]
            else:
                x = self.x[j, :, time_start:time_end, ...]
                y = self.y[j:j + 1, :, time_start:time_end, ...]
            # _transform_labels expects batch dimension
            y_semantic_list = self._transform_labels(y)
            # initialize batch_y
            if len(batch_y) == 0:
                for ys in y_semantic_list:
                    shape = tuple([len(index_array)] + list(ys.shape[1:]))
                    batch_y.append(np.zeros(shape, dtype=ys.dtype))
            # random_transform does not expect batch dimension
            y_semantic_list = [ys[0] for ys in y_semantic_list]
            # Apply transformation
            x, y_semantic_list = self.movie_data_generator.random_transform(
                x, y_semantic_list)
            x = self.movie_data_generator.standardize(x)
            batch_x[i] = x
            for k, ys in enumerate(y_semantic_list):
                batch_y[k][i] = ys
        if self.save_to_dir:
            time_axis = 2 if self.data_format == 'channels_first' else 1
            for i, j in enumerate(index_array):
                for frame in range(batch_x.shape[time_axis]):
                    if time_axis == 2:
                        img = array_to_img(batch_x[i, :, frame],
                                           self.data_format, scale=True)
                    else:
                        img = array_to_img(batch_x[i, frame],
                                           self.data_format, scale=True)
                    fname = f'{self.save_prefix}_{j}_{np.random.randint(1e4)}.{self.save_format}'
                    img.save(os.path.join(self.save_to_dir, fname))
                    if self.y is not None:
                        # Save argmax of y batch
                        if self.time_axis == 2:
                            # channels_first: channel axis leads the frame
                            img_channel_axis = 0
                            img_y = batch_y[0][i, :, frame]
                        else:
                            img_channel_axis = -1
                            img_y = batch_y[0][i, frame]
                        img_y = np.argmax(img_y, axis=img_channel_axis)
                        img_y = np.expand_dims(img_y, axis=img_channel_axis)
                        img = array_to_img(img_y, self.data_format, scale=True)
                        fname = 'y_{prefix}_{index}_{hash}.{format}'.format(
                            prefix=self.save_prefix,
                            index=j,
                            hash=np.random.randint(1e4),
                            format=self.save_format)
                        img.save(os.path.join(self.save_to_dir, fname))
        return batch_x, batch_y
    def next(self):
        """For python 2.x. Returns the next batch.
        """
        # Keeps under lock only the mechanism which advances
        # the indexing of each batch.
        with self.lock:
            index_array = next(self.index_generator)
        # The transformation of images is not under thread lock
        # so it can be done in parallel
        return self._get_batches_of_transformed_samples(index_array)
class SemanticMovieGenerator(ImageDataGenerator):
    """Generates batches of tensor image data with real-time data augmentation.
    The data will be looped over (in batches).
    Args:
        featurewise_center (bool): Set input mean to 0 over the dataset,
            feature-wise.
        samplewise_center (bool): Set each sample mean to 0.
        featurewise_std_normalization (bool): Divide inputs by std
            of the dataset, feature-wise.
        samplewise_std_normalization (bool): Divide each input by its std.
        zca_epsilon (float): Epsilon for ZCA whitening. Default is 1e-6.
        zca_whitening (bool): Apply ZCA whitening.
        rotation_range (int): Degree range for random rotations.
        width_shift_range (float): 1-D array-like or int
            - float: fraction of total width, if < 1, or pixels if >= 1.
            - 1-D array-like: random elements from the array.
            - int: integer number of pixels from interval
              ``(-width_shift_range, +width_shift_range)``
            - With ``width_shift_range=2`` possible values are integers
              ``[-1, 0, +1]``, same as with ``width_shift_range=[-1, 0, +1]``,
              while with ``width_shift_range=1.0`` possible values are floats
              in the interval [-1.0, +1.0).
        height_shift_range: Float, 1-D array-like or int
            - float: fraction of total height, if < 1, or pixels if >= 1.
            - 1-D array-like: random elements from the array.
            - int: integer number of pixels from interval
              ``(-height_shift_range, +height_shift_range)``
            - With ``height_shift_range=2`` possible values
              are integers ``[-1, 0, +1]``,
              same as with ``height_shift_range=[-1, 0, +1]``,
              while with ``height_shift_range=1.0`` possible values are floats
              in the interval [-1.0, +1.0).
        shear_range (float): Shear Intensity
            (Shear angle in counter-clockwise direction in degrees)
        zoom_range (float): float or [lower, upper], Range for random zoom.
            If a float, ``[lower, upper] = [1-zoom_range, 1+zoom_range]``.
        channel_shift_range (float): range for random channel shifts.
        fill_mode (str): One of {"constant", "nearest", "reflect" or "wrap"}.
            Default is 'nearest'. Points outside the boundaries of the input
            are filled according to the given mode:
                - 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
                - 'nearest':  aaaaaaaa|abcd|dddddddd
                - 'reflect':  abcddcba|abcd|dcbaabcd
                - 'wrap':  abcdabcd|abcd|abcdabcd
        cval (float): Value used for points outside the boundaries
            when ``fill_mode = "constant"``.
        horizontal_flip (bool): Randomly flip inputs horizontally.
        vertical_flip (bool): Randomly flip inputs vertically.
        rescale: rescaling factor. Defaults to None. If None or 0, no rescaling
            is applied, otherwise we multiply the data by the value provided
            (before applying any other transformation).
        preprocessing_function: function that will be applied on each input.
            The function will run after the image is resized and augmented.
            The function should take one argument:
            one image (Numpy tensor with rank 3),
            and should output a Numpy tensor with the same shape.
        data_format (str): A string, one of ``channels_last`` (default)
            or ``channels_first``. The ordering of the dimensions in the
            inputs. ``channels_last`` corresponds to inputs with shape
            ``(batch, height, width, channels)`` while ``channels_first``
            corresponds to inputs with shape
            ``(batch, channels, height, width)``.
        validation_split (float): Fraction of images reserved for validation
            (strictly between 0 and 1).
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Change the axes for 5D data: (batch, channel, time, row, col)
        # for channels_first, (batch, time, row, col, channel) otherwise.
        if self.data_format == 'channels_first':
            self.channel_axis = 1
            self.row_axis = 3
            self.col_axis = 4
            self.time_axis = 2
        if self.data_format == 'channels_last':
            self.channel_axis = 4
            self.row_axis = 2
            self.col_axis = 3
            self.time_axis = 1
    def flow(self,
             train_dict,
             batch_size=1,
             frames_per_batch=5,
             transforms=None,
             transforms_kwargs=None,
             shuffle=True,
             min_objects=3,
             seed=None,
             save_to_dir=None,
             save_prefix='',
             save_format='png'):
        """Generates batches of augmented/normalized data with given arrays.
        Args:
            train_dict (dict): Consists of numpy arrays for ``X`` and ``y``.
            batch_size (int): Size of a batch.
            frames_per_batch (int): Size of z axis in generated batches.
            transforms (list): List of transform names applied to the label
                masks. Defaults to ``['outer-distance']``.
            transforms_kwargs (dict): Keyword arguments for each transform,
                keyed by transform name. Defaults to ``{}``.
            shuffle (bool): Whether to shuffle the data between epochs.
            seed (int): Random seed for data shuffling.
            min_objects (int): Images with fewer than ``min_objects``
                are ignored.
            save_to_dir (str): Optional directory where to save the pictures
                being yielded, in a viewable format. This is useful
                for visualizing the random transformations being
                applied, for debugging purposes.
            save_prefix (str): Prefix to use for saving sample
                images (if ``save_to_dir`` is set).
            save_format (str): Format to use for saving sample images
                (if ``save_to_dir`` is set).
        Returns:
            SemanticMovieIterator: An ``Iterator`` yielding tuples of
            ``(x, y)``, where ``x`` is a numpy array of image data and
            ``y`` is list of numpy arrays of transformed masks of the
            same shape.
        """
        # Use None sentinels instead of mutable default arguments so the
        # list/dict defaults are not shared across calls.
        if transforms is None:
            transforms = ['outer-distance']
        if transforms_kwargs is None:
            transforms_kwargs = {}
        return SemanticMovieIterator(
            train_dict,
            self,
            batch_size=batch_size,
            frames_per_batch=frames_per_batch,
            transforms=transforms,
            transforms_kwargs=transforms_kwargs,
            shuffle=shuffle,
            min_objects=min_objects,
            seed=seed,
            data_format=self.data_format,
            save_to_dir=save_to_dir,
            save_prefix=save_prefix,
            save_format=save_format)
    def standardize(self, x):
        """Apply the normalization configuration to a batch of inputs.
        Args:
            x (numpy.array): batch of inputs to be normalized.
        Returns:
            numpy.array: The normalized inputs.
        """
        # TODO: standardize each image, not all frames at once
        if self.preprocessing_function:
            x = self.preprocessing_function(x)
        if self.rescale:
            x *= self.rescale
        # x is a single image, so it doesn't have image number at index 0
        img_channel_axis = self.channel_axis - 1
        if self.samplewise_center:
            x -= np.mean(x, axis=img_channel_axis, keepdims=True)
        if self.samplewise_std_normalization:
            x /= (np.std(x, axis=img_channel_axis, keepdims=True) +
                  K.epsilon())
        if self.featurewise_center:
            if self.mean is not None:
                x -= self.mean
            else:
                # Bug fix: this branch previously warned about
                # `featurewise_std_normalization` instead of
                # `featurewise_center`.
                logging.warning('This ImageDataGenerator specifies '
                                '`featurewise_center`, but it '
                                'hasn\'t been fit on any training data. '
                                'Fit it first by calling `.fit(numpy_data)`.')
        if self.featurewise_std_normalization:
            if self.std is not None:
                x /= (self.std + K.epsilon())
            else:
                logging.warning('This ImageDataGenerator specifies '
                                '`featurewise_std_normalization`, but it '
                                'hasn\'t been fit on any training data. Fit '
                                'it first by calling `.fit(numpy_data)`.')
        if self.zca_whitening:
            if self.principal_components is not None:
                flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))
                whitex = np.dot(flatx, self.principal_components)
                x = np.reshape(whitex, x.shape)
            else:
                logging.warning('This ImageDataGenerator specifies '
                                '`zca_whitening`, but it hasn\'t '
                                'been fit on any training data. Fit it '
                                'first by calling `.fit(numpy_data)`.')
        return x
    def fit(self, x, augment=False, rounds=1, seed=None):
        """Fits internal statistics to some sample data.
        Required for featurewise_center, featurewise_std_normalization
        and zca_whitening.
        Args:
            x (numpy.array): The data to fit on. Should have rank 5.
            augment (bool): Whether to fit on randomly augmented samples.
            rounds (bool): If augment,
                how many augmentation passes to do over the data.
            seed (int): Random seed for data shuffling.
        Raises:
            ValueError: If input rank is not 5.
            ImportError: If zca_whitening is used and scipy is not available.
        """
        x = np.asarray(x, dtype=self.dtype)
        if x.ndim != 5:
            raise ValueError('Input to `.fit()` should have rank 5. '
                             'Got array with shape: ' + str(x.shape))
        if x.shape[self.channel_axis] not in {1, 3, 4}:
            logging.warning(
                'Expected input to be images (as Numpy array) '
                'following the data format convention "' +
                self.data_format + '" (channels on axis ' +
                str(self.channel_axis) + '), i.e. expected '
                'either 1, 3 or 4 channels on axis ' +
                str(self.channel_axis) + '. '
                'However, it was passed an array with shape ' +
                str(x.shape) + ' (' + str(x.shape[self.channel_axis]) +
                ' channels).')
        if seed is not None:
            np.random.seed(seed)
        x = np.copy(x)
        if augment:
            ax = np.zeros(
                tuple([rounds * x.shape[0]] + list(x.shape)[1:]),
                dtype=self.dtype)
            for r in range(rounds):
                for i in range(x.shape[0]):
                    ax[i + r * x.shape[0]] = self.random_transform(x[i])
            x = ax
        if self.featurewise_center:
            # Reduce over every axis except channels, then reshape so the
            # per-channel mean broadcasts against the 5D input.
            axis = (0, self.time_axis, self.row_axis, self.col_axis)
            self.mean = np.mean(x, axis=axis)
            broadcast_shape = [1, 1, 1, 1]
            broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
            self.mean = np.reshape(self.mean, broadcast_shape)
            x -= self.mean
        if self.featurewise_std_normalization:
            axis = (0, self.time_axis, self.row_axis, self.col_axis)
            self.std = np.std(x, axis=axis)
            broadcast_shape = [1, 1, 1, 1]
            broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
            self.std = np.reshape(self.std, broadcast_shape)
            x /= (self.std + K.epsilon())
        if self.zca_whitening:
            if scipy is None:
                raise ImportError('Using zca_whitening requires SciPy. '
                                  'Install SciPy.')
            flat_x = np.reshape(
                x, (x.shape[0],
                    x.shape[1] * x.shape[2] * x.shape[3] * x.shape[4]))
            sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
            u, s, _ = linalg.svd(sigma)
            s_inv = 1. / np.sqrt(s[np.newaxis] + self.zca_epsilon)
            self.principal_components = (u * s_inv).dot(u.T)
    def random_transform(self, x, y=None, seed=None):
        """Applies a random transformation to an image.
        Args:
            x (numpy.array): 4D tensor or list of 4D tensors.
            y (numpy.array): 4D tensor or list of 4D tensors,
                label mask(s) for x, optional.
            seed (int): Random seed.
        Returns:
            numpy.array: A randomly transformed copy of the input (same shape).
            If ``y`` is passed, it is transformed if necessary and returned.
        """
        # Workaround: temporarily shift axis indices down by one so the
        # per-frame 3D slices line up with the parent class's 4D axes.
        self.row_axis -= 1
        self.col_axis -= 1
        self.time_axis -= 1
        self.channel_axis -= 1
        x = x if isinstance(x, list) else [x]
        params = self.get_random_transform(x[0].shape, seed)
        for i in range(len(x)):
            x_i = x[i]
            for frame in range(x_i.shape[self.time_axis]):
                if self.data_format == 'channels_first':
                    x_trans = self.apply_transform(x_i[:, frame], params)
                    x_i[:, frame] = np.rollaxis(x_trans, -1, 0)
                else:
                    x_i[frame] = self.apply_transform(x_i[frame], params)
            x[i] = x_i
        x = x[0] if len(x) == 1 else x
        if y is not None:
            # Nullify the transforms that don't affect `y`
            params['brightness'] = None
            params['channel_shift_intensity'] = None
            _interpolation_order = self.interpolation_order
            y = y if isinstance(y, list) else [y]
            for i in range(len(y)):
                y_i = y[i]
                # Nearest-neighbor (order 0) for multi-channel categorical
                # masks; keep the original order for single-channel masks.
                order = 0 if y_i.shape[self.channel_axis] > 1 else _interpolation_order
                self.interpolation_order = order
                for frame in range(y_i.shape[self.time_axis]):
                    if self.data_format == 'channels_first':
                        y_trans = self.apply_transform(y_i[:, frame], params)
                        # NOTE(review): the x path rolls axis -1 to the
                        # front but this rolls axis 1 — confirm whether
                        # this asymmetry is intentional.
                        y_i[:, frame] = np.rollaxis(y_trans, 1, 0)
                    else:
                        y_i[frame] = self.apply_transform(y_i[frame], params)
                y[i] = y_i
            self.interpolation_order = _interpolation_order
            y = y[0] if len(y) == 1 else y
        # Note: Undo workaround
        self.row_axis += 1
        self.col_axis += 1
        self.time_axis += 1
        self.channel_axis += 1
        if y is None:
            return x
        return x, y
class Semantic3DIterator(Iterator):
    """Iterator yielding data from Numpy arrays (X and y).
    Args:
        train_dict (dict): Dictionary consisting of numpy arrays for ``X`` and ``y``.
        data_generator_3d (Semantic3DGenerator): ``Semantic3DGenerator``
            to use for random transformations and normalization.
        batch_size (int): Size of a batch.
        frames_per_batch (int): Size of z-axis in generated batches.
        frame_shape (tuple): Shape of the cropped frames.
        transforms (list): List of transform names applied to the label
            masks. Defaults to ``['outer-distance']``.
        transforms_kwargs (dict): Keyword arguments for each transform,
            keyed by transform name. Defaults to ``{}``.
        aug_3d (bool): Whether to rescale along z (by ``z_scale``) before
            augmenting, to enable isotropic 3D rotation.
        rotation_3d (int): Degree range for 3D rotations; requires
            ``z_scale`` when greater than 0.
        sampling: NOTE(review): accepted but never used in this class —
            confirm whether it should be removed or wired up.
        z_scale (float): Scaling factor between the z axis and the x/y axes.
        shuffle (bool): Whether to shuffle the data between epochs.
        seed (int): Random seed for data shuffling.
        min_objects (int): Minumum number of objects allowed per image.
        data_format (str): A string, one of ``channels_last`` (default)
            or ``channels_first``. The ordering of the dimensions in the
            inputs. ``channels_last`` corresponds to inputs with shape
            ``(batch, height, width, channels)`` while ``channels_first``
            corresponds to inputs with shape
            ``(batch, channels, height, width)``.
        save_to_dir (str): Optional directory where to save the pictures
            being yielded, in a viewable format. This is useful
            for visualizing the random transformations being
            applied, for debugging purposes.
        save_prefix (str): Prefix to use for saving sample
            images (if ``save_to_dir`` is set).
        save_format (str): Format to use for saving sample images
            (if ``save_to_dir`` is set).
    """
    def __init__(self,
                 train_dict,
                 data_generator_3d,
                 batch_size=1,
                 frames_per_batch=5,
                 frame_shape=None,
                 shuffle=False,
                 transforms=['outer-distance'],
                 transforms_kwargs={},
                 aug_3d=False,
                 rotation_3d=0,
                 sampling=None,
                 z_scale=None,
                 seed=None,
                 min_objects=3,
                 data_format='channels_last',
                 save_to_dir=None,
                 save_prefix='',
                 save_format='png'):
        # Load data
        if 'X' not in train_dict:
            raise ValueError('No training data found in train_dict')
        if 'y' not in train_dict:
            raise ValueError('Instance masks are required for the '
                             'Semantic3DIterator')
        X, y = train_dict['X'], train_dict['y']
        if X.shape[0] != y.shape[0]:
            raise ValueError('Training batches and labels should have the same'
                             f'length. Found X.shape: {X.shape} y.shape: {y.shape}')
        if X.ndim != 5:
            raise ValueError('Input data in `Semantic3DIterator` '
                             'should have rank 5. You passed an array '
                             'with shape', X.shape)
        if rotation_3d > 0 and not z_scale:
            raise ValueError('z_scaling factor required to rotate in 3d')
        def _scale_im(input_im, scale, order):
            # Rescale each batch element with channels last, then restore
            # the original dtype (order=0 for labels, order=1 for images).
            dtype = input_im.dtype
            batch_list = []
            for batch_num in range(input_im.shape[0]):
                batch = input_im[batch_num, ...]
                if data_format == 'channels_first':
                    batch = np.moveaxis(batch, 0, -1)
                    rescaled = rescale(batch, scale,
                                       order=order,
                                       preserve_range=True,
                                       channel_axis=-1)
                    rescaled = np.moveaxis(rescaled, -1, 0)
                else:
                    rescaled = rescale(batch, scale,
                                       order=order,
                                       preserve_range=True,
                                       channel_axis=-1)
                batch_list.append(rescaled)
            return np.stack(batch_list, axis=0).astype(dtype)
        if aug_3d and rotation_3d > 0:
            # Stretch the z axis so voxels are isotropic before 3D rotation;
            # frames_per_batch grows accordingly and the output is resized
            # back to `output_frames` after augmentation.
            scale = tuple([z_scale, 1, 1])
            X = _scale_im(X, scale, order=1)
            y = _scale_im(y, scale, order=0)
            self.output_frames = frames_per_batch
            frames_per_batch = int(round(frames_per_batch * z_scale))
        self.x = np.asarray(X, dtype=K.floatx())
        self.y = np.asarray(y, dtype='int32')
        self.frames_per_batch = frames_per_batch
        self.frame_shape = frame_shape
        self.transforms = transforms
        self.transforms_kwargs = transforms_kwargs
        self.aug_3d = aug_3d  # TODO: Add documentation
        self.rotation_3d = rotation_3d  # TODO: Add documentation
        self.z_scale = z_scale  # TODO: Add documentation
        # Axis indices within 5D batches: (batch, time, row, col, channel)
        # for channels_last, (batch, channel, time, row, col) otherwise.
        self.channel_axis = 4 if data_format == 'channels_last' else 1
        self.time_axis = 1 if data_format == 'channels_last' else 2
        self.row_axis = 2 if data_format == 'channels_last' else 3
        self.col_axis = 3 if data_format == 'channels_last' else 4
        self.data_generator_3d = data_generator_3d
        self.data_format = data_format
        self.min_objects = min_objects
        self.save_to_dir = save_to_dir
        self.save_prefix = save_prefix
        self.save_format = save_format
        if X.shape[self.time_axis] - frames_per_batch < 0:
            raise ValueError(
                'The number of frames used in each training batch should '
                'be less than the number of frames in the training data!'
                f'fpb is {frames_per_batch} and timeaxis is {X.shape[self.time_axis]}')
        invalid_batches = []
        # Remove images with small numbers of cells
        # TODO: make this work with the cropping implementation
        for b in range(self.x.shape[0]):
            if len(np.unique(self.y[b])) - 1 < self.min_objects:
                invalid_batches.append(b)
        invalid_batches = np.array(invalid_batches, dtype='int')
        if invalid_batches.size > 0:
            logging.warning('Removing %s of %s images with fewer than %s '
                            'objects.', invalid_batches.size, self.x.shape[0],
                            self.min_objects)
        self.x = np.delete(self.x, invalid_batches, axis=0)
        self.y = np.delete(self.y, invalid_batches, axis=0)
        super().__init__(
            self.x.shape[0], batch_size, shuffle, seed)
    def _transform_labels(self, y):
        """Apply each configured transform to every label channel of ``y``.
        Returns a list of transformed masks, one per (channel, transform)
        pair, each keeping the batch dimension of ``y``.
        """
        y_semantic_list = []
        # loop over channels axis of labels in case there are multiple label types
        for label_num in range(y.shape[self.channel_axis]):
            if self.channel_axis == 1:
                y_current = y[:, label_num:label_num + 1, ...]
            else:
                y_current = y[..., label_num:label_num + 1]
            for transform in self.transforms:
                transform_kwargs = self.transforms_kwargs.get(transform, dict())
                y_transform = _transform_masks(y_current, transform,
                                               data_format=self.data_format,
                                               **transform_kwargs)
                y_semantic_list.append(y_transform)
        return y_semantic_list
    def _get_batches_of_transformed_samples(self, index_array):
        """Build one batch: randomly crop a time window (and optionally a
        spatial window), transform labels, augment, standardize, and — when
        3D augmentation rescaled z — resize back to the requested shape."""
        if self.frame_shape:
            rows = self.frame_shape[0]
            cols = self.frame_shape[1]
        else:
            rows = self.x.shape[self.row_axis]
            cols = self.x.shape[self.col_axis]
        if self.data_format == 'channels_first':
            shape = (len(index_array), self.x.shape[1], self.frames_per_batch,
                     rows, cols)
        else:
            shape = (len(index_array), self.frames_per_batch,
                     rows, cols, self.x.shape[4])
        batch_x = np.zeros(shape, dtype=self.x.dtype)
        batch_y = []
        for i, j in enumerate(index_array):
            # Guard randint(0, 0), which raises ValueError when the movie
            # has exactly `frames_per_batch` frames.
            last_frame = self.x.shape[self.time_axis] - self.frames_per_batch
            if last_frame == 0:
                time_start = 0
            else:
                time_start = np.random.randint(0, high=last_frame)
            time_end = time_start + self.frames_per_batch
            if self.frame_shape:
                last_row = self.x.shape[self.row_axis] - self.frame_shape[0]
                last_col = self.x.shape[self.col_axis] - self.frame_shape[1]
                row_start = 0 if last_row == 0 else np.random.randint(0, high=last_row)
                col_start = 0 if last_col == 0 else np.random.randint(0, high=last_col)
                row_end = row_start + self.frame_shape[0]
                col_end = col_start + self.frame_shape[1]
            else:
                row_start, row_end = 0, self.x.shape[self.row_axis]
                col_start, col_end = 0, self.x.shape[self.col_axis]
            if self.time_axis == 1:
                x = self.x[j, time_start:time_end, row_start:row_end, col_start:col_end, :]
                y = self.y[j:j + 1, time_start:time_end, row_start:row_end, col_start:col_end, :]
            else:
                x = self.x[j, :, time_start:time_end, row_start:row_end, col_start:col_end]
                y = self.y[j:j + 1, :, time_start:time_end, row_start:row_end, col_start:col_end]
            # _transform_labels expects batch dimension
            y_semantic_list = self._transform_labels(y)
            # initialize batch_y
            if len(batch_y) == 0:
                for ys in y_semantic_list:
                    shape = tuple([len(index_array)] + list(ys.shape[1:]))
                    batch_y.append(np.zeros(shape, dtype=ys.dtype))
            # random_transform does not expect batch dimension
            y_semantic_list = [ys[0] for ys in y_semantic_list]
            # Apply transformation
            x, y_semantic_list = self.data_generator_3d.random_transform(
                x, y_semantic_list,
                aug_3d=self.aug_3d,
                rotation_3d=self.rotation_3d)
            x = self.data_generator_3d.standardize(x)
            batch_x[i] = x
            for k, y_sem in enumerate(y_semantic_list):
                batch_y[k][i] = y_sem
        if self.save_to_dir:
            time_axis = 2 if self.data_format == 'channels_first' else 1
            for i, j in enumerate(index_array):
                for frame in range(batch_x.shape[time_axis]):
                    if time_axis == 2:
                        img = array_to_img(batch_x[i, :, frame],
                                           self.data_format, scale=True)
                    else:
                        img = array_to_img(batch_x[i, frame],
                                           self.data_format, scale=True)
                    fname = '{prefix}_{index}_{hash}.{format}'.format(
                        prefix=self.save_prefix,
                        index=j,
                        hash=np.random.randint(1e4),
                        format=self.save_format)
                    img.save(os.path.join(self.save_to_dir, fname))
                    if self.y is not None:
                        # Save argmax of y batch
                        if self.time_axis == 2:
                            # NOTE(review): this argmax result is
                            # immediately overwritten two lines below —
                            # looks like dead code; confirm before removing.
                            img_y = np.argmax(batch_y[0][i, :, frame],
                                              axis=0)
                            img_channel_axis = 0
                            img_y = batch_y[0][i, :, frame]
                        else:
                            img_channel_axis = -1
                            img_y = batch_y[0][i, frame]
                        img_y = np.argmax(img_y, axis=img_channel_axis)
                        img_y = np.expand_dims(img_y,
                                               axis=img_channel_axis)
                        img = array_to_img(img_y, self.data_format,
                                           scale=True)
                        fname = 'y_{prefix}_{index}_{hash}.{format}'.format(
                            prefix=self.save_prefix,
                            index=j,
                            hash=np.random.randint(1e4),
                            format=self.save_format)
                        img.save(os.path.join(self.save_to_dir, fname))
        def _resize_im(input_im, shape, order):
            # Resize each batch element back to `shape` (channels last),
            # rounding multi-channel label values and restoring dtype.
            dtype = input_im.dtype
            batch_list = []
            for batch_num in range(input_im.shape[0]):
                batch = input_im[batch_num, ...]
                if self.data_format == 'channels_first':
                    batch = np.moveaxis(batch, 0, -1)
                    resized = resize(batch, shape, order=order, preserve_range=True)
                    resized = np.moveaxis(resized, -1, 0)
                    if resized.shape[0] > 1:
                        resized = np.around(resized, decimals=0)
                else:
                    resized = resize(batch, shape, order=order, preserve_range=True)
                    if resized.shape[-1] > 1:
                        resized = np.around(resized, decimals=0)
                batch_list.append(resized)
            return np.stack(batch_list, axis=0).astype(dtype)
        if self.aug_3d and self.rotation_3d > 0:
            # Undo the z-axis stretch applied in __init__ so the batch has
            # the originally requested number of frames.
            out_shape = tuple([self.output_frames, self.frame_shape[0], self.frame_shape[1]])
            batch_x = _resize_im(batch_x, out_shape, order=1)
            for y in range(len(batch_y)):
                batch_y[y] = _resize_im(batch_y[y], out_shape, order=0)
        return batch_x, batch_y
    def next(self):
        """For python 2.x. Returns the next batch.
        """
        # Keeps under lock only the mechanism which advances
        # the indexing of each batch.
        with self.lock:
            index_array = next(self.index_generator)
        # The transformation of images is not under thread lock
        # so it can be done in parallel
        return self._get_batches_of_transformed_samples(index_array)
class Semantic3DGenerator(ImageDataGenerator):
"""Generates batches of tensor image data with real-time data augmentation.
The data will be looped over (in batches).
Args:
featurewise_center (bool): Set input mean to 0 over the dataset,
feature-wise.
samplewise_center (bool): Set each sample mean to 0.
featurewise_std_normalization (bool): Divide inputs by std
of the dataset, feature-wise.
samplewise_std_normalization (bool): Divide each input by its std.
zca_epsilon (float): Epsilon for ZCA whitening. Default is 1e-6.
zca_whitening (bool): Apply ZCA whitening.
rotation_range (int): Degree range for random rotations.
width_shift_range (float): 1-D array-like or int
- float: fraction of total width, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
``(-width_shift_range, +width_shift_range)``
- With ``width_shift_range=2`` possible values are integers
``[-1, 0, +1]``, same as with ``width_shift_range=[-1, 0, +1]``,
while with ``width_shift_range=1.0`` possible values are floats
in the interval [-1.0, +1.0).
height_shift_range: Float, 1-D array-like or int
- float: fraction of total height, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
``(-height_shift_range, +height_shift_range)``
- With ``height_shift_range=2`` possible values
are integers ``[-1, 0, +1]``,
same as with ``height_shift_range=[-1, 0, +1]``,
while with ``height_shift_range=1.0`` possible values are floats
in the interval [-1.0, +1.0).
shear_range (float): Shear Intensity
(Shear angle in counter-clockwise direction in degrees)
zoom_range (float): float or [lower, upper], Range for random zoom.
If a float, ``[lower, upper] = [1-zoom_range, 1+zoom_range]``.
channel_shift_range (float): range for random channel shifts.
fill_mode (str): One of {"constant", "nearest", "reflect" or "wrap"}.
Default is 'nearest'. Points outside the boundaries of the input
are filled according to the given mode:
- 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
- 'nearest': aaaaaaaa|abcd|dddddddd
- 'reflect': abcddcba|abcd|dcbaabcd
- 'wrap': abcdabcd|abcd|abcdabcd
cval (float): Value used for points outside the boundaries
when ``fill_mode = "constant"``.
horizontal_flip (bool): Randomly flip inputs horizontally.
vertical_flip (bool): Randomly flip inputs vertically.
rescale: rescaling factor. Defaults to None. If None or 0, no rescaling
is applied, otherwise we multiply the data by the value provided
(before applying any other transformation).
preprocessing_function: function that will be implied on each input.
The function will run after the image is resized and augmented.
The function should take one argument:
one image (Numpy tensor with rank 3),
and should output a Numpy tensor with the same shape.
data_format (str): A string, one of ``channels_last`` (default)
or ``channels_first``. The ordering of the dimensions in the
inputs. ``channels_last`` corresponds to inputs with shape
``(batch, height, width, channels)`` while ``channels_first``
corresponds to inputs with shape
``(batch, channels, height, width)``.
validation_split (float): Fraction of images reserved for validation
(strictly between 0 and 1).
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
# Change the axes for 5D data
if self.data_format == 'channels_first':
self.channel_axis = 1
self.row_axis = 3
self.col_axis = 4
self.time_axis = 2
if self.data_format == 'channels_last':
self.channel_axis = 4
self.row_axis = 2
self.col_axis = 3
self.time_axis = 1
def flow(self,
train_dict,
batch_size=1,
frames_per_batch=5,
frame_shape=None,
transforms=['outer-distance'],
transforms_kwargs={},
aug_3d=False,
rotation_3d=0,
z_scale=None,
shuffle=True,
min_objects=3,
seed=None,
save_to_dir=None,
save_prefix='',
save_format='png'):
"""Generates batches of augmented/normalized data with given arrays.
Args:
train_dict (dict): Consists of numpy arrays for ``X`` and ``y``.
batch_size (int): Size of a batch.
frames_per_batch (int): Size of z axis in generated batches.
shuffle (bool): Whether to shuffle the data between epochs.
seed (int): Random seed for data shuffling.
min_objects (int): Images with fewer than ``min_objects``
are ignored.
save_to_dir (str): Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix (str): Prefix to use for saving sample
images (if ``save_to_dir`` is set).
save_format (str): Format to use for saving sample images
(if ``save_to_dir`` is set).
Returns:
Semantic3DIterator: An ``Iterator`` yielding tuples of ``(x, y)``,
where ``x`` is a numpy array of image data and ``y`` is list of
numpy arrays of transformed masks of the same shape.
"""
return Semantic3DIterator(
train_dict,
self,
batch_size=batch_size,
frames_per_batch=frames_per_batch,
frame_shape=frame_shape,
transforms=transforms,
transforms_kwargs=transforms_kwargs,
aug_3d=aug_3d,
rotation_3d=rotation_3d,
z_scale=z_scale,
shuffle=shuffle,
min_objects=min_objects,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format)
def standardize(self, x):
"""Apply the normalization configuration to a batch of inputs.
Args:
x (numpy.array): batch of inputs to be normalized.
Returns:
numpy.array: The normalized inputs.
"""
# TODO: standardize each image, not all frames at once
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
# x is a single image, so it doesn't have image number at index 0
img_channel_axis = self.channel_axis - 1
if self.samplewise_center:
x -= np.mean(x, axis=img_channel_axis, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, axis=img_channel_axis, keepdims=True) +
K.epsilon())
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
logging.warning('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, but it '
'hasn\'t been fit on any training data. '
'Fit it first by calling `.fit(numpy_data)`.')
if self.featurewise_std_normalization:
if self.std is not None:
x /= (self.std + K.epsilon())
else:
logging.warning('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, but it '
'hasn\'t been fit on any training data. Fit '
'it first by calling `.fit(numpy_data)`.')
if self.zca_whitening:
if self.principal_components is not None:
flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, x.shape)
else:
logging.warning('This ImageDataGenerator specifies '
'`zca_whitening`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
return x
    def fit(self, x, augment=False, rounds=1, seed=None):
        """Fits internal statistics to some sample data.

        Required for featurewise_center, featurewise_std_normalization
        and zca_whitening.

        Args:
            x (numpy.array): The data to fit on. Should have rank 5.
            augment (bool): Whether to fit on randomly augmented samples.
            rounds (int): If augment,
                how many augmentation passes to do over the data.
            seed (int): Random seed for data shuffling.

        Raises:
            ValueError: If input rank is not 5.
            ImportError: If zca_whitening is used and scipy is not available.
        """
        x = np.asarray(x, dtype=self.dtype)
        if x.ndim != 5:
            raise ValueError('Input to `.fit()` should have rank 5. '
                             'Got array with shape: ' + str(x.shape))
        # Unusual channel counts are allowed but probably indicate a
        # mixed-up data_format, so warn instead of raising.
        if x.shape[self.channel_axis] not in {1, 3, 4}:
            logging.warning(
                'Expected input to be images (as Numpy array) following the '
                'data format convention "{0}" (channels on axis {1}), i.e. '
                'expected either 1, 3, or 4 channels on axis {1}. '
                'However, it was passed an array with shape {2} ({3}) '
                'channels.'.format(
                    self.data_format,
                    self.channel_axis,
                    x.shape,
                    x.shape[self.channel_axis]
                ))
        if seed is not None:
            np.random.seed(seed)

        # Work on a copy so augmentation/normalization never mutates the
        # caller's array.
        x = np.copy(x)
        if augment:
            # Collect `rounds` randomly transformed copies of every sample.
            ax = np.zeros(tuple([rounds * x.shape[0]] + list(x.shape)[1:]),
                          dtype=self.dtype)
            for r in range(rounds):
                for i in range(x.shape[0]):
                    ax[i + r * x.shape[0]] = self.random_transform(x[i])
            x = ax

        if self.featurewise_center:
            # Reduce over batch, time, rows and cols, keeping one value per
            # channel; reshape so it broadcasts against the 5D input.
            axis = (0, self.time_axis, self.row_axis, self.col_axis)
            self.mean = np.mean(x, axis=axis)
            broadcast_shape = [1, 1, 1, 1]
            broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
            self.mean = np.reshape(self.mean, broadcast_shape)
            x -= self.mean

        if self.featurewise_std_normalization:
            axis = (0, self.time_axis, self.row_axis, self.col_axis)
            self.std = np.std(x, axis=axis)
            broadcast_shape = [1, 1, 1, 1]
            broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
            self.std = np.reshape(self.std, broadcast_shape)
            # Epsilon guards against division by zero for constant features.
            x /= (self.std + K.epsilon())

        if self.zca_whitening:
            if scipy is None:
                raise ImportError('Using zca_whitening requires SciPy. '
                                  'Install SciPy.')
            # Flatten every sample, compute the covariance and derive the
            # ZCA whitening matrix from its SVD.
            shape = (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3] * x.shape[4])
            flat_x = np.reshape(x, shape)
            sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
            u, s, _ = linalg.svd(sigma)
            s_inv = 1. / np.sqrt(s[np.newaxis] + self.zca_epsilon)
            self.principal_components = (u * s_inv).dot(u.T)
def random_transform(self, x, y=None, seed=None, aug_3d=False, rotation_3d=0):
"""Applies a random transformation to an image.
Args:
x (numpy.array): 4D tensor or list of 4D tensors.
y (numpy.array): 4D tensor or list of 4D tensors,
label mask(s) for ``x``, optional.
seed (int): Random seed.
Returns:
numpy.array: A randomly transformed copy of the input (same shape).
If ``y`` is passed, it is transformed if necessary and returned.
"""
self.row_axis -= 1
self.col_axis -= 1
self.time_axis -= 1
self.channel_axis -= 1
x = x if isinstance(x, list) else [x]
params = self.get_random_transform(x[0].shape, seed)
# Don't want to brighten or zoom multiple times
_brightness_range = self.brightness_range
_zoom_range = self.zoom_range
_rotation_range = self.rotation_range
self.brightness_range = None
self.zoom_range = (1, 1)
self.rotation_range = rotation_3d
# Set params for 3d_augmentation with rotation set to 0
# Compatible with anisotropic data (with sampling not 1:1:1)
params_3d = self.get_random_transform(np.moveaxis(x[0], 0, 1).shape, seed)
self.brightness_range = _brightness_range
self.zoom_range = _zoom_range
self.rotation_range = _rotation_range
for i in range(len(x)):
x_i = x[i]
for frame in range(x_i.shape[self.time_axis]):
if self.data_format == 'channels_first':
x_trans = self.apply_transform(x_i[:, frame], params)
x_i[:, frame] = np.rollaxis(x_trans, -1, 0)
else:
x_i[frame] = self.apply_transform(x_i[frame], params)
if aug_3d:
for frame in range(x_i.shape[self.row_axis]):
if self.data_format == 'channels_first':
x_trans = self.apply_transform(x_i[:, :, frame], params_3d)
x_i[:, :, frame] = np.rollaxis(x_trans, -1, 0)
else:
x_i[:, frame] = self.apply_transform(x_i[:, frame], params_3d)
for frame in range(x_i.shape[self.col_axis]):
if self.data_format == 'channels_first':
x_trans = self.apply_transform(x_i[..., frame], params_3d)
x_i[..., frame] = np.rollaxis(x_trans, -1, 0)
else:
x_i[:, :, frame] = self.apply_transform(x_i[:, :, frame], params_3d)
x[i] = x_i
x = x[0] if len(x) == 1 else x
if y is not None:
params['brightness'] = None
params['channel_shift_intensity'] = None
_interpolation_order = self.interpolation_order
y = y if isinstance(y, list) else [y]
for i in range(len(y)):
y_i = y[i]
order = 0 if y_i.shape[self.channel_axis] > 1 else _interpolation_order
self.interpolation_order = order
for frame in range(y_i.shape[self.time_axis]):
if self.data_format == 'channels_first':
y_trans = self.apply_transform(y_i[:, frame], params)
y_i[:, frame] = np.rollaxis(y_trans, 1, 0)
else:
y_i[frame] = self.apply_transform(y_i[frame], params)
# Augment masks in 3D
if aug_3d:
for frame in range(y_i.shape[self.row_axis]):
if self.data_format == 'channels_first':
y_trans = self.apply_transform(y_i[:, :, frame], params_3d)
y_i[:, :, frame] = np.moveaxis(y_trans, -1, 0)
else:
y_i[:, frame] = self.apply_transform(y_i[:, frame], params_3d)
for frame in range(y_i.shape[self.col_axis]):
if self.data_format == 'channels_first':
y_trans = self.apply_transform(y_i[..., frame], params_3d)
y_i[..., frame] = np.moveaxis(y_trans, -1, 0)
else:
y_i[:, :, frame] = self.apply_transform(y_i[:, :, frame], params_3d)
y[i] = y_i
self.interpolation_order = _interpolation_order
y = y[0] if len(y) == 1 else y
# Note: Undo workaround
self.row_axis += 1
self.col_axis += 1
self.time_axis += 1
self.channel_axis += 1
if y is None:
return x
return x, y | PypiClean |
/BurnerOnFire-0.1.tar.gz/BurnerOnFire-0.1/docs/build/html/_static/doctools.js | if (!window.console || !console.firebug) {
var names = ["log", "debug", "info", "warn", "error", "assert", "dir", "dirxml",
"group", "groupEnd", "time", "timeEnd", "count", "trace", "profile", "profileEnd"];
window.console = {};
for (var i = 0; i < names.length; ++i)
window.console[names[i]] = function() {}
}
/**
 * Small helper: URL-decode a string, mapping '+' to a space afterwards.
 */
jQuery.urldecode = function(x) {
  var decoded = decodeURIComponent(x);
  return decoded.replace(/\+/g, ' ');
};
/**
 * Small helper: URL-encode a string.
 */
jQuery.urlencode = function(s) {
  return encodeURIComponent(s);
};
/**
 * Parse the query string of the given URL (or of the current request when
 * no argument is passed) into an object. Multiple values per key are
 * supported; every value is returned as an array of strings.
 */
jQuery.getQueryParameters = function(s) {
  if (typeof s == 'undefined')
    s = document.location.search;
  var result = {};
  var pairs = s.substr(s.indexOf('?') + 1).split('&');
  for (var idx = 0; idx < pairs.length; idx++) {
    var pieces = pairs[idx].split('=', 2);
    var name = jQuery.urldecode(pieces[0]);
    var value = jQuery.urldecode(pieces[1]);
    if (name in result)
      result[name].push(value);
    else
      result[name] = [value];
  }
  return result;
}
/**
 * Check whether *arr* contains *item*. Uses loose equality (==), matching
 * the original behavior.
 */
jQuery.contains = function(arr, item) {
  var count = arr.length;
  for (var idx = 0; idx < count; idx++) {
    if (arr[idx] == item)
      return true;
  }
  return false;
}
/**
 * highlight a given string on a jquery object by wrapping it in
 * span elements with the given class name.
 */
jQuery.fn.highlightText = function(text, className) {
  function highlight(node) {
    if (node.nodeType == 3) {
      // Text node: look for the term (case-insensitive, first match only).
      var val = node.nodeValue;
      var pos = val.toLowerCase().indexOf(text);
      if (pos >= 0 && !jQuery.className.has(node.parentNode, className)) {
        // Split the text node into: prefix (kept in `node`), the
        // highlighted <span>, and the remainder text node after it.
        // NOTE(review): jQuery.className.has is a jQuery-internal API that
        // was removed in later jQuery versions — confirm the bundled
        // jQuery still provides it.
        var span = document.createElement("span");
        span.className = className;
        span.appendChild(document.createTextNode(val.substr(pos, text.length)));
        node.parentNode.insertBefore(span, node.parentNode.insertBefore(
          document.createTextNode(val.substr(pos + text.length)),
          node.nextSibling));
        node.nodeValue = val.substr(0, pos);
      }
    }
    else if (!jQuery(node).is("button, select, textarea")) {
      // Element node: recurse into children, but never into form controls.
      jQuery.each(node.childNodes, function() {
        highlight(this)
      });
    }
  }
  return this.each(function() {
    highlight(this);
  });
}
/**
 * Small JavaScript module for the documentation.
 */
var Documentation = {

  init : function() {
    this.fixFirefoxAnchorBug();
    this.highlightSearchWords();
    this.initModIndex();
  },

  /**
   * i18n support
   */
  TRANSLATIONS : {},
  PLURAL_EXPR : function(n) { return n == 1 ? 0 : 1; },
  LOCALE : 'unknown',

  // gettext and ngettext don't access this so that the functions
  // can savely bound to a different name (_ = Documentation.gettext)
  gettext : function(string) {
    var translated = Documentation.TRANSLATIONS[string];
    if (typeof translated == 'undefined')
      return string;
    return (typeof translated == 'string') ? translated : translated[0];
  },

  ngettext : function(singular, plural, n) {
    var translated = Documentation.TRANSLATIONS[singular];
    if (typeof translated == 'undefined')
      return (n == 1) ? singular : plural;
    // Bug fix: this previously called Documentation.PLURALEXPR, which is
    // not defined anywhere (the property is PLURAL_EXPR), so pluralized
    // translations always raised a TypeError.
    return translated[Documentation.PLURAL_EXPR(n)];
  },

  addTranslations : function(catalog) {
    for (var key in catalog.messages)
      this.TRANSLATIONS[key] = catalog.messages[key];
    this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')');
    this.LOCALE = catalog.locale;
  },

  /**
   * add context elements like header anchor links
   */
  addContextElements : function() {
    $('div[id] > :header:first').each(function() {
      $('<a class="headerlink">\u00B6</a>').
      attr('href', '#' + this.id).
      attr('title', _('Permalink to this headline')).
      appendTo(this);
    });
    $('dt[id]').each(function() {
      $('<a class="headerlink">\u00B6</a>').
      attr('href', '#' + this.id).
      attr('title', _('Permalink to this definition')).
      appendTo(this);
    });
  },

  /**
   * workaround a firefox stupidity
   */
  fixFirefoxAnchorBug : function() {
    // NOTE(review): $.browser was removed in jQuery >= 1.9 — confirm the
    // bundled jQuery version still provides it.
    if (document.location.hash && $.browser.mozilla)
      window.setTimeout(function() {
        document.location.href += '';
      }, 10);
  },

  /**
   * highlight the search words provided in the url in the text
   */
  highlightSearchWords : function() {
    var params = $.getQueryParameters();
    var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : [];
    if (terms.length) {
      var body = $('div.body');
      window.setTimeout(function() {
        $.each(terms, function() {
          body.highlightText(this.toLowerCase(), 'highlight');
        });
      }, 10);
      $('<li class="highlight-link"><a href="javascript:Documentation.' +
        'hideSearchWords()">' + _('Hide Search Matches') + '</a></li>')
        .appendTo($('.sidebar .this-page-menu'));
    }
  },

  /**
   * init the modindex toggle buttons
   */
  initModIndex : function() {
    var togglers = $('img.toggler').click(function() {
      var src = $(this).attr('src');
      var idnum = $(this).attr('id').substr(7);
      console.log($('tr.cg-' + idnum).toggle());
      if (src.substr(-9) == 'minus.png')
        $(this).attr('src', src.substr(0, src.length-9) + 'plus.png');
      else
        $(this).attr('src', src.substr(0, src.length-8) + 'minus.png');
    }).css('display', '');
    if (DOCUMENTATION_OPTIONS.COLLAPSE_MODINDEX) {
      togglers.click();
    }
  },

  /**
   * helper function to hide the search marks again
   */
  hideSearchWords : function() {
    $('.sidebar .this-page-menu li.highlight-link').fadeOut(300);
    $('span.highlight').removeClass('highlight');
  },

  /**
   * make the url absolute
   */
  makeURL : function(relativeURL) {
    return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL;
  },

  /**
   * get the current relative url
   */
  getCurrentURL : function() {
    var path = document.location.pathname;
    var parts = path.split(/\//);
    $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() {
      if (this == '..')
        parts.pop();
    });
    var url = parts.join('/');
    return path.substring(url.lastIndexOf('/') + 1, path.length - 1);
  }
};
// quick alias for translations
_ = Documentation.gettext;

// Initialize the documentation helpers once the DOM is ready.
$(document).ready(function() {
  Documentation.init();
});
/Nuitka_fixed-1.1.2-cp310-cp310-win_amd64.whl/nuitka/nodes/StatementNodes.py | from .NodeBases import StatementBase, StatementChildHavingBase
def checkStatements(value):
    """Check the statements list value property.

    Must not be None, must not contain None, and may only contain
    statements or statement frames; an empty sequence is allowed.
    Returns the value as a tuple.
    """
    assert value is not None
    assert None not in value

    for node in value:
        assert node.isStatement() or node.isStatementsFrame(), node.asXmlText()

    return tuple(value)
class StatementsSequence(StatementChildHavingBase):
    """Sequence of statements, e.g. a function body or branch body."""

    kind = "STATEMENTS_SEQUENCE"

    named_child = "statements"
    # Validates/normalizes the child list on assignment.
    checker = checkStatements

    def __init__(self, statements, source_ref):
        StatementChildHavingBase.__init__(
            self, value=tuple(statements), source_ref=source_ref
        )

    def finalize(self):
        # Break parent/child cycles so the tree can be released.
        del self.parent

        for s in self.subnode_statements:
            s.finalize()

    @staticmethod
    def isStatementsSequence():
        return True

    @staticmethod
    def isStatement():
        return False

    @staticmethod
    def isExpression():
        return False

    def trimStatements(self, statement):
        """Drop every statement after *statement* (inclusive cut point)."""
        assert statement.parent is self

        old_statements = list(self.subnode_statements)
        assert statement in old_statements, (statement, self)

        new_statements = old_statements[: old_statements.index(statement) + 1]

        self.setChild("statements", new_statements)

    def removeStatement(self, statement):
        """Remove *statement*; return self, or None if the sequence emptied."""
        assert statement.parent is self

        statements = list(self.subnode_statements)
        statements.remove(statement)
        self.setChild("statements", statements)

        if statements:
            return self
        else:
            return None

    def replaceStatement(self, statement, statements):
        """Replace one child *statement* with the given list of statements."""
        old_statements = list(self.subnode_statements)

        merge_index = old_statements.index(statement)

        new_statements = (
            tuple(old_statements[:merge_index])
            + tuple(statements)
            + tuple(old_statements[merge_index + 1 :])
        )

        self.setChild("statements", new_statements)

    def mayHaveSideEffects(self):
        # Statement sequences have a side effect if one of the statements does.
        for statement in self.subnode_statements:
            if statement.mayHaveSideEffects():
                return True
        return False

    def mayRaiseException(self, exception_type):
        # Raises if any contained statement may raise.
        for statement in self.subnode_statements:
            if statement.mayRaiseException(exception_type):
                return True
        return False

    def needsFrame(self):
        # A frame is needed if any contained statement needs one.
        for statement in self.subnode_statements:
            if statement.needsFrame():
                return True
        return False

    def mayReturn(self):
        for statement in self.subnode_statements:
            if statement.mayReturn():
                return True
        return False

    def mayBreak(self):
        for statement in self.subnode_statements:
            if statement.mayBreak():
                return True
        return False

    def mayContinue(self):
        for statement in self.subnode_statements:
            if statement.mayContinue():
                return True
        return False

    def mayRaiseExceptionOrAbort(self, exception_type):
        # Any non-linear exit (raise/return/break/continue) counts as abort.
        return (
            self.mayRaiseException(exception_type)
            or self.mayReturn()
            or self.mayBreak()
            or self.mayContinue()
        )

    def isStatementAborting(self):
        # The sequence aborts iff its last statement aborts.
        return self.subnode_statements[-1].isStatementAborting()

    def computeStatement(self, trace_collection):
        # Don't want to be called like this.
        assert False, self

    def computeStatementsSequence(self, trace_collection):
        """Optimize all children; flatten nested sequences; drop dead code.

        Returns self (possibly with updated children) or None when all
        statements were optimized away.
        """
        new_statements = []

        statements = self.subnode_statements
        assert statements, self

        for count, statement in enumerate(statements):
            # May be frames embedded.
            if statement.isStatementsFrame():
                new_statement = statement.computeStatementsSequence(trace_collection)
            else:
                new_statement = trace_collection.onStatement(statement=statement)

            if new_statement is not None:
                # Inline plain sub-sequences (but keep frames intact).
                if (
                    new_statement.isStatementsSequence()
                    and not new_statement.isStatementsFrame()
                ):
                    new_statements.extend(new_statement.subnode_statements)
                else:
                    new_statements.append(new_statement)

                # Everything after an aborting statement is dead code.
                if (
                    statement is not statements[-1]
                    and new_statement.isStatementAborting()
                ):
                    trace_collection.signalChange(
                        "new_statements",
                        statements[count + 1].getSourceReference(),
                        "Removed dead statements.",
                    )

                    for s in statements[statements.index(statement) + 1 :]:
                        s.finalize()

                    break

        # NOTE(review): `statements` is a tuple while `new_statements` is a
        # list, so this comparison is always True in CPython even when the
        # contents are identical — presumably intended to compare contents;
        # verify against upstream before changing.
        if statements != new_statements:
            if new_statements:
                self.setChild("statements", new_statements)

                return self
            else:
                return None
        else:
            return self

    @staticmethod
    def getStatementNiceName():
        return "statements sequence"
class StatementExpressionOnly(StatementChildHavingBase):
    """Statement that evaluates an expression and discards its value."""

    kind = "STATEMENT_EXPRESSION_ONLY"

    named_child = "expression"

    def __init__(self, expression, source_ref):
        assert expression.isExpression()

        StatementChildHavingBase.__init__(
            self, value=expression, source_ref=source_ref
        )

    def mayHaveSideEffects(self):
        # Side effects come solely from the wrapped expression.
        expression = self.subnode_expression
        return expression.mayHaveSideEffects()

    def mayRaiseException(self, exception_type):
        expression = self.subnode_expression
        return expression.mayRaiseException(exception_type)

    def computeStatement(self, trace_collection):
        # First optimize the child expression, then let it decide what
        # discarding its value should be reduced to.
        expression = trace_collection.onExpression(
            expression=self.subnode_expression
        )

        return expression.computeExpressionDrop(
            statement=self,
            trace_collection=trace_collection,
        )

    def getDetailsForDisplay(self):
        return {"expression": self.subnode_expression.kind}

    @staticmethod
    def getStatementNiceName():
        return "expression only statement"
class StatementPreserveFrameException(StatementBase):
    """Statement that preserves the current frame's exception state."""

    kind = "STATEMENT_PRESERVE_FRAME_EXCEPTION"

    __slots__ = ("preserver_id",)

    def __init__(self, preserver_id, source_ref):
        StatementBase.__init__(self, source_ref=source_ref)

        self.preserver_id = preserver_id

    def finalize(self):
        del self.parent

    def getDetails(self):
        return {"preserver_id": self.preserver_id}

    def getPreserverId(self):
        return self.preserver_id

    def computeStatement(self, trace_collection):
        # For Python2 generators, it's not necessary to preserve, the frame
        # decides it. TODO: This check makes only sense once.
        if not self.getParentStatementsFrame().needsExceptionFramePreservation():
            return (
                None,
                "new_statements",
                "Removed frame preservation for generators.",
            )

        return self, None, None

    @staticmethod
    def mayRaiseException(exception_type):
        return False

    @staticmethod
    def needsFrame():
        return True
class StatementRestoreFrameException(StatementBase):
    """Statement that restores a previously preserved frame exception."""

    kind = "STATEMENT_RESTORE_FRAME_EXCEPTION"

    __slots__ = ("preserver_id",)

    def __init__(self, preserver_id, source_ref):
        StatementBase.__init__(self, source_ref=source_ref)

        self.preserver_id = preserver_id

    def finalize(self):
        del self.parent

    def getPreserverId(self):
        return self.preserver_id

    def getDetails(self):
        # Preserver id pairs this restore with its preserve statement.
        return {"preserver_id": self.preserver_id}

    def computeStatement(self, trace_collection):
        # Nothing to optimize here; restoring is always kept.
        return self, None, None

    @staticmethod
    def mayRaiseException(exception_type):
        return False
class StatementPublishException(StatementBase):
    """Statement that publishes the current exception to the thread state."""

    kind = "STATEMENT_PUBLISH_EXCEPTION"

    def __init__(self, source_ref):
        StatementBase.__init__(self, source_ref=source_ref)

    def finalize(self):
        del self.parent

    def computeStatement(self, trace_collection):
        # TODO: Determine the need for it.
        return self, None, None

    @staticmethod
    def mayRaiseException(exception_type):
        # Publishing an exception cannot itself raise.
        # Bug fix: the original return carried stray trailing text
        # ("| PypiClean") that would have raised a NameError at runtime.
        return False
/ESMValCore-2.9.0rc1.tar.gz/ESMValCore-2.9.0rc1/esmvalcore/cmor/_fixes/emac/emac.py | import logging
from shutil import copyfile
import dask.array as da
import iris.analysis
import iris.util
from iris import NameConstraint
from iris.aux_factory import HybridPressureFactory
from iris.cube import CubeList
from netCDF4 import Dataset
from scipy import constants
from ..shared import add_aux_coords_from_cubes
from ._base_fixes import EmacFix, NegateData
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
class AllVars(EmacFix):
    """Fixes for all variables."""

    # Dictionary to map invalid units in the data to valid entries
    INVALID_UNITS = {
        'kg/m**2s': 'kg m-2 s-1',
    }

    def fix_file(self, filepath, output_dir, add_unique_suffix=False):
        """Fix file.

        Fixes hybrid pressure level coordinate.

        Note
        ----
        This fix removes the ``formula_terms`` attribute of the hybrid pressure
        level variables to make the corresponding coefficients appear correctly
        in the class:`iris.cube.CubeList` object returned by :mod:`iris.load`.

        """
        # Only files with hybrid pressure levels need the rewrite.
        if 'alevel' not in self.vardef.dimensions:
            return filepath
        new_path = self.get_fixed_filepath(
            output_dir, filepath, add_unique_suffix=add_unique_suffix
        )
        # Work on a copy so the original input file stays untouched.
        copyfile(filepath, new_path)
        with Dataset(new_path, mode='a') as dataset:
            if 'formula_terms' in dataset.variables['lev'].ncattrs():
                del dataset.variables['lev'].formula_terms
            if 'formula_terms' in dataset.variables['ilev'].ncattrs():
                del dataset.variables['ilev'].formula_terms
        return new_path

    def fix_metadata(self, cubes):
        """Fix metadata."""
        cube = self.get_cube(cubes)

        # Fix time, latitude, and longitude coordinates
        self.fix_regular_time(cube)
        self.fix_regular_lat(cube)
        self.fix_regular_lon(cube)

        # Fix regular pressure levels (considers plev19, plev39, etc.)
        if self.vardef.has_coord_with_standard_name('air_pressure'):
            self._fix_plev(cube)

        # Fix hybrid pressure levels
        if 'alevel' in self.vardef.dimensions:
            cube = self._fix_alevel(cube, cubes)

        # Fix scalar coordinates
        self.fix_scalar_coords(cube)

        # Fix metadata of variable
        self.fix_var_metadata(cube)

        return CubeList([cube])

    def _fix_plev(self, cube):
        """Fix regular pressure level coordinate of cube.

        Raises:
            ValueError: If no Z-coordinate with units convertible to Pa
                can be found on the cube.
        """
        for coord in cube.coords():
            coord_type = iris.util.guess_coord_axis(coord)

            if coord_type != 'Z':
                continue
            if not coord.units.is_convertible('Pa'):
                continue

            self.fix_plev_metadata(cube, coord)

            return

        raise ValueError(
            f"Cannot find requested pressure level coordinate for variable "
            f"'{self.vardef.short_name}', searched for Z-coordinates with "
            f"units that are convertible to Pa")

    @staticmethod
    def _fix_alevel(cube, cubes):
        """Fix hybrid pressure level coordinate of cube."""
        # Add coefficients for hybrid pressure level coordinate
        coords_to_add = {
            'hyam': 1,
            'hybm': 1,
            'aps_ave': (0, 2, 3),
        }
        add_aux_coords_from_cubes(cube, cubes, coords_to_add)

        # Reverse entire cube along Z-axis so that index 0 is surface level
        # Note: This would automatically be fixed by the CMOR checker, but this
        # fails to fix the bounds of ap and b
        cube = iris.util.reverse(cube, cube.coord(var_name='lev'))

        # Adapt metadata of coordinates
        lev_coord = cube.coord(var_name='lev')
        ap_coord = cube.coord(var_name='hyam')
        b_coord = cube.coord(var_name='hybm')
        ps_coord = cube.coord(var_name='aps_ave')

        lev_coord.var_name = 'lev'
        lev_coord.standard_name = 'atmosphere_hybrid_sigma_pressure_coordinate'
        lev_coord.long_name = 'hybrid sigma pressure coordinate'
        lev_coord.units = '1'
        lev_coord.attributes['positive'] = 'down'

        ap_coord.var_name = 'ap'
        ap_coord.standard_name = None
        ap_coord.long_name = 'vertical coordinate formula term: ap(k)'
        ap_coord.attributes = {}

        b_coord.var_name = 'b'
        b_coord.standard_name = None
        b_coord.long_name = 'vertical coordinate formula term: b(k)'
        b_coord.attributes = {}

        ps_coord.var_name = 'ps'
        ps_coord.standard_name = 'surface_air_pressure'
        ps_coord.long_name = 'Surface Air Pressure'
        ps_coord.attributes = {}

        # Add bounds for coefficients
        # (make sure to reverse cubes beforehand so index 0 is surface level)
        ap_bnds_cube = iris.util.reverse(
            cubes.extract_cube(NameConstraint(var_name='hyai')),
            0,
        )
        b_bnds_cube = iris.util.reverse(
            cubes.extract_cube(NameConstraint(var_name='hybi')),
            0,
        )
        # Interface values (n+1 levels) become per-level bound pairs.
        ap_bounds = da.stack(
            [ap_bnds_cube.core_data()[:-1], ap_bnds_cube.core_data()[1:]],
            axis=-1,
        )
        b_bounds = da.stack(
            [b_bnds_cube.core_data()[:-1], b_bnds_cube.core_data()[1:]],
            axis=-1,
        )
        ap_coord.bounds = ap_bounds
        b_coord.bounds = b_bounds

        # Convert arrays to float64
        for coord in (ap_coord, b_coord, ps_coord):
            coord.points = coord.core_points().astype(
                float, casting='same_kind')
            if coord.bounds is not None:
                coord.bounds = coord.core_bounds().astype(
                    float, casting='same_kind')

        # Fix values of lev coordinate
        # Note: lev = a + b with a = ap / p0 (p0 = 100000 Pa)
        lev_coord.points = (ap_coord.core_points() / 100000.0 +
                            b_coord.core_points())
        lev_coord.bounds = (ap_coord.core_bounds() / 100000.0 +
                            b_coord.core_bounds())

        # Add HybridPressureFactory
        pressure_coord_factory = HybridPressureFactory(
            delta=ap_coord,
            sigma=b_coord,
            surface_air_pressure=ps_coord,
        )
        cube.add_aux_factory(pressure_coord_factory)

        return cube
class Clwvi(EmacFix):
    """Fixes for ``clwvi`` (condensed water path = liquid + ice)."""

    def fix_metadata(self, cubes):
        """Fix metadata."""
        liquid = self.get_cube(
            cubes, var_name=['xlvi_cav', 'xlvi_ave', 'xlvi'])
        ice = self.get_cube(
            cubes, var_name=['xivi_cav', 'xivi_ave', 'xivi'])
        cube = liquid + ice
        cube.var_name = self.vardef.short_name
        return CubeList([cube])
# Fluxes whose sign convention in EMAC is opposite to CMOR: negate the data.
Evspsbl = NegateData

Hfls = NegateData

Hfss = NegateData
class Od550aer(EmacFix):
    """Fixes for ``od550aer``."""

    def fix_metadata(self, cubes):
        """Fix metadata by summing the optical depth over all levels."""
        fixed_cubes = super().fix_metadata(cubes)
        cube = self.get_cube(fixed_cubes)
        cube = cube.collapsed(cube.coord(axis='Z'), iris.analysis.SUM)
        return CubeList([cube])
class Pr(EmacFix):
    """Fixes for ``pr``."""

    def fix_metadata(self, cubes):
        """Fix metadata (precipitation = large-scale + convective)."""
        large_scale = self.get_cube(
            cubes, var_name=['aprl_cav', 'aprl_ave', 'aprl'])
        convective = self.get_cube(
            cubes, var_name=['aprc_cav', 'aprc_ave', 'aprc'])
        cube = large_scale + convective
        cube.var_name = self.vardef.short_name
        return CubeList([cube])
class Rlds(EmacFix):
    """Fixes for ``rlds``."""

    def fix_metadata(self, cubes):
        """Fix metadata.

        Surface downwelling longwave radiation is the net thermal flux at
        the surface minus the upwelling part.
        """
        # Bug fix: the non-suffixed fallback name was 'flxsbot' (a shortwave
        # variable, as used by Rsds), which is inconsistent with the
        # 'flxtbot_cav'/'flxtbot_ave' thermal-flux names; use 'flxtbot'.
        cube = (
            self.get_cube(cubes, var_name=['flxtbot_cav', 'flxtbot_ave',
                                           'flxtbot']) -
            self.get_cube(cubes, var_name=['tradsu_cav', 'tradsu_ave',
                                           'tradsu'])
        )
        cube.var_name = self.vardef.short_name
        return CubeList([cube])
# Upwelling/outgoing longwave fluxes are negated to match CMOR sign rules.
Rlus = NegateData

Rlut = NegateData

Rlutcs = NegateData
class Rsds(EmacFix):
    """Fixes for ``rsds``."""

    def fix_metadata(self, cubes):
        """Fix metadata (downwelling = net solar at surface - upwelling)."""
        net_flux = self.get_cube(
            cubes, var_name=['flxsbot_cav', 'flxsbot_ave', 'flxsbot'])
        upwelling = self.get_cube(
            cubes, var_name=['sradsu_cav', 'sradsu_ave', 'sradsu'])
        cube = net_flux - upwelling
        cube.var_name = self.vardef.short_name
        return CubeList([cube])
class Rsdt(EmacFix):
    """Fixes for ``rsdt``."""

    def fix_metadata(self, cubes):
        """Fix metadata (incoming = net solar at TOA - outgoing)."""
        net_flux = self.get_cube(
            cubes, var_name=['flxstop_cav', 'flxstop_ave', 'flxstop'])
        outgoing = self.get_cube(
            cubes, var_name=['srad0u_cav', 'srad0u_ave', 'srad0u'])
        cube = net_flux - outgoing
        cube.var_name = self.vardef.short_name
        return CubeList([cube])
# Upwelling/outgoing shortwave fluxes are negated to match CMOR sign rules.
Rsus = NegateData

Rsut = NegateData

Rsutcs = NegateData
class Rtmt(EmacFix):
    """Fixes for ``rtmt`` (net downward TOA flux = thermal + solar)."""

    def fix_metadata(self, cubes):
        """Fix metadata."""
        thermal = self.get_cube(
            cubes, var_name=['flxttop_cav', 'flxttop_ave', 'flxttop'])
        solar = self.get_cube(
            cubes, var_name=['flxstop_cav', 'flxstop_ave', 'flxstop'])
        cube = thermal + solar
        cube.var_name = self.vardef.short_name
        return CubeList([cube])
class Sithick(EmacFix):
    """Fixes for ``sithick``."""

    def fix_data(self, cube):
        """Fix data by masking cells with zero sea-ice thickness."""
        data = cube.core_data()
        cube.data = da.ma.masked_equal(data, 0.0)
        return cube
class Toz(EmacFix):
    """Fixes for ``toz``."""

    def fix_metadata(self, cubes):
        """Fix metadata (convert total column ozone from DU to mm)."""
        cube = self.get_cube(cubes)
        # 1 mm of column ozone corresponds to 100 Dobson units.
        cube.data = cube.core_data() / 100.0
        cube.units = 'mm'
        return CubeList([cube])
class Zg(EmacFix):
    """Fixes for ``zg``."""

    def fix_metadata(self, cubes):
        """Fix metadata.

        Convert geopotential Phi given by EMAC to geopotential height
        ``Z = Phi / g0``, where ``g0`` is the standard acceleration of
        gravity.
        """
        g0 = constants.value('standard acceleration of gravity')
        g0_units = constants.unit('standard acceleration of gravity')
        cube = self.get_cube(cubes)
        cube.data = cube.core_data() / g0
        cube.units = cube.units / g0_units
        return cubes
# Tracers
class MP_BC_tot(EmacFix):  # noqa: N801
    """Fixes for ``MP_BC_tot``."""

    def fix_metadata(self, cubes):
        """Fix metadata (total BC = sum over ki, ks, as and cs modes)."""
        total = None
        for mode in ('ki', 'ks', 'as', 'cs'):
            names = [
                f'MP_BC_{mode}_cav', f'MP_BC_{mode}_ave', f'MP_BC_{mode}'
            ]
            mode_cube = self.get_cube(cubes, var_name=names)
            total = mode_cube if total is None else total + mode_cube
        total.var_name = self.vardef.short_name
        return CubeList([total])
class MP_DU_tot(EmacFix):  # noqa: N801
    """Fixes for ``MP_DU_tot``."""

    def fix_metadata(self, cubes):
        """Fix metadata (total dust = sum over ai, as, ci and cs modes)."""
        total = None
        for mode in ('ai', 'as', 'ci', 'cs'):
            names = [
                f'MP_DU_{mode}_cav', f'MP_DU_{mode}_ave', f'MP_DU_{mode}'
            ]
            mode_cube = self.get_cube(cubes, var_name=names)
            total = mode_cube if total is None else total + mode_cube
        total.var_name = self.vardef.short_name
        return CubeList([total])
class MP_SO4mm_tot(EmacFix):  # noqa: N801
    """Fixes for ``MP_SO4mm_tot``."""

    def fix_metadata(self, cubes):
        """Fix metadata (total sulfate = sum over ns, ks, as and cs modes)."""
        total = None
        for mode in ('ns', 'ks', 'as', 'cs'):
            names = [
                f'MP_SO4mm_{mode}_cav',
                f'MP_SO4mm_{mode}_ave',
                f'MP_SO4mm_{mode}',
            ]
            mode_cube = self.get_cube(cubes, var_name=names)
            total = mode_cube if total is None else total + mode_cube
        total.var_name = self.vardef.short_name
        return CubeList([total])
class MP_SS_tot(EmacFix):  # noqa: N801
    """Fixes for ``MP_SS_tot``."""

    def fix_metadata(self, cubes):
        """Fix metadata (total sea salt = sum over ks, as and cs modes)."""
        # Bug fix: the final return previously carried stray trailing text
        # ("| PypiClean") that would have raised a NameError at runtime.
        cube = (
            self.get_cube(cubes, var_name=['MP_SS_ks_cav', 'MP_SS_ks_ave',
                                           'MP_SS_ks']) +
            self.get_cube(cubes, var_name=['MP_SS_as_cav', 'MP_SS_as_ave',
                                           'MP_SS_as']) +
            self.get_cube(cubes, var_name=['MP_SS_cs_cav', 'MP_SS_cs_ave',
                                           'MP_SS_cs'])
        )
        cube.var_name = self.vardef.short_name
        return CubeList([cube])
/OctoPrint-1.9.2.tar.gz/OctoPrint-1.9.2/src/octoprint/plugins/corewizard/__init__.py | __license__ = "GNU Affero General Public License http://www.gnu.org/licenses/agpl.html"
__copyright__ = "Copyright (C) 2015 The OctoPrint Project - Released under terms of the AGPLv3 License"
from flask_babel import gettext
import octoprint.plugin
from .subwizards import Subwizards
class CoreWizardPlugin(
    octoprint.plugin.AssetPlugin,
    octoprint.plugin.TemplatePlugin,
    octoprint.plugin.WizardPlugin,
    octoprint.plugin.SettingsPlugin,
    octoprint.plugin.BlueprintPlugin,
    Subwizards,
):
    """Aggregates the individual setup subwizards provided by ``Subwizards``.

    Subwizards register themselves by defining methods following the
    ``_is_<key>_wizard_required`` / ``_get_<key>_wizard_name`` naming
    convention, which are discovered reflectively via
    ``_get_subwizard_attrs``.
    """

    # ~~ TemplatePlugin API

    def get_template_configs(self):
        """Build one wizard template config per required subwizard."""
        required = self._get_subwizard_attrs("_is_", "_wizard_required")
        names = self._get_subwizard_attrs("_get_", "_wizard_name")
        additional = self._get_subwizard_attrs(
            "_get_", "_additional_wizard_template_data"
        )
        firstrunonly = self._get_subwizard_attrs("_is_", "_wizard_firstrunonly")

        firstrun = self._settings.global_get(["server", "firstRun"])
        if not firstrun:
            # Outside of the first run, drop subwizards flagged as
            # first-run-only.  Missing flags default to "not firstrun-only".
            required = {
                key: value
                for key, value in required.items()
                if not firstrunonly.get(key, lambda: False)()
            }

        result = list()
        for key, method in required.items():
            if not callable(method):
                continue
            if not method():
                # Subwizard reports itself as not required.
                continue
            if key not in names:
                continue
            name = names[key]()
            if not name:
                continue

            config = {
                "type": "wizard",
                "name": name,
                "template": f"corewizard_{key}_wizard.jinja2",
                "div": f"wizard_plugin_corewizard_{key}",
                "suffix": f"_{key}",
            }
            if key in additional:
                # Subwizards may contribute extra template settings.
                additional_result = additional[key]()
                if additional_result:
                    config.update(additional_result)
            result.append(config)

        return result

    # ~~ AssetPlugin API

    def get_assets(self):
        """Only ship wizard assets when a wizard will actually be shown."""
        if self.is_wizard_required():
            return {"js": ["js/corewizard.js"], "css": ["css/corewizard.css"]}
        else:
            return {}

    # ~~ BlueprintPlugin API

    def is_blueprint_csrf_protected(self):
        return True

    # ~~ WizardPlugin API

    def is_wizard_required(self):
        """Return True if at least one subwizard reports itself required."""
        required = self._get_subwizard_attrs("_is_", "_wizard_required")
        firstrunonly = self._get_subwizard_attrs("_is_", "_wizard_firstrunonly")

        firstrun = self._settings.global_get(["server", "firstRun"])
        if not firstrun:
            required = {
                key: value
                for key, value in required.items()
                if not firstrunonly.get(key, lambda: False)()
            }

        any_required = any(map(lambda m: m(), required.values()))
        return any_required

    def get_wizard_details(self):
        """Collect per-subwizard detail dicts keyed by subwizard key."""
        result = {}

        def add_result(key, method):
            result[key] = method()

        self._get_subwizard_attrs("_get_", "_wizard_details", add_result)

        return result

    def get_wizard_version(self):
        return 4

    # ~~ helpers

    def _get_subwizard_attrs(self, start, end, callback=None):
        """Find attributes named ``<start><key><end>`` on this instance.

        Returns a dict mapping each ``key`` to its attribute; if *callback*
        is callable it is additionally invoked as ``callback(key, attr)``.
        """
        result = {}
        for item in dir(self):
            if not item.startswith(start) or not item.endswith(end):
                continue

            # Strip the prefix/suffix to obtain the subwizard key.
            key = item[len(start) : -len(end)]
            if not key:
                continue

            attr = getattr(self, item)
            if callable(callback):
                callback(key, attr)

            result[key] = attr

        return result
# Plugin metadata consumed by OctoPrint's plugin subsystem at load time.
__plugin_name__ = "Core Wizard"
__plugin_author__ = "Gina Häußge"
__plugin_description__ = "Provides wizard dialogs for core components and functionality"
__plugin_disabling_discouraged__ = gettext(
    "Without this plugin OctoPrint will no longer be able to perform "
    "setup steps that might be required after an update."
)
__plugin_license__ = "AGPLv3"
__plugin_pythoncompat__ = ">=3.7,<4"
__plugin_implementation__ = CoreWizardPlugin()
/Axelrod-4.13.0.tar.gz/Axelrod-4.13.0/axelrod/strategies/qlearner.py | from collections import OrderedDict
from typing import Dict, Union
from axelrod.action import Action, actions_to_str
from axelrod.player import Player
Score = Union[int, float]
C, D = Action.C, Action.D
class RiskyQLearner(Player):
    """A player who learns the best strategies through the q-learning
    algorithm.

    This Q learner is quick to come to conclusions and doesn't care about the
    future.

    Names:

    - Risky Q Learner: Original name by Geraint Palmer
    """

    name = "Risky QLearner"
    classifier = {
        "memory_depth": float("inf"),  # Long memory
        "stochastic": True,
        "long_run_time": False,
        "inspects_source": False,
        "manipulates_source": False,
        "manipulates_state": False,
    }
    # Q-learning hyper-parameters; subclasses override these to obtain
    # differently-behaving learners.
    learning_rate = 0.9
    discount_rate = 0.9
    action_selection_parameter = 0.1  # epsilon of the epsilon-soft policy
    memory_length = 12  # number of opponent moves encoded into the state

    def __init__(self) -> None:
        """Initialises the player by picking a random strategy."""
        super().__init__()
        # Set this explicitly, since the constructor of super will not pick it up
        # for any subclasses that do not override methods using random calls.
        self.classifier["stochastic"] = True
        self.prev_action = None  # type: Action
        self.original_prev_action = None  # type: Action
        self.score = 0
        # Qs maps state string -> {action: Q-value}; Vs caches max Q per state.
        self.Qs = OrderedDict({"": OrderedDict(zip([C, D], [0, 0]))})
        self.Vs = OrderedDict({"": 0})
        self.prev_state = ""

    def receive_match_attributes(self):
        # Cache the game's payoffs as a reward table indexed by
        # (own action, opponent action).
        (R, P, S, T) = self.match_attributes["game"].RPST()
        self.payoff_matrix = {C: {C: R, D: S}, D: {C: T, D: P}}

    def strategy(self, opponent: Player) -> Action:
        """Runs a qlearn algorithm while the tournament is running."""
        if len(self.history) == 0:
            # First move: no learning signal yet, so start at random.
            self.prev_action = self._random.random_choice()
            self.original_prev_action = self.prev_action
        state = self.find_state(opponent)
        reward = self.find_reward(opponent)
        if state not in self.Qs:
            # Lazily initialise Q/V entries for states not seen before.
            self.Qs[state] = OrderedDict(zip([C, D], [0, 0]))
            self.Vs[state] = 0
        self.perform_q_learning(
            self.prev_state, state, self.prev_action, reward
        )
        action = self.select_action(state)
        self.prev_state = state
        self.prev_action = action
        return action

    def select_action(self, state: str) -> Action:
        """
        Selects the action based on the epsilon-soft policy
        """
        rnd_num = self._random.random()
        p = 1.0 - self.action_selection_parameter
        if rnd_num < p:
            # Exploit: pick the action with the highest Q-value.
            return max(self.Qs[state], key=lambda x: self.Qs[state][x])
        # Explore: pick an action uniformly at random.
        return self._random.random_choice()

    def find_state(self, opponent: Player) -> str:
        """
        Finds the my_state (the opponents last n moves +
        its previous proportion of playing C) as a hashable state
        """
        # NOTE(review): despite the docstring, this formats the raw
        # cooperation *count* rather than a proportion -- confirm intended.
        prob = "{:.1f}".format(opponent.cooperations)
        action_str = actions_to_str(opponent.history[-self.memory_length :])
        return action_str + prob

    def perform_q_learning(
        self, prev_state: str, state: str, action: Action, reward
    ):
        """
        Performs the qlearning algorithm
        """
        # Standard Q-learning update:
        #   Q(s, a) <- (1 - lr) * Q(s, a) + lr * (reward + discount * V(s'))
        self.Qs[prev_state][action] = (1.0 - self.learning_rate) * self.Qs[
            prev_state
        ][action] + self.learning_rate * (
            reward + self.discount_rate * self.Vs[state]
        )
        # Keep the state value in sync with the best known Q-value.
        self.Vs[prev_state] = max(self.Qs[prev_state].values())

    def find_reward(
        self, opponent: Player
    ) -> Dict[Action, Dict[Action, Score]]:
        """
        Finds the reward gained on the last iteration
        """

        if len(opponent.history) == 0:
            # No opponent move yet: score against a hypothetical random move.
            opp_prev_action = self._random.random_choice()
        else:
            opp_prev_action = opponent.history[-1]
        return self.payoff_matrix[self.prev_action][opp_prev_action]
class ArrogantQLearner(RiskyQLearner):
    """A player who learns the best strategies through the q-learning
    algorithm.

    This Q learner jumps to quick conclusions and cares about the future.

    Names:

    - Arrogant Q Learner: Original name by Geraint Palmer
    """

    name = "Arrogant QLearner"
    # Overrides of the RiskyQLearner hyper-parameters.
    learning_rate = 0.9
    discount_rate = 0.1
class HesitantQLearner(RiskyQLearner):
    """A player who learns the best strategies through the q-learning algorithm.

    This Q learner is slower to come to conclusions and does not look ahead much.

    Names:

    - Hesitant Q Learner: Original name by Geraint Palmer
    """

    name = "Hesitant QLearner"
    # Overrides of the RiskyQLearner hyper-parameters.
    learning_rate = 0.1
    discount_rate = 0.9
class CautiousQLearner(RiskyQLearner):
    """A player who learns the best strategies through the q-learning algorithm.

    This Q learner is slower to come to conclusions and wants to look ahead
    more.

    Names:

    - Cautious Q Learner: Original name by Geraint Palmer
    """

    name = "Cautious QLearner"
    # Overrides of the RiskyQLearner hyper-parameters.
    learning_rate = 0.1
    discount_rate = 0.1
/GTW-1.2.6.tar.gz/GTW-1.2.6/__test__/Query_Filter.py |
from __future__ import print_function
def show(q):
    """Return the string representation of every element of *q*, sorted."""
    return sorted(map(str, q))
_composite = r"""
>>> scope = Scaffold.scope (%(p1)s, %(n1)s) # doctest:+ELLIPSIS
Creating new scope MOMT__...
>>> EVT = scope.EVT
>>> SWP = scope.SWP
>>> p1 = SWP.Page ("event-1-text", text = "Text for the 1. event")
>>> p2 = SWP.Page ("event-2-text", text = "Text for the 2. event")
>>> p3 = SWP.Page ("event-3-text", text = "Text for the 3. event")
>>> p4 = SWP.Page ("event-4-text", text = "Text for the 4. event")
>>> e1 = EVT.Event (p1.epk_raw, ("1.4.2010", ), raw = True)
>>> e2 = EVT.Event (p2.epk_raw, ("1.3.2010", ), raw = True)
>>> e3 = EVT.Event (p3.epk_raw, ("1.2.2010", ), raw = True)
>>> e4 = EVT.Event (p4.epk_raw, ("1.1.2010", ), raw = True)
>>> date = datetime.date (2010, 3, 1)
>>> q = EVT.Event.query ()
>>> for e in show (q) : print (e) ### all
(('event-1-text', ), ('2010-04-01', ), (), '')
(('event-2-text', ), ('2010-03-01', ), (), '')
(('event-3-text', ), ('2010-02-01', ), (), '')
(('event-4-text', ), ('2010-01-01', ), (), '')
>>> q = EVT.Event.query ().filter (Q.date.start > date)
>>> for e in show (q) : print (e) ### filtered 1
(('event-1-text', ), ('2010-04-01', ), (), '')
>>> q = EVT.Event.query ().filter (Q.date.start >= date)
>>> for e in show (q) : print (e) ### filtered 2
(('event-1-text', ), ('2010-04-01', ), (), '')
(('event-2-text', ), ('2010-03-01', ), (), '')
>>> q = EVT.Event.query ().filter (left = p1)
>>> for e in show (q) : print (e) ### filtered 3
(('event-1-text', ), ('2010-04-01', ), (), '')
"""
_link1_role = r"""
>>> scope = Scaffold.scope (%(p1)s, %(n1)s) # doctest:+ELLIPSIS
Creating new scope MOMT__...
>>> EVT = scope.EVT
>>> SWP = scope.SWP
>>> p1 = SWP.Page ("event-1-text", text = "Text for the 1. event")
>>> p2 = SWP.Page ("event-2-text", text = "Text for the 2. event")
>>> p3 = SWP.Page ("event-3-text", text = "Text for the 3. event")
>>> p4 = SWP.Page ("event-4-text", text = "Text for the 4. event")
>>> e1 = EVT.Event (p1.epk_raw, ("1.4.2010", ), raw = True)
>>> e2 = EVT.Event (p2.epk_raw, ("1.3.2010", ), raw = True)
>>> e3 = EVT.Event (p3.epk_raw, ("1.2.2010", ), raw = True)
>>> e4 = EVT.Event (p4.epk_raw, ("1.1.2010", ), raw = True)
>>> date = datetime.date (2010, 3, 1)
>>> q = EVT.Event_occurs.query ()
>>> for e in show (q) : print (e) ### all
((('event-1-text', ), ('2010-04-01', ), (), ''), '2010-04-01', ())
((('event-2-text', ), ('2010-03-01', ), (), ''), '2010-03-01', ())
((('event-3-text', ), ('2010-02-01', ), (), ''), '2010-02-01', ())
((('event-4-text', ), ('2010-01-01', ), (), ''), '2010-01-01', ())
>>> q = EVT.Event_occurs.query ().filter (Q.event.date.start > date)
>>> for e in show (q) : print (e) ### filter 1
((('event-1-text', ), ('2010-04-01', ), (), ''), '2010-04-01', ())
>>> q = EVT.Event_occurs.query ().filter (Q.event.date.start >= date)
>>> for e in show (q) : print (e) ### filter 2
((('event-1-text', ), ('2010-04-01', ), (), ''), '2010-04-01', ())
((('event-2-text', ), ('2010-03-01', ), (), ''), '2010-03-01', ())
>>> q = EVT.Event_occurs.query ().filter (event = e1)
>>> for e in show (q) : print (e) ### filter 3
((('event-1-text', ), ('2010-04-01', ), (), ''), '2010-04-01', ())
>>> q = EVT.Event.query ().filter (Q.date.alive)
>>> for e in show (q) : print (e) ### filter 4
(('event-1-text', ), ('2010-04-01', ), (), '')
(('event-2-text', ), ('2010-03-01', ), (), '')
(('event-3-text', ), ('2010-02-01', ), (), '')
(('event-4-text', ), ('2010-01-01', ), (), '')
"""
_link2_link1 = r"""
>>> scope = Scaffold.scope (%(p1)s, %(n1)s) # doctest:+ELLIPSIS
Creating new scope MOMT__...
>>> PAP = scope.PAP
>>> SRM = scope.SRM
>>> bc = SRM.Boat_Class ("Optimist", max_crew = 1)
>>> b = SRM.Boat.instance_or_new ('Optimist', "1107", "AUT", raw = True)
>>> p = PAP.Person.instance_or_new ("Tanzer", "Christian")
>>> s = SRM.Sailor.instance_or_new (p.epk_raw, nation = "AUT", mna_number = "29676", raw = True) ### 1
>>> rev = SRM.Regatta_Event (u"Himmelfahrt", ("20080501", ), raw = True)
>>> reg = SRM.Regatta_C (rev.epk_raw, boat_class = bc.epk_raw, raw = True)
>>> bir = SRM.Boat_in_Regatta (b.epk_raw, reg.epk_raw, skipper = s.epk_raw, raw = True)
>>> rev = SRM.Regatta_Event (u"Himmelfahrt", ("20090521", ), raw = True)
>>> reg = SRM.Regatta_C (rev.epk_raw, boat_class = bc.epk_raw, raw = True)
>>> bir = SRM.Boat_in_Regatta (b.epk_raw, reg.epk_raw, skipper = s.epk_raw, raw = True)
>>> rev = SRM.Regatta_Event (u"Himmelfahrt", ("20100513", ), raw = True)
>>> reg = SRM.Regatta_C (rev.epk_raw, boat_class = bc.epk_raw, raw = True)
>>> bir = SRM.Boat_in_Regatta (b.epk_raw, reg.epk_raw, skipper = s.epk_raw, raw = True)
>>> date = datetime.date (2009, 1, 1)
>>> q = scope.SRM.Boat_in_Regatta.query ().order_by (Q.pid)
>>> for r in show (q.filter (Q.right.left.date.start > date)) : print (r) ### SRM.Boat_in_Regatta
((('optimist', ), 1107, 'AUT', ''), (('himmelfahrt', ('2009-05-21', '2009-05-21')), ('optimist', )))
((('optimist', ), 1107, 'AUT', ''), (('himmelfahrt', ('2010-05-13', '2010-05-13')), ('optimist', )))
>>> q = scope.SRM.Boat_in_Regatta.query ()
>>> for r in q.filter (Q.right.left.date.start < date) : print (r)
((('optimist', ), 1107, 'AUT', ''), (('himmelfahrt', ('2008-05-01', '2008-05-01')), ('optimist', )))
>>> date2 = datetime.date (2009, 12, 31)
>>> qf = (Q.right.left.date.start >= date ) \
... & (Q.right.left.date.start <= date2)
>>> for r in q.filter (qf) : print (r)
((('optimist', ), 1107, 'AUT', ''), (('himmelfahrt', ('2009-05-21', '2009-05-21')), ('optimist', )))
>>> date3 = datetime.date (2010, 5, 13)
>>> for r in q.filter (Q.right.left.date.start == date3) : print (r)
((('optimist', ), 1107, 'AUT', ''), (('himmelfahrt', ('2010-05-13', '2010-05-13')), ('optimist', )))
>>> for r in q.filter (Q.RAW.right.left.date.start == "2010-05-13") : print (r)
((('optimist', ), 1107, 'AUT', ''), (('himmelfahrt', ('2010-05-13', '2010-05-13')), ('optimist', )))
"""
_query_attr = r"""
>>> scope = Scaffold.scope (%(p1)s, %(n1)s) # doctest:+ELLIPSIS
Creating new scope MOMT__...
>>> PAP = scope.PAP
>>> SRM = scope.SRM
>>> bc = SRM.Boat_Class ("Optimist", max_crew = 1)
>>> b = SRM.Boat.instance_or_new ('Optimist', "1107", "AUT", raw = True)
>>> p = PAP.Person.instance_or_new ("Tanzer", "Christian")
>>> s = SRM.Sailor.instance_or_new (p.epk_raw, nation = "AUT", mna_number = "29676", raw = True) ### 1
>>> rev = SRM.Regatta_Event (u"Himmelfahrt", ("20080501", ), raw = True)
>>> reg = SRM.Regatta_C (rev.epk_raw, boat_class = bc.epk_raw, raw = True)
>>> bir = SRM.Boat_in_Regatta (b.epk_raw, reg.epk_raw, skipper = s.epk_raw, raw = True)
>>> rev = SRM.Regatta_Event (u"Himmelfahrt", ("20090521", ), raw = True)
>>> reg = SRM.Regatta_C (rev.epk_raw, boat_class = bc.epk_raw, raw = True)
>>> bir = SRM.Boat_in_Regatta (b.epk_raw, reg.epk_raw, skipper = s.epk_raw, raw = True)
>>> rev = SRM.Regatta_Event (u"Himmelfahrt", ("20100513", ), raw = True)
>>> reg = SRM.Regatta_C (rev.epk_raw, boat_class = bc.epk_raw, raw = True)
>>> bir = SRM.Boat_in_Regatta (b.epk_raw, reg.epk_raw, skipper = s.epk_raw, raw = True)
>>> q = SRM.Regatta_C.query ().order_by (Q.pid)
>>> for r in q : print (r.year, r)
2008 (('himmelfahrt', ('2008-05-01', '2008-05-01')), ('optimist', ))
2009 (('himmelfahrt', ('2009-05-21', '2009-05-21')), ('optimist', ))
2010 (('himmelfahrt', ('2010-05-13', '2010-05-13')), ('optimist', ))
>>> for r in q.filter (Q.event.date.start.D.YEAR (2010)) : print (r.year, r)
2010 (('himmelfahrt', ('2010-05-13', '2010-05-13')), ('optimist', ))
>>> for r in q.filter (Q.event.date.start.D.YEAR (2009)) : print (r.year, r)
2009 (('himmelfahrt', ('2009-05-21', '2009-05-21')), ('optimist', ))
>>> for r in q.filter (Q.event.date.start.year == 2010) : print (r.year, r)
2010 (('himmelfahrt', ('2010-05-13', '2010-05-13')), ('optimist', ))
>>> for r in q.filter (Q.event.date.start >= "2010-01-01", Q.event.date.start <= "2010-12-31") : print (r.year, r)
2010 (('himmelfahrt', ('2010-05-13', '2010-05-13')), ('optimist', ))
>>> PAP.Person.query (Q.last_name == "tanzer").all ()
[PAP.Person ('tanzer', 'christian', '', '')]
>>> PAP.Person.query (Q.last_name == "Tanzer").all ()
[]
>>> PAP.Person.query (Q.RAW.last_name == "Tanzer").all ()
[PAP.Person ('tanzer', 'christian', '', '')]
"""
_date_queries = """
>>> scope = Scaffold.scope (%(p1)s, %(n1)s) # doctest:+ELLIPSIS
Creating new scope MOMT__...
>>> p = scope.PAP.Person ("LN 1", "FN 1", lifetime = ("2010-01-01", ))
>>> p = scope.PAP.Person ("LN 2", "FN 2", lifetime = ("2010-01-03", ))
>>> p = scope.PAP.Person ("LN 3", "FN 3", lifetime = ("2010-02-01", ))
>>> p = scope.PAP.Person ("LN 4", "FN 4", lifetime = ("2011-01-03", ))
>>> scope.commit ()
>>> print (scope.PAP.Person.query_s (Q.lifetime.start.year == 2010).all ())
[PAP.Person ('ln 1', 'fn 1', '', ''), PAP.Person ('ln 2', 'fn 2', '', ''), PAP.Person ('ln 3', 'fn 3', '', '')]
>>> print (scope.PAP.Person.query_s (Q.lifetime.start.year <= 2010).all ())
[PAP.Person ('ln 1', 'fn 1', '', ''), PAP.Person ('ln 2', 'fn 2', '', ''), PAP.Person ('ln 3', 'fn 3', '', '')]
>>> print (scope.PAP.Person.query_s (Q.lifetime.start.year >= 2010).all ())
[PAP.Person ('ln 1', 'fn 1', '', ''), PAP.Person ('ln 2', 'fn 2', '', ''), PAP.Person ('ln 3', 'fn 3', '', ''), PAP.Person ('ln 4', 'fn 4', '', '')]
>>> print (scope.PAP.Person.query_s (Q.lifetime.start.year > 2010).all ())
[PAP.Person ('ln 4', 'fn 4', '', '')]
"""
_sub_query = """
>>> scope = Scaffold.scope (%(p1)s, %(n1)s) # doctest:+ELLIPSIS
Creating new scope MOMT__...
>>> p = scope.PAP.Person ("LN 1", "FN 1", lifetime = ("2010-01-01", ))
>>> p = scope.PAP.Person ("LN 1", "FN 2", lifetime = ("2010-01-03", ))
>>> p = scope.PAP.Person ("LN 2", "FN 3", lifetime = ("2010-02-01", ))
>>> p = scope.PAP.Person ("LN 2", "FN 4", lifetime = ("2011-01-03", ))
>>> scope.commit ()
>>> q1 = scope.PAP.Person.query (last_name = "ln 1").attr ("pid")
>>> q2 = scope.PAP.Person.query (last_name = "ln 2").attr ("pid")
>>> print (q1.order_by ("pid").all ())
[1, 2]
>>> print (q2.order_by ("pid").all ())
[3, 4]
>>> q = scope.PAP.Person.query_s (Q.pid.IN (q1))
>>> print (q.all ())
[PAP.Person ('ln 1', 'fn 1', '', ''), PAP.Person ('ln 1', 'fn 2', '', '')]
"""
_sub_query_sql = """
>>> from _GTW.__test__._SAW_test_functions import show_query
>>> scope = Scaffold.scope (%(p1)s, %(n1)s) # doctest:+ELLIPSIS
Creating new scope MOMT__...
>>> _ = scope.PAP.Person ("LN 1", "FN 1", lifetime = ("2010-01-01", ))
>>> _ = scope.PAP.Person ("LN 1", "FN 2", lifetime = ("2010-01-03", ))
>>> _ = scope.PAP.Person ("LN 2", "FN 3", lifetime = ("2010-02-01", ))
>>> _ = scope.PAP.Person ("LN 2", "FN 4", lifetime = ("2011-01-03", ))
>>> scope.commit ()
>>> q1 = scope.PAP.Person.query (last_name = "ln 1").attr ("pid")
>>> qe = scope.PAP.Person.query (Q.pid.IN ([]))
>>> qs = scope.PAP.Person.query (Q.pid.IN (q1))
>>> show_query (qe)
SQL: SELECT
mom_id_entity.electric AS mom_id_entity_electric,
mom_id_entity.last_cid AS mom_id_entity_last_cid,
mom_id_entity.pid AS mom_id_entity_pid,
mom_id_entity.type_name AS mom_id_entity_type_name,
mom_id_entity.x_locked AS mom_id_entity_x_locked,
pap_person.__raw_first_name AS pap_person___raw_first_name,
pap_person.__raw_last_name AS pap_person___raw_last_name,
pap_person.__raw_middle_name AS pap_person___raw_middle_name,
pap_person.__raw_title AS pap_person___raw_title,
pap_person.first_name AS pap_person_first_name,
pap_person.last_name AS pap_person_last_name,
pap_person.lifetime__finish AS pap_person_lifetime__finish,
pap_person.lifetime__start AS pap_person_lifetime__start,
pap_person.middle_name AS pap_person_middle_name,
pap_person.pid AS pap_person_pid,
pap_person.sex AS pap_person_sex,
pap_person.title AS pap_person_title
FROM mom_id_entity
JOIN pap_person ON mom_id_entity.pid = pap_person.pid
WHERE false
>>> show_query (qs)
SQL: SELECT
mom_id_entity.electric AS mom_id_entity_electric,
mom_id_entity.last_cid AS mom_id_entity_last_cid,
mom_id_entity.pid AS mom_id_entity_pid,
mom_id_entity.type_name AS mom_id_entity_type_name,
mom_id_entity.x_locked AS mom_id_entity_x_locked,
pap_person.__raw_first_name AS pap_person___raw_first_name,
pap_person.__raw_last_name AS pap_person___raw_last_name,
pap_person.__raw_middle_name AS pap_person___raw_middle_name,
pap_person.__raw_title AS pap_person___raw_title,
pap_person.first_name AS pap_person_first_name,
pap_person.last_name AS pap_person_last_name,
pap_person.lifetime__finish AS pap_person_lifetime__finish,
pap_person.lifetime__start AS pap_person_lifetime__start,
pap_person.middle_name AS pap_person_middle_name,
pap_person.pid AS pap_person_pid,
pap_person.sex AS pap_person_sex,
pap_person.title AS pap_person_title
FROM mom_id_entity
JOIN pap_person ON mom_id_entity.pid = pap_person.pid
WHERE mom_id_entity.pid IN (SELECT DISTINCT mom_id_entity.pid AS mom_id_entity_pid
FROM mom_id_entity
JOIN pap_person ON mom_id_entity.pid = pap_person.pid
WHERE pap_person.last_name = :last_name_1)
Parameters:
last_name_1 : 'ln 1'
"""
_type_name_query = r"""
>>> scope = Scaffold.scope (%(p1)s, %(n1)s) # doctest:+ELLIPSIS
Creating new scope MOMT__...
>>> p1 = scope.SWP.Page (perma_name = "page-1", text = "page-1")
>>> p2 = scope.SWP.Page (perma_name = "page-2", text = "page-2")
>>> y1 = scope.SWP.Page_Y (perma_name = "year-1", text = "year-1", year = 2011)
>>> y2 = scope.SWP.Page_Y (perma_name = "year-2", text = "year-2", year = 2012)
>>> c1 = scope.SWP.Clip_O (left = p1, abstract = "abstract-p1.1")
>>> c2 = scope.SWP.Clip_O (left = p2, abstract = "abstract-p2.1")
>>> c3 = scope.SWP.Clip_O (left = y1, abstract = "abstract-y1.1")
>>> c4 = scope.SWP.Clip_O (left = y2, abstract = "abstract-y2.1")
>>> scope.commit ()
>>> scope.SWP.Clip_O.query (Q.left.type_name == "SWP.Page").all ()
[SWP.Clip_O (('page-1', ), ()), SWP.Clip_O (('page-2', ), ())]
>>> scope.SWP.Clip_O.query (Q.left.type_name == "SWP.Page_Y").all ()
[SWP.Clip_O (('year-1', 2011), ()), SWP.Clip_O (('year-2', 2012), ())]
"""
from _GTW.__test__.model import *
from _MOM.import_MOM import Q
import datetime
# Register the doctest fixtures above with the scaffold so they run against
# every configured backend.
__test__ = Scaffold.create_test_dict \
    ( dict
        ( composite    = _composite
        , date_queries = _date_queries
        , link1_role   = _link1_role
        , link2_link1  = _link2_link1
        , query_attr   = _query_attr
        , sub_query    = _sub_query
        , type_name    = _type_name_query
        )
    )
# The SQL-snapshot test only makes sense for SQL backends, so exclude the
# in-memory HPS backend.
__test__.update \
    ( Scaffold.create_test_dict
        ( dict
            ( sub_query_swl = _sub_query_sql
            )
        , ignore = ("HPS", )
        )
    )
### __END__ GTW.__test__.Query_Filter | PypiClean |
/Grammaticomastix-0.0.1rc2-py3-none-any.whl/grammaticomastix/dchars/languages/bod/transliterations/bodsan/bodsan_symbols.py | from dchars.utilities.dicttools import invertdict
#
# * CAVEAT ! If you modify these dictionaries, don't forget to modify their
# corresponding symbols' dictionaries in symbols.py !
#
# * CAVEAT ! No duplicate value allowed in these dictionaries !
#
CONSONANTS = {
'K' : chr(0x0915),
'KH' : chr(0x0916),
'G' : chr(0x0917),
'GH' : chr(0x0918),
'NG' : chr(0x0919),
# 'C' : ???
# 'CH' : ???
# 'J' : ???
# 'JH' : ???
'NY' : chr(0x091E),
'TT' : chr(0x091F),
'TTH' : chr(0x0920),
'DD' : chr(0x0921),
'DDH' : chr(0x0922),
'NN' : chr(0x0923),
'T' : chr(0x0924),
'TH' : chr(0x0925),
'D' : chr(0x0926),
'DH' : chr(0x0927),
'N' : chr(0x0928),
'P' : chr(0x092A),
'PH' : chr(0x092B),
'B' : chr(0x092C),
'BH' : chr(0x092D),
'M' : chr(0x092E),
'TS' : chr(0x091A),
'TSH' : chr(0x091B),
'DZ' : chr(0x091C),
'DZH' : chr(0x091D),
'W' : chr(0x0935),
# 'ZH' : 'zh',
# 'Z' : 'z',
# '-' : "'",
'Y' : chr(0x092F),
'R' : chr(0x0930),
'L' : chr(0x0932),
'SH' : chr(0x0936),
'SS' : chr(0x0937),
'S' : chr(0x0938),
'H' : chr(0x0939),
# 'KSS' : (chr(0x0F69),),
# 'FIXED-FORM R' : (chr(0x0F6A),),
# 'KK' : (chr(0x0F6B),),
# 'RR' : (chr(0x0F6C),),
# pseudo-consonant :
'A' : 'FAKE_CONSONANT_A',
# # Tibetan transliteration of Chinese sound 'F' :
# 'TIB. TRANS. OF CHIN. SOUND F' : (chr(0x0F55)+chr(0x0F39),),
# 'TIB. TRANS. OF CHIN. SOUND V' : (chr(0x0F56)+chr(0x0F39),),
}
# the name of the vowels (the keys of this dictionary) must be consistent
# with symbols.py::SYMB_(IN)DEPENDENT_VOWELS
DEPENDENT_VOWELS = {
'A' : 'FAKE_A',
'AA' : chr(0x093E),
'I' : chr(0x093F),
'II' : chr(0x0940),
'U' : chr(0x0941),
'UU' : chr(0x0942),
'VOCALIC R' : chr(0x0943),
'VOCALIC RR' : chr(0x0944),
'VOCALIC L' : chr(0x0962),
'VOCALIC LL' : chr(0x0963),
'E' : chr(0x0947),
'AI' : chr(0x0948),
'O' : chr(0x094B),
'AU' : chr(0x094C),
# 'REVERSED I' : "-i",
# 'REVERSED II' : "-I",
}
INDEPENDENT_VOWELS = {
'A' : chr(0x0905),
'AA' : chr(0x0906),
'I' : chr(0x0907),
'II' : chr(0x0908),
'U' : chr(0x0909),
'UU' : chr(0x090A),
'VOCALIC R' : chr(0x090B),
'VOCALIC RR' : chr(0x0960),
'VOCALIC L' : chr(0x090C),
'VOCALIC LL' : chr(0x0961),
'E' : chr(0x090F),
'AI' : chr(0x0910),
'O' : chr(0x0913),
'AU' : chr(0x0914),
# 'REVERSED I' : "-i",
# 'REVERSED II' : "-I",
}
OTHER_SYMBOLS = {
'SYLLABLE OM' : chr(0x0950),
'DIGIT ZERO' : chr(0x0966),
'DIGIT ONE' : chr(0x0967),
'DIGIT TWO' : chr(0x0968),
'DIGIT THREE' : chr(0x0969),
'DIGIT FOUR' : chr(0x096A),
'DIGIT FIVE' : chr(0x096B),
'DIGIT SIX' : chr(0x096C),
'DIGIT SEVEN' : chr(0x096D),
'DIGIT HEIGHT' : chr(0x096E),
'DIGIT NINE' : chr(0x096F),
# 'DIGIT HALF ZERO' : "\\u0F2A",
# 'DIGIT HALF ONE' : "\\u0F2B",
# 'DIGIT HALF TWO' : "\\u0F2C",
# 'DIGIT HALF THREE' : "\\u0F2D",
# 'DIGIT HALF FOUR' : "\\u0F2F",
# 'DIGIT HALF FIVE' : "\\u0F30",
# 'DIGIT HALF SIX' : "\\u0F31",
# 'DIGIT HALF SEVEN' : "\\u0F32",
# 'DIGIT HALF HEIGHT' : "\\u0F33",
# 'DIGIT HALF NINE' : "\\u0F34",
# = Sanskrit avagraha (अवग्रह) = ऽ
'MARK PALUTA' : "ऽ",
}
PUNCTUATION = {
'MARK INTERSYLLABIC TSHEG' : " ",
'MARK SHAD' : chr(0x0964), # = Sanskrit danda
}
DIACRITICS = {
'SIGN RNAM BCAD' : chr(0x0903),
'MARK HALANTA' : chr(0x094D),
'SIGN RJES SU NGA RO' : chr(0x0902),
# 'SIGN NYI ZLA NAA DA' : '???',
'SIGN SNA LDAN' : chr(0x0901),
}
# Reverse lookup tables: Devanagari character(s) -> symbolic name.
# invertdict() requires the forward dicts to have no duplicate values.
CONSONANTS_INVERSED = invertdict(CONSONANTS)
DEPENDENT_VOWELS_INVERSED = invertdict(DEPENDENT_VOWELS)
INDEPENDENT_VOWELS_INVERSED = invertdict(INDEPENDENT_VOWELS)
OTHER_SYMBOLS_INVERSED = invertdict(OTHER_SYMBOLS)
PUNCTUATION_INVERSED = invertdict(PUNCTUATION)
DIACRITICS_INVERSED = invertdict(DIACRITICS)
/ESMValTool-2.9.0-py3-none-any.whl/esmvaltool/cmorizers/data/downloaders/datasets/cds_satellite_soil_moisture.py |
import calendar
import datetime
from dateutil import relativedelta
from esmvaltool.cmorizers.data.downloaders.cds import CDSDownloader
from esmvaltool.cmorizers.data.utilities import unpack_files_in_folder
def download_dataset(config, dataset, dataset_info, start_date, end_date,
                     overwrite):
    """Download dataset.

    Downloads monthly then daily soil-moisture products for the three
    sensor types, then unpacks the downloaded archives.

    Parameters
    ----------
    config : dict
        ESMValTool's user configuration
    dataset : str
        Name of the dataset
    dataset_info : dict
        Dataset information from the datasets.yml file
    start_date : datetime
        Start of the interval to download
    end_date : datetime
        End of the interval to download
    overwrite : bool
        Overwrite already downloaded files
    """
    # Default to the full available period of the product.
    if not start_date:
        start_date = datetime.datetime(1991, 9, 1)
    if not end_date:
        end_date = datetime.datetime(2020, 6, 30)
    loop_date = start_date

    # NOTE(review): this downloader is never used -- the name is rebound by
    # the loop variables below before any download happens.  Looks like dead
    # code; confirm before removing.
    downloader = CDSDownloader(
        product_name='satellite-soil-moisture',
        request_dictionary={
            'format': 'tgz',
            'variable': 'volumetric_surface_soil_moisture',
            'type_of_sensor': 'combined_passive_and_active',
            'type_of_record': 'cdr',
            'version': 'v201912.0.0',
            'time_aggregation': 'month_average',
            'day': ['01']
        },
        config=config,
        dataset=dataset,
        dataset_info=dataset_info,
        overwrite=overwrite,
    )

    # One downloader per sensor type and time aggregation.
    monthly_downloaders = {}
    daily_downloaders = {}
    for sensor in ['combined_passive_and_active', 'passive', 'active']:
        monthly_downloaders[sensor] = get_downloader(config, dataset,
                                                     dataset_info, overwrite,
                                                     sensor, 'month')
        daily_downloaders[sensor] = get_downloader(config, dataset,
                                                   dataset_info, overwrite,
                                                   sensor, 'day')

    # Monthly products: one request per month.
    while loop_date <= end_date:
        for sensor, downloader in monthly_downloaders.items():
            pattern = f'cds-satellite-soil-moisture_cdr_{sensor}_monthly'
            downloader.download(loop_date.year,
                                loop_date.month,
                                file_pattern=pattern)
        loop_date += relativedelta.relativedelta(months=1)

    # Daily products: one request per month covering every day of it.
    loop_date = start_date
    while loop_date <= end_date:
        for sensor, downloader in daily_downloaders.items():
            downloader.download(
                loop_date.year, loop_date.month, [
                    f'{i+1:02d}' for i in range(
                        calendar.monthrange(loop_date.year, loop_date.month)
                        [1])
                ], f'cds-satellite-soil-moisture_cdr_{sensor}_daily')
        loop_date += relativedelta.relativedelta(months=1)
    # NOTE(review): `downloader` here is the last daily downloader from the
    # loop above; all downloaders appear to share the same local folder --
    # confirm against CDSDownloader.
    unpack_files_in_folder(downloader.local_folder)
def get_downloader(config, dataset, dataset_info, overwrite, sensor,
                   frequency):
    """Create a configured CDS downloader for one sensor/frequency pair.

    Parameters
    ----------
    config : dict
        ESMValTool's user configuration
    dataset : str
        Name of the dataset
    dataset_info : dict
        Dataset information from the datasets.yml file
    overwrite : bool
        Overwrite already downloaded files
    sensor : str
        Type of sensor
    frequency : str
        Time aggregation ('month' or 'day')

    Returns
    -------
    CDSDownloader
        Downloader primed with the request template for this product.
    """
    # The active sensor provides surface soil moisture only; the other
    # sensors provide the volumetric variant.
    if sensor == 'active':
        variable = 'surface_soil_moisture'
    else:
        variable = 'volumetric_surface_soil_moisture'
    downloader = CDSDownloader(
        product_name='satellite-soil-moisture',
        request_dictionary={
            'format': 'tgz',
            'variable': variable,
            'type_of_sensor': sensor,
            'day': '01',
            'type_of_record': 'cdr',
            'version': 'v201912.0.0',
            'time_aggregation': f'{frequency}_average',
        },
        config=config,
        dataset=dataset,
        dataset_info=dataset_info,
        overwrite=overwrite,
    )
    return downloader
/BinTut-0.3.3.tar.gz/BinTut-0.3.3/bintut/courses/init.py | from __future__ import division, absolute_import, print_function
from logging import (
getLogger, Formatter, DEBUG, INFO, WARNING, ERROR, CRITICAL)
from colorama import Fore, Back, Style
class LoggingMixIn(object):
    """Mix-in granting subclasses a ``logger`` named after their class."""

    @property
    def logger(self):
        """Return a logger whose name is the concrete class's name."""
        return getLogger(type(self).__name__)
# TODO: Write a wrapper.
# TODO: Use other libraries.
def green(text, **kwargs):
    """Render *text* in bright green; extra options go to :func:`color`."""
    return color(text, fore=Fore.GREEN, **kwargs)
def yellow(text, **kwargs):
    """Render *text* in bright yellow; extra options go to :func:`color`."""
    return color(text, fore=Fore.YELLOW, **kwargs)
def red(text, **kwargs):
    """Render *text* in bright red; extra options go to :func:`color`."""
    return color(text, fore=Fore.RED, **kwargs)
def cyan(text, **kwargs):
    """Render *text* in bright cyan; extra options go to :func:`color`."""
    return color(text, fore=Fore.CYAN, **kwargs)
def blue(text, **kwargs):
    """Render *text* in bright blue; extra options go to :func:`color`."""
    return color(text, fore=Fore.BLUE, **kwargs)
def color(text, fore='', back='', res=True):
    """Wrap *text* in ANSI escape sequences.

    ``fore`` is a colorama foreground code (rendered bright), ``back`` a
    background colour name looked up on :class:`colorama.Back`, and ``res``
    controls whether the style is reset at the end.
    """
    parts = []
    if fore:
        parts.append(fore + Style.BRIGHT)
    if back:
        parts.append(getattr(Back, back.upper()))
    parts.append(text)
    if res:
        parts.append(Style.RESET_ALL)
    return ''.join(parts)
class LevelFormatter(Formatter):
    """Logging formatter that picks a colourised format per log level."""

    critical_formatter = Formatter(red('critical: %(message)s'))
    error_formatter = Formatter(
        red('error: ') + blue('%(name)s.%(funcName)s: ') +
        red('%(message)s'))
    warning_formatter = Formatter(yellow('warning: %(message)s'))
    info_formatter = Formatter(cyan('%(message)s'))
    debug_formatter = Formatter(
        green('debug: ') + blue('%(name)s.%(funcName)s: ') +
        green('%(message)s'))

    def __init__(self):
        Formatter.__init__(self)

    def format(self, record):
        """Format the record using the corresponding formatter."""
        if record.levelno == DEBUG:
            return self.debug_formatter.format(record)
        if record.levelno == INFO:
            return self.info_formatter.format(record)
        if record.levelno == ERROR:
            return self.error_formatter.format(record)
        if record.levelno == WARNING:
            return self.warning_formatter.format(record)
        if record.levelno == CRITICAL:
            return self.critical_formatter.format(record)
        # Previously unknown (custom) levels fell through and returned None,
        # which breaks handlers expecting a string; format them plainly.
        return self.info_formatter.format(record)
/Acolyte-0.0.1.tar.gz/Acolyte-0.0.1/acolyte/testing/core/user_service.py | from acolyte.testing import EasemobFlowTestCase
from acolyte.core.storage.user import UserDAO
class UserServiceTestCase(EasemobFlowTestCase):
    """Integration tests for ``UserService``."""

    def setUp(self):
        self._user_service = self._("UserService")
        # Collects ids of users created by a test so tearDown can remove
        # them in one batch.
        self._new_user_id_collector = []

    def testLogin(self):
        """Test the login operation."""
        # Successful login.
        rs = self._user_service.login("chihz@easemob.com", "123456")
        self.assertResultSuccess(rs)
        self.assertEqual(rs.data["id"], 1)

        # Account and password do not match.
        rs = self._user_service.login("chihz@easemob.com", "654321")
        self.assertResultBadRequest(rs, "no_match")

    def testAddUser(self):
        """Test the various outcomes of adding a user."""
        # Normal creation.
        rs = self._user_service.add_user(
            email="chihongze@gmail.com",
            password="123456",
            name="SamChi",
            role=1,
            github_account="chihongze",
            operator=1
        )
        self.assertResultSuccess(rs)
        self.assertTrue(rs.data.id > 0)
        self._new_user_id_collector.append(rs.data.id)

        # Email does not match the required format.
        rs = self._user_service.add_user(
            email="hhhhh",
            password="123456",
            name="SamChi",
            role=1,
            github_account="chihongze",
            operator=1
        )
        self.assertResultBadRequest(rs, "email_invalid_format")

        # Duplicate registration.
        rs = self._user_service.add_user(
            email="chihongze@gmail.com",
            password="654321",
            name="Jackson",
            role=1,
            github_account="chihongze",
            operator=1
        )
        self.assertResultBadRequest(rs, "email_exist")

        # Specify a role that does not exist.
        rs = self._user_service.add_user(
            email="xiaoze@gmail.com",
            password="789101",
            name="Jackson",
            role=10000,
            github_account="chihongze",
            operator=1
        )
        self.assertResultBadRequest(rs, "role_not_found")

        # Specify an operator that does not exist.
        rs = self._user_service.add_user(
            email="xiaoze@gmail.com",
            password="789101",
            name="Jackson",
            role=1,
            github_account="chihongze",
            operator=10000
        )
        self.assertResultBadRequest(rs, "operator_not_found")

    def testCheckToken(self):
        """Test the token-check operation."""
        rs = self._user_service.login(
            "chihz@easemob.com", "123456")

        # Valid token is accepted and yields session data.
        token = rs.data["token"]
        rs = self._user_service.check_token(token)
        self.assertResultSuccess(rs)
        self.assertEqual(rs.data["id"], 1)
        self.assertEqual(rs.data["session_data"]["name"], "Sam")

        # Invalid token is rejected.
        rs = self._user_service.check_token("你们啊!naive!")
        self.assertResultBadRequest(rs, "invalid_token")

    def testLogout(self):
        """Test the logout interface."""
        rs = self._user_service.login(
            "chihz@easemob.com", "123456")
        self.assertResultSuccess(rs)
        token = rs.data["token"]
        rs = self._user_service.check_token(token)
        self.assertResultSuccess(rs)

        # After logout the token must no longer validate.
        rs = self._user_service.logout(token)
        self.assertResultSuccess(rs)
        rs = self._user_service.check_token(token)
        self.assertResultBadRequest(rs, "invalid_token")

    def testModifyPassword(self):
        """Test password modification."""
        rs = self._user_service.modify_password(1, "123456", "654321")
        self.assertResultSuccess(rs)
        rs = self._user_service.modify_password(1, "123456", "654321")
        self.assertResultBadRequest(rs, "old_password_incorrect")
        # Restore the original password so other tests are unaffected.
        self._user_service.modify_password(1, "654321", "123456")

    def tearDown(self):
        user_dao = UserDAO(self._("db"))
        if self._new_user_id_collector:
            user_dao.delete_by_id(self._new_user_id_collector)
/Flask-State-1.1.4.tar.gz/Flask-State-1.1.4/src/flask_state/utils/logger.py | import copy
import logging
from logging import config
from flask import app
from ..utils.constants import AnsiColor, LogLevels
# ANSI color indexes; adding 30 yields the SGR foreground code (30-38).
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, GREY = range(9)

RESET_SEQ = "\033[0m"    # restore default terminal attributes
COLOR_SEQ = "\033[%dm"   # template for one SGR escape sequence
BOLD_SEQ = "\033[1m"

# Log level -> color index used when colorizing a record.
COLOR_MAP = {
    logging.DEBUG: GREY,
    logging.INFO: WHITE,
    logging.WARNING: YELLOW,
    logging.CRITICAL: RED,
    logging.ERROR: RED,
}

FLASK_STATE = "flaskstate"


class ColorizeFormatter(logging.Formatter):
    """Formatter that wraps selected LogRecord fields in ANSI colors."""

    def __init__(self, fmt=None, datefmt=None, style="%"):
        super(ColorizeFormatter, self).__init__(fmt, datefmt, style)

    def format(self, record: logging.LogRecord):
        """Colorize name/funcName/msg/levelname and delegate formatting."""
        # Levels missing from COLOR_MAP fall back to blue.
        level_color = COLOR_MAP.get(record.levelno, BLUE)
        # Work on a shallow copy so the original record, which may be
        # shared with other handlers, is never mutated.
        clone = copy.copy(record)
        clone.name = self._wrap_color(str(clone.name), CYAN)
        clone.funcName = self._wrap_color(str(clone.funcName), CYAN)
        clone.msg = self._wrap_color(str(clone.msg), GREEN)
        clone.levelname = self._wrap_color(clone.levelname, level_color)
        return logging.Formatter.format(self, clone)

    @staticmethod
    def _wrap_color(string: str, color):
        # 30 + color is the ANSI foreground code for this color index.
        return "".join([COLOR_SEQ % (30 + color), string, RESET_SEQ])
def _has_config(logger):
return (
logger.level != logging.NOTSET
or logger.handlers
or logger.filters
or not logger.propagate
)
class LoggerAllocator:
    """Holds the package-wide flask_state logger.

    The ``logger`` property lets callers inject their own
    ``logging.Logger``; otherwise a colorized stream handler is
    installed lazily, but only when the logger carries no prior
    user configuration.
    """

    def __init__(self):
        self._logger = logging.getLogger(FLASK_STATE)

    @property
    def logger(self):
        return self._logger

    @logger.setter
    def logger(self, out_logger: logging.Logger = None):
        if out_logger:
            # Caller supplied a logger: adopt it as-is.
            self._logger = out_logger
            return
        if _has_config(self._logger):
            # Respect configuration applied elsewhere (level/handlers/etc.).
            return
        handler = logging.StreamHandler()
        handler.setFormatter(
            ColorizeFormatter(
                fmt="%(asctime)s | %(levelname)-8s | %(name)s:%(funcName)s:%(lineno)d | %(message)s"
            )
        )
        self._logger.setLevel(logging.INFO)
        self._logger.addHandler(handler)


# Module-level singleton used throughout flask_state.
flask_logger = LoggerAllocator()
/DocOnce-1.5.15-py3-none-any.whl/doconce/rst.py | from __future__ import absolute_import
from builtins import zip
from builtins import str
from builtins import range
from past.builtins import basestring
import os, sys
import regex as re
import shlex
from .common import insert_code_blocks, insert_tex_blocks, \
indent_lines, table_analysis, plain_exercise, bibliography, \
cite_with_multiple_args2multiple_cites, fix_ref_section_chapter
from .html import html_movie, html_quiz
from doconce import globals
from .misc import _doconce_header, _doconce_command, option, errwarn, debugpr, _abort
def rst_abstract(m):
    """Typeset an abstract/preface heading for rst output.

    *m* is a regex match with groups ``type`` (heading word, e.g.
    Abstract/Preface), ``text`` (abstract body) and ``rest`` (the
    remainder of the document).

    NOTE(review): the uio-introduction template below lost its internal
    indentation/blank lines in this copy of the source — verify against
    upstream before relying on its exact layout.
    """
    # r'\n*\g<type>.* \g<text>\n\g<rest>'
    name = m.group('type').strip()
    text = m.group('text').strip()
    rest = m.group('rest').strip()
    if option('rst_uio'):
        # UiO-styled documents wrap the abstract in a uio-introduction
        # directive followed by a TOC and section numbering.
        s = """
.. uio-introduction::
%s
.. contents::
.. section-numbering::
%s
""" % (indent_lines(text, 'rst'), rest)
        return s
    else:
        if name.lower() == 'preface':
            # Drop heading (short abstract for books)
            return '\n%(text)s\n\n%(rest)s' % vars()
        else:
            # Emphasized inline heading, e.g. "*Abstract.* ..."
            return '\n*%(name)s.* %(text)s\n\n%(rest)s' % vars()
# replacement patterns for substitutions of inline tags
def rst_figure(m):
    """Format figures for the rst format.

    Return rst code to embed a figure in rst output. The syntax is
    `FIGURE:[filename[, options][, sidecap=BOOL][, frac=NUM]] [caption]`.
    The `sidecap` and `frac` keywords are parsed but not emitted as
    figure-directive options.

    :param _regex.Match m: regex match object
    :return: rst code
    :rtype: str
    """
    filename = m.group('filename').strip()
    caption = m.group('caption').strip().strip('"').strip("'")
    opts = m.group('options').strip()
    info = dict()
    result = ''
    # Substitute DocOnce label by rst label in caption
    # (also, remove final period in caption since caption is used as hyperlink
    # text to figures).
    m_label = re.search(r'label\{(.+?)\}', caption)
    if m_label:
        label = m_label.group(1)
        # Emit a reST cross-reference target before the figure.
        result += '\n.. _%s:\n' % label
        # remove . at the end of the caption text
        parts = caption.split('label')
        parts[0] = parts[0].rstrip()
        if parts[0] and parts[0][-1] == '.':
            parts[0] = parts[0][:-1]
        # insert emphasize marks
        parts[0] = '*' + parts[0].strip() + '*'
        caption = ' label'.join(parts)
        caption = re.sub(r'label\{(.+?)\}', '(\g<1>)', caption)
    else:
        if caption and caption[-1] == '.':
            caption = caption[:-1]
    # Remote figures (http) cannot be checked for existence locally.
    link = filename if filename.startswith('http') else None
    if not link and not os.path.isfile(filename):
        raise IOError('no figure file %s' % filename)
    result += '\n.. figure:: ' + filename + '\n'  # utilize flexibility
    if opts:
        # opts: width=600 frac=0.5 align=center
        # opts: width=600, frac=0.5, align=center
        info = shlex.split(opts)
        info = dict(s.strip(',').split('=') for s in info)
        # String of options; frac/sidecap are doconce-only and skipped.
        fig_info = [' :%s: %s' % (opt, val.replace(',', ''))
                    for opt, val in info.items()
                    if opt not in ['frac', 'sidecap']]
        result += '\n'.join(fig_info)
    # remove final period in caption since caption is used as hyperlink
    # text to figures
    if caption and caption[-1] == '.':
        caption = caption[:-1]
    if caption:
        result += '\n\n ' + caption + '\n'
    else:
        result += '\n\n'
    return result
def rst_movie(m):
    """Embed a movie as a raw HTML block in rst output."""
    # Reuse the HTML movie player and indent it under a raw directive.
    embedded = indent_lines(html_movie(m), 'sphinx')
    filename = m.group('filename')
    is_local = not filename.startswith('http')
    if is_local and not filename.startswith('mov'):
        # Sphinx only copies media stored under mov* directories.
        errwarn('*** warning: movie file %s' % filename)
        errwarn(' is not in mov* subdirectory - this will give problems with sphinx')
    return '.. raw:: html\n' + embedded + '\n'
# these global patterns are used in st, epytext, plaintext as well:
# Group 1 captures the last character of the text preceding a !bc/!bt
# begin-block line; that character must belong to the class so that the
# preceding sentence can legally be extended with '::' in reST.
bc_regex_pattern = r'''([a-zA-Z0-9)'"`.*_\[\]{}#@=-^~+-])[\n:.?!, ]\s*?^!bc.*?$'''
bt_regex_pattern = r'''([a-zA-Z0-9)'"`.*_}=-^~])[\n:.?!, ]\s*?^!bt.*?$'''
def rst_code(filestr, code_blocks, code_block_types,
             tex_blocks, format):
    """Typeset code and TeX blocks for rst output.

    Indents every code/tex block, re-inserts them into *filestr*,
    and replaces the !bc/!ec and !bt/!et delimiters by reST '::'
    verbatim markers (or raw-html MathJax blocks with --rst_mathjax).
    The regex substitutions below are order-sensitive.
    """
    # In rst syntax, code blocks are typeset with :: (verbatim)
    # followed by indented blocks. This function indents everything
    # inside code (or TeX) blocks.
    for i in range(len(code_blocks)):
        code_blocks[i] = indent_lines(code_blocks[i], format)
    for i in range(len(tex_blocks)):
        tex_blocks[i] = indent_lines(tex_blocks[i], format)
    # Fix labels: MathJax needs backslash-escaped \label{}
    if option('rst_mathjax'):
        for i in range(len(tex_blocks)):
            tex_blocks[i] = tex_blocks[i].replace(' label{', ' \\label{')
    filestr = insert_code_blocks(filestr, code_blocks, format, complete_doc=True, remove_hid=True)
    filestr = insert_tex_blocks(filestr, tex_blocks, format, complete_doc=True)
    # substitute !bc and !ec appropriately:
    # the line before the !bc block must end in [a-zA-z0-9)"...]
    # followed by [\n:.?!,] see the bc_regex_pattern global variable above
    # (problems with substituting !bc and !bt may be caused by
    # missing characters in these two families)
    filestr = re.sub(bc_regex_pattern, r'\g<1>::\n\n', filestr, flags=re.MULTILINE|re.DOTALL)
    # Need a fix for :: appended to special comment lines (---:: -> ---\nCode::)
    filestr = re.sub(r' ---::\n\n', ' ---\nCode::\n\n', filestr)
    filestr = re.sub(r'^!ec\n', '\n', filestr, flags=re.MULTILINE)
    #filestr = re.sub(r'^!ec\n', '', filestr, flags=re.MULTILINE)
    #c = re.compile(r'([a-zA-Z0-9)"])[:.]?\s*?!bt\n', re.DOTALL)
    #filestr = c.sub(r'\g<1>:\n\n', filestr)
    #filestr = re.sub(r'^!bt\n', '.. latex-math::\n\n', filestr, re.MULTILINE)
    #filestr = re.sub(r'^!bt\n', '.. latex::\n\n', filestr, re.MULTILINE)
    if option('rst_mathjax') and (re.search(r'^!bt', filestr, flags=re.MULTILINE) or re.search(r'\\\( .+ \\\)', filestr)):
        # First add MathJax script in the very beginning of the file
        from .html import mathjax_header
        latex = indent_lines(mathjax_header(filestr).lstrip(), 'rst')
        filestr = '\n.. raw:: html\n\n' + latex + '\n\n' + filestr
        # Replace all the !bt parts by raw html directive (make sure
        # the coming block is sufficiently indented, we used 8 chars above)
        filestr = re.sub(bt_regex_pattern, r'\g<1>\n\n.. raw:: html\n\n $$', filestr,
                         flags=re.MULTILINE)
        filestr = re.sub(r'^!et *\n', ' $$\n\n', filestr, flags=re.MULTILINE)
        # Remove inner \[..\] from equations $$ \[ ... \] $$
        filestr = re.sub(r'\$\$\s*\\\[', '$$', filestr)
        filestr = re.sub(r'\\\]\s*\$\$', '$$', filestr)
        # Equation references (ref{...}) must be \eqref{...} in MathJax
        # (note: this affects also (ref{...}) syntax in verbatim blocks...)
        filestr = re.sub(r'\(ref\{(.+?)\}\)', r'\eqref{\g<1>}', filestr)
    else:
        # just use the same substitution for tex blocks as for code blocks:
        filestr = re.sub(bt_regex_pattern, r'\g<1>::\n', filestr,
                         flags=re.MULTILINE)
        #filestr = re.sub(r'^!et *\n', '\n\n', filestr, flags=re.MULTILINE)
        filestr = re.sub(r'^!et *\n', '\n', filestr, flags=re.MULTILINE)
    # Fix: if there are !bc-!ec or other environments after each
    # other without text in between, there is a difficulty with the
    # :: symbol before the code block. In these cases, we get
    # !ec::, !et::, !bbox:: etc. from the above substitutions.
    # We just replace these by empty text.
    filestr = re.sub(r'^(!(b|e)[a-z]+)::', r'\g<1>', filestr,
                     flags=re.MULTILINE)
    # Check that no !bt/!et delimiters survived the substitutions above.
    for pattern in '^!bt', '^!et':
        c = re.compile(pattern, re.MULTILINE)
        m = c.search(filestr)
        if m:
            errwarn("""
Still %s left after handling of code and tex blocks. Problem is probably
that %s is not preceded by text which can be extended with :: (required).
""" % (pattern, pattern))
            _abort()
    # Final fixes
    filestr = fix_underlines_in_headings(filestr)
    # Ensure blank line before and after comments
    filestr = re.sub(r'([.:;?!])\n^\.\. ', r'\g<1>\n\n.. ',
                     filestr, flags=re.MULTILINE)
    filestr = re.sub(r'(^\.\. .+)\n([^ \n]+)', r'\g<1>\n\n\g<2>',
                     filestr, flags=re.MULTILINE)
    # Line breaks interfer with tables and needs a final blank line too.
    # Rewrite the <linebreakpipe> markers (inserted by the linebreak tag)
    # into reST line blocks ("| ..." lines).
    lines = filestr.splitlines()
    inside_block = False
    for i in range(len(lines)):
        if lines[i].startswith('<linebreakpipe>') and not inside_block:
            # First line of a line-block: drop the marker, add spacing.
            inside_block = True
            lines[i] = lines[i].replace('<linebreakpipe> ', '') + '\n'
            continue
        if lines[i].startswith('<linebreakpipe>') and inside_block:
            # Continuation line: prefix with the reST line-block pipe.
            lines[i] = '|' + lines[i].replace('<linebreakpipe>', '')
            continue
        if inside_block and not lines[i].startswith('<linebreakpipe>'):
            # First line after the block terminates the line block.
            inside_block = False
            lines[i] = '| ' + lines[i] + '\n'
    filestr = '\n'.join(lines)
    # Remove too much vertical space
    filestr = re.sub(r'\n\n\n+', '\n\n', filestr)
    return filestr
def fix_underlines_in_headings(filestr):
    """Adjust section-underline markers to match their heading length.

    Expansion of math, verbatim, etc. in headings might lead to a
    wrong number of characters in the ===/---/~~~ line under a heading;
    rewrite such underlines to the exact heading length.

    :param str filestr: the document text
    :return: text with corrected underline lengths
    :rtype: str
    """
    lines = filestr.splitlines()
    section_markers = '===', '---', '~~~'  # hoisted: loop-invariant
    # Examine every (heading, underline) line pair.  Start at 0 so a
    # heading on the very first line is handled too (the previous code
    # started at 1 and silently skipped that case; it never used i-1).
    for i in range(0, len(lines) - 1):
        for section_marker in section_markers:
            if lines[i+1].startswith(section_marker) and \
               ' ' not in lines[i+1] and lines[i].strip():
                # (lines[i] must not be empty, because then ----- may
                # be a horizontal rule)
                if len(lines[i+1]) != len(lines[i]):
                    lines[i+1] = section_marker[0]*len(lines[i])
    filestr = '\n'.join(lines)
    return filestr
def rst_footnotes(filestr, format, pattern_def, pattern_footnote):
    """Rewrite doconce footnotes as reST autonumbered named footnotes.

    Named labels with a leading hash make the footnotes come out with
    running numbers ([2], [3], ...) in the rendered document.
    """
    def _definition(match):
        # Indent the footnote body three spaces under the label line.
        body = indent_lines(match.group('text'), format, ' ' * 3)
        return '.. [#%s] ' % match.group('name') + body.lstrip()

    filestr = re.sub(pattern_def, _definition, filestr,
                     flags=re.MULTILINE | re.DOTALL)
    return re.sub(pattern_footnote, ' [#\g<name>]_', filestr)
def rst_table(table):
    """Typeset a doconce table as a reST simple table.

    *table* is a dict with 'rows' (list of rows, where the sentinel
    ['horizontal rule'] marks a rule line) and optional
    'columns_align'/'headings_align' specs ('r', 'l', 'c' per column).
    """
    # Note: rst and sphinx do not offer alignment of cell
    # entries, everything is always left-adjusted (Nov. 2011)
    # Math in column headings may be significantly expanded and
    # this must be done first
    column_width = table_analysis(table['rows'])
    ncolumns = len(column_width)
    # Default to centered columns; '|' border chars are irrelevant here.
    column_spec = table.get('columns_align', 'c'*ncolumns).replace('|', '')
    heading_spec = table.get('headings_align', 'c'*ncolumns).replace('|', '')
    # Map alignment letters to the str methods that implement them.
    a2py = {'r': 'rjust', 'l': 'ljust', 'c': 'center'}
    s = ''  # '\n'
    for i, row in enumerate(table['rows']):
        #s += '    '  # indentation of tables
        if row == ['horizontal rule']:
            # Rule rows become the ===== separator lines of the table.
            for w in column_width:
                s += '='*w + ' '
        else:
            # check if this is a headline between two horizontal rules:
            if i == 1 and \
               table['rows'][i-1] == ['horizontal rule'] and \
               table['rows'][i+1] == ['horizontal rule']:
                headline = True
            else:
                headline = False
            for w, c, ha, ca in \
                    zip(column_width, row, heading_spec, column_spec):
                if headline:
                    s += getattr(c, a2py[ha])(w) + ' '
                else:
                    s += getattr(c, a2py[ca])(w) + ' '
        s += '\n'
    s += '\n'
    return s
def rst_author(authors_and_institutions, auth2index,
               inst2index, index2inst, auth2email):
    """Typeset the author list for rst output.

    With --rst_uio only the first author (and email) is emitted as the
    responsible person of a uio-meta directive; otherwise all authors
    go into an ':Authors:' field. Institutions are skipped in rst.

    NOTE(review): the uio-meta template below lost its internal
    indentation in this copy of the source — verify the directive-body
    layout against upstream.
    """
    if option('rst_uio'):
        if authors_and_institutions:
            # Use first author and email
            responsible = authors_and_institutions[0][0]
            email = authors_and_institutions[0][2]
            text = """
.. uio-meta::
:responsible-name: %s
""" % responsible
            if email:
                text += ' :responsible-email: %s\n\n' % email
        else:
            errwarn('*** error: with --rst_uio there must be an AUTHOR:')
            errwarn(' field with (at least) one author w/email who will be')
            errwarn(' listed as the resposible under uio-meta::')
            _abort()
    else:
        authors = []
        for author, i, email in authors_and_institutions:
            if email:
                # Spell out "at" to avoid live mailto rendering.
                email = email.replace('@', ' at ')
                authors.append(author + ' (%s)' % email)
            else:
                authors.append(author)
        text = ':Authors: ' + ', '.join(authors)  # (text is already r-stripped in typeset_authors)
    # we skip institutions in rst
    return text
def ref_and_label_commoncode(section_label2title, format, filestr):
    """Common ref/label handling for rst and sphinx.

    Makes duplicate section titles unique (reST links are title-based),
    converts TITLE: to a top-level section, inserts reST label targets
    before labelled headings, and strips remaining label{} commands.
    Mutates *section_label2title* when titles are renamed.
    """
    filestr = fix_ref_section_chapter(filestr, format)
    # Deal with the problem of identical titles, which makes problem
    # with non-unique links in reST: add a counter to the title
    debugtext = ''
    section_pattern = r'^\s*(={3,9})(.+?)(={3,9})(\s*label\{(.+?)\})?'
    all_sections = re.findall(section_pattern, filestr, flags=re.MULTILINE)
    # First count the no of titles with the same wording
    titles = {}
    max_heading = 1  # track the top heading level for correct TITLE typesetting
    for heading, title, dummy2, dummy3, label in all_sections:
        entry = None if label == '' else label
        if title in titles:
            titles[title].append(entry)
        else:
            titles[title] = [entry]
        max_heading = max(max_heading, len(heading))
    # Typeset TITLE so that it gets the highest+1 (but no higher) section level
    max_heading += 2  # one level up (2 =)
    max_heading = min(max_heading, 9)
    pattern = r'^TITLE:\s*(.+)$'
    if format == 'sphinx':
        # Title cannot be more than 63 chars...
        m = re.search(pattern, filestr, flags=re.MULTILINE)
        if m:
            title = m.group(1).strip()
            if len(title) > 63:
                errwarn('*** error: sphinx title cannot be longer than 63 characters')
                errwarn(' current title: "%s" (%d characters)' % (title, len(title)))
                _abort()
    filestr = re.sub(pattern, '.. Document title:\n\n%s \g<1> %s\n' %
                     ('='*max_heading, '='*max_heading),
                     filestr, flags=re.MULTILINE)
    # Make new titles
    title_counter = {}  # count repeated titles (need to append counter to make unique links)
    sections = []
    for heading, title, dummy2, dummy3, label in all_sections:
        label = None if label == '' else label
        if len(titles[title]) > 1:
            if title in title_counter:
                title_counter[title] += 1
            else:
                title_counter[title] = 1
            # Add much whitespace so we can recognize the titles after
            # formats are compiled and remove the number
            new_title = title + ' (%d) ' % title_counter[title]
            sections.append((heading, new_title, label, title))
            if label in section_label2title:
                section_label2title[label] = new_title
        else:
            sections.append((heading, title, label, title))
    # Make replacements
    for heading, title, label, old_title in sections:
        if title != old_title:
            debugtext += '\nchanged title: %s -> %s\n' % (old_title, title)
        # Avoid trouble with \t, \n in replacement
        title = title.replace('\\', '\\\\')
        # The substitution depends on whether we have a label or not
        if label is not None:
            title_pattern = r'%s\s*%s\s*%s\s*label\{%s\}' % (heading, re.escape(old_title), heading, label)
            # title may contain ? () etc., that's why we take re.escape
            replacement = '.. _%s:\n\n' % label + r'%s %s %s' % \
                          (heading, title, heading)
        else:
            title_pattern = r'%s\s*%s\s*%s' % (heading, re.escape(old_title), heading)
            replacement = r'%s %s %s' % (heading, title, heading)
        filestr, n = re.subn(title_pattern, replacement, filestr, count=1)
        # Defensive check (with count=1, re.subn performs at most one
        # substitution, so this branch should be unreachable).
        if n > 1:
            raise ValueError('Replaced more than one title. BUG!')
    # remove label{...} from output
    #filestr = re.sub(r'^label\{.+?\}\s*$', '', filestr, flags=re.MULTILINE)
    cpattern = re.compile(r'^label\{[^}]+?\}\s*$', flags=re.MULTILINE)
    filestr = cpattern.sub('', filestr)
    filestr = re.sub(r'label\{[^}]+?\}', '', filestr)  # all the remaining
    debugpr(debugtext)
    return filestr
def rst_ref_and_label(section_label2title, format, filestr):
    """Replace doconce ref{}/label{} constructs with reST cross references."""
    filestr = ref_and_label_commoncode(section_label2title, format, filestr)
    # Section labels become links via their (now unique) title text.
    for label, title in section_label2title.items():
        filestr = filestr.replace('ref{%s}' % label, '`%s`_' % title)
    from .common import ref2equations
    filestr = ref2equations(filestr)
    # Any remaining ref{x} falls back to a plain hyperlink `x`_.
    return re.sub(r'ref\{(.+?)\}', '`\g<1>`_', filestr)
def rst_bib(filestr, citations, pubfile, pubdata, numbering=True):
    """
    Replace doconce citations and bibliography with reST syntax.
    If numbering is True, the keys used in the bibliography are
    replaced by numbers (RefX). This will often look better.
    """
    if not citations:
        return filestr
    filestr = cite_with_multiple_args2multiple_cites(filestr)
    if numbering:
        # Find max no of digits
        n = len(str(max(citations.values())))
        cite = '[Ref%%0%dd]' % n  # cannot have blanks in ref label
    for label in citations:
        if numbering:
            filestr = filestr.replace('cite{%s}' % label,
                                      cite % citations[label] + '_')
        else:
            filestr = filestr.replace('cite{%s}' % label, '[%s]_' % label)
    if pubfile is not None:
        # Could use rst format, but we stick to the common doconce format
        bibtext = bibliography(pubdata, citations, format='rst')
        if numbering:
            for label in citations:
                # NOTE(review): this try/except is Python-2 legacy;
                # str.replace cannot raise UnicodeDecodeError in Python 3,
                # and 'UnicodeDecodeError: ' + e would itself raise
                # TypeError (str + exception). Effectively dead code now.
                try:
                    bibtext = bibtext.replace(
                        '[%s]' % label, cite % citations[label])
                except UnicodeDecodeError as e:
                    if "can't decode byte" in str(e):
                        try:
                            bibtext = bibtext.replace('[%s]' % label,
                                                      cite % citations[label])
                        except UnicodeDecodeError as e:
                            errwarn('UnicodeDecodeError: ' + e)
                            errwarn('*** error: problems in %s' % pubfile)
                            errwarn(' with key ' + label)
                            errwarn(' tried to do decode("utf-8"), but it did not work')
                    else:
                        errwarn(e)
                        errwarn('*** error: problems in %s' % pubfile)
                        errwarn(' with key ' + label)
                        _abort()
        filestr = re.sub(r'^BIBFILE:.+$', bibtext, filestr, flags=re.MULTILINE)
    return filestr
def rst_index_bib(filestr, index, citations, pubfile, pubdata):
    """Handle index and bibliography for rst (index entries are dropped)."""
    # reStructuredText has no native index/glossary: strip idx{} entries.
    filestr = re.sub(r'idx\{.+?\}\n?', '', filestr)
    return rst_bib(filestr, citations, pubfile, pubdata)
def rst_box(block, format, text_size='normal'):
    """Typeset a box environment; in rst the box markup is dropped."""
    return """
.. The below box could be typeset as .. admonition: Attention
but we have decided not to do so since the admon needs a title
(the box formatting is therefore just ignored)
%s
""" % block
    # return rst_quote(block, format, text_size)
    # return """
    #.. addmonition:: some title
    #%s
    #""" % (indent_lines(block, format, ' '*4))
def rst_quote(block, format, text_size='normal'):
    """Typeset a quote environment as an indented block."""
    # Insert empty comment to distinguish from possibly
    # previous list, code, etc.
    return """
..
%s
""" % (indent_lines(block, format, ' '*4))
# Admon:
# reST has native admons, but only the warning applies color.
def rst_admon(block, format, title='Admonition', text_size='normal'):
    """Typeset a generic reST admonition directive with the given title."""
    if title == '' or title.lower() == 'none':
        title = 'Notice'  # dummy title: with title as '', nothing comes out
    if title[-1] in ('!', ':', '?', ';', '.'):
        # : is always added to the title - remove other punctuation
        title = title[:-1]
    return """
.. admonition:: %s
%s
""" % (title, indent_lines(block, format, ' '*3))
def rst_summary(block, format, title='Summary', text_size='normal'):
    """Typeset a summary environment as an rst admonition."""
    return rst_admon(block, format, title, text_size)
def rst_block(block, format, title='', text_size='normal'):
    """Typeset a block environment as an rst admonition (no title by default)."""
    return rst_admon(block, format, title, text_size)
def rst_warning(block, format, title='Warning', text_size='normal'):
    """Typeset a warning; plain 'Warning...' titles map to reST's native
    (colored) warning directive, other titles to a generic admonition."""
    if title.startswith('Warning'):
        # Use pre-defined admonition that coincides with our needs
        return """
.. warning::
%s
""" % (indent_lines(block, format, ' '*4))
    else:
        return rst_admon(block, format, title, text_size)
def rst_question(block, format, title='Question', text_size='normal'):
    """Typeset a question environment as an rst admonition."""
    return rst_admon(block, format, title, text_size)
def rst_notice(block, format, title='Notice', text_size='normal'):
    """Typeset a notice; plain 'Notice...' titles map to reST's native
    note directive, other titles to a generic admonition."""
    if title.startswith('Notice'):
        return """
.. note::
%s
""" % (indent_lines(block, format, ' '*3))
    else:
        return rst_admon(block, format, title, text_size)
def rst_quiz(quiz):
    """Typeset a quiz (question + choices) for rst/sphinx output.

    Choice explanations become :abbr: tooltips; explanations containing
    blocks (figures/code/math) cannot live in a tooltip and are dropped
    with a warning.

    NOTE(review): block nesting was reconstructed from an
    indentation-stripped copy of the source — verify against upstream.
    """
    import string
    question_prefix = quiz.get('question prefix',
                               option('quiz_question_prefix=', 'Question:'))
    common_choice_prefix = option('quiz_choice_prefix=', 'Choice')
    quiz_expl = option('quiz_explanations=', 'on')
    # Sphinx tooltop: :abbr:`TERM (explanation in tooltip)`
    # Can e.g. just have the right answer number as tooltip!
    text = '\n\n'
    if 'new page' in quiz:
        text += '.. !split\n%s\n%s' % (quiz['new page'], '-'*len(quiz['new page']))
    text += '.. begin quiz\n\n'
    # Don't write Question: ... if inside an exercise section
    if quiz.get('embedding', 'None') in ['exercise',]:
        pass
    else:
        text += '\n\n'
        if question_prefix:
            text += '**%s** ' % (question_prefix)
        if quiz['question'].lstrip().startswith('..'):
            # block, add extra \n
            text += '\n\n'
    text += quiz['question'] + '\n\n\n'
    # List choices as paragraphs
    for i, choice in enumerate(quiz['choices']):
        #choice_no = i+1
        choice_no = string.ascii_uppercase[i]
        # choice[0] states whether the choice is right or wrong.
        answer = choice[0].capitalize() + '!'
        choice_prefix = common_choice_prefix
        if 'choice prefix' in quiz:
            if isinstance(quiz['choice prefix'][i], basestring):
                choice_prefix = quiz['choice prefix'][i]
        if choice_prefix == '' or choice_prefix[-1] in ['.', ':', '?']:
            pass  # don't add choice number/letter
        else:
            choice_prefix += ' %s:' % choice_no
        expl = ''
        # Optional third element of a choice is its explanation.
        if len(choice) == 3 and quiz_expl == 'on':
            expl = choice[2]
        if '.. figure::' in expl or 'math::' in expl or '.. code-block::' in expl:
            errwarn('*** warning: quiz explanation contains block (fig/code/math)')
            errwarn(' and is therefore skipped')
            errwarn(expl + '\n')
            expl = ''  # drop explanation when it needs blocks
        # Should remove markup (tooltips must be plain text)
        pattern = r'`(.+?) (<https?.+?)>`__'  # URL
        expl = re.sub(pattern, '\g<1> (\g<2>)', expl)
        pattern = r'``(.+?)``'  # verbatim
        expl = re.sub(pattern, '\g<1>', expl)
        pattern = r':math:`(.+?)`'  # inline math
        expl = re.sub(pattern, '\g<1>', expl)  # mimic italic....
        pattern = r':\*\*(.+?)\*\*'  # bold
        expl = re.sub(pattern, '\g<1>', expl, flags=re.DOTALL)
        pattern = r':\*(.+?)\*'  # emphasize
        expl = re.sub(pattern, '\g<1>', expl, flags=re.DOTALL)
        tooltip = ' '.join(expl.splitlines())
        if expl:
            text += '**%s** %s\n\n:abbr:`? (%s)` :abbr:`# (%s)`\n\n' % (choice_prefix, choice[1], answer, tooltip)
        else:  # no explanation
            text += '**%s** %s\n\n:abbr:`? (%s)`\n\n' % (choice_prefix, choice[1], answer)
    text += '.. end quiz\n\n'
    return text
def define(FILENAME_EXTENSION,
           BLANKLINE,
           INLINE_TAGS_SUBST,
           CODE,
           LIST,
           ARGLIST,
           TABLE,
           EXERCISE,
           FIGURE_EXT,
           CROSS_REFS,
           INDEX_BIB,
           TOC,
           ENVIRS,
           QUIZ,
           INTRO,
           OUTRO,
           filestr):
    """Register every rst handler in the format-dispatch dictionaries."""
    # all arguments are dicts and accept in-place modifications (extensions)
    FILENAME_EXTENSION['rst'] = '.rst'
    BLANKLINE['rst'] = '\n'
    encoding = 'utf-8'  # 'latin-1'
    # Inline tag -> replacement template (regex replacement string) or
    # callable taking the match object.
    INLINE_TAGS_SUBST['rst'] = {
        'math': r'\g<begin>\g<subst>\g<end>',
        'math2': r'\g<begin>\g<puretext>\g<end>',
        # math and math2 are redefined below if --rst_mathjax
        #'math': r'\g<begin>:math:`\g<subst>`\g<end>', # sphinx
        #'math2': r'\g<begin>:math:`\g<latexmath>`\g<end>',
        'emphasize': None,  # => just use doconce markup (*emphasized words*)
        'bold': r'\g<begin>**\g<subst>**\g<end>',
        'verbatim': r'\g<begin>``\g<subst>``\g<end>',
        'label': r'\g<subst>',  # should be improved, rst has cross ref
        'reference': r'\g<subst>',
        # colortext cannot employ pure HTML code in rst2html output;
        # use bold instead of a <font> tag.
        #'colortext': r'<font color="\g<color>">\g<text></font>',
        'colortext': r'**\g<text>**',
        # Use anonymous hyperlink references to avoid warnings if the link
        # name appears twice
        #'linkURL': r'\g<begin>`\g<link> <\g<url>>`__\g<end>',
        #'linkURL': r'\g<begin>`\g<link>`_\g<end>' + '\n\n.. ' + r'__\g<link>: \g<url>' + '\n\n',
        'linkURL2': r'`\g<link> <\g<url>>`__',
        'linkURL3': r'`\g<link> <\g<url>>`__',
        'linkURL2v': r'`\g<link> <\g<url>>`__',  # no verbatim, does not work well
        'linkURL3v': r'`\g<link> <\g<url>>`__',  # same
        'plainURL': r'`<\g<url>>`_',
        'inlinecomment': r'color{red}{(**\g<name>**: \g<comment>})',
        # the replacement string differs, depending on the match object m:
        # (note len(m.group('subst')) gives wrong length for latin-1 strings,
        # seems to work for utf-8, if problems: replace lambda function
        # with an ordinary function where you can debug and test!)
        # Headings: underline the title with the proper marker character.
        'chapter': lambda m: '%s\n%s' % (m.group('subst'), '%'*len(m.group('subst'))),
        'section': lambda m: '%s\n%s' % (m.group('subst'), '='*len(m.group('subst'))),
        'subsection': lambda m: '%s\n%s' % (m.group('subst'), '-'*len(m.group('subst'))),
        'subsubsection': lambda m: '%s\n%s\n' % (m.group('subst'), '~'*len(m.group('subst'))),
        'paragraph': r'**\g<subst>**' + '\n',  # extra newline
        'abstract': rst_abstract,
        #'title': r'======= \g<subst> =======\n',  # doconce top section
        'title': None,  # taken care of in ref_and_label_commoncode
        'date': r':Date: \g<subst>\n',
        'author': rst_author,
        'figure': rst_figure,
        'movie': rst_movie,
        #'comment': '.. %s',  # rst does not like empty comment lines:
        # so therefore we introduce a function to remove empty comment lines
        # (we insert an extra blank first to be safe)
        'comment': lambda c: '' if c.isspace() or c == '' else '\n.. %s\n' % c,
        #'linebreak': r'| \g<text>',  # does not work: interfers with tables
        # and requires a final blank line after block
        'linebreak': r'<linebreakpipe> \g<text>',  # fixed in rst_code/sphinx_code as a hack
        'footnote': rst_footnotes,
        'non-breaking-space': ' |nbsp| ',
        'horizontal-rule': '---------',
        'ampersand2': r' \g<1>&\g<2>',
    }
    if option('rst_mathjax'):
        # rst2html conversion requires four backslashes here for one of them
        # to survive
        INLINE_TAGS_SUBST['rst']['math'] = r'\g<begin>\\\\( \g<subst> \\\\)\g<end>'
        INLINE_TAGS_SUBST['rst']['math2'] = r'\g<begin>\\\\( \g<latexmath> \\\\)\g<end>'
    ENVIRS['rst'] = {
        'quote': rst_quote,
        'warning': rst_warning,
        'question': rst_question,
        'notice': rst_notice,
        'summary': rst_summary,
        'block': rst_block,
        'box': rst_box,
    }
    CODE['rst'] = rst_code  # function for typesetting code
    LIST['rst'] = {
        'itemize':
        {'begin': '', 'item': '*', 'end': '\n'},
        # lists must end with a blank line - we insert one extra,
        'enumerate':
        {'begin': '', 'item': '%d.', 'end': '\n'},
        'description':
        {'begin': '', 'item': '%s', 'end': '\n'},
        'separator': '\n',
    }
    from .common import DEFAULT_ARGLIST
    ARGLIST['rst'] = DEFAULT_ARGLIST
    FIGURE_EXT['rst'] = {
        'search': ('.png', '.gif', '.jpg', '.jpeg', '.pdf', '.eps', '.ps'),
        'convert': ('.png', '.gif', '.jpg')}
    CROSS_REFS['rst'] = rst_ref_and_label
    INDEX_BIB['rst'] = rst_index_bib
    TABLE['rst'] = rst_table
    EXERCISE['rst'] = plain_exercise
    # Localized "Table of contents" heading for the contents directive.
    TOC['rst'] = lambda s, f: '.. contents:: %s\n :depth: 2' % globals.locale_dict[globals.locale_dict['language']].get('toc', 'Table of contents')
    QUIZ['rst'] = rst_quiz
    # Prepend the doconce header and command
    INTRO['rst'] = '.. ' + _doconce_header + '\n'
    INTRO['rst'] += '.. ' + _doconce_command % ('rst', globals.filename, ' '.join(sys.argv[1:])) + '\n'
    INTRO['rst'] += '\n'
    # https://stackoverflow.com/questions/11830242/non-breaking-space
    from .common import INLINE_TAGS
    if re.search(INLINE_TAGS['non-breaking-space'], filestr):
        # Define the |nbsp| substitution used by the tag above; it can
        # only be attached to a document that has a title.
        nbsp = ('\n'
                '.. |nbsp| unicode:: 0xA0\n'
                ' :trim:\n'
                '\n')
        if 'TITLE:' not in filestr:
            from . import common
            if globals.format in ('rst', 'sphinx'):
                errwarn('*** error: non-breaking space character ~ is used,')
                errwarn(' but this will give an error when the document does')
                errwarn(' not have a title.')
                _abort()
        else:
            INTRO['rst'] += nbsp
/ConStrain-0.3.0.tar.gz/ConStrain-0.3.0/constrain/lab/transformation.py | # standard libraries
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pydna.dseqrecord import Dseqrecord
# for plotting with pyplot
import matplotlib.pyplot as plt
def ng_to_nmol(ng: float, bp: float):
    """Convert a DNA amount from nanograms to nanomoles.

    Uses an average molar weight of 650 g/mol per base pair, i.e.
    nmol = ng / (bp * 650).  Useful for computing plasmid:insert
    molar ratios in transformation mixes.

    Parameters
    ----------
    ng : float
        amount of DNA in nanograms (must be positive)
    bp : float
        length of the DNA in base pairs (must be positive)

    Returns
    -------
    float or str
        the amount in nanomoles, or the legacy sentinel string
        ``"non-valid_input"`` when either argument is not positive.
    """
    if not (ng > 0 and bp > 0):
        return "non-valid_input"
    return ng / (bp * 650)
def ODtime(initialOD: float, time: float, td: float = 0.5):
    """Project an optical density forward in time by exponential growth.

    Implements ``round(initialOD * 2 ** (time * td), 3)``; note that
    *td* is therefore used as a growth rate (doublings per hour, h^-1),
    not as a doubling period.

    Parameters
    ----------
    initialOD : float
        starting OD (must be >= 0)
    time : float
        elapsed time in hours (must be >= 0)
    td : float
        doublings per hour (h^-1)

    Returns
    -------
    float or str
        the projected OD rounded to 3 decimals, or the legacy sentinel
        string ``"non-valid_input"`` for negative inputs.
    """
    if not (initialOD >= 0 and time >= 0):
        return "non-valid_input"
    return round(initialOD * 2 ** (time * td), 3)
def time_to_inculate(
        initialOD=0.0025, td=0.4, verbose=False, transformation_time: int = 12
):
    """Plot growth curves and report when to inoculate a starter culture
    so that it reaches OD = 1 at the chosen transformation time.

    Shows a matplotlib figure comparing growth at td = 0.3, the given
    *td*, and 0.5, then prints when the culture hits OD = 1 and when to
    inoculate the day before.  Returns None.

    NOTE: "inculate" is a typo for "inoculate" kept for backward
    compatibility with existing callers.

    Parameters
    ----------
    initialOD : float
        optical density right after inoculation
    td : float
        growth parameter forwarded to ODtime (doublings per hour)
    verbose : bool
        print extra background information
    transformation_time : int
        hour of day at which the transformation is planned

    Notes
    -----
    For a successful S. cerevisiae transformation, 1-2 x 10^7 cells/ml
    (roughly OD 1) should be used; see the printed guidance.
    """
    if verbose:
        print("GOAL: to get enough cells in exponential phase for transformation")
        print("Assumed that: ")
        print(
            "- transformation time "
            + str(transformation_time)
            + " (reached OD=1 the day after)"
        )
    # Simulate 36 hours of growth for three growth rates.
    times = list(range(0, 37))
    ods_025_3 = [ODtime(initialOD, time, td=0.3) for time in times]
    ods_025_input = [ODtime(initialOD, time, td=td) for time in times]
    ods_025_5 = [ODtime(initialOD, time, td=0.5) for time in times]
    fig = plt.figure()
    ax = plt.axes()
    # ax.set_xlim(lims)
    ax.set_ylim([0, 2.2])
    ax.plot(times, [2] * len(times), "r-", label="end of exponential phase")
    ax.plot(times, [1] * len(times), "k-", label="target")
    ax.plot(times, ods_025_3, label="iOD=" + str(initialOD) + ", td=0.3")
    ax.plot(times, ods_025_input, label="iOD=" + str(initialOD) + ", td=" + str(td))
    ax.plot(times, ods_025_5, label="iOD=" + str(initialOD) + ", td=0.5")
    plt.xlabel("time, h^-1")
    plt.ylabel("OD")
    plt.legend()
    plt.show()

    def inoculation_time(times, ods):
        # Report when the culture reaches OD = 1 and derive the
        # corresponding inoculation time.
        def find_closest(A, target):
            # Index of the element of sorted array A closest to target.
            # A must be sorted
            idx = A.searchsorted(target)
            idx = np.clip(idx, 1, len(A) - 1)
            left = A[idx - 1]
            right = A[idx]
            idx -= target - left < right - target
            return idx

        # In how many hours will the cells have reached OD 1?
        hours_to_OD1 = times[find_closest(np.array(ods), 1)]
        print("Hours to OD = 1: \t" + str(hours_to_OD1) + " hours")
        ### When do u need to innoculate?
        when_to_inoculate = transformation_time - hours_to_OD1
        if when_to_inoculate < 0:
            # Wraps around midnight: inoculation happens the day before.
            print("Transformation time has been set to ", transformation_time)
            print(
                "Time of inoculation: \t"
                + str(when_to_inoculate + 24)
                + " (the day before)"
            )
        else:
            print("Transformation time has been set to ", transformation_time)
            print(
                "Time of inoculation: \t" + str(when_to_inoculate) + "(the day before)"
            )
        # If i innoculate now?
        print(
            "\nIf you innoculate now, the cells will have reached OD= 1 by: ",
            datetime.now() + timedelta(hours=hours_to_OD1),
        )

    inoculation_time(times, ods_025_input)
    if verbose:
        print()
        print(
            "How to hit initialOD = 0.0025 (e.g. from colony)? Guess. Inoculate 9/10 + 1/10 'normal' colony per ~10 ml"
        )
        print("How much volume? ~2 ml per transformation")
def transformation_mix(
    reaction_names, reaction_participants, wanted_amounts, water_dna_p_reac, media=""
):
    """Build a pandas DataFrame describing the transformation mixes.

    The frame shows which parts (and how much of each, in ul) should be
    mixed for each reaction, including positive and negative controls.

    Parameters
    ----------
    reaction_names : list of str
        One name per transformation reaction.
    reaction_participants : list of list
        For each reaction, the pydna.Dseqrecord / Bio.SeqRecord parts
        that go into it. Each part must carry
        ``annotations["batches"][0]["concentration"]`` in ng/ul.
    wanted_amounts : dict
        Maps part name -> wanted amount in nmol.
    water_dna_p_reac : int or float
        Total volume (ul) of water + DNA per reaction.
    media : list of str, optional
        Plating media per reaction, e.g. ``['LB_AMP'] * 5``. With the
        default ``""`` no 'plate on' column is added.

    Returns
    -------
    pandas.DataFrame
        One row per reaction with the volume (ul) of each part, the
        water volume to top up with, and optionally the plating medium.

    Raises
    ------
    KeyError
        If a participant's name is missing from ``wanted_amounts``
        (previously this surfaced as a cryptic TypeError).

    Examples
    --------
    >>> # 1. Name the reactions
    >>> reaction_names = ["insert", "n.ctr", "n.ctr", "n.ctr", "p. ctr"]
    >>> # 2. Add the reaction participants
    >>> reaction_participants = [
    ...     [vector, gRNA1_pcr_prod, gRNA2_pcr_prod],  # the insert we want
    ...     [vector],          # negative control
    ...     [gRNA1_pcr_prod],  # negative control
    ...     [gRNA2_pcr_prod],  # negative control
    ...     [LEU_plasmid],     # positive control
    ... ]
    >>> # 3. Calculate nmol amounts per part name
    >>> wanted_amounts = {'p0056\\(pESC-LEU-ccdB-USER)': ng_to_nmol(ng=15, bp=len(vector)),
    ...                   'ATF1': ng_to_nmol(ng=30, bp=len(gRNA1_pcr_prod)),
    ...                   'CroCPR': ng_to_nmol(ng=30, bp=len(gRNA1_pcr_prod)),
    ...                   'LEU_plasmid': ng_to_nmol(ng=10, bp=len(LEU_plasmid))}
    >>> # 4. Media the transformants are plated on (5 transformations)
    >>> media = ['LB_AMP'] * 5
    >>> transformation_mix(reaction_names, reaction_participants,
    ...                    wanted_amounts, water_dna_p_reac=7, media=media)
         name  l4_I06  l4_I07  l4_I08  p1_F06  water plate on
    0  insert     0.1     0.6     0.6     NaN    5.7   LB_AMP
    1   n.ctr     0.1     NaN     NaN     NaN    6.9   LB_AMP
    2   n.ctr     NaN     0.6     NaN     NaN    6.4   LB_AMP
    3   n.ctr     NaN     NaN     0.6     NaN    6.4   LB_AMP
    4  p. ctr     NaN     NaN     NaN     0.1    6.9   LB_AMP
    """
    df_comb = pd.DataFrame()
    for name, parts in zip(reaction_names, reaction_participants):
        part_names = [part.name for part in parts]
        # Concentration (ng/ul) from the first registered batch.
        concentrations = [
            part.annotations["batches"][0]["concentration"] for part in parts
        ]
        sizes = [len(part) for part in parts]  # in bp
        # mass (ng) = amount (nmol) * size (bp) * 650 ng/(nmol*bp).
        # A missing part name now raises KeyError instead of the old
        # TypeError caused by the "" fallback.
        part_mass = [
            round(wanted_amounts[pname] * int(size) * 650, 1)
            for pname, size in zip(part_names, sizes)
        ]
        # volume (ul) = mass (ng) / concentration (ng/ul)
        part_volume = [
            round(mass / con, 1) for mass, con in zip(part_mass, concentrations)
        ]
        df = pd.DataFrame(data=dict(zip(part_names, part_volume)), index=[name])
        df_comb = pd.concat([df_comb, df], sort=False)

    # Top up each reaction with water to the requested total volume
    # (NaN entries - parts not in a given reaction - are skipped by sum).
    df_comb["water"] = water_dna_p_reac - df_comb.sum(axis=1)
    df_comb = df_comb.reset_index().rename(columns={"index": "name"})
    if media != "":
        df_comb["plate on"] = media
    return df_comb
def wanted_mass(wanted_moles, size):
    """Convert a molar amount of double-stranded DNA into mass.

    Parameters
    ----------
    wanted_moles : float
        wanted amount in nmol
    size : int
        fragment length in bp

    Returns
    -------
    float
        mass in ng (nmol * bp * 650 ng/(nmol*bp)), rounded to 1 decimal
    """
    # 650 is the average mass of one base pair in ng/(nmol*bp).
    return round(wanted_moles * size * 650, 1)
def wanted_volume(wanted_mass, actual_concentration):
    """Convert a wanted DNA mass into a pipetting volume.

    Parameters
    ----------
    wanted_mass : float
        wanted mass in ng
    actual_concentration : float
        stock concentration in ng/ul

    Returns
    -------
    float
        volume in ul, rounded to 1 decimal
    """
    return round(wanted_mass / actual_concentration, 1)
def transformation_partitipants(reaction_participants, amnt=0.0005, sgRNA_plasmid_name=None, sgRNA_plasmid_conc=None):
    """Return a dict with the amounts needed in a transformation reaction.

    Parameters
    ----------
    reaction_participants : list of list of Dseqrecord
        List of lists of Dseqrecord objects representing the reaction
        participants.
    amnt : float, optional
        Amount used for every participant other than
        ``sgRNA_plasmid_name``. Default is 0.0005.
    sgRNA_plasmid_name : str, optional
        Name of the sgRNA plasmid. If not provided, ``amnt`` is used for
        all reaction participants.
    sgRNA_plasmid_conc : float, optional
        Amount used for the sgRNA plasmid.

    Returns
    -------
    dict
        Maps participant name -> amount needed for the transformation
        reaction.

    Notes
    -----
    Side effect: every entry of ``reaction_participants`` is re-wrapped
    in place as a ``Dseqrecord``.
    """
    # Per-reaction amounts, mirroring the nested shape of
    # reaction_participants.
    wanted_amounts = [[] for _ in reaction_participants]
    for reac_no, reac in enumerate(reaction_participants):
        for parti in reac:
            # The sgRNA plasmid gets its own amount; everything else
            # (including the case where no sgRNA name is given) gets amnt.
            if sgRNA_plasmid_name is not None and parti.name == sgRNA_plasmid_name:
                wanted_amounts[reac_no].append(sgRNA_plasmid_conc)
            else:
                wanted_amounts[reac_no].append(amnt)

    # Re-wrap the participants as Dseqrecords (normalizing .name) and
    # build the name -> amount mapping.
    new_dict_with_wanted_amounts = dict()
    for i in range(len(reaction_participants)):
        for j in range(len(reaction_participants[i])):
            reaction_participants[i][j] = Dseqrecord(reaction_participants[i][j])
            new_dict_with_wanted_amounts[reaction_participants[i][j].name] = wanted_amounts[i][j]
    return new_dict_with_wanted_amounts
def calculate_volume_and_total_concentration(amplicons, amplicon_parts_amounts_total, n=1):
    """Calculate pipetting volumes and the total concentration of a set
    of DNA parts, printing a small table along the way.

    Parameters
    ----------
    amplicons : list
        Amplicon objects; each must have a ``name``, support ``len()``
        and carry ``annotations['batches'][0]`` with 'concentration'
        (ng/ul) and 'location'.
    amplicon_parts_amounts_total : dict
        Maps amplicon name -> wanted amount in nmol.
    n : int, optional
        Multiplier applied to each volume (e.g. number of reactions).
        Default is 1.

    Returns
    -------
    volumes : list
        Volume (ul) of each amplicon.
    ngs : list
        Mass (ng) contributed by each amplicon.
    total_conc : float
        Total concentration (ng/ul) of the pooled volumes.

    Raises
    ------
    ZeroDivisionError
        If ``amplicons`` is empty or all volumes round to zero.
    """
    print('name, volume, concentration, location')
    volumes = []
    ngs = []
    for amp in amplicons:
        w_moles = amplicon_parts_amounts_total[amp.name]
        w_mass = wanted_mass(wanted_moles=w_moles, size=len(amp))
        act_conc = amp.annotations['batches'][0]['concentration']
        w_volume = wanted_volume(w_mass, act_conc) * n
        volumes.append(w_volume)
        ngs.append(w_volume * act_conc)
        print(amp.name, w_volume, act_conc, '\t', amp.annotations['batches'][0]['location'])

    # Expected totals for the pooled mix, computed once and reused in
    # the printout (previously the prints re-summed the lists).
    total_vol = sum(volumes)
    total_ngs = sum(ngs)
    total_conc = total_ngs / total_vol
    print('total volume: ', total_vol)
    print()
    print('total ngs: ', total_ngs)
    print('total conc: ', total_conc)
    return volumes, ngs, total_conc
def pool_parts(amplicons: list, part_names: list, part_amounts: list, pool_names: list, pool_lengths) -> dict:
    """Pool amplicon parts and return a dictionary of pooled volumes.

    Parameters
    ----------
    amplicons : list
        Amplicon objects; each must have a ``name``, a ``template.name``
        and ``annotations['batches'][0]`` with 'concentration' (ng/ul)
        and 'location'.
    part_names : list
        Names of the parts to pool; amplicons whose template name is
        not listed here are ignored.
    part_amounts : list
        Wanted amount (nmol) for each entry of ``part_names``.
    pool_names : list
        Pool names, indexed in parallel with ``pool_lengths``.
    pool_lengths : list
        Length (bp) used for each pool.

    Returns
    -------
    dict
        Maps template name -> {amplicon name -> {'volume_to_mix',
        'location', 'concentration'}}.
    """
    pooled_volumes = {}
    for amplicon in amplicons:
        template_name = amplicon.template.name
        # Only pool parts that were asked for.
        if template_name not in part_names:
            continue

        amount = part_amounts[part_names.index(template_name)]
        length = pool_lengths[pool_names.index(template_name)]
        batch = amplicon.annotations['batches'][0]
        # volume (ul) = length (bp) * 650 * amount (nmol) / conc (ng/ul)
        volume = (length * 650 * amount) / batch['concentration']

        entry = {
            'volume_to_mix': round(volume, 1),
            'location': batch['location'],
            'concentration': batch['concentration'],
        }
        pooled_volumes.setdefault(template_name, {})[amplicon.name] = entry
    return pooled_volumes
def print_pooled_parts(pooled_volumes: dict) -> None:
"""
print_pooled_parts(pooled_volumes)
Prints the pooled parts and calculated concentrations.
Parameters
----------
pooled_volumes : dict
Dictionary containing the pooled volumes for each amplicon part.
Returns
-------
None
"""
print("To be pooled together")
con_per_part = {}
for key in pooled_volumes:
print(key)
total_vol = 0
total_con = 0
total_ng = 0
for ke in pooled_volumes[key]:
print(ke, pooled_volumes[key][ke])
total_vol += pooled_volumes[key][ke]['volume_to_mix']
total_con += pooled_volumes[key][ke]['concentration']
total_ng += pooled_volumes[key][ke]['concentration'] * pooled_volumes[key][ke]['volume_to_mix']
print("vol", round(total_vol, 1))
print("calculated con", total_ng / total_vol, '\n')
con_per_part[key] = round(total_ng / total_vol)
total_con = 0
total_vol = 0
    total_ng = 0
# source file: FiPy-3.4.4/examples/levelSet/electroChem/surfactantBulkDiffusionEquation.py
from __future__ import division
from __future__ import unicode_literals
__docformat__ = 'restructuredtext'
from fipy.terms.implicitSourceTerm import ImplicitSourceTerm
from fipy.variables.levelSetDiffusionVariable import _LevelSetDiffusionVariable
from fipy.terms.transientTerm import TransientTerm
from fipy.terms.diffusionTerm import DiffusionTermNoCorrection
def buildSurfactantBulkDiffusionEquation(bulkVar=None,
                                         distanceVar=None,
                                         surfactantVar=None,
                                         otherSurfactantVar=None,
                                         diffusionCoeff=None,
                                         transientCoeff=1.,
                                         rateConstant=None):
    r"""
    Build a bulk diffusion equation for a species with a source term for
    the jump of surfactant from the bulk onto an interface.

    The governing equation is

    .. math::

        \frac{\partial c}{\partial t} = \nabla \cdot D \nabla c

    where :math:`D = D_c` when :math:`\phi > 0` and :math:`D = 0`
    otherwise. The jump condition at the interface follows Langmuir
    adsorption: the rate at which the species leaves the electrolyte is
    proportional to its bulk concentration, the available site density
    and a jump coefficient,

    .. math::

        D \hat{n} \cdot \nabla c = -k c (1 - \theta) \qquad \text{at $\phi = 0$}.

    Parameters
    ----------
    bulkVar : ~fipy.variables.cellVariable.CellVariable
        The bulk surfactant concentration variable.
    distanceVar : ~fipy.variables.distanceVariable.DistanceVariable
    surfactantVar : ~fipy.variables.surfactantVariable.SurfactantVariable
    otherSurfactantVar : ~fipy.variables.surfactantVariable.SurfactantVariable
        Any other surfactant that may remove this one.
    diffusionCoeff : float or ~fipy.variables.faceVariable.FaceVariable
    transientCoeff : float
        In general 1 is used.
    rateConstant : float
        The adsorption coefficient.
    """
    # Adsorption rate per unit volume, non-zero only where cells are
    # crossed by the interface.
    spCoeff = rateConstant * distanceVar.cellInterfaceAreas / bulkVar.mesh.cellVolumes
    spSourceTerm = ImplicitSourceTerm(spCoeff)

    bulkSpCoeff = spCoeff * bulkVar
    # Loss of bulk species, weighted by this surfactant's own interface
    # coverage...
    coeff = bulkSpCoeff * surfactantVar.interfaceVar
    # ...and, when present, by the coverage of a competing surfactant.
    if otherSurfactantVar is not None:
        otherCoeff = bulkSpCoeff * otherSurfactantVar.interfaceVar
    else:
        otherCoeff = 0

    # Diffusion is masked to the phi > 0 side of the level-set interface.
    maskedDiffusionCoeff = _LevelSetDiffusionVariable(distanceVar,
                                                      diffusionCoeff)

    eq = TransientTerm(transientCoeff) - DiffusionTermNoCorrection(maskedDiffusionCoeff)

    return eq - coeff + spSourceTerm - otherCoeff
// source file: Djaloha-0.4.2/djaloha/static/aloha.0.20.20/plugins/common/table/lib/table-selection.js
define(
['aloha', 'aloha/jquery', 'table/table-plugin-utils', 'table/table-cell', 'i18n!table/nls/i18n'],
function (Aloha, $, Utils, TableCell, i18n) {
/**
* The TableSelection object is a helper-object
*/
var TableSelection = function (table) {
	// the Table instance this selection helper belongs to
	this.table = table;
};
/**
 * Type of the current cell selection.
 * Possible values are "row", "column" or "cell" ('cell' marks custom,
 * free-form cell selections).
 */
TableSelection.prototype.selectionType = undefined;

/**
 * All currently selected table cells, as an array of DOM "td"/"th"
 * elements.
 * NOTE(review): arrays created here live on the prototype and are
 * shared by every TableSelection instance until an instance method
 * reassigns them (e.g. unselectCells) — verify this is intended when
 * several tables are active at once.
 */
TableSelection.prototype.selectedCells = new Array();

/**
 * Selected column indices, populated when selectionType is 'column'.
 */
TableSelection.prototype.selectedColumnIdxs = new Array();

/**
 * Selected row indices, populated when selectionType is 'row'.
 */
TableSelection.prototype.selectedRowIdxs = new Array();

/**
 * Whether the custom cell-selection mode is currently active.
 */
TableSelection.prototype.cellSelectionMode = false;

/**
 * Position of the base cell of a selection - [row, column].
 */
TableSelection.prototype.baseCellPosition = null;

/**
 * Range of the last cell selection - [row, column].
 */
TableSelection.prototype.lastSelectionRange = null;
/**
 * Marks all cells of the specified columns as selected.
 *
 * @param columnsToSelect array of column indices to select
 * @return void
 */
TableSelection.prototype.selectColumns = function (columnsToSelect) {
	this.unselectCells();

	var rows = this.table.getRows();
	// the first row is the selection-helper row — drop it
	rows.shift();
	var grid = Utils.makeGrid(rows);

	for (var c = 0; c < columnsToSelect.length; c++) {
		var columnIdx = columnsToSelect[c];

		// skip columns that are already part of the selection
		if ($.inArray(columnIdx, this.selectedColumnIdxs) !== -1) {
			continue;
		}
		this.selectedColumnIdxs.push(columnIdx);

		for (var r = 0; r < grid.length; r++) {
			var cellInfo = grid[r][columnIdx];
			if (Utils.containsDomCell(cellInfo)) {
				$(cellInfo.cell).addClass(this.table.get('classCellSelected'));
				this.selectedCells.push(cellInfo.cell);
			}
		}
	}

	this.selectionType = 'column';
};
/**
* Marks all cells of the specified row or rows as selected
*
* @return void
*/
/**
 * Marks all cells of the specified rows as selected.
 *
 * BUGFIX: a row that was already selected previously triggered an early
 * "return", aborting the selection of all remaining rows and skipping
 * the assignment of selectionType. It is now skipped with "continue",
 * matching the behaviour of selectColumns.
 *
 * @param rowsToSelect array of row indices to select
 * @return void
 */
TableSelection.prototype.selectRows = function (rowsToSelect) {
	this.unselectCells();

	var rows = this.table.getRows();
	rowsToSelect.sort(function (a, b) { return a - b; });

	for (var i = 0; i < rowsToSelect.length; i++) {
		var rowIdx = rowsToSelect[i];
		if (!rows[rowIdx]) {
			continue;
		}
		// skip rows that are already part of the selection
		if ($.inArray(rowIdx, this.selectedRowIdxs) !== -1) {
			continue;
		}
		this.selectedRowIdxs.push(rowIdx);

		// start at 1 so the leading control cell is not selected
		for (var j = 1; j < rows[rowIdx].cells.length; j++) {
			this.selectedCells.push(rows[rowIdx].cells[j]);
			// TODO make proper cell selection method
			$(rows[rowIdx].cells[j]).addClass(this.table.get('classCellSelected'));
		}
	}

	this.selectionType = 'row';
};
/**
 * Selects every data row of the table.
 *
 * @return void
 */
TableSelection.prototype.selectAll = function () {
	var rows = this.table.getRows();
	var rowIndices = [];
	// start at 1: getRows() returns all rows, including the header row
	// at index 0, which must not be selected
	for (var i = 1; i < rows.length; i++) {
		rowIndices.push(i);
	}
	this.selectRows(rowIndices);
};
/**
* To be called when cells of the table were selected
* @see selectRows, selectColumns, selectCellRange
* TODO this should be private
*/
TableSelection.prototype.notifyCellsSelected = function () {
	Aloha.trigger( 'aloha-table-selection-changed' );
	// The UI feels more consistent when the non-table selection is
	// removed once cells are selected.
	// TODO: this code doesn't work right in IE as it causes the table
	// scope of the floating menu to be lost. Maybe this can be handled
	// by testing for an empty selection in the aloha-selection-changed
	// event.
	//Aloha.getSelection().removeAllRanges();
};
/**
* To be called when a cell-selection is entirely removed
* @see unselectCells
*/
// Fires the selection-changed event after a cell selection was cleared.
TableSelection.prototype._notifyCellsUnselected = function () {
	Aloha.trigger( 'aloha-table-selection-changed' );
};
/**
* This method return true if all sellected cells are TH cells.
*
* @return boolean
*/
/**
 * Returns true if every selected cell is a TH (header) cell.
 * An empty selection is not considered a header selection.
 *
 * @return boolean
 */
TableSelection.prototype.isHeader = function () {
	var cells = this.selectedCells;
	if (cells.length === 0) {
		return false;
	}
	for (var i = 0; i < cells.length; i++) {
		var cell = cells[i];
		if (!cell || cell.nodeName.toLowerCase() !== 'th') {
			return false;
		}
	}
	return true;
}
/**
* This method removes the "selected" class from all selected cells
*
* @return void
*/
TableSelection.prototype.unselectCells = function(){
	var rows;

	// don't unselect cells while a custom cell selection is being made
	if ( this.cellSelectionMode ) {
		return;
	}

	if (this.selectedCells.length > 0) {
		rows = this.table.getRows();

		for (var i = 0; i < rows.length; i++) {
			// start at 1: index 0 is the control cell
			for ( var j = 1; j < rows[i].cells.length; j++ ) {
				// TODO make proper cell selection method
				$( rows[i].cells[j] ).removeClass( this.table.get('classCellSelected') );
			}
		}

		this.selectedCells = new Array();
		this.selectedColumnIdxs = new Array();
		this.selectedRowIdxs = new Array();

		// We keep 'cell' as the default selection type instead of
		// unsetting selectionType, to avoid an edge case where a click
		// into a cell doesn't trigger a call to TableCell.editableFocus
		// (which would set the 'cell' selection type), which would make
		// the FloatingMenu lose the table scope.
		this.selectionType = 'cell';

		this._notifyCellsUnselected();
	}
};
/**
* Returns the index of a given cell, in selectedCells
* returns -1 if the given cell is not in selectedCells
* @params cell
* DOMElement
*
* @return integer
*/
/**
 * Returns the index of the given cell in selectedCells, or -1 if the
 * cell is not part of the current selection.
 *
 * @param cell DOMElement to look up
 * @return integer index, or -1 when not found
 */
TableSelection.prototype.selectionIndex = function (cell) {
	// $.inArray uses strict equality, matching the manual identity
	// loop this replaces.
	return $.inArray(cell, this.selectedCells);
};
/**
* Given a contour creates a object representing a rectangle.
* This function only gives a useful return value if the given
* contour rectangular.
*
* @param {object} contour
* a rectangular contour
* @return {object}
* an object with the properties top, right, bottom, left,
* representing the rectangular contour.
*/
function getRectFromContour(contour) {
	var top = contour.top[0];
	var left = contour.left[0];
	// right/bottom are exclusive bounds, hence the +1 on the
	// inclusive contour edges
	return {
		'top': top,
		'right': contour.right[0] + 1,
		'bottom': contour.bottom[0] + 1,
		'left': left
	};
}
/**
* Given a grid and contour, determines whether the contour is
* rectangular, and each cell in the rectangle is selected.
*
* @param {array} grid
* a two-dimensional array representing a grid see Utils.makeGrid
* @param {object} contour
* an object reprensenting a contour see Utils.makeContour
* @param {function} isSelected
* a function that determines whether a cell in the given grid
* is selected for merging.
* @return {boolean}
* true if all cells inside the contour are selected and can
* be merged.
*/
function isMergeable(grid, contour, isSelected) {
	// A jagged outline (any edge whose indices differ) cannot be merged.
	if (Utils.indexOfAnyBut(contour.top, contour.top[0]) !== -1
			|| Utils.indexOfAnyBut(contour.right, contour.right[0]) !== -1
			|| Utils.indexOfAnyBut(contour.bottom, contour.bottom[0]) !== -1
			|| Utils.indexOfAnyBut(contour.left, contour.left[0]) !== -1) {
		return false;
	}

	// The outline is rectangular; also make sure there are no
	// unselected holes inside the rectangle.
	var mergeable = true;
	var rect = getRectFromContour(contour);
	Utils.walkGridInsideRect(grid, rect, function (cellInfo) {
		if (!isSelected(cellInfo)) {
			mergeable = false;
			return false; // stop walking
		}
	});
	return mergeable;
}
/**
* This method merges all selected cells
*
* @return void
*/
TableSelection.prototype.mergeCells = function(){
	var selectedCells = this.selectedCells;
	if ( 0 === selectedCells.length ) {
		return;
	}

	var isSelected = function ( cellInfo ) {
		return -1 != $.inArray( cellInfo.cell, selectedCells );
	};
	var grid = Utils.makeGrid( this.table.getRows() );
	var contour = Utils.makeContour( grid, isSelected );

	// refuse to merge unless the selection forms a filled rectangle
	if ( ! isMergeable( grid, contour, isSelected ) ) {
		Aloha.showMessage(new Aloha.Message({
			title : i18n.t('Table'),
			text : i18n.t('table.mergeCells.notRectangular'),
			type : Aloha.Message.Type.ALERT
		}));
		return;
	}
	var selectedRect = getRectFromContour( contour );

	// the top-left cell of the rectangle receives the contents of all
	// the other selected cells
	var $firstCell = $( grid[ selectedRect.top ][ selectedRect.left ].cell );
	var $firstContainer = $( TableCell.getContainer( $firstCell.get( 0 ) ) );
	Utils.walkGridInsideRect( grid, selectedRect, function ( cellInfo, x, y ) {
		// skip the grid positions spanned by the top-left cell itself
		if ( x - cellInfo.spannedX === selectedRect.left
			 && y - cellInfo.spannedY === selectedRect.top ) {
			return;
		}
		var cell = cellInfo.cell;
		var contents = $( TableCell.getContainer( cell ) ).contents();
		// only append the delimiting space if there is some non-whitespace
		// content to carry over
		for ( var i = 0; i < contents.length; i++ ) {
			if ( "string" !== typeof contents[i]
				 || "" !== $.trim( contents[i] ) ) {
				$firstContainer.append( " " );
				$firstContainer.append( contents );
				break;
			}
		}
		$( cell ).remove();
	});

	// make the surviving cell span the whole merged rectangle
	$firstCell.attr({ 'rowspan': selectedRect.bottom - selectedRect.top,
					  'colspan': selectedRect.right - selectedRect.left });

	// select the merged cell
	this.selectedCells = [ $firstCell.get( 0 ) ];

	// reset selection-state flags
	this.cellSelectionMode = false;
	this.baseCellPosition = null;
	this.lastSelectionRange = null;
	this.selectionType = 'cell';

	Aloha.trigger( 'aloha-table-selection-changed' );
};
/**
* This method splits all selected cells (if they are already have row or column spans)
*
* @return void
*/
/**
 * Splits every selected cell that currently has a row or column span.
 *
 * @return void
 */
TableSelection.prototype.splitCells = function () {
	var selection = this;
	var cellsToSplit = this.selectedCells;
	if (cellsToSplit.length === 0) {
		return;
	}

	$(cellsToSplit).each(function () {
		Utils.splitCell(this, function () {
			return selection.table.newActiveCell().obj;
		});
	});

	// reset selection-state flags
	this.cellSelectionMode = false;
	this.baseCellPosition = null;
	this.lastSelectionRange = null;
	this.selectionType = 'cell';

	Aloha.trigger('aloha-table-selection-changed');
};
/**
* This method checks if the current selection of cells is merge able
*
* @return {boolean}
* true if more than one cell is selected.
*/
/**
 * Checks whether the current cell selection can be merged, i.e. at
 * least two cells are selected and they form a filled rectangle.
 *
 * @return boolean
 */
TableSelection.prototype.cellsAreMergeable = function () {
	var selectedCells = this.selectedCells;
	if (selectedCells.length < 2) {
		return false;
	}
	var isSelected = function (cellInfo) {
		return $.inArray(cellInfo.cell, selectedCells) !== -1;
	};
	var grid = Utils.makeGrid(this.table.getRows());
	return isMergeable(grid, Utils.makeContour(grid, isSelected), isSelected);
};
/**
* This method checks if the current selection of cells is split able
*
* @return {boolean}
* true if more than one cell is selected.
*/
/**
 * Checks whether the current cell selection can be split, i.e. at
 * least one selected cell has a row or column span greater than 1.
 *
 * @return boolean
 */
TableSelection.prototype.cellsAreSplitable = function () {
	var cells = this.selectedCells;
	for (var i = 0; i < cells.length; i++) {
		if (Utils.colspan(cells[i]) > 1 || Utils.rowspan(cells[i]) > 1) {
			return true;
		}
	}
	// no selection, or no spanning cells
	return false;
};
return TableSelection;
}); | PypiClean |
/* source file: Flask-CKEditor-0.4.6/flask_ckeditor/static/standard/plugins/a11yhelp/dialogs/lang/de.js */
/*
Copyright (c) 2003-2020, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or https://ckeditor.com/legal/ckeditor-oss-license
*/
CKEDITOR.plugins.setLang("a11yhelp","de",{title:"Barrierefreiheitinformationen",contents:"Hilfeinhalt. Um den Dialog zu schliessen die Taste ESC drücken.",legend:[{name:"Allgemein",items:[{name:"Editorwerkzeugleiste",legend:"Drücken Sie ${toolbarFocus} auf der Symbolleiste. Gehen Sie zur nächsten oder vorherigen Symbolleistengruppe mit TAB und SHIFT+TAB. Gehen Sie zur nächsten oder vorherigen Symbolleiste auf die Schaltfläche mit dem RECHTS- oder LINKS-Pfeil. Drücken Sie die Leertaste oder Eingabetaste, um die Schaltfläche in der Symbolleiste aktivieren."},
{name:"Editordialog",legend:"Drücke innerhalb eines Dialogs TAB, um zum nächsten Element zu springen. Drücke SHIFT+TAB, um zum vorigen Element zu springen, drücke ENTER um das Formular im Dialog abzusenden, drücke ESC, um den Dialog zu schließen. Hat der Dialog mehrere Tabs, dann kannst du durch ALT+F10 die Tab-Liste aufrufen or mittels TAB als Teil der Dialog-Tab-Reihenfolge. Ist die Tab-Liste fokussiert, dann mithilfe der Pfeiltasten (LINKS und RECHTS) zwischen den Tabs gewechselt werden."},{name:"Editor-Kontextmenü",
legend:"Dürcken Sie ${contextMenu} oder die Anwendungstaste um das Kontextmenü zu öffnen. Man kann die Pfeiltasten zum Wechsel benutzen. Mit der Leertaste oder der Enter-Taste kann man den Menüpunkt aufrufen. Schliessen Sie das Kontextmenü mit der ESC-Taste."},{name:"Editor-Listenbox",legend:"Innerhalb einer Listenbox kann man mit der TAB-Taste oder den Pfeilrunter-Taste den nächsten Menüeintrag wählen. Mit der SHIFT+TAB Tastenkombination oder der Pfeilhoch-Taste gelangt man zum vorherigen Menüpunkt. Mit der Leertaste oder Enter kann man den Menüpunkt auswählen. Drücken Sie ESC zum Verlassen des Menüs."},
{name:"Editor-Elementpfadleiste",legend:"Drücken Sie ${elementsPathFocus} um sich durch die Pfadleiste zu bewegen. Um zum nächsten Element zu gelangen drücken Sie TAB oder die Pfeilrechts-Taste. Zum vorherigen Element gelangen Sie mit der SHIFT+TAB oder der Pfeillinks-Taste. Drücken Sie die Leertaste oder Enter um das Element auszuwählen."}]},{name:"Befehle",items:[{name:"Rückgängig-Befehl",legend:"Drücken Sie ${undo}"},{name:"Wiederherstellen-Befehl",legend:"Drücken Sie ${redo}"},{name:"Fettschrift-Befehl",
legend:"Drücken Sie ${bold}"},{name:"Kursiv-Befehl",legend:"Drücken Sie ${italic}"},{name:"Unterstreichen-Befehl",legend:"Drücken Sie ${underline}"},{name:"Link-Befehl",legend:"Drücken Sie ${link}"},{name:"Werkzeugleiste einklappen-Befehl",legend:"Drücken Sie ${toolbarCollapse}"},{name:"Zugang bisheriger Fokussierung Raumbefehl ",legend:"Drücken Sie ${accessPreviousSpace} auf den am nächsten nicht erreichbar Fokus-Abstand vor die Einfügemarke zugreifen: zwei benachbarte HR-Elemente. Wiederholen Sie die Tastenkombination um entfernte Fokusräume zu erreichen. "},
{name:"Zugang nächster Schwerpunkt Raumbefehl ",legend:"Drücken Sie $ { accessNextSpace }, um den nächsten unerreichbar Fokus Leerzeichen nach dem Cursor zum Beispiel auf: zwei benachbarten HR Elemente. Wiederholen Sie die Tastenkombination zum fernen Fokus Bereiche zu erreichen. "},{name:"Eingabehilfen",legend:"Drücken Sie ${a11yHelp}"},{name:"Einfügen als unformatierter Text. ",legend:"Drücke ${pastetext}",legendEdge:"Drücke ${pastetext} und anschließend ${paste}"}]}],tab:"Tab",pause:"Pause",capslock:"Feststell",
escape:"Escape",pageUp:"Bild auf",pageDown:"Bild ab",leftArrow:"Linke Pfeiltaste",upArrow:"Obere Pfeiltaste",rightArrow:"Rechte Pfeiltaste",downArrow:"Untere Pfeiltaste",insert:"Einfügen",leftWindowKey:"Linke Windowstaste",rightWindowKey:"Rechte Windowstaste",selectKey:"Taste auswählen",numpad0:"Ziffernblock 0",numpad1:"Ziffernblock 1",numpad2:"Ziffernblock 2",numpad3:"Ziffernblock 3",numpad4:"Ziffernblock 4",numpad5:"Ziffernblock 5",numpad6:"Ziffernblock 6",numpad7:"Ziffernblock 7",numpad8:"Ziffernblock 8",
numpad9:"Ziffernblock 9",multiply:"Multiplizieren",add:"Addieren",subtract:"Subtrahieren",decimalPoint:"Punkt",divide:"Dividieren",f1:"F1",f2:"F2",f3:"F3",f4:"F4",f5:"F5",f6:"F6",f7:"F7",f8:"F8",f9:"F9",f10:"F10",f11:"F11",f12:"F12",numLock:"Ziffernblock feststellen",scrollLock:"Rollen",semiColon:"Semikolon",equalSign:"Gleichheitszeichen",comma:"Komma",dash:"Bindestrich",period:"Punkt",forwardSlash:"Schrägstrich",graveAccent:"Gravis",openBracket:"Öffnende eckige Klammer",backSlash:"Rückwärtsgewandter Schrägstrich",
closeBracket:"Schließende eckige Klammer",singleQuote:"Einfaches Anführungszeichen"}); | PypiClean |
# source file: Hyperion-0.9.10/hyperion/densities/alpha_disk.py
from __future__ import print_function, division
import numpy as np
from astropy import log as logger
from astropy.extern import six
from ..dust import SphericalDust
from ..util.constants import pi, G
from ..util.convenience import OptThinRadius
from ..util.integrate import integrate_powerlaw
from ..util.validator import validate_scalar
from .core import Disk
class AlphaDisk(Disk):
r'''
This class implements the density structure for an alpha-accretion disk as
implemented in `Whitney et al. (2003)
<http://dx.doi.org/10.1086/375415>`_, with a density given by:
.. math:: \rho(R,z,\phi) = \rho_0^{\rm disk}\,\left(1 - \sqrt{\frac{R_{\star}}{R}}\right)\left(\frac{R_0}{R}\right)^{\beta - p}\,\exp{\left[-\frac{1}{2}\left(\frac{z}{h(R)}\right)^2\right]} \\
where
.. math:: h(R) = h_0\left(\frac{R}{R_0}\right)^\beta
The :math:`\rho_0^{\rm disk}` parameter does not need to be set directly
(although it can be), and is instead automatically calculated when you set
the disk mass. The exact equation relating :math:`\rho_0^{\rm disk}` to the
disk mass can be found by integrating the equation for
:math:`\rho(R,z,\phi)` over three dimensions and setting the result equal
to the disk mass.
Once the :class:`~hyperion.densities.AlphaDisk` class has been
instantiated, the parameters for the density structure can be set via
attributes::
>>> from hyperion.util.constants import msun, au
>>> from hyperion.densities.alpha_disk import AlphaDisk
>>> disk = AlphaDisk()
>>> disk.mass = 2. * msun
>>> disk.rmin = 0.1 * au
>>> disk.rmax = 100 * au
The difference between :class:`~hyperion.densities.FlaredDisk` and
:class:`~hyperion.densities.AlphaDisk` is that the latter includes an
extra term in the density equation (:math:`1 - \sqrt{R_0/R}`)
but most importantly that it allows for viscous accretion luminosity,
specified either via an accretion rate, or an accretion luminosity. The
relation between the accretion rate and the accretion luminosity in an
infinitesimal volume is:
.. math:: \frac{d\dot{E}_{\rm acc}}{dV} = \frac{3 G M_\star \dot{M}_{\rm acc}}{\sqrt{32 \pi^3} R^3 h(R)} \left(1 - \sqrt{\frac{R_{\star}}{R}}\right) \exp{\left[-\frac{1}{2}\left(\frac{z}{h(R)}\right)^2\right]}
This is equation (4) from `Whitney et al. (2003)
<http://dx.doi.org/10.1086/375415>`_. Once integrated over the whole disk,
this gives a total luminosity of:
.. math:: L_{\rm acc} = \frac{G\,M_\star\,M_{\rm acc}}{2} \left[3\left(\frac{1}{R_{\rm min}} - \frac{1}{R_{\rm max}}\right) - 2\left(\sqrt{\frac{R_\star}{R_{\rm min}^3}} - \sqrt{\frac{R_\star}{R_{\rm max}^3}}\right)\right]
'''
    def __init__(self, mass=None, rho_0=None, rmin=None, rmax=None, p=-1,
                 beta=-1.25, h_0=None, r_0=None, cylindrical_inner_rim=True,
                 cylindrical_outer_rim=True, mdot=None, lvisc=None, star=None,
                 dust=None):
        # NOTE(review): Disk.__init__ is not called here — presumably all
        # of the parent's attributes are set explicitly below; verify.

        # mass and rho_0 are linked (each can be derived from the other),
        # so both backing attributes are initialized to None before
        # either setter runs; the setters warn when overriding the
        # other value.
        self.mass = None
        self.rho_0 = None

        # Basic disk parameters (radii in cm, p/beta dimensionless)
        self.rmin = rmin
        self.rmax = rmax
        self.p = p
        self.beta = beta
        self.h_0 = h_0
        self.r_0 = r_0
        self.cylindrical_inner_rim = cylindrical_inner_rim
        self.cylindrical_outer_rim = cylindrical_outer_rim

        # Disk mass: mass and rho_0 are mutually exclusive ways of
        # normalizing the density profile.
        if mass is not None and rho_0 is not None:
            raise Exception("Cannot specify both mass and rho_0")
        elif mass is not None:
            self.mass = mass
        elif rho_0 is not None:
            self.rho_0 = rho_0

        # Disk accretion: likewise, mdot and lvisc are mutually
        # exclusive ways of specifying the accretion luminosity.
        if mdot is not None and lvisc is not None:
            raise Exception("Cannot specify both mdot and lvisc")
        self.mdot = mdot
        self.lvisc = lvisc

        # Central star
        self.star = star

        # Dust properties
        self.dust = dust

        # Prevent accidental creation of new attributes from here on.
        self._freeze()
    @property
    def mass(self):
        """
        Total disk mass (g)

        If the mass was set explicitly, return it directly; otherwise, if
        ``rho_0`` was set, derive the mass analytically by integrating the
        density profile over the disk volume.
        """
        if self._mass is not None:
            return self._mass
        elif self._rho_0 is None:
            # Neither quantity has been specified yet
            return None
        else:
            self._check_all_set()
            if self.rmax <= self.rmin:
                # Degenerate (empty) disk
                return 0.
            # Radial power-law integral of the surface density profile...
            int1 = integrate_powerlaw(self.rmin, self.rmax, 1.0 + self.p)
            int1 *= self.r_0 ** -self.p
            # ...minus the term from the (1 - sqrt(R_star/R)) factor
            int2 = integrate_powerlaw(self.rmin, self.rmax, 0.5 + self.p)
            int2 *= self.star.radius ** 0.5 * self.r_0 ** -self.p
            integral = (2. * pi) ** 1.5 * self.h_0 * (int1 - int2)
            return self._rho_0 * integral

    @mass.setter
    def mass(self, value):
        if value is not None:
            validate_scalar('mass', value, domain='positive')
            if self._rho_0 is not None:
                # mass and rho_0 are mutually exclusive; the newly set one wins
                logger.warning("Overriding value of rho_0 with value derived from mass")
                self._rho_0 = None
        self._mass = value
    @property
    def rho_0(self):
        """
        Scale-factor for the disk density (g/cm^3)

        If ``rho_0`` was set explicitly, return it directly; otherwise, if
        the mass was set, derive ``rho_0`` by dividing the mass by the
        analytic volume integral of the unscaled density profile (the same
        integral used in the ``mass`` property).
        """
        if self._rho_0 is not None:
            return self._rho_0
        elif self._mass is None:
            # Neither quantity has been specified yet
            return None
        else:
            self._check_all_set()
            if self.rmax <= self.rmin:
                # Degenerate (empty) disk
                return 0.
            int1 = integrate_powerlaw(self.rmin, self.rmax, 1.0 + self.p)
            int1 *= self.r_0 ** -self.p
            int2 = integrate_powerlaw(self.rmin, self.rmax, 0.5 + self.p)
            int2 *= self.star.radius ** 0.5 * self.r_0 ** -self.p
            integral = (2. * pi) ** 1.5 * self.h_0 * (int1 - int2)
            return self._mass / integral

    @rho_0.setter
    def rho_0(self, value):
        if value is not None:
            validate_scalar('rho_0', value, domain='positive')
            if self._mass is not None:
                # mass and rho_0 are mutually exclusive; the newly set one wins
                logger.warning("Overriding value of mass with value derived from rho_0")
                self._mass = None
        self._rho_0 = value
@property
def rmin(self):
'''inner radius (cm)'''
if isinstance(self._rmin, OptThinRadius):
return self._rmin.evaluate(self.star, self.dust)
else:
return self._rmin
@rmin.setter
def rmin(self, value):
if not isinstance(value, OptThinRadius) and value is not None:
validate_scalar('rmin', value, domain='positive', extra=' or an OptThinRadius instance')
self._rmin = value
@property
def rmax(self):
'''outer radius (cm)'''
if isinstance(self._rmax, OptThinRadius):
return self._rmax.evaluate(self.star, self.dust)
else:
return self._rmax
@rmax.setter
def rmax(self, value):
if not isinstance(value, OptThinRadius) and value is not None:
validate_scalar('rmax', value, domain='positive', extra=' or an OptThinRadius instance')
self._rmax = value
@property
def p(self):
'''surface density power-law exponent'''
return self._p
@p.setter
def p(self, value):
if value is not None:
validate_scalar('p', value, domain='real')
self._p = value
@property
def beta(self):
'''scaleheight power-law exponent'''
return self._beta
@beta.setter
def beta(self, value):
if value is not None:
validate_scalar('beta', value, domain='real')
self._beta = value
@property
def h_0(self):
'''scaleheight of the disk at ``r_0`` (cm)'''
return self._h_0
@h_0.setter
def h_0(self, value):
if value is not None:
validate_scalar('h_0', value, domain='positive')
self._h_0 = value
@property
def r_0(self):
'''radius at which ``h_0`` is defined (cm)'''
return self._r_0
@r_0.setter
def r_0(self, value):
if value is not None:
validate_scalar('r_0', value, domain='positive')
self._r_0 = value
@property
def cylindrical_inner_rim(self):
'''
Whether the inner edge of the disk should be defined as a truncation
in cylindrical or spherical polar coordinates
'''
return self._cylindrical_inner_rim
@cylindrical_inner_rim.setter
def cylindrical_inner_rim(self, value):
if not isinstance(value, bool):
raise ValueError("cylindrical_inner_rim should be a boolean")
self._cylindrical_inner_rim = value
@property
def cylindrical_outer_rim(self):
'''
Whether the outer edge of the disk should be defined as a truncation
in cylindrical or spherical polar coordinates
'''
return self._cylindrical_outer_rim
@cylindrical_outer_rim.setter
def cylindrical_outer_rim(self, value):
if not isinstance(value, bool):
raise ValueError("cylindrical_outer_rim should be a boolean")
self._cylindrical_outer_rim = value
    @property
    def mdot(self):
        '''accretion rate (g/s)'''
        if self._mdot is not None:
            return self._mdot
        elif self._lvisc is None:
            # Neither accretion quantity has been set
            return None
        else:
            self._check_all_set()
            if self.star.mass is None:
                raise Exception("Stellar mass is undefined - cannot compute disk accretion rate")
            # Invert the analytic L_acc(mdot) relation (see the class
            # docstring) to recover the accretion rate from the viscous
            # luminosity.
            mdot = self.lvisc / G / self.star.mass * 2. \
                / (3. / self.rmin - 3. / self.rmax
                   - 2. * np.sqrt(self.star.radius / self.rmin ** 3.)
                   + 2. * np.sqrt(self.star.radius / self.rmax ** 3.))
            return mdot

    @mdot.setter
    def mdot(self, value):
        if value is not None:
            validate_scalar('mdot', value, domain='positive')
            if self._lvisc is not None:
                # mdot and lvisc are mutually exclusive; the newly set one wins
                logger.warning("Overriding value of lvisc with value derived from mdot")
                self._lvisc = None
        self._mdot = value
    @property
    def lvisc(self):
        '''viscous accretion luminosity (ergs/s)'''
        if self._lvisc is not None:
            return self._lvisc
        elif self._mdot is None:
            # Neither accretion quantity has been set
            return None
        else:
            self._check_all_set()
            if self.star.mass is None:
                raise Exception("Stellar mass is undefined - cannot compute disk accretion luminosity")
            # Analytic L_acc(mdot) relation integrated over the whole disk
            # (see the class docstring).
            lvisc = G * self.star.mass * self.mdot / 2. \
                * (3. / self.rmin - 3. / self.rmax
                   - 2. * np.sqrt(self.star.radius / self.rmin ** 3.)
                   + 2. * np.sqrt(self.star.radius / self.rmax ** 3.))
            return lvisc

    @lvisc.setter
    def lvisc(self, value):
        if value is not None:
            validate_scalar('lvisc', value, domain='positive')
            if self._mdot is not None:
                # mdot and lvisc are mutually exclusive; the newly set one wins
                logger.warning("Overriding value of mdot with value derived from lvisc")
                self._mdot = None
        self._lvisc = value
@property
def star(self):
'''central star instance (needs ``mass`` and ``radius`` attributes)'''
return self._star
@star.setter
def star(self, value):
if value is None:
self._star = None
else:
try:
value.mass
except AttributeError:
raise ValueError("star should have a ``mass`` attribute")
try:
value.radius
except AttributeError:
raise ValueError("star should have a ``radius`` attribute")
self._star = value
@property
def dust(self):
'''dust properties (filename or dust object)'''
return self._dust
@dust.setter
def dust(self, value):
if isinstance(value, six.string_types):
self._dust = SphericalDust(value)
else:
self._dust = value
def __str__(self):
string = "= Alpha disk =\n"
string += " - M_disk: %.3e\n" % self.mass
string += " - R_min: %.3e\n" % self.rmin
string += " - R_min: %.3e\n" % self.rmax
string += " - p: %.3f\n" % self.p
string += " - beta: %.3f\n" % self.beta
string += " - h_0: %.3e\n" % self.h_0
string += " - r_0: %.3e\n" % self.r_0
string += " - Mdot: %.3e\n" % self.mdot
string += " - Lvisc: %.3e\n" % self.lvisc
return string
    def _check_all_set(self):
        # Verify that every parameter required by the analytic formulae has
        # been set, raising a specific message for the first missing one.
        if self._mass is None and self._rho_0 is None:
            raise Exception("either mass or rho_0 should be set")
        if self.rmin is None:
            raise Exception("rmin is not set")
        if self.rmax is None:
            raise Exception("rmax is not set")
        if self.p is None:
            raise Exception("p is not set")
        if self.beta is None:
            raise Exception("beta is not set")
        if self.h_0 is None:
            raise Exception("h_0 is not set")
        if self.r_0 is None:
            raise Exception("r_0 is not set")
        # OptThinRadius placeholders must have been resolved to numbers.
        # NOTE(review): the rmin/rmax properties already call
        # OptThinRadius.evaluate(); these checks presumably catch the case
        # where evaluation still yields a placeholder -- confirm.
        if isinstance(self.rmin, OptThinRadius):
            raise Exception("Inner disk radius needs to be computed first")
        if isinstance(self.rmax, OptThinRadius):
            raise Exception("Outer disk radius needs to be computed first")
        if self.star is None:
            raise Exception("star is not set")
        if self._lvisc is None and self._mdot is None:
            raise Exception("either lvisc or mdot should be set")
    def density(self, grid):
        '''
        Return the density grid

        Parameters
        ----------
        grid : :class:`~hyperion.grid.SphericalPolarGrid` or :class:`~hyperion.grid.CylindricalPolarGrid` instance.
            The spherical or cylindrical polar grid object containing
            information about the position of the grid cells.

        Returns
        -------
        rho : np.ndarray
            A 3-dimensional array containing the density of the disk inside
            each cell. The shape of this array is the same as
            ``grid.shape``.
        '''
        self._check_all_set()

        # Degenerate disks contribute no density
        if self.rmax <= self.rmin:
            logger.warning("Ignoring disk, since rmax < rmin")
            return np.zeros(grid.shape)

        if self.mass == 0:
            return np.zeros(grid.shape)

        # Find disk scaleheight at each cylindrical radius
        # (grid.gw is presumably the cylindrical radius and grid.gz the
        # height, consistent with _vertical_profile's w = r sin(theta))
        h = self.h_0 * (grid.gw / self.r_0) ** self.beta

        # Find disk density at all positions: power-law radial profile
        # with a Gaussian vertical profile of scaleheight h
        rho = (self.r_0 / grid.gw) ** (self.beta - self.p) \
            * np.exp(-0.5 * (grid.gz / h) ** 2)

        # Geometrical factor (1 - sqrt(R_star/R)), the term that
        # distinguishes AlphaDisk from FlaredDisk (see class docstring)
        rho *= (1. - np.sqrt(self.star.radius / grid.gw))

        # Truncate below rmin and above rmax, either in cylindrical (gw)
        # or spherical (gr) radius depending on the rim settings
        if self.cylindrical_inner_rim:
            rho[grid.gw < self.rmin] = 0.
        else:
            rho[grid.gr < self.rmin] = 0.
        if self.cylindrical_outer_rim:
            rho[grid.gw > self.rmax] = 0.
        else:
            rho[grid.gr > self.rmax] = 0.

        # Find density factor
        rho *= self.rho_0

        # Re-normalize so the discretized mass matches the analytic disk
        # mass (corrects for grid-resolution effects)
        norm = self.mass / np.sum(rho * grid.volumes)

        logger.info("Disk density is being re-scaled by a factor of %.2f to give the correct mass." % norm)

        if norm > 1.1 or norm < 1. / 1.1:
            logger.warning("Re-scaling factor is significantly different from 1, which indicates that the grid may be too coarse to properly resolve the disk.")

        # Normalize to total disk mass
        rho = rho * norm

        return rho
    def midplane_cumulative_density(self, r):
        '''
        Find the cumulative column density as a function of radius.

        The cumulative density is measured outwards from the origin, and in
        the midplane.

        Parameters
        ----------
        r : np.ndarray
            Array of values of the radius up to which to tabulate the
            cumulative density.

        Returns
        -------
        rho : np.ndarray
            Array of values of the cumulative density.
        '''
        self._check_all_set()

        if self.rmax <= self.rmin:
            logger.warning("Ignoring disk, since rmax < rmin")
            return np.zeros(r.shape)

        # Analytic radial integral of the midplane (z = 0) density from
        # rmin out to r, clipped to the disk's [rmin, rmax] extent
        int1 = integrate_powerlaw(self.rmin, r.clip(self.rmin, self.rmax), self.p - self.beta)
        int1 *= self.r_0 ** (self.beta - self.p)

        # Second term, from the (1 - sqrt(R_star/R)) geometrical factor
        int2 = integrate_powerlaw(self.rmin, r.clip(self.rmin, self.rmax), -0.5 + self.p - self.beta)
        int2 *= self.star.radius ** 0.5 * self.r_0 ** (self.beta - self.p)

        return self.rho_0 * (int1 - int2)
    def _vertical_profile(self, r, theta):
        # Return the (unnormalized) disk density at spherical coordinates
        # (r, theta); used by vertical_cumulative_density().
        self._check_all_set()

        if self.rmax <= self.rmin:
            logger.warning("Ignoring disk, since rmax < rmin")
            return np.zeros(theta.shape)

        # Convert coordinates to cylindrical polars
        z = r * np.cos(theta)
        w = r * np.sin(theta)

        # Find disk scaleheight at each cylindrical radius
        h = self.h_0 * (w / self.r_0) ** self.beta

        # Find disk density at all positions
        rho = (self.r_0 / w) ** (self.beta - self.p) \
            * np.exp(-0.5 * (z / h) ** 2)

        # Geometrical factor
        rho *= (1. - np.sqrt(self.star.radius / w))

        rho *= self.rho_0

        # NOTE: unlike density(), this profile is not re-scaled to match
        # the total disk mass (original comment: "What about normalization")
        return rho
    def vertical_cumulative_density(self, r, theta):
        '''
        Find the cumulative column density as a function of theta.

        Parameters
        ----------
        r : float
            The spherical radius at which to calculate the cumulative density.
        theta : np.ndarray
            The theta values at which to tabulate the cumulative density.

        Returns
        -------
        rho : np.ndarray
            Array of values of the cumulative density.
        '''
        density = self._vertical_profile(r, theta)

        # Path-length element along the theta direction.
        # NOTE(review): _vertical_profile treats theta as radians
        # (np.cos/np.sin), but np.radians() here implies theta is in
        # degrees -- one of the two is presumably inconsistent; confirm
        # the expected units with callers.
        d = r * np.radians(theta)

        tau = density * d

        # Zero the first element so the column starts accumulating from
        # the first theta value
        tau[0] = 0.

        return tau
    def accretion_luminosity(self, grid):
        '''
        Return the viscous accretion luminosity grid

        Parameters
        ----------
        grid : :class:`~hyperion.grid.SphericalPolarGrid` or :class:`~hyperion.grid.CylindricalPolarGrid` instance.
            The spherical or cylindrical polar grid object containing
            information about the position of the grid cells.

        Returns
        -------
        lvisc : np.ndarray
            A 3-dimensional array containing the viscous accretion luminosity
            of the disk inside each cell. The shape of this array is the same
            as ``grid.shape``.
        '''
        if self.rmax <= self.rmin:
            logger.warning("Ignoring disk, since rmax < rmin")
            return np.zeros(grid.shape)

        if self.lvisc == 0.:
            return np.zeros(grid.shape)

        if self.mdot == 0.:
            return np.zeros(grid.shape)

        self._check_all_set()

        # Find disk scaleheight at each cylindrical radius
        h = self.h_0 * (grid.gw / self.r_0) ** self.beta

        # Find normalization constant.
        # NOTE(review): the ``lvisc`` property derives a value from ``mdot``
        # when only the latter is set, so after _check_all_set() passes this
        # condition appears to always be true, leaving the ``else`` branch
        # unreachable -- possibly ``self._lvisc`` was intended; confirm
        # against upstream.
        if self.lvisc is not None:
            int1 = integrate_powerlaw(self.rmin, self.rmax, -2.0)
            int2 = integrate_powerlaw(self.rmin, self.rmax, -2.5)
            int2 *= self.star.radius ** 0.5
            integral = (2. * pi) ** 1.5 * (int1 - int2)
            lvisc0 = self.lvisc / integral
        else:
            lvisc0 = 3. * G * self.star.mass * self.mdot \
                / np.sqrt(32. * pi ** 3.)

        # Find disk luminosity at all positions (equation 4 of Whitney et
        # al. 2003, quoted in the class docstring, integrated over each
        # cell volume)
        luminosity = lvisc0 / grid.gw ** 3 / h * grid.volumes \
            * (1. - np.sqrt(self.star.radius / grid.gw)) \
            * np.exp(-0.5 * (grid.gz / h) ** 2)

        # Truncate below rmin and above rmax, in cylindrical (gw) or
        # spherical (gr) radius depending on the rim settings
        if self.cylindrical_inner_rim:
            luminosity[grid.gw < self.rmin] = 0.
        else:
            luminosity[grid.gr < self.rmin] = 0.
        if self.cylindrical_outer_rim:
            luminosity[grid.gw > self.rmax] = 0.
        else:
            luminosity[grid.gr > self.rmax] = 0.

        # Diagnostic: compare the discretized sum with the analytic value
        logger.info("Luminosity sum [actual] : %.3e" % np.sum(luminosity))
        logger.info("Luminosity sum [theoretical] : %.3e" % self.lvisc)

        return luminosity
def scale_height_at(self, r):
'''
Return the scaleheight of the disk at radius `r`
'''
return self.h_0 * (r / self.r_0) ** self.beta | PypiClean |
/MedPy-0.4.0.tar.gz/MedPy-0.4.0/medpy/graphcut/generate.py |
# build-in modules
import inspect
# third-party modules
import scipy
# own modules
from ..core import Logger
from .graph import GCGraph
from medpy.graphcut.energy_label import __check_label_image
def graph_from_voxels(fg_markers,
                      bg_markers,
                      regional_term = False,
                      boundary_term = False,
                      regional_term_args = False,
                      boundary_term_args = False):
    """
    Create a graph-cut ready graph to segment a nD image using the voxel neighbourhood.

    Create a `~medpy.graphcut.maxflow.GraphDouble` object for all voxels of an image with a
    :math:`ndim * 2` neighbourhood.

    Every voxel of the image is regarded as a node. They are connected to their immediate
    neighbours via arcs. If two voxels are neighbours is determined using
    :math:`ndim*2`-connectedness (e.g. :math:`3*2=6` for 3D). In the next step the arcs weights
    (n-weights) are computed using the supplied ``boundary_term`` function
    (see :mod:`~medpy.graphcut.energy_voxel` for a selection).

    Implicitly the graph holds two additional nodes: the source and the sink, so called
    terminal nodes. These are connected with all other nodes through arcs of an initial
    weight (t-weight) of zero.
    All voxels that are under the foreground markers are considered to be tightly bound
    to the source: The t-weight of the arc from source to these nodes is set to a maximum
    value. The same goes for the background markers: The covered voxels receive a maximum
    (`~medpy.graphcut.graph.GCGraph.MAX`) t-weight for their arc towards the sink.
    All other t-weights are set using the supplied ``regional_term`` function
    (see :mod:`~medpy.graphcut.energy_voxel` for a selection).

    Parameters
    ----------
    fg_markers : ndarray
        The foreground markers as binary array of the same shape as the original image.
    bg_markers : ndarray
        The background markers as binary array of the same shape as the original image.
    regional_term : function
        This can be either `False`, in which case all t-weights are set to 0, except for
        the nodes that are directly connected to the source or sink; or a function, in
        which case the supplied function is used to compute the t_edges. It has to
        have the following signature *regional_term(graph, regional_term_args)*, and is
        supposed to compute (source_t_weight, sink_t_weight) for all voxels of the image
        and add these to the passed `~medpy.graphcut.graph.GCGraph` object. The weights
        have only to be computed for nodes where they do not equal zero. Additional
        parameters can be passed to the function via the ``regional_term_args`` parameter.
    boundary_term : function
        This can be either `False`, in which case all n-edges, i.e. between all nodes
        that are not source or sink, are set to 0; or a function, in which case the
        supplied function is used to compute the edge weights. It has to have the
        following signature *boundary_term(graph, boundary_term_args)*, and is supposed
        to compute the edges between the graphs nodes and to add them to the supplied
        `~medpy.graphcut.graph.GCGraph` object. Additional parameters can be passed to
        the function via the ``boundary_term_args`` parameter.
    regional_term_args : tuple
        Use this to pass some additional parameters to the ``regional_term`` function.
    boundary_term_args : tuple
        Use this to pass some additional parameters to the ``boundary_term`` function.

    Returns
    -------
    graph : `~medpy.graphcut.maxflow.GraphDouble`
        The created graph, ready to execute the graph-cut.

    Raises
    ------
    AttributeError
        If an argument is malformed.
    FunctionError
        If one of the supplied functions returns unexpected results.

    Notes
    -----
    If a voxel is marked as both, foreground and background, the background marker
    is given higher priority.

    All arcs whose weight is not explicitly set are assumed to carry a weight of zero.
    """
    # prepare logger
    logger = Logger.getInstance()

    # prepare result graph
    logger.debug('Assuming {} nodes and {} edges for image of shape {}'.format(fg_markers.size, __voxel_4conectedness(fg_markers.shape), fg_markers.shape))
    graph = GCGraph(fg_markers.size, __voxel_4conectedness(fg_markers.shape))

    logger.info('Performing attribute tests...')

    # check, set and convert all supplied parameters
    fg_markers = scipy.asarray(fg_markers, dtype=scipy.bool_)
    bg_markers = scipy.asarray(bg_markers, dtype=scipy.bool_)

    # set dummy functions if not supplied
    if not regional_term: regional_term = __regional_term_voxel
    if not boundary_term: boundary_term = __boundary_term_voxel

    # check supplied functions and their signature
    # BUG FIX: inspect.getargspec() was removed in Python 3.11;
    # inspect.getfullargspec() is the drop-in replacement (index 0 is
    # still the list of positional argument names).
    if not hasattr(regional_term, '__call__') or not 2 == len(inspect.getfullargspec(regional_term)[0]):
        raise AttributeError('regional_term has to be a callable object which takes two parameters.')
    if not hasattr(boundary_term, '__call__') or not 2 == len(inspect.getfullargspec(boundary_term)[0]):
        raise AttributeError('boundary_term has to be a callable object which takes two parameters.')

    logger.debug('#nodes={}, #hardwired-nodes source/sink={}/{}'.format(fg_markers.size,
                                                                        len(fg_markers.ravel().nonzero()[0]),
                                                                        len(bg_markers.ravel().nonzero()[0])))

    # compute the weights of all edges from the source and to the sink i.e.
    # compute the weights of the t_edges Wt
    logger.info('Computing and adding terminal edge weights...')
    regional_term(graph, regional_term_args)

    # compute the weights of the edges between the neighbouring nodes i.e.
    # compute the weights of the n_edges Wr
    logger.info('Computing and adding inter-node edge weights...')
    boundary_term(graph, boundary_term_args)

    # collect all voxels that are under the foreground resp. background markers i.e.
    # collect all nodes that are connected to the source resp. sink
    logger.info('Setting terminal weights for the markers...')
    if not 0 == scipy.count_nonzero(fg_markers):
        graph.set_source_nodes(fg_markers.ravel().nonzero()[0])
    if not 0 == scipy.count_nonzero(bg_markers):
        graph.set_sink_nodes(bg_markers.ravel().nonzero()[0])

    return graph.get_graph()
def graph_from_labels(label_image,
                      fg_markers,
                      bg_markers,
                      regional_term = False,
                      boundary_term = False,
                      regional_term_args = False,
                      boundary_term_args = False):
    """
    Create a graph-cut ready graph to segment a nD image using the region neighbourhood.

    Create a `~medpy.graphcut.maxflow.GraphDouble` object for all regions of a nD label
    image.

    Every region of the label image is regarded as a node. They are connected to their
    immediate neighbours by arcs. If two regions are neighbours is determined using
    :math:`ndim*2`-connectedness (e.g. :math:`3*2=6` for 3D).
    In the next step the arcs weights (n-weights) are computed using the supplied
    ``boundary_term`` function (see :mod:`~medpy.graphcut.energy_voxel` for a selection).

    Implicitly the graph holds two additional nodes: the source and the sink, so called
    terminal nodes. These are connected with all other nodes through arcs of an initial
    weight (t-weight) of zero.
    All regions that are under the foreground markers are considered to be tightly bound
    to the source: The t-weight of the arc from source to these nodes is set to a maximum
    value. The same goes for the background markers: The covered regions receive a
    maximum (`~medpy.graphcut.graph.GCGraph.MAX`) t-weight for their arc towards the sink.
    All other t-weights are set using the supplied ``regional_term`` function
    (see :mod:`~medpy.graphcut.energy_voxel` for a selection).

    Parameters
    ----------
    label_image: ndarray
        The label image as an array where each voxel carries the id of the region it
        belongs to. Note that the region labels have to start from 1 and be continuous
        (can be achieved with `~medpy.filter.label.relabel`).
    fg_markers : ndarray
        The foreground markers as binary array of the same shape as the original image.
    bg_markers : ndarray
        The background markers as binary array of the same shape as the original image.
    regional_term : function
        This can be either `False`, in which case all t-weights are set to 0, except for
        the nodes that are directly connected to the source or sink; or a function, in
        which case the supplied function is used to compute the t_edges. It has to
        have the following signature *regional_term(graph, regional_term_args)*, and is
        supposed to compute (source_t_weight, sink_t_weight) for all regions of the image
        and add these to the passed `~medpy.graphcut.graph.GCGraph` object. The weights
        have only to be computed for nodes where they do not equal zero. Additional
        parameters can be passed to the function via the ``regional_term_args`` parameter.
    boundary_term : function
        This can be either `False`, in which case all n-edges, i.e. between all nodes
        that are not source or sink, are set to 0; or a function, in which case the
        supplied function is used to compute the edge weights. It has to have the
        following signature *boundary_term(graph, boundary_term_args)*, and is supposed
        to compute the edges between all adjacent regions of the image and to add them
        to the supplied `~medpy.graphcut.graph.GCGraph` object. Additional parameters
        can be passed to the function via the ``boundary_term_args`` parameter.
    regional_term_args : tuple
        Use this to pass some additional parameters to the ``regional_term`` function.
    boundary_term_args : tuple
        Use this to pass some additional parameters to the ``boundary_term`` function.

    Returns
    -------
    graph : `~medpy.graphcut.maxflow.GraphDouble`
        The created graph, ready to execute the graph-cut.

    Raises
    ------
    AttributeError
        If an argument is malformed.
    FunctionError
        If one of the supplied functions returns unexpected results.

    Notes
    -----
    If a voxel is marked as both, foreground and background, the background marker
    is given higher priority.

    All arcs whose weight is not explicitly set are assumed to carry a weight of zero.
    """
    # prepare logger
    logger = Logger.getInstance()

    logger.info('Performing attribute tests...')

    # check, set and convert all supplied parameters
    label_image = scipy.asarray(label_image)
    fg_markers = scipy.asarray(fg_markers, dtype=scipy.bool_)
    bg_markers = scipy.asarray(bg_markers, dtype=scipy.bool_)

    __check_label_image(label_image)

    # set dummy functions if not supplied
    if not regional_term: regional_term = __regional_term_label
    if not boundary_term: boundary_term = __boundary_term_label

    # check supplied functions and their signature
    # BUG FIX: inspect.getargspec() was removed in Python 3.11;
    # inspect.getfullargspec() is the drop-in replacement (index 0 is
    # still the list of positional argument names).
    if not hasattr(regional_term, '__call__') or not 3 == len(inspect.getfullargspec(regional_term)[0]):
        raise AttributeError('regional_term has to be a callable object which takes three parameters.')
    if not hasattr(boundary_term, '__call__') or not 3 == len(inspect.getfullargspec(boundary_term)[0]):
        raise AttributeError('boundary_term has to be a callable object which takes three parameters.')

    logger.info('Determining number of nodes and edges.')

    # compute number of nodes and edges
    nodes = len(scipy.unique(label_image))
    # POSSIBILITY 1: guess the number of edges (in the best situation is faster but requires a little bit more memory. In the worst is slower.)
    edges = 10 * nodes
    logger.debug('guessed: #nodes={} nodes / #edges={}'.format(nodes, edges))
    # POSSIBILITY 2: compute the edges (slow)
    #edges = len(__compute_edges(label_image))
    #logger.debug('computed: #nodes={} nodes / #edges={}'.format(nodes, edges))

    # prepare result graph
    graph = GCGraph(nodes, edges)

    logger.debug('#hardwired-nodes source/sink={}/{}'.format(len(scipy.unique(label_image[fg_markers])),
                                                             len(scipy.unique(label_image[bg_markers]))))

    #logger.info('Extracting the regions bounding boxes...')
    # extract the bounding boxes
    #bounding_boxes = find_objects(label_image)

    # compute the weights of all edges from the source and to the sink i.e.
    # compute the weights of the t_edges Wt
    logger.info('Computing and adding terminal edge weights...')
    #regions = set(graph.get_nodes()) - set(graph.get_source_nodes()) - set(graph.get_sink_nodes())
    regional_term(graph, label_image, regional_term_args) # bounding boxes indexed from 0 # old version: regional_term(graph, label_image, regions, bounding_boxes, regional_term_args)

    # compute the weights of the edges between the neighbouring nodes i.e.
    # compute the weights of the n_edges Wr
    logger.info('Computing and adding inter-node edge weights...')
    boundary_term(graph, label_image, boundary_term_args)

    # collect all regions that are under the foreground resp. background markers i.e.
    # collect all nodes that are connected to the source resp. sink
    logger.info('Setting terminal weights for the markers...')
    graph.set_source_nodes(scipy.unique(label_image[fg_markers] - 1)) # requires -1 to adapt to node id system
    graph.set_sink_nodes(scipy.unique(label_image[bg_markers] - 1))

    return graph.get_graph()
def __regional_term_voxel(graph, regional_term_args):
    """Fake regional_term function with the appropriate signature."""
    # No-op default: leaves all t-weights at their initial value of zero.
    return {}
def __regional_term_label(graph, label_image, regional_term_args):
    """Fake regional_term function with the appropriate signature."""
    # No-op default: leaves all t-weights at their initial value of zero.
    return {}
def __boundary_term_voxel(graph, boundary_term_args):
    """Fake boundary_term function with the appropriate signature."""
    # supplying no boundary term contradicts the whole graph cut idea.
    return {}
def __boundary_term_label(graph, label_image, boundary_term_args):
    """Fake boundary_term function with the appropriate signature."""
    # supplying no boundary term contradicts the whole graph cut idea.
    return {}
def __voxel_4conectedness(shape):
    """
    Return the number of edges for the supplied image shape assuming 4-connectedness.

    The name of the function has historical reasons. Essentially it returns the
    number of edges assuming 4-connectedness only for 2D. For 3D it assumes
    6-connectedness, etc.

    Parameters
    ----------
    shape : sequence
        The shape of the image.

    Returns
    -------
    int
        The number of edges.
    """
    import math  # local import: keeps this fix self-contained

    # empty resp. 1-sized dimensions have to be removed (equal to
    # scipy.squeeze on the array); they contribute no edges
    shape = [dim for dim in shape if dim != 1]
    # Each axis contributes (dim - 1) * prod(other dims) edges, which sums
    # to sum((dim - 1) / dim) * prod(shape).
    # BUG FIX: the original used scipy.prod, a NumPy alias that has been
    # removed from modern SciPy releases; math.prod is the stdlib equivalent
    # (both return 1 for an empty shape, so behaviour is unchanged).
    return int(round(sum((dim - 1) / dim for dim in shape) * math.prod(shape)))
/GeobricksProj4ToEPSG-0.0.15.tar.gz/GeobricksProj4ToEPSG-0.0.15/geobricks_proj4_to_epsg/utils/epsg_json_file.py | import requests
import json
from geobricks_proj4_to_epsg.core.proj4_to_epsg import get_proj4_json_from_string
# TODO: @Deprecated
# dirty methods to get epsg/proj4 codes
# Module-level accumulators shared by the helpers below (not thread-safe).
epsg_json = []            # {"epsg": ..., "proj4": ...} records collected so far
cached_epsg_codes = []    # EPSG codes already processed, to skip duplicates
def create_epsg_json_file():
    # Read the existing EPSG list, fetch the proj4 definition for every code
    # not seen before, and rewrite ../data/epsg.json with the results.
    # NOTE: Python 2 only (print statements); relies on the module-level
    # epsg_json / cached_epsg_codes accumulators.
    with open("../data/epsg.json", "r") as f:
        projection_list = json.load(f)
    for p in projection_list:
        if p["epsg"] not in cached_epsg_codes:
            cached_epsg_codes.append(p["epsg"])
            print p["epsg"]
            # Query spatialreference.org for the proj4 text of this code
            proj4_text = get_proj4_from_spatialreference(p["epsg"])
            print proj4_text
            # None is returned for codes the service does not know
            data = get_proj4_epsg_json(p["epsg"], proj4_text)
            if data is not None:
                epsg_json.append(data)
            print "----"
    # print epsg_json
    write_json_file(epsg_json)
def get_proj4_from_spatialreference(epsg, timeout=30):
    """
    Fetch the proj4 definition text for an EPSG code from spatialreference.org.

    Parameters
    ----------
    epsg : int or str
        The EPSG code to look up.
    timeout : float, optional
        Maximum number of seconds to wait for the remote server. New,
        backward-compatible parameter: the original call passed no timeout
        and could hang indefinitely on an unresponsive server.

    Returns
    -------
    str
        The response body (the proj4 string, or an error page containing
        "Not found" for unknown codes -- see get_proj4_epsg_json).
    """
    r = requests.get("http://spatialreference.org/ref/epsg/" + str(epsg) + "/proj4/",
                     timeout=timeout)
    return r.text
def get_proj4_epsg_json(epsg, proj4_text):
    """
    Build an ``{"epsg", "proj4"}`` record, or return None for unknown codes.

    spatialreference.org answers unknown codes with a page containing
    "Not found"; such responses yield None instead of a record.
    """
    if "Not found" in proj4_text:
        return None
    return {"epsg": epsg, "proj4": get_proj4_json_from_string(proj4_text)}
def write_json_file(json_data):
    # Serialize ``json_data`` to ../data/epsg.json, overwriting the file.
    # NOTE: Python 2 only (print statements).
    with open('../data/epsg.json', 'w') as outfile:
        print "wrinting file"
        print json_data
        json.dump(json_data, outfile)
# this method clean the json produced from create_epsg_json_file(). There are no valid data
# return from the web service
def _clean_epsg_json_data():
    """
    Drop records without a usable proj4 definition and rewrite the file.

    Records that are None, or whose "proj4" value has no "proj" key, came
    back empty from the web service and are discarded.
    """
    with open("epsg.json", "r") as f:
        projection_list = json.load(f)
    cleaned = [p for p in projection_list
               if p is not None and "proj" in p["proj4"]]
    write_json_file(cleaned)
# spatialref doesn't have 3857 and 900913...
def _add_google_mercator_epsg_codes():
    """
    Append the Google/Web Mercator codes (3857, 900913) and rewrite the file.

    spatialreference.org does not serve these codes, so they are added
    manually with the standard Web Mercator proj4 definition.
    """
    mercator = ('+proj=merc +lon_0=0 +k=1 +x_0=0 +y_0=0 +a=6378137 '
                '+b=6378137 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs')
    with open("../data/epsg_original.json", "r") as f:
        projection_list = json.load(f)
    for code in ('3857', '900913'):
        projection_list.append(
            {'epsg': code, 'proj4': get_proj4_json_from_string(mercator)})
    write_json_file(projection_list)
# methods to run
#create_epsg_json_file()
#_clean_epsg_json_data()
#_add_google_mercator_epsg_codes()
# print get_proj4_from_spatialreference(3857) | PypiClean |
/Django-4.2.4.tar.gz/Django-4.2.4/django/forms/boundfield.py | import re
from django.core.exceptions import ValidationError
from django.forms.utils import pretty_name
from django.forms.widgets import MultiWidget, Textarea, TextInput
from django.utils.functional import cached_property
from django.utils.html import format_html, html_safe
from django.utils.translation import gettext_lazy as _
__all__ = ("BoundField",)
@html_safe
class BoundField:
"A Field plus data"
def __init__(self, form, field, name):
self.form = form
self.field = field
self.name = name
self.html_name = form.add_prefix(name)
self.html_initial_name = form.add_initial_prefix(name)
self.html_initial_id = form.add_initial_prefix(self.auto_id)
if self.field.label is None:
self.label = pretty_name(name)
else:
self.label = self.field.label
self.help_text = field.help_text or ""
def __str__(self):
"""Render this field as an HTML widget."""
if self.field.show_hidden_initial:
return self.as_widget() + self.as_hidden(only_initial=True)
return self.as_widget()
@cached_property
def subwidgets(self):
"""
Most widgets yield a single subwidget, but others like RadioSelect and
CheckboxSelectMultiple produce one subwidget for each choice.
This property is cached so that only one database query occurs when
rendering ModelChoiceFields.
"""
id_ = self.field.widget.attrs.get("id") or self.auto_id
attrs = {"id": id_} if id_ else {}
attrs = self.build_widget_attrs(attrs)
return [
BoundWidget(self.field.widget, widget, self.form.renderer)
for widget in self.field.widget.subwidgets(
self.html_name, self.value(), attrs=attrs
)
]
def __bool__(self):
# BoundField evaluates to True even if it doesn't have subwidgets.
return True
def __iter__(self):
return iter(self.subwidgets)
def __len__(self):
return len(self.subwidgets)
def __getitem__(self, idx):
# Prevent unnecessary reevaluation when accessing BoundField's attrs
# from templates.
if not isinstance(idx, (int, slice)):
raise TypeError(
"BoundField indices must be integers or slices, not %s."
% type(idx).__name__
)
return self.subwidgets[idx]
@property
def errors(self):
"""
Return an ErrorList (empty if there are no errors) for this field.
"""
return self.form.errors.get(
self.name, self.form.error_class(renderer=self.form.renderer)
)
    def as_widget(self, widget=None, attrs=None, only_initial=False):
        """
        Render the field by rendering the passed widget, adding any HTML
        attributes passed as attrs. If a widget isn't specified, use the
        field's default widget.
        """
        widget = widget or self.field.widget
        if self.field.localize:
            widget.is_localized = True
        attrs = attrs or {}
        attrs = self.build_widget_attrs(attrs, widget)
        # Pick the id attribute: the hidden-initial id for the duplicate
        # "initial" widget, the regular auto id otherwise; an id set
        # directly on the widget takes precedence.
        if self.auto_id and "id" not in widget.attrs:
            attrs.setdefault(
                "id", self.html_initial_id if only_initial else self.auto_id
            )
        if only_initial and self.html_initial_name in self.form.data:
            # Propagate the hidden initial value.
            value = self.form._widget_data_value(
                self.field.hidden_widget(),
                self.html_initial_name,
            )
        else:
            value = self.value()
        return widget.render(
            name=self.html_initial_name if only_initial else self.html_name,
            value=value,
            attrs=attrs,
            renderer=self.form.renderer,
        )
def as_text(self, attrs=None, **kwargs):
"""
Return a string of HTML for representing this as an <input type="text">.
"""
return self.as_widget(TextInput(), attrs, **kwargs)
    def as_textarea(self, attrs=None, **kwargs):
        """Return a string of HTML for representing this as a <textarea>."""
        # Force a Textarea regardless of the field's default widget.
        return self.as_widget(Textarea(), attrs, **kwargs)
    def as_hidden(self, attrs=None, **kwargs):
        """
        Return a string of HTML for representing this as an <input type="hidden">.
        """
        # Use the field's own hidden widget so type coercion stays correct.
        return self.as_widget(self.field.hidden_widget(), attrs, **kwargs)
    @property
    def data(self):
        """
        Return the data for this BoundField, or None if it wasn't given.
        """
        # Delegate to the form so widget-specific extraction (e.g. for a
        # MultiWidget spanning several form keys) is applied.
        return self.form._widget_data_value(self.field.widget, self.html_name)
def value(self):
"""
Return the value for this BoundField, using the initial value if
the form is not bound or the data otherwise.
"""
data = self.initial
if self.form.is_bound:
data = self.field.bound_data(self.data, data)
return self.field.prepare_value(data)
    def _has_changed(self):
        """
        Return True if the submitted data differs from the initial value.
        """
        field = self.field
        if field.show_hidden_initial:
            # Compare against the initial value round-tripped through the
            # hidden companion input rather than the Python-side initial.
            hidden_widget = field.hidden_widget()
            initial_value = self.form._widget_data_value(
                hidden_widget,
                self.html_initial_name,
            )
            try:
                initial_value = field.to_python(initial_value)
            except ValidationError:
                # Always assume data has changed if validation fails.
                return True
        else:
            initial_value = self.initial
        return field.has_changed(initial_value, self.data)
    def label_tag(self, contents=None, attrs=None, label_suffix=None, tag=None):
        """
        Wrap the given contents in a <label>, if the field has an ID attribute.
        contents should be mark_safe'd to avoid HTML escaping. If contents
        aren't given, use the field's HTML-escaped label.
        If attrs are given, use them as HTML attributes on the <label> tag.
        label_suffix overrides the form's label_suffix.
        tag overrides the wrapping element name (used by legend_tag()).
        """
        contents = contents or self.label
        if label_suffix is None:
            # A field-level suffix takes precedence over the form-level one.
            label_suffix = (
                self.field.label_suffix
                if self.field.label_suffix is not None
                else self.form.label_suffix
            )
        # Only add the suffix if the label does not end in punctuation.
        # Translators: If found as last label character, these punctuation
        # characters will prevent the default label_suffix to be appended to the label
        if label_suffix and contents and contents[-1] not in _(":?.!"):
            contents = format_html("{}{}", contents, label_suffix)
        widget = self.field.widget
        id_ = widget.attrs.get("id") or self.auto_id
        if id_:
            # Let the widget map the input id to the label target (a
            # composite widget may point at its first subwidget).
            id_for_label = widget.id_for_label(id_)
            if id_for_label:
                attrs = {**(attrs or {}), "for": id_for_label}
            if self.field.required and hasattr(self.form, "required_css_class"):
                attrs = attrs or {}
                if "class" in attrs:
                    attrs["class"] += " " + self.form.required_css_class
                else:
                    attrs["class"] = self.form.required_css_class
        context = {
            "field": self,
            "label": contents,
            "attrs": attrs,
            "use_tag": bool(id_),
            "tag": tag or "label",
        }
        return self.form.render(self.form.template_name_label, context)
    def legend_tag(self, contents=None, attrs=None, label_suffix=None):
        """
        Wrap the given contents in a <legend>, if the field has an ID
        attribute. Contents should be mark_safe'd to avoid HTML escaping. If
        contents aren't given, use the field's HTML-escaped label.
        If attrs are given, use them as HTML attributes on the <legend> tag.
        label_suffix overrides the form's label_suffix.
        """
        # Same machinery as label_tag(), rendered with a <legend> element.
        return self.label_tag(contents, attrs, label_suffix, tag="legend")
def css_classes(self, extra_classes=None):
"""
Return a string of space-separated CSS classes for this field.
"""
if hasattr(extra_classes, "split"):
extra_classes = extra_classes.split()
extra_classes = set(extra_classes or [])
if self.errors and hasattr(self.form, "error_css_class"):
extra_classes.add(self.form.error_css_class)
if self.field.required and hasattr(self.form, "required_css_class"):
extra_classes.add(self.form.required_css_class)
return " ".join(extra_classes)
    @property
    def is_hidden(self):
        """Return True if this BoundField's widget is hidden."""
        # Mirrors the widget's flag so templates can test field.is_hidden.
        return self.field.widget.is_hidden
@property
def auto_id(self):
"""
Calculate and return the ID attribute for this BoundField, if the
associated Form has specified auto_id. Return an empty string otherwise.
"""
auto_id = self.form.auto_id # Boolean or string
if auto_id and "%s" in str(auto_id):
return auto_id % self.html_name
elif auto_id:
return self.html_name
return ""
    @property
    def id_for_label(self):
        """
        Wrapper around the field widget's `id_for_label` method.
        Useful, for example, for focusing on this field regardless of whether
        it has a single widget or a MultiWidget.
        """
        widget = self.field.widget
        # An explicit id in widget.attrs wins over the auto-generated one.
        id_ = widget.attrs.get("id") or self.auto_id
        return widget.id_for_label(id_)
    @cached_property
    def initial(self):
        # Cached: the form-side lookup may invoke a callable default, and
        # repeated template access must stay cheap and return a stable value.
        return self.form.get_initial_for_field(self.field, self.name)
    def build_widget_attrs(self, attrs, widget=None):
        """
        Return a copy of attrs extended with the required/disabled HTML
        attributes implied by the field and form configuration.
        """
        widget = widget or self.field.widget
        attrs = dict(attrs)  # Copy attrs to avoid modifying the argument.
        if (
            widget.use_required_attribute(self.initial)
            and self.field.required
            and self.form.use_required_attribute
        ):
            # MultiValueField has require_all_fields: if False, fall back
            # on subfields.
            if (
                hasattr(self.field, "require_all_fields")
                and not self.field.require_all_fields
                and isinstance(self.field.widget, MultiWidget)
            ):
                # Note: this marks each subwidget's own attrs dict rather
                # than the returned top-level attrs.
                for subfield, subwidget in zip(self.field.fields, widget.widgets):
                    subwidget.attrs["required"] = (
                        subwidget.use_required_attribute(self.initial)
                        and subfield.required
                    )
            else:
                attrs["required"] = True
        if self.field.disabled:
            attrs["disabled"] = True
        return attrs
@property
def widget_type(self):
return re.sub(
r"widget$|input$", "", self.field.widget.__class__.__name__.lower()
)
    @property
    def use_fieldset(self):
        """
        Return the value of this BoundField widget's use_fieldset attribute.
        """
        # Widgets that render multiple inputs opt into a <fieldset> wrapper.
        return self.field.widget.use_fieldset
@html_safe
class BoundWidget:
    """
    A container class used for iterating over widgets. This is useful for
    widgets that have choices. For example, the following can be used in a
    template:
    {% for radio in myform.beatles %}
        <label for="{{ radio.id_for_label }}">
            {{ radio.choice_label }}
            <span class="radio">{{ radio.tag }}</span>
        </label>
    {% endfor %}
    """

    def __init__(self, parent_widget, data, renderer):
        self.parent_widget = parent_widget
        self.data = data
        self.renderer = renderer

    def __str__(self):
        # Stringifying a BoundWidget renders it wrapped in its <label>.
        return self.tag(wrap_label=True)

    def tag(self, wrap_label=False):
        """Render this subwidget, optionally wrapped in its <label>."""
        widget_context = dict(self.data, wrap_label=wrap_label)
        return self.parent_widget._render(
            self.template_name, {"widget": widget_context}, self.renderer
        )

    @property
    def template_name(self):
        """Prefer a per-subwidget template over the parent widget's."""
        try:
            return self.data["template_name"]
        except KeyError:
            return self.parent_widget.template_name

    @property
    def id_for_label(self):
        # The id is carried in the per-subwidget attrs dict, if any.
        return self.data["attrs"].get("id")

    @property
    def choice_label(self):
        # Human-readable label of the choice this subwidget represents.
        return self.data["label"]
/GQCMS-0.0.4-py3-none-any.whl/build/lib/build/lib/gqcms/matrices/Determinant.py | from itertools import combinations, product
import numpy as np
class Determinant:
"""
This class can be used to generate ONVs from a list of occupied orbitals
and generate all n-tuply exited determinants
:param alpha_occ: list of occupied alpha orbitals
:param beta_occ: list of occupied beta orbitals
:param alpha_onv: bit string representation of occupied alpha orbitals
:param beta_onv: bit string respresentation of occupied beta orbitals
"""
def __init__(self, alpha_occ=[], beta_occ=[], nalpha=None, nbeta=None, sites=None):
"""
:param nalpha: number of alpha electrons (default is None)
:param nbeta: number of beta electrons (default is None)
:param alpha_occ: list of occupied alpha orbitals indices
e.g. [0, 1] means orbitals 0 and 1 are occupied
:param beta_occ: list of occupied beta orbitals indices
:param sites: the system size, if None, then half filling is assumed
"""
if nalpha is not None and nbeta is not None:
self._alpha_occ = list(range(nalpha))
self._beta_occ = list(range(nbeta))
else:
self._alpha_occ = list(alpha_occ)
self._beta_occ = list(beta_occ)
self._nalpha = len(self._alpha_occ)
self._nbeta = len(self._beta_occ)
self._sites = self._nalpha + self._nbeta if sites is None else sites
# Convert occupancy list to bit string
self._alpha_onv = Determinant.orbitals_to_onv(self._alpha_occ)
self._beta_onv = Determinant.orbitals_to_onv(self._beta_occ)
self.excitations_list = []
def __eq__(self, other):
return self._alpha_onv == other.alpha_onv and self._beta_onv == other.beta_onv
    def __hash__(self) -> int:
        # Hash on the same (alpha, beta) ONV pair used by __eq__, so
        # equal determinants collide and set()-based deduplication works.
        return hash((self._alpha_onv, self._beta_onv))
def __str__(self):
"""
Print a representation of the Determinant
"""
output = ""
# Loop through all sites
for site in range(self._sites):
if site in self._alpha_occ:
# If site is occupied by an alpha electron, add an up arrow
output += "\u25B2"
else:
# If no alpha electron is on this site, add _
output += "_"
if site in self._beta_occ:
# If site is occupied by a beta electron, add a down arrow
output += "\u25BC"
else:
# If no beta electron is on this site, add _
output += "_"
# Add spacing between sites
output += "\t"
return output
# return "|" + str(self._alpha_occ) + str(self._beta_occ) + ">"
    # Read-only accessors over the internal occupation state.
    @property
    def alpha_onv(self):
        # Bit string (ONV) of the alpha occupations; bit i <=> orbital i.
        return self._alpha_onv
    @property
    def beta_onv(self):
        # Bit string (ONV) of the beta occupations.
        return self._beta_onv
    @property
    def alpha_occ(self):
        # Occupied alpha orbital indices as a numpy array.
        return np.asarray(self._alpha_occ)
    @property
    def beta_occ(self):
        # Occupied beta orbital indices as a numpy array.
        return np.asarray(self._beta_occ)
    @property
    def nalpha(self):
        # Number of alpha electrons.
        return self._nalpha
    @property
    def nbeta(self):
        # Number of beta electrons.
        return self._nbeta
    @property
    def nelectrons(self):
        # Total electron count (alpha + beta).
        return self._nalpha + self._nbeta
    @property
    def sites(self):
        # Number of spatial sites/orbitals in the system.
        return self._sites
@staticmethod
def orbitals_to_onv(occ_list: list) -> list:
"""
Convert a list of occupied orbitals to bit string
e.g. [0, 1] -> 11
:param occ_list: list of occupied orbitals
"""
# Return 0 if occ_list is empty
if not occ_list:
return 0
# Sort list and reverse it, because the highest occupied orbital
# is the left most bit
occ_list = sorted(occ_list, reverse=True)
bitstring = 0
prev_occ_orbital = occ_list[0]
for occ_orbital in occ_list:
# Leftshift the bit string so that the last bit corresponds to
# the current occupied orbital
bitstring <<= prev_occ_orbital - occ_orbital
# Set last bit on 1
bitstring |= 1
# Store current orbital
prev_occ_orbital = occ_orbital
# Leftshift so that the last orbital corresponds to orbital 1
bitstring <<= prev_occ_orbital
return bitstring
@staticmethod
def onv_to_orbitals(bitstring) -> list:
"""
Return a list with the occupied orbitals indices
:param bitstring: bitstring representation of determinant
"""
occ_list = []
index = 0
# Loop until all bits have been seen
while bitstring != 0:
# If the right most bit is 1, this orbital is occupied
if bitstring & 1 == 1:
occ_list.append(index)
# Remove the right most bit
bitstring >>= 1
index += 1
return occ_list
@staticmethod
def to_spin_orbitals(alpha_occ, beta_occ):
"""
Returns a list of spin orbitals where even orbitals are alpha and
odd orbitals are beta
:param alpha_occ: list of occupied alpha orbitals
:param beta_occ: list of occupied beta orbitals
"""
return [i * 2 for i in alpha_occ] + [i * 2 + 1 for i in beta_occ]
@staticmethod
def get_unoccupied_orbitals(bitstring, nmo: int) -> list:
"""
Return a list with the unoccupied orbitals indices
:param bitstring: bit string representation of a determinant
:param nmo: number of molecular orbitals
"""
# Negate the bit string, now 1 corresponds to unoccupied orbital
bitstring = ~bitstring
unocc_list = []
# Loop through all orbital indices
for i in range(nmo):
# If the last bit is 1, this orbital is unoccupied add it to
# the list
if bitstring & 1 == 1:
unocc_list.append(i)
# Remove the right most bit
bitstring >>= 1
return unocc_list
@staticmethod
def _num_different_orbitals(onv1, onv2) -> int:
"""
Counts how many orbitals are different occupied between onv1 and
onv2
"""
different_bits = onv1 ^ onv2
count = 0
while different_bits != 0:
if different_bits & 1 == 1:
count += 1
different_bits >>= 1
# If one electron is in another orbital, two bits will be different
return count / 2
@staticmethod
def get_position(ref_list, occ_list):
"""
Returns the indices from the values in occ_list in ref_list
Used to find the position of an orbital in the determinant
"""
positions = []
for index, i in enumerate(sorted(ref_list)):
if i in occ_list:
positions.append(index)
return positions
@staticmethod
def _different_bits_to_orbitals(onv1, onv2):
"""
Returns a list of orbitals that are different between onv1 and onv2
:param onv1: bit string representation of a determinant
:param onv2: bit string representation of a determinant
"""
common_bits = onv1 & onv2
# Find the different bits and convert to orbital indices
different_orb_1 = Determinant.onv_to_orbitals(onv1 ^ common_bits)
different_orb_2 = Determinant.onv_to_orbitals(onv2 ^ common_bits)
return different_orb_1, different_orb_2
def get_spin_orbitals(self):
"""
Returns a list of spin orbitals where even orbitals are alpha and
odd orbitals are beta
"""
return [i * 2 for i in self._alpha_occ] + [i * 2 + 1 for i in self._beta_occ]
    def copy(self):
        """
        Creates a copy of itself
        """
        # Rebuild from the occupation lists; the constructor copies them,
        # so the clone shares no mutable state with the original.
        return Determinant(self._alpha_occ, self._beta_occ, sites=self._sites)
    def remove_alpha_orbital(self, orbital_index: int):
        """
        Removes the alpha orbital at orbital_index. This is equivalent to
        setting the bit at index 'orbital_index' to 0 (an annihilation
        operator applied in place). Annihilating an unoccupied orbital
        kills the state: both ONVs are zeroed to encode the null
        wave function.
        :param orbital_index: orbital to remove
        """
        # If we want to destroy an electron that is not occupied the
        # resulting wave function is 0
        if orbital_index not in self._alpha_occ:
            self._alpha_onv = 0
            self._beta_onv = 0
        else:
            # Create a bit string with at index 'orbital_index' a 1
            # Use XOR to set the bit at index 'orbital_index' in alpha_onv
            # to 0 and don't change any other bit
            self._alpha_onv ^= 1 << orbital_index
        # Update occupied list
        self._alpha_occ = Determinant.onv_to_orbitals(self._alpha_onv)
        self._beta_occ = Determinant.onv_to_orbitals(self._beta_onv)
    def remove_beta_orbital(self, orbital_index: int):
        """
        Removes the beta orbital at orbital_index. This is equivalent to
        setting the bit at index 'orbital_index' to 0 (an annihilation
        operator applied in place). Annihilating an unoccupied orbital
        kills the state: both ONVs are zeroed to encode the null
        wave function.
        :param orbital_index: orbital to remove
        """
        # If we want to destroy an electron that is not occupied the
        # resulting wave function is 0
        if orbital_index not in self._beta_occ:
            self._beta_onv = 0
            self._alpha_onv = 0
        else:
            # Create a bit string where the bit at index 'orbital_index' is 1
            # Use bitwise XOR to set the bit at index 'orbital_index' in
            # beta_onv to 0 and don't change any other bit
            self._beta_onv ^= 1 << orbital_index
        # Update occupied list
        self._alpha_occ = Determinant.onv_to_orbitals(self._alpha_onv)
        self._beta_occ = Determinant.onv_to_orbitals(self._beta_onv)
    def add_alpha_orbital(self, orbital_index: int):
        """
        Add an alpha orbital at orbital_index. This is equivalent to setting
        the bit at index 'orbital_index' to 1 (a creation operator applied
        in place). Creating an already-occupied orbital kills the state:
        both ONVs are zeroed to encode the null wave function.
        :param orbital_index: orbital to create
        """
        # If orbital is already occupied kill the wave function
        if orbital_index in self._alpha_occ:
            self._alpha_onv = 0
            self._beta_onv = 0
        else:
            # Create a bit string where the bit at index 'orbital_index' is 1
            # Use OR to set the bit at index 'orbital_index' in alpha_onv to 1
            # and don't change any other bit
            self._alpha_onv |= 1 << orbital_index
        # Update occupied list
        self._alpha_occ = Determinant.onv_to_orbitals(self._alpha_onv)
        self._beta_occ = Determinant.onv_to_orbitals(self._beta_onv)
    def add_beta_orbital(self, orbital_index: int):
        """
        Add a beta orbital at orbital_index. This is equivalent to setting
        the bit at index 'orbital_index' to 1 (a creation operator applied
        in place). Creating an already-occupied orbital kills the state:
        both ONVs are zeroed to encode the null wave function.
        :param orbital_index: orbital to create
        """
        # If orbital is already occupied kill the wave function
        if orbital_index in self._beta_occ:
            self._alpha_onv = 0
            self._beta_onv = 0
        else:
            # Create a bit string where the bit at index 'orbital_index' is 1
            # Use OR to set the bit at index 'orbital_index' in beta_onv to 1
            # and don't change any other bit
            self._beta_onv |= 1 << orbital_index
        # Update occupied list
        self._alpha_occ = Determinant.onv_to_orbitals(self._alpha_onv)
        self._beta_occ = Determinant.onv_to_orbitals(self._beta_onv)
def n_tuply_excitations(self, n: int, nmo: int, triplets=False) -> list:
"""
Returns a list of all n-tuply excited determinants
:param n: number of excitations
:param nmo: number of sites
"""
# Return determinant if no excitations are asked
if n == 0:
return [self]
alpha_unocc = Determinant.get_unoccupied_orbitals(self._alpha_onv, nmo)
beta_unocc = Determinant.get_unoccupied_orbitals(self._beta_onv, nmo)
determinants = []
# Create all possible combinations with n elements from the
# alpha_occ orbitals to eleminate the use of a prefactor
for bs in combinations(self._alpha_occ, n):
for rs in combinations(alpha_unocc, n):
det = self.copy()
# Remove orbitals
for b in bs:
det.remove_alpha_orbital(b)
# Add orbitals
for r in rs:
det.add_alpha_orbital(r)
determinants.append(det)
for bs in combinations(self._beta_occ, n):
for rs in combinations(beta_unocc, n):
det = self.copy()
# Remove orbitals
for b in bs:
det.remove_beta_orbital(b)
# Add orbitals
for r in rs:
det.add_beta_orbital(r)
determinants.append(det)
# Triplet excitations
if triplets:
# Excite alpha to beta
for bs in combinations(self._alpha_occ, n):
for rs in combinations(beta_unocc, n):
det = self.copy()
# Remove orbitals
for b in bs:
det.remove_alpha_orbital(b)
# Add orbitals
for r in rs:
det.add_beta_orbital(r)
determinants.append(det)
# Excite beta to alpha
for bs in combinations(self._beta_occ, n):
for rs in combinations(alpha_unocc, n):
det = self.copy()
# Remove orbitals
for b in bs:
det.remove_beta_orbital(b)
# Add orbitals
for r in rs:
det.add_alpha_orbital(r)
determinants.append(det)
# Excite alpha and beta
# for i in range(n):
# for alpha_a in combinations(self._alpha_occ, i):
# for alpha_r in combinations(alpha_unocc, i):
# for j in range(n):
# self.excitations_list.append((i, j))
# for beta_b in combinations(self._beta_occ, j):
# for beta_s in combinations(beta_unocc, j):
# det = self.copy()
# # Remove alpha orbitals
# for a in alpha_a:
# det.remove_alpha_orbital(a)
# # Remove beta orbitals
# for b in beta_b:
# det.remove_beta_orbital(b)
# # Create alpha orbitals
# for r in alpha_r:
# det.add_alpha_orbital(r)
# # Create beta orbitals
# for s in beta_s:
# det.add_beta_orbital(s)
# determinants.append(det)
for n_beta in range(1, n):
alpha_excitations_origin = list(combinations(self._alpha_occ, n-n_beta))
beta_excitations_origin = list(combinations(self._beta_occ, n_beta))
alpha_excitations_end = list(combinations(alpha_unocc, n-n_beta))
beta_excitations_end = list(combinations(beta_unocc, n_beta))
for i in product(alpha_excitations_origin, beta_excitations_origin):
alpha_i, beta_i = i
for a in product(alpha_excitations_end, beta_excitations_end):
alpha_a, beta_a = a
det = self.copy()
# Remove alpha orbitals
for i in alpha_i:
det.remove_alpha_orbital(i)
# Remove beta orbitals
for i in beta_i:
det.remove_beta_orbital(i)
# Create alpha orbitals
for a in alpha_a:
det.add_alpha_orbital(a)
# Create beta orbitals
for a in beta_a:
det.add_beta_orbital(a)
determinants.append(det)
return determinants
    def single_excitations(self, nmo: int) -> list:
        """
        Returns a list of all singly excited determinants
        :param nmo: number of molecular orbitals
        """
        # Convenience wrapper around n_tuply_excitations(1, ...).
        return self.n_tuply_excitations(1, nmo)
    def single_and_double_excitations(self, nmo) -> list:
        """
        Returns a list of all singly and doubly excited Determinant
        (the CISD excitation space, excluding the reference itself).
        :param nmo: number of molecular orbitals
        """
        return self.n_tuply_excitations(1, nmo) + self.n_tuply_excitations(2, nmo)
    def single_double_and_triple_excitations(self, nmo) -> list:
        """
        Returns a list of all singly, doubly and triple excited Determinant
        (the CISDT excitation space, excluding the reference itself).
        :param nmo: number of molecular orbitals
        """
        return (
            self.n_tuply_excitations(1, nmo)
            + self.n_tuply_excitations(2, nmo)
            + self.n_tuply_excitations(3, nmo)
        )
    def all_excitations(self, nmo: int) -> list:
        """
        Returns a list of all possible excited Determinant
        (the reference plus every n-tuple excitation, de-duplicated
        via Determinant.__eq__/__hash__).
        :param nmo: number of molecular orbitals
        """
        # pool = multiprocessing.Pool(multiprocessing.cpu_count())
        determinants = [self]
        # The maximum number of excitation is limited by the number of
        # electrons. The length of the occupied indices list gives the
        # number of electrons (N).
        for n in range(1, len(self._alpha_occ) + len(self._beta_occ) + 1):
            det = self.n_tuply_excitations(n, nmo, triplets=False)
            determinants.extend(det)
        # return determinants
        # NOTE(review): set() iteration order is arbitrary, so the order
        # of the returned list is not deterministic across runs.
        return list(set(determinants))
    def num_different_orbitals(self, other) -> int:
        """
        Returns a number of how many orbitals are different between
        the two slater Determinant
        (note: the value is a float, since each spin count is halved).
        :param other: other Determinant to compare
        """
        return Determinant._num_different_orbitals(
            self._alpha_onv, other.alpha_onv
        ) + Determinant._num_different_orbitals(self._beta_onv, other.beta_onv)
def get_common_orbitals(self, other) -> list:
"""
Returns a list of common occupied orbitals
:param other: a Determinant to compare to
"""
# Create a new bit string with a 1 on places where both old ONVs
# are occupied
common_onv_alpha = self._alpha_onv & other._alpha_onv
common_onv_beta = self._beta_onv & other._beta_onv
# Convert bistring to list
common_list_alpha = Determinant.onv_to_orbitals(common_onv_alpha)
common_list_beta = Determinant.onv_to_orbitals(common_onv_beta)
# returns the mixed list
return Determinant.to_spin_orbitals(common_list_alpha, common_list_beta)
    def get_doubly_occupied_orbitals(self) -> list:
        """
        Returns a list of doubly occupied orbitals
        (sites holding both an alpha and a beta electron).
        """
        return Determinant.onv_to_orbitals(self._alpha_onv & self._beta_onv)
def get_sign(self, alpha_orbitals=[], beta_orbitals=[]):
"""
Returns the sign resulting from orbital rotation
:param alpha_orbitals: alpha orbitals rotated to the front
:param beta_orbitals: beta orbitals rotated to the front
"""
# Find where the orbitals are in the determinant
alpha_pos = Determinant.get_position(self._alpha_occ, alpha_orbitals)
beta_pos = Determinant.get_position(self._beta_occ, beta_orbitals)
sign = 1
# Compute how many rotations needs to be done
for i in range(len(alpha_pos)):
# The number of rotations are reduced by the index because
# there are already i orbitals move to the front.
# If the number of rotations is odd, multiply the sign by -1
if (alpha_pos[i] - i) % 2 == 1:
sign *= -1
# Same is done for beta
for i in range(len(beta_pos)):
if (beta_pos[i] - i) % 2 == 1:
sign *= -1
return sign
    def get_sign_spin_orbital(self, orbital):
        # +1 if `orbital` sits at an even position in this determinant's
        # spin-orbital list, -1 for an odd position (fermionic parity).
        pos = Determinant.get_position(self.get_spin_orbitals(), [orbital])
        return 1 if pos[0] % 2 == 0 else -1
def get_different_orbitals(self, other):
"""
Returns a list of orbitals that are different between onv1 and onv2
"""
# Get list of orbitals that are different between the determinants
diff_alpha_1, diff_alpha_2 = Determinant._different_bits_to_orbitals(
self._alpha_onv, other.alpha_onv
)
diff_beta_1, diff_beta_2 = Determinant._different_bits_to_orbitals(
self._beta_onv, other.beta_onv
)
# Compute sign for orbital rotation
sign_1 = self.get_sign(diff_alpha_1, diff_beta_1)
sign_2 = other.get_sign(diff_alpha_2, diff_beta_2)
# Convert orbital indices to spin orbitals list
spin_orb_1 = Determinant.to_spin_orbitals(diff_alpha_1, diff_beta_1)
spin_orb_2 = Determinant.to_spin_orbitals(diff_alpha_2, diff_beta_2)
return spin_orb_1, spin_orb_2, sign_1 * sign_2 | PypiClean |
/Axelrod-4.13.0.tar.gz/Axelrod-4.13.0/docs/reference/bibliography.rst | .. _bibliography:
Bibliography
============
This is a collection of various bibliographic items referenced in the
documentation.
.. [Adami2013] Adami C and Hintze A. (2013) Evolutionary instability of zero-determinant strategies demonstrates that winning is not everything. Nature communications. https://www.nature.com/articles/ncomms3193
.. [Akin2015] Akin, Ethan. "What you gotta know to play good in the Iterated Prisoner’s Dilemma." Games 6.3 (2015): 175-190.
.. [Amaral2016] Amaral, M. A., Wardil, L., Perc, M., & Da Silva, J. K. L. (2016). Stochastic win-stay-lose-shift strategy with dynamic aspirations in evolutionary social dilemmas. Physical Review E - Statistical, Nonlinear, and Soft Matter Physics, 94(3), 1–9. https://doi.org/10.1103/PhysRevE.94.032317
.. [Andre2013] Andre L. C., Honovan P., Felipe T. and Frederico G. (2013). Iterated Prisoner’s Dilemma - An extended analysis, http://abricom.org.br/wp-content/uploads/2016/03/bricsccicbic2013_submission_202.pdf
.. [Ashlock2006] Ashlock, D., & Kim E. Y, & Leahy, N. (2006). Understanding Representational Sensitivity in the Iterated Prisoner’s Dilemma with Fingerprints. IEEE Transactions On Systems, Man, And Cybernetics, Part C: Applications And Reviews, 36 (4)
.. [Ashlock2006b] Ashlock, W. & Ashlock, D. (2006). Changes in Prisoner's Dilemma Strategies Over Evolutionary Time With Different Population Sizes 2006 IEEE International Conference on Evolutionary Computation. http://DOI.org/10.1109/CEC.2006.1688322
.. [Ashlock2008] Ashlock, D., & Kim, E. Y. (2008). Fingerprinting: Visualization and automatic analysis of prisoner’s dilemma strategies. IEEE Transactions on Evolutionary Computation, 12(5), 647–659. http://doi.org/10.1109/TEVC.2008.920675
.. [Ashlock2009] Ashlock, D., Kim, E. Y., & Ashlock, W. (2009) Fingerprint analysis of the noisy prisoner’s dilemma using a finite-state representation. IEEE Transactions on Computational Intelligence and AI in Games. 1(2), 154-167 http://doi.org/10.1109/TCIAIG.2009.2018704
.. [Ashlock2014] Ashlock, W., Tsang, J. & Ashlock, D. (2014) The evolution of exploitation. 2014 IEEE Symposium on Foundations of Computational Intelligence (FOCI) http://DOI.org/10.1109/FOCI.2014.7007818
.. [Ashlock2015] Ashlock, D., Brown, J.A., & Hingston P. (2015). Multiple Opponent Optimization of Prisoner’s Dilemma Playing Agents. Multiple Opponent Optimization of Prisoner’s Dilemma Playing Agents http://DOI.org/10.1109/TCIAIG.2014.2326012
.. [Au2006] Au, T.-C. and Nau, D. S. (2006) Accident or intention: That is the question (in the iterated prisoner’s dilemma). In Proc. Int. Conf. Auton. Agents and Multiagent Syst. (AAMAS), pp. 561–568. http://www.cs.umd.edu/~nau/papers/au2006accident.pdf
.. [Axelrod1980] Axelrod, R. (1980). Effective Choice in the Prisoner’s Dilemma. Journal of Conflict Resolution, 24(1), 3–25.
.. [Axelrod1980b] Axelrod, R. (1980). More Effective Choice in the Prisoner’s Dilemma. Journal of Conflict Resolution, 24(3), 379-403.
.. [Axelrod1984] Axelrod, R. (1984). The Evolution of Cooperation. Basic Books. ISBN 0-465-02121-2.
.. [Axelrod1995] Wu, J. and Axelrod, R. (1995). How to cope with noise in the Iterated prisoner’s dilemma, Journal of Conflict Resolution, 39(1), pp. 183–189. doi: 10.1177/0022002795039001008.
.. [Banks1990] Banks, J. S., & Sundaram, R. K. (1990). Repeated games, finite automata, and complexity. Games and Economic Behavior, 2(2), 97–117. http://doi.org/10.1016/0899-8256(90)90024-O
.. [Bendor1993] Bendor, Jonathan. "Uncertainty and the Evolution of Cooperation." The Journal of Conflict Resolution, 37(4), 709–734.
.. [Beaufils1997] Beaufils, B. & Delahaye, J. & Mathieu, P. (1997). Our Meeting With Gradual: A Good Strategy For The Iterated Prisoner’s Dilemma. http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.4041
.. [Berg2015] Berg, P. Van Den, & Weissing, F. J. (2015). The importance of mechanisms for the evolution of cooperation. Proceedings of the Royal Society B-Biological Sciences, 282.
.. [CRISTAL-SMAC2018] CRISTAL Lab, SMAC Team, Lille University (2018). IPD : the Iterated Prisoner's Dilemma. https://github.com/cristal-smac/ipd
.. [Downing1975] Downing, Leslie L. "The Prisoner's Dilemma game as a problem-solving phenomenon: An outcome maximization interpretation." Simulation & Games 6.4 (1975): 366-391.
.. [Eckhart2015] Eckhart Arnold (2016) CoopSim v0.9.9 beta 6. https://github.com/jecki/CoopSim/
.. [Frean1994] Frean, Marcus R. "The Prisoner's Dilemma without Synchrony." Proceedings: Biological Sciences, vol. 257, no. 1348, 1994, pp. 75–79. www.jstor.org/stable/50253.
.. [Harper2017] Harper, M., Knight, V., Jones, M., Koutsovoulos, G., Glynatsi, N. E., & Campbell, O. (2017) Reinforcement learning produces dominant strategies for the Iterated Prisoner’s Dilemma. PloS one. https://doi.org/10.1371/journal.pone.0188046
.. [Hauert2002] Hauert, Christoph, and Olaf Stenull. "Simple adaptive strategy wins the prisoner's dilemma." Journal of Theoretical Biology 218.3 (2002): 261-272.
.. [Hilbe2013] Hilbe, C., Nowak, M.A. and Traulsen, A. (2013). Adaptive dynamics of extortion and compliance, PLoS ONE, 8(11), p. e77886. doi: 10.1371/journal.pone.0077886.
.. [Hilbe2017] Hilbe, C., Martinez-Vaquero, L. A., Chatterjee K., Nowak M. A. (2017). Memory-n strategies of direct reciprocity, Proceedings of the National Academy of Sciences May 2017, 114 (18) 4715-4720; doi: 10.1073/pnas.1621239114.
.. [Kuhn2017] Kuhn, Steven, "Prisoner's Dilemma", The Stanford Encyclopedia of Philosophy (Spring 2017 Edition), Edward N. Zalta (ed.), https://plato.stanford.edu/archives/spr2017/entries/prisoner-dilemma/
.. [Kraines1989] Kraines, David, and Vivian Kraines. "Pavlov and the prisoner's dilemma." Theory and decision 26.1 (1989): 47-79. doi:10.1007/BF00134056
.. [Krapohl2020] Krapohl, S., Ocelík, V. & Walentek, D.M. The instability of globalization: applying evolutionary game theory to global trade cooperation. Public Choice 188, 31–51 (2021). https://doi.org/10.1007/s11127-020-00799-1
.. [LessWrong2011] Zoo of Strategies (2011) LessWrong. Available at: http://lesswrong.com/lw/7f2/prisoners_dilemma_tournament_results/
.. [Li2007] Li, J, How to Design a Strategy to Win an IPD Tournament, in Kendall G., Yao X. and Chong S. (eds.) The iterated prisoner’s dilemma: 20 years on. World Scientific, chapter 4, pp. 29-40, 2007.
.. [Li2009] Li, J. & Kendall, G. (2009). A Strategy with Novel Evolutionary Features for the Iterated Prisoner’s Dilemma. Evolutionary Computation 17(2): 257–274.
.. [Li2011] Li, J., Hingston, P., Member, S., & Kendall, G. (2011). Engineering Design of Strategies for Winning Iterated Prisoner ’ s Dilemma Competitions, 3(4), 348–360.
.. [Li2014] Li, J. and Kendall, G. (2014). The Effect of Memory Size on the Evolutionary Stability of Strategies in Iterated Prisoner's Dilemma. IEEE Transactions on Evolutionary Computation, 18(6) 819-826
.. [LiS2014] Li, Siwei. (2014). Strategies in the Stochastic Iterated Prisoner's Dilemma. Available at: http://math.uchicago.edu/~may/REU2014/REUPapers/Li,Siwei.pdf
.. [Luis2008] Luis R. Izquierdo and Segismundo S. Izquierdo (2008). Dynamics of the Bush-Mosteller Learning Algorithm in 2x2 Games, Reinforcement Learning, Cornelius Weber, Mark Elshaw and Norbert Michael Mayer (Ed.), InTech, DOI: 10.5772/5282. Available from: https://www.intechopen.com/books/reinforcement_learning/dynamics_of_the_bush-mosteller_learning_algorithm_in_2x2_games
.. [Marinoff1992] Marinoff, Louis. (1992). Maximizing expected utilities in the prisoner's dilemma. Journal of Conflict Resolution 36.1: 183-216.
.. [Mathieu2015] Mathieu, P. and Delahaye, J. (2015). New Winning Strategies for the Iterated Prisoner's Dilemma. Proceedings of the 2015 International Conference on Autonomous Agents and Multiagent Systems.
.. [Mittal2009] Mittal, S., & Deb, K. (2009). Optimal strategies of the iterated prisoner’s dilemma problem for multiple conflicting objectives. IEEE Transactions on Evolutionary Computation, 13(3), 554–565. https://doi.org/10.1109/TEVC.2008.2009459
.. [Murase2020] Murase, Y., & Baek, S.K. (2020). Five Rules for Friendly Rivalry in Direct Reciprocity. Scientific Reports 10:16904 https://doi.org/10.1038/s41598-020-73855-x
.. [Nachbar1992] Nachbar J., Evolution in the finitely repeated prisoner’s dilemma, Journal of Economic Behavior & Organization, 19(3): 307-326, 1992.
.. [NC2019] https://github.com/ncase/trust (Accessed: 30 October 2019)
.. [Nowak1989] Nowak, Martin, and Karl Sigmund. "Game-dynamical aspects of the prisoner's dilemma." Applied Mathematics and Computation 30.3 (1989): 191-213.
.. [Nowak1990] Nowak, M., & Sigmund, K. (1990). The evolution of stochastic strategies in the Prisoner's Dilemma. Acta Applicandae Mathematica. https://link.springer.com/article/10.1007/BF00049570
.. [Nowak1992] Nowak, M., & May, R. M. (1992). Evolutionary games and spatial chaos. Nature. http://doi.org/10.1038/359826a0
.. [Nowak1993] Nowak, M., & Sigmund, K. (1993). A strategy of win-stay, lose-shift that outperforms tit-for-tat in the Prisoner’s Dilemma game. Nature, 364(6432), 56–58. http://doi.org/10.1038/364056a0
.. [Ohtsuki2006] Ohtsuki, Hisashi, et al. "A simple rule for the evolution of cooperation on graphs and social networks." Nature 441.7092 (2006): 502.
.. [PD2017] http://www.prisoners-dilemma.com/competition.html (Accessed: 6 June 2017). Archived at https://web.archive.org/web/20171227021632/http://www.prisoners-dilemma.com/competition.html
.. [Press2012] Press, W. H., & Dyson, F. J. (2012). Iterated Prisoner’s Dilemma contains strategies that dominate any evolutionary opponent. Proceedings of the National Academy of Sciences, 109(26), 10409–10413. http://doi.org/10.1073/pnas.1206569109
.. [Prison1998] LIFL (1998) PRISON. Available at: http://www.lifl.fr/IPD/ipd.frame.html (Accessed: 19 September 2016).
.. [Robson1990] Robson, Arthur J. "Efficiency in evolutionary games: Darwin, Nash and the secret handshake." Journal of theoretical Biology 144.3 (1990): 379-396.
.. [Roemheld2013] Roemheld, Lars. "Evolutionary Extortion and Mischief: Zero Determinant strategies in iterated 2x2 games". Available at: https://arxiv.org/abs/1308.2576
.. [Singer-Clark2014] Singer-Clark, T. (2014). Morality Metrics On Iterated Prisoner’s Dilemma Players.
.. [Shakarian2013] Shakarian, P., Roos, P. & Moores, G. A Novel Analytical Method for Evolutionary Graph Theory Problems.
.. [Slany2007] Slany W. and Kienreich W., On some winning strategies for the iterated prisoner’s dilemma, in Kendall G., Yao X. and Chong S. (eds.) The iterated prisoner’s dilemma: 20 years on. World Scientific, chapter 8, pp. 171-204, 2007.
.. [Stewart2012] Stewart, a. J., & Plotkin, J. B. (2012). Extortion and cooperation in the Prisoner’s Dilemma. Proceedings of the National Academy of Sciences, 109(26), 10134–10135. http://doi.org/10.1073/pnas.1208087109
.. [Szabo2007] Szabó, G., & Fáth, G. (2007). Evolutionary games on graphs. Physics Reports, 446(4-6), 97–216. http://doi.org/10.1016/j.physrep.2007.04.004
.. [Gaudesi2016] Gaudesi, Marco, et al. "Exploiting evolutionary modeling to prevail in iterated prisoner’s dilemma tournaments." IEEE Transactions on Computational Intelligence and AI in Games 8.3 (2016): 288-300.
.. [Tzafestas2000] Tzafestas, E. (2000). Toward adaptive cooperative behavior. From Animals to Animals: Proceedings of the 6th International Conference on the Simulation of Adaptive Behavior {(SAB-2000)}, 2, 334–340.
| PypiClean |
/HumanFuture-0.2.tar.gz/HumanFuture-0.2/humanfuture.py | from datetime import datetime, timedelta
class NegativeDeltaError(Exception):
    """Raised when the target datetime is earlier than the reference."""
    pass
class UnformattableError(Exception):
    """Raised when no formatting rule matches the given datetime."""
    pass
def humanize(future, ref=None):
    """
    Return a nice string representing a future datetime in english.

    If you need to explicitely set the reference that the future is relative
    to, just pass it in as a second datetime object (defaults to
    ``datetime.now()``).

    Raises NegativeDeltaError if ``future`` is earlier than ``ref``.
    Raises UnformattableError if no formatting rule matches (should be
    unreachable in practice).
    """
    if not ref:
        ref = datetime.now()
    delta = future - ref
    seconds = delta.seconds   # seconds within the last partial day (0..86399)
    days = delta.days
    global_seconds = days * 24 * 60 * 60 + seconds
    minutes = int(round(seconds/60.) % 60)
    # Number of calendar-day boundaries crossed between ref's midnight and
    # the future moment.
    day_changes = (future - datetime(*ref.timetuple()[:3])).days
    if days < 0:
        raise NegativeDeltaError("Negative timedelta. I can only do futures!")
    if global_seconds <= 45:
        if seconds <= 15:
            return 'a moment'
        else:
            return english_number(seconds, 'second', 'seconds')
    elif global_seconds < 60 * 59.5:
        if seconds <= 90:
            return 'about a minute'
        elif seconds <= 60 * 4.5:
            return 'about %s' % english_number(minutes, 'minute', 'minutes')
        else:
            return english_number(minutes, 'minute', 'minutes')
    elif global_seconds < 60 * 60 * 2.5:
        # "one hour and five minutes"; omit the minutes part when zero.
        # FIX: was ``m is 0`` -- an identity comparison on an int, which is
        # implementation-dependent -- use ``==`` instead.
        return '%s%s' % (english_number(hours(seconds), 'hour', 'hours'),
            (lambda m: '' if m == 0 else ' and %s' % english_number(m, 'minute', 'minutes'))(minutes))
    elif global_seconds < 60 * 60 * 24 and ref.day == future.day:
        # Later today: give a clock time.
        if future.hour == 23 and future.minute == 58:
            return 'two minutes to midnight'
        return english_time(future)
    elif (global_seconds <= 60 * 60 * 24 * 2 and day_changes == 1):
        if future.hour == 0:
            if future.minute == 0:
                return 'midnight tonight'
        return 'tomorrow at %s' % english_time(future)
    elif (global_seconds <= 60 * 60 * 24 * 8 and day_changes <= 7):
        # Within the next week: pick a bare/"this"/"next" weekday phrasing.
        if day_changes <= 3 or (future.weekday() == 6 and ref.weekday() != 6):
            return '%s at %s' % (future.strftime('%A'), english_time(future))
        elif (future.weekday() > ref.weekday() or ref.weekday() == 6) and day_changes <= 6:
            return 'this %s at %s' % (future.strftime('%A'), english_time(future))
        else:
            return 'next %s at %s' % (future.strftime('%A'), english_time(future))
    elif ref.year == future.year:
        return '%s at %s' % (english_date(future), english_time(future))
    else:
        return '%s, %d at %s' % (english_date(future), future.year, english_time(future))
    # Defensive: every branch above returns, so this should be unreachable.
    raise UnformattableError("Couldn't format date.")
def hours(seconds):
    """Convert a duration in seconds to a whole number of hours (truncated)."""
    whole_hours = seconds / 3600
    return int(whole_hours)
def english_number(num, unit=None, plural=None):
    """Spell out small numbers in English, optionally with a unit.

    Numbers below ten are written as words ('three'), larger ones as
    digits ('15').  When ``unit`` is given, it is appended; ``plural``
    is used for counts other than one (falling back to ``unit`` when no
    plural form is supplied).
    """
    eng_num = ['zero', 'one', 'two', 'three', 'four', 'five',
               'six', 'seven', 'eight', 'nine'][num] if num < 10 else str(num)
    if unit:
        # FIX: was ``num is 1`` -- identity comparison on an int; use ``==``.
        if num == 1:
            return '%s %s' % (eng_num, unit)
        else:
            return '%s %s' % (eng_num, plural if plural else unit)
    return eng_num
def english_time(time):
    """Format a time of day colloquially, e.g. 'noon', '9:30 am', '1:05 pm'.

    Minutes are omitted when zero ('9 am'); 12:00 is 'noon'; midnight
    renders as '12 am'.
    """
    if time.hour == 12 and time.minute == 0:
        return 'noon'
    midi = 'am' if time.hour < 12 else 'pm'
    mins = str(time.minute).zfill(2) if time.minute else None
    # Map the 24h hour onto the 12h clock; 0 (and 12) display as 12.
    # FIX: was a lambda using the identity check ``h is not 0`` followed by
    # a redundant second ``hour == 0`` test; ``% 12 or 12`` is equivalent.
    hour = time.hour % 12 or 12
    return '%s %s' % ('%s:%s' % (hour, mins) if mins else hour, midi)
def english_date(time):
    """Return the month name and day, e.g. 'January 15'.

    FIX: the return line had stray text ('| PypiClean') fused onto the
    expression, which parsed as a binary-or with an undefined name and
    raised NameError at runtime.
    """
    return '%s %d' % (time.strftime('%B'), time.day)
/CodeIntel-2.0.0b19-cp34-cp34m-macosx_10_12_x86_64.whl/codeintel/wininteg.py | # Dev Notes:
# - On Win9x QueryValueEx returns the empty string for a non-existant
# default key value. On non-Win9x an EnvironmentError is raised. Care has
# been made in the code to handle this API semantic difference.
#
#TODO:
# - Use the ASSOC and FTYPE command line utils instead of all this
# registry entry mucking, if possible! Do these commands exist even
# on Win9x machine? I wonder.
# - Perhaps reduce "add_assoc" to "register_type" (which includes a
# default icon) and "add_assoc". Then "add_assoc" fails if there is
# no type registered.
# - Add interface for adding a shortcut on the desktop.
# - Add interface for adding a shortcut on the quick launch bar.
# - Test suite! There are subtle _winreg API differences on Win9x
# which should be tested.
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import cmd
import pprint
import getopt
import logging
import itertools
from six.moves import range
if sys.platform.startswith("win"):
import six.moves.winreg
#---- exceptions
class WinIntegError(Exception):
    """Base error raised for Windows-integration failures in this module."""
    pass
#---- globals
# Module version as a (major, minor, patch) tuple.
_version_ = (0, 2, 1)
# Module-level logger; verbosity is raised by the -v command-line flag.
log = logging.getLogger("wininteg")
#---- internal support routines
def _splitall(path):
"""Split the given path into all its directory parts and return the list
of those parts (see Python Cookbook recipe for test suite.)
"""
allparts = []
while 1:
parts = os.path.split(path)
if parts[0] == path: # sentinel for absolute paths
allparts.insert(0, parts[0])
break
elif parts[1] == path: # sentinel for relative paths
allparts.insert(0, parts[1])
break
else:
path = parts[0]
allparts.insert(0, parts[1])
return allparts
class _ListCmd(cmd.Cmd):
    """Pass arglists instead of command strings to commands.

    Modify the std Cmd class to pass arg lists instead of command lines.
    This seems more appropriate for integration with sys.argv which handles
    the proper parsing of the command line arguments (particularly handling
    of quoting of args with spaces).
    """
    name = "_ListCmd"

    def cmdloop(self, intro=None):
        # Interactive looping is not supported; commands arrive as argv lists.
        raise NotImplementedError

    def onecmd(self, argv):
        """Dispatch one command given as an argv list; return its result.

        Differences from Cmd:
        - use an argv, rather than a command string
        - don't specially handle the '?' redirect to 'help'
        - don't allow the '!' shell out
        """
        if not argv:
            return self.emptyline()
        self.lastcmd = argv
        cmdName = argv[0]
        try:
            func = getattr(self, 'do_' + cmdName)
        except AttributeError:
            return self.default(argv)
        try:
            return func(argv)
        except TypeError as ex:
            # Usually a signature mismatch (wrong number of args); note this
            # also catches TypeErrors raised *inside* the command itself.
            log.error("%s: %s", cmdName, ex)
            log.error("try '%s help %s'", self.name, cmdName)
            # FIX: this used to be an unconditional ``if 1:  # for debugging``
            # block left over from development; only dump the traceback when
            # debug logging is enabled (consistent with the do_* commands).
            if log.isEnabledFor(logging.DEBUG):
                print()
                import traceback
                traceback.print_exception(*sys.exc_info())

    def default(self, args):
        """Report an unknown command; return 1 as the error exit code."""
        log.error("unknown syntax: '%s'", " ".join(args))
        return 1

    def _do_one_help(self, arg):
        """Show help for one command: help_<arg>() if present, else do_<arg>.__doc__."""
        try:
            # If help_<arg1>() exists, then call it.
            func = getattr(self, 'help_' + arg)
        except AttributeError:
            try:
                doc = getattr(self, 'do_' + arg).__doc__
            except AttributeError:
                doc = None
            if doc:  # *do* have help, print that
                sys.stdout.write(doc + '\n')
                sys.stdout.flush()
            else:
                log.error("no help for '%s'", arg)
        else:
            return func()

    # Technically this improved do_help() does not fit into _ListCmd, and
    # something like this would be more appropriate:
    #    def do_help(self, argv):
    #        cmd.Cmd.do_help(self, ' '.join(argv[1:]))
    # but I don't want to make another class for it.
    def do_help(self, argv):
        """Show help for each named command, or the class/module docstring."""
        if argv[1:]:
            for arg in argv[1:]:
                retval = self._do_one_help(arg)
                if retval:
                    return retval
        else:
            doc = self.__class__.__doc__  # try class docstring
            if doc:
                sys.stdout.write(doc + '\n')
                sys.stdout.flush()
            elif __doc__:  # else try module docstring
                sys.stdout.write(__doc__)
                sys.stdout.flush()

    def emptyline(self):
        # Differences from Cmd: don't repeat the last command for an emptyline.
        pass
def _parseFirstArg(cmd):
cmd = cmd.strip()
if cmd.startswith('"'):
# The .replace() is to ensure it does not mistakenly find the
# second '"' in, say (escaped quote):
# "C:\foo\"bar" arg1 arg2
idx = cmd.replace('\\"', 'XX').find('"', 1)
if idx == -1:
raise WinIntegError("Malformed command: %r" % cmd)
first, rest = cmd[1:idx], cmd[idx+1:]
rest = rest.lstrip()
else:
if ' ' in cmd:
first, rest = cmd.split(' ', 1)
else:
first, rest = cmd, ""
return first
def _getTypeName(ext):
"""Calculate a reasonable Windows "type name" for the given extension."""
assert ext[0] == '.', "Extension is invalid: '%s'" % ext
# First try some common/generally accepted type name mappings.
commonTypeMappings = {
'.pl': 'Perl',
'.py': 'Python.File',
'.js': 'JSFile',
'.xml': 'XMLFile',
'.xsl': 'XSLFile',
'.xslt': 'XSLTFile',
'.pm': 'Perl.Module',
'.t': 'Perl.TestScript',
#XXX This is the name that ActiveTcl/TclPro uses for the .tcl file
# association. We choose to use its name as well.
'.tcl': 'ActiveTclScript',
'.php': 'PHPFile',
'.plx': 'PlxFile',
'.wsdl': 'WSDLFile',
}
typeName = commonTypeMappings.get(ext, None)
# Fallback: the name will be "FOOFile" for an extension ".foo".
if typeName is None:
typeName = ext[1:].upper() + "File"
return typeName
def _getTypeNameFromRegistry(ext, root=None):
    """Read the file-type name for an extension from the Windows registry.

    @param ext {unicode} The extension; must start with a leading period
    @param root {HKEY} The root tree to use; if unspecified, HKCR is used.
        (i.e. the merged tree).  HKLM/HKCU are redirected to their
        Software\\Classes subtree.
    @returns {unicode} The type name, or None if not found
    """
    log.debug("_getTypeNameFromRegistry: getting '%s'", ext)
    assert ext[0] == '.', "Extension is invalid: '%s'" % ext
    import six.moves.winreg
    if root is None:
        root = six.moves.winreg.HKEY_CLASSES_ROOT
    elif root in (six.moves.winreg.HKEY_LOCAL_MACHINE, six.moves.winreg.HKEY_CURRENT_USER):
        # use the Software\Classes subkey
        root = six.moves.winreg.OpenKey(root, r"Software\Classes")
    try:
        extKey = six.moves.winreg.OpenKey(root, ext)
    except WindowsError:
        # The extension has no registered key at all.
        log.debug("Failed to open '%s'", ext)
        return None
    log.debug("Opened key '%s'", ext)
    # Get the type name from this key (it is the key's default value).
    try:
        typeName, typeNameType = _safeQueryValueEx(extKey, "")
    except WindowsError:
        # NOTE(review): on Win9x QueryValueEx returns "" for a missing
        # default value instead of raising -- see the module dev notes.
        return None
    return typeName
def _safeQueryValueEx(key, name):
    """QueryValueEx wrapper that strips stray NUL characters from strings.

    Sometimes (circumstances unknown -- see Komodo bug 33333) QueryValueEx
    returns a string padded with a number of '\\x00' null characters; this
    strips them for all string-typed registry values.

    XXX See the note about the different behaviour of QueryValueEx on Win9x
        versus WinNT for null values. Perhaps this method could abstract
        that.
    """
    value, valueType = six.moves.winreg.QueryValueEx(key, name)
    string_kinds = (six.moves.winreg.REG_SZ,
                    six.moves.winreg.REG_MULTI_SZ,
                    six.moves.winreg.REG_EXPAND_SZ)
    if valueType in string_kinds:
        value = value.strip('\x00')
    return (value, valueType)
def _deleteKeyIfEmpty(root, keyName, rootDesc="..."):
    """Delete the given registry key, and any ancestor keys up to the root, if
    they are empty (have no subkeys or values).

    @param root {HKEY} The ancestor which should not be deleted
    @param keyName {unicode} The name of the subkey to delete if empty
    @param rootDesc {unicode} A string describing the root (for logging)
    @returns {int} The number of keys deleted
    """
    import six.moves.winreg
    count = 0
    log.debug(r"_deleteKeyIfEmpty: deleting %s\%s", rootDesc, keyName)
    while keyName:
        with six.moves.winreg.OpenKey(root, keyName) as key:
            # A key with any value is not empty: stop here.
            try:
                six.moves.winreg.EnumValue(key, 0)
            except WindowsError:
                pass
            else:
                log.debug("_deleteKeyIfEmpty: %s has values", keyName)
                return count # not empty
            # A key with any subkey is not empty: stop here.
            try:
                six.moves.winreg.EnumKey(key, 0)
            except WindowsError:
                pass
            else:
                log.debug("_deleteKeyIfEmpty: %s has subkeys", keyName)
                return count # not empty
        # Key is empty; the handle is closed (with-block exited) before
        # deleting it.
        six.moves.winreg.DeleteKey(root, keyName)
        count += 1
        log.info(r"deleted '%s\%s' key", rootDesc, keyName)
        # try again with the parent
        keyName = "\\".join(keyName.split("\\")[:-1])
    return count
#---- public module interface
def getHKLMRegistryValue(keyName, valueName):
    """Return a (<value>, <valueType>) tuple for the given registry value
    under HKEY_LOCAL_MACHINE.

    An EnvironmentError is raised if the value does not exist.
    (Note: On Win9x the empty string may be returned for non-existant values
    instead of raising an environment error.)
    """
    log.debug("getHKLMRegistryValue(keyName=%r, valueName=%r)", keyName,
              valueName)
    import six.moves.winreg
    key = six.moves.winreg.OpenKey(six.moves.winreg.HKEY_LOCAL_MACHINE, keyName)
    # _safeQueryValueEx strips stray NULs from string-typed values.
    return _safeQueryValueEx(key, valueName)
def setHKLMRegistryValue(keyName, valueName, valueType, value):
    """Set the given value in the registry under HKEY_LOCAL_MACHINE,
    creating intermediate keys if necessary.

    An EnvironmentError is raised if unsuccessful.
    """
    log.debug("setHKLMRegistryValue(keyName=%r, valueName=%r, valueType=%r, "\
              "value=%r)", keyName, valueName, valueType, value)
    import six.moves.winreg
    # Open the key for writing.
    try:
        key = six.moves.winreg.OpenKey(six.moves.winreg.HKEY_LOCAL_MACHINE, keyName,
                                       0, six.moves.winreg.KEY_SET_VALUE)
    except EnvironmentError as ex:
        # Either do not have permissions or we must create the keys
        # leading up to this key. Presume that latter, if the former
        # then it will fall out in the subsequent calls.
        # NOTE(review): winreg.CreateKey already creates missing
        # intermediate keys, so this part-by-part loop is likely
        # redundant -- confirm before simplifying.
        parts = _splitall(keyName)
        for i in range(len(parts)):
            partKeyName = os.path.join(*parts[:i+1])
            partKey = six.moves.winreg.CreateKey(six.moves.winreg.HKEY_LOCAL_MACHINE,
                                                 partKeyName)
        key = six.moves.winreg.OpenKey(six.moves.winreg.HKEY_LOCAL_MACHINE, keyName,
                                       0, six.moves.winreg.KEY_SET_VALUE)
    # Write the given value.
    six.moves.winreg.SetValueEx(key, valueName, 0, valueType, value)
def getFileAssociation(ext):
    """Return the registered filetype and an ordered list of associated actions.

    "ext" is the extension to lookup. It must include the leading '.'.

    Returns the following:
        (<filetype>, <filetype display name>, <ordered list of actions>)
    where the list of actions is intended to be ordered as they would be
    in the Windows Explorer context menu for a file with that extension.
    Each action is a tuple,
        (<action name>, <action display string>, <command line>)

    If the file type is not found, raises WinIntegError.
    """
    log.debug("getFileAssociation(ext=%r)", ext)
    import six.moves.winreg

    #---- 1. Find the type name from the extension.
    typeName = _getTypeNameFromRegistry(ext)
    if typeName is None:
        raise WinIntegError("unrecognize extension: '%s'" % ext)
    # Get the type display name from the type key (it is the default value).
    displayName = None
    try:
        with six.moves.winreg.OpenKey(six.moves.winreg.HKEY_CLASSES_ROOT, typeName) as typeKey:
            displayName = _safeQueryValueEx(typeKey, "")[0]
    except WindowsError as ex:
        # No display name registered; leave it as None.
        pass

    #---- 2. Get the current actions associated with this file type.
    # Get a list of all the current actions. E.g. for this layout:
    #   HKEY_CLASSES_ROOT
    #       Python.File
    #           shell
    #               Edit    -> (value not set)
    #               Edit2   -> "&Edit with Komodo"
    #               open    -> (value not set)
    # the actions are:
    #   [("Edit", "&Edit"), ("Edit2", "&Edit with Komodo"),
    #    ("open", "&Open")]
    # Implicit naming rules:
    # - "open" and "print" get capitalized, others do not seems to (including
    #   "edit").
    # - the first letter is made the accesskey with a '&'-prefix
    actionNames = []
    try:
        shellKey = six.moves.winreg.OpenKey(six.moves.winreg.HKEY_CLASSES_ROOT,
                                            "%s\\shell" % typeName)
        for index in itertools.count():
            try:
                actionName = six.moves.winreg.EnumKey(shellKey, index)
                try:
                    with six.moves.winreg.OpenKey(shellKey, actionName) as actionKey:
                        actionDisplayName = _safeQueryValueEx(actionKey, None)[0]
                except WindowsError as ex:
                    if ex.winerror != 2: # ERROR_FILE_NOT_FOUND
                        raise
                    actionDisplayName = None
                if not actionDisplayName:
                    # Apply the implicit naming rules described above.
                    if actionName.lower() == "open":
                        actionDisplayName = "&Open"
                    else:
                        actionDisplayName = "&"+actionName
                actionNames.append( (actionName, actionDisplayName) )
            except WindowsError:
                # EnumKey raises when the index runs past the last subkey.
                break
    except WindowsError:
        # No "shell" key: the type has no registered actions.
        pass
    log.debug("action names for '%s': %s", typeName, actionNames)
    # Attach each action's command line (from its "command" subkey).
    actions = []
    for actionName, actionDisplayName in actionNames:
        command = None
        try:
            commandKey = six.moves.winreg.OpenKey(six.moves.winreg.HKEY_CLASSES_ROOT,
                                                  "%s\\shell\\%s\\command"
                                                  % (typeName, actionName))
        except WindowsError as ex:
            pass
        else:
            try:
                command, commandType = _safeQueryValueEx(commandKey, "")
            except WindowsError:
                pass
        actions.append( (actionName, actionDisplayName, command) )

    #---- 3. Sort the actions as does Windows Explorer
    # This seems to use the following rules:
    # - If there is an "opennew", then that is first and all others are
    #   after in alphabetical order.
    # - Else if there is an "open", then that is first and all others are
    #   after in alphabetical order.
    name2action = {}
    for action in actions:
        name2action[action[0].lower()] = action
    if "opennew" in name2action:
        default = name2action["opennew"]
        del name2action["opennew"]
    elif "open" in name2action:
        default = name2action["open"]
        del name2action["open"]
    else:
        default = None
    actions = [name2action[k] for k in sorted(name2action.keys())]
    if default: actions.insert(0, default)

    return (typeName, displayName, actions)
def checkFileAssociation(ext, action, exe):
    """Check that the given association is setup as expected.

    "ext" is the extension (it must include the leading dot).
    "action" is the association action to check.
    "exe" is the expected associated executable.

    Returns None if the association matches expectations, otherwise a
    string describing the discrepancy.  This can raise a WindowsError
    if unsuccessful (via getFileAssociation).
    """
    log.debug("checkFileAssociation(ext=%r, action=%r, exe=%r)",
              ext, action, exe)
    # (FIX: removed an unused local ``import six.moves.winreg`` -- this
    # function only inspects the data returned by getFileAssociation().)

    #---- Find the type name from the extension.
    try:
        [typeName, typeDisplayName, actions] = getFileAssociation(ext)
    except WinIntegError:
        return "'%s' extension is not registered with system" % ext

    #---- Abort check if there is no matching action.
    for actionName, actionDisplayName, command in actions:
        log.debug("actionDisplayName: %r actionName: %r action: %r",
                  actionDisplayName, actionName, action)
        if (actionDisplayName.lower() == action.lower()
                or actionName.lower() == action.lower()):
            break
    else:
        actionsSummary = ', '.join(a[1] or a[0] for a in actions)
        return "no '%s' action is associated with %s/%s "\
               "(existing actions are: %s)"\
               % (action, ext, typeName, actionsSummary)

    #---- Check that actual command matches expectation.
    # ``command`` is the value bound by the matching action above.
    if ' ' in exe:
        expectedCommands = ['"%s" "%%1" %%*' % exe]
    else:
        expectedCommands = ['%s "%%1" %%*' % exe,
                            '"%s" "%%1" %%*' % exe] # allow redundant quotes
    for expectedCommand in expectedCommands:
        if expectedCommand == command:
            return None
    else:
        return ("current '%s' command for %s/%s doesn't match "
                "expectation:\n\tcurrent: %s\n\texpected: %s"
                % (actionDisplayName, ext, typeName, command,
                   expectedCommands[0]))
def addFileAssociation(ext, action, exe, fallbackTypeName=None):
    """Add a file association from the given extension to the given
    executable (written under HKCU\\Software\\Classes).

    "ext" is the extention (it must include the leading dot).
    "action" is the association action to make.
    "exe" is the executable to which to associate.
    "fallbackTypeName" is a file type name to use ONLY IF a type name
        does not already exist for the given extension.

    This can raise an EnvironmentError if unsuccessful. (XXX Can this be
    limited to a WindowsError?)
    """
    log.debug("addFileAssociation(ext=%r, action=%r, exe=%r, "\
              "fallbackTypeName=%r)", ext, action, exe, fallbackTypeName)
    import six.moves.winreg
    userClasses = six.moves.winreg.OpenKey(six.moves.winreg.HKEY_CURRENT_USER, r"Software\Classes")

    #---- 1. Find the type name from the extension.
    typeName = _getTypeNameFromRegistry(ext)
    if typeName is None:
        # No type registered for this extension: register one now.
        typeName = fallbackTypeName or _getTypeName(ext)
        with six.moves.winreg.CreateKey(userClasses, ext) as extKey:
            # re-open the key with write access
            with six.moves.winreg.OpenKey(extKey, "", 0, six.moves.winreg.KEY_SET_VALUE) as extKey:
                six.moves.winreg.SetValueEx(extKey, "", 0, six.moves.winreg.REG_SZ, typeName)
    log.info("type name for '%s' is '%s'", ext, typeName)

    #---- 2. Get the current actions associated with this file type.
    # Get a list of all the current actions.
    try:
        currActions = getFileAssociation(ext)[2]
    except WinIntegError:
        currActions = []
    log.info("current actions for '%s': %s", typeName, currActions)

    #---- 3. Determine which subkey of HKCR\\$typeName\\shell to use for
    #     action.
    if ' ' in action: # e.g. "Edit with Komodo"
        # We might want to replace one of the existing actions if the
        # action names are the same.
        for currAction in currActions:
            if action .replace('&', '').lower() ==\
               currAction[1].replace('&', '').lower():
                actionKeyName = currAction[0]
                break
        else:
            # Pick an action key name that does not conflict.
            currActionKeyNames = set(a[0].lower() for a in currActions)
            for i in [''] + list(range(2, 100)):
                actionKeyName = action.split()[0] + str(i) # Edit, Edit2, ...
                if actionKeyName.lower() not in currActionKeyNames:
                    break
            else:
                raise WinIntegError("Could not determine a non-conflicting "\
                                    "action key name for file type '%s' and "\
                                    "action '%s'." % (typeName, action))
        actionName = action
    else: # e.g. "Edit"
        actionKeyName = action
        actionName = None
    actionKeyPath = "%s\\shell\\%s" % (typeName, actionKeyName)
    log.info("creating '%s' action at key 'HKCR\\%s'",
             actionName or actionKeyName, actionKeyPath)

    #---- 4. Register the action.
    # First, set the action name if necessary (and ensure the action key
    # is created).
    actionKey = six.moves.winreg.CreateKey(userClasses,
                                           r"%s\shell\%s" % (typeName, actionKeyName))
    if actionName is not None:
        log.info("setting name for action key '%s' of file type '%s': '%s'",
                 actionKeyName, typeName, actionName)
        six.moves.winreg.SetValueEx(actionKey, "", 0, six.moves.winreg.REG_SZ, actionName)
    # Next, determine the command and create/update the "command" subkey.
    if ' ' in exe:
        command = '"%s" "%%1" %%*' % exe
    else:
        command = '%s "%%1" %%*' % exe
    with six.moves.winreg.CreateKey(actionKey, "command") as commandKey:
        log.info("setting command for '%s' action of '%s' file type: %r",
                 actionName or actionKeyName, typeName, command)
        six.moves.winreg.SetValueEx(commandKey, "", 0, six.moves.winreg.REG_EXPAND_SZ, command)
def removeFileAssociation(ext, action, exe, fromHKLM=False):
    """Remove the given file association PROVIDED the current state of
    the association points to the given executable.

    "ext" is the extention (it must include the leading dot).
    "action" is the association action to make.
    "exe" is the executable to which to associate.
    "fromHKLM", if set, causes the association to be removed from HKLM instead
        of HKCU; this may raise a WindowsError if permissions are denied

    This can raise an WindowsError if unsuccessful.

    Returns True if the association was removed; False if it there was no need
    to (not set, or set to a different executable).

    (FIX: replaced the deprecated ``log.warn`` alias with ``log.warning``
    and dropped the unused local ``actionData``.)
    """
    log.debug("removeFileAssociation(ext=%r, action=%r, exe=%r, HKLM=%r)", ext,
              action, exe, fromHKLM)
    import six.moves.winreg
    if fromHKLM:
        HKCR = six.moves.winreg.OpenKey(six.moves.winreg.HKEY_LOCAL_MACHINE, r"Software\Classes")
    else:
        HKCR = six.moves.winreg.OpenKey(six.moves.winreg.HKEY_CURRENT_USER, r"Software\Classes")

    #---- 1. Find the type name and associations from the extension.
    try:
        [typeName, typeDisplay, currActions] = getFileAssociation(ext)
    except WinIntegError:
        log.warning("extension '%s' is not registered, giving up", ext)
        return False
    log.info("type name for '%s' is '%s' actions: %r", ext, typeName, currActions)

    #---- 2. Determine which subkey of HKCR\\$typeName\\shell is relevant.
    # Match on the display name when one is set, else on the key name.
    actionKeyName = None
    for currAction in currActions:
        if currAction[1]:
            if action .replace('&', '').lower() ==\
               currAction[1].replace('&', '').lower():
                actionKeyName, actionDisplayName, command = currAction
                break
        else:
            if action .replace('&', '').lower() ==\
               currAction[0].replace('&', '').lower():
                actionKeyName, actionDisplayName, command = currAction
                break
    else:
        log.info("could not find relevant current action to remove: '%s'",
                 action)
        return False
    log.info("relevant current action: '%s' command: '%s'",
             actionKeyName, command)

    #---- 3. Abort if the current action is NOT to the given exe.
    commandExe = _parseFirstArg(command)
    if os.path.split(exe)[-1].lower() != os.path.split(commandExe)[-1].lower():
        log.warning("current association, %r, is not to the given exe, "\
                    "%r, aborting", commandExe, exe)
        return False

    #---- 4. Remove the action key.
    with six.moves.winreg.OpenKey(HKCR, r"%s\shell\%s\command" % (typeName, actionKeyName), 0, six.moves.winreg.KEY_SET_VALUE) as commandKey:
        six.moves.winreg.DeleteValue(commandKey, "")
    log.info("deleted default value for 'HKCR\\%s\\shell\\%s\\command'",
             typeName, actionKeyName)
    # Clean up an empty registry branch.
    try:
        subkey = r"%s\shell\%s\command" % (typeName, actionKeyName)
        # re-open HKCR with write access
        with six.moves.winreg.OpenKey(HKCR, "", 0, six.moves.winreg.KEY_SET_VALUE) as root:
            log.debug("re-opened root")
            numDeleted = _deleteKeyIfEmpty(root, subkey, rootDesc="HKCR")
            if numDeleted == 1:
                # perhaps there's a description in <type>\shell\<action>\(Default)
                subkey = r"%s\shell\%s" % (typeName, actionKeyName)
                def tryDeleteDefault():
                    # Delete the action key's default value (the description)
                    # when it is the only thing left, then retry the cleanup.
                    # Returns the extra delete count, or None if the key is
                    # still in use.
                    with six.moves.winreg.OpenKey(HKCR, subkey, 0, six.moves.winreg.KEY_QUERY_VALUE | six.moves.winreg.KEY_SET_VALUE) as actionKey:
                        try:
                            six.moves.winreg.EnumKey(actionKey, 0)
                            return None # other subkeys exist, don't delete
                        except WindowsError:
                            pass
                        hasDefault = True
                        try:
                            # check if the default value exists (the user-visible description of the file type)
                            six.moves.winreg.QueryValueEx(actionKey, "")
                        except WindowsError:
                            # there's no default value
                            hasDefault = False
                        try:
                            six.moves.winreg.EnumValue(actionKey, 1 if hasDefault else 0)
                            return None # other values exist
                        except WindowsError:
                            pass
                        if hasDefault:
                            six.moves.winreg.DeleteValue(actionKey, "")
                    return _deleteKeyIfEmpty(root, subkey, rootDesc="HKCR")
                moreDeleted = tryDeleteDefault()
                if moreDeleted is not None:
                    numDeleted += moreDeleted
            log.debug("deleted %r keys", numDeleted)
            if numDeleted >= len(subkey.split("\\")):
                # the whole thing was deleted; clean up the extension tree as well
                with six.moves.winreg.OpenKey(root, ext, 0, six.moves.winreg.KEY_SET_VALUE) as extKey:
                    six.moves.winreg.DeleteValue(extKey, None)
                try:
                    _deleteKeyIfEmpty(root, ext, rootDesc="HKCR")
                except WindowsError as ex:
                    if ex.winerror != 2: # ERROR_FILE_NOT_FOUND
                        raise
                    log.debug("removeFileAssociation: Can't find %s", ext)
                    # ignore not found errors, the file extension part may have
                    # come from the HKLM version of HKCR
    except WindowsError as ex:
        if ex.winerror != 5: # ERROR_ACCESS_DENIED
            raise
        log.debug("removeFileAssociation: Access denied (%r)", ex)
    return True
#---- command line interface
class WinIntegShell(_ListCmd):
    """
    wininteg - a tool for integrating an app into Microsoft Windows

    Usage:
        wininteg [<options>...] <command> [<args>...]

    Options:
        -h, --help          Print this help and exit.
        -V, --version       Print the version info and exit.
        -v, --verbose       More verbose output.

    Wininteg's usage is intended to feel like p4's command line
    interface.

    Getting Started:
        wininteg help                    print this help
        wininteg help <command>          help on a specific command

    Commands:
        get_assoc EXT                    list associations for EXT
        add_assoc EXT ACTION APPPATH     add association for EXT
        check_assoc EXT ACTION APPPATH   check expected EXT association
        remove_assoc EXT ACTION APPPATH  remove specific assoc for EXT
    """
    name = "wininteg"

    def emptyline(self):
        # With no command given, show the tool help.
        self.do_help(["help"])

    def help_usage(self):
        # NOTE(review): writes the module docstring; if the module has no
        # docstring this raises TypeError -- confirm against the full file.
        sys.stdout.write(__doc__)
        sys.stdout.flush()

    def do_get_assoc(self, argv):
        """
        get_assoc -- Get the current file association.

        wininteg get_assoc [<options>...] <ext>

            <ext> is the extension (it must include the leading dot).

        This looks up and prints all associated actions and shell commands
        for the current extension.
        """
        # Process options.
        try:
            optlist, args = getopt.getopt(argv[1:], "")
        except getopt.GetoptError as ex:
            log.error("get_assoc: %s", ex)
            log.error("get_assoc: try 'wininteg help get_assoc'")
            return 1
        # Process arguments.
        if len(args) != 1:
            log.error("get_assoc: incorrect number of arguments: %s", args)
            log.error("get_assoc: try 'wininteg help get_assoc'")
            return 1
        ext = args[0]
        try:
            # FIX: renamed locals (were ``type``/``name``) so they no longer
            # shadow the builtins.
            ftype, fname, actions = getFileAssociation(ext)
            print("File Type: %s (%s)" % (fname, ftype))
            if actions:
                print("Actions:")
                for aName, aDisplayName, aCommand in actions:
                    print(" %s (%s)" % (aDisplayName, aName))
                    print(" %s" % aCommand)
            else:
                print("Actions: <none>")
        except Exception as ex:
            log.error(str(ex))
            if log.isEnabledFor(logging.DEBUG):
                import traceback
                traceback.print_exception(*sys.exc_info())
            return 1

    def do_check_assoc(self, argv):
        """
        check_assoc -- Check that a file association is as expected

        wininteg check_assoc [<options>...] <ext> <action> <exe>

            <ext> is the extension (it must include the leading dot).
            <action> is the association action to check.
            <exe> is the expected associated executable.
        """
        # Process options.
        try:
            optlist, args = getopt.getopt(argv[1:], "", [])
        except getopt.GetoptError as ex:
            # FIX: these two error messages were mislabelled "add_assoc:"
            # (copy-paste from do_add_assoc).
            log.error("check_assoc: %s", ex)
            log.error("check_assoc: try 'wininteg help check_assoc'")
            return 1
        # Process arguments.
        if len(args) != 3:
            log.error("check_assoc: incorrect number of arguments: %s", args)
            log.error("check_assoc: try 'wininteg help check_assoc'")
            return 1
        ext, action, exe = args
        try:
            msg = checkFileAssociation(ext, action, exe)
            if msg is not None:
                print(msg)
        except Exception as ex:
            log.error(str(ex))
            if log.isEnabledFor(logging.DEBUG):
                import traceback
                traceback.print_exception(*sys.exc_info())
            return 1

    def do_add_assoc(self, argv):
        """
        add_assoc -- Add a file association.

        wininteg add_assoc [<options>...] <ext> <action> <exe>

            <ext> is the extension (it must include the leading dot).
            <action> is the association action to make.
            <exe> is the executable to which to associate.

            Options:
                --type-name=<name>, -t <name>
                    Specify a _fallback_ type name for the given extension.

        An association is made for the given extension to the given executable.
        If the extension already has a registered type name, then that
        name is used. You may provide a fallback type name to use, if it
        is needed, otherwise one will be created based on the extension.
        """
        # Process options.
        try:
            optlist, args = getopt.getopt(argv[1:], "t:", ["type-name="])
        except getopt.GetoptError as ex:
            log.error("add_assoc: %s", ex)
            log.error("add_assoc: try 'wininteg help add_assoc'")
            return 1
        fallbackTypeName = None
        for opt, optarg in optlist:
            if opt in ("-t", "--type-name"):
                fallbackTypeName = optarg
        # Process arguments.
        if len(args) != 3:
            log.error("add_assoc: incorrect number of arguments: %s", args)
            log.error("add_assoc: try 'wininteg help add_assoc'")
            return 1
        ext, action, exe = args
        try:
            addFileAssociation(ext, action, exe, fallbackTypeName)
        except Exception as ex:
            log.error(str(ex))
            if log.isEnabledFor(logging.DEBUG):
                import traceback
                traceback.print_exception(*sys.exc_info())
            return 1

    def do_remove_assoc(self, argv):
        """
        remove_assoc -- Remove a file association.

        wininteg remove_assoc <ext> <action> <exe>

            <ext> is the extension (it must include the leading dot).
            <action> is the association action to remove.
            <exe> is the executable to which to associate.

        The given file association is removed, PROVIDED the currently
        registered command is for the given executable. If it is not
        then the association is left alone: we don't want to disrupt a
        file association to another app.
        """
        # Process options.
        try:
            optlist, args = getopt.getopt(argv[1:], "")
        except getopt.GetoptError as ex:
            log.error("remove_assoc: %s", ex)
            log.error("remove_assoc: try 'wininteg help remove_assoc'")
            return 1
        # Process arguments.
        if len(args) != 3:
            log.error("remove_assoc: incorrect number of arguments: %s", args)
            log.error("remove_assoc: try 'wininteg help remove_assoc'")
            return 1
        ext, action, exe = args
        try:
            removeFileAssociation(ext, action, exe)
        except Exception as ex:
            log.error(str(ex))
            if log.isEnabledFor(logging.DEBUG):
                import traceback
                traceback.print_exception(*sys.exc_info())
            return 1
def _main(argv):
    """Command-line entry point: parse global options, dispatch to the shell.

    Returns the process exit code.
    """
    logging.basicConfig()
    try:
        optlist, args = getopt.getopt(argv[1:], "hVv",
                                      ["help", "version", "verbose"])
    except getopt.GetoptError as msg:
        log.error("%s. Your invocation was: %s", msg, argv)
        log.error("Try 'wininteg --help'.")
        return 1
    for opt, optarg in optlist:
        if opt in ("-h", "--help"):
            sys.stdout.write(WinIntegShell.__doc__)
            return 0
        elif opt in ("-V", "--version"):
            print("wininteg %s" % '.'.join([str(i) for i in _version_]))
            return 0
        elif opt in ("-v", "--verbose"):
            # FIX: was ``log.setLevel(Logger.DEBUG)`` -- ``Logger`` is an
            # undefined name (NameError); the level constant lives on the
            # ``logging`` module.
            log.setLevel(logging.DEBUG)

    shell = WinIntegShell()
    return shell.onecmd(args)
if __name__ == "__main__":
    # Make __file__ absolute so later chdir()s don't break references to it.
    __file__ = os.path.abspath(sys.argv[0])
    sys.exit( _main(sys.argv) )
/CUQIpy-FEniCS-0.4.0.tar.gz/CUQIpy-FEniCS-0.4.0/README.md | # CUQIpy-FEniCS
CUQIpy-FEniCS is a plugin for the [CUQIpy](https://github.com/CUQI-DTU/CUQIpy) software package. It provides an interface between FEniCS PDE models and CUQIpy modules.
## Installation
First install [FEniCS](https://fenicsproject.org/download/archive/) (we recommend using Anaconda from the available installation options). Then install CUQIpy-FEniCS with pip:
```bash
pip install cuqipy-fenics
```
If CUQIpy is not installed, it will be installed automatically.
## Quickstart
```python
import numpy as np
import matplotlib.pyplot as plt
import cuqi
import cuqipy_fenics
# Load a fenics forward model and data from testproblem library
model, y_data, info = cuqipy_fenics.testproblem.FEniCSDiffusion1D.get_components(
dim=20,
endpoint=1,
exactSolution='smooth_step',
mapping='exponential',
SNR=10000,
left_bc=0,
right_bc=8
)
# Set up Bayesian model
x = cuqi.distribution.GMRF(np.zeros(model.domain_dim),
25, 1, 'zero', geometry=model.domain_geometry)
# y ~ N(model(x), 0.05^2)
y = cuqi.distribution.Gaussian(mean=model(x), cov=0.05**2)
# Set up Bayesian Problem object
BP = cuqi.problem.BayesianProblem(y, x).set_data(y=y_data)
# Sample from the posterior
samples = BP.sample_posterior(5000)
# Analyze the samples
samples.burnthin(1000).plot_ci(95, plot_par=True,
exact=info.exactSolution, linestyle='-', marker='.')
```
For more examples, see the [demos](demos) folder.
/Julep-0.3.1.tar.gz/Julep-0.3.1/julep/config.py |
import imp
import socket
import os
import signal
import logging
import sys
import scoreboard
class ServerConfig(object):
    """Base configuration shared by every julep server type."""

    # Class-level defaults; concrete subclasses copy these into
    # ``self.options`` and layer keyword overrides on top.
    _default_options = {
        "num_servers": 4,
        "max_connections": 32,
        "preload_hooks": [],
        "max_requests": 4096,
        "max_errors": 16,
        "max_request_time": 30,
    }

    @property
    def default_options(self):
        """Return the shared default option mapping."""
        return self._default_options

    def verify_options(self):
        """Resolve the WSGI callable described by ``self.options``.

        ``options['app']`` may be a callable or a dotted
        "module.callable" string; ``options['wsgi_file']`` (if present)
        is loaded as a module and its ``application`` attribute wins.
        """
        options = self.options
        if 'app' in options:
            app_spec = options['app']
            if isinstance(app_spec, str):
                # Dotted path: import the module and pull out the callable.
                mod_name, fun_name = app_spec.rsplit(".", 1)
                module = __import__(mod_name, globals(), locals(), [fun_name])
                self.app = getattr(module, fun_name)
            else:
                self.app = app_spec
        if 'wsgi_file' in options:
            self.wsgi_module = imp.load_source('_wsgi_mod',
                                               options['wsgi_file'])
            self.app = self.wsgi_module.application

    def initialize_scoreboard(self, server_set):
        """Create this server's scoreboard and register it by name."""
        board = scoreboard.Scoreboard(self.options['num_servers'])
        self.scoreboard = board
        server_set.scoreboards[self.name] = board

    def cleanup_sockets(self):
        """Hook for subclasses; the base class has nothing to clean up."""
        pass
class NetworkServer(ServerConfig):
    """Configuration for a TCP (AF_INET) julep server."""

    def __init__(self, address, port, **kwargs):
        self.address = address
        self.port = port
        # Start from the shared defaults, then let keyword arguments
        # override them.
        self.options = dict(self.default_options)
        self.options.update(kwargs)

    def initialize_socket(self, server_set):
        """Obtain and start the listening TCP socket for this server."""
        listener = server_set.get_or_create_socket(
            socket.AF_INET, (self.address, self.port))
        listener.listen(self.options['max_connections'])
        self.socket = listener
class SocketFileServer(ServerConfig):
def __init__(self,filename,**kwargs):
self.filename = filename
self.options = {}
self.options.update(self.default_options)
self.options.update(kwargs)
def initialize_socket(self,server_set):
sock = server_set.get_or_create_socket(socket.AF_UNIX,self.filename)
sock.listen(self.options['max_connections'])
self.socket = sock
class ConfigFileParser(object):
    """Load a julep config file and collect its ServerConfig instances."""

    def __init__(self, filename):
        """Import ``filename`` as the ``julep_config`` module.

        Every module-level attribute that is a ``ServerConfig`` instance
        is collected into ``self.servers``, keyed by its attribute name.

        :raise Exception: re-raises whatever the config file raised while
            being imported (after logging it).
        """
        imp.acquire_lock()
        try:
            config_module = imp.load_source("julep_config", filename)
        except Exception:
            logging.exception("Failed to load config file")
            raise
        finally:
            # BUG FIX: the import lock was previously released only on the
            # success path, so a failing config file leaked the lock.
            # Also replaced the Python-2-only ``except Exception,e`` syntax
            # with the ``except Exception:`` form (the bound name was
            # unused anyway).
            imp.release_lock()
        servers = self.servers = {}
        for name in dir(config_module):
            value = getattr(config_module, name)
            if isinstance(value, ServerConfig):
                servers[name] = value
def test_config(config_file):
    """Return True if ``config_file`` loads and verifies cleanly.

    Any failure while importing the config or resolving its servers'
    options yields False instead of propagating.
    """
    try:
        config_module = ConfigFileParser(config_file)
        for name, serverconfig in config_module.servers.items():
            serverconfig.name = name
            serverconfig.verify_options()
        return True
    except Exception:
        # BUG FIX: was a bare ``except:`` which also swallowed SystemExit
        # and KeyboardInterrupt.  The unreachable trailing ``return False``
        # (carrying "| PypiClean" extraction garbage) was removed.
        return False
/Bayesian-0.3.3-py3-none-any.whl/bayesian/samples.py | import sys
sys.path.append('../')
from bayesian import Bayes, classify_normal, classify
# Demonstration script for the ``bayesian`` package: walks through the
# high-level classification helpers, then the low-level Bayes object API.
print(' == High Level Functions == ')
print(' -- Gender Classification -- ')
# Decides if the person with those measures is male or female.
print(classify_normal({'height': 6, 'weight': 130, 'foot size': 8},
    {'male': [{'height': 6, 'weight': 180, 'foot size': 12},
    {'height': 5.92, 'weight': 190, 'foot size': 11},
    {'height': 5.58, 'weight': 170, 'foot size': 12},
    {'height': 5.92, 'weight': 165, 'foot size': 10}],
    'female': [{'height': 5, 'weight': 100, 'foot size': 6},
    {'height': 5.5, 'weight': 150, 'foot size': 8},
    {'height': 5.42, 'weight': 130, 'foot size': 7},
    {'height': 5.75, 'weight': 150, 'foot size': 9}]}))
print('')
print(' -- Spam Detection With `Classify` -- ')
spams = ["buy viagra", "dear recipient", "meet sexy singles"] # etc
genuines = ["let's meet tomorrow", "remember to buy milk"]
message = "remember the meeting tomorrow"
# Classify as "genuine" because of the words "remember" and "tomorrow".
print(classify(message, {'spam': spams, 'genuine': genuines}))
# Classifies "unknown_file" as either a Python or Java file, considering
# you have directories with examples of each language.
#print classify_file("unknown_file", ["java_files", "python_files"])
# Classifies every file under "folder" as either a Python or Java file,
# considering you have subdirectories with examples of each language.
#print classify_folder("folder")
print('')
print(' == Low Level Functions == ')
print(' -- Classic Cancer Test Problem --')
# 1% chance of having cancer.
b = Bayes([('not cancer', 0.99), ('cancer', 0.01)])
# Test positive, 9.6% false positives and 80% true positives
b.update((9.6, 80))
print(b)
print('Most likely:', b.most_likely())
print('')
print(' -- Spam Filter With Existing Model --')
# Database with number of sightings of each words in (genuine, spam)
# emails.
words_odds = {'buy': (5, 100), 'viagra': (1, 1000), 'meeting': (15, 2)}
# Emails to be analyzed.
emails = [
    "let's schedule a meeting for tomorrow", # 100% genuine (meeting)
    "buy some viagra", # 100% spam (buy, viagra)
    "buy coffee for the meeting", # buy x meeting, should be genuine
]
for email in emails:
    # Start with priors of 90% chance being genuine, 10% spam.
    # Probabilities are normalized automatically.
    b = Bayes([('genuine', 90), ('spam', 10)])
    # Update probabilities, using the words in the emails as events and the
    # database of chances to figure out the change.
    b.update_from_events(email.split(), words_odds)
    # Print the email and if it's likely spam or not.
    print(email[:15] + '...', b.most_likely())
print('')
print(' -- Spam Filter With Email Corpus -- ')
# Email corpus. A hundred spam emails to buy products and with the word
# "meeting" thrown around. Genuine emails are about meetings and buying
# milk.
instances = {'spam': ["buy viagra", "buy cialis"] * 100 + ["meeting love"],
    'genuine': ["meeting tomorrow", "buy milk"] * 100}
# Use str.split to extract features/events/words from the corpus and build
# the model.
model = Bayes.extract_events_odds(instances, str.split)
# Create a new Bayes instance with priors of 90% spam, 10% genuine.
b = Bayes({'spam': .9, 'genuine': .1})
# Update beliefs with features/events/words from an email.
b.update_from_events("buy coffee for meeting".split(), model)
# Print the email and if it's likely spam or not.
print("'buy coffee for meeting'", ':', b)
print('')
print(' -- Are You Cheating? -- ')
# Five flips, four heads: how likely is the coin (or player) to be rigged?
results = ['heads', 'heads', 'tails', 'heads', 'heads']
events_odds = {'heads': {'honest': .5, 'cheating': .9},
    'tails': {'honest': .5, 'cheating': .1}}
b = Bayes({'cheating': .5, 'honest': .5})
b.update_from_events(results, events_odds)
print(b)
def b():
    """Return a fresh labelled 1%-cancer prior for the examples below."""
    labels = ['not cancer', 'cancer']
    return Bayes((0.99, 0.01), labels=labels)
# Random equivalent examples: every line below computes the same posterior
# as ``b() * (9.6, 80)`` through a different part of the Bayes API.
# BUG FIX: removed trailing "| PypiClean" extraction garbage from the last
# line, which raised a NameError at runtime.
b() * (9.6, 80)
(b() * (9.6, 80)).opposite().opposite()
b().update({'not cancer': 9.6, 'cancer': 80})
b().update((9.6, 80))
b().update_from_events(['pos'], {'pos': (9.6, 80)})
b().update_from_tests([True], [(9.6, 80)])
Bayes([('not cancer', 0.99), ('cancer', 0.01)]) * (9.6, 80)
Bayes({'not cancer': 0.99, 'cancer': 0.01}) * {'not cancer': 9.6,
    'cancer': 80}
/ElasticQuery-3.2.tar.gz/ElasticQuery-3.2/elasticquery/dsl.py |
from .dsl_util import make_struct, unroll_definitions, unroll_struct
class MetaQuery(type):
    '''
    Metaclass mapping attributes to dsl objects on Filter/Query getattr.
    '''

    def __init__(cls, name, bases, d):
        super(MetaQuery, cls).__init__(name, bases, d)
        unroll_definitions(cls._definitions)

    def __getattr__(cls, key):
        # Pytest probes classes for ``__test__``; don't treat it as a DSL name.
        if key == '__test__':
            return None
        if key not in cls._definitions:
            raise cls._exception(key)

        def build(*args, **kwargs):
            # Generates a new class object with a struct based on the
            # definitions.
            return cls(key, make_struct(cls._definitions[key],
                                        *args, **kwargs))

        return build
class MetaAggregate(MetaQuery):
    '''
    Modified MetaQuery.MetaAggregate getattr to handle aggregate names.
    '''

    def __getattr__(cls, key):
        if key == '__test__':
            return None
        if key not in cls._definitions:
            raise cls._exception(key)

        def build(*args, **kwargs):
            # args[0] is the aggregate's name; the remaining positional
            # arguments feed the struct definition.
            return cls(key, args[0],
                       make_struct(cls._definitions[key],
                                   *args[1:], **kwargs))

        return build
class MetaSuggester(MetaQuery):
    '''
    Modified MetaQuery.MetaSuggester getattr to handle suggester names and text.
    '''

    def __getattr__(cls, key):
        if key == '__test__':
            return None
        if key not in cls._definitions:
            raise cls._exception(key)

        def build(*args, **kwargs):
            # args[0] is the suggester's name, args[1] its input text; the
            # remaining positional arguments feed the struct definition.
            return cls(key, args[0], args[1],
                       make_struct(cls._definitions[key],
                                   *args[2:], **kwargs))

        return build
class BaseQuery(object):
    '''
    The base class which represents a Filter/Query struct.
    '''

    _struct = None
    _dsl_type = None

    def __init__(self, dsl_type, struct):
        self._dsl_type = dsl_type
        self._struct = struct

    def dict(self):
        '''Serialize to a plain dict keyed by the DSL type name.'''
        dsl_type = self._dsl_type
        # Handle reserved Python keyword alternatives (from_, or_)
        if dsl_type.endswith('_'):
            dsl_type = dsl_type[:-1]
        return {dsl_type: unroll_struct(self._struct)}
class BaseAggregate(BaseQuery):
    '''
    Modified BaseQuery to handle aggregate name storage.
    '''

    _name = None

    def __init__(self, dsl_type, name, struct):
        self._dsl_type = dsl_type
        self._struct = struct
        self._name = name
        self._aggs = []

    def dict(self):
        '''Serialize to a plain dict, nesting any sub-aggregations.'''
        body = {self._dsl_type: unroll_struct(self._struct)}
        if self._aggs:
            # Merge all sub-aggregations under a single 'aggregations' key.
            merged = {}
            for sub_aggregate in self._aggs:
                merged.update(sub_aggregate.dict())
            body['aggregations'] = merged
        return {self._name: body}

    def aggregate(self, *aggregates):
        '''Attach sub-aggregations; chainable (returns self).'''
        self._aggs.extend(aggregates)
        return self
class BaseSuggester(BaseQuery):
    '''
    Modified BaseQuery to handle suggester name & text storage.
    '''

    _name = None

    def __init__(self, dsl_type, name, text, struct):
        self._dsl_type = dsl_type
        self._struct = struct
        self._name = name
        self._text = text
        self._suggs = []

    def dict(self):
        '''Serialize to the Elasticsearch suggester request body.'''
        struct = {
            self._name: {
                'text': self._text,
                self._dsl_type: unroll_struct(self._struct),
            },
        }
        # BUG FIX: the return statement previously carried trailing
        # "| PypiClean" extraction garbage, raising NameError at call time.
        return struct
# BCAWT: Automated tool for codon usage bias analysis for molecular evolution
[](https://travis-ci.org/AliYoussef96/BCAW-Tool)
[](https://bcaw-tools-documentation.readthedocs.io/en/latest/?badge=latest)
[](https://badge.fury.io/py/BCAWT)
[](http://joss.theoj.org/papers/5c17f813c2eca6b9d7c4ecf5d2ea97e9)
## BCAW tool Updates
Now you can run BCAW tool using a GUI software that can work on any operating system. It is very easy to use. For more information and to download it: [BCAWT-GUI](https://github.com/AliYoussef96/BCAWT-GUI).
## Statement of Need
There are no tools available that enable users to run a whole automated workflow for codon usage bias analysis. Using Python 3.7, the BCAW Tool (Bio Codon Analysis Workflow Tool) was developed to address this problem.
BCAW Tool manages a complete automated workflow to analyze the codon usage bias for the genes and genomes of any organism, with minimal coding skills required.
For more details about codon usage bias , and the equations used in BCAWT [see](https://bcaw-tools-documentation.readthedocs.io/en/latest/intro.html).
## Dependencies
1- Biopython
2- pandas
3- CAI
4- scipy
5- matplotlib
6- numpy
7- prince
## Installation Instructions
**Using pip**
```python
pip install BCAWT
```
**Note:** Python >=3.7 is required.
## Contribution Guidelines
**Contributions to the software are welcome**
For bugs and suggestions, the most effective way is by raising an issue on the github issue tracker.
Github allows you to classify your issues so that we know if it is a bug report, feature request or feedback to the authors.
If you wish to contribute some changes to the code then you should submit a [pull request](https://github.com/AliYoussef96/BCAW-Tool/pulls)
How to create a Pull Request? [documentation on pull requests](https://help.github.com/en/articles/about-pull-requests)
## Usage
#### Auto testing
**Note here we try to test the result of BCAW tool and not the modules, for testing the modules in the package use [test.py](https://github.com/AliYoussef96/BCAW-Tool/blob/master/tests/test.py)**
First download fasta file containing the coding sequence ( you can download any fasta file containing gene sequences to be analyzed from [NCBI](https://www.ncbi.nlm.nih.gov/) database).
or just download that file [Test file](https://github.com/AliYoussef96/BCAW-Tool/blob/master/tests/Ecoli.fasta)
then run ( It will automatically run a test on the results files ):
```python
from BCAWT import BCAWT_auto_test
path = "Test_folder" # absolute path to the directory to save the result in
test_fasta = "Test_fasta_file" # absolute path to the fasta file that will be tested
BCAWT_auto_test.auto_test(path, test_fasta)
#processing....
BCAWT_auto_test.auto_check_files(path) # note: this test assumes that in the result folder nothing except the result files form the above function.
```
#### Main Usage
```python
from BCAWT import BCAWT
BCAWT.BCAW(['Ecoli.fasta'],'save_path',genetic_code_=11,Auto=True)
```
## Input
```
main_fasta_file (list): list of string of the file's path or file-like object
save_path (str): absolute path to the directory to save the result in, default = the current directory
ref_fasta_file (list): list of string of the file's path or file-like object, default = None
Auto (bool): default = False, if ref_fasta_file not None.
genetic_code_ (int) : default = 1, The Genetic Codes number described by [NCBI](https://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi)
```
**Important Note:** BCAW tool expects coding sequences as input, not genes. For more information about the difference between them, take a look [here](https://qr.ae/TWt2gE)
#### To obtain such fasta file for a species of interest
Say that the species of interest is Escherichia coli str. K-12 substr. MG1655:
1- Go to the NCBI database.
2- In the search bar write ( Escherichia coli str. K-12 substr. MG1655, complete genome ).
3- choose one of the results ( depending on what you want in your analysis ).
3- On the right of the page, you will find **send to** option. From **sent to** select **Coding Sequences** then **FASTA nucleotides** Finally, press on **Create File**
For [NCBI Genomes Download (FTP) FAQ](https://www.ncbi.nlm.nih.gov/genome/doc/ftpfaq/)
## Output
#### The expected CSV files output
|CSV file name|Description|
|------------|-----------|
| ATCG | contains ; gene id, GC, GC1, GC2, GC3, GC12, AT, AT3 A3, T3, C3, G3, GRAVY, AROMO and, Gene Length |
| CA_RSCU | contains ; each RSCU result for each codon in each genes |
| CA_RSCUcodons | contains ; correspondence analysis first 4 axis for each codon |
| CA_RSCUgenes | contains ; correspondence analysis first 4 axis for each gene |
| CAI | contains ; gene id and CAI index |
| ENc | contains ; gene id and ENc index. |
| P2-index | contains ; gene id and P2 index |
| optimal codons | contains; putative optimal codons detected |
#### All output plots from BCAW tool analysis for coding sequence from Escherichia coli

## Documentations
1. An intro to the codon usage bias >> [CUB introduction](https://bcaw-tools-documentation.readthedocs.io/en/latest/intro.html)
2. For more information about the equations used to analyze CUB in the BCAW tool >> [Equations](https://bcaw-tools-documentation.readthedocs.io/en/latest/intro.html#equations-used-for-codon-usage-bias-analysis)
3. For more information about the output >> [Output](https://bcaw-tools-documentation.readthedocs.io/en/latest/Table_output.html)
4. For more information about the abbreviations used >> [Abbreviations table](https://github.com/AliYoussef96/BCAW-Tool/blob/master/Abbreviations.md)
## Citation
Anwar, (2019). BCAWT: Automated tool for codon usage bias analysis for molecular evolution. Journal of Open Source Software, 4(42), 1500, https://doi.org/10.21105/joss.01500
/MultiRunnable-0.17.0a2-py3-none-any.whl/multirunnable/framework/api/operator.py | from typing import Union, NewType
from abc import ABCMeta, abstractmethod
from ...types import (
MRLock as _MRLock,
MRRLock as _MRRLock,
MRSemaphore as _MRSemaphore,
MRBoundedSemaphore as _MRBoundedSemaphore,
MREvent as _MREvent,
MRCondition as _MRCondition,
MRQueue as _MRQueue
)
# Union of every synchronization-feature type this module can operate on,
# wrapped in a NewType so annotations stay distinct from the raw Union.
__MRFeature = Union[_MRLock, _MRRLock, _MRSemaphore, _MRBoundedSemaphore, _MREvent, _MRCondition, _MRQueue]
_MRFeatureType = NewType("MRFeatureType", __MRFeature)
class AdapterOperator(metaclass=ABCMeta):
    """Abstract marker base class for synchronous feature operators."""
    pass
class BaseLockAdapterOperator(AdapterOperator):
    """Template for lock-like operators backed by a lazily-fetched feature."""

    # Cached backing feature object; fetched on first access.
    _Feature_Instance: _MRFeatureType = None

    def __init__(self, *args, **kwargs):
        pass

    def __repr__(self):
        return f"<Operator object for {repr(self._feature_instance)}>"

    def __enter__(self):
        # Delegate context management to the wrapped feature object.
        self._feature_instance.__enter__()

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._feature_instance.__exit__(exc_type, exc_val, exc_tb)

    @property
    def _feature_instance(self) -> _MRFeatureType:
        """Return the backing feature, fetching it lazily on first use."""
        if self._Feature_Instance is None:
            self._Feature_Instance = self._get_feature_instance()
            if self._Feature_Instance is None:
                feature_name = type(self).__name__.replace("Operator", "")
                raise ValueError(f"The {feature_name} object not be initialed yet.")
        return self._Feature_Instance

    @_feature_instance.setter
    def _feature_instance(self, feature: _MRFeatureType) -> None:
        self._Feature_Instance = feature

    @abstractmethod
    def _get_feature_instance(self) -> _MRFeatureType:
        """Fetch the backing feature object from the runtime."""
        pass

    @abstractmethod
    def acquire(self, *args, **kwargs) -> None:
        pass

    @abstractmethod
    def release(self, *args, **kwargs) -> None:
        pass
class _AsyncContextManager:
def __init__(self, lock):
self._lock = lock
def __enter__(self):
return None
def __exit__(self, exc_type, exc_val, exc_tb):
self._lock.release()
class AsyncAdapterOperator(metaclass=ABCMeta):
    """Abstract base for async operators; forbids synchronous ``with``."""

    def __enter__(self):
        # Async operators must be entered via ``async with``; using the
        # synchronous protocol is always an error.
        raise RuntimeError("")

    def __exit__(self, exc_type, exc_val, exc_tb):
        pass
class BaseAsyncLockAdapterOperator(AsyncAdapterOperator):
    """Template for awaitable lock-like operators.

    Supports ``await op`` (yielding a sync context manager that releases
    on exit), ``async with op``, and explicit ``acquire``/``release``
    calls, all backed by a lazily-fetched feature object.
    """

    # Cached backing feature object; fetched on first access.
    _Feature_Instance: _MRFeatureType = None

    def __init__(self, *args, **kwargs):
        pass

    def __repr__(self):
        return f"<AsyncOperator object for {repr(self._feature_instance)}>"

    def __await__(self):
        # ``await op`` acquires the lock and returns a context manager
        # that releases it, enabling ``with await op: ...``.
        return self.__acquire_ctx().__await__()

    async def __aenter__(self):
        await self._feature_instance.__aenter__()
        return None

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self._feature_instance.__aexit__(exc_type, exc_val, exc_tb)

    async def __acquire_ctx(self):
        await self.acquire()
        return _AsyncContextManager(self)

    @property
    def _feature_instance(self) -> _MRFeatureType:
        """Return the backing feature, fetching it lazily on first use."""
        if self._Feature_Instance is None:
            self._Feature_Instance = self._get_feature_instance()
            if self._Feature_Instance is None:
                feature_name = type(self).__name__.replace("Operator", "")
                raise ValueError(f"The {feature_name} object not be initialed yet.")
        return self._Feature_Instance

    @_feature_instance.setter
    def _feature_instance(self, feature: _MRFeatureType) -> None:
        self._Feature_Instance = feature

    @abstractmethod
    def _get_feature_instance(self) -> _MRFeatureType:
        """Fetch the backing feature object from the runtime."""
        pass

    @abstractmethod
    async def acquire(self, *args, **kwargs) -> None:
        pass

    @abstractmethod
    def release(self, *args, **kwargs) -> None:
        # BUG FIX: the original last line read ``pass | PypiClean``
        # (extraction garbage), which is a SyntaxError.
        pass
/BanterBot-0.0.5.tar.gz/BanterBot-0.0.5/banterbot/utils/word.py | import datetime
from dataclasses import dataclass
from typing import Optional
import azure.cognitiveservices.speech as speechsdk
from banterbot.data.enums import SpeechProcessingType, WordCategory
@dataclass(frozen=True)
class Word:
    """
    This class encapsulates a word in the output of a text-to-speech synthesis or input from a speech-to-text
    recognition. It includes the word itself and the timestamp when the word was spoken. Optionally, its category (e.g.,
    word, punctuation using Azure's Speech Synthesis Boundary Type), and its confidence score can be included too.

    Attributes:
        word (str): The word that has been synthesized/recognized.
        offset (datetime.timedelta): Time elapsed between initialization and synthesis/recognition.
        duration (datetime.timedelta): Amount of time required for the word to be fully spoken.
        category (WordCategory): The category of the text contents.
        confidence (Optional[float]): The confidence score (for speech-to-text) for the given word.
        source (SpeechProcessingType): Whether the word's source is text-to-speech (TTS) or speech-to-text (STT).
    """

    word: str
    offset: datetime.timedelta
    duration: datetime.timedelta
    category: WordCategory
    source: SpeechProcessingType
    confidence: Optional[float] = None

    def __len__(self) -> int:
        """
        Return the length of the word, so callers can filter or sort `Word`
        instances without reaching into the `word` attribute directly.
        """
        return len(self.word)

    def __str__(self) -> str:
        """
        Return a human-readable description of the instance: the word, its
        offset/duration (whole seconds), and its source.
        """
        # BUG FIX: every fragment must be an f-string; the original left
        # the "word:" fragment as a plain string, so the literal text
        # "{self.word}" was emitted instead of the word itself.
        description = (
            f"<"
            f"word: '{self.word}' "
            f"| offset: {self.offset.seconds}s "
            f"| duration: {self.duration.seconds}s "
            f"| source: {self.source}"
            f">"
        )
        return description

    def __repr__(self) -> str:
        """
        Return the word itself, simplifying display in contexts such as
        printing a list of `Word` instances.
        """
        # BUG FIX: removed trailing "| PypiClean" extraction garbage that
        # raised NameError whenever repr() was called.
        return self.word
Change log
================================================================================
0.0.7 - 20.07.2017
--------------------------------------------------------------------------------
Updated
********************************************************************************
#. the initialization method has been modified. Please call init_excel(app)
before you do anything else. This change was made in order to apply for
approved flask extension status. And by doing this change, it will support
multiple Flask apps and only the app that was initialized with init_excel
gets Flask-Excel and other apps in your BIG app won't get affected.
0.0.6 - 22.06.2017
--------------------------------------------------------------------------------
Updated
********************************************************************************
#. `#22 <https://github.com/pyexcel/Flask-Excel/issues/22>`_: support download
file name in unicode(including Chinese texts)
0.0.5 - 21.08.2016
--------------------------------------------------------------------------------
Updated
********************************************************************************
#. compatibility with pyexcel v0.2.2: automatic discovery of pyexcel plugins.
#. `#15 <https://github.com/pyexcel/Flask-Excel/issues/15>`_: file name may have
more than one dot
0.0.4 - 15.01.2016
--------------------------------------------------------------------------------
Updated
********************************************************************************
#. `#8 <https://github.com/pyexcel/Flask-Excel/issues/8>`_: set file name in response
0.0.3 - 01.07.2015
--------------------------------------------------------------------------------
Updated
********************************************************************************
#. code refactoring. less code lines in Flask-Excel and more reusable code in
pyexcel-webio
0.0.2 - 21.05.2015
--------------------------------------------------------------------------------
Added
********************************************************************************
#. turn query sets into a response
0.0.1 - 22.01.2015
--------------------------------------------------------------------------------
Mix pyexcel into Flask.request and bring more make_response functions.
/Bedframe-0.13.5.tar.gz/Bedframe-0.13.5/bedframe/webtypes/_python/_datetime.py |
__copyright__ = "Copyright (C) 2014 Ivan D Vasin"
__docformat__ = "restructuredtext"
import datetime as _datetime
import re as _re
import pytz as _tz
from .. import _core
class datetime(_core.webobject):
    """A web-transmittable date and time
    This wraps a timezone-aware :class:`datetime.datetime`.
    """
    @classmethod
    def fromprim(cls, prim):
        """Parse the primitive string form produced by :meth:`prim`.

        The date part ('YYYY-MM-DD') is required; the time part and the
        '+HHMM'/'-HHMM' UTC offset are optional (captured through the
        zero-width lookaheads in ``_FORMAT_RE``).  Missing time fields
        default to zero; a missing offset means UTC.  Raises
        :exc:`ValueError` on malformed input.
        """
        # FIXME: generalize this and move it to :mod:`spruce.datetime`
        match = cls._FORMAT_RE.match(prim)
        if match:
            try:
                year = int(match.group('year'))
                month = int(match.group('month'))
                day = int(match.group('day'))
                # Time components are optional; absent groups are None,
                # hence the ``or 0`` fallbacks.
                hour = int(match.group('hour') or 0)
                minute = int(match.group('minute') or 0)
                second = int(match.group('second') or 0)
                microsecond = int(match.group('microsecond') or 0)
            except (TypeError, ValueError):
                # FIXME
                raise ValueError()
            tz_sign = match.group('tz_sign')
            if tz_sign:
                try:
                    # Apply the sign to both components so e.g. '-0130'
                    # yields -90 minutes total.
                    tz_hours = int(tz_sign + match.group('tz_hours'))
                    tz_minutes = int(tz_sign
                                     + (match.group('tz_minutes') or '0'))
                except (TypeError, ValueError):
                    # FIXME
                    raise ValueError()
                tz_minutes += tz_hours * 60
                tzinfo = _tz.FixedOffset(tz_minutes)
            else:
                # No explicit offset in the input: assume UTC.
                tzinfo = _tz.UTC
            return cls(_datetime.datetime(year, month, day, hour, minute,
                second, microsecond, tzinfo))
        # FIXME
        raise ValueError()
    def prim(self):
        """Serialize the wrapped datetime to its primitive string form."""
        # NOTE(review): ``unicode`` implies Python 2; under Python 3 this
        # raises NameError — confirm the intended runtime.
        return unicode(self.native().strftime(self._FORMAT))
    _FORMAT = '%Y-%m-%d %H:%M:%S.%f %z'
    # The nested ``(?= ...)`` lookaheads capture the optional time and
    # timezone groups without requiring them to be present.
    # NOTE(review): the '.' between seconds and microseconds is unescaped
    # and matches any character — presumably intended as a literal dot.
    _FORMAT_RE = \
        _re.compile(r'(?P<year>\d+)-(?P<month>\d+)-(?P<day>\d+)'
                    r'(?= (?P<hour>\d+):(?P<minute>\d+):(?P<second>\d+)'
                    r'.(?P<microsecond>\d+)'
                    r'(?= (?P<tz_sign>[-+])(?P<tz_hours>\d\d)'
                    r'(?P<tz_minutes>\d\d)?)?)?')
class timedelta(_core.webobject):
    """A web-transmittable time difference
    This wraps a :class:`datetime.timedelta`.
    """
    @classmethod
    def fromprim(cls, prim):
        """Parse a primitive string of the form '+D days +S.UUUUUU s'.

        :raise ValueError: if *prim* does not match the expected format.
        """
        match = cls._FORMAT_RE.match(prim)
        if match:
            try:
                days = int(match.group('days'))
                seconds = int(match.group('seconds'))
                microseconds = int(match.group('microseconds'))
            except (TypeError, ValueError):
                # FIXME: raise a more descriptive error
                raise ValueError()
            return cls(_datetime.timedelta(days=days, seconds=seconds,
                                           microseconds=microseconds))
        # FIXME: raise a more descriptive error
        raise ValueError()
    def prim(self):
        """Serialize the wrapped timedelta to its primitive string form."""
        return self._FORMAT.format(td=self.native())
    _FORMAT = u'{td.days:+} days {td.seconds:+}.{td.microseconds:06} s'
    # BUG FIX: removed trailing "| PypiClean" extraction garbage that made
    # this class fail to define (NameError), and escaped the separator dot
    # (r'\.'), which previously matched any character.
    _FORMAT_RE = _re.compile(r'(?P<days>[+-]\d+) days'
                             r' (?P<seconds>[+-]\d+)\.(?P<microseconds>\d+) s')
/Faker-19.3.1.tar.gz/Faker-19.3.1/faker/providers/lorem/bn_BD/__init__.py | from typing import Dict
from .. import Provider as LoremProvider
class Provider(LoremProvider):
    """Implement lorem provider for ``bn_BD`` locale."""

    # source 1: https://en.wikipedia.org/wiki/Bengali_vocabulary
    # source 2: https://en.wikipedia.org/wiki/Bengali_grammar
    # Words are joined with a plain space; sentences end with the Bengali
    # danda (U+0964) rather than a full stop.
    word_connector = " "
    sentence_punctuation = "।"
    word_list = (
        "পানি",
        "লবণ",
        "দাওয়াত",
        "মরিচ",
        "খালা",
        "ফুফু",
        "গোসল",
        "বাতাস",
        "চাহিদা",
        "স্বাগতম",
        "যোগ",
        "আসন",
        "আশ্রম",
        "আয়ুর্বেদ",
        "বন্ধন",
        "খাট",
        "ধুতি",
        "মায়া",
        "স্বামী",
        "লক্ষ্মী",
        "লক্ষ্মণ",
        "কুড়ি",
        "খুকি",
        "খোকা",
        "খোঁচা",
        "খোঁজ",
        "চাল",
        "চিংড়ি",
        "চুলা",
        "ঝিনুক",
        "ঝোল",
        "ঠ্যাং",
        "ঢোল",
        "পেট",
        "বোবা",
        "মাঠ",
        "মুড়ি",
        "আবহাওয়া",
        "চাকরি",
        "আয়না",
        "আরাম",
        "বকশিশ",
        "আস্তে",
        "কাগজ",
        "খারাপ",
        "খোদা",
        "খুব",
        "গরম",
        "চশমা",
        "চাকর",
        "চাদর",
        "জান",
        "জায়গা",
        "ডেগচি",
        "দম",
        "দেরি",
        "দোকান",
        "পর্দা",
        "বদ",
        "বাগান",
        "রাস্তা",
        "রোজ",
        "হিন্দু",
        "পছন্দ",
        "টেক্কা",
        "আলু",
        "নখ",
        "খুন",
        "আওয়াজ",
        "আসল",
        "এলাকা",
        "ওজন",
        "কলম",
        "খবর",
        "খালি",
        "খেয়াল",
        "গরিব",
        "জমা",
        "তারিখ",
        "দুনিয়া",
        "নকল",
        "ফকির",
        "বদল",
        "বাকি",
        "শয়তান",
        "সাহেব",
        "সনদ",
        "সাল",
        "সন",
        "হিসাব",
        "দাদা",
        "বাবা",
        "নানি",
        "চকমক",
        "বাবুর্চি",
        "বেগম",
        "কেচি",
        "লাশ",
        "তবলা",
        "আলমারি",
        "ইস্ত্রি",
        "ইস্তিরি",
        "ইস্পাত",
        "কামিজ",
        "গামলা",
        "চাবি",
        "জানালা",
        "তামাক",
        "পেরেক",
        "ফিতা",
        "বারান্দা",
        "বালতি",
        "বেহালা",
        "বোতাম",
        "মেজ",
        "সাবান",
        "কেদারা",
        "আতা",
        "আনারস",
        "কাজু",
        "কপি",
        "পেঁপে",
        "পেয়ারা",
        "সালাদ",
        "গির্জা",
        "যিশু",
        "পাদ্রি",
        "ইংরেজ",
        "অফিস",
        "জেল",
        "ডাক্তার",
        "পুলিশ",
        "ব্যাংক",
        "ভোট",
        "স্কুল",
        "হাসপাতাল",
        "কাপ",
        "গ্লাস",
        "চেয়ার",
        "টেবিল",
        "বাক্স",
        "লণ্ঠন",
        "প্লাস্টিক",
        "কলেজ",
        "সাইকেল",
        "রেস্তোরাঁ",
        "সুড়ঙ্গ",
        "চা",
        "চিনি",
        "সুনামি",
        "রিক্সা",
        "বোকা",
        "ছোট্ট",
        "লুঙ্গি",
        "ডেঙ্গু",
        "মানুষজন",
        "মাফিয়া",
        "স্টুডিও",
        "ম্যালেরিয়া",
        "ক্যাঙারু",
        "বুমেরাং",
        "আমি",
        "তুই",
        "তুমি",
        "আপনি",
        "এ",
        "ইনি",
        "ও",
        "উনি",
        "সে",
        "তিনি",
        "সেটি",
        "আমরা",
        "তোরা",
        "তোমরা",
        "আপনারা",
        "এরা",
        "এগুলো",
        "ওরা",
        "এঁরা",
        "ওঁরা",
        "তারা",
        "তাঁরা",
        "সেগুলো",
        "আমাকে",
        "তোকে",
        "আমাদেরকে",
        "তোদেরকে",
        "তোমাকে",
        "তোমাদেরকে",
        "আপনাকে",
        "আপনাদেরকে",
        "একে",
        "এদেরকে",
        "এঁকে",
        "এঁদেরকে",
        "এটি",
        "এটা",
        "ওকে",
        "ওদেরকে",
        "ওঁকে",
        "ওঁদেরকে",
        "ওটি",
        "ওটা",
        "ওগুলো",
        "তাকে",
        "তাদেরকে",
        "তাঁকে",
        "তাঁদেরকে",
        "সেটা",
        "কে",
        "কার",
        "কাকে",
        "কোন",
        "কি",
        "কেউ",
        "কারও",
        "কাউকে",
        "কোনও",
        "কিছু",
    )
    # BUG FIX: removed trailing "| PypiClean" extraction garbage that made
    # this assignment raise NameError at import time.
    parts_of_speech: Dict[str, tuple] = {}
/FFC-2017.1.0.tar.gz/FFC-2017.1.0/ffc/plot.py | "This module provides functionality for plotting finite elements."
# Copyright (C) 2010 Anders Logg
#
# This file is part of FFC.
#
# FFC is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FFC is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with FFC. If not, see <http://www.gnu.org/licenses/>.
#
# First added: 2010-12-07
# Last changed: 2010-12-15
__all__ = ["plot"]
from numpy import dot, cross, array, sin, cos, pi, sqrt
from numpy.linalg import norm
import sys
from ffc.fiatinterface import create_element
from ffc.log import warning, error, info
# Import Soya3D
try:
import soya
from soya.sphere import Sphere
from soya.label3d import Label3D
from soya.sdlconst import QUIT
_soya_imported = True
except:
_soya_imported = False
# RGB color (floats in [0, 1]) used to draw each supported element family.
element_colors = {"Argyris": (0.45, 0.70, 0.80),
                  "Arnold-Winther": (0.00, 0.00, 1.00),
                  "Brezzi-Douglas-Marini": (1.00, 1.00, 0.00),
                  "Crouzeix-Raviart": (1.00, 0.25, 0.25),
                  "Discontinuous Lagrange": (0.00, 0.25, 0.00),
                  "Discontinuous Raviart-Thomas": (0.90, 0.90, 0.30),
                  "Hermite": (0.50, 1.00, 0.50),
                  "Lagrange": (0.00, 1.00, 0.00),
                  "Mardal-Tai-Winther": (1.00, 0.10, 0.90),
                  "Morley": (0.40, 0.40, 0.40),
                  "Nedelec 1st kind H(curl)": (0.90, 0.30, 0.00),
                  "Nedelec 2nd kind H(curl)": (0.70, 0.20, 0.00),
                  "Raviart-Thomas": (0.90, 0.60, 0.00)}
def plot(element, rotate=True):
    """Plot a finite element in a Soya3D window.

    *element* is either a finite element object or the string "notation"
    (which plots the dof-notation legend instead).  *rotate* controls
    whether the rendered model spins.  Does nothing but warn when Soya3D
    is unavailable.
    """
    # Check if Soya3D has been imported
    if not _soya_imported:
        warning("Unable to plot element, Soya3D not available (install package python-soya).")
        return
    # Special case: plot dof notation
    if element == "notation":
        # Create model for notation
        notation = create_notation_models()
        # Render plot window
        render(notation, "Notation", 0, True, rotate)
    else:
        # Create cell model
        cell, is3d = create_cell_model(element)
        cellname = element.cell().cellname() # Assuming single cell
        # Create dof models
        dofs, num_moments = create_dof_models(element)
        # Create title, omitting the degree when the element has none.
        if element.degree() is not None:
            title = "%s of degree %d on a %s" % (element.family(), element.degree(), cellname)
        else:
            title = "%s on a %s" % (element.family(), cellname)
        # Render plot window
        render([cell] + dofs, title, num_moments, is3d, rotate)
def render(models, title, num_moments, is3d, rotate):
    """Render given list of models in a Soya3D window.

    is3d selects tetrahedron-style viewing (rotation about the y-axis)
    versus triangle-style viewing (rotation about the z-axis); rotate
    selects between a spinning and a static display. num_moments is
    currently only used by the commented-out label code below.
    Blocks in the Soya main loop until the window is closed.
    """
    # Note that we view from the positive z-axis, and not from the
    # negative y-axis. This should make no difference since the
    # element dofs are symmetric anyway and it plays better with
    # the default camera settings in Soya.
    # Initialize Soya
    soya.init(title)
    # Create scene
    scene = soya.World()
    scene.atmosphere = soya.Atmosphere()
    if title == "Notation":
        scene.atmosphere.bg_color = (0.0, 1.0, 0.0, 1.0)
    else:
        scene.atmosphere.bg_color = (1.0, 1.0, 1.0, 1.0)
    # Not used, need to manually handle rotation
    # label = Label3D(scene, text=str(num_moments), size=0.005)
    # label.set_xyz(1.0, 1.0, 1.0)
    # label.set_color((0.0, 0.0, 0.0, 1.0))
    # Define rotation
    if is3d:
        class RotatingBody(soya.Body):
            def advance_time(self, proportion):
                self.rotate_y(2.0 * proportion)
    else:
        class RotatingBody(soya.Body):
            def advance_time(self, proportion):
                self.rotate_z(2.0 * proportion)
    # Select type of display, rotating or not
    if rotate:
        Body = RotatingBody
    else:
        Body = soya.Body
    # Add all models
    for model in models:
        body = Body(scene, model)
    # Set light
    light = soya.Light(scene)
    if is3d:
        light.set_xyz(1.0, 5.0, 5.0)
    else:
        light.set_xyz(0.0, 0.0, 1.0)
    light.cast_shadow = 1
    light.shadow_color = (0.0, 0.0, 0.0, 0.5)
    # Set camera
    # Camera position/fov values below are hand-tuned per display mode.
    camera = soya.Camera(scene)
    camera.ortho = 0
    p = camera.position()
    if is3d:
        if rotate:
            camera.set_xyz(-20, 10, 50.0)
            camera.fov = 2.1
            p.set_xyz(0.0, 0.4, 0.0)
        else:
            camera.set_xyz(-20, 10, 50.0)
            camera.fov = 1.6
            p.set_xyz(0.3, 0.42, 0.5)
    else:
        if rotate:
            camera.set_xyz(0, 10, 50.0)
            camera.fov = 2.6
            p.set_xyz(0.0, 0.0, 0.0)
        else:
            camera.set_xyz(0, 10, 50.0)
            camera.fov = 1.7
            p.set_xyz(0.5, 0.4, 0.0)
    camera.look_at(p)
    soya.set_root_widget(camera)
    # Handle exit
    class Idler(soya.Idler):
        def end_round(self):
            for event in self.events:
                if event[0] == QUIT:
                    print("Closing plot, bye bye")
                    sys.exit(0)
    # Main loop
    idler = Idler(scene)
    idler.idle()
def tangents(n):
    """Return two orthonormal tangent vectors for the plane with normal n.

    n need not be normalized; both returned vectors are unit length,
    orthogonal to n and to each other.
    """
    # Figure out which vector to take cross product with; fall back to
    # the y-axis when n is (numerically) parallel to the x-axis.
    eps = 1e-10
    e = array((1.0, 0.0, 0.0))
    if norm(cross(n, e)) < eps:
        e = array((0.0, 1.0, 0.0))
    # Take cross products and normalize
    t0 = cross(n, e)
    t0 = t0 / norm(t0)
    t1 = cross(n, t0)
    # Bug fix: the original divided by norm(t0), which is 1.0 after the
    # normalization above, so t1 was left unnormalized whenever the
    # caller passed a non-unit n (as Arrow does).
    t1 = t1 / norm(t1)
    return t0, t1
def Cylinder(scene, p0, p1, r, color=(0.0, 0.0, 0.0, 1.0)):
    """Return model for cylinder from p0 to p1 with radius r.

    The lateral surface is approximated by num_steps quad segments,
    each covered by two double-sided triangles. p0/p1 may be soya
    Vertex objects or plain coordinate triples. The model is extracted
    from scene via to_model().
    """
    # Convert to NumPy array
    if isinstance(p0, soya.Vertex):
        p0 = array((p0.x, p0.y, p0.z))
        p1 = array((p1.x, p1.y, p1.z))
    else:
        p0 = array(p0)
        p1 = array(p1)
    # Get tangent vectors for plane
    n = p0 - p1
    n = n / norm(n)
    t0, t1 = tangents(n)
    # Traverse the circles
    num_steps = 10
    dtheta = 2.0 * pi / float(num_steps)
    for i in range(num_steps):
        # Compute coordinates for square
        dx0 = cos(i * dtheta) * t0 + sin(i * dtheta) * t1
        dx1 = cos((i + 1) * dtheta) * t0 + sin((i + 1) * dtheta) * t1
        x0 = p0 + r * dx0
        x1 = p0 + r * dx1
        x2 = p1 + r * dx0
        x3 = p1 + r * dx1
        # Cover square by two triangles
        v0 = soya.Vertex(scene, x0[0], x0[1], x0[2], diffuse=color)
        v1 = soya.Vertex(scene, x1[0], x1[1], x1[2], diffuse=color)
        v2 = soya.Vertex(scene, x2[0], x2[1], x2[2], diffuse=color)
        v3 = soya.Vertex(scene, x3[0], x3[1], x3[2], diffuse=color)
        f0 = soya.Face(scene, (v0, v1, v2))
        f1 = soya.Face(scene, (v1, v2, v3))
        f0.double_sided = 1
        f1.double_sided = 1
    # Extract model
    model = scene.to_model()
    return model
def Cone(scene, p0, p1, r, color=(0.0, 0.0, 0.0, 1.0)):
    """Return model for cone from p0 (base, radius r) to p1 (apex).

    The base circle is approximated by num_steps segments; each segment
    forms a double-sided triangle with the shared apex vertex v2.
    """
    # Convert to NumPy array
    if isinstance(p0, soya.Vertex):
        p0 = array((p0.x, p0.y, p0.z))
        p1 = array((p1.x, p1.y, p1.z))
    else:
        p0 = array(p0)
        p1 = array(p1)
    # Get tangent vectors for plane
    n = p0 - p1
    n = n / norm(n)
    t0, t1 = tangents(n)
    # Traverse the circles
    num_steps = 10
    dtheta = 2.0 * pi / float(num_steps)
    # Shared apex vertex for all faces
    v2 = soya.Vertex(scene, p1[0], p1[1], p1[2], diffuse=color)
    for i in range(num_steps):
        # Compute coordinates for bottom of face
        dx0 = cos(i * dtheta) * t0 + sin(i * dtheta) * t1
        dx1 = cos((i + 1) * dtheta) * t0 + sin((i + 1) * dtheta) * t1
        x0 = p0 + r * dx0
        x1 = p0 + r * dx1
        # Create face
        v0 = soya.Vertex(scene, x0[0], x0[1], x0[2], diffuse=color)
        v1 = soya.Vertex(scene, x1[0], x1[1], x1[2], diffuse=color)
        f = soya.Face(scene, (v0, v1, v2))
        f.double_sided = 1
    # Extract model
    model = scene.to_model()
    return model
def Arrow(scene, x, n, center=False):
    """Return model for arrow from x in direction n.

    The arrow is a cylinder shaft of length L capped by a cone of
    length l. If center is True, the arrow is shifted backwards along n
    so that its midpoint lies at x.
    """
    # Convert to Numpy arrays
    x = array(x)
    n = array(n)
    # Get tangents
    # NOTE(review): t0, t1 appear unused in this function.
    t0, t1 = tangents(n)
    # Dimensions for arrow
    L = 0.3
    l = 0.35 * L
    r = 0.04 * L
    R = 0.125 * L
    # Center arrow
    if center:
        # NOTE(review): leftover debug output?
        print("Centering!")
        x -= 0.5 * (L + l) * n
    # Create cylinder and cone
    cylinder = Cylinder(scene, x, x + L * n, r)
    cone = Cone(scene, x + L * n, x + (L + l) * n, R)
    # Extract model
    return scene.to_model()
def UnitTetrahedron(color=(0.0, 1.0, 0.0, 0.5)):
    """Return model for the unit tetrahedron with translucent faces
    and thin cylinder edges."""
    info("Plotting unit tetrahedron")
    # Create separate scene (since we will extract a model, not render)
    scene = soya.World()
    # Create vertices
    v0 = soya.Vertex(scene, 0.0, 0.0, 0.0, diffuse=color)
    v1 = soya.Vertex(scene, 1.0, 0.0, 0.0, diffuse=color)
    v2 = soya.Vertex(scene, 0.0, 1.0, 0.0, diffuse=color)
    v3 = soya.Vertex(scene, 0.0, 0.0, 1.0, diffuse=color)
    # Create edges
    e0 = Cylinder(scene, v0, v1, 0.007)
    e1 = Cylinder(scene, v0, v2, 0.007)
    e2 = Cylinder(scene, v0, v3, 0.007)
    e3 = Cylinder(scene, v1, v2, 0.007)
    e4 = Cylinder(scene, v1, v3, 0.007)
    e5 = Cylinder(scene, v2, v3, 0.007)
    # Create faces
    f0 = soya.Face(scene, (v1, v2, v3))
    f1 = soya.Face(scene, (v0, v2, v3))
    f2 = soya.Face(scene, (v0, v1, v3))
    f3 = soya.Face(scene, (v0, v1, v2))
    # Make faces double sided
    f0.double_sided = 1
    f1.double_sided = 1
    f2.double_sided = 1
    f3.double_sided = 1
    # Extract model
    model = scene.to_model()
    return model
def UnitTriangle(color=(0.0, 1.0, 0.0, 0.5)):
    """Return model for the unit triangle with a translucent face
    and thin cylinder edges."""
    info("Plotting unit triangle")
    # Create separate scene (since we will extract a model, not render)
    scene = soya.World()
    # Create vertices
    v0 = soya.Vertex(scene, 0.0, 0.0, 0.0, diffuse=color)
    v1 = soya.Vertex(scene, 1.0, 0.0, 0.0, diffuse=color)
    v2 = soya.Vertex(scene, 0.0, 1.0, 0.0, diffuse=color)
    # Create edges
    e0 = Cylinder(scene, v0, v1, 0.007)
    e1 = Cylinder(scene, v0, v2, 0.007)
    e2 = Cylinder(scene, v1, v2, 0.007)
    # Create face
    f = soya.Face(scene, (v0, v1, v2))
    # Make face double sided
    f.double_sided = 1
    # Extract model
    model = scene.to_model()
    return model
def PointEvaluation(x):
    """Return model for point evaluation at given point.

    Rendered as a small opaque black sphere at x.
    """
    info("Plotting dof: point evaluation at x = %s" % str(x))
    # Make sure point is 3D
    x = to3d(x)
    # Create separate scene (since we will extract a model, not render)
    scene = soya.World()
    # Define material (color) for the sphere
    material = soya.Material()
    material.diffuse = (0.0, 0.0, 0.0, 1.0)
    # Create sphere
    sphere = Sphere(scene, material=material)
    # Scale and move to coordinate
    sphere.scale(0.05, 0.05, 0.05)
    p = sphere.position()
    p.set_xyz(x[0], x[1], x[2])
    sphere.move(p)
    # Extract model
    model = scene.to_model()
    return model
def PointDerivative(x):
    """Return model for evaluation of derivatives at given point.

    Rendered as a medium, mostly transparent sphere (larger and fainter
    than the point-evaluation marker).
    """
    info("Plotting dof: point derivative at x = %s" % str(x))
    # Make sure point is 3D
    x = to3d(x)
    # Create separate scene (since we will extract a model, not render)
    scene = soya.World()
    # Define material (color) for the sphere
    material = soya.Material()
    material.diffuse = (0.0, 0.0, 0.0, 0.2)
    # Create sphere
    sphere = Sphere(scene, material=material)
    # Scale and move to coordinate
    sphere.scale(0.1, 0.1, 0.1)
    p = sphere.position()
    p.set_xyz(x[0], x[1], x[2])
    sphere.move(p)
    # Extract model
    model = scene.to_model()
    return model
def PointSecondDerivative(x):
    """Return model for evaluation of second derivatives at given point.

    Rendered as a large, nearly transparent sphere (larger and fainter
    than the first-derivative marker).
    """
    # Bug fix: the log message previously said "point derivative"
    # (copy-paste from PointDerivative).
    info("Plotting dof: point second derivative at x = %s" % str(x))
    # Make sure point is 3D
    x = to3d(x)
    # Create separate scene (since we will extract a model, not render)
    scene = soya.World()
    # Define material (color) for the sphere
    material = soya.Material()
    material.diffuse = (0.0, 0.0, 0.0, 0.05)
    # Create sphere
    sphere = Sphere(scene, material=material)
    # Scale and move to coordinate
    sphere.scale(0.15, 0.15, 0.15)
    p = sphere.position()
    p.set_xyz(x[0], x[1], x[2])
    sphere.move(p)
    # Extract model
    model = scene.to_model()
    return model
def DirectionalEvaluation(x, n, flip=False, center=False):
    """Return model for directional evaluation at given point in given
    direction, rendered as an arrow.

    If flip is True, the arrow direction is reversed when it would
    otherwise point into the reference cell. If center is True, the
    arrow is centered on x (see Arrow).
    """
    info("Plotting dof: directional evaluation at x = %s in direction n = %s" % (str(x), str(n)))
    # Make sure points are 3D
    x = to3d(x)
    n = to3d(n)
    # Create separate scene (since we will extract a model, not render)
    scene = soya.World()
    # Normalize (and shrink to 3/4 length for display)
    n = array(n)
    n = 0.75 * n / norm(n)
    # Flip normal if necessary
    if flip and not pointing_outwards(x, n):
        info("Flipping direction of arrow so it points outward.")
        n = -n
    # Create arrow
    arrow = Arrow(scene, x, n, center)
    # Extract model
    model = scene.to_model()
    return model
def DirectionalDerivative(x, n):
    """Return model for directional derivative at given point in given
    direction, rendered as a short line (thin cylinder) through x."""
    info("Plotting dof: directional derivative at x = %s in direction n = %s" % (str(x), str(n)))
    # Make sure points are 3D
    x = to3d(x)
    n = to3d(n)
    # Create separate scene (since we will extract a model, not render)
    scene = soya.World()
    # Normalize (and shrink to 3/4 length for display)
    n = array(n)
    n = 0.75 * n / norm(n)
    # Create line
    line = Cylinder(scene, x - 0.07 * n, x + 0.07 * n, 0.005)
    # Extract model
    model = scene.to_model()
    return model
def IntegralMoment(cellname, num_moments, x=None):
    """Return model for integral moment for given element.

    Rendered as a translucent sphere placed (by default) at the cell
    midpoint-ish position; radius shrinks and color alternates
    black/white with each additional moment so stacked moments remain
    distinguishable.
    """
    info("Plotting dof: integral moment")
    # Set position
    if x is None and cellname == "triangle":
        a = 1.0 / (2 + sqrt(2)) # this was a fun exercise
        x = (a, a, 0.0)
    elif x is None:
        a = 1.0 / (3 + sqrt(3)) # so was this
        x = (a, a, a)
    # Make sure point is 3D
    x = to3d(x)
    # Fancy scaling of radius and color
    r = 1.0 / (num_moments + 5)
    if num_moments % 2 == 0:
        c = 1.0
    else:
        c = 0.0
    # Create separate scene (since we will extract a model, not render)
    scene = soya.World()
    # Define material (color) for the sphere
    material = soya.Material()
    material.diffuse = (c, c, c, 0.7)
    # Create sphere
    sphere = Sphere(scene, material=material)
    # Scale and move to coordinate
    sphere.scale(r, r, r)
    p = sphere.position()
    p.set_xyz(x[0], x[1], x[2])
    sphere.move(p)
    # Extract model
    model = scene.to_model()
    return model
def create_cell_model(element):
    """Create Soya3D model for the element's reference cell.

    Returns a pair (model, is3d) where is3d indicates a tetrahedron.
    Calls error() (which does not return) for unhandled cell types.
    """
    # Get color
    family = element.family()
    if family not in element_colors:
        warning("Don't know a good color for elements of type '%s', using default color." % family)
        family = "Lagrange"
    color = element_colors[family]
    # Append the alpha channel used for the translucent cell faces
    color = (color[0], color[1], color[2], 0.7)
    # Create model based on cell type
    cellname = element.cell().cellname()
    if cellname == "triangle":
        return UnitTriangle(color), False
    elif cellname == "tetrahedron":
        return UnitTetrahedron(color), True
    error("Unable to plot element, unhandled cell type: %s" % str(cellname))
def create_dof_models(element):
    """Create Soya3D models for the element's degrees of freedom.

    Returns a pair (models, num_moments) where num_moments counts the
    integral-moment dofs (used for radius/color cycling). Each dof is a
    (dof_type, point_dict) pair in FIAT's dual-basis format.
    """
    # Flags for whether to flip and center arrows, per directional dof type
    directional = {"PointScaledNormalEval": (True, False),
                   "PointEdgeTangent": (False, True),
                   "PointFaceTangent": (False, True)}
    # Elements not supported fully by FIAT
    unsupported = {"Argyris": argyris_dofs,
                   "Arnold-Winther": arnold_winther_dofs,
                   "Hermite": hermite_dofs,
                   "Mardal-Tai-Winther": mardal_tai_winther_dofs,
                   "Morley": morley_dofs}
    # Check if element is supported
    family = element.family()
    if family not in unsupported:
        # Create FIAT element and get dofs
        fiat_element = create_element(element)
        dofs = [(dof.get_type_tag(), dof.get_point_dict()) for dof in fiat_element.dual_basis()]
    else:
        # Bypass FIAT and set the dofs ourselves
        dofs = unsupported[family](element)
    # Iterate over dofs and add models
    models = []
    num_moments = 0
    for (dof_type, L) in dofs:
        # Check type of dof
        if dof_type == "PointEval":
            # Point evaluation, just get point
            points = list(L.keys())
            if not len(points) == 1:
                error("Strange dof, single point expected.")
            x = points[0]
            # Generate model
            models.append(PointEvaluation(x))
        elif dof_type == "PointDeriv":
            # Evaluation of derivatives at point
            points = list(L.keys())
            if not len(points) == 1:
                error("Strange dof, single point expected.")
            x = points[0]
            # Generate model
            models.append(PointDerivative(x))
        elif dof_type == "PointSecondDeriv":
            # Evaluation of second derivatives at point
            points = list(L.keys())
            if not len(points) == 1:
                error("Strange dof, single point expected.")
            x = points[0]
            # Generate model
            models.append(PointSecondDerivative(x))
        elif dof_type in directional:
            # Normal evaluation, get point and normal
            points = list(L.keys())
            if not len(points) == 1:
                error("Strange dof, single point expected.")
            x = points[0]
            # Direction components are the weights' first entries
            n = [xx[0] for xx in L[x]]
            # Generate model
            flip, center = directional[dof_type]
            models.append(DirectionalEvaluation(x, n, flip, center))
        elif dof_type == "PointNormalDeriv":
            # Evaluation of normal derivatives at point
            points = list(L.keys())
            if not len(points) == 1:
                error("Strange dof, single point expected.")
            x = points[0]
            n = [xx[0] for xx in L[x]]
            # Generate model
            models.append(DirectionalDerivative(x, n))
        elif dof_type in ("FrobeniusIntegralMoment", "IntegralMoment", "ComponentPointEval"):
            # Generate model
            models.append(IntegralMoment(element.cell().cellname(), num_moments))
            # Count the number of integral moments
            num_moments += 1
        else:
            error("Unable to plot dof, unhandled dof type: %s" % str(dof_type))
    return models, num_moments
def create_notation_models():
    """Create Soya3D models for the dof-notation legend.

    One row per dof symbol, stacked top to bottom starting at y = 1.3.
    Some markers are appended multiple times at the same position,
    mirroring the repeated-dof hack used in the *_dofs functions.
    """
    models = []
    y = 1.3
    dy = -0.325
    # Create model for evaluation
    models.append(PointEvaluation([0, y]))
    y += dy
    # Create model for derivative evaluation
    models.append(PointDerivative([0, y]))
    models.append(PointDerivative([0, y]))
    models.append(PointDerivative([0, y]))
    y += dy
    # Create model for second derivative evaluation
    models.append(PointSecondDerivative([0, y]))
    models.append(PointSecondDerivative([0, y]))
    models.append(PointSecondDerivative([0, y]))
    y += dy
    # Create model for directional evaluation
    models.append(DirectionalEvaluation([0, y], [1, 1], False, True))
    y += dy
    # Create model for directional derivative
    models.append(DirectionalDerivative([0, y], [1, 1]))
    y += dy
    # Create model for integral moments
    models.append(IntegralMoment("tetrahedron", 0, [0, y]))
    models.append(IntegralMoment("tetrahedron", 1, [0, y]))
    models.append(IntegralMoment("tetrahedron", 2, [0, y]))
    return models
def pointing_outwards(x, n):
    """Return True if stepping from x a short way along n leaves the
    reference cell; used for flipping dofs so arrows point outward."""
    eps = 1e-10
    probe = array(x) + 0.1 * array(n)
    # Outside one of the coordinate planes?
    if probe[0] < -eps or probe[1] < -eps or probe[2] < -eps:
        return True
    # Beyond the diagonal face x + y + z = 1?
    return probe[2] > 1.0 - probe[0] - probe[1] + eps
def to3d(x):
    """Return x as a 3D point, padding a 2D point with z = 0."""
    return (x[0], x[1], 0.0) if len(x) == 2 else x
def arnold_winther_dofs(element):
    """Special fix for Arnold-Winther elements until Rob fixes in FIAT.

    Returns a hard-coded dual basis on the reference triangle: three
    (tripled) point evaluations per vertex, twelve scaled normal
    evaluations along the edges, and three interior integral moments.
    """
    if not element.cell().cellname() == "triangle":
        # Bug fix: the message previously said "Mardal-Tai-Winther"
        # (copy-paste from mardal_tai_winther_dofs).
        error("Unable to plot element, only know how to plot Arnold-Winther on triangles.")
    return [("PointEval", {(0.0, 0.0): [(1.0, ())]}), # hack, same dof three times
            ("PointEval", {(0.0, 0.0): [(1.0, ())]}), # hack, same dof three times
            ("PointEval", {(0.0, 0.0): [(1.0, ())]}), # hack, same dof three times
            ("PointEval", {(1.0, 0.0): [(1.0, ())]}), # hack, same dof three times
            ("PointEval", {(1.0, 0.0): [(1.0, ())]}), # hack, same dof three times
            ("PointEval", {(1.0, 0.0): [(1.0, ())]}), # hack, same dof three times
            ("PointEval", {(0.0, 1.0): [(1.0, ())]}), # hack, same dof three times
            ("PointEval", {(0.0, 1.0): [(1.0, ())]}), # hack, same dof three times
            ("PointEval", {(0.0, 1.0): [(1.0, ())]}), # hack, same dof three times
            ("PointScaledNormalEval", {(1.0 / 5, 0.0): [(0.0, (0,)), (-1.0, (1,))]}),
            ("PointScaledNormalEval", {(2.0 / 5, 0.0): [(0.0, (0,)), (-1.0, (1,))]}),
            ("PointScaledNormalEval", {(3.0 / 5, 0.0): [(0.0, (0,)), (-1.0, (1,))]}),
            ("PointScaledNormalEval", {(4.0 / 5, 0.0): [(0.0, (0,)), (-1.0, (1,))]}),
            ("PointScaledNormalEval", {(4.0 / 5, 1.0 / 5.0): [(1.0, (0,)), (1.0, (1,))]}),
            ("PointScaledNormalEval", {(3.0 / 5, 2.0 / 5.0): [(1.0, (0,)), (1.0, (1,))]}),
            ("PointScaledNormalEval", {(2.0 / 5, 3.0 / 5.0): [(1.0, (0,)), (1.0, (1,))]}),
            ("PointScaledNormalEval", {(1.0 / 5, 4.0 / 5.0): [(1.0, (0,)), (1.0, (1,))]}),
            ("PointScaledNormalEval", {(0.0, 1.0 / 5.0): [(-1.0, (0,)), (0.0, (1,))]}),
            ("PointScaledNormalEval", {(0.0, 2.0 / 5.0): [(-1.0, (0,)), (0.0, (1,))]}),
            ("PointScaledNormalEval", {(0.0, 3.0 / 5.0): [(-1.0, (0,)), (0.0, (1,))]}),
            ("PointScaledNormalEval", {(0.0, 4.0 / 5.0): [(-1.0, (0,)), (0.0, (1,))]}),
            ("IntegralMoment", None),
            ("IntegralMoment", None),
            ("IntegralMoment", None)]
def argyris_dofs(element):
    """Special fix for Argyris elements until Rob fixes in FIAT.

    Hard-coded dual basis for the quintic Argyris element on the
    reference triangle: point evaluations, (doubled) first derivatives
    and (tripled) second derivatives at the vertices, plus normal
    derivatives at the edge midpoints.
    """
    if not element.degree() == 5:
        error("Unable to plot element, only know how to plot quintic Argyris elements.")
    if not element.cell().cellname() == "triangle":
        error("Unable to plot element, only know how to plot Argyris on triangles.")
    return [("PointEval", {(0.0, 0.0): [(1.0, ())]}),
            ("PointEval", {(1.0, 0.0): [(1.0, ())]}),
            ("PointEval", {(0.0, 1.0): [(1.0, ())]}),
            ("PointDeriv", {(0.0, 0.0): [(1.0, ())]}), # hack, same dof twice
            ("PointDeriv", {(0.0, 0.0): [(1.0, ())]}), # hack, same dof twice
            ("PointDeriv", {(1.0, 0.0): [(1.0, ())]}), # hack, same dof twice
            ("PointDeriv", {(1.0, 0.0): [(1.0, ())]}), # hack, same dof twice
            ("PointDeriv", {(0.0, 1.0): [(1.0, ())]}), # hack, same dof twice
            ("PointDeriv", {(0.0, 1.0): [(1.0, ())]}), # hack, same dof twice
            ("PointSecondDeriv", {(0.0, 0.0): [(1.0, ())]}), # hack, same dof three times
            ("PointSecondDeriv", {(0.0, 0.0): [(1.0, ())]}), # hack, same dof three times
            ("PointSecondDeriv", {(0.0, 0.0): [(1.0, ())]}), # hack, same dof three times
            ("PointSecondDeriv", {(1.0, 0.0): [(1.0, ())]}), # hack, same dof three times
            ("PointSecondDeriv", {(1.0, 0.0): [(1.0, ())]}), # hack, same dof three times
            ("PointSecondDeriv", {(1.0, 0.0): [(1.0, ())]}), # hack, same dof three times
            ("PointSecondDeriv", {(0.0, 1.0): [(1.0, ())]}), # hack, same dof three times
            ("PointSecondDeriv", {(0.0, 1.0): [(1.0, ())]}), # hack, same dof three times
            ("PointSecondDeriv", {(0.0, 1.0): [(1.0, ())]}), # hack, same dof three times
            ("PointNormalDeriv", {(0.5, 0.0): [(0.0, (0,)), (-1.0, (1,))]}),
            ("PointNormalDeriv", {(0.5, 0.5): [(1.0, (0,)), (1.0, (1,))]}),
            ("PointNormalDeriv", {(0.0, 0.5): [(-1.0, (0,)), (0.0, (1,))]})]
def hermite_dofs(element):
    """Special fix for Hermite elements until Rob fixes in FIAT.

    Hard-coded dual basis: point evaluations plus (doubled/tripled)
    first derivatives at each vertex, and point evaluations at the
    cell/facet midpoints. Selects the 2D or 3D table by cell name.
    """
    dofs_2d = [("PointEval", {(0.0, 0.0): [(1.0, ())]}),
               ("PointEval", {(1.0, 0.0): [(1.0, ())]}),
               ("PointEval", {(0.0, 1.0): [(1.0, ())]}),
               ("PointDeriv", {(0.0, 0.0): [(1.0, ())]}), # hack, same dof twice
               ("PointDeriv", {(0.0, 0.0): [(1.0, ())]}), # hack, same dof twice
               ("PointDeriv", {(1.0, 0.0): [(1.0, ())]}), # hack, same dof twice
               ("PointDeriv", {(1.0, 0.0): [(1.0, ())]}), # hack, same dof twice
               ("PointDeriv", {(0.0, 1.0): [(1.0, ())]}), # hack, same dof twice
               ("PointDeriv", {(0.0, 1.0): [(1.0, ())]}), # hack, same dof twice
               ("PointEval", {(1.0 / 3, 1.0 / 3): [(1.0, ())]})]
    dofs_3d = [("PointEval", {(0.0, 0.0, 0.0): [(1.0, ())]}),
               ("PointEval", {(1.0, 0.0, 0.0): [(1.0, ())]}),
               ("PointEval", {(0.0, 1.0, 0.0): [(1.0, ())]}),
               ("PointEval", {(0.0, 0.0, 1.0): [(1.0, ())]}),
               ("PointDeriv", {(0.0, 0.0, 0.0): [(1.0, ())]}), # hack, same dof three times
               ("PointDeriv", {(0.0, 0.0, 0.0): [(1.0, ())]}), # hack, same dof three times
               ("PointDeriv", {(0.0, 0.0, 0.0): [(1.0, ())]}), # hack, same dof three times
               ("PointDeriv", {(1.0, 0.0, 0.0): [(1.0, ())]}), # hack, same dof three times
               ("PointDeriv", {(1.0, 0.0, 0.0): [(1.0, ())]}), # hack, same dof three times
               ("PointDeriv", {(1.0, 0.0, 0.0): [(1.0, ())]}), # hack, same dof three times
               ("PointDeriv", {(0.0, 1.0, 0.0): [(1.0, ())]}), # hack, same dof three times
               ("PointDeriv", {(0.0, 1.0, 0.0): [(1.0, ())]}), # hack, same dof three times
               ("PointDeriv", {(0.0, 1.0, 0.0): [(1.0, ())]}), # hack, same dof three times
               ("PointDeriv", {(0.0, 0.0, 1.0): [(1.0, ())]}), # hack, same dof three times
               ("PointDeriv", {(0.0, 0.0, 1.0): [(1.0, ())]}), # hack, same dof three times
               ("PointDeriv", {(0.0, 0.0, 1.0): [(1.0, ())]}), # hack, same dof three times
               ("PointEval", {(1.0 / 3, 1.0 / 3, 1.0 / 3): [(1.0, ())]}),
               ("PointEval", {(0.0, 1.0 / 3, 1.0 / 3): [(1.0, ())]}),
               ("PointEval", {(1.0 / 3, 0.0, 1.0 / 3): [(1.0, ())]}),
               ("PointEval", {(1.0 / 3, 1.0 / 3, 0.0): [(1.0, ())]})]
    if element.cell().cellname() == "triangle":
        return dofs_2d
    else:
        return dofs_3d
def mardal_tai_winther_dofs(element):
    """Special fix for Mardal-Tai-Winther elements until Rob fixes in FIAT.

    Hard-coded dual basis on the reference triangle: two scaled normal
    evaluations per edge plus one tangential evaluation per edge midpoint.
    """
    if not element.cell().cellname() == "triangle":
        error("Unable to plot element, only know how to plot Mardal-Tai-Winther on triangles.")
    return [("PointScaledNormalEval", {(1.0 / 3, 0.0): [(0.0, (0,)), (-1.0, (1,))]}),
            ("PointScaledNormalEval", {(2.0 / 3, 0.0): [(0.0, (0,)), (-1.0, (1,))]}),
            ("PointScaledNormalEval", {(2.0 / 3, 1.0 / 3.0): [(1.0, (0,)), (1.0, (1,))]}),
            ("PointScaledNormalEval", {(1.0 / 3, 2.0 / 3.0): [(1.0, (0,)), (1.0, (1,))]}),
            ("PointScaledNormalEval", {(0.0, 1.0 / 3.0): [(-1.0, (0,)), (0.0, (1,))]}),
            ("PointScaledNormalEval", {(0.0, 2.0 / 3.0): [(-1.0, (0,)), (0.0, (1,))]}),
            ("PointEdgeTangent", {(0.5, 0.0): [(-1.0, (0,)), (0.0, (1,))]}),
            ("PointEdgeTangent", {(0.5, 0.5): [(-1.0, (0,)), (1.0, (1,))]}),
            ("PointEdgeTangent", {(0.0, 0.5): [(0.0, (0,)), (-1.0, (1,))]})]
def morley_dofs(element):
    """Special fix for Morley elements until Rob fixes in FIAT.

    Hard-coded dual basis on the reference triangle: a point evaluation
    at each vertex and a normal derivative at each edge midpoint.
    """
    if element.cell().cellname() != "triangle":
        error("Unable to plot element, only know how to plot Morley on triangles.")
    vertex_evals = [("PointEval", {vertex: [(1.0, ())]})
                    for vertex in ((0.0, 0.0), (1.0, 0.0), (0.0, 1.0))]
    normal_derivs = [("PointNormalDeriv", {(0.5, 0.0): [(0.0, (0,)), (-1.0, (1,))]}),
                     ("PointNormalDeriv", {(0.5, 0.5): [(1.0, (0,)), (1.0, (1,))]}),
                     ("PointNormalDeriv", {(0.0, 0.5): [(-1.0, (0,)), (0.0, (1,))]})]
    return vertex_evals + normal_derivs
/DIY-FilingsResearch-0.3.0.tar.gz/DIY-FilingsResearch-0.3.0/threadedSearch.py |
# Multithreading Searcher and Indexer.
# One Thread Indexes new documents in the background
# while thread in the foreground waits for new user queries
# This searcher and indexer works from the terminal, simply start it.
# It begins indexing files in the directory you point it to.
# import needed system modules
import os
import threading
# Import necessary Py-Lucene modules
import lucene
from org.apache.lucene.store import SimpleFSDirectory
from org.apache.lucene.analysis.standard import StandardAnalyzer
from org.apache.lucene.util import Version
from org.apache.lucene.index import IndexWriter
from org.apache.lucene.index import IndexWriterConfig, DirectoryReader
from org.apache.lucene.search import IndexSearcher
from org.apache.lucene.queryparser.classic import QueryParser
from org.apache.lucene.document import Document, Field, TextField
from org.apache.lucene.search.highlight import Highlighter, QueryScorer
from org.apache.lucene.search.highlight import SimpleFragmenter
from org.apache.lucene.search.highlight import NullFragmenter
from org.apache.lucene.search.highlight import SimpleHTMLFormatter
from java.io import File
class Indexer(threading.Thread):
    """Background thread that walks a directory tree and adds every file
    to a Lucene index.

    NOTE(review): run() relies on a module-level `env` (presumably the
    JCC VM handle returned by lucene.initVM()) that is not defined in
    this part of the file -- confirm it exists before the thread starts.
    """
    # set some initial values for the class, the root directory to
    # start indexing and pass in a writer instance
    def __init__(self, root, writer, directoryToWalk):
        threading.Thread.__init__(self)
        self.root = root
        self.writer = writer
        self.directory = directoryToWalk
    def run(self):
        # A thread touching the JVM through Py-Lucene must attach itself first.
        env.attachCurrentThread()
        self.indexDocs()
    # start indexing beginning at the root directory
    def indexDocs(self):
        # Index each readable file as a Document with stored fields
        # "name", "path" and (when non-empty) "contents". Files are read
        # as iso-8859-1 so any byte sequence decodes without error.
        for self.root, dirnames, filenames in os.walk(self.directory):
            for filename in filenames:
                try:
                    path = os.path.join(self.root, filename)
                    file = open(path)
                    contents = unicode(file.read(), 'iso-8859-1')
                    file.close()
                    doc = Document()
                    doc.add(Field("name", filename, TextField.TYPE_STORED))
                    doc.add(Field("path", path, TextField.TYPE_STORED))
                    if len(contents) > 0:
                        doc.add(Field("contents", contents,
                                      TextField.TYPE_STORED))
                    else:
                        print "warning: the file is empty %s" % filename
                    self.writer.addDocument(doc)
                    # Commit per document so searches see results immediately.
                    self.writer.commit()
                except Exception, e:
                    # Broad catch keeps the indexer thread alive on bad files.
                    print "Failed in indexDocs:", e
class Queryer():
    """Interactive console search over a Lucene index with hit highlighting.

    Whole highlighted documents are written to hits_dir and short hit
    fragments to frags_dir (if given).

    NOTE(review): run() references StringReader, which is not imported in
    the visible import block (only java.io.File is) -- confirm it is
    imported elsewhere or add it.
    """
    def __init__(self, store_dir, hits_dir, frags_dir=None):
        # store_dir is the location of our generated lucene index
        # hits_dir is the location of the highlighted document hits
        # frags_dir is the location of the document hit fragments - optional
        self.store_dir = store_dir
        self.hits_dir = hits_dir
        self.frags_dir = frags_dir
        # Create the index/output directories on first use.
        if not os.path.exists(self.store_dir):
            os.mkdir(self.store_dir)
        if not os.path.exists(self.hits_dir):
            os.mkdir(self.hits_dir)
        if self.frags_dir is not None and not os.path.exists(self.frags_dir):
            os.mkdir(self.frags_dir)
        self.directory = SimpleFSDirectory(File(self.store_dir))
        # For now I just use the StandardAnalyzer
        self.analyzer = StandardAnalyzer(Version.LUCENE_43)
        config = IndexWriterConfig(Version.LUCENE_43, self.analyzer)
        self.writer = IndexWriter(self.directory, config)
    def run(self, writer=None, analyzer=None):
        # Prompt for queries until the user presses enter on an empty line.
        if writer is None:
            writer = self.writer
        if analyzer is None:
            analyzer = self.analyzer
        searcher = IndexSearcher(DirectoryReader.open(\
            SimpleFSDirectory.open(File(self.store_dir))))
        while True:
            print
            print "Hit enter with no input to quit."
            command = raw_input("Query:")
            if command == '':
                return
            print "Searching for:", command
            query = QueryParser(Version.LUCENE_43, "contents",
                                analyzer).parse(command)
            # We'll just show the top 10 matching documents for now
            scoreDocs = searcher.search(query, 10).scoreDocs
            print "%s total matching documents." % len(scoreDocs)
            # Highlight the matching text in red
            # NOTE(review): the backslash continues this string literal, so
            # the next line's leading whitespace becomes part of the HTML tag
            # -- confirm this is intended.
            highlighter = Highlighter(SimpleHTMLFormatter('<b><font color\
            ="red">', '</font></b>'), QueryScorer(query))
            # Using NullFragmenter since we still want to see
            # the whole document
            highlighter.setTextFragmenter(NullFragmenter())
            for scoreDoc in scoreDocs:
                doc = searcher.doc(scoreDoc.doc)
                tokenStream = analyzer.tokenStream("contents",
                                                   StringReader(doc.get("contents")))
                # arg 3: the maximum number of fragments
                # arg 4: the separator used to intersperse the
                # document fragments (typically "...")
                # arg 3 and 4 don't really matter with NullFragmenter
                result = highlighter.getBestFragments(tokenStream,
                                                      doc.get("contents"), 2, "...")
                if len(result) > 10:
                    file_handler = open(self.hits_dir + '/' + doc.get("name"),
                                        'w+')
                    file_handler.write(result)
            # create hit fragments, if we want to show them
            # arg 1: fragment size
            highlighter.setTextFragmenter(SimpleFragmenter(200))
            for scoreDoc in scoreDocs:
                doc = searcher.doc(scoreDoc.doc)
                tokenStream = analyzer.tokenStream("contents",
                                                   StringReader(doc.get("contents")))
                result = highlighter.getBestFragments(tokenStream,
                                                      doc.get("contents"), 2, "...")
                if len(result) > 10:
                    file_handler = open(self.frags_dir + '/' + doc.get("name"),
                                        'w+')
                    file_handler.write(result)
/CustomPipeline-0.0.3-py3-none-any.whl/rplibs/yaml/yaml_py3/scanner.py |
__all__ = ['Scanner', 'ScannerError']
from .error import MarkedYAMLError
from .tokens import *
class ScannerError(MarkedYAMLError):
    """Raised for lexical errors found while scanning the YAML stream."""
    pass
class SimpleKey:
    # See below simple keys treatment.
    """Record of a potential simple key (a key with no '?' indicator).

    Stores where the key candidate begins so the scanner can later
    insert a KEY token at token_number once the matching ':' is found.
    """
    def __init__(self, token_number, required, index, line, column, mark):
        # Position in the overall token stream where KEY would be inserted.
        self.token_number = token_number
        # Whether a ':' must follow here (semantics defined by the
        # scanner code beyond this excerpt).
        self.required = required
        # Stream position (character index, line, column) and mark for errors.
        self.index = index
        self.line = line
        self.column = column
        self.mark = mark
class Scanner:
    def __init__(self):
        """Initialize the scanner.

        Sets up token buffering, indentation tracking and the
        possible-simple-key bookkeeping, and emits the initial
        STREAM-START token.
        """
        # It is assumed that Scanner and Reader will have a common descendant.
        # Reader do the dirty work of checking for BOM and converting the
        # input data to Unicode. It also adds NUL to the end.
        #
        # Reader supports the following methods
        #   self.peek(i=0)       # peek the next i-th character
        #   self.prefix(l=1)     # peek the next l characters
        #   self.forward(l=1)    # read the next l characters and move the pointer.
        # Had we reached the end of the stream?
        self.done = False
        # The number of unclosed '{' and '['. `flow_level == 0` means block
        # context.
        self.flow_level = 0
        # List of processed tokens that are not yet emitted.
        self.tokens = []
        # Add the STREAM-START token.
        self.fetch_stream_start()
        # Number of tokens that were emitted through the `get_token` method.
        self.tokens_taken = 0
        # The current indentation level.
        self.indent = -1
        # Past indentation levels.
        self.indents = []
        # Variables related to simple keys treatment.
        # A simple key is a key that is not denoted by the '?' indicator.
        # Example of simple keys:
        #   ---
        #   block simple key: value
        #   ? not a simple key:
        #   : { flow simple key: value }
        # We emit the KEY token before all keys, so when we find a potential
        # simple key, we try to locate the corresponding ':' indicator.
        # Simple keys should be limited to a single line and 1024 characters.
        # Can a simple key start at the current position? A simple key may
        # start:
        # - at the beginning of the line, not counting indentation spaces
        #       (in block context),
        # - after '{', '[', ',' (in the flow context),
        # - after '?', ':', '-' (in the block context).
        # In the block context, this flag also signifies if a block collection
        # may start at the current position.
        self.allow_simple_key = True
        # Keep track of possible simple keys. This is a dictionary. The key
        # is `flow_level`; there can be no more that one possible simple key
        # for each level. The value is a SimpleKey record:
        #   (token_number, required, index, line, column, mark)
        # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
        # '[', or '{' tokens.
        self.possible_simple_keys = {}
# Public methods.
def check_token(self, *choices):
# Check if the next token is one of the given types.
while self.need_more_tokens():
self.fetch_more_tokens()
if self.tokens:
if not choices:
return True
for choice in choices:
if isinstance(self.tokens[0], choice):
return True
return False
def peek_token(self):
# Return the next token, but do not delete if from the queue.
while self.need_more_tokens():
self.fetch_more_tokens()
if self.tokens:
return self.tokens[0]
def get_token(self):
# Return the next token.
while self.need_more_tokens():
self.fetch_more_tokens()
if self.tokens:
self.tokens_taken += 1
return self.tokens.pop(0)
# Private methods.
    def need_more_tokens(self):
        # Decide whether fetch_more_tokens() must run before a token can be
        # handed out. Note: deliberately falls through (returning None,
        # which is falsy) when no more tokens are needed.
        if self.done:
            return False
        if not self.tokens:
            return True
        # The current token may be a potential simple key, so we
        # need to look further.
        self.stale_possible_simple_keys()
        if self.next_possible_simple_key() == self.tokens_taken:
            return True
    def fetch_more_tokens(self):
        """Scan the next token(s) from the stream into self.tokens.

        Skips whitespace/comments, maintains indentation and simple-key
        state, then dispatches on the next character. Raises ScannerError
        for a character that cannot start any token.
        """
        # Eat whitespaces and comments until we reach the next token.
        self.scan_to_next_token()
        # Remove obsolete possible simple keys.
        self.stale_possible_simple_keys()
        # Compare the current indentation and column. It may add some tokens
        # and decrease the current indentation level.
        self.unwind_indent(self.column)
        # Peek the next character.
        ch = self.peek()
        # Is it the end of stream?
        if ch == '\0':
            return self.fetch_stream_end()
        # Is it a directive?
        if ch == '%' and self.check_directive():
            return self.fetch_directive()
        # Is it the document start?
        if ch == '-' and self.check_document_start():
            return self.fetch_document_start()
        # Is it the document end?
        if ch == '.' and self.check_document_end():
            return self.fetch_document_end()
        # TODO: support for BOM within a stream.
        #if ch == '\uFEFF':
        #    return self.fetch_bom()    <-- issue BOMToken
        # Note: the order of the following checks is NOT significant.
        # Is it the flow sequence start indicator?
        if ch == '[':
            return self.fetch_flow_sequence_start()
        # Is it the flow mapping start indicator?
        if ch == '{':
            return self.fetch_flow_mapping_start()
        # Is it the flow sequence end indicator?
        if ch == ']':
            return self.fetch_flow_sequence_end()
        # Is it the flow mapping end indicator?
        if ch == '}':
            return self.fetch_flow_mapping_end()
        # Is it the flow entry indicator?
        if ch == ',':
            return self.fetch_flow_entry()
        # Is it the block entry indicator?
        if ch == '-' and self.check_block_entry():
            return self.fetch_block_entry()
        # Is it the key indicator?
        if ch == '?' and self.check_key():
            return self.fetch_key()
        # Is it the value indicator?
        if ch == ':' and self.check_value():
            return self.fetch_value()
        # Is it an alias?
        if ch == '*':
            return self.fetch_alias()
        # Is it an anchor?
        if ch == '&':
            return self.fetch_anchor()
        # Is it a tag?
        if ch == '!':
            return self.fetch_tag()
        # Is it a literal scalar?
        if ch == '|' and not self.flow_level:
            return self.fetch_literal()
        # Is it a folded scalar?
        if ch == '>' and not self.flow_level:
            return self.fetch_folded()
        # Is it a single quoted scalar?
        if ch == '\'':
            return self.fetch_single()
        # Is it a double quoted scalar?
        if ch == '\"':
            return self.fetch_double()
        # It must be a plain scalar then.
        if self.check_plain():
            return self.fetch_plain()
        # No? It's an error. Let's produce a nice error message.
        raise ScannerError("while scanning for the next token", None,
                "found character %r that cannot start any token" % ch,
                self.get_mark())
# Simple keys treatment.
def next_possible_simple_key(self):
# Return the number of the nearest possible simple key. Actually we
# don't need to loop through the whole dictionary. We may replace it
# with the following code:
# if not self.possible_simple_keys:
# return None
# return self.possible_simple_keys[
# min(self.possible_simple_keys.keys())].token_number
min_token_number = None
for level in self.possible_simple_keys:
key = self.possible_simple_keys[level]
if min_token_number is None or key.token_number < min_token_number:
min_token_number = key.token_number
return min_token_number
def stale_possible_simple_keys(self):
# Remove entries that are no longer possible simple keys. According to
# the YAML specification, simple keys
# - should be limited to a single line,
# - should be no longer than 1024 characters.
# Disabling this procedure will allow simple keys of any length and
# height (may cause problems if indentation is broken though).
for level in list(self.possible_simple_keys):
key = self.possible_simple_keys[level]
if key.line != self.line \
or self.index-key.index > 1024:
if key.required:
raise ScannerError("while scanning a simple key", key.mark,
"could not found expected ':'", self.get_mark())
del self.possible_simple_keys[level]
    def save_possible_simple_key(self):
        """Record the upcoming token as a simple-key candidate for the
        current flow level.

        Called for ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
        """
        # The next token may start a simple key. We check if it's possible
        # and save its position.
        # Check if a simple key is required at the current position.
        required = not self.flow_level and self.indent == self.column
        # A simple key is required only if it is the first token in the current
        # line. Therefore it is always allowed.
        assert self.allow_simple_key or not required
        # The next token might be a simple key. Let's save its number and
        # position.
        if self.allow_simple_key:
            self.remove_possible_simple_key()
            token_number = self.tokens_taken+len(self.tokens)
            key = SimpleKey(token_number, required,
                    self.index, self.line, self.column, self.get_mark())
            self.possible_simple_keys[self.flow_level] = key
def remove_possible_simple_key(self):
# Remove the saved possible key position at the current flow level.
if self.flow_level in self.possible_simple_keys:
key = self.possible_simple_keys[self.flow_level]
if key.required:
raise ScannerError("while scanning a simple key", key.mark,
"could not found expected ':'", self.get_mark())
del self.possible_simple_keys[self.flow_level]
# Indentation functions.
    def unwind_indent(self, column):
        """Pop indentation levels deeper than *column*, emitting a
        BLOCK-END token for each popped level.

        In the flow context indentation is ignored entirely.
        """
        ## In flow context, tokens should respect indentation.
        ## Actually the condition should be `self.indent >= column` according to
        ## the spec. But this condition will prohibit intuitively correct
        ## constructions such as
        ## key : {
        ## }
        #if self.flow_level and self.indent > column:
        #    raise ScannerError(None, None,
        #            "invalid indentation or unclosed '[' or '{'",
        #            self.get_mark())
        # In the flow context, indentation is ignored. We make the scanner less
        # restrictive than the specification requires.
        if self.flow_level:
            return
        # In block context, we may need to issue the BLOCK-END tokens.
        while self.indent > column:
            mark = self.get_mark()
            self.indent = self.indents.pop()
            self.tokens.append(BlockEndToken(mark, mark))
def add_indent(self, column):
# Check if we need to increase indentation.
if self.indent < column:
self.indents.append(self.indent)
self.indent = column
return True
return False
# Fetchers.
    def fetch_stream_start(self):
        """Emit the STREAM-START token that opens every token stream."""
        # We always add STREAM-START as the first token and STREAM-END as the
        # last token.
        # Read the token.
        mark = self.get_mark()
        # Add STREAM-START.
        self.tokens.append(StreamStartToken(mark, mark,
            encoding=self.encoding))
    def fetch_stream_end(self):
        """Emit STREAM-END, close all open blocks, and mark scanning done."""
        # Set the current indentation to -1.
        self.unwind_indent(-1)
        # Reset simple keys.
        self.remove_possible_simple_key()
        self.allow_simple_key = False
        self.possible_simple_keys = {}
        # Read the token.
        mark = self.get_mark()
        # Add STREAM-END.
        self.tokens.append(StreamEndToken(mark, mark))
        # The stream is finished.
        self.done = True
    def fetch_directive(self):
        """Scan a '%' directive line and emit a DIRECTIVE token."""
        # Set the current indentation to -1.
        self.unwind_indent(-1)
        # Reset simple keys.
        self.remove_possible_simple_key()
        self.allow_simple_key = False
        # Scan and add DIRECTIVE.
        self.tokens.append(self.scan_directive())
    def fetch_document_start(self):
        """Emit a DOCUMENT-START token for a '---' marker."""
        self.fetch_document_indicator(DocumentStartToken)
    def fetch_document_end(self):
        """Emit a DOCUMENT-END token for a '...' marker."""
        self.fetch_document_indicator(DocumentEndToken)
    def fetch_document_indicator(self, TokenClass):
        """Consume a 3-character document marker and emit *TokenClass*."""
        # Set the current indentation to -1.
        self.unwind_indent(-1)
        # Reset simple keys. Note that there could not be a block collection
        # after '---'.
        self.remove_possible_simple_key()
        self.allow_simple_key = False
        # Add DOCUMENT-START or DOCUMENT-END.
        start_mark = self.get_mark()
        self.forward(3)
        end_mark = self.get_mark()
        self.tokens.append(TokenClass(start_mark, end_mark))
    def fetch_flow_sequence_start(self):
        """Emit a FLOW-SEQUENCE-START token for '['."""
        self.fetch_flow_collection_start(FlowSequenceStartToken)
    def fetch_flow_mapping_start(self):
        """Emit a FLOW-MAPPING-START token for '{'."""
        self.fetch_flow_collection_start(FlowMappingStartToken)
    def fetch_flow_collection_start(self, TokenClass):
        """Open a flow collection: bump the flow level and emit *TokenClass*."""
        # '[' and '{' may start a simple key.
        self.save_possible_simple_key()
        # Increase the flow level.
        self.flow_level += 1
        # Simple keys are allowed after '[' and '{'.
        self.allow_simple_key = True
        # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(TokenClass(start_mark, end_mark))
    def fetch_flow_sequence_end(self):
        """Emit a FLOW-SEQUENCE-END token for ']'."""
        self.fetch_flow_collection_end(FlowSequenceEndToken)
    def fetch_flow_mapping_end(self):
        """Emit a FLOW-MAPPING-END token for '}'."""
        self.fetch_flow_collection_end(FlowMappingEndToken)
    def fetch_flow_collection_end(self, TokenClass):
        """Close a flow collection: drop the flow level and emit *TokenClass*."""
        # Reset possible simple key on the current level.
        self.remove_possible_simple_key()
        # Decrease the flow level.
        self.flow_level -= 1
        # No simple keys after ']' or '}'.
        self.allow_simple_key = False
        # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(TokenClass(start_mark, end_mark))
    def fetch_flow_entry(self):
        """Emit a FLOW-ENTRY token for ','."""
        # Simple keys are allowed after ','.
        self.allow_simple_key = True
        # Reset possible simple key on the current level.
        self.remove_possible_simple_key()
        # Add FLOW-ENTRY.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(FlowEntryToken(start_mark, end_mark))
    def fetch_block_entry(self):
        """Emit a BLOCK-ENTRY token for '-', opening a block sequence if
        this entry starts a deeper indentation level.
        """
        # Block context needs additional checks.
        if not self.flow_level:
            # Are we allowed to start a new entry?
            if not self.allow_simple_key:
                raise ScannerError(None, None,
                        "sequence entries are not allowed here",
                        self.get_mark())
            # We may need to add BLOCK-SEQUENCE-START.
            if self.add_indent(self.column):
                mark = self.get_mark()
                self.tokens.append(BlockSequenceStartToken(mark, mark))
        # It's an error for the block entry to occur in the flow context,
        # but we let the parser detect this.
        else:
            pass
        # Simple keys are allowed after '-'.
        self.allow_simple_key = True
        # Reset possible simple key on the current level.
        self.remove_possible_simple_key()
        # Add BLOCK-ENTRY.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(BlockEntryToken(start_mark, end_mark))
    def fetch_key(self):
        """Emit a KEY token for an explicit '?' key indicator."""
        # Block context needs additional checks.
        if not self.flow_level:
            # Are we allowed to start a key (not necessarily a simple one)?
            if not self.allow_simple_key:
                raise ScannerError(None, None,
                        "mapping keys are not allowed here",
                        self.get_mark())
            # We may need to add BLOCK-MAPPING-START.
            if self.add_indent(self.column):
                mark = self.get_mark()
                self.tokens.append(BlockMappingStartToken(mark, mark))
        # Simple keys are allowed after '?' in the block context.
        self.allow_simple_key = not self.flow_level
        # Reset possible simple key on the current level.
        self.remove_possible_simple_key()
        # Add KEY.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(KeyToken(start_mark, end_mark))
    def fetch_value(self):
        """Emit a VALUE token for ':'.

        If a simple-key candidate is pending at the current flow level, a
        KEY token (and possibly BLOCK-MAPPING-START) is retroactively
        inserted at the position where that key began.
        """
        # Do we determine a simple key?
        if self.flow_level in self.possible_simple_keys:
            # Add KEY.
            key = self.possible_simple_keys[self.flow_level]
            del self.possible_simple_keys[self.flow_level]
            self.tokens.insert(key.token_number-self.tokens_taken,
                    KeyToken(key.mark, key.mark))
            # If this key starts a new block mapping, we need to add
            # BLOCK-MAPPING-START.
            if not self.flow_level:
                if self.add_indent(key.column):
                    self.tokens.insert(key.token_number-self.tokens_taken,
                            BlockMappingStartToken(key.mark, key.mark))
            # There cannot be two simple keys one after another.
            self.allow_simple_key = False
        # It must be a part of a complex key.
        else:
            # Block context needs additional checks.
            # (Do we really need them? They will be caught by the parser
            # anyway.)
            if not self.flow_level:
                # We are allowed to start a complex value if and only if
                # we can start a simple key.
                if not self.allow_simple_key:
                    raise ScannerError(None, None,
                            "mapping values are not allowed here",
                            self.get_mark())
            # If this value starts a new block mapping, we need to add
            # BLOCK-MAPPING-START.  It will be detected as an error later by
            # the parser.
            if not self.flow_level:
                if self.add_indent(self.column):
                    mark = self.get_mark()
                    self.tokens.append(BlockMappingStartToken(mark, mark))
            # Simple keys are allowed after ':' in the block context.
            self.allow_simple_key = not self.flow_level
            # Reset possible simple key on the current level.
            self.remove_possible_simple_key()
            # Add VALUE.
            start_mark = self.get_mark()
            self.forward()
            end_mark = self.get_mark()
            self.tokens.append(ValueToken(start_mark, end_mark))
    def fetch_alias(self):
        """Scan a '*alias' reference and emit an ALIAS token."""
        # ALIAS could be a simple key.
        self.save_possible_simple_key()
        # No simple keys after ALIAS.
        self.allow_simple_key = False
        # Scan and add ALIAS.
        self.tokens.append(self.scan_anchor(AliasToken))
    def fetch_anchor(self):
        """Scan an '&anchor' definition and emit an ANCHOR token."""
        # ANCHOR could start a simple key.
        self.save_possible_simple_key()
        # No simple keys after ANCHOR.
        self.allow_simple_key = False
        # Scan and add ANCHOR.
        self.tokens.append(self.scan_anchor(AnchorToken))
    def fetch_tag(self):
        """Scan a '!' tag and emit a TAG token."""
        # TAG could start a simple key.
        self.save_possible_simple_key()
        # No simple keys after TAG.
        self.allow_simple_key = False
        # Scan and add TAG.
        self.tokens.append(self.scan_tag())
    def fetch_literal(self):
        """Scan a literal ('|') block scalar."""
        self.fetch_block_scalar(style='|')
    def fetch_folded(self):
        """Scan a folded ('>') block scalar."""
        self.fetch_block_scalar(style='>')
    def fetch_block_scalar(self, style):
        """Scan a block scalar of the given *style* ('|' or '>') and emit
        its SCALAR token.
        """
        # A simple key may follow a block scalar.
        self.allow_simple_key = True
        # Reset possible simple key on the current level.
        self.remove_possible_simple_key()
        # Scan and add SCALAR.
        self.tokens.append(self.scan_block_scalar(style))
    def fetch_single(self):
        """Scan a single-quoted flow scalar."""
        self.fetch_flow_scalar(style='\'')
    def fetch_double(self):
        """Scan a double-quoted flow scalar."""
        self.fetch_flow_scalar(style='"')
    def fetch_flow_scalar(self, style):
        """Scan a quoted scalar of the given *style* and emit its SCALAR
        token.
        """
        # A flow scalar could be a simple key.
        self.save_possible_simple_key()
        # No simple keys after flow scalars.
        self.allow_simple_key = False
        # Scan and add SCALAR.
        self.tokens.append(self.scan_flow_scalar(style))
    def fetch_plain(self):
        """Scan a plain (unquoted) scalar and emit its SCALAR token."""
        # A plain scalar could be a simple key.
        self.save_possible_simple_key()
        # No simple keys after plain scalars. But note that `scan_plain` will
        # change this flag if the scan is finished at the beginning of the
        # line.
        self.allow_simple_key = False
        # Scan and add SCALAR. May change `allow_simple_key`.
        self.tokens.append(self.scan_plain())
# Checkers.
def check_directive(self):
# DIRECTIVE: ^ '%' ...
# The '%' indicator is already checked.
if self.column == 0:
return True
def check_document_start(self):
# DOCUMENT-START: ^ '---' (' '|'\n')
if self.column == 0:
if self.prefix(3) == '---' \
and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
return True
def check_document_end(self):
# DOCUMENT-END: ^ '...' (' '|'\n')
if self.column == 0:
if self.prefix(3) == '...' \
and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
return True
def check_block_entry(self):
# BLOCK-ENTRY: '-' (' '|'\n')
return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
def check_key(self):
# KEY(flow context): '?'
if self.flow_level:
return True
# KEY(block context): '?' (' '|'\n')
else:
return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
def check_value(self):
# VALUE(flow context): ':'
if self.flow_level:
return True
# VALUE(block context): ':' (' '|'\n')
else:
return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
def check_plain(self):
# A plain scalar may start with any non-space character except:
# '-', '?', ':', ',', '[', ']', '{', '}',
# '#', '&', '*', '!', '|', '>', '\'', '\"',
# '%', '@', '`'.
#
# It may also start with
# '-', '?', ':'
# if it is followed by a non-space character.
#
# Note that we limit the last rule to the block context (except the
# '-' character) because we want the flow context to be space
# independent.
ch = self.peek()
return ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \
or (self.peek(1) not in '\0 \t\r\n\x85\u2028\u2029'
and (ch == '-' or (not self.flow_level and ch in '?:')))
# Scanners.
    def scan_to_next_token(self):
        """Skip spaces, comments and line breaks until the next token.

        A line break in the block context re-enables `allow_simple_key`.
        """
        # We ignore spaces, line breaks and comments.
        # If we find a line break in the block context, we set the flag
        # `allow_simple_key` on.
        # The byte order mark is stripped if it's the first character in the
        # stream. We do not yet support BOM inside the stream as the
        # specification requires. Any such mark will be considered as a part
        # of the document.
        #
        # TODO: We need to make tab handling rules more sane. A good rule is
        #   Tabs cannot precede tokens
        #   BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
        #   KEY(block), VALUE(block), BLOCK-ENTRY
        # So the checking code is
        #   if <TAB>:
        #       self.allow_simple_keys = False
        # We also need to add the check for `allow_simple_keys == True` to
        # `unwind_indent` before issuing BLOCK-END.
        # Scanners for block, flow, and plain scalars need to be modified.
        if self.index == 0 and self.peek() == '\uFEFF':
            self.forward()
        found = False
        while not found:
            while self.peek() == ' ':
                self.forward()
            if self.peek() == '#':
                while self.peek() not in '\0\r\n\x85\u2028\u2029':
                    self.forward()
            if self.scan_line_break():
                if not self.flow_level:
                    self.allow_simple_key = True
            else:
                found = True
    def scan_directive(self):
        """Scan a '%' directive and return a DirectiveToken.

        YAML and TAG directives get a parsed value; any other directive's
        payload is skipped and its value is left as None.
        """
        # See the specification for details.
        start_mark = self.get_mark()
        self.forward()
        name = self.scan_directive_name(start_mark)
        value = None
        if name == 'YAML':
            value = self.scan_yaml_directive_value(start_mark)
            end_mark = self.get_mark()
        elif name == 'TAG':
            value = self.scan_tag_directive_value(start_mark)
            end_mark = self.get_mark()
        else:
            end_mark = self.get_mark()
            while self.peek() not in '\0\r\n\x85\u2028\u2029':
                self.forward()
        self.scan_directive_ignored_line(start_mark)
        return DirectiveToken(name, value, start_mark, end_mark)
    def scan_directive_name(self, start_mark):
        """Scan and return a directive name (alphanumerics plus '-_')."""
        # See the specification for details.
        length = 0
        ch = self.peek(length)
        while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'  \
                or ch in '-_':
            length += 1
            ch = self.peek(length)
        if not length:
            raise ScannerError("while scanning a directive", start_mark,
                    "expected alphabetic or numeric character, but found %r"
                    % ch, self.get_mark())
        value = self.prefix(length)
        self.forward(length)
        ch = self.peek()
        if ch not in '\0 \r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected alphabetic or numeric character, but found %r"
                    % ch, self.get_mark())
        return value
    def scan_yaml_directive_value(self, start_mark):
        """Scan a YAML version directive value; return (major, minor)."""
        # See the specification for details.
        while self.peek() == ' ':
            self.forward()
        major = self.scan_yaml_directive_number(start_mark)
        if self.peek() != '.':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected a digit or '.', but found %r" % self.peek(),
                    self.get_mark())
        self.forward()
        minor = self.scan_yaml_directive_number(start_mark)
        if self.peek() not in '\0 \r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected a digit or ' ', but found %r" % self.peek(),
                    self.get_mark())
        return (major, minor)
    def scan_yaml_directive_number(self, start_mark):
        """Scan a run of digits and return it as an int."""
        # See the specification for details.
        ch = self.peek()
        if not ('0' <= ch <= '9'):
            raise ScannerError("while scanning a directive", start_mark,
                    "expected a digit, but found %r" % ch, self.get_mark())
        length = 0
        while '0' <= self.peek(length) <= '9':
            length += 1
        value = int(self.prefix(length))
        self.forward(length)
        return value
    def scan_tag_directive_value(self, start_mark):
        """Scan a TAG directive value; return (handle, prefix)."""
        # See the specification for details.
        while self.peek() == ' ':
            self.forward()
        handle = self.scan_tag_directive_handle(start_mark)
        while self.peek() == ' ':
            self.forward()
        prefix = self.scan_tag_directive_prefix(start_mark)
        return (handle, prefix)
    def scan_tag_directive_handle(self, start_mark):
        """Scan the handle part of a TAG directive; must end with a space."""
        # See the specification for details.
        value = self.scan_tag_handle('directive', start_mark)
        ch = self.peek()
        if ch != ' ':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected ' ', but found %r" % ch, self.get_mark())
        return value
    def scan_tag_directive_prefix(self, start_mark):
        """Scan the prefix part of a TAG directive (a tag URI)."""
        # See the specification for details.
        value = self.scan_tag_uri('directive', start_mark)
        ch = self.peek()
        if ch not in '\0 \r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected ' ', but found %r" % ch, self.get_mark())
        return value
    def scan_directive_ignored_line(self, start_mark):
        """Consume trailing spaces, an optional comment, and the line break
        after a directive.
        """
        # See the specification for details.
        while self.peek() == ' ':
            self.forward()
        if self.peek() == '#':
            while self.peek() not in '\0\r\n\x85\u2028\u2029':
                self.forward()
        ch = self.peek()
        if ch not in '\0\r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected a comment or a line break, but found %r"
                    % ch, self.get_mark())
        self.scan_line_break()
    def scan_anchor(self, TokenClass):
        """Scan an '&anchor' or '*alias' name and return a *TokenClass*
        token carrying it.
        """
        # The specification does not restrict characters for anchors and
        # aliases. This may lead to problems, for instance, the document:
        #   [ *alias, value ]
        # can be interpreted in two ways, as
        #   [ "value" ]
        # and
        #   [ *alias , "value" ]
        # Therefore we restrict aliases to numbers and ASCII letters.
        start_mark = self.get_mark()
        indicator = self.peek()
        if indicator == '*':
            name = 'alias'
        else:
            name = 'anchor'
        self.forward()
        length = 0
        ch = self.peek(length)
        while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'  \
                or ch in '-_':
            length += 1
            ch = self.peek(length)
        if not length:
            raise ScannerError("while scanning an %s" % name, start_mark,
                    "expected alphabetic or numeric character, but found %r"
                    % ch, self.get_mark())
        value = self.prefix(length)
        self.forward(length)
        ch = self.peek()
        if ch not in '\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
            raise ScannerError("while scanning an %s" % name, start_mark,
                    "expected alphabetic or numeric character, but found %r"
                    % ch, self.get_mark())
        end_mark = self.get_mark()
        return TokenClass(value, start_mark, end_mark)
    def scan_tag(self):
        """Scan a tag and return a TagToken with value (handle, suffix).

        Handles the three tag forms: verbatim '!<uri>', the bare '!'
        non-specific tag, and '!handle!suffix' / '!suffix'.
        """
        # See the specification for details.
        start_mark = self.get_mark()
        ch = self.peek(1)
        if ch == '<':
            handle = None
            self.forward(2)
            suffix = self.scan_tag_uri('tag', start_mark)
            if self.peek() != '>':
                raise ScannerError("while parsing a tag", start_mark,
                        "expected '>', but found %r" % self.peek(),
                        self.get_mark())
            self.forward()
        elif ch in '\0 \t\r\n\x85\u2028\u2029':
            handle = None
            suffix = '!'
            self.forward()
        else:
            length = 1
            use_handle = False
            while ch not in '\0 \r\n\x85\u2028\u2029':
                if ch == '!':
                    use_handle = True
                    break
                length += 1
                ch = self.peek(length)
            handle = '!'
            if use_handle:
                handle = self.scan_tag_handle('tag', start_mark)
            else:
                handle = '!'
                self.forward()
            suffix = self.scan_tag_uri('tag', start_mark)
        ch = self.peek()
        if ch not in '\0 \r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a tag", start_mark,
                    "expected ' ', but found %r" % ch, self.get_mark())
        value = (handle, suffix)
        end_mark = self.get_mark()
        return TagToken(value, start_mark, end_mark)
    def scan_block_scalar(self, style):
        """Scan a literal ('|') or folded ('>') block scalar and return a
        ScalarToken.

        The header (chomping/indentation indicators) determines how much
        leading indentation to strip and how trailing breaks are chomped.
        """
        # See the specification for details.
        if style == '>':
            folded = True
        else:
            folded = False
        chunks = []
        start_mark = self.get_mark()
        # Scan the header.
        self.forward()
        chomping, increment = self.scan_block_scalar_indicators(start_mark)
        self.scan_block_scalar_ignored_line(start_mark)
        # Determine the indentation level and go to the first non-empty line.
        min_indent = self.indent+1
        if min_indent < 1:
            min_indent = 1
        if increment is None:
            breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
            indent = max(min_indent, max_indent)
        else:
            indent = min_indent+increment-1
            breaks, end_mark = self.scan_block_scalar_breaks(indent)
        line_break = ''
        # Scan the inner part of the block scalar.
        while self.column == indent and self.peek() != '\0':
            chunks.extend(breaks)
            leading_non_space = self.peek() not in ' \t'
            length = 0
            while self.peek(length) not in '\0\r\n\x85\u2028\u2029':
                length += 1
            chunks.append(self.prefix(length))
            self.forward(length)
            line_break = self.scan_line_break()
            breaks, end_mark = self.scan_block_scalar_breaks(indent)
            if self.column == indent and self.peek() != '\0':
                # Unfortunately, folding rules are ambiguous.
                #
                # This is the folding according to the specification:
                if folded and line_break == '\n'    \
                        and leading_non_space and self.peek() not in ' \t':
                    if not breaks:
                        chunks.append(' ')
                else:
                    chunks.append(line_break)
                # This is Clark Evans's interpretation (also in the spec
                # examples):
                #
                #if folded and line_break == '\n':
                #    if not breaks:
                #        if self.peek() not in ' \t':
                #            chunks.append(' ')
                #        else:
                #            chunks.append(line_break)
                #else:
                #    chunks.append(line_break)
            else:
                break
        # Chomp the tail.
        if chomping is not False:
            chunks.append(line_break)
        if chomping is True:
            chunks.extend(breaks)
        # We are done.
        return ScalarToken(''.join(chunks), False, start_mark, end_mark,
                style)
    def scan_block_scalar_indicators(self, start_mark):
        """Scan the block scalar header; return (chomping, increment).

        chomping is True ('+' keep), False ('-' strip) or None (clip);
        increment is an explicit indentation indicator in 1-9, or None.
        The two indicators may appear in either order, hence the mirrored
        branches below.
        """
        # See the specification for details.
        chomping = None
        increment = None
        ch = self.peek()
        if ch in '+-':
            if ch == '+':
                chomping = True
            else:
                chomping = False
            self.forward()
            ch = self.peek()
            if ch in '0123456789':
                increment = int(ch)
                if increment == 0:
                    raise ScannerError("while scanning a block scalar", start_mark,
                            "expected indentation indicator in the range 1-9, but found 0",
                            self.get_mark())
                self.forward()
        elif ch in '0123456789':
            increment = int(ch)
            if increment == 0:
                raise ScannerError("while scanning a block scalar", start_mark,
                        "expected indentation indicator in the range 1-9, but found 0",
                        self.get_mark())
            self.forward()
            ch = self.peek()
            if ch in '+-':
                if ch == '+':
                    chomping = True
                else:
                    chomping = False
                self.forward()
        ch = self.peek()
        if ch not in '\0 \r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a block scalar", start_mark,
                    "expected chomping or indentation indicators, but found %r"
                    % ch, self.get_mark())
        return chomping, increment
    def scan_block_scalar_ignored_line(self, start_mark):
        """Consume the remainder of a block scalar header line (spaces,
        optional comment, line break).
        """
        # See the specification for details.
        while self.peek() == ' ':
            self.forward()
        if self.peek() == '#':
            while self.peek() not in '\0\r\n\x85\u2028\u2029':
                self.forward()
        ch = self.peek()
        if ch not in '\0\r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a block scalar", start_mark,
                    "expected a comment or a line break, but found %r" % ch,
                    self.get_mark())
        self.scan_line_break()
    def scan_block_scalar_indentation(self):
        """Skip leading empty lines of a block scalar and measure the
        deepest indentation seen; return (breaks, max_indent, end_mark).
        """
        # See the specification for details.
        chunks = []
        max_indent = 0
        end_mark = self.get_mark()
        while self.peek() in ' \r\n\x85\u2028\u2029':
            if self.peek() != ' ':
                chunks.append(self.scan_line_break())
                end_mark = self.get_mark()
            else:
                self.forward()
                if self.column > max_indent:
                    max_indent = self.column
        return chunks, max_indent, end_mark
    def scan_block_scalar_breaks(self, indent):
        """Consume line breaks and indentation up to *indent* between block
        scalar lines; return (breaks, end_mark).
        """
        # See the specification for details.
        chunks = []
        end_mark = self.get_mark()
        while self.column < indent and self.peek() == ' ':
            self.forward()
        while self.peek() in '\r\n\x85\u2028\u2029':
            chunks.append(self.scan_line_break())
            end_mark = self.get_mark()
            while self.column < indent and self.peek() == ' ':
                self.forward()
        return chunks, end_mark
    def scan_flow_scalar(self, style):
        """Scan a quoted scalar (single or double) and return a ScalarToken."""
        # See the specification for details.
        # Note that we lose indentation rules for quoted scalars. Quoted
        # scalars don't need to adhere indentation because " and ' clearly
        # mark the beginning and the end of them. Therefore we are less
        # restrictive than the specification requires. We only need to check
        # that document separators are not included in scalars.
        if style == '"':
            double = True
        else:
            double = False
        chunks = []
        start_mark = self.get_mark()
        quote = self.peek()
        self.forward()
        chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
        while self.peek() != quote:
            chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
            chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
        self.forward()
        end_mark = self.get_mark()
        return ScalarToken(''.join(chunks), False, start_mark, end_mark,
                style)
ESCAPE_REPLACEMENTS = {
'0': '\0',
'a': '\x07',
'b': '\x08',
't': '\x09',
'\t': '\x09',
'n': '\x0A',
'v': '\x0B',
'f': '\x0C',
'r': '\x0D',
'e': '\x1B',
' ': '\x20',
'\"': '\"',
'\\': '\\',
'N': '\x85',
'_': '\xA0',
'L': '\u2028',
'P': '\u2029',
}
ESCAPE_CODES = {
'x': 2,
'u': 4,
'U': 8,
}
    def scan_flow_scalar_non_spaces(self, double, start_mark):
        """Scan the non-whitespace portion of a quoted scalar, handling
        escape sequences; return the collected chunks.
        """
        # See the specification for details.
        chunks = []
        while True:
            length = 0
            while self.peek(length) not in '\'\"\\\0 \t\r\n\x85\u2028\u2029':
                length += 1
            if length:
                chunks.append(self.prefix(length))
                self.forward(length)
            ch = self.peek()
            # '' doubles as the escape for a quote in single-quoted scalars.
            if not double and ch == '\'' and self.peek(1) == '\'':
                chunks.append('\'')
                self.forward(2)
            elif (double and ch == '\'') or (not double and ch in '\"\\'):
                chunks.append(ch)
                self.forward()
            elif double and ch == '\\':
                self.forward()
                ch = self.peek()
                if ch in self.ESCAPE_REPLACEMENTS:
                    chunks.append(self.ESCAPE_REPLACEMENTS[ch])
                    self.forward()
                elif ch in self.ESCAPE_CODES:
                    length = self.ESCAPE_CODES[ch]
                    self.forward()
                    for k in range(length):
                        if self.peek(k) not in '0123456789ABCDEFabcdef':
                            raise ScannerError("while scanning a double-quoted scalar", start_mark,
                                    "expected escape sequence of %d hexdecimal numbers, but found %r" %
                                        (length, self.peek(k)), self.get_mark())
                    code = int(self.prefix(length), 16)
                    chunks.append(chr(code))
                    self.forward(length)
                elif ch in '\r\n\x85\u2028\u2029':
                    # An escaped line break is consumed without output.
                    self.scan_line_break()
                    chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
                else:
                    raise ScannerError("while scanning a double-quoted scalar", start_mark,
                            "found unknown escape character %r" % ch, self.get_mark())
            else:
                return chunks
    def scan_flow_scalar_spaces(self, double, start_mark):
        """Scan whitespace inside a quoted scalar, folding line breaks
        according to the flow-folding rules; return the collected chunks.
        """
        # See the specification for details.
        chunks = []
        length = 0
        while self.peek(length) in ' \t':
            length += 1
        whitespaces = self.prefix(length)
        self.forward(length)
        ch = self.peek()
        if ch == '\0':
            raise ScannerError("while scanning a quoted scalar", start_mark,
                    "found unexpected end of stream", self.get_mark())
        elif ch in '\r\n\x85\u2028\u2029':
            line_break = self.scan_line_break()
            breaks = self.scan_flow_scalar_breaks(double, start_mark)
            if line_break != '\n':
                chunks.append(line_break)
            elif not breaks:
                # A single '\n' folds into one space.
                chunks.append(' ')
            chunks.extend(breaks)
        else:
            chunks.append(whitespaces)
        return chunks
    def scan_flow_scalar_breaks(self, double, start_mark):
        """Consume consecutive blank lines inside a quoted scalar; return
        the line breaks collected.  Rejects document separators.
        """
        # See the specification for details.
        chunks = []
        while True:
            # Instead of checking indentation, we check for document
            # separators.
            prefix = self.prefix(3)
            if (prefix == '---' or prefix == '...')   \
                    and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
                raise ScannerError("while scanning a quoted scalar", start_mark,
                        "found unexpected document separator", self.get_mark())
            while self.peek() in ' \t':
                self.forward()
            if self.peek() in '\r\n\x85\u2028\u2029':
                chunks.append(self.scan_line_break())
            else:
                return chunks
    def scan_plain(self):
        """Scan a plain (unquoted) scalar and return a ScalarToken."""
        # See the specification for details.
        # We add an additional restriction for the flow context:
        #   plain scalars in the flow context cannot contain ',', ':' and '?'.
        # We also keep track of the `allow_simple_key` flag here.
        # Indentation rules are loosened for the flow context.
        chunks = []
        start_mark = self.get_mark()
        end_mark = start_mark
        indent = self.indent+1
        # We allow zero indentation for scalars, but then we need to check for
        # document separators at the beginning of the line.
        #if indent == 0:
        #    indent = 1
        spaces = []
        while True:
            length = 0
            if self.peek() == '#':
                break
            while True:
                ch = self.peek(length)
                if ch in '\0 \t\r\n\x85\u2028\u2029'    \
                        or (not self.flow_level and ch == ':' and
                        self.peek(length+1) in '\0 \t\r\n\x85\u2028\u2029') \
                        or (self.flow_level and ch in ',:?[]{}'):
                    break
                length += 1
            # It's not clear what we should do with ':' in the flow context.
            if (self.flow_level and ch == ':'
                    and self.peek(length+1) not in '\0 \t\r\n\x85\u2028\u2029,[]{}'):
                self.forward(length)
                raise ScannerError("while scanning a plain scalar", start_mark,
                    "found unexpected ':'", self.get_mark(),
                    "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.")
            if length == 0:
                break
            self.allow_simple_key = False
            chunks.extend(spaces)
            chunks.append(self.prefix(length))
            self.forward(length)
            end_mark = self.get_mark()
            spaces = self.scan_plain_spaces(indent, start_mark)
            if not spaces or self.peek() == '#' \
                    or (not self.flow_level and self.column < indent):
                break
        return ScalarToken(''.join(chunks), True, start_mark, end_mark)
    def scan_plain_spaces(self, indent, start_mark):
        """Scan whitespace and line breaks between plain scalar chunks.

        Returns the folded whitespace chunks, or None when a document
        separator terminates the scalar.
        """
        # See the specification for details.
        # The specification is really confusing about tabs in plain scalars.
        # We just forbid them completely. Do not use tabs in YAML!
        chunks = []
        length = 0
        while self.peek(length) in ' ':
            length += 1
        whitespaces = self.prefix(length)
        self.forward(length)
        ch = self.peek()
        if ch in '\r\n\x85\u2028\u2029':
            line_break = self.scan_line_break()
            self.allow_simple_key = True
            prefix = self.prefix(3)
            if (prefix == '---' or prefix == '...')   \
                    and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
                return
            breaks = []
            while self.peek() in ' \r\n\x85\u2028\u2029':
                if self.peek() == ' ':
                    self.forward()
                else:
                    breaks.append(self.scan_line_break())
                    prefix = self.prefix(3)
                    if (prefix == '---' or prefix == '...')   \
                            and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
                        return
            if line_break != '\n':
                chunks.append(line_break)
            elif not breaks:
                chunks.append(' ')
            chunks.extend(breaks)
        elif whitespaces:
            chunks.append(whitespaces)
        return chunks
    def scan_tag_handle(self, name, start_mark):
        """Scan a tag handle of the form '!', '!!' or '!word!'."""
        # See the specification for details.
        # For some strange reasons, the specification does not allow '_' in
        # tag handles. I have allowed it anyway.
        ch = self.peek()
        if ch != '!':
            raise ScannerError("while scanning a %s" % name, start_mark,
                    "expected '!', but found %r" % ch, self.get_mark())
        length = 1
        ch = self.peek(length)
        if ch != ' ':
            while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'  \
                    or ch in '-_':
                length += 1
                ch = self.peek(length)
            if ch != '!':
                self.forward(length)
                raise ScannerError("while scanning a %s" % name, start_mark,
                        "expected '!', but found %r" % ch, self.get_mark())
            length += 1
        value = self.prefix(length)
        self.forward(length)
        return value
def scan_tag_uri(self, name, start_mark):
# See the specification for details.
# Note: we do not check if URI is well-formed.
chunks = []
length = 0
ch = self.peek(length)
while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
or ch in '-;/?:@&=+$,_.!~*\'()[]%':
if ch == '%':
chunks.append(self.prefix(length))
self.forward(length)
length = 0
chunks.append(self.scan_uri_escapes(name, start_mark))
else:
length += 1
ch = self.peek(length)
if length:
chunks.append(self.prefix(length))
self.forward(length)
length = 0
if not chunks:
raise ScannerError("while parsing a %s" % name, start_mark,
"expected URI, but found %r" % ch, self.get_mark())
return ''.join(chunks)
def scan_uri_escapes(self, name, start_mark):
# See the specification for details.
codes = []
mark = self.get_mark()
while self.peek() == '%':
self.forward()
for k in range(2):
if self.peek(k) not in '0123456789ABCDEFabcdef':
raise ScannerError("while scanning a %s" % name, start_mark,
"expected URI escape sequence of 2 hexdecimal numbers, but found %r"
% self.peek(k), self.get_mark())
codes.append(int(self.prefix(2), 16))
self.forward(2)
try:
value = bytes(codes).decode('utf-8')
except UnicodeDecodeError as exc:
raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
return value
def scan_line_break(self):
# Transforms:
# '\r\n' : '\n'
# '\r' : '\n'
# '\n' : '\n'
# '\x85' : '\n'
# '\u2028' : '\u2028'
# '\u2029 : '\u2029'
# default : ''
ch = self.peek()
if ch in '\r\n\x85':
if self.prefix(2) == '\r\n':
self.forward(2)
else:
self.forward()
return '\n'
elif ch in '\u2028\u2029':
self.forward()
return ch
return ''
#try:
# import psyco
# psyco.bind(Scanner)
#except ImportError:
# pass | PypiClean |
/OTLModel/Classes/Abstracten/Kast.py | from OTLMOW.OTLModel.BaseClasses.OTLAttribuut import OTLAttribuut
from abc import abstractmethod
from OTLMOW.OTLModel.Classes.Abstracten.Behuizing import Behuizing
from OTLMOW.OTLModel.Datatypes.BooleanField import BooleanField
from OTLMOW.OTLModel.Datatypes.DtcAfmetingBxlxhInMm import DtcAfmetingBxlxhInMm
from OTLMOW.OTLModel.Datatypes.DtcDocument import DtcDocument
from OTLMOW.OTLModel.Datatypes.KlAlgMateriaal import KlAlgMateriaal
from OTLMOW.GeometrieArtefact.PuntGeometrie import PuntGeometrie
from OTLMOW.GeometrieArtefact.VlakGeometrie import VlakGeometrie
# Generated with OTLClassCreator. To modify: extend, do not edit
class Kast(Behuizing, PuntGeometrie, VlakGeometrie):
    """Abstract base class for all kinds of cabinets ("kasten")."""
    typeURI = 'https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Kast'
    """The URI of the object according to https://www.w3.org/2001/XMLSchema#anyURI."""
    @abstractmethod
    def __init__(self):
        # Initialise every base class explicitly (multiple inheritance).
        Behuizing.__init__(self)
        PuntGeometrie.__init__(self)
        VlakGeometrie.__init__(self)
        # Attribute wrappers below are generated from the OTL model; the
        # Dutch `definition=` strings are part of the model and must match it.
        self._afmeting = OTLAttribuut(field=DtcAfmetingBxlxhInMm,
                                      naam='afmeting',
                                      label='afmeting',
                                      objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Kast.afmeting',
                                      definition='Buitenafmeting van de kast als maximale breedte, lengte en hoogte in millimeter.',
                                      owner=self)
        self._heeftVerlichting = OTLAttribuut(field=BooleanField,
                                              naam='heeftVerlichting',
                                              label='heeft verlichting',
                                              objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Kast.heeftVerlichting',
                                              definition='Geeft aan of er verlichting aanwezig is binnen de kast.',
                                              owner=self)
        self._indelingsplan = OTLAttribuut(field=DtcDocument,
                                           naam='indelingsplan',
                                           label='indelingsplan',
                                           objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Kast.indelingsplan',
                                           definition='Schematisch overzicht van de indeling van de kast volgens de aanwezige technieken in vooraanzicht.',
                                           owner=self)
        self._kastmateriaal = OTLAttribuut(field=KlAlgMateriaal,
                                           naam='kastmateriaal',
                                           label='kastmateriaal',
                                           objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Kast.kastmateriaal',
                                           definition='Materiaal waaruit de kast is opgebouwd.',
                                           owner=self)
    @property
    def afmeting(self):
        """Outer dimensions of the cabinet as maximum width, length and height in millimetres."""
        return self._afmeting.get_waarde()
    @afmeting.setter
    def afmeting(self, value):
        self._afmeting.set_waarde(value, owner=self)
    @property
    def heeftVerlichting(self):
        """Indicates whether lighting is present inside the cabinet."""
        return self._heeftVerlichting.get_waarde()
    @heeftVerlichting.setter
    def heeftVerlichting(self, value):
        self._heeftVerlichting.set_waarde(value, owner=self)
    @property
    def indelingsplan(self):
        """Schematic front-view overview of the cabinet layout according to the techniques present."""
        return self._indelingsplan.get_waarde()
    @indelingsplan.setter
    def indelingsplan(self, value):
        self._indelingsplan.set_waarde(value, owner=self)
    @property
    def kastmateriaal(self):
        """Material the cabinet is made of."""
        return self._kastmateriaal.get_waarde()
    @kastmateriaal.setter
    def kastmateriaal(self, value):
        self._kastmateriaal.set_waarde(value, owner=self)
/LTHTools-1.0.2.tar.gz/LTHTools-1.0.2/lth_tools/torch_tools/NMS/NMS.py | __author__ = 'lth'
import torch
from . import bbox_iou
def nms(prediction, conf_threshold=0.1, nms_threshold=0.9):
    """Per-class non-maximum suppression on raw network output.

    :param prediction: tensor of shape (N, 7, num_predictions) where the 7
        channels are [x, y, w, h, cls_pred, cls_conf, obj_conf]
    :param conf_threshold: minimum obj_conf * cls_conf for a box to be kept
    :param nms_threshold: IoU above which an overlapping box is suppressed
    :return: list of length N; each entry is a tensor of surviving
        detections with columns [x, y, w, h, cls_conf, cls_pred, obj_conf],
        or None when no detection survives for that image
    """
    # (N, 7, P) -> (N, P, 7) so that each row describes one candidate box.
    prediction = prediction.permute(0, 2, 1)
    output = [None for _ in range(len(prediction))]
    for image_i, image_pred in enumerate(prediction):
        # BUG FIX: the per-image columns were previously sliced from the
        # whole batch (prediction[:, :, 5]), which breaks for batch size > 1.
        # Keep a trailing dim of 1 so the tensors concatenate column-wise.
        class_pred = image_pred[:, 4:5]
        class_conf = image_pred[:, 5:6]
        conf = image_pred[:, 6:7]
        # Filter by the combined objectness * class confidence score.
        conf_mask = ((conf * class_conf) >= conf_threshold).squeeze(1)
        image_pred = image_pred[conf_mask]
        class_conf = class_conf[conf_mask]
        class_pred = class_pred[conf_mask]
        conf = conf[conf_mask]
        if not image_pred.size(0):
            continue
        # x, y, w, h, cls_conf, cls_pred, obj_conf
        # BUG FIX: class_pred.conf() was a typo for class_pred.float().
        detections = torch.cat((image_pred[:, :4], class_conf.float(),
                                class_pred.float(), conf.float()), 1)
        unique_labels = detections[:, 5].cpu().unique()
        # BUG FIX: is_cuda is a property, not a method.
        if prediction.is_cuda:
            unique_labels = unique_labels.cuda()
            detections = detections.cuda()
        for c in unique_labels:
            detections_class = detections[detections[:, 5] == c]
            # Sort by obj_conf * cls_conf, best first.
            _, conf_sort_index = torch.sort(
                detections_class[:, 6] * detections_class[:, 4],
                descending=True)
            detections_class = detections_class[conf_sort_index]
            max_detections = []
            while detections_class.size(0):
                # Keep the highest-scoring box, drop boxes overlapping it.
                max_detections.append(detections_class[0].unsqueeze(0))
                if len(detections_class) == 1:
                    break
                ious = bbox_iou(max_detections[-1], detections_class[1:])
                detections_class = detections_class[1:][ious < nms_threshold]
            max_detections = torch.cat(max_detections).data
            # BUG FIX: previously torch.cat((output[image_i, max_detections]))
            # indexed the list with a tuple instead of concatenating.
            output[image_i] = max_detections if output[image_i] is None \
                else torch.cat((output[image_i], max_detections))
    return output
/GetDist-1.4.3.tar.gz/GetDist-1.4.3/getdist/gui/SyntaxHighlight.py | try:
from PySide6.QtCore import QRegularExpression
from PySide6.QtGui import QColor, QTextCharFormat, QFont, QSyntaxHighlighter
except ImportError:
# noinspection PyUnresolvedReferences
from PySide2.QtCore import QRegularExpression
# noinspection PyUnresolvedReferences
from PySide2.QtGui import QColor, QTextCharFormat, QFont, QSyntaxHighlighter
def txformat(color, style=''):
    """Build a QTextCharFormat with the given foreground color and style.

    *style* is a string that may contain 'bold' and/or 'italic'.
    """
    fg = QColor()
    fg.setNamedColor(color)
    fmt = QTextCharFormat()
    fmt.setForeground(fg)
    if 'bold' in style:
        fmt.setFontWeight(QFont.Bold)
    if 'italic' in style:
        fmt.setFontItalic(True)
    return fmt
# Syntax styles that can be shared by all languages.
# Maps a token category name to the QTextCharFormat used to render it.
STYLES = {
    'keyword': txformat('navy', 'bold'),
    'operator': txformat('black'),
    'brace': txformat('black'),
    'defclass': txformat('black', 'bold'),
    'string': txformat('green', 'bold'),
    'string2': txformat('green'),
    'comment': txformat('darkGray', 'italic'),
    'self': txformat('black', 'italic'),
    'numbers': txformat('brown'),
}
class PythonHighlighter(QSyntaxHighlighter):
    """Syntax highlighter for the Python language.

    Single-line tokens are matched with the regular-expression rules built
    in __init__; triple-quoted strings are tracked across blocks via the
    block-state machinery in match_multiline().
    """
    # Python keywords
    keywords = [
        'and', 'assert', 'break', 'class', 'continue', 'def',
        'del', 'elif', 'else', 'except', 'exec', 'finally',
        'for', 'from', 'global', 'if', 'import', 'in',
        'is', 'lambda', 'not', 'or', 'pass', 'print',
        'raise', 'return', 'try', 'while', 'yield',
        'None', 'True', 'False', 'as',
    ]
    # Python operators (regex-escaped where needed)
    operators = [
        '=',
        # Comparison
        '==', '!=', '<', '<=', '>', '>=',
        # Arithmetic
        r'\+', '-', r'\*', '/', '//', r'\%', r'\*\*',
        # In-place
        r'\+=', r'-=', r'\*=', r'/=', r'\%=',
        # Bitwise
        r'\^', r'\|', r'\&', r'\~', '>>', '<<',
    ]
    # Python braces
    braces = [
        r'\{', r'\}', r'\(', r'\)', r'\[', r'\]',
    ]
    # noinspection PyArgumentList
    def __init__(self, document):
        QSyntaxHighlighter.__init__(self, document)
        # Multi-line strings (expression, flag, style)
        # FIXME: The triple-quotes in these two lines will mess up the
        # syntax highlighting from this point onward
        self.tri_single = (QRegularExpression("'''"), 1, STYLES['string2'])
        self.tri_double = (QRegularExpression('"""'), 2, STYLES['string2'])
        rules = []
        # Keyword, operator, and brace rules
        rules += [(r'\b%s\b' % w, 0, STYLES['keyword'])
                  for w in PythonHighlighter.keywords]
        rules += [(r'%s' % o, 0, STYLES['operator'])
                  for o in PythonHighlighter.operators]
        rules += [(r'%s' % b, 0, STYLES['brace'])
                  for b in PythonHighlighter.braces]
        # All other rules
        rules += [
            # 'self'
            (r'\bself\b', 0, STYLES['self']),
            # Double-quoted string, possibly containing escape sequences
            (r'"[^"\\]*(\\.[^"\\]*)*"', 0, STYLES['string']),
            # Single-quoted string, possibly containing escape sequences
            (r"'[^'\\]*(\\.[^'\\]*)*'", 0, STYLES['string']),
            # 'def' followed by an identifier
            (r'\bdef\b\s*(\w+)', 1, STYLES['defclass']),
            # 'class' followed by an identifier
            (r'\bclass\b\s*(\w+)', 1, STYLES['defclass']),
            # From '#' until a newline
            (r'#[^\n]*', 0, STYLES['comment']),
            # Numeric literals
            (r'\b[+-]?[0-9]+[lL]?\b', 0, STYLES['numbers']),
            (r'\b[+-]?0[xX][0-9A-Fa-f]+[lL]?\b', 0, STYLES['numbers']),
            (r'\b[+-]?[0-9]+(?:\.[0-9]+)?(?:[eE][+-]?[0-9]+)?\b', 0, STYLES['numbers']),
        ]
        # Build a QRegularExpression for each pattern
        # noinspection PyArgumentList
        self.rules = [(QRegularExpression(pat), index, fmt)
                      for (pat, index, fmt) in rules]
    def highlightBlock(self, text):
        """Apply syntax highlighting to the given block of text.
        """
        # Do other syntax formatting
        # NOTE(review): the formatted span below is the full match (group 0)
        # even when the rule names capture group `nth` (def/class rules) -
        # confirm whether formatting only the captured group was intended.
        for expression, nth, _format in self.rules:
            match = expression.match(text)
            index = match.capturedStart()
            while index >= 0:
                # We actually want the index of the nth match
                length = match.capturedLength()
                self.setFormat(index, length, _format)
                match = expression.match(text, index + length)
                index = match.capturedStart()
        self.setCurrentBlockState(0)
        # Do multi-line strings
        in_multiline = self.match_multiline(text, *self.tri_single)
        if not in_multiline:
            self.match_multiline(text, *self.tri_double)
    def match_multiline(self, text, delimiter, in_state, style):
        """Do highlighting of multi-line strings. ``delimiter`` should be a
        ``QRegularExpression`` for triple-single-quotes or triple-double-quotes, and
        ``in_state`` should be a unique integer to represent the corresponding
        state changes when inside those strings. Returns True if we're still
        inside a multi-line string when this function is finished.
        """
        # If inside triple-single quotes, start at 0
        if self.previousBlockState() == in_state:
            start = 0
            add = 0
        # Otherwise, look for the delimiter on this line
        else:
            match = delimiter.match(text)
            start = match.capturedStart()
            add = match.capturedLength()
        # As long as there's a delimiter match on this line...
        while start >= 0:
            # Look for the ending delimiter
            match = delimiter.match(text, start + add)
            end = match.capturedStart()
            # Ending delimiter on this line?
            if end >= add:
                length = end - start + add + match.capturedLength()
                self.setCurrentBlockState(0)
            # No; multi-line string
            else:
                self.setCurrentBlockState(in_state)
                length = len(text) - start + add
            # Apply formatting
            self.setFormat(start, length, style)
            # Look for the next match
            match = delimiter.match(text, start + length)
            start = match.capturedStart()
        # Return True if still inside a multi-line string, False otherwise
        if self.currentBlockState() == in_state:
            return True
        else:
            return False
/Kool-0.0.2-py3-none-any.whl/kool/db/models.py | from .flatfile import FlatFileDB, Query
from kool.utils import camel_to_snake, now
class Model(object):
    # Shared database handle for all Model subclasses; assigned at module
    # import time (see the `Model.db = FlatFileDB()` line below the class).
    db = None  # database

    def __init__(self, *args, **kwargs):
        """
        Model provides save, delete, purge operations to every
        class that inherits it.
        """
        # The table name is derived from the subclass name
        # (CamelCase -> snake_case).
        cls_name = self.__class__.__name__
        table_name = camel_to_snake(cls_name)
        self._table = Model.db.create_table(name=table_name)
        self.last_modified = None
        self.date_created = None
        self._id = None

    def save(self, *args, **kwargs):
        """
        Save the current object as a new record and return its id.

        Also updates the `last_modified` and `date_created` fields.
        """
        self.last_modified = '{}'.format(now())
        if not self.date_created:
            self.date_created = '{}'.format(now())
        # Get the object's public attributes as a dict.
        data = self.props()
        if data:
            # Creates a new record
            self._id = self._table.insert(data)
        return self._id

    def update(self, *args, **kwargs):
        """
        Persist the current attribute values to the existing record.

        Returns the record id, or None if the object was never saved.
        """
        self.last_modified = '{}'.format(now())
        if not self.date_created:
            self.date_created = '{}'.format(now())
        data = self.props()
        # Fetch the existing record; None if this object was never saved.
        obj = self._table.get(rid=self._id) if self._id else None
        if obj and data:
            # Updates an existing record
            ids = self._table.update(data, rids=[self._id])
            self._id = ids[0]
        return self._id

    def delete(self, cond=None, rids=None, *args):
        """
        Remove records: this object's own record when it has been saved,
        otherwise the ids passed via `rids` or as positional arguments.

        Raises ValueError when no record ids can be determined.
        """
        # BUG FIX: the previous implementation reset `rids` to [] before
        # using it, so an explicit `rids=` argument was always ignored.
        rids = ([self._id] if self._id else []) or rids or list(args)
        if rids:
            self._table.remove(cond=cond, rids=rids)
        else:
            raise ValueError('Record must be saved to delete')

    def purge(self, confirm=False):
        """
        Truncates the table. Operation is irreversible.

        Keyword Arguments:
            confirm {bool} -- user confirmation (default: {False})
        """
        if confirm:
            self._table.purge()
        else:
            raise ValueError('Confirm argument has to be set true')

    def props(self):
        """Converts the object to a dictionary, skipping private
        (underscore-prefixed) attributes."""
        return {key: value
                for key, value in self.__dict__.items()
                if not (key.startswith('_') or key.startswith('__'))}

    def __getattr__(self, name):
        """
        Forward all unknown attribute calls to the underlying standard table.
        """
        return getattr(self._table, name)
# Instantiate the shared flat-file database used by all Model subclasses.
Model.db = FlatFileDB()
def where(key):
    """Shortcut for building a query condition on *key*."""
    query = Query()
    return query[key]
def table(cls):
    """Return the database table object that corresponds to *cls*."""
    snake_name = camel_to_snake(cls.__name__)
    return Model().db.table(snake_name)
/Electrum-CHI-3.3.8.tar.gz/Electrum-CHI-3.3.8/packages/pip/_vendor/urllib3/contrib/_securetransport/low_level.py | import base64
import ctypes
import itertools
import re
import os
import ssl
import tempfile
from .bindings import Security, CoreFoundation, CFConst
# This regular expression is used to grab PEM data out of a PEM bundle.
# Group 1 captures the base64 payload between the BEGIN/END markers.
_PEM_CERTS_RE = re.compile(
    b"-----BEGIN CERTIFICATE-----\n(.*?)\n-----END CERTIFICATE-----", re.DOTALL
)
def _cf_data_from_bytes(bytestring):
    """
    Given a bytestring, create a CFData object from it. This CFData object must
    be CFReleased by the caller.
    """
    allocator = CoreFoundation.kCFAllocatorDefault
    return CoreFoundation.CFDataCreate(allocator, bytestring, len(bytestring))
def _cf_dictionary_from_tuples(tuples):
    """
    Given a list of Python tuples, create an associated CFDictionary.
    """
    size = len(tuples)
    # Keys and values must be extracted in matching order.
    key_refs = (CoreFoundation.CFTypeRef * size)(*(pair[0] for pair in tuples))
    value_refs = (CoreFoundation.CFTypeRef * size)(*(pair[1] for pair in tuples))
    return CoreFoundation.CFDictionaryCreate(
        CoreFoundation.kCFAllocatorDefault,
        key_refs,
        value_refs,
        size,
        CoreFoundation.kCFTypeDictionaryKeyCallBacks,
        CoreFoundation.kCFTypeDictionaryValueCallBacks,
    )
def _cf_string_to_unicode(value):
    """
    Creates a Unicode string from a CFString object. Used entirely for error
    reporting.
    Yes, it annoys me quite a lot that this function is this complex.
    """
    value_as_void_p = ctypes.cast(value, ctypes.POINTER(ctypes.c_void_p))
    # Fast path: ask CF for a direct pointer to the internal buffer
    # (may legitimately return NULL, in which case we must copy).
    string = CoreFoundation.CFStringGetCStringPtr(
        value_as_void_p,
        CFConst.kCFStringEncodingUTF8
    )
    if string is None:
        # Slow path: copy the string into a buffer of our own.
        # NOTE(review): assumes the message fits in 1024 bytes; longer
        # strings make CFStringGetCString fail - confirm this is acceptable.
        buffer = ctypes.create_string_buffer(1024)
        result = CoreFoundation.CFStringGetCString(
            value_as_void_p,
            buffer,
            1024,
            CFConst.kCFStringEncodingUTF8
        )
        if not result:
            raise OSError('Error copying C string from CFStringRef')
        string = buffer.value
    if string is not None:
        string = string.decode('utf-8')
    return string
def _assert_no_error(error, exception_class=None):
"""
Checks the return code and throws an exception if there is an error to
report
"""
if error == 0:
return
cf_error_string = Security.SecCopyErrorMessageString(error, None)
output = _cf_string_to_unicode(cf_error_string)
CoreFoundation.CFRelease(cf_error_string)
if output is None or output == u'':
output = u'OSStatus %s' % error
if exception_class is None:
exception_class = ssl.SSLError
raise exception_class(output)
def _cert_array_from_pem(pem_bundle):
    """
    Given a bundle of certs in PEM format, turns them into a CFArray of certs
    that can be used to validate a cert chain.

    Raises ssl.SSLError when the bundle contains no certificates or a cert
    cannot be built. The returned CFArray must be CFReleased by the caller.
    """
    # Normalize the PEM bundle's line endings.
    pem_bundle = pem_bundle.replace(b"\r\n", b"\n")

    der_certs = [
        base64.b64decode(match.group(1))
        for match in _PEM_CERTS_RE.finditer(pem_bundle)
    ]
    if not der_certs:
        raise ssl.SSLError("No root certificates specified")

    cert_array = CoreFoundation.CFArrayCreateMutable(
        CoreFoundation.kCFAllocatorDefault,
        0,
        ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks)
    )
    if not cert_array:
        raise ssl.SSLError("Unable to allocate memory!")

    try:
        for der_bytes in der_certs:
            certdata = _cf_data_from_bytes(der_bytes)
            if not certdata:
                raise ssl.SSLError("Unable to allocate memory!")
            cert = Security.SecCertificateCreateWithData(
                CoreFoundation.kCFAllocatorDefault, certdata
            )
            CoreFoundation.CFRelease(certdata)
            if not cert:
                raise ssl.SSLError("Unable to build cert object!")
            CoreFoundation.CFArrayAppendValue(cert_array, cert)
            CoreFoundation.CFRelease(cert)
    except Exception:
        # We need to free the array before the exception bubbles further.
        # We only want to do that if an error occurs: otherwise, the caller
        # should free.
        CoreFoundation.CFRelease(cert_array)
        # BUG FIX: re-raise. Previously the exception was silently swallowed
        # and the already-released array was returned to the caller.
        raise

    return cert_array
def _is_cert(item):
    """
    Returns True if a given CFTypeRef is a certificate.
    """
    cert_type_id = Security.SecCertificateGetTypeID()
    return CoreFoundation.CFGetTypeID(item) == cert_type_id
def _is_identity(item):
    """
    Returns True if a given CFTypeRef is an identity.
    """
    identity_type_id = Security.SecIdentityGetTypeID()
    return CoreFoundation.CFGetTypeID(item) == identity_type_id
def _temporary_keychain():
    """
    This function creates a temporary Mac keychain that we can use to work with
    credentials. This keychain uses a one-time password and a temporary file to
    store the data. We expect to have one keychain per socket. The returned
    SecKeychainRef must be freed by the caller, including calling
    SecKeychainDelete.
    Returns a tuple of the SecKeychainRef and the path to the temporary
    directory that contains it.

    The caller is also responsible for removing the returned temporary
    directory when done.
    """
    # Unfortunately, SecKeychainCreate requires a path to a keychain. This
    # means we cannot use mkstemp to use a generic temporary file. Instead,
    # we're going to create a temporary directory and a filename to use there.
    # This filename will be 8 random bytes expanded into base64. We also need
    # some random bytes to password-protect the keychain we're creating, so we
    # ask for 40 random bytes.
    random_bytes = os.urandom(40)
    filename = base64.b16encode(random_bytes[:8]).decode('utf-8')
    password = base64.b16encode(random_bytes[8:])  # Must be valid UTF-8
    tempdirectory = tempfile.mkdtemp()
    keychain_path = os.path.join(tempdirectory, filename).encode('utf-8')
    # We now want to create the keychain itself.
    keychain = Security.SecKeychainRef()
    status = Security.SecKeychainCreate(
        keychain_path,
        len(password),
        password,
        False,  # do not prompt the user
        None,
        ctypes.byref(keychain)
    )
    _assert_no_error(status)
    # Having created the keychain, we want to pass it off to the caller.
    return keychain, tempdirectory
def _load_items_from_file(keychain, path):
    """
    Given a single file, loads all the trust objects from it into arrays and
    the keychain.
    Returns a tuple of lists: the first list is a list of identities, the
    second a list of certs.

    Each returned object is CFRetained and must be CFReleased by the caller.
    """
    certificates = []
    identities = []
    result_array = None
    with open(path, 'rb') as f:
        raw_filedata = f.read()
    try:
        filedata = CoreFoundation.CFDataCreate(
            CoreFoundation.kCFAllocatorDefault,
            raw_filedata,
            len(raw_filedata)
        )
        result_array = CoreFoundation.CFArrayRef()
        result = Security.SecItemImport(
            filedata,  # cert data
            None,  # Filename, leaving it out for now
            None,  # What the type of the file is, we don't care
            None,  # what's in the file, we don't care
            0,  # import flags
            None,  # key params, can include passphrase in the future
            keychain,  # The keychain to insert into
            ctypes.byref(result_array)  # Results
        )
        _assert_no_error(result)
        # A CFArray is not very useful to us as an intermediary
        # representation, so we are going to extract the objects we want
        # and then free the array. We don't need to keep hold of keys: the
        # keychain already has them!
        result_count = CoreFoundation.CFArrayGetCount(result_array)
        for index in range(result_count):
            item = CoreFoundation.CFArrayGetValueAtIndex(
                result_array, index
            )
            item = ctypes.cast(item, CoreFoundation.CFTypeRef)
            if _is_cert(item):
                # Retain before storing: releasing result_array below would
                # otherwise drop the last reference.
                CoreFoundation.CFRetain(item)
                certificates.append(item)
            elif _is_identity(item):
                CoreFoundation.CFRetain(item)
                identities.append(item)
    finally:
        if result_array:
            CoreFoundation.CFRelease(result_array)
        CoreFoundation.CFRelease(filedata)
    return (identities, certificates)
def _load_client_cert_chain(keychain, *paths):
    """
    Load certificates and maybe keys from a number of files. Has the end goal
    of returning a CFArray containing one SecIdentityRef, and then zero or more
    SecCertificateRef objects, suitable for use as a client certificate trust
    chain.

    :param keychain: the temporary SecKeychainRef the keys are imported into.
    :param paths: file paths to load; falsy entries are skipped.
    :return: a CFArray that the caller must keep alive for the connection
        and eventually CFRelease.
    """
    # Ok, the strategy.
    #
    # This relies on knowing that macOS will not give you a SecIdentityRef
    # unless you have imported a key into a keychain. This is a somewhat
    # artificial limitation of macOS (for example, it doesn't necessarily
    # affect iOS), but there is nothing inside Security.framework that lets you
    # get a SecIdentityRef without having a key in a keychain.
    #
    # So the policy here is we take all the files and iterate them in order.
    # Each one will use SecItemImport to have one or more objects loaded from
    # it. We will also point at a keychain that macOS can use to work with the
    # private key.
    #
    # Once we have all the objects, we'll check what we actually have. If we
    # already have a SecIdentityRef in hand, fab: we'll use that. Otherwise,
    # we'll take the first certificate (which we assume to be our leaf) and
    # ask the keychain to give us a SecIdentityRef with that cert's associated
    # key.
    #
    # We'll then return a CFArray containing the trust chain: one
    # SecIdentityRef and then zero-or-more SecCertificateRef objects. The
    # responsibility for freeing this CFArray will be with the caller. This
    # CFArray must remain alive for the entire connection, so in practice it
    # will be stored with a single SSLSocket, along with the reference to the
    # keychain.
    certificates = []
    identities = []
    # Filter out bad paths.
    paths = (path for path in paths if path)
    try:
        for file_path in paths:
            new_identities, new_certs = _load_items_from_file(
                keychain, file_path
            )
            identities.extend(new_identities)
            certificates.extend(new_certs)
        # Ok, we have everything. The question is: do we have an identity? If
        # not, we want to grab one from the first cert we have.
        if not identities:
            new_identity = Security.SecIdentityRef()
            status = Security.SecIdentityCreateWithCertificate(
                keychain,
                certificates[0],
                ctypes.byref(new_identity)
            )
            _assert_no_error(status)
            identities.append(new_identity)
            # We now want to release the original certificate, as we no longer
            # need it.
            CoreFoundation.CFRelease(certificates.pop(0))
        # We now need to build a new CFArray that holds the trust chain.
        trust_chain = CoreFoundation.CFArrayCreateMutable(
            CoreFoundation.kCFAllocatorDefault,
            0,
            ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks),
        )
        for item in itertools.chain(identities, certificates):
            # ArrayAppendValue does a CFRetain on the item. That's fine,
            # because the finally block will release our other refs to them.
            CoreFoundation.CFArrayAppendValue(trust_chain, item)
        return trust_chain
    finally:
        for obj in itertools.chain(identities, certificates):
            CoreFoundation.CFRelease(obj)
/CAMELS_library-0.3.tar.gz/CAMELS_library-0.3/plots/images_EX/plot_EX.py | from pylab import *
import numpy as np
from matplotlib.ticker import ScalarFormatter
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from matplotlib.ticker import AutoMinorLocator
from matplotlib.colors import LogNorm
from matplotlib.patches import Ellipse
rcParams["mathtext.fontset"]='cm'
############################### figure ###########################
#fig=figure(figsize=(15,10)) #give dimensions to the figure
##################################################################
################################ INPUT #######################################
#axes range
##############################################################################
############################ subplots ############################
#gs = gridspec.GridSpec(2,1,height_ratios=[5,2])
#ax1=plt.subplot(gs[0])
#ax2=plt.subplot(gs[1])
#make a subplot at a given position and with some given dimensions
#ax2=axes([0.4,0.55,0.25,0.1])
#gs.update(hspace=0.0,wspace=0.4,bottom=0.6,top=1.05)
#subplots_adjust(left=None, bottom=None, right=None, top=None,
# wspace=0.5, hspace=0.5)
#set minor ticks
#ax1.xaxis.set_minor_locator(AutoMinorLocator(4))
#ax1.yaxis.set_minor_locator(AutoMinorLocator(4))
#ax1.xaxis.set_major_formatter( NullFormatter() ) #unset x label
#ax1.yaxis.set_major_formatter( NullFormatter() ) #unset y label
# custom xticks
#ax1.set_xticks([0.25, 0.5, 1.0])
#ax1.set_yticks([0.25, 0.5, 1.0])
#ax1.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter()) #for log
#ax1.get_yaxis().set_label_coords(-0.2,0.5) #align y-axis for multiple plots
##################################################################
##################### special behaviour stuff ####################
#to show error missing error bars in log scale
#ax1.set_yscale('log',nonposy='clip') #set log scale for the y-axis
#set the x-axis in %f format instead of %e
#ax1.xaxis.set_major_formatter(ScalarFormatter())
#set size of ticks
#ax1.tick_params(axis='both', which='major', labelsize=10)
#ax1.tick_params(axis='both', which='minor', labelsize=8)
#set the position of the ylabel
#ax1.yaxis.set_label_coords(-0.2, 0.4)
#set yticks in scientific notation
#ax1.ticklabel_format(axis='y',style='sci',scilimits=(1,4))
#set the x-axis in %f format instead of %e
#formatter = matplotlib.ticker.FormatStrFormatter('$%.2e$')
#ax1.yaxis.set_major_formatter(formatter)
#add two legends in the same plot
#ax5 = ax1.twinx()
#ax5.yaxis.set_major_formatter( NullFormatter() ) #unset y label
#ax5.legend([p1,p2],['0.0 eV','0.3 eV'],loc=3,prop={'size':14},ncol=1)
#set points to show in the yaxis
#ax1.set_yticks([0,1,2])
#highlight a zoomed region
#mark_inset(ax1, ax2, loc1=2, loc2=4, fc="none",edgecolor='purple')
##################################################################
############################ plot type ###########################
#standard plot
#p1,=ax1.plot(x,y,linestyle='-',marker='None')
#error bar plot with the minimum and maximum values of the error bar interval
#p1=ax1.errorbar(r,xi,yerr=[delta_xi_min,delta_xi_max],lw=1,fmt='o',ms=2,
# elinewidth=1,capsize=5,linestyle='-')
#filled area
#p1=ax1.fill_between([x_min,x_max],[1.02,1.02],[0.98,0.98],color='k',alpha=0.2)
#hatch area
#ax1.fill([x_min,x_min,x_max,x_max],[y_min,3.0,3.0,y_min],#color='k',
# hatch='X',fill=False,alpha=0.5)
#scatter plot
#p1=ax1.scatter(k1,Pk1,c='b',edgecolor='none',s=8,marker='*')
#plot with markers
#pl4,=ax1.plot(ke3,Pk3/Pke3,marker='.',markevery=2,c='r',linestyle='None')
#set size of dashed lines
#ax.plot([0, 1], [0, 1], linestyle='--', dashes=(5, 1)) #length of 5, space of 1
#image plot
#cax = ax1.imshow(densities,cmap=get_cmap('jet'),origin='lower',
# extent=[x_min, x_max, y_min, y_max],
# #vmin=min_density,vmax=max_density)
# norm = LogNorm(vmin=min_density,vmax=max_density))
#cbar = fig.colorbar(cax, ax2, ax=ax1, ticks=[-1, 0, 1]) #in ax2 colorbar of ax1
#cbar.set_label(r"$M_{\rm CSF}\/[h^{-1}M_\odot]$",fontsize=14,labelpad=-50)
#cbar.ax.tick_params(labelsize=10) #to change size of ticks
#make a polygon
#polygon = Rectangle((0.4,50.0), 20.0, 20.0, edgecolor='purple',lw=0.5,
# fill=False)
#ax1.add_artist(polygon)
####################################################################
# Axis limits (in h^-1 Mpc, presumably the CAMELS box size) - TODO confirm.
x_min, x_max = 0.0, 25.0
y_min, y_max = 0.0, 25.0
fig = figure(figsize=(7,12)) #give dimensions to the figure
# 9x4 grid of panels: one row per field, one column per feedback variant.
gs = gridspec.GridSpec(9,4)
ax1 = plt.subplot(gs[0])
ax2 = plt.subplot(gs[1])
ax3 = plt.subplot(gs[2])
ax4 = plt.subplot(gs[3])
ax5 = plt.subplot(gs[4])
ax6 = plt.subplot(gs[5])
ax7 = plt.subplot(gs[6])
ax8 = plt.subplot(gs[7])
ax9 = plt.subplot(gs[8])
ax10 = plt.subplot(gs[9])
ax11 = plt.subplot(gs[10])
ax12 = plt.subplot(gs[11])
ax13 = plt.subplot(gs[12])
ax14 = plt.subplot(gs[13])
ax15 = plt.subplot(gs[14])
ax16 = plt.subplot(gs[15])
ax17 = plt.subplot(gs[16])
ax18 = plt.subplot(gs[17])
ax19 = plt.subplot(gs[18])
ax20 = plt.subplot(gs[19])
ax21 = plt.subplot(gs[20])
ax22 = plt.subplot(gs[21])
ax23 = plt.subplot(gs[22])
ax24 = plt.subplot(gs[23])
ax25 = plt.subplot(gs[24])
ax26 = plt.subplot(gs[25])
ax27 = plt.subplot(gs[26])
ax28 = plt.subplot(gs[27])
ax29 = plt.subplot(gs[28])
ax30 = plt.subplot(gs[29])
ax31 = plt.subplot(gs[30])
ax32 = plt.subplot(gs[31])
ax33 = plt.subplot(gs[32])
ax34 = plt.subplot(gs[33])
ax35 = plt.subplot(gs[34])
ax36 = plt.subplot(gs[35])
gs.update(hspace=0.07,wspace=0.05,bottom=0.0,top=1.00)
# Hide all ticks and tick labels: the panels show images only.
for ax in [ax1,ax2,ax3,ax4,ax5,ax6,ax7,ax8,ax9,ax10,ax11,ax12,ax13,ax14,ax15,
           ax16,ax17,ax18,ax19,ax20,ax21,ax22,ax23,ax24,ax25,ax26,ax27,ax28,
           ax29,ax30,ax31,ax32,ax33,ax34,ax35,ax36]:
    ax.tick_params(
        axis='both',          # changes apply to the x-axis
        which='both',         # both major and minor ticks are affected
        bottom=False,         # ticks along the bottom edge are off
        top=False,            # ticks along the top edge are off
        left=False,
        right=False,
        labelleft=False,
        labelbottom=False)    # labels along the bottom edge are off
#ax1.set_xscale('log')
#ax1.set_yscale('log')
#ax1.set_xlim([x_min,x_max])
#ax1.set_ylim([y_min,y_max])
#ax1.set_xlabel(r'$k\/[h\/{\rm Mpc}^{-1}]$',fontsize=18)
#ax1.set_ylabel(r'$P(k)\,[(h^{-1}{\rm Mpc})^3]$',fontsize=18)
root = '/mnt/ceph/users/camels/Results/images_EX'
f_out = 'images_EX.pdf'
i = 7 #map index
f1 = '%s/Images_EX_fiducial_T.npy'%root
f2 = '%s/Images_EX_AGN_T.npy'%root
f3 = '%s/Images_EX_SN_T.npy'%root
f4 = '%s/Images_EX_noFB_T.npy'%root
T1, T2, T3, T4 = np.load(f1), np.load(f2), np.load(f3), np.load(f4)
f1 = '%s/Images_EX_fiducial_Z.npy'%root
f2 = '%s/Images_EX_AGN_Z.npy'%root
f3 = '%s/Images_EX_SN_Z.npy'%root
f4 = '%s/Images_EX_noFB_Z.npy'%root
Z1, Z2, Z3, Z4 = np.load(f1), np.load(f2), np.load(f3), np.load(f4)
Zsun = 0.02; Z1 = Z1/Zsun; Z2 = Z2/Zsun; Z3 = Z3/Zsun; Z4 = Z4/Zsun
f1 = '%s/Images_EX_fiducial_Mgas.npy'%root
f2 = '%s/Images_EX_AGN_Mgas.npy'%root
f3 = '%s/Images_EX_SN_Mgas.npy'%root
f4 = '%s/Images_EX_noFB_Mgas.npy'%root
Mg1, Mg2, Mg3, Mg4 = np.load(f1), np.load(f2), np.load(f3), np.load(f4)
f1 = '%s/Images_EX_fiducial_Mcdm.npy'%root
f2 = '%s/Images_EX_AGN_Mcdm.npy'%root
f3 = '%s/Images_EX_SN_Mcdm.npy'%root
f4 = '%s/Images_EX_noFB_Mcdm.npy'%root
Mc1, Mc2, Mc3, Mc4 = np.load(f1), np.load(f2), np.load(f3), np.load(f4)
f1 = '%s/Images_EX_fiducial_Mstar.npy'%root
f2 = '%s/Images_EX_AGN_Mstar.npy'%root
f3 = '%s/Images_EX_SN_Mstar.npy'%root
f4 = '%s/Images_EX_noFB_Mstar.npy'%root
Ms1, Ms2, Ms3, Ms4 = np.load(f1), np.load(f2), np.load(f3), np.load(f4)
f1 = '%s/Images_EX_fiducial_Vgas.npy'%root
f2 = '%s/Images_EX_AGN_Vgas.npy'%root
f3 = '%s/Images_EX_SN_Vgas.npy'%root
f4 = '%s/Images_EX_noFB_Vgas.npy'%root
Vg1, Vg2, Vg3, Vg4 = np.load(f1), np.load(f2), np.load(f3), np.load(f4)
f1 = '%s/Images_EX_fiducial_HI.npy'%root
f2 = '%s/Images_EX_AGN_HI.npy'%root
f3 = '%s/Images_EX_SN_HI.npy'%root
f4 = '%s/Images_EX_noFB_HI.npy'%root
HI1, HI2, HI3, HI4 = np.load(f1), np.load(f2), np.load(f3), np.load(f4)
f1 = '%s/Images_EX_fiducial_ne.npy'%root
f2 = '%s/Images_EX_AGN_ne.npy'%root
f3 = '%s/Images_EX_SN_ne.npy'%root
f4 = '%s/Images_EX_noFB_ne.npy'%root
ne1, ne2, ne3, ne4 = np.load(f1), np.load(f2), np.load(f3), np.load(f4)
ne1 *= 1e20; ne2 *= 1e20; ne3 *= 1e20; ne4 *= 1e20
f1 = '%s/Images_EX_fiducial_P.npy'%root
f2 = '%s/Images_EX_AGN_P.npy'%root
f3 = '%s/Images_EX_SN_P.npy'%root
f4 = '%s/Images_EX_noFB_P.npy'%root
P1, P2, P3, P4 = np.load(f1), np.load(f2), np.load(f3), np.load(f4)
min_T, max_T = 2e3, 2e7
min_Z, max_Z = 7e-10/Zsun, 7e-2/Zsun
min_Mg, max_Mg = 2e9, 1e14
min_Mc, max_Mc = 5e9, 1e15
min_Ms, max_Ms = 2e8, 1e15
min_Vg, max_Vg = 50.0, 500
min_HI, max_HI = 1e4, 1e14
min_ne, max_ne = 1e25, 1e33
min_P, max_P = 1e0, 1e11
Ms1[np.where(Ms1<min_Ms)] = min_Ms
Ms2[np.where(Ms2<min_Ms)] = min_Ms
Ms3[np.where(Ms3<min_Ms)] = min_Ms
Ms4[np.where(Ms4<min_Ms)] = min_Ms
dy = 0.112
for ax,T in zip([ax1,ax2,ax3,ax4],[T1[i], T2[i], T3[i], T4[i]]):
cax = ax.imshow(T,cmap=get_cmap('hot'),origin='lower',
interpolation='bicubic', extent=[x_min, x_max, y_min, y_max],
norm = LogNorm(vmin=min_T,vmax=max_T))
axa = axes([0.91, 0.896, 0.015, 0.105])
cbar = fig.colorbar(cax, axa, ax=ax4) #in ax2 colorbar of ax1
cbar.set_label(r"$T_{\rm g}\,[K]$",fontsize=14,labelpad=5)
cbar.ax.tick_params(labelsize=8) #to change size of ticks
for ax,Z in zip([ax5,ax6,ax7,ax8],[Z1[i], Z2[i], Z3[i], Z4[i]]):
cax = ax.imshow(Z,cmap=get_cmap('cubehelix'),origin='lower',
interpolation='bicubic', extent=[x_min, x_max, y_min, y_max],
norm = LogNorm(vmin=min_Z,vmax=max_Z))
axa = axes([0.91, 0.896-dy, 0.015, 0.105])
cbar = fig.colorbar(cax, axa, ax=ax8) #in ax2 colorbar of ax1
cbar.set_label(r"$Z/Z_\odot$",fontsize=14,labelpad=5)
cbar.ax.tick_params(labelsize=8) #to change size of ticks
for ax,Mg in zip([ax9,ax10,ax11,ax12],[Mg1[i], Mg2[i], Mg3[i], Mg4[i]]):
cax = ax.imshow(Mg,cmap=get_cmap('jet'),origin='lower',
interpolation='bicubic', extent=[x_min, x_max, y_min, y_max],
norm = LogNorm(vmin=min_Mg,vmax=max_Mg))
axa = axes([0.91, 0.896-2*dy, 0.015, 0.105])
cbar = fig.colorbar(cax, axa, ax=ax12) #in ax2 colorbar of ax1
cbar.set_label(r"$\Sigma_{\rm g}\,[hM_\odot{\rm Mpc}^{-2}]$",fontsize=12,labelpad=5)
cbar.ax.tick_params(labelsize=8) #to change size of ticks
for ax,Ms in zip([ax13,ax14,ax15,ax16],[Ms1[i], Ms2[i], Ms3[i], Ms4[i]]):
cax = ax.imshow(Ms,cmap=get_cmap('nipy_spectral'),origin='lower',
interpolation='bicubic', extent=[x_min, x_max, y_min, y_max],
norm = LogNorm(vmin=min_Ms, vmax=max_Ms))
axa = axes([0.91, 0.896-3*dy, 0.015, 0.105])
cbar = fig.colorbar(cax, axa, ax=ax12) #in ax2 colorbar of ax1
cbar.set_label(r"$\Sigma_{\rm *}\,[hM_\odot{\rm Mpc}^{-2}]$",fontsize=12,labelpad=5)
cbar.ax.tick_params(labelsize=8) #to change size of ticks
for ax,Vg in zip([ax17,ax18,ax19,ax20],[Vg1[i], Vg2[i], Vg3[i], Vg4[i]]):
cax = ax.imshow(Vg,cmap=get_cmap('rainbow'),origin='lower',
interpolation='bicubic', extent=[x_min, x_max, y_min, y_max],
vmin=min_Vg, vmax=max_Vg)
#norm = LogNorm(vmin=min_Vg, vmax=max_Vg))
axa = axes([0.91, 0.896-4*dy, 0.015, 0.105])
cbar = fig.colorbar(cax, axa, ax=ax12) #in ax2 colorbar of ax1
cbar.set_label(r"$|\vec{V}_{\rm g}|\,[{\rm km/s}]$",fontsize=14,labelpad=5)
cbar.ax.tick_params(labelsize=8) #to change size of ticks
for ax,HI in zip([ax21,ax22,ax23,ax24],[HI1[i], HI2[i], HI3[i], HI4[i]]):
cax = ax.imshow(HI,cmap=get_cmap('magma'),origin='lower',
interpolation='bicubic', extent=[x_min, x_max, y_min, y_max],
norm = LogNorm(vmin=min_HI, vmax=max_HI))
axa = axes([0.91, 0.896-5*dy, 0.015, 0.105])
cbar = fig.colorbar(cax, axa, ax=ax12) #in ax2 colorbar of ax1
cbar.set_label(r"$\Sigma_{\rm HI}\,[hM_\odot{\rm Mpc}^{-2}]$",fontsize=12,labelpad=5)
cbar.ax.tick_params(labelsize=8) #to change size of ticks
for ax,Mc in zip([ax25,ax26,ax27,ax28],[Mc1[i], Mc2[i], Mc3[i], Mc4[i]]):
cax = ax.imshow(Mc,cmap=get_cmap('gist_stern'),origin='lower',
interpolation='bicubic', extent=[x_min, x_max, y_min, y_max],
norm = LogNorm(vmin=min_Mc, vmax=max_Mc))
axa = axes([0.91, 0.896-6*dy, 0.015, 0.105])
cbar = fig.colorbar(cax, axa, ax=ax12) #in ax2 colorbar of ax1
cbar.set_label(r"$\Sigma_{\rm DM}\,[hM_\odot{\rm Mpc}^{-2}]$",fontsize=12,labelpad=5)
cbar.ax.tick_params(labelsize=8) #to change size of ticks
for ax,ne in zip([ax29,ax30,ax31,ax32],[ne1[i], ne2[i], ne3[i], ne4[i]]):
cax = ax.imshow(ne,cmap=get_cmap('gist_earth'),origin='lower',
interpolation='bicubic', extent=[x_min, x_max, y_min, y_max],
norm = LogNorm(vmin=min_ne, vmax=max_ne))
axa = axes([0.91, 0.896-7*dy, 0.015, 0.105])
cbar = fig.colorbar(cax, axa, ax=ax12) #in ax2 colorbar of ax1
cbar.set_label(r"$\Sigma_{\rm e}\,[h{\rm cm}^{-3}{\rm Mpc}^{-1}]$",fontsize=11,labelpad=5)
cbar.ax.tick_params(labelsize=8) #to change size of ticks
for ax,P in zip([ax33,ax34,ax35,ax36],[P1[i], P2[i], P3[i], P4[i]]):
cax = ax.imshow(P,cmap=get_cmap('terrain'),origin='lower',
interpolation='bicubic', extent=[x_min, x_max, y_min, y_max],
norm = LogNorm(vmin=min_P, vmax=max_P))
axa = axes([0.91, 0.896-8*dy, 0.015, 0.105])
cbar = fig.colorbar(cax, axa, ax=ax12) #in ax2 colorbar of ax1
cbar.set_label(r"$P_{\rm g}\,[h^2M_\odot{\rm kms}^{-1}{\rm kpc}^{-3}]$",fontsize=10,labelpad=5)
cbar.ax.tick_params(labelsize=8) #to change size of ticks
#cbar = fig.colorbar(cax, ax2, ax=ax1, ticks=[-1, 0, 1]) #in ax2 colorbar of ax1
#cbar.set_label(r"$M_{\rm CSF}\/[h^{-1}M_\odot]$",fontsize=14,labelpad=-50)
#cbar.ax.tick_params(labelsize=10) #to change size of ticks
#p1,=ax1.plot(x,y,linestyle='-',marker='None')
#place a label in the plot
#ax1.text(0.2,0.1, r"$z=4.0$", fontsize=22, color='k',transform=ax1.transAxes)
#legend
#ax1.legend([p1,p2],
# [r"$z=3$",
# r"$z=4$"],
# loc=0,prop={'size':18},ncol=1,frameon=True)
#columnspacing=2,labelspacing=2)
#ax1.set_title(r'$\sum m_\nu=0.0\/{\rm eV}$',position=(0.5,1.02),size=18)
#title('About as simple as it gets, folks')
#suptitle('About as simple as it gets, folks') #for title with several panels
#grid(True)
#show()
savefig(f_out, bbox_inches='tight', dpi=150)
close(fig)
###############################################################################
#some useful colors:
#'darkseagreen'
#'yellow'
#"hotpink"
#"gold"
#"fuchsia"
#"lime"
#"brown"
#"silver"
#"cyan"
#"dodgerblue"
#"darkviolet"
#"magenta"
#"deepskyblue"
#"orchid"
#"aqua"
#"darkorange"
#"coral"
#"lightgreen"
#"salmon"
#"bisque" | PypiClean |
/DI_engine-0.4.9-py3-none-any.whl/dizoo/league_demo/league_demo_ppo_config.py | from easydict import EasyDict
from torch.nn.modules.activation import Threshold
# DI-engine PPO league-training demo configuration.
league_demo_ppo_config = dict(
    exp_name="league_demo_ppo",
    env=dict(
        collector_env_num=8,    # parallel envs used for data collection
        evaluator_env_num=10,   # parallel envs used for evaluation
        n_evaluator_episode=100,
        env_type='prisoner_dilemma',  # ['zero_sum', 'prisoner_dilemma']
        stop_value=[-10.1, -5.05],  # prisoner_dilemma
    ),
    policy=dict(
        cuda=False,
        action_space='discrete',
        model=dict(
            obs_shape=2,
            action_shape=2,
            action_space='discrete',
            encoder_hidden_size_list=[32, 32],
            critic_head_hidden_size=32,
            actor_head_hidden_size=32,
            share_encoder=False,  # separate encoders for actor and critic
        ),
        learn=dict(
            update_per_collect=3,
            batch_size=32,
            learning_rate=0.00001,
            entropy_weight=0.0,
            learner=dict(log_policy=False),
        ),
        collect=dict(
            n_episode=128, unroll_len=1, discount_factor=1.0, gae_lambda=1.0, collector=dict(get_train_sample=True, )
        ),
        other=dict(
            # League self-play setup: one player of each role in one category.
            league=dict(
                player_category=['default'],
                path_policy="league_demo_ppo/policy",
                active_players=dict(
                    main_player=1,
                    main_exploiter=1,
                    league_exploiter=1,
                ),
                main_player=dict(
                    one_phase_step=200,  # steps before a snapshot/phase change
                    # opponent-selection branch probabilities
                    branch_probs=dict(
                        pfsp=0.5,  # prioritized fictitious self-play
                        sp=0.5,    # self-play
                    ),
                    strong_win_rate=0.7,
                ),
                main_exploiter=dict(
                    one_phase_step=200,
                    branch_probs=dict(main_players=1.0, ),
                    strong_win_rate=0.7,
                    min_valid_win_rate=0.3,
                ),
                league_exploiter=dict(
                    one_phase_step=200,
                    branch_probs=dict(pfsp=1.0, ),
                    strong_win_rate=0.7,
                    mutate_prob=0.5,  # chance to reset to the initial policy
                ),
                use_pretrain=False,
                use_pretrain_init_historical=False,
                # payoff table: exponentially-decayed battle statistics
                payoff=dict(
                    type='battle',
                    decay=0.99,
                    min_win_rate_games=8,
                ),
                # TrueSkill-style rating parameters
                metric=dict(
                    mu=0,
                    sigma=25 / 3,
                    beta=25 / 3 / 2,
                    tau=0.0,
                    draw_probability=0.02,
                ),
            ),
        ),
    ),
)
league_demo_ppo_config = EasyDict(league_demo_ppo_config)
# This config file can be executed by `dizoo/league_demo/league_demo_ppo_main.py` | PypiClean |
/6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/environment.py | import os
import platform
import sys
from .util import get_username, to_unicode
from . import metaflow_version
from metaflow.exception import MetaflowException
version_cache = None
class InvalidEnvironmentException(MetaflowException):
    """Raised when the current execution environment is unsuitable for the flow."""
    headline = 'Incompatible environment'
class MetaflowEnvironment(object):
    """Default ("local") execution environment.

    Subclasses override these hooks to customize how tasks are bootstrapped
    and executed in other runtimes; the base class is mostly no-op hooks plus
    helpers for code packaging and environment introspection.
    """
    TYPE = 'local'
    def __init__(self, flow):
        pass
    def init_environment(self, logger):
        """
        Run before any step decorators are initialized.
        """
        pass
    def validate_environment(self, logger):
        """
        Run before any command to validate that we are operating in
        a desired environment.
        """
        pass
    def decospecs(self):
        """
        Environment may insert decorators, equivalent to setting --with
        options on the command line.
        """
        return ()
    def bootstrap_commands(self, step_name):
        """
        A list of shell commands to bootstrap this environment in a remote runtime.
        """
        return []
    def add_to_package(self):
        """
        A list of tuples (file, arcname) to add to the job package.
        `arcname` is an alterative name for the file in the job package.
        """
        return []
    def pylint_config(self):
        """
        Environment may override pylint config.
        """
        return []
    @classmethod
    def get_client_info(cls, flow_name, metadata):
        """
        Environment may customize the information returned to the client about the environment

        Parameters
        ----------
        flow_name : str
            Name of the flow
        metadata : dict
            Metadata information regarding the task

        Returns
        -------
        str : Information printed and returned to the user
        """
        return "Local environment"
    def get_package_commands(self, code_package_url):
        # Shell snippet run on a remote worker: install deps, then download the
        # code package from S3 with up to 6 attempts (10s apart), and unpack it.
        # NOTE: the embedded backslash continuations are part of the command
        # strings and include the following line's leading whitespace.
        cmds = ["set -e",
                "echo \'Setting up task environment.\'",
                "%s -m pip install awscli click requests boto3 \
                    --user -qqq" % self._python(),
                "mkdir metaflow",
                "cd metaflow",
                "i=0; while [ $i -le 5 ]; do "
                "echo \'Downloading code package.\'; "
                "%s -m awscli s3 cp %s job.tar >/dev/null && \
                    echo \'Code package downloaded.\' && break; "
                "sleep 10; i=$((i+1));"
                "done " % (self._python(), code_package_url),
                "tar xf job.tar"
                ]
        return cmds
    def get_environment_info(self):
        """Return a dict describing this environment (goes into the code package)."""
        global version_cache
        if version_cache is None:
            version_cache = metaflow_version.get_version()
        # note that this dict goes into the code package
        # so variables here should be relatively stable (no
        # timestamps) so the hash won't change all the time
        env = {'platform': platform.system(),
               'username': get_username(),
               'production_token': os.environ.get('METAFLOW_PRODUCTION_TOKEN'),
               'runtime': os.environ.get('METAFLOW_RUNTIME_NAME', 'dev'),
               'app': os.environ.get('APP'),
               'environment_type': self.TYPE,
               'python_version': sys.version,
               'python_version_code': '%d.%d.%d' % sys.version_info[:3],
               'metaflow_version': version_cache,
               'script': os.path.basename(os.path.abspath(sys.argv[0]))}
        return env
    def executable(self, step_name):
        # Interpreter used to run the given step; the base class always uses
        # the plain "python" on PATH.
        return self._python()
    def _python(self):
        return "python"
/AppZoo-2023.4.25.19.12.44.tar.gz/AppZoo-2023.4.25.19.12.44/appzoo/app.py |
from io import BytesIO
from starlette.status import *
from starlette.responses import *
from starlette.staticfiles import StaticFiles
from fastapi import FastAPI, Form, Depends, File, UploadFile, Body, Request, BackgroundTasks
# ME
from meutils.pipe import *
from meutils.str_utils import json_loads
# logger.add('runtime_{time}.log')
class App(object):
    """Thin convenience wrapper around FastAPI.

    Example:
        from appzoo import App

        app = App()
        app_ = app.app
        app.add_route()

        if __name__ == '__main__':
            app.run(app.app_from(__file__), port=9955, debug=True)
    """
    def __init__(self, config_init=None, **kwargs):
        self.app = FastAPI(**kwargs)
        # Expose the native FastAPI decorators directly on the wrapper.
        self.get = self.app.get
        self.post = self.app.post
        self.api_route = self.app.api_route
        self.mount = self.app.mount  # mount('/subapi', subapp)
        # Global config, kept on the instance so it can be hot-updated via the
        # /app-config endpoint below.
        self.config = get_config(config_init)
        # Built-in utility endpoints.
        self.add_route_plus(self.app_config, methods=["GET", "POST"])
        # Proxy endpoint; note the service cannot reliably proxy to itself.
        self.add_route_plus(self.proxy_app, methods="POST")
    def run(self, app=None, host="0.0.0.0", port=8000, workers=1, access_log=True, debug=False, **kwargs):
        """Serve the app with uvicorn.

        :param app: pass an import string ("module:attr", see `app_from`) to
            enable hot reload (debug/reload)
        :param host:
        :param port:
        :param workers:
        :param access_log:
        :param debug: reload
        :param kwargs: forwarded to `uvicorn.run`
        :return:
        """
        import uvicorn
        # Customize uvicorn's access-log line format.
        uvicorn.config.LOGGING_CONFIG['formatters']['access']['fmt'] = f"""
        🔥 %(asctime)s - %(levelprefix)s %(client_addr)s - "%(request_line)s" %(status_code)s
        """.strip()
        uvicorn.run(
            app if app else self.app,
            host=host, port=port, workers=workers, access_log=access_log, debug=debug, **kwargs
        )
    def gunicorn_run(self, main_app='main:app_', gunicorn_conf=None):
        """Serve via gunicorn; falls back to the bundled gunicorn.conf.py."""
        if gunicorn_conf is None:
            gunicorn_conf = get_resolve_path('gunicorn.conf.py', __file__)
        assert Path(gunicorn_conf).exists()
        os.system(f"gunicorn -c {gunicorn_conf} {main_app}")
    def add_route(self, path='/xxx', func=lambda x='demo': x, method="GET", **kwargs):
        """Register `func` at `path` with the envelope produced by `_try_func`."""
        handler = self._handler(func, method, **kwargs)
        self.app.api_route(path=path, methods=[method])(handler)
    def add_route_plus(self, func, path=None, methods: Union[List, str] = "GET", **kwargs):
        """Register `func` at `path` (defaults to /<func-name-with-dashes>).

        The handler merges query params, JSON body, form files and raw body
        into a single dict before calling `func` (see `_handler_plus`).

        :param func: endpoint implementation; its annotation controls whether
            it receives the merged input as dict (default), str or tuple
        :param path: route path; derived from the function name when None
        :param methods: single method or list of methods
        :param kwargs: extra constant fields merged into every response
        :return:

        Example (cacheable endpoint takes a str so it can be hashed)::

            @lru_cache()
            def cache(kwargs: str):
                time.sleep(3)
                return kwargs

            def nocache(kwargs: dict):
                time.sleep(3)
                return kwargs
        """
        assert isinstance(func, Callable)
        if isinstance(methods, str):
            methods = [methods]
        if path is None:
            path = f"""/{func.__name__.replace('_', '-')}"""  # todo: optional prefix
        assert path.startswith('/')
        handler = self._handler_plus(func, **kwargs)
        self.app.api_route(path=path, methods=methods)(handler)
    def add_route_uploadfiles(self, path='/xxx', func=lambda x='demo': x, **kwargs):
        """Register a POST endpoint accepting multipart file uploads.

        Example::

            def read_func(**kwargs):
                logger.info(kwargs)
                return pd.read_csv(kwargs['files'][0], names=['word']).to_dict('r')

            app.add_route_uploadfiles('/upload', read_func)
        """
        handler = self._handler4files(func, **kwargs)
        self.app.api_route(path=path, methods=['POST'])(handler)
    def add_apps(self, app_dir='apps', main_func='main', **kwargs):  # todo: refactor
        """Recursively load every app under `app_dir` whose entry point is `main_func`.

        1. Modules without a `main_func` attribute are skipped (logged).
        2. A single .py file is also supported:
            appcli easy-run <app_dir>
        """
        app_home = Path(sys_path_append(app_dir))
        n = len(app_home.parts)
        pattern = Path(app_dir).name if Path(app_dir).is_file() else '*.py'
        routes = []
        for p in app_home.rglob(pattern):
            home_parts = p.parts[n:]
            route = f'/{app_home.stem}/' + "/".join(home_parts)[:-3]
            module = importlib.import_module('.'.join(home_parts)[:-3])
            if hasattr(module, main_func):
                func = getattr(module, main_func)
                self.add_route(route, func, method='POST', **kwargs)
                routes.append(route)
            else:
                logger.warning(f"Filter: {p}")
        logger.info(f"Add Routes: {routes}")
        # Index endpoint listing all routes registered for this app_dir.
        self.add_route(f'/__{app_home.stem}', lambda: routes, method='GET', **kwargs)
        return routes
    def _handler(self, func, method='GET', result_key='data', **kwargs):
        """Build an async handler for `func`.

        :param func:
        :param method:
            get -> request: Request (query params become func kwargs)
            post -> kwargs: dict (JSON body becomes func kwargs)
        :param result_key: response key holding func's return value
        :return:
        """
        if method == 'GET':
            async def handler(request: Request):
                input = request.query_params._dict
                return self._try_func(input, func, result_key, **kwargs)
        elif method == 'POST':
            async def handler(kwargs_: dict):
                input = kwargs_
                return self._try_func(input, func, result_key, **kwargs)
        else:
            async def handler():
                return {'Warning': 'method not in {"GET", "POST"}'}
        return handler
    def _handler4files(self, func, **kwargs):
        """Build an async handler that passes uploaded files (as BytesIO) to `func`."""
        async def handler(request: Request, files: List[UploadFile] = File(...)):
            input = request.query_params._dict
            for file in files:
                bio = BytesIO(await file.read())
                bio.name = file.filename
                input.setdefault('files', []).append(bio)
            return self._try_func_plus(input, func, **kwargs)
        return handler
    def _handler_plus(self, func, **kwargs):  # todo: support all payload types
        """Build an async handler merging query params, JSON, files and raw body."""
        async def handler(request: Request):
            input = request.query_params._dict
            body = await request.body()
            if body.startswith(b'{'):  # main branch: json={}
                input.update(json_loads(body))
            # default "" guards against a missing Content-Type header
            # (`'...' in None` would raise TypeError)
            elif request.method == 'POST' and 'multipart/form-data' in request.headers.get("Content-Type", ""):  # files={'files': open('xx')}
                form = await request.form()
                for file in form.getlist('files'):  # files
                    bio = BytesIO(await file.read())
                    bio.name = file.filename
                    input.setdefault('files', []).append(bio)
            elif body:  # data:dict => application/x-www-form-urlencoded
                input.update({'__data__': body})  # non-JSON request body
            # stringify/tuplify the input so `func` can be cached (hashable arg)
            if 'str' in str(func.__annotations__):  # todo: drop once cache supports dict
                input = str(input)
            elif 'tuple' in str(func.__annotations__):
                input = tuple(input.items())
            return self._try_func_plus(input, func, **kwargs)
        return handler
    @staticmethod
    def _try_func(input, func, result_key='data', **kwargs):  # todo: could be a decorator
        """Call `func(**input)` and wrap the result in a standard envelope.

        Pass __debug=1 in the input to get request params, timestamp and the
        full traceback on error.
        """
        __debug = input.pop('__debug', 0)
        output = OrderedDict()
        output['error_code'] = 0
        output['error_msg'] = "SUCCESS"
        if __debug:
            output['requestParams'] = input
            output['timestamp'] = time.ctime()
        try:
            output[result_key] = func(**input)
        except Exception as error:
            output['error_code'] = 1  # generic error
            # str(error): a raw Exception object is not JSON-serializable and
            # would make FastAPI fail to encode the response.
            output['error_msg'] = traceback.format_exc().strip() if __debug else str(error)
        finally:
            output.update(kwargs)
        return output
    @staticmethod
    def _try_func_plus(input, func, **kwargs):
        """Call `func(input)` and wrap the result in a {code, msg, data} envelope."""
        output = OrderedDict(code=0, msg="SUCCESS", **kwargs)
        try:
            output['data'] = func(input)
        except Exception as error:
            output['code'] = 1  # generic error
            output['data'] = kwargs.get('data')
            # With no fallback data return the full traceback, otherwise just the
            # message; str(error) keeps the payload JSON-serializable.
            output['msg'] = traceback.format_exc().strip().split('\n') \
                if output['data'] is None else str(error)
            logger.error(output['msg'])
        return output
    def app_from(self, file=__file__, app='app_'):
        """Return the "module:attr" import string uvicorn needs for hot reload."""
        return f"{Path(file).stem}:{app}"
    def app_config(self, kwargs: str):
        """GET/POST endpoint: read or hot-update the global config."""
        _ = json_loads(kwargs)
        if _ and _ != self.config:  # update configuration
            self.config.update(_)
            logger.warning("Configuration item is modified !!!")
        return self.config
    def proxy_app(self, kwargs: dict):
        """Proxy endpoint: forward the request described by `kwargs`.

        {
            "url": "http://0.0.0.0:8000/xx",
            "method": "post",
            "json": {"a": 1}
        }
        """
        r = requests.request(**kwargs)
        return r.json()
if __name__ == '__main__':
    # Smoke test: build an app with two echo routes and serve it on :9000.
    import uvicorn  # NOTE(review): unused here - App.run imports uvicorn itself
    app = App()
    app_ = app.app
    # Echo endpoints: return the request parameters under a custom result key.
    app.add_route('/get', lambda **kwargs: kwargs, method="GET", result_key="GetResult")
    app.add_route('/post', lambda **kwargs: kwargs, method="POST", result_key="PostResult")
    app.run(port=9000, debug=False, reload=False, access_log=True)
# app.run(f"{app.app_from(__file__)}", port=9000, debug=False, reload=False) # app_的在 __main__ 之上 | PypiClean |
/BlueWhale3_Text-1.6.0-py3-none-any.whl/orangecontrib/text/widgets/owscoredocuments.py | import re
from collections import Counter
from contextlib import contextmanager
from inspect import signature
from typing import Callable, List, Tuple, Union
import numpy as np
from AnyQt.QtCore import (
QItemSelection,
QItemSelectionModel,
QSortFilterProxyModel,
Qt,
Signal,
)
from AnyQt.QtWidgets import (
QButtonGroup,
QGridLayout,
QHeaderView,
QLineEdit,
QRadioButton,
QTableView,
)
from pandas import isnull
from sklearn.metrics.pairwise import cosine_similarity
# todo: uncomment when minimum version of Orange is 3.29.2
# from orangecanvas.gui.utils import disconnected
from orangewidget import gui
from Orange.data import ContinuousVariable, Domain, StringVariable, Table
from Orange.util import wrap_callback
from Orange.widgets.settings import ContextSetting, PerfectDomainContextHandler, Setting
from Orange.widgets.utils.annotated_data import create_annotated_table
from Orange.widgets.utils.concurrent import ConcurrentWidgetMixin, TaskState
from Orange.widgets.utils.itemmodels import PyTableModel, TableModel
from Orange.widgets.widget import Input, Msg, Output, OWWidget
from orangecontrib.text import Corpus
from orangecontrib.text.preprocess import BaseNormalizer, BaseTransformer
from orangecontrib.text.vectorization.document_embedder import (
LANGS_TO_ISO,
DocumentEmbedder,
)
from orangecontrib.text.i18n_config import *
def __(key):
    """Translate a widget-local i18n key, prefixing the module namespace."""
    return i18n.t(f"text.owscoredocuments.{key}")
# todo: remove when minimum version of Orange is 3.29.2
@contextmanager
def disconnected(signal, slot, type=Qt.UniqueConnection):
signal.disconnect(slot)
try:
yield
finally:
signal.connect(slot, type)
def _word_frequency(corpus: Corpus, words: List[str], callback: Callable) -> np.ndarray:
    """Count how many times each word appears in every document of the corpus.

    Returns an array of shape (n_documents, n_words); `callback` receives the
    fraction of documents processed.
    """
    tokens = corpus.tokens
    n_docs = len(tokens)
    rows = []
    for done, doc_tokens in enumerate(tokens, start=1):
        frequencies = Counter(doc_tokens)
        rows.append([frequencies.get(word, 0) for word in words])
        callback(done / n_docs)
    return np.array(rows)
def _word_appearance(
    corpus: Corpus, words: List[str], callback: Callable
) -> np.ndarray:
    """Mark, for every document, whether each word appears in it at all.

    Returns a boolean array of shape (n_documents, n_words); `callback`
    receives the fraction of documents processed.
    """
    tokens = corpus.tokens
    n_docs = len(tokens)
    rows = []
    for done, doc_tokens in enumerate(tokens, start=1):
        present = set(doc_tokens)
        rows.append([word in present for word in words])
        callback(done / n_docs)
    return np.array(rows)
def _embedding_similarity(
    corpus: Corpus,
    words: List[str],
    callback: Callable,
    embedding_language: str,
) -> np.ndarray:
    """Cosine similarity between document embeddings and word embeddings.

    Returns an array of shape (n_documents, n_words). Progress goes up to 0.8
    only, one tick per embedded document/word.
    """
    ticks = iter(np.linspace(0, 0.8, len(corpus) + len(words)))
    # TODO: currently embedding report success unify them to report progress float
    def emb_cb(sucess: bool):
        if sucess:
            callback(next(ticks))
    language = LANGS_TO_ISO[embedding_language]
    # make sure there will be only embeddings in X after calling the embedder
    corpus = Corpus.from_table(Domain([], metas=corpus.domain.metas), corpus)
    emb = DocumentEmbedder(language)
    documet_embeddings, skipped = emb(corpus, emb_cb)
    # the embedder may skip documents it cannot embed; scoring assumes none were
    assert skipped is None
    word_embeddings = np.array(emb([[w] for w in words], emb_cb))
    return cosine_similarity(documet_embeddings.X, word_embeddings)
# Registry of the scoring methods offered by the widget.
SCORING_METHODS = {
    # key: (Method's name, Method's function, Tooltip)
    "word_frequency": (
        __("label.word_count"),
        _word_frequency,
        __("label.word_count_tip"),
    ),
    "word_appearance": (
        __("label.word_presence"),
        _word_appearance,
        __("label.word_presence_tip"),
    ),
    "embedding_similarity": (
        __("label.similarity"),
        _embedding_similarity,
        __("label.similarity_tip"),
    ),
}
# Extra per-method GUI options: method key -> (setting name, combobox items).
ADDITIONAL_OPTIONS = {
    "embedding_similarity": ("embedding_language", list(LANGS_TO_ISO.keys()))
}
# Per-document aggregation over word scores. NOTE(review): keys are English
# while the aggregation combobox shows translated items - presumably mapped by
# index via the `aggregation: int` setting; verify against the commit logic.
AGGREGATIONS = {
    "Mean": np.mean,
    "Median": np.median,
    "Min": np.min,
    "Max": np.max,
}
def _preprocess_words(
    corpus: Corpus, words: List[str], callback: Callable
) -> List[str]:
    """
    Apply the corpus's word-level preprocessors (transformers and normalizers)
    to `words` so they can be matched against the corpus's tokens.
    """
    # Workaround: wrap the words into a one-column corpus so the corpus
    # preprocessors can be reused on them.
    # TODO: when preprocessors accept plain string lists, drop this wrapper.
    words_feature = StringVariable("words")
    words_c = Corpus(
        Domain([], metas=[words_feature]),
        metas=np.array([[w] for w in words]),
        text_features=[words_feature],
    )
    # Only transformers and normalizers operate on the word level.
    word_level_pps = [
        pp
        for pp in corpus.used_preprocessor.preprocessors
        if isinstance(pp, (BaseTransformer, BaseNormalizer))
    ]
    for step, pp in enumerate(word_level_pps, start=1):
        words_c = pp(words_c)
        callback(step / len(word_level_pps))
    # a word may be tokenized away entirely - keep only non-empty results
    return [tokens[0] for tokens in words_c.tokens if len(tokens)]
def _run(
    corpus: Corpus,
    words: List[str],
    scoring_methods: List[str],
    aggregation: str,
    additional_params: dict,
    state: TaskState,
) -> None:
    """
    Perform word scoring with selected scoring methods

    Parameters
    ----------
    corpus
        Corpus of documents
    words
        List of words used for scoring
    scoring_methods
        Methods to score documents with
    aggregation
        Aggregation applied for each document on word scores
    additional_params
        Additional prameters for scores (e.g. embedding needs text language)
    state
        TaskState for reporting the task status and giving partial results
    """
    # Progress callback shared by all steps; raising aborts the task when the
    # user requests interruption.
    def callback(i: float) -> None:
        state.set_progress_value(i * 100)
        if state.is_interruption_requested():
            raise Exception
    # Each scoring method gets an equal share of the progress bar.
    cb_part = 1 / (len(scoring_methods) + 1)  # +1 for preprocessing
    words = _preprocess_words(corpus, words, wrap_callback(callback, end=cb_part))
    if len(words) == 0:
        raise Exception(
            "Empty word list after preprocessing. Please provide a valid set of words."
        )
    for i, sm in enumerate(scoring_methods):
        scoring_method = SCORING_METHODS[sm][1]
        # Forward only the additional params this method actually accepts.
        sig = signature(scoring_method)
        add_params = {k: v for k, v in additional_params.items() if k in sig.parameters}
        scs = scoring_method(
            corpus,
            words,
            wrap_callback(callback, start=(i + 1) * cb_part, end=(i + 2) * cb_part),
            **add_params
        )
        # Collapse per-word scores into one score per document.
        scs = AGGREGATIONS[aggregation](scs, axis=1)
        # Report each method's scores as soon as they are available.
        state.set_partial_result((sm, aggregation, scs))
class SelectionMethods:
    """Document-selection modes offered by the widget (persisted as ints)."""
    NONE = 0
    ALL = 1
    MANUAL = 2
    N_BEST = 3
    ITEMS = __("item.none"), __("item.all"), __("item.manual"), __("item.top_documents")
class ScoreDocumentsTableView(QTableView):
    """Sortable table of documents with bar-rendered score columns.

    Emits `pressedAny` on every mouse press so the widget can switch the
    selection mode to manual.
    """
    pressedAny = Signal()
    def __init__(self):
        super().__init__(
            sortingEnabled=True,
            editTriggers=QTableView.NoEditTriggers,
            selectionMode=QTableView.ExtendedSelection,
            selectionBehavior=QTableView.SelectRows,
            cornerButtonEnabled=False,
        )
        # scores are drawn as colored bars
        self.setItemDelegate(gui.ColoredBarItemDelegate(self))
        self.verticalHeader().setDefaultSectionSize(22)
    def update_column_widths(self) -> None:
        """
        Set columns widths such that each score column has width based on size
        hint and all scores columns have the same width.
        """
        header = self.horizontalHeader()
        # widest hint among all score columns (never below 0)
        col_width = max(
            [0]
            + [
                max(self.sizeHintForColumn(i), header.sectionSizeHint(i))
                for i in range(1, self.model().columnCount())
            ]
        )
        for i in range(1, self.model().columnCount()):
            header.resizeSection(i, col_width)
            header.setSectionResizeMode(i, QHeaderView.Fixed)
        # document title column is one that stretch
        header.setSectionResizeMode(0, QHeaderView.Stretch)
    def mousePressEvent(self, event):
        super().mousePressEvent(event)
        self.pressedAny.emit()
class ScoreDocumentsProxyModel(QSortFilterProxyModel):
    """Proxy model that sorts document titles in natural (alphanumeric) order."""
    @staticmethod
    def _convert(text: str) -> Union[str, int]:
        """Int-cast digit chunks, lower-case the rest, for natural ordering."""
        if text.isdigit():
            return int(text)
        return text.lower()
    @staticmethod
    def _alphanum_key(key: str) -> List[Union[str, int]]:
        chunks = re.split("([0-9]+)", key)
        return [ScoreDocumentsProxyModel._convert(chunk) for chunk in chunks]
    def lessThan(self, left_ind, right_ind):
        """
        Sort strings of the first column naturally: Document 2 < Document 12
        """
        if left_ind.column() == 0 and right_ind.column() == 0:
            source = self.sourceModel()
            left = source.data(left_ind, role=Qt.DisplayRole)
            right = source.data(right_ind, role=Qt.DisplayRole)
            if left is not None and right is not None:
                return self._alphanum_key(left) < self._alphanum_key(right)
        return super().lessThan(left_ind, right_ind)
class ScoreDocumentsTableModel(PyTableModel):
    """Table model exposing score values as bar ratios and meta-coloring titles."""
    def data(self, index, role=Qt.DisplayRole):
        # Both the displayed text and the bar ratio come from the raw value.
        if role == Qt.DisplayRole or role == gui.BarRatioRole:
            return super().data(index, Qt.EditRole)
        # Paint the document-title column like a meta column.
        if role == Qt.BackgroundColorRole and index.column() == 0:
            return TableModel.ColorForRole[TableModel.Meta]
        return super().data(index, role)
class OWScoreDocuments(OWWidget, ConcurrentWidgetMixin):
    name = __("name")
    description = ""
    icon = "icons/ScoreDocuments.svg"
    priority = 500
    buttons_area_orientation = Qt.Vertical
    # default order - table sorted in input order
    DEFAULT_SORTING = (-1, Qt.AscendingOrder)
    settingsHandler = PerfectDomainContextHandler()
    # persisted settings
    auto_commit: bool = Setting(True)
    aggregation: int = Setting(0)  # index into AGGREGATIONS
    # which scoring methods are enabled (keys of SCORING_METHODS)
    word_frequency: bool = Setting(True)
    word_appearance: bool = Setting(False)
    embedding_similarity: bool = Setting(False)
    embedding_language: int = Setting(0)  # index into LANGS_TO_ISO
    # (column index, Qt sort order); DEFAULT_SORTING means input order
    sort_column_order: Tuple[int, int] = Setting(DEFAULT_SORTING)
    selected_rows: List[int] = ContextSetting([], schema_only=True)
    sel_method: int = ContextSetting(SelectionMethods.N_BEST)
    n_selected: int = ContextSetting(3)
    # widget input/output signal declarations
    class Inputs:
        corpus = Input("Corpus", Corpus, label=i18n.t("text.common.corpus"))
        words = Input("Words", Table, label=i18n.t("text.common.words"))
    class Outputs:
        selected_documents = Output("Selected documents", Corpus, default=True,
                                    label=i18n.t("text.common.selected_document"))
        corpus = Output("Corpus", Corpus, label=i18n.t("text.common.corpus"))
    class Warning(OWWidget.Warning):
        corpus_not_normalized = Msg(__("msg.corpus_not_normalized"))
    class Error(OWWidget.Error):
        # generic carrier for error messages raised by the scoring task
        custom_err = Msg("{}")
    def __init__(self):
        OWWidget.__init__(self)
        ConcurrentWidgetMixin.__init__(self)
        self._setup_control_area()
        self._setup_main_area()
        self.corpus = None  # current input corpus
        self.words = None   # current list of scoring words
        # saves scores avoid multiple computation of the same score
        self.scores = {}
    def _setup_control_area(self) -> None:
        """Build the left control pane: method checkboxes, aggregation combo,
        selection-mode radio buttons and the auto-commit button."""
        # one checkbox per scoring method, plus any method-specific combo
        box = gui.widgetBox(self.controlArea, __("box.method"))
        for value, (n, _, tt) in SCORING_METHODS.items():
            b = gui.hBox(box, margin=0)
            gui.checkBox(
                b,
                self,
                value,
                label=n,
                callback=self.__setting_changed,
                tooltip=tt,
            )
            if value in ADDITIONAL_OPTIONS:
                # rebinds `value` to the option's setting name on purpose
                value, options = ADDITIONAL_OPTIONS[value]
                gui.comboBox(
                    b,
                    self,
                    value,
                    items=options,
                    callback=self.__setting_changed,
                )
        box = gui.widgetBox(self.controlArea, __("box.agg"))
        # items are translated; the `aggregation` setting stores the index
        gui.comboBox(
            box,
            self,
            "aggregation",
            items=[i18n.t("text.item.mean"), i18n.t("text.item.median"), i18n.t("text.item.min"),
                   i18n.t("text.item.max")],
            callback=self.__setting_changed,
        )
        gui.rubber(self.controlArea)
        # select words box
        box = gui.vBox(self.buttonsArea, __("box.select_documents"))
        grid = QGridLayout()
        grid.setContentsMargins(0, 0, 0, 0)
        self._sel_method_buttons = QButtonGroup()
        for method, label in enumerate(SelectionMethods.ITEMS):
            button = QRadioButton(label)
            button.setChecked(method == self.sel_method)
            grid.addWidget(button, method, 0)
            self._sel_method_buttons.addButton(button, method)
        self._sel_method_buttons.buttonClicked[int].connect(self.__set_selection_method)
        # spin box for the "top N documents" mode; using it activates N_BEST
        spin = gui.spin(
            box,
            self,
            "n_selected",
            1,
            999,
            addToLayout=False,
            callback=lambda: self.__set_selection_method(SelectionMethods.N_BEST),
        )
        grid.addWidget(spin, 3, 1)
        box.layout().addLayout(grid)
        # autocommit
        gui.auto_send(self.buttonsArea, self, "auto_commit")
    def _setup_main_area(self) -> None:
        """Build the main pane: filter line edit plus the scores table
        (model -> natural-sort proxy -> view)."""
        self._filter_line_edit = QLineEdit(
            textChanged=self.__on_filter_changed, placeholderText=__("placeholder.filter")
        )
        self.mainArea.layout().addWidget(self._filter_line_edit)
        self.model = model = ScoreDocumentsTableModel(parent=self)
        model.setHorizontalHeaderLabels(["Document"])
        # any click in the table switches selection to manual mode
        def select_manual():
            self.__set_selection_method(SelectionMethods.MANUAL)
        self.view = view = ScoreDocumentsTableView()
        view.pressedAny.connect(select_manual)
        self.mainArea.layout().addWidget(view)
        # by default data are sorted in the Table order
        header = self.view.horizontalHeader()
        header.sectionClicked.connect(self.__on_horizontal_header_clicked)
        # proxy filters on the title column, case-insensitively
        proxy_model = ScoreDocumentsProxyModel()
        proxy_model.setFilterKeyColumn(0)
        proxy_model.setFilterCaseSensitivity(False)
        view.setModel(proxy_model)
        view.model().setSourceModel(self.model)
        self.view.selectionModel().selectionChanged.connect(self.__on_selection_change)
def __on_filter_changed(self) -> None:
model = self.view.model()
model.setFilterFixedString(self._filter_line_edit.text().strip())
    def __on_horizontal_header_clicked(self, index: int):
        """Remember the clicked column and its sort order, then reapply the
        selection and, when needed, resend the output in the new row order.

        :param index: index of the clicked column in the table header
        """
        header = self.view.horizontalHeader()
        self.sort_column_order = (index, header.sortIndicatorOrder())
        self._select_rows()
        # when sorting change output table must consider the new order
        # call explicitly since selection in table is not changed
        # NOTE: `and` binds tighter than `or`, i.e. this reads as
        # (MANUAL and selected_rows) or ALL.
        if (
            self.sel_method == SelectionMethods.MANUAL
            and self.selected_rows
            or self.sel_method == SelectionMethods.ALL
        ):
            # retrieve selection in new order
            self.selected_rows = self.get_selected_indices()
            self._send_output()
    def __on_selection_change(self):
        """Store the new selection (in view order) and send it downstream."""
        self.selected_rows = self.get_selected_indices()
        self._send_output()
    def __set_selection_method(self, method: int):
        """Switch the selection mode, sync the radio buttons, reselect rows."""
        self.sel_method = method
        self._sel_method_buttons.button(method).setChecked(True)
        self._select_rows()
    @Inputs.corpus
    def set_data(self, corpus: Corpus) -> None:
        """Handle a new corpus on the input.

        Closes the previous context, warns when the corpus is not normalized,
        resets the selection and restarts score computation.
        """
        self.closeContext()
        self.Warning.corpus_not_normalized.clear()
        if corpus is None:
            self.corpus = None
            self._clear_and_run()
            return
        if not self._is_corpus_normalized(corpus):
            self.Warning.corpus_not_normalized()
        self.corpus = corpus
        self.selected_rows = []
        self.openContext(corpus)
        self._sel_method_buttons.button(self.sel_method).setChecked(True)
        self._clear_and_run()
    @staticmethod
    def _get_word_attribute(words: Table) -> "Optional[List[str]]":
        """Extract the list of words from a words table.

        Prefers a string variable explicitly marked with the attribute
        ``type == "words"``; otherwise falls back to the string column whose
        values have the lowest average word count. Returns ``None`` when the
        table has no string variables at all.
        """
        attrs = [
            a
            for a in words.domain.metas + words.domain.variables
            if isinstance(a, StringVariable)
        ]
        if not attrs:
            return None
        words_attr = next(
            (a for a in attrs if a.attributes.get("type", "") == "words"), None
        )
        if words_attr:
            return words.get_column_view(words_attr)[0].tolist()
        else:
            # find the most suitable attribute - one with lowest average text
            # length - counted as a number of words
            def avg_len(attr):
                array_ = words.get_column_view(attr)[0]
                # Drop missing values before averaging.
                array_ = array_[~isnull(array_)]
                return sum(len(a.split()) for a in array_) / len(array_)

            attr = sorted(attrs, key=avg_len)[0]
            return words.get_column_view(attr)[0].tolist()
@Inputs.words
def set_words(self, words: Table) -> None:
if words is None or len(words.domain.variables + words.domain.metas) == 0:
self.words = None
else:
self.words = self._get_word_attribute(words)
self._clear_and_run()
def _gather_scores(self) -> Tuple[np.ndarray, List[str]]:
"""
Gather scores and labels for the dictionary that holds scores
Returns
-------
scores
Scores table
labels
The list with score names for the header and variables names
"""
if self.corpus is None:
return np.empty((0, 0)), []
aggregation = self._get_active_aggregation()
scorers = self._get_active_scorers()
methods = [m for m in scorers if (m, aggregation) in self.scores]
scores = [self.scores[(m, aggregation)] for m in methods]
scores = np.column_stack(scores) if scores else np.empty((len(self.corpus), 0))
labels = [SCORING_METHODS[m][0] for m in methods]
return scores, labels
    def _send_output(self) -> None:
        """
        Create corpus with scores and output it
        """
        if self.corpus is None:
            # No input: clear both outputs.
            self.Outputs.corpus.send(None)
            self.Outputs.selected_documents.send(None)
            return
        scores, labels = self._gather_scores()
        if labels:
            # Append one continuous meta column per computed score.
            d = self.corpus.domain
            domain = Domain(
                d.attributes,
                d.class_var,
                metas=d.metas + tuple(ContinuousVariable(l) for l in labels),
            )
            out_corpus = Corpus(
                domain,
                self.corpus.X,
                self.corpus.Y,
                np.hstack([self.corpus.metas, scores]),
            )
            # Keep the original corpus' preprocessing on the derived corpus.
            Corpus.retain_preprocessing(self.corpus, out_corpus)
        else:
            out_corpus = self.corpus
        # Full corpus annotated with the selection, plus the selected subset.
        self.Outputs.corpus.send(create_annotated_table(out_corpus, self.selected_rows))
        self.Outputs.selected_documents.send(
            out_corpus[self.selected_rows] if self.selected_rows else None
        )
    def _fill_table(self) -> None:
        """
        Fill the table in the widget with scores and document names
        """
        if self.corpus is None:
            self.model.clear()
            return
        scores, labels = self._gather_scores()
        labels = ["Document"] + labels
        titles = self.corpus.titles.tolist()
        # clearing selection and sorting to prevent SEGFAULT on model.wrap
        self.view.horizontalHeader().setSortIndicator(-1, Qt.AscendingOrder)
        # Suppress selection callbacks while the model content is swapped out.
        with disconnected(
            self.view.selectionModel().selectionChanged, self.__on_selection_change
        ):
            self.view.clearSelection()
        self.model.wrap([[c] + s for c, s in zip(titles, scores.tolist())])
        self.model.setHorizontalHeaderLabels(labels)
        self.view.update_column_widths()
        if self.model.columnCount() > self.sort_column_order[0]:
            # if there are not enough columns, do not apply the sorting from
            # settings: the saved sort may target a score column while scores
            # are still computing (the table is first filled with names only)
            self.view.horizontalHeader().setSortIndicator(*self.sort_column_order)
        self._select_rows()
    def _fill_and_output(self) -> None:
        """Fill the table in the widget and send the output downstream."""
        self._fill_table()
        self._send_output()
    def _clear_and_run(self) -> None:
        """Clear cached scores, cancel any running task, refresh and commit."""
        self.scores = {}
        self.cancel()
        self._fill_and_output()
        self.commit()
    def __setting_changed(self) -> None:
        # Any scorer/aggregation change triggers a (possibly deferred) commit.
        self.commit()
    def commit(self) -> None:
        """Start computing scores that are missing from the cache; if all
        active scores are cached, just refresh the table and the output."""
        self.Error.custom_err.clear()
        self.cancel()
        if self.corpus is not None and self.words is not None:
            scorers = self._get_active_scorers()
            aggregation = self._get_active_aggregation()
            # Only scores absent from the cache need recomputation.
            new_scores = [s for s in scorers if (s, aggregation) not in self.scores]
            if new_scores:
                self.start(
                    _run,
                    self.corpus,
                    self.words,
                    new_scores,
                    aggregation,
                    # Resolve each additional-option setting to its value.
                    {
                        v: items[getattr(self, v)]
                        for v, items in ADDITIONAL_OPTIONS.values()
                    },
                )
            else:
                self._fill_and_output()
    def on_done(self, _: None) -> None:
        """Background task finished: push the (already cached) results out."""
        self._send_output()
    def on_partial_result(self, result: Tuple[str, str, np.ndarray]) -> None:
        """Cache one freshly computed (method, aggregation) score and refresh
        the table so results appear incrementally."""
        sc_method, aggregation, scores = result
        self.scores[(sc_method, aggregation)] = scores
        self._fill_table()
    def on_exception(self, ex: Exception) -> None:
        """Show the task's error and fall back to whatever is cached."""
        self.Error.custom_err(ex)
        self._fill_and_output()
def _get_active_scorers(self) -> List[str]:
"""
Gather currently active/selected scores
Returns
-------
List with selected scores names
"""
return [attr for attr in SCORING_METHODS if getattr(self, attr)]
def _get_active_aggregation(self) -> str:
"""
Gather currently active/selected aggregation
Returns
-------
Selected aggregation name
"""
return list(AGGREGATIONS.keys())[self.aggregation]
@staticmethod
def _is_corpus_normalized(corpus: Corpus) -> bool:
"""
Check if corpus is normalized.
"""
return any(
isinstance(pp, BaseNormalizer)
for pp in corpus.used_preprocessor.preprocessors
)
def get_selected_indices(self) -> List[int]:
# get indices in table's order - that the selected output table have same order
selected_rows = sorted(
self.view.selectionModel().selectedRows(), key=lambda idx: idx.row()
)
return [self.view.model().mapToSource(r).row() for r in selected_rows]
    def _select_rows(self):
        """Apply the current selection method to the view's selection model."""
        proxy_model = self.view.model()
        n_rows, n_columns = proxy_model.rowCount(), proxy_model.columnCount()
        if self.sel_method == SelectionMethods.NONE:
            selection = QItemSelection()
        elif self.sel_method == SelectionMethods.ALL:
            selection = QItemSelection(
                proxy_model.index(0, 0), proxy_model.index(n_rows - 1, n_columns - 1)
            )
        elif self.sel_method == SelectionMethods.MANUAL:
            selection = QItemSelection()
            new_sel = []
            for row in self.selected_rows:
                if row < n_rows:
                    new_sel.append(row)
                    # Stored rows are in source-model coordinates; map them
                    # through the proxy before merging into the selection.
                    _selection = QItemSelection(
                        self.model.index(row, 0), self.model.index(row, n_columns - 1)
                    )
                    selection.merge(
                        proxy_model.mapSelectionFromSource(_selection),
                        QItemSelectionModel.Select,
                    )
            # selected rows must be updated when the same dataset with less rows
            # appear at the input - it is not handled by selectionChanged
            # in cases when all selected rows missing in new table
            self.selected_rows = new_sel
        elif self.sel_method == SelectionMethods.N_BEST:
            n_sel = min(self.n_selected, n_rows)
            selection = QItemSelection(
                proxy_model.index(0, 0), proxy_model.index(n_sel - 1, n_columns - 1)
            )
        else:
            raise NotImplementedError
        self.view.selectionModel().select(selection, QItemSelectionModel.ClearAndSelect)
if __name__ == "__main__":
    # Manual preview: score a sample corpus against a hand-made words table.
    from orangewidget.utils.widgetpreview import WidgetPreview
    from orangecontrib.text import preprocess

    corpus = Corpus.from_file("book-excerpts")
    # corpus.set_title_variable("Text")
    pp_list = [
        preprocess.LowercaseTransformer(),
        preprocess.StripAccentsTransformer(),
        preprocess.SnowballStemmer(),
    ]
    for p in pp_list:
        corpus = p(corpus)
    # Build a one-column words table marked with the "words" type attribute
    # so it is picked up directly (see _get_word_attribute).
    w = StringVariable("Words")
    w.attributes["type"] = "words"
    words = ["house", "doctor", "boy", "way", "Rum"]
    words = Table(
        Domain([], metas=[w]),
        np.empty((len(words), 0)),
        metas=np.array(words).reshape((-1, 1)),
    )
    WidgetPreview(OWScoreDocuments).run(set_data=corpus, set_words=words)
/INGInious-0.8.7.tar.gz/INGInious-0.8.7/inginious/frontend/static/js/codemirror/mode/spreadsheet/spreadsheet.js |
// Module/loader boilerplate: register with CommonJS, AMD, or the global
// CodeMirror object, whichever environment is present.
(function(mod) {
  if (typeof exports == "object" && typeof module == "object") // CommonJS
    mod(require("../../lib/codemirror"));
  else if (typeof define == "function" && define.amd) // AMD
    define(["../../lib/codemirror"], mod);
  else // Plain browser env
    mod(CodeMirror);
})(function(CodeMirror) {
"use strict";

CodeMirror.defineMode("spreadsheet", function () {
  return {
    // Tokenizer state: the active quote character (while inside a string)
    // and a stack of open constructs ("string" / "characterClass").
    startState: function () {
      return {
        stringType: null,
        stack: []
      };
    },
    token: function (stream, state) {
      if (!stream) return;

      //check for state changes
      if (state.stack.length === 0) {
        //strings
        if ((stream.peek() == '"') || (stream.peek() == "'")) {
          // Remember which quote opened the string so only the matching
          // quote can close it.
          state.stringType = stream.peek();
          stream.next(); // Skip quote
          state.stack.unshift("string");
        }
      }

      //return state
      //stack has
      switch (state.stack[0]) {
      case "string":
        // Consume until the matching (unescaped) closing quote or EOL;
        // an unterminated string stays on the stack across lines.
        while (state.stack[0] === "string" && !stream.eol()) {
          if (stream.peek() === state.stringType) {
            stream.next(); // Skip quote
            state.stack.shift(); // Clear flag
          } else if (stream.peek() === "\\") {
            // Skip the backslash and the escaped character.
            stream.next();
            stream.next();
          } else {
            stream.match(/^.[^\\\"\']*/);
          }
        }
        return "string";

      case "characterClass":
        // Inside [...]: consume plain characters and escapes until the
        // closing bracket pops the state.
        while (state.stack[0] === "characterClass" && !stream.eol()) {
          if (!(stream.match(/^[^\]\\]+/) || stream.match(/^\\./)))
            state.stack.shift();
        }
        return "operator";
      }

      var peek = stream.peek();

      //no stack
      switch (peek) {
      case "[":
        stream.next();
        state.stack.unshift("characterClass");
        return "bracket";
      case ":":
        stream.next();
        return "operator";
      case "\\":
        // Backslash followed by letters is styled as string-2; a lone
        // backslash falls back to an atom.
        if (stream.match(/\\[a-z]+/)) return "string-2";
        else {
          stream.next();
          return "atom";
        }
      case ".":
      case ",":
      case ";":
      case "*":
      case "-":
      case "+":
      case "^":
      case "<":
      case "/":
      case "=":
        stream.next();
        return "atom";
      case "$":
        stream.next();
        return "builtin";
      }

      if (stream.match(/\d+/)) {
        // Digits immediately followed by word characters (e.g. "12abc")
        // are flagged as an error token.
        if (stream.match(/^\w+/)) return "error";
        return "number";
      } else if (stream.match(/^[a-zA-Z_]\w*/)) {
        // Identifier; styled as a keyword when a "(" or "." follows
        // (lookahead only -- the match does not consume).
        if (stream.match(/(?=[\(.])/, false)) return "keyword";
        return "variable-2";
      } else if (["[", "]", "(", ")", "{", "}"].indexOf(peek) != -1) {
        stream.next();
        return "bracket";
      } else if (!stream.eatSpace()) {
        // Unrecognized character: consume it unstyled.
        stream.next();
      }
      return null;
    }
  };
});

CodeMirror.defineMIME("text/x-spreadsheet", "spreadsheet");
});
/CTkTable-0.8.tar.gz/CTkTable-0.8/README.md | # CTkTable
**Here is a quick and simple table widget having all the basic features.**

## Features:
- Add columns/rows
- Delete columns/rows
- Edit rows/columns at once
- Insert values to specific cell
- delete values from specific cell
- update all values at once
- edit each cell value and options
- entry editing
- can be used with scrollable frame
## Installation
```
pip install CTkTable
```
### [<img alt="GitHub repo size" src="https://img.shields.io/github/repo-size/Akascape/CTkTable?&color=white&label=Download%20Source%20Code&logo=Python&logoColor=yellow&style=for-the-badge" width="400">](https://github.com/Akascape/CTkTable/archive/refs/heads/main.zip)
## Usage
```python
import customtkinter
from CTkTable import *
root = customtkinter.CTk()
value = [[1,2,3,4,5],
[1,2,3,4,5],
[1,2,3,4,5],
[1,2,3,4,5],
[1,2,3,4,5]]
table = CTkTable(master=root, row=5, column=5, values=value)
table.pack(expand=True, fill="both", padx=20, pady=20)
root.mainloop()
```
## Methods
- **.add_row(index, values)**
- **.add_column(index, values)**
- **.edit_row(row_num, *args)**: edit one full row at once
- **.edit_column(column_num, *args)**: edit one full column at once
- **.delete_row(index)**: remove one row
- **.delete_column(index)**: remove one column
- **.delete_rows(indices)**: remove multiple rows
- **.delete_columns(indices)**: remove multiple columns
- **.select(row, column)**: select one cell
- **.select_row(row)**: select a row
- **.deselect_row(row)**: deselect a row
- **.select_column(column)**: select a column
- **.deselect_column(column)**: deselect a column
- **.update_values(values)**: update all values at once
- **.insert(row, column, value, *args)**: change specific index data
- **.delete(row, column, *args)**: delete the data from specific index
- **.get()**: get all values
- **.get(row, column)**: get specific cell value
- **.get_row(row)**: get all values of a specific row
- **.get_column(column)**: get all values of a specific column
- **.configure(arguments)**: change other table attributes
_here, **args** means CTkButton parameters, which can also be passed_
**Note: treat all the table cells as a ctkbutton class**
## Arguments
| Parameter | Description |
|-----------| ------------|
| **master** | parent widget |
| **values** | the default values for table |
| row | **optional**, set number of default rows |
| column | **optional**, set number of default columns |
| padx | add internal padding in x |
| pady | add internal padding in y |
| colors | set two fg_colors for the table (list), eg: `colors=["yellow", "green"]` |
| color_phase | set color phase based on rows or columns, eg: `color_phase="vertical"` |
| orientation | change the orientation of table, `vertical or horizontal` |
| header_color | define the topmost row color |
| corner_radius | define the corner roundness of the table |
| hover_color | enable hover effect on the cells |
| wraplength | set the width of cell text |
| **command** | specify a command when a table cell is pressed, [returns row, column, value] |
| **other button parameters* | all other ctk button parameters can be passed |
Note: This library is at an early stage, so there may be some performance issues.
### Thanks for visiting! Hope it will help :)
| PypiClean |
/Flask-MySQLdb-1.0.1.tar.gz/Flask-MySQLdb-1.0.1/README.md | Flask-MySQLdb [](https://app.travis-ci.com/alexferl/flask-mysqldb)
================
Flask-MySQLdb provides MySQL connection for Flask.
Quickstart
----------
First, you _may_ need to install some dependencies for [mysqlclient](https://github.com/PyMySQL/mysqlclient)
if you don't already have them, see [here](https://github.com/PyMySQL/mysqlclient#install).
Second, install Flask-MySQLdb:
```shell
pip install flask-mysqldb
```
Flask-MySQLdb depends, and will install for you, recent versions of Flask
(0.12.4 or later) and [mysqlclient](https://github.com/PyMySQL/mysqlclient-python).
Flask-MySQLdb is compatible with and tested on Python 3.7+. It _should_ work on any
version from Python 2.7 and up, but those older versions are not officially supported.
Next, add a ``MySQL`` instance to your code:
```python
from flask import Flask
from flask_mysqldb import MySQL
app = Flask(__name__)
# Required
app.config["MYSQL_USER"] = "user"
app.config["MYSQL_PASSWORD"] = "password"
app.config["MYSQL_DB"] = "database"
# Extra configs, optional:
app.config["MYSQL_CURSORCLASS"] = "DictCursor"
app.config["MYSQL_CUSTOM_OPTIONS"] = {"ssl": {"ca": "/path/to/ca-file"}} # https://mysqlclient.readthedocs.io/user_guide.html#functions-and-attributes
mysql = MySQL(app)
@app.route("/")
def users():
cur = mysql.connection.cursor()
cur.execute("""SELECT user, host FROM mysql.user""")
rv = cur.fetchall()
return str(rv)
if __name__ == "__main__":
app.run(debug=True)
```
Other configuration directives can be found [here](http://flask-mysqldb.readthedocs.io/en/latest/#configuration).
Why
---
Why would you want to use this extension versus just using MySQLdb by itself?
The only reason is that the extension was made using Flask's best practices in relation
to resources that need caching on the [app context](http://flask.pocoo.org/docs/0.12/appcontext/#context-usage).
What that means is that the extension will manage creating and teardown the connection to MySQL
for you while with if you were just using MySQLdb you would have to do it yourself.
Resources
---------
- [Documentation](http://flask-mysqldb.readthedocs.org/en/latest/)
- [PyPI](https://pypi.python.org/pypi/Flask-MySQLdb)
| PypiClean |
/Ifbyphone-API-Module-0.0.5.tar.gz/Ifbyphone-API-Module-0.0.5/src/Ifbyphone/api/sms.py | from base import IfbyphoneApiBase
class Sms(IfbyphoneApiBase):
    """Wrapper around the Ifbyphone SMS API actions.

    Each method fills ``self.options`` (provided by IfbyphoneApiBase) with an
    ``action`` name plus its parameters and dispatches the request through
    ``self.call``.
    """

    def send(self, **kwargs):
        """Send an outbound SMS message

        keyword arguments:
        to      -- number to receive SMS message
        from_   -- Ifbyphone enabled SMS number (trailing underscore avoids
                   the reserved word ``from``; sent to the API as ``from``)
        message -- SMS message
        """
        self.options['to'] = kwargs['to']
        self.options['from'] = kwargs['from_']
        self.options['message'] = kwargs['message']
        self.options['action'] = 'sms.send_message'
        return self.call(self.options)

    def delete_message(self, msg_id):
        """Delete a specific SMS message

        keyword arguments:
        msg_id -- unique ID of SMS message
        """
        self.options['msg_id'] = msg_id
        self.options['action'] = 'sms.delete_message'
        return self.call(self.options)

    def get_message(self, msg_id):
        """Retrieve a specific SMS message

        keyword arguments:
        msg_id -- unique ID of SMS message
        """
        self.options['msg_id'] = msg_id
        self.options['action'] = 'sms.get_message'
        return self.call(self.options)

    def get_messages(self, **kwargs):
        """Retrieve all SMS messages within a date range

        keyword arguments:
        number     -- the phone number associated with messages
        start_date -- the starting date (yyyy-mm-dd)
        end_date   -- the ending date (yyyy-mm-dd)
        """
        self.options.update(kwargs)
        self.options['action'] = 'sms.get_messages'
        return self.call(self.options)

    def get_numbers(self):
        """Retrieve all SMS enabled numbers for an account"""
        # Bug fix: the action was previously never set, so the request did
        # not identify 'sms.get_numbers' (every sibling method sets one).
        self.options['action'] = 'sms.get_numbers'
        return self.call(self.options)

    def register_number(self, **kwargs):
        """Register a number to send and receive SMS

        keyword arguments:
        number -- phone number to register
        url    -- url to submit inbound SMS post data
        """
        self.options.update(kwargs)
        self.options['action'] = 'sms.register_number'
        return self.call(self.options)

    def unregister_number(self, number):
        """Un-register an SMS enabled phone number

        keyword arguments:
        number -- phone number to un-register
        """
        self.options['number'] = number
        self.options['action'] = 'sms.unregister_number'
        return self.call(self.options)
/Mesa-2.1.1-py3-none-any.whl/mesa/visualization/templates/external/bootstrap-slider-11.0.2/test/specs/PublicMethodsSpec.js | describe("Public Method Tests", function() {
var testSlider;
describe("slider constructor", function() {
describe("returns a jQuery object if it is called on a jQuery object with zero or more matching elements", function() {
it("returns a jQuery object if it is called on with no matching elements", function() {
testSlider = $();
expect(testSlider.slider() instanceof jQuery).toBe(true);
});
it("returns a jQuery object if it is called on with one matching element", function() {
testSlider = $('#testSlider1');
expect(testSlider.slider() instanceof jQuery).toBe(true);
});
it("returns a jQuery object if it is called on with multiple matching elements", function() {
testSlider = $('#testSlider1, #testSlider2');
expect(testSlider.slider() instanceof jQuery).toBe(true);
});
});
it("reads and sets the 'id' attribute of the slider instance that is created", function() {
var sliderId = "mySlider";
testSlider = $("#testSlider1").slider({
id : sliderId
});
var sliderInstanceHasExpectedId = $("#testSlider1").siblings("div.slider").is("#" + sliderId);
expect(sliderInstanceHasExpectedId).toBeTruthy();
});
it("generates multiple slider instances from selector", function() {
$(".makeSlider").slider();
var sliderInstancesExists = $(".makeSlider").siblings().is(".slider");
expect(sliderInstancesExists).toBeTruthy();
var sliderInstancesCount = $(".makeSlider").siblings(".slider").length;
expect(sliderInstancesCount).toEqual(2);
$('.makeSlider').slider('destroy');
});
it("reads and sets the 'min' option properly", function() {
var minVal = -5;
testSlider = $("#testSlider1").slider({
min : minVal
});
testSlider.slider('setValue', minVal);
var sliderValue = testSlider.slider('getValue');
expect(sliderValue).toBe(minVal);
});
it("reads and sets the 'max' option properly", function() {
var maxVal = 15;
testSlider = $("#testSlider1").slider({
max : maxVal
});
testSlider.slider('setValue', maxVal);
var sliderValue = testSlider.slider('getValue');
expect(sliderValue).toBe(maxVal);
});
it("reads and sets the 'precision' option properly", function() {
testSlider = $("#testSlider1").slider({
precision: 2
});
testSlider.slider('setValue', 8.115);
var sliderValue = testSlider.slider('getValue');
expect(sliderValue).toBe(8.12);
});
it("reads and sets the 'orientation' option properly", function() {
var orientationVal = "vertical";
testSlider = $("#testSlider1").slider({
orientation : orientationVal
});
var orientationClassApplied = $("#testSlider1").siblings("div.slider").hasClass("slider-vertical");
expect(orientationClassApplied).toBeTruthy();
});
it("reads and sets the 'value' option properly", function() {
var val = 8;
testSlider = $("#testSlider1").slider({
value : val
});
testSlider.slider('setValue', val);
var sliderValue = testSlider.slider('getValue');
expect(sliderValue).toBe(val);
});
it("reads and sets the 'selection' option properly", function() {
var selectionVal = "after",
maxSliderVal = 10;
testSlider = $("#testSlider1").slider({
selection : selectionVal
});
testSlider.slider('setValue', maxSliderVal);
var sliderSelectionWidthAtMaxValue = $("#testSlider1").siblings(".slider").children("div.slider-track").children("div.slider-selection").width();
expect(sliderSelectionWidthAtMaxValue).toBe(0);
});
it("updates the 'selection' option properly", function() {
var selectionVal = "none",
maxSliderVal = 10;
testSlider = $("#testSlider1").slider({
selection : selectionVal
});
testSlider.slider('setValue', maxSliderVal);
testSlider.slider('refresh');
var sliderSelectionHasHideClass_A = $("#testSlider1").siblings(".slider").children("div.slider-track").children("div.slider-track-low").hasClass('hide');
expect(sliderSelectionHasHideClass_A).toBe(true);
var sliderSelectionHasHideClass_B = $("#testSlider1").siblings(".slider").children("div.slider-track").children("div.slider-selection").hasClass('hide');
expect(sliderSelectionHasHideClass_B).toBe(true);
var sliderSelectionHasHideClass_C = $("#testSlider1").siblings(".slider").children("div.slider-track").children("div.slider-track-high").hasClass('hide');
expect(sliderSelectionHasHideClass_C).toBe(true);
var newSelectionVal = 'after';
testSlider.slider('setAttribute', 'selection', newSelectionVal);
testSlider.slider('refresh');
var sliderSelectionHasHideClass_D = $("#testSlider1").siblings(".slider").children("div.slider-track").children("div.slider-track-low").hasClass('hide');
expect(sliderSelectionHasHideClass_D).toBe(false);
var sliderSelectionHasHideClass_E = $("#testSlider1").siblings(".slider").children("div.slider-track").children("div.slider-selection").hasClass('hide');
expect(sliderSelectionHasHideClass_E).toBe(false);
var sliderSelectionHasHideClass_F = $("#testSlider1").siblings(".slider").children("div.slider-track").children("div.slider-track-high").hasClass('hide');
expect(sliderSelectionHasHideClass_F).toBe(false);
});
it("reads and sets the 'handle' option properly", function() {
var handleVal = "triangle";
testSlider = $("#testSlider1").slider({
handle : handleVal
});
var handleIsSetToTriangle = $("#testSlider1").siblings(".slider").children("div.slider-handle").hasClass("triangle");
expect(handleIsSetToTriangle).toBeTruthy();
});
it("reads and sets the 'reversed' option properly", function() {
var reversedVal = true,
maxSliderVal = 10;
testSlider = $("#testSlider1").slider({
reversed : reversedVal
});
testSlider.slider('setValue', maxSliderVal);
var sliderSelectionHeightAtMaxValue = $("#testSlider1").siblings(".slider").children("div.slider-track").children("div.slider-selection").width();
expect(sliderSelectionHeightAtMaxValue).toBe(0);
});
it("reads and sets the 'formatter' option properly", function() {
var tooltipFormatter = function(value) {
return 'Current value: ' + value;
};
testSlider = $("#testSlider1").slider({
formatter : tooltipFormatter
});
testSlider.slider('setValue', 9);
var tooltipMessage = $("#testSlider1").siblings(".slider").find("div.tooltip").children("div.tooltip-inner").text();
var expectedMessage = tooltipFormatter(9);
expect(tooltipMessage).toBe(expectedMessage);
});
it("reads and sets the 'enabled' option properly", function() {
testSlider = $("#testSlider1").slider({
enabled: false
});
var isEnabled = testSlider.slider('isEnabled');
expect(isEnabled).not.toBeTruthy();
});
describe("reads and sets the 'tooltip' option properly", function() {
it("tooltip is not shown if set to 'hide'", function() {
testSlider = $("#testSlider1").slider({
tooltip : "hide"
});
var tooltipIsHidden = testSlider.siblings(".slider").children("div.tooltip").hasClass("hide");
expect(tooltipIsHidden).toBeTruthy();
});
it("tooltip is shown during sliding if set to 'show'", function() {
testSlider = $("#testSlider1").slider({
tooltip : "show"
});
var tooltipIsHidden = !($("#testSlider1").siblings(".slider").children("div.tooltip").hasClass("show"));
expect(tooltipIsHidden).toBeTruthy();
// Trigger hover
var mouseenterEvent = document.createEvent("Events");
mouseenterEvent.initEvent("mouseenter", true, true);
testSlider.data('slider').sliderElem.dispatchEvent(mouseenterEvent);
var tooltipIsShownAfterSlide = $("#testSlider1").siblings(".slider").children("div.tooltip").hasClass("show");
expect(tooltipIsShownAfterSlide).toBeTruthy();
});
it("tooltip is shown on mouse over and hides correctly after mouse leave", function() {
testSlider = $("#testSlider1").slider({
tooltip : "show"
});
var tooltipIsHidden = !($("#testSlider1").siblings(".slider").children("div.tooltip").hasClass("show"));
expect(tooltipIsHidden).toBeTruthy();
// Trigger hover
var mouseenterEvent = document.createEvent("Events");
mouseenterEvent.initEvent("mouseenter", true, true);
testSlider.data('slider').sliderElem.dispatchEvent(mouseenterEvent);
var tooltipIsShownAfterSlide = $("#testSlider1").siblings(".slider").children("div.tooltip").hasClass("show");
expect(tooltipIsShownAfterSlide).toBeTruthy();
// Trigger leave
var mouseleaveEvent = document.createEvent("Events");
mouseleaveEvent.initEvent("mouseleave", true, true);
testSlider.data('slider').sliderElem.dispatchEvent(mouseleaveEvent);
var tooltipIsAgainHidden = !($("#testSlider1").siblings(".slider").children("div.tooltip").hasClass("show"));
expect(tooltipIsAgainHidden).toBeTruthy();
});
it("tooltip is always shown if set to 'always'", function() {
testSlider = $("#testSlider1").slider({
tooltip : "always"
});
var tooltipIsShown = $("#testSlider1").siblings(".slider").children("div.tooltip").hasClass("show");
expect(tooltipIsShown).toBeTruthy();
});
it("defaults to 'show' option if invalid value is passed", function() {
testSlider = $("#testSlider1").slider({
tooltip : "invalid option value"
});
var tooltipIsHidden = !($("#testSlider1").siblings(".slider").children("div.tooltip").hasClass("show"));
expect(tooltipIsHidden).toBeTruthy();
// Trigger hover
var mouseenterEvent = document.createEvent("Events");
mouseenterEvent.initEvent("mouseenter", true, true);
testSlider.data('slider').sliderElem.dispatchEvent(mouseenterEvent);
var tooltipIsShownOnHover = $("#testSlider1").siblings(".slider").children("div.tooltip").hasClass("show");
expect(tooltipIsShownOnHover).toBeTruthy();
});
});
});
describe("'setValue()' tests", function() {
var formatInvalidInputMsg = function(invalidValue) { return "Invalid input value '" + invalidValue + "' passed in"; };
describe("if slider is a single value slider", function() {
beforeEach(function() {
testSlider = $("#testSlider1").slider();
});
it("properly sets the value of the slider when given a numeric value", function() {
var valueToSet = 5;
testSlider.slider('setValue', valueToSet);
var sliderValue = testSlider.slider('getValue');
expect(sliderValue).toBe(valueToSet);
});
it("properly sets the value of the slider when given a string value", function(){
var valueToSet = "5";
testSlider.slider('setValue', valueToSet);
var sliderValue = testSlider.slider('getValue');
expect(sliderValue).toBe(5);
});
it("if a value passed in is greater than the max (10), the slider only goes to the max", function() {
var maxValue = 10,
higherThanSliderMaxVal = maxValue + 5;
testSlider.slider('setValue', higherThanSliderMaxVal);
var sliderValue = testSlider.slider('getValue');
expect(sliderValue).toBe(maxValue);
});
it("if a value passed in is less than the min (0), the slider only goes to the min", function() {
var minValue = 0,
lowerThanSliderMaxVal = minValue - 5;
testSlider.slider('setValue', lowerThanSliderMaxVal);
var sliderValue = testSlider.slider('getValue');
expect(sliderValue).toBe(minValue);
});
it("sets the 'value' property of the slider <input> element", function() {
var value = 9;
testSlider.slider('setValue', value);
var currentValue = document.querySelector("#testSlider1").value;
currentValue = parseFloat(currentValue);
expect(currentValue).toBe(value);
});
it("sets the 'value' attribute of the slider <input> element", function() {
var value = 9;
testSlider.slider('setValue', value);
var currentValue = document.querySelector("#testSlider1").getAttribute("value");
currentValue = parseFloat(currentValue);
expect(currentValue).toBe(value);
});
describe("when an invalid value type is passed in", function() {
var invalidValue;
beforeEach(function() {
invalidValue = "a";
});
it("throws an error and does not alter the slider value", function() {
var originalSliderValue = testSlider.slider('getValue');
var settingValue = function() {
testSlider.slider('setValue', invalidValue);
};
expect(settingValue).toThrow(new Error( formatInvalidInputMsg(invalidValue) ));
var sliderValue = testSlider.slider('getValue');
expect(sliderValue).toBe(originalSliderValue);
});
});
});
describe("if slider is a range slider", function() {
  beforeEach(function() {
    // A two-element value array puts the slider into range mode.
    testSlider = $("#testSlider1").slider({
      value : [3, 8]
    });
  });
  it("properly sets the values if both within the max and min", function() {
    var valuesToSet = [5, 7];
    testSlider.slider('setValue', valuesToSet);
    var sliderValues = testSlider.slider('getValue');
    expect(sliderValues[0]).toBe(valuesToSet[0]);
    expect(sliderValues[1]).toBe(valuesToSet[1]);
  });
  describe("caps values to the min if they are set to be less than the min", function() {
    // NOTE(review): these specs assume #testSlider1's range is [0, 10] — the
    // literal 0/10 expectations below rely on the fixture's default attributes.
    var minValue = -5,
        otherValue = 7;
    it("first value is capped to min", function() {
      testSlider.slider('setValue', [minValue, otherValue]);
      var sliderValues = testSlider.slider('getValue');
      expect(sliderValues[0]).toBe(0);
    });
    it("second value is capped to min", function() {
      testSlider.slider('setValue', [otherValue, minValue]);
      var sliderValues = testSlider.slider('getValue');
      expect(sliderValues[1]).toBe(0);
    });
  });
  describe("caps values to the max if they are set to be higher than the max", function() {
    var maxValue = 15,
        otherValue = 7;
    it("first value is capped to max", function() {
      testSlider.slider('setValue', [maxValue, otherValue]);
      var sliderValues = testSlider.slider('getValue');
      expect(sliderValues[0]).toBe(10);
    });
    it("second value is capped to max", function() {
      testSlider.slider('setValue', [otherValue, maxValue]);
      var sliderValues = testSlider.slider('getValue');
      expect(sliderValues[1]).toBe(10);
    });
  });
  describe("if either value is of invalid type", function() {
    // Either element being non-numeric must reject the whole pair.
    var invalidValue = "a",
        otherValue = 7;
    it("first value is of invalid type", function() {
      var setSliderValueFn = function() {
        testSlider.slider('setValue', [invalidValue, otherValue]);
      };
      expect(setSliderValueFn).toThrow(new Error( formatInvalidInputMsg(invalidValue) ));
    });
    it("second value is of invalid type", function() {
      var setSliderValueFn = function() {
        testSlider.slider('setValue', [otherValue, invalidValue]);
      };
      expect(setSliderValueFn).toThrow(new Error( formatInvalidInputMsg(invalidValue) ));
    });
  });
});
describe("triggerSlideEvent argument", function() {
  it("if triggerSlideEvent argument is true, the 'slide' event is triggered", function() {
    var testSlider = $("#testSlider1").slider({
      value : 3
    });
    var newSliderVal = 5;
    var slideEventTriggered = false;
    testSlider.on('slide', function(evt) {
      slideEventTriggered = true;
      expect(newSliderVal).toEqual(evt.value);
    });
    testSlider.slider('setValue', newSliderVal, true);
    // BUG FIX: the expectation inside the handler only runs if the event
    // actually fires; without this guard the spec passed vacuously when
    // 'slide' was never emitted.
    expect(slideEventTriggered).toEqual(true);
  });
  it("if triggerSlideEvent argument is false, the 'slide' event is not triggered", function() {
    var newSliderVal = 5;
    var slideEventTriggered = false;
    var testSlider = $("#testSlider1").slider({
      value : 3
    });
    testSlider.on('slide', function() {
      slideEventTriggered = true;
    });
    testSlider.slider('setValue', newSliderVal, false);
    expect(slideEventTriggered).toEqual(false);
  });
});
describe("triggerChangeEvent argument", function() {
  it("if triggerChangeEvent argument is true, the 'change' event is triggered", function() {
    var testSlider = $("#testSlider1").slider({
      value : 3
    });
    var newSliderVal = 5;
    var changeEventTriggered = false;
    testSlider.on('change', function(evt) {
      changeEventTriggered = true;
      expect(newSliderVal).toEqual(evt.value.newValue);
    });
    // BUG FIX: setValue's signature is (value, triggerSlideEvent,
    // triggerChangeEvent); the original passed `true` as the *slide* flag, so
    // this spec never exercised the change flag it claims to test.
    // NOTE(review): verify against this version's setValue signature.
    testSlider.slider('setValue', newSliderVal, false, true);
    // Guard against a vacuous pass: the in-handler expectation only runs if
    // the event actually fired.
    expect(changeEventTriggered).toEqual(true);
  });
  it("if triggerChangeEvent argument is false, the 'change' event is not triggered", function() {
    var changeEventTriggered = false;
    var testSlider = $("#testSlider1").slider({
      value : 3
    });
    testSlider.on('change', function() {
      changeEventTriggered = true;
    });
    testSlider.slider('setValue', 5, false);
    expect(changeEventTriggered).toEqual(false);
  });
});
});
describe("'getValue()' tests", function() {
  it("returns the current value of the slider", function() {
    testSlider = $("#testSlider1").slider();
    var expectedValue = 5;
    testSlider.slider('setValue', expectedValue);
    // getValue must echo back exactly what setValue stored.
    expect(testSlider.slider('getValue')).toBe(expectedValue);
  });
});
describe("'enable()' tests", function() {
  it("correctly enables a slider", function() {
    // Start from a disabled slider so enable() has an observable effect.
    testSlider = $("#testSlider1").slider({
      enabled: false
    });
    testSlider.slider("enable");
    expect(testSlider.slider("isEnabled")).toBeTruthy();
  });
});
describe("'disable()' tests", function() {
  it("correctly disable a slider", function() {
    testSlider = $("#testSlider1").slider();
    testSlider.slider("disable");
    expect(testSlider.slider("isEnabled")).not.toBeTruthy();
  });
});
describe("'toggle()' tests", function() {
  it("correctly enables a disabled slider", function() {
    testSlider = $("#testSlider1").slider({
      enabled: false
    });
    testSlider.slider("toggle");
    expect(testSlider.slider("isEnabled")).toBeTruthy();
  });
  it("correctly disables an enabled slider", function() {
    testSlider = $("#testSlider1").slider();
    testSlider.slider("toggle");
    expect(testSlider.slider("isEnabled")).not.toBeTruthy();
  });
});
describe("'isEnabled()' tests", function() {
  it("returns true for an enabled slider", function() {
    testSlider = $("#testSlider1").slider({
      id: "enabled",
      enabled: true
    });
    var isEnabled = testSlider.slider("isEnabled");
    var $slider = testSlider.siblings("#enabled");
    // BUG FIX: the original checked hasClass("#enabled") — a selector string,
    // never a class name — so the negative expectation was vacuously true.
    // Mirror the disabled-slider spec and check for "slider-disabled".
    var hasDisabledClass = $slider.hasClass("slider") && $slider.hasClass("slider-disabled");
    expect(isEnabled).toBeTruthy();
    expect(hasDisabledClass).not.toBeTruthy();
  });
  it("returns false for a disabled slider", function() {
    testSlider = $("#testSlider1").slider({
      id: "disabled",
      enabled: false
    });
    var isEnabled = testSlider.slider("isEnabled");
    var $slider = testSlider.siblings("#disabled");
    var hasDisabledClass = $slider.hasClass("slider") && $slider.hasClass("slider-disabled");
    expect(isEnabled).not.toBeTruthy();
    expect(hasDisabledClass).toBeTruthy();
  });
});
it("get attribute", function() {
  testSlider = $("#testSlider1").slider();
  // #testSlider1's configured maximum is 10.
  expect(testSlider.slider('getAttribute', 'max')).toBe(10);
});
it("changes slider from basic to range", function() {
  testSlider = $("#makeRangeSlider").slider();
  testSlider.slider('setAttribute', 'range', true).slider('refresh');
  // BUG FIX: the original queried "#changeOrientationSlider" — a different
  // fixture — so the jQuery set was empty and hasClass('hide') was vacuously
  // false. Inspect the slider actually under test (#makeRangeSlider): after
  // refreshing into range mode its second handle must no longer be hidden.
  var isRangeSlider = $("#makeRangeSlider").parent("div.slider").find('.slider-handle').last().hasClass('hide');
  expect(isRangeSlider).toBeFalsy();
});
it("setAttribute: changes the 'data-slider-orientation' property from horizontal to vertical", function() {
  testSlider = $("#changeOrientationSlider").slider({
    id: "changeOrientationSliderElem"
  });
  // Flip the orientation, then rebuild the DOM so the class is re-applied.
  testSlider.slider('setAttribute', 'orientation', 'vertical').slider('refresh');
  var hasVerticalClass = $("#changeOrientationSliderElem").hasClass("slider-vertical");
  expect(hasVerticalClass).toBeTruthy();
});
it("relayout: if slider is not displayed on initialization and then displayed later on, relayout() will not adjust the margin-left of the tooltip", function() {
  // Setup: the slider's container starts hidden (display: none in fixture).
  testSlider = new Slider("#relayoutSliderInput", {
    id: "relayoutSlider",
    min: 0,
    max: 10,
    value: 5
  });
  var mainTooltipDOMRef = document.querySelector("#relayoutSlider .tooltip-main");
  var relayoutSliderContainerDOMRef = document.querySelector("#relayoutSliderContainer");
  var tooltipMarginLeft;
  // Main tooltip margin-left offset should not be set on slider intialization.
  // parseFloat("") is NaN, so an untouched inline style satisfies toBeNaN().
  tooltipMarginLeft = parseFloat(mainTooltipDOMRef.style.marginLeft);
  expect(tooltipMarginLeft).toBeNaN();
  // Show slider and call relayout()
  relayoutSliderContainerDOMRef.style.display = "block";
  testSlider.relayout();
  // Main tooltip margin-left offset should not be set after relayout() is called.
  tooltipMarginLeft = Math.abs( parseFloat(mainTooltipDOMRef.style.marginLeft) );
  expect(tooltipMarginLeft).toBeNaN();
});
it("relayout: if slider is not displayed on initialization and then displayed later on, relayout() will re-adjust the tick label width", function() {
  // Setup: container is hidden, so tick labels are laid out with zero width.
  testSlider = new Slider("#relayoutSliderInputTickLabels", {
    id: "relayoutSliderTickLabels",
    min: 0,
    max: 10,
    ticks: [0, 5, 10],
    ticks_labels: ['low', 'mid', 'high'],
    value: 5
  });
  var $ticks = $('#relayoutSliderTickLabels').find('.slider-tick-label');
  // Tick-Width should be 0 on slider intialization
  var i, $tick;
  for (i = 0; i < $ticks.length; i++) {
    $tick = $($ticks[i]);
    expect( parseInt($tick.css('width')) ).toBe(0);
  }
  // Show slider, call relayout(), then hide the container again so this spec
  // leaves the fixture in its original state for later specs.
  $('#relayoutSliderContainerTickLabels').css('display', 'block');
  testSlider.relayout();
  $('#relayoutSliderContainerTickLabels').css('display', 'none');
  // Tick-Width should re-adjust to be > 0
  for (i = 0; i < $ticks.length; i++) {
    $tick = $($ticks[i]);
    expect( parseInt($tick.css('width')) ).toBeGreaterThan(0);
  }
});
afterEach(function() {
  // Tear down via whichever API flavor (jQuery plugin or Slider class)
  // the spec used to construct the slider.
  if (testSlider) {
    if (testSlider instanceof jQuery) {
      testSlider.slider('destroy');
    }
    if (testSlider instanceof Slider) {
      testSlider.destroy();
    }
    testSlider = null;
  }
});
}); | PypiClean |
/MPoL-0.1.13.tar.gz/MPoL-0.1.13/src/mpol/fourier.py | r"""The ``fourier`` module provides the core functionality of MPoL via :class:`mpol.fourier.FourierCube`."""
import numpy as np
import torch
import torch.fft # to avoid conflicts with old torch.fft *function*
import torchkbnufft
from torch import nn
from . import utils
from .coordinates import GridCoords
from .gridding import _setup_coords
class FourierCube(nn.Module):
    r"""
    This layer performs the FFT of an ImageCube and stores the corresponding dense FFT output as a cube. If you are using this layer in a forward-modeling RML workflow, because the FFT of the model is essentially stored as a grid, you will need to make the loss function calculation using a gridded loss function (e.g., :func:`mpol.losses.nll_gridded`) and a gridded dataset (e.g., :class:`mpol.datasets.GriddedDataset`).

    Args:
        cell_size (float): the width of an image-plane pixel [arcseconds]
        npix (int): the number of pixels per image side
        coords (GridCoords): an object already instantiated from the GridCoords class. If providing this, cannot provide ``cell_size`` or ``npix``.

    Raises:
        ValueError: if neither ``coords`` nor ``cell_size``/``npix`` are supplied.
    """

    def __init__(self, cell_size=None, npix=None, coords=None):
        super().__init__()

        # we don't want to bother with the nchan argument here, so
        # we don't use the convenience method _setup_coords
        # and just do it manually
        if coords:
            assert (
                npix is None and cell_size is None
            ), "npix and cell_size must be empty if precomputed GridCoords are supplied."
            self.coords = coords

        elif npix or cell_size:
            assert (
                coords is None
            ), "GridCoords must be empty if npix and cell_size are supplied."

            self.coords = GridCoords(cell_size=cell_size, npix=npix)

        else:
            # Previously this fell through silently, leaving ``self.coords``
            # unset and deferring the failure to an opaque AttributeError at
            # first use. Fail fast with an actionable message instead.
            raise ValueError(
                "Either provide a precomputed GridCoords object via `coords`, "
                "or provide both `cell_size` and `npix`."
            )

    def forward(self, cube):
        """
        Perform the FFT of the image cube on each channel.

        Args:
            cube (torch.double tensor, of shape ``(nchan, npix, npix)``): a prepacked image cube, for example, from ImageCube.forward()

        Returns:
            (torch.complex tensor, of shape ``(nchan, npix, npix)``): the FFT of the image cube, in packed format.
        """

        # make sure the cube is 3D
        assert cube.dim() == 3, "cube must be 3D"

        # the self.cell_size prefactor (in arcsec) is to obtain the correct output units
        # since it needs to correct for the spacing of the input grid.
        # See MPoL documentation and/or TMS Eqn A8.18 for more information.
        self.vis = self.coords.cell_size**2 * torch.fft.fftn(cube, dim=(1, 2))

        return self.vis

    @property
    def ground_vis(self):
        r"""
        The visibility cube in ground format cube fftshifted for plotting with ``imshow``.

        Returns:
            (torch.complex tensor, of shape ``(nchan, npix, npix)``): the FFT of the image cube, in sky plane format.
        """

        return utils.packed_cube_to_ground_cube(self.vis)

    @property
    def ground_amp(self):
        r"""
        The amplitude of the cube, arranged in unpacked format corresponding to the FFT of the sky_cube. Array dimensions for plotting given by ``self.coords.vis_ext``.

        Returns:
            torch.double : 3D amplitude cube of shape ``(nchan, npix, npix)``
        """
        return torch.abs(self.ground_vis)

    @property
    def ground_phase(self):
        r"""
        The phase of the cube, arranged in unpacked format corresponding to the FFT of the sky_cube. Array dimensions for plotting given by ``self.coords.vis_ext``.

        Returns:
            torch.double : 3D phase cube of shape ``(nchan, npix, npix)``
        """
        return torch.angle(self.ground_vis)
def safe_baseline_constant_meters(uu, vv, freqs, coords, uv_cell_frac=0.05):
    r"""
    Determine whether baselines supplied in meters can be treated as constant
    with channel once converted to kilolambda.

    Baselines are identical across channels when measured in meters, but after
    conversion via :math:`u = D / \lambda` the per-channel values differ if the
    channel wavelengths differ. This routine checks whether the maximum
    channel-to-channel variation in :math:`u` or :math:`v` (in kilolambda)
    stays below a fraction ``uv_cell_frac`` of one :math:`u,v` cell defined by
    ``coords``. If it does, parallelization via the coil dimension of the
    :class:`mpol.fourier.NuFFT` layer is safe.

    Args:
        uu (1D np.array): length ``nvis`` array of u (East-West) spatial frequency coordinates [m]
        vv (1D np.array): length ``nvis`` array of v (North-South) spatial frequency coordinates [m]
        freqs (1D np.array): length ``nchan`` array of channel frequencies [Hz]
        coords: a :class:`mpol.coordinates.GridCoords` object defining the image and uv-grid dimensions.
        uv_cell_frac (float): maximum allowed change in :math:`u` or :math:`v` across channels, as a fraction of one :math:`u,v` cell.

    Returns:
        boolean: `True` if the baselines may be treated as channel-constant at the ``uv_cell_frac`` tolerance, else `False`.
    """
    # Broadcast baselines [m] against the channel frequencies, yielding
    # (nchan, nvis) arrays in kilolambda.
    uu, vv = utils.broadcast_and_convert_baselines(uu, vv, freqs)

    # Tolerance in kilolambda: a fraction of one u,v grid cell.
    threshold = uv_cell_frac * coords.du  # [klambda]

    # Stack into a single (2, nchan, nvis) array and measure the spread
    # (max - min) across the channel axis for both u and v at once.
    stacked = np.array([uu, vv])
    spread = stacked.max(axis=1) - stacked.min(axis=1)

    # Safe only if the worst-case spread stays inside the tolerance.
    return spread.max() < threshold
def safe_baseline_constant_kilolambda(uu, vv, coords, uv_cell_frac=0.05):
    r"""
    Determine whether baselines already expressed in kilolambda can be treated
    as constant with channel.

    Unlike :func:`mpol.fourier.safe_baseline_constant_meters`, this routine
    expects multidimensional ``uu`` and ``vv`` arrays of shape
    ``(nchan, nvis)`` in units of kilolambda. If it returns `True`, it should
    be safe to average the baselines across channel (or pick one
    representative channel), enabling parallelization in the
    :class:`mpol.fourier.NuFFT` layer via the coil dimension.

    Args:
        uu (2D np.array): shape ``(nchan, nvis)`` array of u (East-West) spatial frequency coordinates [klambda]
        vv (2D np.array): shape ``(nchan, nvis)`` array of v (North-South) spatial frequency coordinates [klambda]
        coords: a :class:`mpol.coordinates.GridCoords` object defining the image and uv-grid dimensions.
        uv_cell_frac (float): maximum allowed change in :math:`u` or :math:`v` across channels, as a fraction of one :math:`u,v` cell.

    Returns:
        boolean: `True` if the baselines may be treated as channel-constant at the ``uv_cell_frac`` tolerance, else `False`.
    """
    # Tolerance in kilolambda: a fraction of one u,v grid cell.
    threshold = uv_cell_frac * coords.du  # [klambda]

    # Stack into a single (2, nchan, nvis) array and measure the spread
    # (max - min) across the channel axis for both u and v at once.
    stacked = np.array([uu, vv])
    spread = stacked.max(axis=1) - stacked.min(axis=1)

    # Safe only if the worst-case spread stays inside the tolerance.
    return spread.max() < threshold
class NuFFT(nn.Module):
    r"""
    This layer translates input from an :class:`mpol.images.ImageCube` directly to loose, ungridded samples of the Fourier plane, directly corresponding to the :math:`u,v` locations of the data. This layer is different than a :class:`mpol.Fourier.FourierCube` in that, rather than producing the dense cube-like output from an FFT routine, it utilizes the non-uniform FFT or 'NuFFT' to interpolate directly to discrete :math:`u,v` locations that need not correspond to grid cell centers. This is implemented using the KbNufft routines of the `TorchKbNufft <https://torchkbnufft.readthedocs.io/en/stable/index.html>`_ package.

    **Dimensionality**: One consideration when using this layer is the dimensionality of your image and your visibility samples. If your image has multiple channels (``nchan > 1``), there is the possibility that the :math:`u,v` sample locations corresponding to each channel may be different. In ALMA/VLA applications, this can arise when continuum observations are taken over significant bandwidth, since the spatial frequency sampled by any pair of antennas is wavelength-dependent

    .. math::

        u = \frac{D}{\lambda},

    where :math:`D` is the projected baseline (measured in, say, meters) and :math:`\lambda` is the observing wavelength. In this application, the image-plane model could be the same for each channel, or it may vary with channel (necessary if the spectral slope of the source is significant).

    On the other hand, with spectral line observations it will usually be the case that the total bandwidth of the observations is small enough such that the :math:`u,v` sample locations could be considered as the same for each channel. In spectral line applications, the image-plane model usually varies substantially with each channel.

    This layer will determine whether the spatial frequencies are treated as constant based upon the dimensionality of the ``uu`` and ``vv`` input arguments.

    * If ``uu`` and ``vv`` have a shape of (``nvis``), then it will be assumed that the spatial frequencies can be treated as constant with channel (and will invoke parallelization across the image cube ``nchan`` dimension using the 'coil' dimension of the TorchKbNufft package).
    * If the ``uu`` and ``vv`` have a shape of (``nchan, nvis``), then it will be assumed that the spatial frequencies are different for each channel, and the spatial frequencies provided for each channel will be used (and will invoke parallelization across the image cube ``nchan`` dimension using the 'batch' dimension of the TorchKbNufft package).

    Note that there is no straightforward, computationally efficient way to proceed if there are a different number of spatial frequencies for each channel. The best approach is likely to construct ``uu`` and ``vv`` arrays that have a shape of (``nchan, nvis``), such that all channels are padded with bogus :math:`u,v` points to have the same length ``nvis``, and you create a boolean mask to keep track of which points are valid. Then, when this routine returns data points of shape (``nchan, nvis``), you can use that boolean mask to select only the valid :math:`u,v` points points.

    **Interpolation mode**: You may choose the type of interpolation mode that KbNufft uses under the hood by changing the boolean value of ``sparse_matrices``. For repeated evaluations of this layer (as might exist within an optimization loop), ``sparse_matrices=True`` is likely to be the more accurate and faster choice. If ``sparse_matrices=False``, this routine will use the default table-based interpolation of TorchKbNufft. Note that as of TorchKbNuFFT version 1.4.0, sparse matrices are not yet available when parallelizing using the 'batch' dimension --- this will result in a warning.

    Args:
        cell_size (float): the width of an image-plane pixel [arcseconds]
        npix (int): the number of pixels per image side
        coords (GridCoords): an object already instantiated from the GridCoords class. If providing this, cannot provide ``cell_size`` or ``npix``.
        nchan (int): the number of channels in the :class:`mpol.images.ImageCube`. Default = 1.
        uu (np.array): a length ``nvis`` array (not including Hermitian pairs) of the u (East-West) spatial frequency coordinate [klambda]
        vv (np.array): a length ``nvis`` array (not including Hermitian pairs) of the v (North-South) spatial frequency coordinate [klambda]
    """

    def __init__(
        self,
        cell_size=None,
        npix=None,
        coords=None,
        nchan=None,
        uu=None,
        vv=None,
        sparse_matrices=True,
    ):
        super().__init__()

        _setup_coords(self, cell_size, npix, coords, nchan)

        # initialize the non-uniform FFT object
        self.nufft_ob = torchkbnufft.KbNufft(
            im_size=(self.coords.npix, self.coords.npix)
        )

        if (uu is not None) and (vv is not None):
            # NOTE: _assemble_ktraj also sets ``self.same_uv`` as a side
            # effect; the sparse-matrix branch below relies on that.
            self.k_traj = self._assemble_ktraj(uu, vv)
        else:
            raise ValueError("uu and vv are required arguments.")

        self.sparse_matrices = sparse_matrices
        if self.sparse_matrices:
            if self.same_uv:
                # precompute the sparse interpolation matrices
                self.interp_mats = torchkbnufft.calc_tensor_spmatrix(
                    self.k_traj, im_size=(self.coords.npix, self.coords.npix)
                )
            else:
                import warnings

                warnings.warn(
                    "Provided uu and vv arrays are multi-dimensional, suggesting an intent to parallelize using the 'batch' dimension. This feature is not yet available in TorchKbNuFFT v1.4.0 with sparse matrix interpolation (sparse_matrices=True), therefore we are proceeding with table interpolation (sparse_matrices=False).",
                    category=RuntimeWarning,
                )
                # Fall back to table interpolation for the batch-parallel case.
                self.interp_mats = None
                self.sparse_matrices = False

    def _klambda_to_radpix(self, klambda):
        r"""Convert a spatial frequency in units of klambda to 'radians/sky pixel,' using the pixel cell_size provided by ``self.coords.dl``.

        These concepts can be a little confusing because there are two angular measures at play.

        1. The first is the normal angular sky coordinate, normally measured in arcseconds for typical sources observed by ALMA or the VLA. Arcseconds, being an angular coordinate, can equivalently be expressed in units of radians. To avoid confusion, we will call this angular measurement 'sky radians.' Alternatively, for a given image grid, this same sky coordinate could be expressed in units of sky pixels.
        2. The second is the spatial frequency of some image-plane function, :math:`I_\nu(l,m)`, which we could quote in units of 'cycles per arcsecond' or 'cycles per sky pixel,' for example. With a radio interferometer, spatial frequencies are typically quoted in units of the observing wavelength, i.e., lambda or kilo-lambda. If the field of view of the image is small, thanks to the small-angle approximation, units of lambda are directly equivalent to 'cycles per sky radian.' The second angular measure comes about when converting the spatial frequency from a linear measure of frequency 'cycles per sky radian' to an angular measure of frequency 'radians per sky radian' or 'radians per sky pixel.'

        The TorchKbNufft package expects k-trajectory vectors in units of 'radians per sky pixel.' This routine helps convert spatial frequencies from their default unit (kilolambda) into 'radians per sky pixel' using the pixel cell_size as provided by ``self.coords.dl``.

        Args:
            klambda (float): spatial frequency in units of kilolambda

        Returns:
            float: spatial frequency measured in units of radian per sky pixel
        """
        # NOTE: this docstring was made raw (r"""); previously the ``\nu`` in
        # the :math: directive was interpreted as a newline escape, mangling
        # the rendered documentation.

        # convert from kilolambda to cycles per sky radian
        u_lam = klambda * 1e3  # [lambda, or cycles/radian]

        # convert from 'cycles per sky radian' to 'radians per sky radian'
        u_rad_per_rad = u_lam * 2 * np.pi  # [radians / sky radian]

        # size of pixel in radians
        # self.coords.dl  # [sky radians/pixel]

        # convert from 'radians per sky radian' to 'radians per sky pixel'
        u_rad_per_pix = u_rad_per_rad * self.coords.dl  # [radians / pixel]

        return u_rad_per_pix

    def _assemble_ktraj(self, uu, vv):
        r"""
        This routine converts a series of :math:`u, v` coordinates into a k-trajectory vector for the torchkbnufft routines. The dimensionality of the k-trajectory vector will influence how TorchKbNufft will perform the operations.

        * If ``uu`` and ``vv`` have a 1D shape of (``nvis``), then it will be assumed that the spatial frequencies can be treated as constant with channel. This will result in a ``k_traj`` vector that has shape (``2, nvis``), such that parallelization will be across the image cube ``nchan`` dimension using the 'coil' dimension of the TorchKbNufft package.
        * If the ``uu`` and ``vv`` have a 2D shape of (``nchan, nvis``), then it will be assumed that the spatial frequencies are different for each channel, and the spatial frequencies provided for each channel will be used. This will result in a ``k_traj`` vector that has shape (``nchan, 2, nvis``), such that parallelization will be across the image cube ``nchan`` dimension using the 'batch' dimension of the TorchKbNufft package.

        Side effects:
            Sets ``self.same_uv`` (True for the 1D / coil-parallel case).

        Args:
            uu (1D or 2D numpy array): u (East-West) spatial frequency coordinate [klambda]
            vv (1D or 2D numpy array): v (North-South) spatial frequency coordinate [klambda]

        Returns:
            k_traj (torch tensor): a k-trajectory vector with shape
        """
        uu_radpix = self._klambda_to_radpix(uu)
        vv_radpix = self._klambda_to_radpix(vv)

        # if uu and vv are 1D dimension, then we can assume that we will parallelize across the coil dimension.
        # otherwise, we assume that we will parallelize across the batch dimension.
        self.same_uv = len(uu.shape) == 1

        if self.same_uv:
            # k-trajectory needs to be packed the way the image is packed (y,x), so
            # the trajectory needs to be packed (v, u)
            # if TorchKbNufft receives a k-traj tensor of shape (2, nvis), it will parallelize across the coil dimension, assuming
            # that the k-traj is the same for all coils/channels.
            # interim convert to numpy array because of torch warning about speed
            k_traj = torch.tensor(np.array([vv_radpix, uu_radpix]))
        else:
            # in this case, we are given two tensors of shape (nchan, nvis)
            # first, augment each tensor individually to create a (nbatch, 1, nvis) tensor
            # then, concatenate the tensors along the axis=1 dimension.
            assert (
                uu_radpix.shape[0] == self.nchan
            ), "nchan of uu ({:}) is more than one but different than that used to initialize the NuFFT layer ({:})".format(
                uu_radpix.shape[0], self.nchan
            )
            assert (
                vv_radpix.shape[0] == self.nchan
            ), "nchan of vv ({:}) is more than one but different than that used to initialize the NuFFT layer ({:})".format(
                vv_radpix.shape[0], self.nchan
            )

            uu_radpix_aug = torch.unsqueeze(torch.tensor(uu_radpix), 1)
            vv_radpix_aug = torch.unsqueeze(torch.tensor(vv_radpix), 1)
            # interim convert to numpy array because of torch warning about speed
            k_traj = torch.cat([vv_radpix_aug, uu_radpix_aug], axis=1)
            # if TorchKbNufft receives a k-traj tensor of shape (nbatch, 2, nvis), it will parallelize across the batch dimension

        return k_traj

    def forward(self, cube):
        r"""
        Perform the FFT of the image cube for each channel and interpolate to the ``uu`` and ``vv`` points set at layer initialization. This call should automatically take the best parallelization option as indicated by the shape of the ``uu`` and ``vv`` points.

        Args:
            cube (torch.double tensor): of shape ``(nchan, npix, npix)``). The cube should be a "prepacked" image cube, for example, from :meth:`mpol.images.ImageCube.forward`

        Returns:
            torch.complex tensor: of shape ``(nchan, nvis)``, Fourier samples evaluated corresponding to the ``uu``, ``vv`` points set at initialization.
        """
        # make sure that the nchan assumptions for the ImageCube and the NuFFT setup are the same
        assert (
            cube.shape[0] == self.nchan
        ), "nchan of ImageCube ({:}) is different than that used to initialize NuFFT layer ({:})".format(
            cube.shape[0], self.nchan
        )

        # "unpack" the cube, but leave it flipped
        # NuFFT routine expects a "normal" cube, not an fftshifted one
        shifted = torch.fft.fftshift(cube, dim=(1, 2))

        # convert the cube to a complex type, since this is required by TorchKbNufft
        complexed = shifted.type(torch.complex128)

        # Consider how the similarity of the spatial frequency samples should be treated. We already took care of this on the k_traj side, since we set the shapes. But this also needs to be taken care of on the image side.
        # * If we plan to parallelize using the batch dimension, then we need an image with shape (nchan, 1, npix, npix).
        # * If we plan to parallelize with the coil dimension, then we need an image with shape (1, nchan, npix, npix).
        if self.same_uv:
            # expand the cube to include a batch dimension
            expanded = complexed.unsqueeze(0)
            # now [1, nchan, npix, npix] shape
        else:
            expanded = complexed.unsqueeze(1)
            # now [nchan, 1, npix, npix] shape

        # torchkbnufft uses a [nbatch, ncoil, npix, npix] scheme
        # the cell_size**2 prefactor converts the FFT output to the correct
        # flux units, mirroring FourierCube.forward above.
        if self.sparse_matrices:
            output = self.coords.cell_size**2 * self.nufft_ob(
                expanded, self.k_traj, interp_mats=self.interp_mats
            )
        else:
            output = self.coords.cell_size**2 * self.nufft_ob(expanded, self.k_traj)

        if self.same_uv:
            # nchan took on the ncoil position, so remove the nbatch dimension
            output = torch.squeeze(output, dim=0)
        else:
            # nchan took on the nbatch position, so remove the ncoil dimension
            output = torch.squeeze(output, dim=1)

        return output
def make_fake_data(imageCube, uu, vv, weight):
    r"""
    Create a fake dataset from a supplied :class:`mpol.images.ImageCube`. See :ref:`mock-dataset-label` for more details on how to prepare a generic image for use in an :class:`~mpol.images.ImageCube`.

    The provided visibilities can be 1d for a single continuum channel, or 2d for image cube. If 1d, visibilities will be converted to 2d arrays of shape ``(1, nvis)``.

    Args:
        imageCube (:class:`~mpol.images.ImageCube`): the image layer to put into a fake dataset
        uu (numpy array): array of u spatial frequency coordinates, not including Hermitian pairs. Units of [:math:`\mathrm{k}\lambda`]
        vv (numpy array): array of v spatial frequency coordinates, not including Hermitian pairs. Units of [:math:`\mathrm{k}\lambda`]
        weight (2d numpy array): length array of thermal weights :math:`w_i = 1/\sigma_i^2`. Units of [:math:`1/\mathrm{Jy}^2`]

    Returns:
        (2-tuple): a two tuple of the fake data. The first array is the mock dataset including noise, the second array is the mock dataset without added noise.
    """
    # The NuFFT layer accepts either (nvis,) or (nchan, nvis) baselines, so
    # hand it the arrays exactly as supplied.
    nufft = NuFFT(coords=imageCube.coords, nchan=imageCube.nchan, uu=uu, vv=vv)

    # Promote single-channel inputs to (1, nvis) so the noise draw below
    # matches the (nchan, nvis) visibilities returned by the NuFFT.
    if uu.ndim == 1:
        uu = np.atleast_2d(uu)
        vv = np.atleast_2d(vv)
        weight = np.atleast_2d(weight)

    # Noise-free model visibilities, shape (nchan, nvis).
    vis_noiseless = nufft.forward(imageCube.forward()).detach().numpy()

    # Thermal noise: independent Gaussian draws for the real and imaginary
    # parts, each with per-point standard deviation sigma = 1/sqrt(weight).
    sigma = 1 / np.sqrt(weight)
    noise = np.random.normal(
        loc=0, scale=sigma, size=uu.shape
    ) + 1.0j * np.random.normal(loc=0, scale=sigma, size=uu.shape)

    return vis_noiseless + noise, vis_noiseless
/Firefly_III_API_Client-2.0.5.0-py3-none-any.whl/firefly_iii_client/paths/v1_budgets/post.py | from dataclasses import dataclass
import typing_extensions
import urllib3
from urllib3._collections import HTTPHeaderDict
from firefly_iii_client import api_client, exceptions
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import typing_extensions # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from firefly_iii_client import schemas # noqa: F401
from firefly_iii_client.model.budget_store import BudgetStore
from firefly_iii_client.model.budget_single import BudgetSingle
from firefly_iii_client.model.validation_error import ValidationError
from firefly_iii_client.model.unauthenticated import Unauthenticated
from firefly_iii_client.model.bad_request import BadRequest
from firefly_iii_client.model.internal_exception import InternalException
from firefly_iii_client.model.not_found import NotFound
from . import path
# Header params
# Schema for the optional ``X-Trace-Id`` request header (a UUID).
XTraceIdSchema = schemas.UUIDSchema
# No header parameters are required for this endpoint.
RequestRequiredHeaderParams = typing_extensions.TypedDict(
    'RequestRequiredHeaderParams',
    {
    }
)
# Optional header parameters accepted by this endpoint.
RequestOptionalHeaderParams = typing_extensions.TypedDict(
    'RequestOptionalHeaderParams',
    {
        'X-Trace-Id': typing.Union[XTraceIdSchema, str, uuid.UUID, ],
    },
    total=False
)


class RequestHeaderParams(RequestRequiredHeaderParams, RequestOptionalHeaderParams):
    # Combined required + optional header parameters for this endpoint.
    pass


# Serialization descriptor for the ``X-Trace-Id`` header (simple style).
request_header_x_trace_id = api_client.HeaderParameter(
    name="X-Trace-Id",
    style=api_client.ParameterStyle.SIMPLE,
    schema=XTraceIdSchema,
)
# body param
SchemaForRequestBodyApplicationJson = BudgetStore
SchemaForRequestBodyApplicationXWwwFormUrlencoded = BudgetStore


# The required request body (a BudgetStore payload) may be sent either as
# JSON or as form-encoded data.
request_body_budget_store = api_client.RequestBody(
    content={
        'application/json': api_client.MediaType(
            schema=SchemaForRequestBodyApplicationJson),
        'application/x-www-form-urlencoded': api_client.MediaType(
            schema=SchemaForRequestBodyApplicationXWwwFormUrlencoded),
    },
    required=True,
)
# Authentication schemes this endpoint requires.
_auth = [
    'firefly_iii_auth',
]
# 200: budget stored successfully; body is the new budget wrapped in BudgetSingle
SchemaFor200ResponseBodyApplicationVndApijson = BudgetSingle
@dataclass
class ApiResponseFor200(api_client.ApiResponse):
    # raw urllib3 response
    response: urllib3.HTTPResponse
    # deserialized response body
    body: typing.Union[
        SchemaFor200ResponseBodyApplicationVndApijson,
    ]
    headers: schemas.Unset = schemas.unset
_response_for_200 = api_client.OpenApiResponse(
    response_cls=ApiResponseFor200,
    content={
        'application/vnd.api+json': api_client.MediaType(
            schema=SchemaFor200ResponseBodyApplicationVndApijson),
    },
)
# 400: malformed request
SchemaFor400ResponseBodyApplicationJson = BadRequest
@dataclass
class ApiResponseFor400(api_client.ApiResponse):
    # raw urllib3 response
    response: urllib3.HTTPResponse
    # deserialized error body
    body: typing.Union[
        SchemaFor400ResponseBodyApplicationJson,
    ]
    headers: schemas.Unset = schemas.unset
_response_for_400 = api_client.OpenApiResponse(
    response_cls=ApiResponseFor400,
    content={
        'application/json': api_client.MediaType(
            schema=SchemaFor400ResponseBodyApplicationJson),
    },
)
# 401: missing or invalid credentials
SchemaFor401ResponseBodyApplicationJson = Unauthenticated
@dataclass
class ApiResponseFor401(api_client.ApiResponse):
    # raw urllib3 response
    response: urllib3.HTTPResponse
    # deserialized error body
    body: typing.Union[
        SchemaFor401ResponseBodyApplicationJson,
    ]
    headers: schemas.Unset = schemas.unset
_response_for_401 = api_client.OpenApiResponse(
    response_cls=ApiResponseFor401,
    content={
        'application/json': api_client.MediaType(
            schema=SchemaFor401ResponseBodyApplicationJson),
    },
)
# 404: resource not found
SchemaFor404ResponseBodyApplicationJson = NotFound
@dataclass
class ApiResponseFor404(api_client.ApiResponse):
    # raw urllib3 response
    response: urllib3.HTTPResponse
    # deserialized error body
    body: typing.Union[
        SchemaFor404ResponseBodyApplicationJson,
    ]
    headers: schemas.Unset = schemas.unset
_response_for_404 = api_client.OpenApiResponse(
    response_cls=ApiResponseFor404,
    content={
        'application/json': api_client.MediaType(
            schema=SchemaFor404ResponseBodyApplicationJson),
    },
)
# 422: request was well-formed but failed validation
SchemaFor422ResponseBodyApplicationJson = ValidationError
@dataclass
class ApiResponseFor422(api_client.ApiResponse):
    # raw urllib3 response
    response: urllib3.HTTPResponse
    # deserialized error body
    body: typing.Union[
        SchemaFor422ResponseBodyApplicationJson,
    ]
    headers: schemas.Unset = schemas.unset
_response_for_422 = api_client.OpenApiResponse(
    response_cls=ApiResponseFor422,
    content={
        'application/json': api_client.MediaType(
            schema=SchemaFor422ResponseBodyApplicationJson),
    },
)
# 500: server-side failure
SchemaFor500ResponseBodyApplicationJson = InternalException
@dataclass
class ApiResponseFor500(api_client.ApiResponse):
    # raw urllib3 response
    response: urllib3.HTTPResponse
    # deserialized error body
    body: typing.Union[
        SchemaFor500ResponseBodyApplicationJson,
    ]
    headers: schemas.Unset = schemas.unset
_response_for_500 = api_client.OpenApiResponse(
    response_cls=ApiResponseFor500,
    content={
        'application/json': api_client.MediaType(
            schema=SchemaFor500ResponseBodyApplicationJson),
    },
)
# maps HTTP status code (as a string) to its response deserializer
_status_code_to_response = {
    '200': _response_for_200,
    '400': _response_for_400,
    '401': _response_for_401,
    '404': _response_for_404,
    '422': _response_for_422,
    '500': _response_for_500,
}
# media types this endpoint can produce; sent via the Accept header
_all_accept_content_types = (
    'application/vnd.api+json',
    'application/json',
)
class BaseApi(api_client.Api):
    """Shared implementation of the "store budget" POST endpoint.

    The ``@typing.overload`` stubs below exist only so type checkers can
    narrow the return type based on ``content_type`` and
    ``skip_deserialization``; the actual logic lives in the final
    ``_store_budget_oapg`` definition.
    """
    @typing.overload
    def _store_budget_oapg(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: typing_extensions.Literal["application/json"] = ...,
        header_params: RequestHeaderParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str, ...] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor200,
    ]: ...
    @typing.overload
    def _store_budget_oapg(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationXWwwFormUrlencoded,],
        content_type: typing_extensions.Literal["application/x-www-form-urlencoded"],
        header_params: RequestHeaderParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str, ...] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor200,
    ]: ...
    @typing.overload
    def _store_budget_oapg(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,SchemaForRequestBodyApplicationXWwwFormUrlencoded,],
        content_type: str = ...,
        header_params: RequestHeaderParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str, ...] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor200,
    ]: ...
    @typing.overload
    def _store_budget_oapg(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,SchemaForRequestBodyApplicationXWwwFormUrlencoded,],
        skip_deserialization: typing_extensions.Literal[True],
        content_type: str = ...,
        header_params: RequestHeaderParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str, ...] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
    ) -> api_client.ApiResponseWithoutDeserialization: ...
    @typing.overload
    def _store_budget_oapg(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,SchemaForRequestBodyApplicationXWwwFormUrlencoded,],
        content_type: str = ...,
        header_params: RequestHeaderParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str, ...] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = ...,
    ) -> typing.Union[
        ApiResponseFor200,
        api_client.ApiResponseWithoutDeserialization,
    ]: ...
    def _store_budget_oapg(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,SchemaForRequestBodyApplicationXWwwFormUrlencoded,],
        content_type: str = 'application/json',
        header_params: RequestHeaderParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str, ...] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = False,
    ):
        """
        Store a new budget
        :param body: request payload, serialized according to content_type
        :param content_type: media type used to serialize the request body
        :param header_params: optional typed dict carrying the X-Trace-Id header
        :param accept_content_types: media types to send in the Accept header
        :param stream: if true the urllib3 response is streamed
        :param timeout: request timeout forwarded to urllib3
        :param skip_deserialization: If true then api_response.response will be set but
            api_response.body and api_response.headers will not be deserialized into schema
            class instances
        """
        # validate that header_params only carries declared keys/types
        self._verify_typed_dict_inputs_oapg(RequestHeaderParams, header_params)
        used_path = path.value
        _headers = HTTPHeaderDict()
        # serialize each known header parameter that was actually provided
        for parameter in (
            request_header_x_trace_id,
        ):
            parameter_data = header_params.get(parameter.name, schemas.unset)
            if parameter_data is schemas.unset:
                continue
            serialized_data = parameter.serialize(parameter_data)
            _headers.extend(serialized_data)
        # TODO add cookie handling
        if accept_content_types:
            for accept_content_type in accept_content_types:
                _headers.add('Accept', accept_content_type)
        if body is schemas.unset:
            raise exceptions.ApiValueError(
                'The required body parameter has an invalid value of: unset. Set a valid value instead')
        _fields = None
        _body = None
        # the body serializer yields either urlencoded 'fields' or a raw 'body'
        serialized_data = request_body_budget_store.serialize(body, content_type)
        _headers.add('Content-Type', content_type)
        if 'fields' in serialized_data:
            _fields = serialized_data['fields']
        elif 'body' in serialized_data:
            _body = serialized_data['body']
        response = self.api_client.call_api(
            resource_path=used_path,
            method='post'.upper(),
            headers=_headers,
            fields=_fields,
            body=_body,
            auth_settings=_auth,
            stream=stream,
            timeout=timeout,
        )
        if skip_deserialization:
            api_response = api_client.ApiResponseWithoutDeserialization(response=response)
        else:
            # pick the deserializer matching the HTTP status code, if declared
            response_for_status = _status_code_to_response.get(str(response.status))
            if response_for_status:
                api_response = response_for_status.deserialize(response, self.api_client.configuration)
            else:
                api_response = api_client.ApiResponseWithoutDeserialization(response=response)
        # non-2xx statuses raise, with the (possibly deserialized) response attached
        if not 200 <= response.status <= 299:
            raise exceptions.ApiException(
                status=response.status,
                reason=response.reason,
                api_response=api_response
            )
        return api_response
class StoreBudget(BaseApi):
    # this class is used by api classes that refer to endpoints with operationId fn names
    @typing.overload
    def store_budget(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: typing_extensions.Literal["application/json"] = ...,
        header_params: RequestHeaderParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str, ...] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor200,
    ]: ...
    @typing.overload
    def store_budget(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationXWwwFormUrlencoded,],
        content_type: typing_extensions.Literal["application/x-www-form-urlencoded"],
        header_params: RequestHeaderParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str, ...] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor200,
    ]: ...
    @typing.overload
    def store_budget(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,SchemaForRequestBodyApplicationXWwwFormUrlencoded,],
        content_type: str = ...,
        header_params: RequestHeaderParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str, ...] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor200,
    ]: ...
    @typing.overload
    def store_budget(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,SchemaForRequestBodyApplicationXWwwFormUrlencoded,],
        skip_deserialization: typing_extensions.Literal[True],
        content_type: str = ...,
        header_params: RequestHeaderParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str, ...] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
    ) -> api_client.ApiResponseWithoutDeserialization: ...
    @typing.overload
    def store_budget(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,SchemaForRequestBodyApplicationXWwwFormUrlencoded,],
        content_type: str = ...,
        header_params: RequestHeaderParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str, ...] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = ...,
    ) -> typing.Union[
        ApiResponseFor200,
        api_client.ApiResponseWithoutDeserialization,
    ]: ...
    def store_budget(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,SchemaForRequestBodyApplicationXWwwFormUrlencoded,],
        content_type: str = 'application/json',
        header_params: RequestHeaderParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str, ...] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = False,
    ):
        """Store a new budget; delegates to BaseApi._store_budget_oapg."""
        return self._store_budget_oapg(
            body=body,
            header_params=header_params,
            content_type=content_type,
            accept_content_types=accept_content_types,
            stream=stream,
            timeout=timeout,
            skip_deserialization=skip_deserialization
        )
class ApiForpost(BaseApi):
    # this class is used by api classes that refer to endpoints by path and http method names
    @typing.overload
    def post(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: typing_extensions.Literal["application/json"] = ...,
        header_params: RequestHeaderParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str, ...] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor200,
    ]: ...
    @typing.overload
    def post(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationXWwwFormUrlencoded,],
        content_type: typing_extensions.Literal["application/x-www-form-urlencoded"],
        header_params: RequestHeaderParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str, ...] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor200,
    ]: ...
    @typing.overload
    def post(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,SchemaForRequestBodyApplicationXWwwFormUrlencoded,],
        content_type: str = ...,
        header_params: RequestHeaderParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str, ...] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor200,
    ]: ...
    @typing.overload
    def post(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,SchemaForRequestBodyApplicationXWwwFormUrlencoded,],
        skip_deserialization: typing_extensions.Literal[True],
        content_type: str = ...,
        header_params: RequestHeaderParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str, ...] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
    ) -> api_client.ApiResponseWithoutDeserialization: ...
    @typing.overload
    def post(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,SchemaForRequestBodyApplicationXWwwFormUrlencoded,],
        content_type: str = ...,
        header_params: RequestHeaderParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str, ...] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = ...,
    ) -> typing.Union[
        ApiResponseFor200,
        api_client.ApiResponseWithoutDeserialization,
    ]: ...
    def post(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,SchemaForRequestBodyApplicationXWwwFormUrlencoded,],
        content_type: str = 'application/json',
        header_params: RequestHeaderParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str, ...] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = False,
    ):
        """POST to this endpoint's path; delegates to BaseApi._store_budget_oapg."""
        return self._store_budget_oapg(
            body=body,
            header_params=header_params,
            content_type=content_type,
            accept_content_types=accept_content_types,
            stream=stream,
            timeout=timeout,
            skip_deserialization=skip_deserialization
        )
// dojox/encoding/digests/MD5 — minified AMD build of MD5 and HMAC-MD5 (RFC 1321 / RFC 2104).
// _1 is the dojox.encoding.digests base module (word/string conversions, addWords, outputTypes).
define("dojox/encoding/digests/MD5",["./_base"],function(_1){
// bits per input character (8-bit strings assumed)
var _2=8;
// 32-bit rotate-left
function R(n,c){
return (n<<c)|(n>>>(32-c));
};
// common per-step operation: a + F(...) + x + t, rotated by s, plus b (RFC 1321)
function C(q,a,b,x,s,t){
return _1.addWords(R(_1.addWords(_1.addWords(a,q),_1.addWords(x,t)),s),b);
};
// round 1 step, F(b,c,d) = (b&c)|(~b&d)
function FF(a,b,c,d,x,s,t){
return C((b&c)|((~b)&d),a,b,x,s,t);
};
// round 2 step, G(b,c,d) = (b&d)|(c&~d)
function GG(a,b,c,d,x,s,t){
return C((b&d)|(c&(~d)),a,b,x,s,t);
};
// round 3 step, H(b,c,d) = b^c^d
function HH(a,b,c,d,x,s,t){
return C(b^c^d,a,b,x,s,t);
};
// round 4 step, I(b,c,d) = c^(b|~d)
function II(a,b,c,d,x,s,t){
return C(c^(b|(~d)),a,b,x,s,t);
};
// core digest: x is an array of little-endian 32-bit words, _4 the message bit length;
// returns the 128-bit state as four words
function _3(x,_4){
// append the padding bit and the 64-bit length (Merkle–Damgård strengthening)
x[_4>>5]|=128<<((_4)%32);
x[(((_4+64)>>>9)<<4)+14]=_4;
// RFC 1321 initial state A, B, C, D
var a=1732584193;
var b=-271733879;
var c=-1732584194;
var d=271733878;
// process the message in 512-bit (16-word) chunks
for(var i=0;i<x.length;i+=16){
var _5=a;
var _6=b;
var _7=c;
var _8=d;
// round 1 (16 FF steps)
a=FF(a,b,c,d,x[i+0],7,-680876936);
d=FF(d,a,b,c,x[i+1],12,-389564586);
c=FF(c,d,a,b,x[i+2],17,606105819);
b=FF(b,c,d,a,x[i+3],22,-1044525330);
a=FF(a,b,c,d,x[i+4],7,-176418897);
d=FF(d,a,b,c,x[i+5],12,1200080426);
c=FF(c,d,a,b,x[i+6],17,-1473231341);
b=FF(b,c,d,a,x[i+7],22,-45705983);
a=FF(a,b,c,d,x[i+8],7,1770035416);
d=FF(d,a,b,c,x[i+9],12,-1958414417);
c=FF(c,d,a,b,x[i+10],17,-42063);
b=FF(b,c,d,a,x[i+11],22,-1990404162);
a=FF(a,b,c,d,x[i+12],7,1804603682);
d=FF(d,a,b,c,x[i+13],12,-40341101);
c=FF(c,d,a,b,x[i+14],17,-1502002290);
b=FF(b,c,d,a,x[i+15],22,1236535329);
// round 2 (16 GG steps)
a=GG(a,b,c,d,x[i+1],5,-165796510);
d=GG(d,a,b,c,x[i+6],9,-1069501632);
c=GG(c,d,a,b,x[i+11],14,643717713);
b=GG(b,c,d,a,x[i+0],20,-373897302);
a=GG(a,b,c,d,x[i+5],5,-701558691);
d=GG(d,a,b,c,x[i+10],9,38016083);
c=GG(c,d,a,b,x[i+15],14,-660478335);
b=GG(b,c,d,a,x[i+4],20,-405537848);
a=GG(a,b,c,d,x[i+9],5,568446438);
d=GG(d,a,b,c,x[i+14],9,-1019803690);
c=GG(c,d,a,b,x[i+3],14,-187363961);
b=GG(b,c,d,a,x[i+8],20,1163531501);
a=GG(a,b,c,d,x[i+13],5,-1444681467);
d=GG(d,a,b,c,x[i+2],9,-51403784);
c=GG(c,d,a,b,x[i+7],14,1735328473);
b=GG(b,c,d,a,x[i+12],20,-1926607734);
// round 3 (16 HH steps)
a=HH(a,b,c,d,x[i+5],4,-378558);
d=HH(d,a,b,c,x[i+8],11,-2022574463);
c=HH(c,d,a,b,x[i+11],16,1839030562);
b=HH(b,c,d,a,x[i+14],23,-35309556);
a=HH(a,b,c,d,x[i+1],4,-1530992060);
d=HH(d,a,b,c,x[i+4],11,1272893353);
c=HH(c,d,a,b,x[i+7],16,-155497632);
b=HH(b,c,d,a,x[i+10],23,-1094730640);
a=HH(a,b,c,d,x[i+13],4,681279174);
d=HH(d,a,b,c,x[i+0],11,-358537222);
c=HH(c,d,a,b,x[i+3],16,-722521979);
b=HH(b,c,d,a,x[i+6],23,76029189);
a=HH(a,b,c,d,x[i+9],4,-640364487);
d=HH(d,a,b,c,x[i+12],11,-421815835);
c=HH(c,d,a,b,x[i+15],16,530742520);
b=HH(b,c,d,a,x[i+2],23,-995338651);
// round 4 (16 II steps)
a=II(a,b,c,d,x[i+0],6,-198630844);
d=II(d,a,b,c,x[i+7],10,1126891415);
c=II(c,d,a,b,x[i+14],15,-1416354905);
b=II(b,c,d,a,x[i+5],21,-57434055);
a=II(a,b,c,d,x[i+12],6,1700485571);
d=II(d,a,b,c,x[i+3],10,-1894986606);
c=II(c,d,a,b,x[i+10],15,-1051523);
b=II(b,c,d,a,x[i+1],21,-2054922799);
a=II(a,b,c,d,x[i+8],6,1873313359);
d=II(d,a,b,c,x[i+15],10,-30611744);
c=II(c,d,a,b,x[i+6],15,-1560198380);
b=II(b,c,d,a,x[i+13],21,1309151649);
a=II(a,b,c,d,x[i+4],6,-145523070);
d=II(d,a,b,c,x[i+11],10,-1120210379);
c=II(c,d,a,b,x[i+2],15,718787259);
b=II(b,c,d,a,x[i+9],21,-343485551);
// fold this chunk's result into the running state
a=_1.addWords(a,_5);
b=_1.addWords(b,_6);
c=_1.addWords(c,_7);
d=_1.addWords(d,_8);
}
return [a,b,c,d];
};
// HMAC-MD5 (RFC 2104): _a is the message, _b the key; returns the digest word array
function _9(_a,_b){
var wa=_1.stringToWord(_b);
// keys longer than one 512-bit block are hashed down first
if(wa.length>16){
wa=_3(wa,_b.length*_2);
}
// inner (0x36363636) and outer (0x5c5c5c5c) padded keys
var l=[],r=[];
for(var i=0;i<16;i++){
l[i]=wa[i]^909522486;
r[i]=wa[i]^1549556828;
}
// H(outerKey || H(innerKey || message)); 512 = key block bits, 640 = 512 + 128-bit inner hash
var h=_3(l.concat(_1.stringToWord(_a)),512+_a.length*_2);
return _3(r.concat(h),640);
};
// public API: MD5(data, outputType) -> digest encoded per outputType (Base64 by default)
_1.MD5=function(_c,_d){
var _e=_d||_1.outputTypes.Base64;
var wa=_3(_1.stringToWord(_c),_c.length*_2);
switch(_e){
case _1.outputTypes.Raw:
return wa;
case _1.outputTypes.Hex:
return _1.wordToHex(wa);
case _1.outputTypes.String:
return _1.wordToString(wa);
default:
return _1.wordToBase64(wa);
}
};
// keyed digest: HMAC-MD5 of _f with key, encoded per _10 (Base64 by default)
_1.MD5._hmac=function(_f,key,_10){
var out=_10||_1.outputTypes.Base64;
var wa=_9(_f,key);
switch(out){
case _1.outputTypes.Raw:
return wa;
case _1.outputTypes.Hex:
return _1.wordToHex(wa);
case _1.outputTypes.String:
return _1.wordToString(wa);
default:
return _1.wordToBase64(wa);
}
};
return _1.MD5;
});
# cli360monitoring/lib/wptoolkit.py (from 360monitoringcli 1.0.19)
from prettytable import PrettyTable
from .config import Config
from .servers import Servers
class WPToolkit(object):
    """Aggregate WP Toolkit metrics from all monitored servers and print them.

    Builds a PrettyTable with one row per server that reports WP Toolkit data
    plus a summary footer row that sums every numeric column.
    """

    def __init__(self, config: Config):
        self.config = config
        self.table = PrettyTable(field_names=['ID', 'Server name', 'WP sites', 'Alive', 'Outdated', 'Outdated PHP', 'Broken'])
        self.table.align['ID'] = 'l'
        self.table.align['Server name'] = 'l'
        self.table.min_width['Server name'] = 24
        self.table.align['WP sites'] = 'r'
        self.table.align['Alive'] = 'r'
        self.table.align['Outdated'] = 'r'
        self.table.align['Outdated PHP'] = 'r'
        self.table.align['Broken'] = 'r'
        # running totals across all servers that have WP Toolkit installed
        self.num_servers_with_wpt = 0
        self.sum_wp_sites_total = 0
        self.sum_wp_sites_alive = 0
        self.sum_wp_sites_outdated = 0
        self.sum_wp_sites_outdated_php = 0
        self.sum_wp_sites_broken = 0

    def printFooter(self, sort: str = '', reverse: bool = False, limit: int = 0):
        """Print the table with a summary footer row.

        The summary row is rendered once to capture its text, then removed
        again so it does not participate in sorting or the row limit, and is
        re-printed after the data rows.

        :param sort: column name, or 1-based column index as a decimal string
        :param reverse: sort in descending order
        :param limit: if > 0, print at most this many data rows
        """
        # add summary row as table footer
        self.table.add_row(['', 'Sum of ' + str(self.num_servers_with_wpt) + ' servers', self.sum_wp_sites_total, self.sum_wp_sites_alive, self.sum_wp_sites_outdated, self.sum_wp_sites_outdated_php, self.sum_wp_sites_broken])
        if self.config.hide_ids:
            self.table.del_column('ID')
        # Get string to be printed and create list of elements separated by \n
        list_of_table_lines = self.table.get_string().split('\n')
        # remember summary row
        summary_line = list_of_table_lines[-2]
        # remove summary row again to allow sorting and limiting
        self.table.del_row(len(self.table.rows)-1)
        if sort:
            # if sort contains the column index instead of the column name, get the column name instead
            if sort.isdecimal():
                # field_names reflects a possible del_column('ID') above; parsing
                # the CSV dump instead breaks for the last column, whose first-row
                # field carries the row separator and the following rows
                sort = self.table.field_names[int(sort) - 1]
        else:
            sort = None
        if limit > 0:
            list_of_table_lines = self.table.get_string(sortby=sort, reversesort=reverse, start=0, end=limit).split('\n')
        else:
            list_of_table_lines = self.table.get_string(sortby=sort, reversesort=reverse).split('\n')
        # Sorting by multiple columns could be done like this
        # list_of_table_lines = self.table.get_string(sortby=("Col Name 1", "Col Name 2")), reversesort=reverse).split('\n')
        # Print the table followed by the summary row and a closing border line
        print('\n'.join(list_of_table_lines))
        print(summary_line)
        print(list_of_table_lines[0])

    def print(self, format: str = 'table', issuesOnly: bool = False, sort: str = '', reverse: bool = False, limit: int = 0):
        """Iterate through all servers and aggregate metrics for those that have WP Toolkit installed"""
        servers = Servers(self.config)
        if servers.fetchData():
            for server in servers.servers:
                id = server['id']
                name = server['name']
                last_data = server['last_data']
                if last_data and 'wp-toolkit' in last_data:
                    wpt_data = last_data['wp-toolkit']
                    if wpt_data:
                        wp_sites_total = wpt_data['WordPress Websites']
                        wp_sites_alive = wpt_data['WordPress Websites - Alive']
                        wp_sites_outdated = wpt_data['WordPress Websites - Outdated']
                        wp_sites_outdated_php = wpt_data['WordPress Websites - Outdated PHP']
                        wp_sites_broken = wpt_data['WordPress Websites - Broken']
                        self.num_servers_with_wpt += 1
                        self.sum_wp_sites_total += wp_sites_total
                        self.sum_wp_sites_alive += wp_sites_alive
                        self.sum_wp_sites_outdated += wp_sites_outdated
                        self.sum_wp_sites_outdated_php += wp_sites_outdated_php
                        self.sum_wp_sites_broken += wp_sites_broken
                        # in issues-only mode, skip servers without any problem
                        if wp_sites_outdated > 0 or wp_sites_outdated_php > 0 or wp_sites_broken > 0 or not issuesOnly:
                            self.table.add_row([id, name, wp_sites_total, wp_sites_alive, wp_sites_outdated, wp_sites_outdated_php, wp_sites_broken])
            if (format == 'table'):
                self.printFooter(sort=sort, reverse=reverse, limit=limit)
            elif (format == 'csv'):
                print(self.table.get_csv_string(delimiter=self.config.delimiter))
# grid2op/Reward/_alertCostScore.py (from Grid2Op 1.9.3)
import numpy as np
from grid2op.Reward.baseReward import BaseReward
from grid2op.Reward._newRenewableSourcesUsageScore import _NewRenewableSourcesUsageScore
from grid2op.dtypes import dt_float
from grid2op.Exceptions import Grid2OpException
import warnings
#TODO
# Test this class comprehensively if usage is revived.
# Was originally thought for use in L2RPN 2023 Competition, but eventually not selected for use.
# Tests were disregarded at some stage of these developments.
class _AlertCostScore(BaseReward):
    """
    INTERNAL
    .. danger:: This function is not used and not tested
    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
    It **must not** serve as a reward. This scored needs to be **MAXIMIZED**,
    as it is a negative! Also, this "reward" is not scaled or anything. Use it as your
    own risk.
    Implemented as a reward to make it easier to use in the context of the L2RPN competitions, this reward is based on the "alert feature"
    where the agent is asked to send information about potential line overload issue on the grid after unpredictable powerline
    disconnection (attack of the opponent).
    The alerts are assessed once per attack. In this scheme, this "reward" computed the assistant"cost score", which penalized the number of alerts
    the assistant have produced during an episode. It should not be used to train an agent.
    For information, it will not be used for the L2RPN_IDF_2023 competition.
    """
    def __init__(self, logger=None):
        BaseReward.__init__(self, logger=logger)
        # score range is normalized to [-1, 1]
        self.reward_min = dt_float(-1.0)
        self.reward_max = dt_float(1.0)
        self._is_simul_env = False
        # filled lazily: alert opportunities in the episode / alerts actually raised
        self.total_nb_alertes_possible = None
        self.total_nb_alerts = None
        warnings.warn("This class is not tested, use it with care")
    def initialize(self, env):
        # the alert feature must be enabled on the environment
        if not env.dim_alerts > 0:
            raise Grid2OpException(
                'Impossible to use the "_AlertCostScore" with an environment for which the Assistant feature '
                'is disabled. Please make sure "env.dim_alerts" is > 0 or '
                "change the reward class with `grid2op.make(..., reward_class=AnyOtherReward)`"
            )
        self.reset(env)
    def reset(self, env):
        # no scoring inside simulated (forecast) environments
        self._is_simul_env = self.is_simulated_env(env)
        if self._is_simul_env:
            return
        #self.total_nb_alertes_possible = (env.chronics_handler.max_timestep() + 1) * (env.dim_alerts)
        self.total_nb_alerts = 0
    def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
        if self._is_simul_env:
            return dt_float(0.)
        if is_done:
            # at episode end: percentage of alert opportunities NOT used,
            # passed through the sur-linear penalization curve
            self.total_nb_alertes_possible = env.nb_time_step * env.dim_alerts
            ratio_nb_alerts = 100 * ( 1 - self.total_nb_alerts / self.total_nb_alertes_possible)
            return self._penalization_fun(ratio_nb_alerts)
        else:
            # during the episode: only track the alerts raised so far
            self.total_nb_alerts = env._total_number_of_alert
            return dt_float(0.)
    @staticmethod
    def _penalization_fun(x, center=80):
        # reuse the sur-linear curtailment scoring curve, centered at `center` percent
        return _NewRenewableSourcesUsageScore._surlinear_func_curtailment(x=x, center=center)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.